xref: /xnu-8796.101.5/osfmk/kern/thread_act.c (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Copyright (c) 1993 The University of Utah and
33  * the Center for Software Science (CSS).  All rights reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright
37  * notice and this permission notice appear in all copies of the
38  * software, derivative works or modified versions, and any portions
39  * thereof, and that both notices appear in supporting documentation.
40  *
41  * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * CSS requests users of this software to return to [email protected] any
46  * improvements that they make and grant CSS redistribution rights.
47  *
48  *	Author:	Bryan Ford, University of Utah CSS
49  *
50  *	Thread management routines
51  */
52 
53 #include <mach/mach_types.h>
54 #include <mach/kern_return.h>
55 #include <mach/thread_act_server.h>
56 #include <mach/thread_act.h>
57 
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/processor.h>
75 #include <kern/restartable.h>
76 #include <kern/timer.h>
77 #include <kern/affinity.h>
78 #include <kern/host.h>
79 #include <kern/exc_guard.h>
80 #include <ipc/port.h>
81 #include <mach/arm/thread_status.h>
82 
83 
84 #include <stdatomic.h>
85 
86 #include <security/mac_mach_internal.h>
87 #include <libkern/coreanalytics/coreanalytics.h>
88 
89 static void act_abort(thread_t thread);
90 
91 static void thread_suspended(void *arg, wait_result_t result);
92 static void thread_set_apc_ast(thread_t thread);
93 static void thread_set_apc_ast_locked(thread_t thread);
94 
95 extern void proc_name(int pid, char * buf, int size);
96 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
97 
98 CA_EVENT(thread_set_state,
99     CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc);
100 
101 static void
send_thread_set_state_telemetry(void)102 send_thread_set_state_telemetry(void)
103 {
104 	ca_event_t ca_event = CA_EVENT_ALLOCATE(thread_set_state);
105 	CA_EVENT_TYPE(thread_set_state) * event = ca_event->data;
106 
107 	proc_name(task_pid(current_task()), (char *) &event->current_proc, CA_PROCNAME_LEN);
108 
109 	CA_EVENT_SEND(ca_event);
110 }
111 
112 /* bootarg to create lightweight corpse for thread set state lockdown */
113 TUNABLE(bool, tss_should_crash, "tss_should_crash", true);
114 
115 static inline boolean_t
thread_set_state_allowed(thread_t thread,int flavor)116 thread_set_state_allowed(thread_t thread, int flavor)
117 {
118 	/* platform binaries must have entitlement - all others ok */
119 	if ((task_ro_flags_get(current_task()) & TFRO_PLATFORM)
120 	    && !(thread->options & TH_IN_MACH_EXCEPTION)        /* Allowed for now - rdar://103085786 */
121 	    && FLAVOR_MODIFIES_CORE_CPU_REGISTERS(flavor)       /* only care about locking down PC/LR */
122 #if CONFIG_ROSETTA
123 	    && !task_is_translated(get_threadtask(thread))      /* Ignore translated tasks */
124 #endif /* CONFIG_ROSETTA */
125 	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
126 	    && !IOCurrentTaskHasEntitlement("com.apple.private.cs.debugger")    /* Temporary hack - rdar://104898327 */
127 	    && !IOCurrentTaskHasEntitlement("com.apple.security.cs.debugger")   /* Temporary hack - rdar://104898327 */
128 	    && tss_should_crash
129 	    ) {
130 		/* fatal crash */
131 		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
132 		send_thread_set_state_telemetry();
133 		return FALSE;
134 	}
135 
136 	return TRUE;
137 }
138 
139 /*
140  * Internal routine to mark a thread as started.
141  * Always called with the thread mutex locked.
142  */
143 void
thread_start(thread_t thread)144 thread_start(
145 	thread_t                        thread)
146 {
147 	clear_wait(thread, THREAD_AWAKENED);
148 	thread->started = TRUE;
149 }
150 
151 /*
152  * Internal routine to mark a thread as waiting
153  * right after it has been created.  The caller
154  * is responsible to call wakeup()/thread_wakeup()
155  * or thread_terminate() to get it going.
156  *
157  * Always called with the thread mutex locked.
158  *
159  * Task and task_threads mutexes also held
160  * (so nobody can set the thread running before
161  * this point)
162  *
163  * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
164  * to allow termination from this point forward.
165  */
void
thread_start_in_assert_wait(
	thread_t                        thread,
	event_t             event,
	wait_interrupt_t    interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	/* Lock order: splsched, waitq lock, then the (inner) thread lock. */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
203 
204 /*
205  * Internal routine to terminate a thread.
206  * Sometimes called with task already locked.
207  *
208  * If thread is on core, cause AST check immediately;
209  * Otherwise, let the thread continue running in kernel
210  * until it hits AST.
211  */
kern_return_t
thread_terminate_internal(
	thread_t                        thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Arm the abort/AST machinery so the thread finishes termination itself. */
		act_abort(thread);

		if (thread->started) {
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* Never-started thread: start it so it can run to its termination. */
			thread_start(thread);
		}
	} else {
		/* Someone already deactivated this thread. */
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* For a remote target, wait until it is no longer running before returning. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
249 
kern_return_t
thread_terminate(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for a self-terminating kernel thread. */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
285 
286 /*
287  * [MIG Call] Terminate a thread.
288  *
289  * Cannot be used on threads managed by pthread.
290  */
291 kern_return_t
thread_terminate_from_user(thread_t thread)292 thread_terminate_from_user(
293 	thread_t                thread)
294 {
295 	if (thread == THREAD_NULL) {
296 		return KERN_INVALID_ARGUMENT;
297 	}
298 
299 	if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
300 		return KERN_DENIED;
301 	}
302 
303 	return thread_terminate(thread);
304 }
305 
306 /*
307  * Terminate a thread with pinned control port.
308  *
309  * Can only be used on threads managed by pthread. Exported in pthread_kern.
310  */
311 kern_return_t
thread_terminate_pinned(thread_t thread)312 thread_terminate_pinned(
313 	thread_t                thread)
314 {
315 	task_t task;
316 
317 	if (thread == THREAD_NULL) {
318 		return KERN_INVALID_ARGUMENT;
319 	}
320 
321 	task = get_threadtask(thread);
322 
323 
324 	assert(task != kernel_task);
325 	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));
326 
327 	thread_mtx_lock(thread);
328 	if (task_is_pinned(task) && thread->active) {
329 		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
330 	}
331 	thread_mtx_unlock(thread);
332 
333 	kern_return_t result = thread_terminate_internal(thread);
334 	return result;
335 }
336 
337 /*
338  * Suspend execution of the specified thread.
339  * This is a recursive-style suspension of the thread, a count of
340  * suspends is maintained.
341  *
342  * Called with thread mutex held.
343  */
344 void
thread_hold(thread_t thread)345 thread_hold(thread_t thread)
346 {
347 	if (thread->suspend_count++ == 0) {
348 		thread_set_apc_ast(thread);
349 		assert(thread->suspend_parked == FALSE);
350 	}
351 }
352 
353 /*
354  * Decrement internal suspension count, setting thread
355  * runnable when count falls to zero.
356  *
357  * Because the wait is abortsafe, we can't be guaranteed that the thread
358  * is currently actually waiting even if suspend_parked is set.
359  *
360  * Called with thread mutex held.
361  */
362 void
thread_release(thread_t thread)363 thread_release(thread_t thread)
364 {
365 	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);
366 
367 	/* fail-safe on non-assert builds */
368 	if (thread->suspend_count == 0) {
369 		return;
370 	}
371 
372 	if (--thread->suspend_count == 0) {
373 		if (!thread->started) {
374 			thread_start(thread);
375 		} else if (thread->suspend_parked) {
376 			thread->suspend_parked = FALSE;
377 			thread_wakeup_thread(&thread->suspend_count, thread);
378 		}
379 	}
380 }
381 
382 kern_return_t
thread_suspend(thread_t thread)383 thread_suspend(thread_t thread)
384 {
385 	kern_return_t result = KERN_SUCCESS;
386 
387 	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
388 		return KERN_INVALID_ARGUMENT;
389 	}
390 
391 	thread_mtx_lock(thread);
392 
393 	if (thread->active) {
394 		if (thread->user_stop_count++ == 0) {
395 			thread_hold(thread);
396 		}
397 	} else {
398 		result = KERN_TERMINATED;
399 	}
400 
401 	thread_mtx_unlock(thread);
402 
403 	if (thread != current_thread() && result == KERN_SUCCESS) {
404 		thread_wait(thread, FALSE);
405 	}
406 
407 	return result;
408 }
409 
410 kern_return_t
thread_resume(thread_t thread)411 thread_resume(thread_t thread)
412 {
413 	kern_return_t result = KERN_SUCCESS;
414 
415 	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
416 		return KERN_INVALID_ARGUMENT;
417 	}
418 
419 	thread_mtx_lock(thread);
420 
421 	if (thread->active) {
422 		if (thread->user_stop_count > 0) {
423 			if (--thread->user_stop_count == 0) {
424 				thread_release(thread);
425 			}
426 		} else {
427 			result = KERN_FAILURE;
428 		}
429 	} else {
430 		result = KERN_TERMINATED;
431 	}
432 
433 	thread_mtx_unlock(thread);
434 
435 	return result;
436 }
437 
438 /*
439  *	thread_depress_abort_from_user:
440  *
441  *	Prematurely abort priority depression if there is one.
442  */
443 kern_return_t
thread_depress_abort_from_user(thread_t thread)444 thread_depress_abort_from_user(thread_t thread)
445 {
446 	kern_return_t result;
447 
448 	if (thread == THREAD_NULL) {
449 		return KERN_INVALID_ARGUMENT;
450 	}
451 
452 	thread_mtx_lock(thread);
453 
454 	if (thread->active) {
455 		result = thread_depress_abort(thread);
456 	} else {
457 		result = KERN_TERMINATED;
458 	}
459 
460 	thread_mtx_unlock(thread);
461 
462 	return result;
463 }
464 
465 
466 /*
467  * Indicate that the thread should run the AST_APC callback
468  * to detect an abort condition.
469  *
470  * Called with thread mutex held.
471  */
static void
act_abort(
	thread_t        thread)
{
	spl_t           s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* First abort: flag it, request the APC AST, and undo any priority depression. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* Already aborting: upgrade a "safe" abort to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
491 
492 kern_return_t
thread_abort(thread_t thread)493 thread_abort(
494 	thread_t        thread)
495 {
496 	kern_return_t   result = KERN_SUCCESS;
497 
498 	if (thread == THREAD_NULL) {
499 		return KERN_INVALID_ARGUMENT;
500 	}
501 
502 	thread_mtx_lock(thread);
503 
504 	if (thread->active) {
505 		act_abort(thread);
506 		clear_wait(thread, THREAD_INTERRUPTED);
507 	} else {
508 		result = KERN_TERMINATED;
509 	}
510 
511 	thread_mtx_unlock(thread);
512 
513 	return result;
514 }
515 
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t           s = splsched();

		thread_lock(thread);
		/*
		 * If the thread is at a safe point and its wait can be interrupted,
		 * that interrupt is the whole abort.  Otherwise fall back to marking
		 * a deferred ("safe") abort to be handled via the APC AST.
		 */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
550 
551 /*** backward compatibility hacks ***/
552 #include <mach/thread_info.h>
553 #include <mach/thread_special_ports.h>
554 #include <ipc/ipc_port.h>
555 
556 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)557 thread_info(
558 	thread_t                        thread,
559 	thread_flavor_t                 flavor,
560 	thread_info_t                   thread_info_out,
561 	mach_msg_type_number_t  *thread_info_count)
562 {
563 	kern_return_t                   result;
564 
565 	if (thread == THREAD_NULL) {
566 		return KERN_INVALID_ARGUMENT;
567 	}
568 
569 	thread_mtx_lock(thread);
570 
571 	if (thread->active || thread->inspection) {
572 		result = thread_info_internal(
573 			thread, flavor, thread_info_out, thread_info_count);
574 	} else {
575 		result = KERN_TERMINATED;
576 	}
577 
578 	thread_mtx_unlock(thread);
579 
580 	return result;
581 }
582 
/*
 * Common worker for all get-state paths.  Stops a remote target before
 * reading its machine state, and optionally converts the result to the
 * user representation (TSSF_TRANSLATE_TO_USER).
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	thread_set_status_flags_t  flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold the target, drop the mutex, then stop it for a stable read. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* The stop was interrupted; report without reading state. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* Reading our own state needs no stop. */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* Inspection threads can be read even though no longer active. */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
637 
638 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
639 
640 kern_return_t
641 thread_get_state(
642 	thread_t                thread,
643 	int                                             flavor,
644 	thread_state_t                  state,
645 	mach_msg_type_number_t  *state_count);
646 
647 kern_return_t
thread_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)648 thread_get_state(
649 	thread_t                thread,
650 	int                                             flavor,
651 	thread_state_t                  state,                  /* pointer to OUT array */
652 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
653 {
654 	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
655 }
656 
657 kern_return_t
thread_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)658 thread_get_state_to_user(
659 	thread_t                thread,
660 	int                                             flavor,
661 	thread_state_t                  state,                  /* pointer to OUT array */
662 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
663 {
664 	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
665 }
666 
667 /*
668  *	Change thread's machine-dependent state.  Called with nothing
669  *	locked.  Returns same way.
670  */
static inline kern_return_t
thread_set_state_internal(
	thread_t                        thread,
	int                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t          state_count,
	thread_state_t                  old_state,
	mach_msg_type_number_t          old_state_count,
	thread_set_status_flags_t       flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Enforce the platform-binary set-state lockdown when requested. */
	if ((flags & TSSF_CHECK_ENTITLEMENT) &&
	    !thread_set_state_allowed(thread, flavor)) {
		return KERN_NO_ACCESS;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/* Translate/authenticate user-supplied state before applying it. */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			/* Hold the target, drop the mutex, then stop it before mutating state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* The stop was interrupted; state left unmodified. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* Account successful external modification for telemetry/statistics. */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
736 
737 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
738 kern_return_t
739 thread_set_state(
740 	thread_t                thread,
741 	int                                             flavor,
742 	thread_state_t                  state,
743 	mach_msg_type_number_t  state_count);
744 
745 kern_return_t
thread_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)746 thread_set_state(
747 	thread_t                thread,
748 	int                                             flavor,
749 	thread_state_t                  state,
750 	mach_msg_type_number_t  state_count)
751 {
752 	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
753 }
754 
755 kern_return_t
thread_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)756 thread_set_state_from_user(
757 	thread_t                thread,
758 	int                                             flavor,
759 	thread_state_t                  state,
760 	mach_msg_type_number_t  state_count)
761 {
762 	return thread_set_state_internal(thread, flavor, state, state_count, NULL,
763 	           0, TSSF_TRANSLATE_TO_USER | TSSF_CHECK_ENTITLEMENT);
764 }
765 
/*
 * Convert a thread-state buffer between the user representations of the
 * calling thread and another thread (one endpoint is always the caller).
 * The conversion happens in place in `in_state` before being copied out.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                     direction,
	thread_state_flavor_t   flavor,
	thread_state_t          in_state,          /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t          out_state,         /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count)   /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* Pick the conversion endpoints based on direction. */
	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		/* state_count may have been updated by the conversion above. */
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
826 
827 /*
828  * Kernel-internal "thread" interfaces used outside this file:
829  */
830 
831 /* Initialize (or re-initialize) a thread state.  Called from execve
832  * with nothing locked, returns same way.
833  */
834 kern_return_t
thread_state_initialize(thread_t thread)835 thread_state_initialize(
836 	thread_t                thread)
837 {
838 	kern_return_t           result = KERN_SUCCESS;
839 
840 	if (thread == THREAD_NULL) {
841 		return KERN_INVALID_ARGUMENT;
842 	}
843 
844 	thread_mtx_lock(thread);
845 
846 	if (thread->active) {
847 		if (thread != current_thread()) {
848 			/* Thread created in exec should be blocked in UNINT wait */
849 			assert(!(thread->state & TH_RUN));
850 		}
851 		machine_thread_state_initialize( thread );
852 	} else {
853 		result = KERN_TERMINATED;
854 	}
855 
856 	thread_mtx_unlock(thread);
857 
858 	return result;
859 }
860 
/* Copy the calling thread's machine state onto `target` (used by fork-like paths). */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t                        self = current_thread();
	kern_return_t           result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		/* Hold the target, drop the mutex, then stop it before copying state. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			/* Propagate the caller's affinity set, when it has one. */
			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			/* The stop was interrupted; nothing was copied. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
901 
902 
/* Copy machine state from `source` onto `target` (both may differ from the caller). */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t           result = KERN_SUCCESS;
	uint32_t                active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Snapshot the source's liveness; bail if it is already terminated. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		/* Hold the target, drop the mutex, then stop it before copying state. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			/* Propagate the source's affinity set, when it has one. */
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			/* The stop was interrupted; nothing was copied. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
951 
952 /*
953  *	thread_setstatus:
954  *
955  *	Set the status of the specified thread.
956  *	Called with (and returns with) no locks held.
957  */
958 kern_return_t
thread_setstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count)959 thread_setstatus(
960 	thread_t                thread,
961 	int                                             flavor,
962 	thread_state_t                  tstate,
963 	mach_msg_type_number_t  count)
964 {
965 	return thread_set_state(thread, flavor, tstate, count);
966 }
967 
968 kern_return_t
thread_setstatus_from_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count,thread_state_t old_tstate,mach_msg_type_number_t old_count,thread_set_status_flags_t flags)969 thread_setstatus_from_user(
970 	thread_t                thread,
971 	int                                             flavor,
972 	thread_state_t                  tstate,
973 	mach_msg_type_number_t  count,
974 	thread_state_t                  old_tstate,
975 	mach_msg_type_number_t  old_count,
976 	thread_set_status_flags_t flags)
977 {
978 	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
979 	           old_count, flags | TSSF_TRANSLATE_TO_USER);
980 }
981 
982 /*
983  *	thread_getstatus:
984  *
985  *	Get the status of the specified thread.
986  */
987 kern_return_t
thread_getstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count)988 thread_getstatus(
989 	thread_t                thread,
990 	int                                             flavor,
991 	thread_state_t                  tstate,
992 	mach_msg_type_number_t  *count)
993 {
994 	return thread_get_state(thread, flavor, tstate, count);
995 }
996 
997 kern_return_t
thread_getstatus_to_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count,thread_set_status_flags_t flags)998 thread_getstatus_to_user(
999 	thread_t                thread,
1000 	int                                             flavor,
1001 	thread_state_t                  tstate,
1002 	mach_msg_type_number_t  *count,
1003 	thread_set_status_flags_t flags)
1004 {
1005 	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
1006 }
1007 
1008 /*
1009  *	Change thread's machine-dependent userspace TSD base.
1010  *  Called with nothing locked.  Returns same way.
1011  */
kern_return_t
thread_set_tsd_base(
	thread_t                        thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold the target, drop the mutex, then stop it before changing its TSD base. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				/* The stop was interrupted; TSD base left unchanged. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* Setting our own TSD base needs no stop. */
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
1052 
1053 /*
1054  * thread_set_apc_ast:
1055  *
1056  * Register the AST_APC callback that handles suspension and
1057  * termination, if it hasn't been installed already.
1058  *
1059  * Called with the thread mutex held.
1060  */
1061 static void
thread_set_apc_ast(thread_t thread)1062 thread_set_apc_ast(thread_t thread)
1063 {
1064 	spl_t s = splsched();
1065 
1066 	thread_lock(thread);
1067 	thread_set_apc_ast_locked(thread);
1068 	thread_unlock(thread);
1069 
1070 	splx(s);
1071 }
1072 
1073 /*
1074  * thread_set_apc_ast_locked:
1075  *
1076  * Do the work of registering for the AST_APC callback.
1077  *
1078  * Called with the thread mutex and scheduling lock held.
1079  */
1080 static void
thread_set_apc_ast_locked(thread_t thread)1081 thread_set_apc_ast_locked(thread_t thread)
1082 {
1083 	thread_ast_set(thread, AST_APC);
1084 
1085 	if (thread == current_thread()) {
1086 		ast_propagate(thread);
1087 	} else {
1088 		processor_t processor = thread->last_processor;
1089 
1090 		if (processor != PROCESSOR_NULL &&
1091 		    processor->state == PROCESSOR_RUNNING &&
1092 		    processor->active_thread == thread) {
1093 			cause_ast_check(processor);
1094 		}
1095 	}
1096 }
1097 
1098 /*
1099  * Activation control support routines internal to this file:
1100  *
1101  */
1102 
1103 /*
1104  * thread_suspended
1105  *
1106  * Continuation routine for thread suspension.  It checks
1107  * to see whether there has been any new suspensions.  If so, it
1108  * installs the AST_APC handler again.
1109  */
1110 __attribute__((noreturn))
1111 static void
thread_suspended(__unused void * parameter,wait_result_t result)1112 thread_suspended(__unused void *parameter, wait_result_t result)
1113 {
1114 	thread_t thread = current_thread();
1115 
1116 	thread_mtx_lock(thread);
1117 
1118 	if (result == THREAD_INTERRUPTED) {
1119 		thread->suspend_parked = FALSE;
1120 	} else {
1121 		assert(thread->suspend_parked == FALSE);
1122 	}
1123 
1124 	if (thread->suspend_count > 0) {
1125 		thread_set_apc_ast(thread);
1126 	}
1127 
1128 	thread_mtx_unlock(thread);
1129 
1130 	thread_exception_return();
1131 	/*NOTREACHED*/
1132 }
1133 
1134 /*
1135  * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
1136  * Called with nothing locked.  Returns (if it returns) the same way.
1137  */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	/* A parked thread cannot be the one running this AST handler. */
	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* The thread is at a clean point; clear any pending abort state. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Execution resumes in thread_suspended() after the wakeup. */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1176 
1177 #if CONFIG_ROSETTA
1178 extern kern_return_t
1179 exception_deliver(
1180 	thread_t                thread,
1181 	exception_type_t        exception,
1182 	mach_exception_data_t   code,
1183 	mach_msg_type_number_t  codeCnt,
1184 	struct exception_action *excp,
1185 	lck_mtx_t               *mutex);
1186 
1187 kern_return_t
thread_raise_exception(thread_t thread,exception_type_t exception,natural_t code_count,int64_t code,int64_t sub_code)1188 thread_raise_exception(
1189 	thread_t thread,
1190 	exception_type_t exception,
1191 	natural_t code_count,
1192 	int64_t code,
1193 	int64_t sub_code)
1194 {
1195 	task_t task;
1196 
1197 	if (thread == THREAD_NULL) {
1198 		return KERN_INVALID_ARGUMENT;
1199 	}
1200 
1201 	task = get_threadtask(thread);
1202 
1203 	if (task != current_task()) {
1204 		return KERN_FAILURE;
1205 	}
1206 
1207 	if (!task_is_translated(task)) {
1208 		return KERN_FAILURE;
1209 	}
1210 
1211 	if (exception == EXC_CRASH) {
1212 		return KERN_INVALID_ARGUMENT;
1213 	}
1214 
1215 	int64_t codes[] = { code, sub_code };
1216 	host_priv_t host_priv = host_priv_self();
1217 	kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
1218 	if (kr != KERN_SUCCESS) {
1219 		return kr;
1220 	}
1221 
1222 	return thread_resume(thread);
1223 }
1224 #endif
1225 
/*
 * Debug-build sanity checks performed on the way back to user space:
 * a thread must not return to user holding an RW lock promotion or a
 * priority floor boost.  Compiles to a no-op without MACH_ASSERT.
 */
void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}
#endif /* MACH_ASSERT */
}
1243 
1244 
1245 /* Prototype, see justification above */
1246 kern_return_t
1247 act_set_state(
1248 	thread_t                                thread,
1249 	int                                             flavor,
1250 	thread_state_t                  state,
1251 	mach_msg_type_number_t  count);
1252 
1253 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1254 act_set_state(
1255 	thread_t                                thread,
1256 	int                                             flavor,
1257 	thread_state_t                  state,
1258 	mach_msg_type_number_t  count)
1259 {
1260 	if (thread == current_thread()) {
1261 		return KERN_INVALID_ARGUMENT;
1262 	}
1263 
1264 	return thread_set_state(thread, flavor, state, count);
1265 }
1266 
1267 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1268 act_set_state_from_user(
1269 	thread_t                                thread,
1270 	int                                             flavor,
1271 	thread_state_t                  state,
1272 	mach_msg_type_number_t  count)
1273 {
1274 	if (thread == current_thread()) {
1275 		return KERN_INVALID_ARGUMENT;
1276 	}
1277 
1278 	return thread_set_state_from_user(thread, flavor, state, count);
1279 }
1280 
1281 /* Prototype, see justification above */
1282 kern_return_t
1283 act_get_state(
1284 	thread_t                                thread,
1285 	int                                             flavor,
1286 	thread_state_t                  state,
1287 	mach_msg_type_number_t  *count);
1288 
1289 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1290 act_get_state(
1291 	thread_t                                thread,
1292 	int                                             flavor,
1293 	thread_state_t                  state,
1294 	mach_msg_type_number_t  *count)
1295 {
1296 	if (thread == current_thread()) {
1297 		return KERN_INVALID_ARGUMENT;
1298 	}
1299 
1300 	return thread_get_state(thread, flavor, state, count);
1301 }
1302 
1303 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1304 act_get_state_to_user(
1305 	thread_t                                thread,
1306 	int                                             flavor,
1307 	thread_state_t                  state,
1308 	mach_msg_type_number_t  *count)
1309 {
1310 	if (thread == current_thread()) {
1311 		return KERN_INVALID_ARGUMENT;
1312 	}
1313 
1314 	return thread_get_state_to_user(thread, flavor, state, count);
1315 }
1316 
1317 static void
act_set_ast(thread_t thread,ast_t ast)1318 act_set_ast(
1319 	thread_t   thread,
1320 	ast_t      ast)
1321 {
1322 	spl_t s = splsched();
1323 
1324 	if (thread == current_thread()) {
1325 		thread_ast_set(thread, ast);
1326 		ast_propagate(thread);
1327 	} else {
1328 		processor_t processor;
1329 
1330 		thread_lock(thread);
1331 		thread_ast_set(thread, ast);
1332 		processor = thread->last_processor;
1333 		if (processor != PROCESSOR_NULL &&
1334 		    processor->state == PROCESSOR_RUNNING &&
1335 		    processor->active_thread == thread) {
1336 			cause_ast_check(processor);
1337 		}
1338 		thread_unlock(thread);
1339 	}
1340 
1341 	splx(s);
1342 }
1343 
1344 /*
1345  * set AST on thread without causing an AST check
1346  * and without taking the thread lock
1347  *
1348  * If thread is not the current thread, then it may take
1349  * up until the next context switch or quantum expiration
1350  * on that thread for it to notice the AST.
1351  */
1352 static void
act_set_ast_async(thread_t thread,ast_t ast)1353 act_set_ast_async(thread_t  thread,
1354     ast_t     ast)
1355 {
1356 	thread_ast_set(thread, ast);
1357 
1358 	if (thread == current_thread()) {
1359 		spl_t s = splsched();
1360 		ast_propagate(thread);
1361 		splx(s);
1362 	}
1363 }
1364 
1365 void
act_set_debug_assert(void)1366 act_set_debug_assert(void)
1367 {
1368 	thread_t thread = current_thread();
1369 	if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1370 		thread_ast_set(thread, AST_DEBUG_ASSERT);
1371 	}
1372 	if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1373 		spl_t s = splsched();
1374 		ast_propagate(thread);
1375 		splx(s);
1376 	}
1377 }
1378 
1379 void
act_set_astbsd(thread_t thread)1380 act_set_astbsd(thread_t thread)
1381 {
1382 	act_set_ast(thread, AST_BSD);
1383 }
1384 
1385 void
act_set_astkevent(thread_t thread,uint16_t bits)1386 act_set_astkevent(thread_t thread, uint16_t bits)
1387 {
1388 	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);
1389 
1390 	/* kevent AST shouldn't send immediate IPIs */
1391 	act_set_ast_async(thread, AST_KEVENT);
1392 }
1393 
1394 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1395 act_clear_astkevent(thread_t thread, uint16_t bits)
1396 {
1397 	/*
1398 	 * avoid the atomic operation if none of the bits is set,
1399 	 * which will be the common case.
1400 	 */
1401 	uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1402 	if (cur & bits) {
1403 		cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1404 	}
1405 	return cur & bits;
1406 }
1407 
/*
 * Set AST_RESET_PCS on a thread of `task`, returning true when the
 * caller must wait for the target to acknowledge (IPI ack pending,
 * or a fault in the range of interest was observed).
 */
bool
act_set_ast_reset_pcs(task_t task, thread_t thread)
{
	processor_t processor;
	bool needs_wait = false;
	spl_t s;

	s = splsched();

	if (thread == current_thread()) {
		/*
		 * this is called from the signal code,
		 * just set the AST and move on
		 */
		thread_ast_set(thread, AST_RESET_PCS);
		ast_propagate(thread);
	} else {
		thread_lock(thread);

		assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
		assert(thread->t_rr_state.trr_sync_waiting == 0);

		processor = thread->last_processor;
		if (!thread->active) {
			/*
			 * ->active is being set before the thread is added
			 * to the thread list (under the task lock which
			 * the caller holds), and is reset before the thread
			 * lock is being taken by thread_terminate_self().
			 *
			 * The result is that this will never fail to
			 * set the AST on a thread that is active,
			 * but will not set it past thread_terminate_self().
			 */
		} else if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			/* On-core elsewhere: request an IPI acknowledgement. */
			thread->t_rr_state.trr_ipi_ack_pending = true;
			needs_wait = true;
			thread_ast_set(thread, AST_RESET_PCS);
			cause_ast_check(processor);
		} else if (thread_reset_pcs_in_range(task, thread)) {
			/* Off-core but relevant; mid-fault threads must be waited on. */
			if (thread->t_rr_state.trr_fault_state) {
				thread->t_rr_state.trr_fault_state =
				    TRR_FAULT_OBSERVED;
				needs_wait = true;
			}
			thread_ast_set(thread, AST_RESET_PCS);
		}
		thread_unlock(thread);
	}

	splx(s);

	return needs_wait;
}
1464 
1465 void
act_set_kperf(thread_t thread)1466 act_set_kperf(thread_t thread)
1467 {
1468 	/* safety check */
1469 	if (thread != current_thread()) {
1470 		if (!ml_get_interrupts_enabled()) {
1471 			panic("unsafe act_set_kperf operation");
1472 		}
1473 	}
1474 
1475 	act_set_ast(thread, AST_KPERF);
1476 }
1477 
1478 #if CONFIG_MACF
1479 void
act_set_astmacf(thread_t thread)1480 act_set_astmacf(
1481 	thread_t        thread)
1482 {
1483 	act_set_ast( thread, AST_MACF);
1484 }
1485 #endif
1486 
1487 void
act_set_astledger(thread_t thread)1488 act_set_astledger(thread_t thread)
1489 {
1490 	act_set_ast(thread, AST_LEDGER);
1491 }
1492 
1493 /*
1494  * The ledger AST may need to be set while already holding
1495  * the thread lock.  This routine skips sending the IPI,
1496  * allowing us to avoid the lock hold.
1497  *
1498  * However, it means the targeted thread must context switch
1499  * to recognize the ledger AST.
1500  */
1501 void
act_set_astledger_async(thread_t thread)1502 act_set_astledger_async(thread_t thread)
1503 {
1504 	act_set_ast_async(thread, AST_LEDGER);
1505 }
1506 
1507 void
act_set_io_telemetry_ast(thread_t thread)1508 act_set_io_telemetry_ast(thread_t thread)
1509 {
1510 	act_set_ast(thread, AST_TELEMETRY_IO);
1511 }
1512 
1513 void
act_set_macf_telemetry_ast(thread_t thread)1514 act_set_macf_telemetry_ast(thread_t thread)
1515 {
1516 	act_set_ast(thread, AST_TELEMETRY_MACF);
1517 }
1518 
1519 void
act_set_astproc_resource(thread_t thread)1520 act_set_astproc_resource(thread_t thread)
1521 {
1522 	act_set_ast(thread, AST_PROC_RESOURCE);
1523 }
1524