xref: /xnu-11215.41.3/osfmk/kern/thread_act.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Copyright (c) 1993 The University of Utah and
33  * the Center for Software Science (CSS).  All rights reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright
37  * notice and this permission notice appear in all copies of the
38  * software, derivative works or modified versions, and any portions
39  * thereof, and that both notices appear in supporting documentation.
40  *
41  * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * CSS requests users of this software to return to [email protected] any
46  * improvements that they make and grant CSS redistribution rights.
47  *
48  *	Author:	Bryan Ford, University of Utah CSS
49  *
50  *	Thread management routines
51  */
52 
53 #include <sys/kdebug.h>
54 #include <mach/mach_types.h>
55 #include <mach/kern_return.h>
56 #include <mach/thread_act_server.h>
57 #include <mach/thread_act.h>
58 
59 #include <kern/kern_types.h>
60 #include <kern/ast.h>
61 #include <kern/mach_param.h>
62 #include <kern/zalloc.h>
63 #include <kern/extmod_statistics.h>
64 #include <kern/thread.h>
65 #include <kern/task.h>
66 #include <kern/sched_prim.h>
67 #include <kern/misc_protos.h>
68 #include <kern/assert.h>
69 #include <kern/exception.h>
70 #include <kern/ipc_mig.h>
71 #include <kern/ipc_tt.h>
72 #include <kern/machine.h>
73 #include <kern/spl.h>
74 #include <kern/syscall_subr.h>
75 #include <kern/processor.h>
76 #include <kern/restartable.h>
77 #include <kern/timer.h>
78 #include <kern/affinity.h>
79 #include <kern/host.h>
80 #include <kern/exc_guard.h>
81 #include <ipc/port.h>
82 #include <mach/arm/thread_status.h>
83 
84 
85 #include <stdatomic.h>
86 
87 #include <security/mac_mach_internal.h>
88 #include <libkern/coreanalytics/coreanalytics.h>
89 
90 static void act_abort(thread_t thread);
91 
92 static void thread_suspended(void *arg, wait_result_t result);
93 static void thread_set_apc_ast(thread_t thread);
94 static void thread_set_apc_ast_locked(thread_t thread);
95 
96 extern void proc_name(int pid, char * buf, int size);
97 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
98 
/* CoreAnalytics event carrying the name of the process that tripped the
 * thread_set_state lockdown (sent by send_thread_set_state_telemetry()). */
CA_EVENT(thread_set_state,
    CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc);
101 
102 static void
send_thread_set_state_telemetry(void)103 send_thread_set_state_telemetry(void)
104 {
105 	ca_event_t ca_event = CA_EVENT_ALLOCATE(thread_set_state);
106 	CA_EVENT_TYPE(thread_set_state) * event = ca_event->data;
107 
108 	proc_name(task_pid(current_task()), (char *) &event->current_proc, CA_PROCNAME_LEN);
109 
110 	CA_EVENT_SEND(ca_event);
111 }
112 
/* bootarg to create lightweight corpse for thread set state lockdown;
 * on DEVELOPMENT/DEBUG kernels, tss_should_crash=0 disables the
 * thread_set_state_allowed() enforcement below. */
TUNABLE(bool, tss_should_crash, "tss_should_crash", true);
115 
116 static inline boolean_t
thread_set_state_allowed(thread_t thread,int flavor)117 thread_set_state_allowed(thread_t thread, int flavor)
118 {
119 	task_t target_task = get_threadtask(thread);
120 
121 #if DEVELOPMENT || DEBUG
122 	/* disable the feature if the boot-arg is disabled. */
123 	if (!tss_should_crash) {
124 		return TRUE;
125 	}
126 #endif /* DEVELOPMENT || DEBUG */
127 
128 	/* hardened binaries must have entitlement - all others ok */
129 	if (task_is_hardened_binary(target_task)
130 	    && !(thread->options & TH_IN_MACH_EXCEPTION)            /* Allowed for now - rdar://103085786 */
131 	    && FLAVOR_MODIFIES_CORE_CPU_REGISTERS(flavor) /* only care about locking down PC/LR */
132 #if XNU_TARGET_OS_OSX
133 	    && !task_opted_out_mach_hardening(target_task)
134 #endif /* XNU_TARGET_OS_OSX */
135 #if CONFIG_ROSETTA
136 	    && !task_is_translated(target_task)  /* Ignore translated tasks */
137 #endif /* CONFIG_ROSETTA */
138 	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
139 	    ) {
140 		/* fatal crash */
141 		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
142 		send_thread_set_state_telemetry();
143 		return FALSE;
144 	}
145 
146 #if __has_feature(ptrauth_calls)
147 	/* Do not allow Fatal PAC exception binaries to set Debug state */
148 	if (task_is_pac_exception_fatal(target_task)
149 	    && machine_thread_state_is_debug_flavor(flavor)
150 #if XNU_TARGET_OS_OSX
151 	    && !task_opted_out_mach_hardening(target_task)
152 #endif /* XNU_TARGET_OS_OSX */
153 #if CONFIG_ROSETTA
154 	    && !task_is_translated(target_task)      /* Ignore translated tasks */
155 #endif /* CONFIG_ROSETTA */
156 	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
157 	    ) {
158 		/* fatal crash */
159 		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
160 		send_thread_set_state_telemetry();
161 		return FALSE;
162 	}
163 #endif /* __has_feature(ptrauth_calls) */
164 
165 	return TRUE;
166 }
167 
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 */
void
thread_start(
	thread_t                        thread)
{
	/* wake the thread out of its creation wait, then flag it started */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
179 
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t            thread,
	struct waitq       *waitq,
	event64_t           event,
	wait_interrupt_t    interruptible)
{
	wait_result_t wait_result;
	spl_t spl;

	/* waitq lock is taken before the thread lock throughout */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, event,
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
232 
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * If thread is on core, cause AST check immediately;
 * Otherwise, let the thread continue running in kernel
 * until it hits AST.
 */
kern_return_t
thread_terminate_internal(
	thread_t                        thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* post the AST_APC abort that drives termination */
		act_abort(thread);

		if (thread->started) {
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* never ran: start it so it can reach its termination AST */
			thread_start(thread);
		}
	} else {
		/* a previous terminate already won the race */
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	if (thread != current_thread() && result == KERN_SUCCESS) {
		/* don't return until the target has stopped running */
		thread_wait(thread, FALSE);
	}

	return result;
}
278 
kern_return_t
thread_terminate(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* the per-processor idle threads must never be terminated */
	if (thread->state & TH_IDLE) {
		panic("idle thread calling thread_terminate!");
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for a self-terminating thread */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
318 
319 /*
320  * [MIG Call] Terminate a thread.
321  *
322  * Cannot be used on threads managed by pthread.
323  */
324 kern_return_t
thread_terminate_from_user(thread_t thread)325 thread_terminate_from_user(
326 	thread_t                thread)
327 {
328 	if (thread == THREAD_NULL) {
329 		return KERN_INVALID_ARGUMENT;
330 	}
331 
332 	if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
333 		return KERN_DENIED;
334 	}
335 
336 	return thread_terminate(thread);
337 }
338 
/*
 * Terminate a thread with pinned control port.
 *
 * Can only be used on threads managed by pthread. Exported in pthread_kern.
 */
kern_return_t
thread_terminate_pinned(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);


	assert(task != kernel_task);
	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));

	thread_mtx_lock(thread);
	/* sanity: a still-active thread of a pinned task must have its
	 * control port pinned (thread_terminate_internal unpins it) */
	if (task_is_pinned(task) && thread->active) {
		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
	}
	thread_mtx_unlock(thread);

	kern_return_t result = thread_terminate_internal(thread);
	return result;
}
369 
370 /*
371  * Suspend execution of the specified thread.
372  * This is a recursive-style suspension of the thread, a count of
373  * suspends is maintained.
374  *
375  * Called with thread mutex held.
376  */
377 void
thread_hold(thread_t thread)378 thread_hold(thread_t thread)
379 {
380 	if (thread->suspend_count++ == 0) {
381 		thread_set_apc_ast(thread);
382 		assert(thread->suspend_parked == FALSE);
383 	}
384 }
385 
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* never ran: kick it out of its creation wait */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* wake the thread parked in its suspend wait */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
414 
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;
	int32_t thread_user_stop_count;

	/* kernel threads cannot be user-suspended */
	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		/* only the first user-level suspend takes an internal hold */
		if (thread->user_stop_count++ == 0) {
			thread_hold(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}
	/* snapshot under the mutex for the tracepoint below */
	thread_user_stop_count = thread->user_stop_count;

	thread_mtx_unlock(thread);

	if (result == KERN_SUCCESS) {
		KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_SUSPEND) | DBG_FUNC_NONE,
		    thread->thread_id, thread_user_stop_count);
	}

	if (thread != current_thread() && result == KERN_SUCCESS) {
		/* don't return until the target has actually stopped running */
		thread_wait(thread, FALSE);
	}

	return result;
}
449 
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;
	int32_t thread_user_stop_count;

	/* kernel threads cannot be user-suspended/resumed */
	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/* dropping the last user-level stop releases the hold */
			if (--thread->user_stop_count == 0) {
				thread_release(thread);
			}
		} else {
			/* resume without a matching suspend */
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}
	/* snapshot under the mutex for the tracepoint below */
	thread_user_stop_count = thread->user_stop_count;

	thread_mtx_unlock(thread);

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_IPC, MACH_THREAD_RESUME) | DBG_FUNC_NONE,
	    thread->thread_id, thread_user_stop_count, result);

	return result;
}
482 
483 /*
484  *	thread_depress_abort_from_user:
485  *
486  *	Prematurely abort priority depression if there is one.
487  */
488 kern_return_t
thread_depress_abort_from_user(thread_t thread)489 thread_depress_abort_from_user(thread_t thread)
490 {
491 	kern_return_t result;
492 
493 	if (thread == THREAD_NULL) {
494 		return KERN_INVALID_ARGUMENT;
495 	}
496 
497 	thread_mtx_lock(thread);
498 
499 	if (thread->active) {
500 		result = thread_depress_abort(thread);
501 	} else {
502 		result = KERN_TERMINATED;
503 	}
504 
505 	thread_mtx_unlock(thread);
506 
507 	return result;
508 }
509 
510 
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t        thread)
{
	spl_t           s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		/* an abort also cancels any priority depression */
		thread_depress_abort_locked(thread);
	} else {
		/* already aborting: escalate a safe abort to an unsafe one */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
536 
537 kern_return_t
thread_abort(thread_t thread)538 thread_abort(
539 	thread_t        thread)
540 {
541 	kern_return_t   result = KERN_SUCCESS;
542 
543 	if (thread == THREAD_NULL) {
544 		return KERN_INVALID_ARGUMENT;
545 	}
546 
547 	thread_mtx_lock(thread);
548 
549 	if (thread->active) {
550 		act_abort(thread);
551 		clear_wait(thread, THREAD_INTERRUPTED);
552 	} else {
553 		result = KERN_TERMINATED;
554 	}
555 
556 	thread_mtx_unlock(thread);
557 
558 	return result;
559 }
560 
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t           s = splsched();

		thread_lock(thread);
		/* interrupt the thread only if it is at a safe point; otherwise
		 * post a deferred, safe abort to be taken at the next one */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
595 
596 /*** backward compatibility hacks ***/
597 #include <mach/thread_info.h>
598 #include <mach/thread_special_ports.h>
599 #include <ipc/ipc_port.h>
600 
601 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)602 thread_info(
603 	thread_t                        thread,
604 	thread_flavor_t                 flavor,
605 	thread_info_t                   thread_info_out,
606 	mach_msg_type_number_t  *thread_info_count)
607 {
608 	kern_return_t                   result;
609 
610 	if (thread == THREAD_NULL) {
611 		return KERN_INVALID_ARGUMENT;
612 	}
613 
614 	thread_mtx_lock(thread);
615 
616 	if (thread->active || thread->inspection) {
617 		result = thread_info_internal(
618 			thread, flavor, thread_info_out, thread_info_count);
619 	} else {
620 		result = KERN_TERMINATED;
621 	}
622 
623 	thread_mtx_unlock(thread);
624 
625 	return result;
626 }
627 
/*
 * Common implementation for thread_get_state()/thread_get_state_to_user().
 *
 * A remote thread is held and stopped so its machine state is stable
 * while being read; the current thread reads its own state directly.
 * Inspection-only threads are readable without stopping.  With
 * TSSF_TRANSLATE_TO_USER the result is converted to its userspace
 * representation before returning.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	thread_set_status_flags_t  flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* drop the mutex: thread_stop() may block */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* stop was aborted; nothing was read */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
682 
683 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
684 
685 kern_return_t
686 thread_get_state(
687 	thread_t                thread,
688 	int                                             flavor,
689 	thread_state_t                  state,
690 	mach_msg_type_number_t  *state_count);
691 
692 kern_return_t
thread_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)693 thread_get_state(
694 	thread_t                thread,
695 	int                                             flavor,
696 	thread_state_t                  state,                  /* pointer to OUT array */
697 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
698 {
699 	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
700 }
701 
702 kern_return_t
thread_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)703 thread_get_state_to_user(
704 	thread_t                thread,
705 	int                                             flavor,
706 	thread_state_t                  state,                  /* pointer to OUT array */
707 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
708 {
709 	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
710 }
711 
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 *
 *	With TSSF_TRANSLATE_TO_USER the incoming state is first converted
 *	from its userspace representation; TSSF_CHECK_ENTITLEMENT applies
 *	the thread_set_state_allowed() lockdown policy before any work.
 *	A remote thread is held and force-stopped around the state write.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                        thread,
	int                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t          state_count,
	thread_state_t                  old_state,
	mach_msg_type_number_t          old_state_count,
	thread_set_status_flags_t       flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* may deliver a fatal guard exception to the calling task */
	if ((flags & TSSF_CHECK_ENTITLEMENT) &&
	    !thread_set_state_allowed(thread, flavor)) {
		return KERN_NO_ACCESS;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			/* drop the mutex: thread_stop() may block */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* stop was aborted; no state was written */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* account external modification of another thread's state */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
781 
782 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
783 kern_return_t
784 thread_set_state(
785 	thread_t                thread,
786 	int                                             flavor,
787 	thread_state_t                  state,
788 	mach_msg_type_number_t  state_count);
789 
790 kern_return_t
thread_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)791 thread_set_state(
792 	thread_t                thread,
793 	int                                             flavor,
794 	thread_state_t                  state,
795 	mach_msg_type_number_t  state_count)
796 {
797 	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
798 }
799 
800 kern_return_t
thread_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)801 thread_set_state_from_user(
802 	thread_t                thread,
803 	int                                             flavor,
804 	thread_state_t                  state,
805 	mach_msg_type_number_t  state_count)
806 {
807 	return thread_set_state_internal(thread, flavor, state, state_count, NULL,
808 	           0, TSSF_TRANSLATE_TO_USER | TSSF_CHECK_ENTITLEMENT);
809 }
810 
/*
 * Convert @in_state between the userspace representations of two threads
 * (the current thread and @thread); @direction selects which is source
 * and which is destination.  The state is converted from the source's
 * user representation to the kernel form, then back out to the
 * destination's user representation (with TSSF_PRESERVE_FLAGS), and
 * copied to @out_state on success.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                     direction,
	thread_state_flavor_t   flavor,
	thread_state_t          in_state,          /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t          out_state,         /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count)   /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		if (state_count <= *out_state_count) {
			/* state counts are in units of 32-bit words */
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
871 
872 /*
873  * Kernel-internal "thread" interfaces used outside this file:
874  */
875 
876 /* Initialize (or re-initialize) a thread state.  Called from execve
877  * with nothing locked, returns same way.
878  */
879 kern_return_t
thread_state_initialize(thread_t thread)880 thread_state_initialize(
881 	thread_t                thread)
882 {
883 	kern_return_t           result = KERN_SUCCESS;
884 
885 	if (thread == THREAD_NULL) {
886 		return KERN_INVALID_ARGUMENT;
887 	}
888 
889 	thread_mtx_lock(thread);
890 
891 	if (thread->active) {
892 		if (thread != current_thread()) {
893 			/* Thread created in exec should be blocked in UNINT wait */
894 			assert(!(thread->state & TH_RUN));
895 		}
896 		machine_thread_state_initialize( thread );
897 	} else {
898 		result = KERN_TERMINATED;
899 	}
900 
901 	thread_mtx_unlock(thread);
902 
903 	return result;
904 }
905 
/*
 * Copy the current thread's machine state onto @target.  The target is
 * held and force-stopped around the copy so its state is stable; the
 * caller's affinity set, if any, is duplicated onto the target as well.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t                        self = current_thread();
	kern_return_t           result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		/* drop the mutex: thread_stop() may block */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			/* stop was aborted; nothing was copied */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
946 
947 
/*
 * Copy @source's machine state onto @target (arbitrary source variant of
 * thread_dup).  The source must still be active; the target may be
 * active or inspection-only.  The target is held and force-stopped
 * around the copy, and the source's affinity set is duplicated.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t           result = KERN_SUCCESS;
	uint32_t                active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* only snapshot activity; source is not held across the copy */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		/* drop the mutex: thread_stop() may block */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			/* stop was aborted; nothing was copied */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
996 
997 /*
998  *	thread_setstatus:
999  *
1000  *	Set the status of the specified thread.
1001  *	Called with (and returns with) no locks held.
1002  */
1003 kern_return_t
thread_setstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count)1004 thread_setstatus(
1005 	thread_t                thread,
1006 	int                                             flavor,
1007 	thread_state_t                  tstate,
1008 	mach_msg_type_number_t  count)
1009 {
1010 	return thread_set_state(thread, flavor, tstate, count);
1011 }
1012 
1013 kern_return_t
thread_setstatus_from_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count,thread_state_t old_tstate,mach_msg_type_number_t old_count,thread_set_status_flags_t flags)1014 thread_setstatus_from_user(
1015 	thread_t                thread,
1016 	int                                             flavor,
1017 	thread_state_t                  tstate,
1018 	mach_msg_type_number_t  count,
1019 	thread_state_t                  old_tstate,
1020 	mach_msg_type_number_t  old_count,
1021 	thread_set_status_flags_t flags)
1022 {
1023 	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
1024 	           old_count, flags | TSSF_TRANSLATE_TO_USER);
1025 }
1026 
1027 /*
1028  *	thread_getstatus:
1029  *
1030  *	Get the status of the specified thread.
1031  */
1032 kern_return_t
thread_getstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count)1033 thread_getstatus(
1034 	thread_t                thread,
1035 	int                                             flavor,
1036 	thread_state_t                  tstate,
1037 	mach_msg_type_number_t  *count)
1038 {
1039 	return thread_get_state(thread, flavor, tstate, count);
1040 }
1041 
1042 kern_return_t
thread_getstatus_to_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count,thread_set_status_flags_t flags)1043 thread_getstatus_to_user(
1044 	thread_t                thread,
1045 	int                                             flavor,
1046 	thread_state_t                  tstate,
1047 	mach_msg_type_number_t  *count,
1048 	thread_set_status_flags_t flags)
1049 {
1050 	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
1051 }
1052 
/*
 *	Change thread's machine-dependent userspace TSD base.
 *  Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t                        thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/*
			 * Remote target: hold it, then drop the mutex so we
			 * can stop the thread (thread_stop may block).
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				/* Stopped: safe to touch its machine state. */
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				/* The stop was interrupted; give up. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			/* Undo the thread_hold() above. */
			thread_release(thread);
		} else {
			/* Current thread can update its own state directly. */
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
1097 
1098 /*
1099  * thread_set_apc_ast:
1100  *
1101  * Register the AST_APC callback that handles suspension and
1102  * termination, if it hasn't been installed already.
1103  *
1104  * Called with the thread mutex held.
1105  */
1106 static void
thread_set_apc_ast(thread_t thread)1107 thread_set_apc_ast(thread_t thread)
1108 {
1109 	spl_t s = splsched();
1110 
1111 	thread_lock(thread);
1112 	thread_set_apc_ast_locked(thread);
1113 	thread_unlock(thread);
1114 
1115 	splx(s);
1116 }
1117 
1118 /*
1119  * thread_set_apc_ast_locked:
1120  *
1121  * Do the work of registering for the AST_APC callback.
1122  *
1123  * Called with the thread mutex and scheduling lock held.
1124  */
1125 static void
thread_set_apc_ast_locked(thread_t thread)1126 thread_set_apc_ast_locked(thread_t thread)
1127 {
1128 	thread_ast_set(thread, AST_APC);
1129 
1130 	if (thread == current_thread()) {
1131 		ast_propagate(thread);
1132 	} else {
1133 		processor_t processor = thread->last_processor;
1134 
1135 		if (processor != PROCESSOR_NULL &&
1136 		    processor->state == PROCESSOR_RUNNING &&
1137 		    processor->active_thread == thread) {
1138 			cause_ast_check(processor);
1139 		}
1140 	}
1141 }
1142 
/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		/* Interrupted wait: nobody cleared the flag for us. */
		thread->suspend_parked = FALSE;
	} else {
		/* Normal wakeup: whoever resumed us already unparked us. */
		assert(thread->suspend_parked == FALSE);
	}

	/* Still suspended?  Re-arm the APC so we park again. */
	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	/* Resume userspace; never returns to this frame. */
	thread_exception_return();
	/*NOTREACHED*/
}
1178 
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	/* An APC must never fire while this thread is already parked. */
	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Clear abort state now that we've reached a safe point. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		/* The wait channel is the address of suspend_count itself. */
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Continues in thread_suspended() on wakeup or interrupt. */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1221 
#if CONFIG_ROSETTA
/*
 * Local prototype for exception_deliver(), used by
 * thread_raise_exception() below.
 * NOTE(review): presumably defined in the exception-delivery code and
 * not exported via a header — confirm this matches the definition and
 * consider moving it to a shared header.
 */
extern kern_return_t
exception_deliver(
	thread_t                thread,
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt,
	struct exception_action *excp,
	lck_mtx_t               *mutex);
1231 
/*
 * thread_raise_exception:
 *
 * Deliver an exception on behalf of a Rosetta-translated task to one
 * of its own threads, then resume that thread.
 *
 * Fails unless the target thread belongs to the caller's task and
 * that task is translated; EXC_CRASH is explicitly disallowed.
 */
kern_return_t
thread_raise_exception(
	thread_t thread,
	exception_type_t exception,
	natural_t code_count,
	int64_t code,
	int64_t sub_code)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Only threads of the caller's own task may be targeted. */
	if (task != current_task()) {
		return KERN_FAILURE;
	}

	/* Only available to translated (Rosetta) tasks. */
	if (!task_is_translated(task)) {
		return KERN_FAILURE;
	}

	if (exception == EXC_CRASH) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * NOTE(review): codes[] has exactly 2 entries but code_count is
	 * forwarded unvalidated — confirm exception_deliver() bounds its
	 * reads by codeCnt, or clamp code_count to 2 here.
	 */
	int64_t codes[] = { code, sub_code };
	host_priv_t host_priv = host_priv_self();
	kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Delivery succeeded; let the target thread run again. */
	return thread_resume(thread);
}
1269 #endif
1270 
/*
 * thread_debug_return_to_user_ast:
 *
 * Debug-build sanity checks run on the way back to userspace.
 * Panics if the thread still carries kernel-only state: an RW-lock
 * promotion or nonzero rwlock count, a priority-floor boost, or
 * VM faults disabled.  No-op when MACH_ASSERT is not configured.
 */
void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}

	if (thread->th_vm_faults_disabled) {
		panic("Returning to userspace with vm faults disabled, thread %p", thread);
	}

#if CONFIG_EXCLAVES
	/* Must not return to user mode while in any exclaves state. */
	assert3u(thread->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
#endif /* CONFIG_EXCLAVES */

#endif /* MACH_ASSERT */
}
1297 
1298 
1299 /* Prototype, see justification above */
1300 kern_return_t
1301 act_set_state(
1302 	thread_t                                thread,
1303 	int                                             flavor,
1304 	thread_state_t                  state,
1305 	mach_msg_type_number_t  count);
1306 
1307 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1308 act_set_state(
1309 	thread_t                                thread,
1310 	int                                             flavor,
1311 	thread_state_t                  state,
1312 	mach_msg_type_number_t  count)
1313 {
1314 	if (thread == current_thread()) {
1315 		return KERN_INVALID_ARGUMENT;
1316 	}
1317 
1318 	return thread_set_state(thread, flavor, state, count);
1319 }
1320 
1321 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1322 act_set_state_from_user(
1323 	thread_t                                thread,
1324 	int                                             flavor,
1325 	thread_state_t                  state,
1326 	mach_msg_type_number_t  count)
1327 {
1328 	if (thread == current_thread()) {
1329 		return KERN_INVALID_ARGUMENT;
1330 	}
1331 
1332 	return thread_set_state_from_user(thread, flavor, state, count);
1333 }
1334 
1335 /* Prototype, see justification above */
1336 kern_return_t
1337 act_get_state(
1338 	thread_t                                thread,
1339 	int                                             flavor,
1340 	thread_state_t                  state,
1341 	mach_msg_type_number_t  *count);
1342 
1343 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1344 act_get_state(
1345 	thread_t                                thread,
1346 	int                                             flavor,
1347 	thread_state_t                  state,
1348 	mach_msg_type_number_t  *count)
1349 {
1350 	if (thread == current_thread()) {
1351 		return KERN_INVALID_ARGUMENT;
1352 	}
1353 
1354 	return thread_get_state(thread, flavor, state, count);
1355 }
1356 
1357 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1358 act_get_state_to_user(
1359 	thread_t                                thread,
1360 	int                                             flavor,
1361 	thread_state_t                  state,
1362 	mach_msg_type_number_t  *count)
1363 {
1364 	if (thread == current_thread()) {
1365 		return KERN_INVALID_ARGUMENT;
1366 	}
1367 
1368 	return thread_get_state_to_user(thread, flavor, state, count);
1369 }
1370 
1371 static void
act_set_ast(thread_t thread,ast_t ast)1372 act_set_ast(
1373 	thread_t   thread,
1374 	ast_t      ast)
1375 {
1376 	spl_t s = splsched();
1377 
1378 	if (thread == current_thread()) {
1379 		thread_ast_set(thread, ast);
1380 		ast_propagate(thread);
1381 	} else {
1382 		processor_t processor;
1383 
1384 		thread_lock(thread);
1385 		thread_ast_set(thread, ast);
1386 		processor = thread->last_processor;
1387 		if (processor != PROCESSOR_NULL &&
1388 		    processor->state == PROCESSOR_RUNNING &&
1389 		    processor->active_thread == thread) {
1390 			cause_ast_check(processor);
1391 		}
1392 		thread_unlock(thread);
1393 	}
1394 
1395 	splx(s);
1396 }
1397 
1398 /*
1399  * set AST on thread without causing an AST check
1400  * and without taking the thread lock
1401  *
1402  * If thread is not the current thread, then it may take
1403  * up until the next context switch or quantum expiration
1404  * on that thread for it to notice the AST.
1405  */
1406 static void
act_set_ast_async(thread_t thread,ast_t ast)1407 act_set_ast_async(thread_t  thread,
1408     ast_t     ast)
1409 {
1410 	thread_ast_set(thread, ast);
1411 
1412 	if (thread == current_thread()) {
1413 		spl_t s = splsched();
1414 		ast_propagate(thread);
1415 		splx(s);
1416 	}
1417 }
1418 
/*
 * act_set_debug_assert:
 *
 * Arm AST_DEBUG_ASSERT on the current thread, skipping the set and
 * the propagation when the corresponding bit is already pending.
 */
void
act_set_debug_assert(void)
{
	thread_t thread = current_thread();
	/* Only set the per-thread bit if it isn't already pending. */
	if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
		thread_ast_set(thread, AST_DEBUG_ASSERT);
	}
	/* Likewise only propagate if the processor hasn't seen it yet. */
	if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}
1432 
/* Arm AST_BSD on 'thread' (IPIs the target if it is on-core). */
void
act_set_astbsd(thread_t thread)
{
	act_set_ast(thread, AST_BSD);
}
1438 
/*
 * act_set_astkevent:
 *
 * Atomically accumulate 'bits' into the thread's kevent AST bits,
 * then arm AST_KEVENT via the async (no-IPI) path.
 */
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1447 
1448 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1449 act_clear_astkevent(thread_t thread, uint16_t bits)
1450 {
1451 	/*
1452 	 * avoid the atomic operation if none of the bits is set,
1453 	 * which will be the common case.
1454 	 */
1455 	uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1456 	if (cur & bits) {
1457 		cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1458 	}
1459 	return cur & bits;
1460 }
1461 
/*
 * act_set_ast_reset_pcs:
 *
 * Arm AST_RESET_PCS on 'thread' if it is still active.
 *
 * Returns true when the caller must wait for the target to
 * acknowledge: either an IPI ack is pending (target is on-core),
 * or the target was observed faulting inside a reset-pcs range.
 */
bool
act_set_ast_reset_pcs(task_t task, thread_t thread)
{
	processor_t processor;
	bool needs_wait = false;
	spl_t s;

	s = splsched();

	if (thread == current_thread()) {
		/*
		 * this is called from the signal code,
		 * just set the AST and move on
		 */
		thread_ast_set(thread, AST_RESET_PCS);
		ast_propagate(thread);
	} else {
		thread_lock(thread);

		assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
		assert(thread->t_rr_state.trr_sync_waiting == 0);

		processor = thread->last_processor;
		if (!thread->active) {
			/*
			 * ->active is being set before the thread is added
			 * to the thread list (under the task lock which
			 * the caller holds), and is reset before the thread
			 * lock is being taken by thread_terminate_self().
			 *
			 * The result is that this will never fail to
			 * set the AST on an thread that is active,
			 * but will not set it past thread_terminate_self().
			 */
		} else if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			/* On-core: IPI the processor and wait for the ack. */
			thread->t_rr_state.trr_ipi_ack_pending = true;
			needs_wait = true;
			thread_ast_set(thread, AST_RESET_PCS);
			cause_ast_check(processor);
		} else if (thread_reset_pcs_in_range(task, thread)) {
			if (thread->t_rr_state.trr_fault_state) {
				/* Mid-fault in range: caller must wait. */
				thread->t_rr_state.trr_fault_state =
				    TRR_FAULT_OBSERVED;
				needs_wait = true;
			}
			thread_ast_set(thread, AST_RESET_PCS);
		}
		thread_unlock(thread);
	}

	splx(s);

	return needs_wait;
}
1518 
1519 void
act_set_kperf(thread_t thread)1520 act_set_kperf(thread_t thread)
1521 {
1522 	/* safety check */
1523 	if (thread != current_thread()) {
1524 		if (!ml_get_interrupts_enabled()) {
1525 			panic("unsafe act_set_kperf operation");
1526 		}
1527 	}
1528 
1529 	act_set_ast(thread, AST_KPERF);
1530 }
1531 
#if CONFIG_MACF
/* Arm AST_MACF on 'thread' (IPIs the target if it is on-core). */
void
act_set_astmacf(
	thread_t        thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif
1540 
/* Arm AST_LEDGER on 'thread' (IPIs the target if it is on-core). */
void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}
1546 
/*
 * The ledger AST may need to be set while already holding
 * the thread lock.  This routine skips sending the IPI,
 * allowing us to avoid the lock hold.
 *
 * However, it means the targeted thread must context switch
 * to recognize the ledger AST.
 */
void
act_set_astledger_async(thread_t thread)
{
	/* No IPI: the target notices at its next context switch. */
	act_set_ast_async(thread, AST_LEDGER);
}
1560 
/* Arm AST_TELEMETRY_IO on 'thread' (IPIs the target if on-core). */
void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}
1566 
/* Arm AST_TELEMETRY_MACF on 'thread' (IPIs the target if on-core). */
void
act_set_macf_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_MACF);
}
1572 
/* Arm AST_PROC_RESOURCE on 'thread' (IPIs the target if on-core). */
void
act_set_astproc_resource(thread_t thread)
{
	act_set_ast(thread, AST_PROC_RESOURCE);
}
1578