xref: /xnu-10002.61.3/osfmk/kern/thread_act.c (revision 0f4c859e951fba394238ab619495c4e1d54d0f34)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Copyright (c) 1993 The University of Utah and
33  * the Center for Software Science (CSS).  All rights reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright
37  * notice and this permission notice appear in all copies of the
38  * software, derivative works or modified versions, and any portions
39  * thereof, and that both notices appear in supporting documentation.
40  *
41  * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * CSS requests users of this software to return to [email protected] any
46  * improvements that they make and grant CSS redistribution rights.
47  *
48  *	Author:	Bryan Ford, University of Utah CSS
49  *
50  *	Thread management routines
51  */
52 
53 #include <mach/mach_types.h>
54 #include <mach/kern_return.h>
55 #include <mach/thread_act_server.h>
56 #include <mach/thread_act.h>
57 
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/processor.h>
75 #include <kern/restartable.h>
76 #include <kern/timer.h>
77 #include <kern/affinity.h>
78 #include <kern/host.h>
79 #include <kern/exc_guard.h>
80 #include <ipc/port.h>
81 #include <mach/arm/thread_status.h>
82 
83 
84 #include <stdatomic.h>
85 
86 #include <security/mac_mach_internal.h>
87 #include <libkern/coreanalytics/coreanalytics.h>
88 
89 static void act_abort(thread_t thread);
90 
91 static void thread_suspended(void *arg, wait_result_t result);
92 static void thread_set_apc_ast(thread_t thread);
93 static void thread_set_apc_ast_locked(thread_t thread);
94 
95 extern void proc_name(int pid, char * buf, int size);
96 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
97 
/*
 * CoreAnalytics event: records the name of the process whose
 * thread_set_state() attempt was denied by policy.
 */
CA_EVENT(thread_set_state,
    CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc);

/*
 * Emit a telemetry event identifying the current process.  Called when a
 * thread_set_state() request is rejected in thread_set_state_allowed().
 */
static void
send_thread_set_state_telemetry(void)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(thread_set_state);
	CA_EVENT_TYPE(thread_set_state) * event = ca_event->data;

	/* resolve the calling task's pid to its process name for the event */
	proc_name(task_pid(current_task()), (char *) &event->current_proc, CA_PROCNAME_LEN);

	CA_EVENT_SEND(ca_event);
}
111 
/*
 * bootarg to create lightweight corpse for thread set state lockdown.
 * Defaults to true; when set, disallowed thread_set_state() attempts by
 * platform binaries are made fatal (see thread_set_state_allowed()).
 */
TUNABLE(bool, tss_should_crash, "tss_should_crash", true);
114 
115 static inline boolean_t
thread_set_state_allowed(thread_t thread,int flavor)116 thread_set_state_allowed(thread_t thread, int flavor)
117 {
118 	/* platform binaries must have entitlement - all others ok */
119 	if ((task_ro_flags_get(current_task()) & TFRO_PLATFORM)
120 	    && !(thread->options & TH_IN_MACH_EXCEPTION)        /* Allowed for now - rdar://103085786 */
121 	    && FLAVOR_MODIFIES_CORE_CPU_REGISTERS(flavor)       /* only care about locking down PC/LR */
122 #if CONFIG_ROSETTA
123 	    && !task_is_translated(get_threadtask(thread))      /* Ignore translated tasks */
124 #endif /* CONFIG_ROSETTA */
125 	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")
126 	    && tss_should_crash
127 	    ) {
128 		/* fatal crash */
129 		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
130 		send_thread_set_state_telemetry();
131 		return FALSE;
132 	}
133 
134 #if __has_feature(ptrauth_calls)
135 	/* Do not allow Fatal PAC exception binaries to set Debug state */
136 	if (task_is_pac_exception_fatal(get_threadtask(thread))
137 	    && machine_thread_state_is_debug_flavor(flavor)
138 #if CONFIG_ROSETTA
139 	    && !task_is_translated(get_threadtask(thread))      /* Ignore translated tasks */
140 #endif /* CONFIG_ROSETTA */
141 	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state")) {
142 		/* fatal crash */
143 		mach_port_guard_exception(MACH_PORT_NULL, 0, 0, kGUARD_EXC_THREAD_SET_STATE);
144 		send_thread_set_state_telemetry();
145 		return FALSE;
146 	}
147 #endif /* __has_feature(ptrauth_calls) */
148 
149 	return TRUE;
150 }
151 
/*
 * Internal routine to mark a thread as started.
 * Always called with the thread mutex locked.
 *
 * Wakes the thread out of its creation wait and flags it started
 * so subsequent suspend/terminate paths treat it as runnable.
 */
void
thread_start(
	thread_t                        thread)
{
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
163 
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t            thread,
	struct waitq       *waitq,
	event64_t           event,
	wait_interrupt_t    interruptible)
{
	wait_result_t wait_result;
	spl_t spl;

	/* waitq lock must be taken at splsched before the thread lock */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, event,
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
216 
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * If thread is on core, cause AST check immediately;
 * Otherwise, let the thread continue running in kernel
 * until it hits AST.
 *
 * Returns KERN_SUCCESS on the first termination of an active thread,
 * KERN_TERMINATED if the thread was already inactive.
 */
kern_return_t
thread_terminate_internal(
	thread_t                        thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* arrange for the thread to run its termination APC */
		act_abort(thread);

		if (thread->started) {
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* never ran: start it so it can reach the APC handler */
			thread_start(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* drop affinity-set membership regardless of who terminated first */
	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* wait for the victim to get off-CPU before returning */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
262 
/*
 * thread_terminate:
 *
 * Terminate a thread.  A kernel thread may only terminate itself; for
 * such a self-termination this routine does not return (it finishes in
 * thread_apc_ast and panics if that ever falls through).
 */
kern_return_t
thread_terminate(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
298 
299 /*
300  * [MIG Call] Terminate a thread.
301  *
302  * Cannot be used on threads managed by pthread.
303  */
304 kern_return_t
thread_terminate_from_user(thread_t thread)305 thread_terminate_from_user(
306 	thread_t                thread)
307 {
308 	if (thread == THREAD_NULL) {
309 		return KERN_INVALID_ARGUMENT;
310 	}
311 
312 	if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
313 		return KERN_DENIED;
314 	}
315 
316 	return thread_terminate(thread);
317 }
318 
/*
 * Terminate a thread with pinned control port.
 *
 * Can only be used on threads managed by pthread. Exported in pthread_kern.
 */
kern_return_t
thread_terminate_pinned(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);


	assert(task != kernel_task);
	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));

	/* sanity: an active thread in a pinned task must have a pinned self port */
	thread_mtx_lock(thread);
	if (task_is_pinned(task) && thread->active) {
		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
	}
	thread_mtx_unlock(thread);

	kern_return_t result = thread_terminate_internal(thread);
	return result;
}
349 
350 /*
351  * Suspend execution of the specified thread.
352  * This is a recursive-style suspension of the thread, a count of
353  * suspends is maintained.
354  *
355  * Called with thread mutex held.
356  */
357 void
thread_hold(thread_t thread)358 thread_hold(thread_t thread)
359 {
360 	if (thread->suspend_count++ == 0) {
361 		thread_set_apc_ast(thread);
362 		assert(thread->suspend_parked == FALSE);
363 	}
364 }
365 
/*
 * Decrement internal suspension count, setting thread
 * runnable when count falls to zero.
 *
 * Because the wait is abortsafe, we can't be guaranteed that the thread
 * is currently actually waiting even if suspend_parked is set.
 *
 * Called with thread mutex held.
 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* never started: kick it off now */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* wake the thread parked in thread_suspended() */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
394 
/*
 * thread_suspend:
 *
 * User-level (recursive) suspension of a thread via user_stop_count.
 * The first user stop takes an internal hold; waits for the target to
 * get off-CPU before returning.  Kernel-task threads are rejected.
 */
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		/* only the first user stop translates into an internal hold */
		if (thread->user_stop_count++ == 0) {
			thread_hold(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	/* don't return until the target has stopped running */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
422 
/*
 * thread_resume:
 *
 * Undo one level of user suspension.  When the user stop count drops to
 * zero the matching internal hold is released.  Returns KERN_FAILURE if
 * the thread was not user-suspended, KERN_TERMINATED if inactive.
 */
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/* last user resume releases the internal hold */
			if (--thread->user_stop_count == 0) {
				thread_release(thread);
			}
		} else {
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
450 
451 /*
452  *	thread_depress_abort_from_user:
453  *
454  *	Prematurely abort priority depression if there is one.
455  */
456 kern_return_t
thread_depress_abort_from_user(thread_t thread)457 thread_depress_abort_from_user(thread_t thread)
458 {
459 	kern_return_t result;
460 
461 	if (thread == THREAD_NULL) {
462 		return KERN_INVALID_ARGUMENT;
463 	}
464 
465 	thread_mtx_lock(thread);
466 
467 	if (thread->active) {
468 		result = thread_depress_abort(thread);
469 	} else {
470 		result = KERN_TERMINATED;
471 	}
472 
473 	thread_mtx_unlock(thread);
474 
475 	return result;
476 }
477 
478 
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t        thread)
{
	/* sched lock must be taken at splsched */
	spl_t           s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* first abort: flag it, request the APC, and undo depression */
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* already aborting: upgrade a safe abort to an unconditional one */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
504 
/*
 * thread_abort:
 *
 * Unconditionally abort the thread: interrupt any wait it is in and
 * arrange for the abort APC to run.  Returns KERN_TERMINATED if the
 * thread is no longer active.
 */
kern_return_t
thread_abort(
	thread_t        thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
528 
/*
 * thread_abort_safely:
 *
 * Abort the thread only at a safe point: if it is waiting at a safe
 * point the wait is interrupted immediately; otherwise a (safe) abort
 * flag is set and the APC callback is registered so the abort takes
 * effect at the next safe opportunity.
 */
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t           s = splsched();

		thread_lock(thread);
		/* try to interrupt immediately; fall back to deferred abort */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
563 
564 /*** backward compatibility hacks ***/
565 #include <mach/thread_info.h>
566 #include <mach/thread_special_ports.h>
567 #include <ipc/ipc_port.h>
568 
/*
 * thread_info:
 *
 * Return information about the thread for the requested flavor.
 * Inspection-only threads (e.g. corpses) are still readable after
 * they have gone inactive.
 */
kern_return_t
thread_info(
	thread_t                        thread,
	thread_flavor_t                 flavor,
	thread_info_t                   thread_info_out,
	mach_msg_type_number_t  *thread_info_count)
{
	kern_return_t                   result;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active || thread->inspection) {
		result = thread_info_internal(
			thread, flavor, thread_info_out, thread_info_count);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
595 
/*
 * thread_get_state_internal:
 *
 * Fetch the machine-dependent state of a thread.  A remote target is
 * held and stopped (hold/stop/unstop/release) around the machine-level
 * read so its registers are stable.  With TSSF_TRANSLATE_TO_USER the
 * result is additionally converted to the user-space representation.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,          /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	thread_set_status_flags_t  flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* must drop the mutex across thread_stop() */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* stop was aborted; nothing was read */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* current thread: registers are already saved/visible */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* inactive but inspectable (e.g. corpse) - still readable */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
650 
651 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
652 
653 kern_return_t
654 thread_get_state(
655 	thread_t                thread,
656 	int                                             flavor,
657 	thread_state_t                  state,
658 	mach_msg_type_number_t  *state_count);
659 
660 kern_return_t
thread_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)661 thread_get_state(
662 	thread_t                thread,
663 	int                                             flavor,
664 	thread_state_t                  state,                  /* pointer to OUT array */
665 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
666 {
667 	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
668 }
669 
670 kern_return_t
thread_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)671 thread_get_state_to_user(
672 	thread_t                thread,
673 	int                                             flavor,
674 	thread_state_t                  state,                  /* pointer to OUT array */
675 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
676 {
677 	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
678 }
679 
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 *
 *	With TSSF_TRANSLATE_TO_USER the incoming state is in the user
 *	representation and is authenticated/converted before being applied.
 *	With TSSF_CHECK_ENTITLEMENT the request must first pass the
 *	thread_set_state_allowed() policy gate.  A remote target is held
 *	and stopped around the machine-level write.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                        thread,
	int                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t          state_count,
	thread_state_t                  old_state,
	mach_msg_type_number_t          old_state_count,
	thread_set_status_flags_t       flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* policy gate for user-originated set-state (see tss_should_crash) */
	if ((flags & TSSF_CHECK_ENTITLEMENT) &&
	    !thread_set_state_allowed(thread, flavor)) {
		return KERN_NO_ACCESS;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/* authenticate/convert user representation first */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			/* must drop the mutex across thread_stop() */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* stop was aborted; nothing was written */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* account external modification of another task's thread */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
749 
750 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
751 kern_return_t
752 thread_set_state(
753 	thread_t                thread,
754 	int                                             flavor,
755 	thread_state_t                  state,
756 	mach_msg_type_number_t  state_count);
757 
758 kern_return_t
thread_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)759 thread_set_state(
760 	thread_t                thread,
761 	int                                             flavor,
762 	thread_state_t                  state,
763 	mach_msg_type_number_t  state_count)
764 {
765 	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
766 }
767 
768 kern_return_t
thread_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)769 thread_set_state_from_user(
770 	thread_t                thread,
771 	int                                             flavor,
772 	thread_state_t                  state,
773 	mach_msg_type_number_t  state_count)
774 {
775 	return thread_set_state_internal(thread, flavor, state, state_count, NULL,
776 	           0, TSSF_TRANSLATE_TO_USER | TSSF_CHECK_ENTITLEMENT);
777 }
778 
/*
 * thread_convert_thread_state:
 *
 * Re-sign/convert a thread state blob between the user representations
 * of two threads (the given thread and the current thread), in the
 * direction requested.  The state is first authenticated and converted
 * from the source thread's user view into the kernel view, then
 * converted into the destination thread's user view.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                     direction,
	thread_state_flavor_t   flavor,
	thread_state_t          in_state,          /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t          out_state,         /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count)   /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* caller's buffer must fit at least the input-sized result */
	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		if (state_count <= *out_state_count) {
			/* state counts are in 32-bit units (Mach convention) */
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
839 
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 */
kern_return_t
thread_state_initialize(
	thread_t                thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Thread created in exec should be blocked in UNINT wait */
			assert(!(thread->state & TH_RUN));
		}
		machine_thread_state_initialize( thread );
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
873 
/*
 * thread_dup:
 *
 * Copy the current thread's machine state (and affinity, if any) onto
 * the target thread.  The target is held and stopped around the copy.
 * The current thread may not be its own target.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t                        self = current_thread();
	kern_return_t           result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		/* must drop the mutex across thread_stop() */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			/* propagate the caller's affinity set, if it has one */
			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			/* stop was aborted; nothing was copied */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
914 

/*
 * thread_dup2:
 *
 * Copy machine state (and affinity, if any) from an arbitrary source
 * thread onto the target thread.  The source must be active; the
 * target may also be inspection-only.  The target is held and stopped
 * around the copy.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t           result = KERN_SUCCESS;
	uint32_t                active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* snapshot source liveness under its own mutex */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		/* must drop the mutex across thread_stop() */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			/* stop was aborted; nothing was copied */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
964 
965 /*
966  *	thread_setstatus:
967  *
968  *	Set the status of the specified thread.
969  *	Called with (and returns with) no locks held.
970  */
971 kern_return_t
thread_setstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count)972 thread_setstatus(
973 	thread_t                thread,
974 	int                                             flavor,
975 	thread_state_t                  tstate,
976 	mach_msg_type_number_t  count)
977 {
978 	return thread_set_state(thread, flavor, tstate, count);
979 }
980 
981 kern_return_t
thread_setstatus_from_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count,thread_state_t old_tstate,mach_msg_type_number_t old_count,thread_set_status_flags_t flags)982 thread_setstatus_from_user(
983 	thread_t                thread,
984 	int                                             flavor,
985 	thread_state_t                  tstate,
986 	mach_msg_type_number_t  count,
987 	thread_state_t                  old_tstate,
988 	mach_msg_type_number_t  old_count,
989 	thread_set_status_flags_t flags)
990 {
991 	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
992 	           old_count, flags | TSSF_TRANSLATE_TO_USER);
993 }
994 
995 /*
996  *	thread_getstatus:
997  *
998  *	Get the status of the specified thread.
999  */
1000 kern_return_t
thread_getstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count)1001 thread_getstatus(
1002 	thread_t                thread,
1003 	int                                             flavor,
1004 	thread_state_t                  tstate,
1005 	mach_msg_type_number_t  *count)
1006 {
1007 	return thread_get_state(thread, flavor, tstate, count);
1008 }
1009 
1010 kern_return_t
thread_getstatus_to_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count,thread_set_status_flags_t flags)1011 thread_getstatus_to_user(
1012 	thread_t                thread,
1013 	int                                             flavor,
1014 	thread_state_t                  tstate,
1015 	mach_msg_type_number_t  *count,
1016 	thread_set_status_flags_t flags)
1017 {
1018 	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
1019 }
1020 
/*
 *	Change thread's machine-dependent userspace TSD base.
 *  Called with nothing locked.  Returns same way.
 *
 *  A remote target is held and stopped (hold/stop/unstop/release)
 *  around the machine-level update so the change is applied to a
 *  quiescent thread.
 */
kern_return_t
thread_set_tsd_base(
	thread_t                        thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* must drop the mutex across thread_stop() */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				/* stop was aborted; nothing was changed */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
1065 
1066 /*
1067  * thread_set_apc_ast:
1068  *
1069  * Register the AST_APC callback that handles suspension and
1070  * termination, if it hasn't been installed already.
1071  *
1072  * Called with the thread mutex held.
1073  */
1074 static void
thread_set_apc_ast(thread_t thread)1075 thread_set_apc_ast(thread_t thread)
1076 {
1077 	spl_t s = splsched();
1078 
1079 	thread_lock(thread);
1080 	thread_set_apc_ast_locked(thread);
1081 	thread_unlock(thread);
1082 
1083 	splx(s);
1084 }
1085 
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* Current thread: make the AST visible for kernel exit. */
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		/*
		 * If the target is on-core on another processor right now,
		 * IPI it so the AST is noticed promptly; otherwise it will
		 * be seen when the thread next runs.
		 */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1110 
/*
 * Activation control support routines internal to this file:
 *
 */

/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		/* Wait was aborted; we are no longer parked on suspend_count. */
		thread->suspend_parked = FALSE;
	} else {
		/* Normal wakeup: the waker cleared suspend_parked before waking us. */
		assert(thread->suspend_parked == FALSE);
	}

	if (thread->suspend_count > 0) {
		/* New suspensions arrived while we slept; re-arm the APC AST. */
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	/* Continuations never return to their caller; exit to user mode. */
	thread_exception_return();
	/*NOTREACHED*/
}
1146 
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Clear any pending abort state now that the AST is being serviced. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Execution resumes in thread_suspended(); thread_block won't return. */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1189 
#if CONFIG_ROSETTA
extern kern_return_t
exception_deliver(
	thread_t                thread,
	exception_type_t        exception,
	mach_exception_data_t   code,
	mach_msg_type_number_t  codeCnt,
	struct exception_action *excp,
	lck_mtx_t               *mutex);

/*
 * thread_raise_exception:
 *
 * Deliver a Mach exception on behalf of a translated (Rosetta) task.
 * Only threads of the caller's own, translated task may be targeted,
 * and EXC_CRASH may not be raised this way.  On successful delivery
 * the target thread is resumed.
 */
kern_return_t
thread_raise_exception(
	thread_t thread,
	exception_type_t exception,
	natural_t code_count,
	int64_t code,
	int64_t sub_code)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Only threads in the caller's own task are eligible. */
	if (task != current_task()) {
		return KERN_FAILURE;
	}

	/* This interface exists solely for translated (Rosetta) tasks. */
	if (!task_is_translated(task)) {
		return KERN_FAILURE;
	}

	if (exception == EXC_CRASH) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * NOTE(review): code_count is forwarded to exception_deliver()
	 * unvalidated while codes[] holds exactly two entries — confirm
	 * the caller (MIG) bounds code_count to <= 2.
	 */
	int64_t codes[] = { code, sub_code };
	host_priv_t host_priv = host_priv_self();
	kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return thread_resume(thread);
}
#endif
1238 
/*
 * thread_debug_return_to_user_ast:
 *
 * Debug-build (MACH_ASSERT) sanity checks performed on the way back to
 * userspace: panic if the thread still carries an RW-lock promotion or
 * hold count, or a priority-floor boost — neither may leak across the
 * user boundary.  No-op on release builds.
 */
void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}
#endif /* MACH_ASSERT */
}
1256 
1257 
1258 /* Prototype, see justification above */
1259 kern_return_t
1260 act_set_state(
1261 	thread_t                                thread,
1262 	int                                             flavor,
1263 	thread_state_t                  state,
1264 	mach_msg_type_number_t  count);
1265 
1266 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1267 act_set_state(
1268 	thread_t                                thread,
1269 	int                                             flavor,
1270 	thread_state_t                  state,
1271 	mach_msg_type_number_t  count)
1272 {
1273 	if (thread == current_thread()) {
1274 		return KERN_INVALID_ARGUMENT;
1275 	}
1276 
1277 	return thread_set_state(thread, flavor, state, count);
1278 }
1279 
1280 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1281 act_set_state_from_user(
1282 	thread_t                                thread,
1283 	int                                             flavor,
1284 	thread_state_t                  state,
1285 	mach_msg_type_number_t  count)
1286 {
1287 	if (thread == current_thread()) {
1288 		return KERN_INVALID_ARGUMENT;
1289 	}
1290 
1291 	return thread_set_state_from_user(thread, flavor, state, count);
1292 }
1293 
1294 /* Prototype, see justification above */
1295 kern_return_t
1296 act_get_state(
1297 	thread_t                                thread,
1298 	int                                             flavor,
1299 	thread_state_t                  state,
1300 	mach_msg_type_number_t  *count);
1301 
1302 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1303 act_get_state(
1304 	thread_t                                thread,
1305 	int                                             flavor,
1306 	thread_state_t                  state,
1307 	mach_msg_type_number_t  *count)
1308 {
1309 	if (thread == current_thread()) {
1310 		return KERN_INVALID_ARGUMENT;
1311 	}
1312 
1313 	return thread_get_state(thread, flavor, state, count);
1314 }
1315 
1316 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1317 act_get_state_to_user(
1318 	thread_t                                thread,
1319 	int                                             flavor,
1320 	thread_state_t                  state,
1321 	mach_msg_type_number_t  *count)
1322 {
1323 	if (thread == current_thread()) {
1324 		return KERN_INVALID_ARGUMENT;
1325 	}
1326 
1327 	return thread_get_state_to_user(thread, flavor, state, count);
1328 }
1329 
/*
 * act_set_ast:
 *
 * Set an AST on a thread and ensure it is noticed: propagate
 * immediately for the current thread, or IPI the processor the
 * target is currently running on.  Called with nothing locked.
 */
static void
act_set_ast(
	thread_t   thread,
	ast_t      ast)
{
	spl_t s = splsched();

	if (thread == current_thread()) {
		thread_ast_set(thread, ast);
		ast_propagate(thread);
	} else {
		processor_t processor;

		/* Remote target: thread lock protects last_processor examination. */
		thread_lock(thread);
		thread_ast_set(thread, ast);
		processor = thread->last_processor;
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			/* Target is on-core right now; force an AST check there. */
			cause_ast_check(processor);
		}
		thread_unlock(thread);
	}

	splx(s);
}
1356 
/*
 * set AST on thread without causing an AST check
 * and without taking the thread lock
 *
 * If thread is not the current thread, then it may take
 * up until the next context switch or quantum expiration
 * on that thread for it to notice the AST.
 */
static void
act_set_ast_async(thread_t  thread,
    ast_t     ast)
{
	thread_ast_set(thread, ast);

	if (thread == current_thread()) {
		/* Only the current thread can be propagated without the lock. */
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}
1377 
/*
 * act_set_debug_assert:
 *
 * Arm AST_DEBUG_ASSERT on the current thread, skipping the set and
 * the propagate when the respective bit is already pending.
 */
void
act_set_debug_assert(void)
{
	thread_t thread = current_thread();
	if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
		thread_ast_set(thread, AST_DEBUG_ASSERT);
	}
	if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
		/* Not yet visible at the processor level; propagate it now. */
		spl_t s = splsched();
		ast_propagate(thread);
		splx(s);
	}
}
1391 
1392 void
act_set_astbsd(thread_t thread)1393 act_set_astbsd(thread_t thread)
1394 {
1395 	act_set_ast(thread, AST_BSD);
1396 }
1397 
/*
 * act_set_astkevent:
 *
 * Merge kevent AST bits into the thread, then request AST_KEVENT
 * without an IPI.  The bits are merged before the AST is requested
 * so the handler observes them.
 */
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1406 
1407 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1408 act_clear_astkevent(thread_t thread, uint16_t bits)
1409 {
1410 	/*
1411 	 * avoid the atomic operation if none of the bits is set,
1412 	 * which will be the common case.
1413 	 */
1414 	uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1415 	if (cur & bits) {
1416 		cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1417 	}
1418 	return cur & bits;
1419 }
1420 
/*
 * act_set_ast_reset_pcs:
 *
 * Request AST_RESET_PCS on a thread for the restartable-ranges
 * machinery.  Returns true when the caller must wait for the target
 * to acknowledge (pending IPI ack, or an observed fault in range).
 * Caller holds the task lock (see comment in body).
 */
bool
act_set_ast_reset_pcs(task_t task, thread_t thread)
{
	processor_t processor;
	bool needs_wait = false;
	spl_t s;

	s = splsched();

	if (thread == current_thread()) {
		/*
		 * this is called from the signal code,
		 * just set the AST and move on
		 */
		thread_ast_set(thread, AST_RESET_PCS);
		ast_propagate(thread);
	} else {
		thread_lock(thread);

		assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
		assert(thread->t_rr_state.trr_sync_waiting == 0);

		processor = thread->last_processor;
		if (!thread->active) {
			/*
			 * ->active is being set before the thread is added
			 * to the thread list (under the task lock which
			 * the caller holds), and is reset before the thread
			 * lock is being taken by thread_terminate_self().
			 *
			 * The result is that this will never fail to
			 * set the AST on an thread that is active,
			 * but will not set it past thread_terminate_self().
			 */
		} else if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			/* On-core: IPI and wait for the ack from the target. */
			thread->t_rr_state.trr_ipi_ack_pending = true;
			needs_wait = true;
			thread_ast_set(thread, AST_RESET_PCS);
			cause_ast_check(processor);
		} else if (thread_reset_pcs_in_range(task, thread)) {
			/* Off-core but stopped inside a restartable range. */
			if (thread->t_rr_state.trr_fault_state) {
				thread->t_rr_state.trr_fault_state =
				    TRR_FAULT_OBSERVED;
				needs_wait = true;
			}
			thread_ast_set(thread, AST_RESET_PCS);
		}
		thread_unlock(thread);
	}

	splx(s);

	return needs_wait;
}
1477 
1478 void
act_set_kperf(thread_t thread)1479 act_set_kperf(thread_t thread)
1480 {
1481 	/* safety check */
1482 	if (thread != current_thread()) {
1483 		if (!ml_get_interrupts_enabled()) {
1484 			panic("unsafe act_set_kperf operation");
1485 		}
1486 	}
1487 
1488 	act_set_ast(thread, AST_KPERF);
1489 }
1490 
#if CONFIG_MACF
/*
 * act_set_astmacf:
 *
 * Request the MAC framework AST on the target thread.
 */
void
act_set_astmacf(
	thread_t        thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif
1499 
1500 void
act_set_astledger(thread_t thread)1501 act_set_astledger(thread_t thread)
1502 {
1503 	act_set_ast(thread, AST_LEDGER);
1504 }
1505 
1506 /*
1507  * The ledger AST may need to be set while already holding
1508  * the thread lock.  This routine skips sending the IPI,
1509  * allowing us to avoid the lock hold.
1510  *
1511  * However, it means the targeted thread must context switch
1512  * to recognize the ledger AST.
1513  */
1514 void
act_set_astledger_async(thread_t thread)1515 act_set_astledger_async(thread_t thread)
1516 {
1517 	act_set_ast_async(thread, AST_LEDGER);
1518 }
1519 
1520 void
act_set_io_telemetry_ast(thread_t thread)1521 act_set_io_telemetry_ast(thread_t thread)
1522 {
1523 	act_set_ast(thread, AST_TELEMETRY_IO);
1524 }
1525 
1526 void
act_set_macf_telemetry_ast(thread_t thread)1527 act_set_macf_telemetry_ast(thread_t thread)
1528 {
1529 	act_set_ast(thread, AST_TELEMETRY_MACF);
1530 }
1531 
1532 void
act_set_astproc_resource(thread_t thread)1533 act_set_astproc_resource(thread_t thread)
1534 {
1535 	act_set_ast(thread, AST_PROC_RESOURCE);
1536 }
1537