xref: /xnu-8019.80.24/osfmk/kern/thread_act.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Copyright (c) 1993 The University of Utah and
33  * the Center for Software Science (CSS).  All rights reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright
37  * notice and this permission notice appear in all copies of the
38  * software, derivative works or modified versions, and any portions
39  * thereof, and that both notices appear in supporting documentation.
40  *
41  * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * CSS requests users of this software to return to [email protected] any
46  * improvements that they make and grant CSS redistribution rights.
47  *
48  *	Author:	Bryan Ford, University of Utah CSS
49  *
50  *	Thread management routines
51  */
52 
53 #include <mach/mach_types.h>
54 #include <mach/kern_return.h>
55 #include <mach/thread_act_server.h>
56 #include <mach/thread_act.h>
57 
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/processor.h>
75 #include <kern/timer.h>
76 #include <kern/affinity.h>
77 #include <kern/host.h>
78 
79 #include <stdatomic.h>
80 
81 #include <security/mac_mach_internal.h>
82 
83 static void act_abort(thread_t thread);
84 
85 static void thread_suspended(void *arg, wait_result_t result);
86 static void thread_set_apc_ast(thread_t thread);
87 static void thread_set_apc_ast_locked(thread_t thread);
88 
89 /*
90  * Internal routine to mark a thread as started.
91  * Always called with the thread mutex locked.
92  */
93 void
thread_start(thread_t thread)94 thread_start(
95 	thread_t                        thread)
96 {
97 	clear_wait(thread, THREAD_AWAKENED);
98 	thread->started = TRUE;
99 }
100 
/*
 * Internal routine to mark a thread as waiting
 * right after it has been created.  The caller
 * is responsible to call wakeup()/thread_wakeup()
 * or thread_terminate() to get it going.
 *
 * Always called with the thread mutex locked.
 *
 * Task and task_threads mutexes also held
 * (so nobody can set the thread running before
 * this point)
 *
 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
 * to allow termination from this point forward.
 */
void
thread_start_in_assert_wait(
	thread_t                        thread,
	event_t             event,
	wait_interrupt_t    interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	/* Lock order: splsched, then waitq lock, then thread lock. */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
153 
/*
 * Internal routine to terminate a thread.
 * Sometimes called with task already locked.
 *
 * Returns KERN_SUCCESS if this call initiated termination,
 * KERN_TERMINATED if the thread was already terminating.
 */
kern_return_t
thread_terminate_internal(
	thread_t                        thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Arm the AST_APC so the thread tears itself down. */
		act_abort(thread);

		if (thread->started) {
			/* Interrupt any wait so the APC is noticed promptly. */
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* Never-started thread: start it so it can self-terminate. */
			thread_start(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* Wait for the target to stop running before returning to the caller. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
195 
/*
 * Terminate a thread.  For a kernel thread terminating itself,
 * this never returns; user threads finish termination via the
 * AST_APC path on their next kernel exit.
 */
kern_return_t
thread_terminate(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for a terminating kernel thread */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
231 
232 /*
233  * [MIG Call] Terminate a thread.
234  *
235  * Cannot be used on threads managed by pthread.
236  */
237 kern_return_t
thread_terminate_from_user(thread_t thread)238 thread_terminate_from_user(
239 	thread_t                thread)
240 {
241 	if (thread == THREAD_NULL) {
242 		return KERN_INVALID_ARGUMENT;
243 	}
244 
245 	if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
246 		return KERN_DENIED;
247 	}
248 
249 	return thread_terminate(thread);
250 }
251 
/*
 * Terminate a thread with pinned control port.
 *
 * Can only be used on threads managed by pthread. Exported in pthread_kern.
 */
kern_return_t
thread_terminate_pinned(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);


	assert(task != kernel_task);
	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));

	thread_mtx_lock(thread);
	if (task_is_pinned(task) && thread->active) {
		/* Sanity: a live thread in a pinned task must carry a pinned self port. */
		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
	}
	thread_mtx_unlock(thread);

	/* thread_terminate_internal() unpins the control port unconditionally. */
	kern_return_t result = thread_terminate_internal(thread);
	return result;
}
282 
283 /*
284  * Suspend execution of the specified thread.
285  * This is a recursive-style suspension of the thread, a count of
286  * suspends is maintained.
287  *
288  * Called with thread mutex held.
289  */
290 void
thread_hold(thread_t thread)291 thread_hold(thread_t thread)
292 {
293 	if (thread->suspend_count++ == 0) {
294 		thread_set_apc_ast(thread);
295 		assert(thread->suspend_parked == FALSE);
296 	}
297 }
298 
299 /*
300  * Decrement internal suspension count, setting thread
301  * runnable when count falls to zero.
302  *
303  * Because the wait is abortsafe, we can't be guaranteed that the thread
304  * is currently actually waiting even if suspend_parked is set.
305  *
306  * Called with thread mutex held.
307  */
308 void
thread_release(thread_t thread)309 thread_release(thread_t thread)
310 {
311 	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);
312 
313 	/* fail-safe on non-assert builds */
314 	if (thread->suspend_count == 0) {
315 		return;
316 	}
317 
318 	if (--thread->suspend_count == 0) {
319 		if (!thread->started) {
320 			thread_start(thread);
321 		} else if (thread->suspend_parked) {
322 			thread->suspend_parked = FALSE;
323 			thread_wakeup_thread(&thread->suspend_count, thread);
324 		}
325 	}
326 }
327 
328 kern_return_t
thread_suspend(thread_t thread)329 thread_suspend(thread_t thread)
330 {
331 	kern_return_t result = KERN_SUCCESS;
332 
333 	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
334 		return KERN_INVALID_ARGUMENT;
335 	}
336 
337 	thread_mtx_lock(thread);
338 
339 	if (thread->active) {
340 		if (thread->user_stop_count++ == 0) {
341 			thread_hold(thread);
342 		}
343 	} else {
344 		result = KERN_TERMINATED;
345 	}
346 
347 	thread_mtx_unlock(thread);
348 
349 	if (thread != current_thread() && result == KERN_SUCCESS) {
350 		thread_wait(thread, FALSE);
351 	}
352 
353 	return result;
354 }
355 
356 kern_return_t
thread_resume(thread_t thread)357 thread_resume(thread_t thread)
358 {
359 	kern_return_t result = KERN_SUCCESS;
360 
361 	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
362 		return KERN_INVALID_ARGUMENT;
363 	}
364 
365 	thread_mtx_lock(thread);
366 
367 	if (thread->active) {
368 		if (thread->user_stop_count > 0) {
369 			if (--thread->user_stop_count == 0) {
370 				thread_release(thread);
371 			}
372 		} else {
373 			result = KERN_FAILURE;
374 		}
375 	} else {
376 		result = KERN_TERMINATED;
377 	}
378 
379 	thread_mtx_unlock(thread);
380 
381 	return result;
382 }
383 
384 /*
385  *	thread_depress_abort_from_user:
386  *
387  *	Prematurely abort priority depression if there is one.
388  */
389 kern_return_t
thread_depress_abort_from_user(thread_t thread)390 thread_depress_abort_from_user(thread_t thread)
391 {
392 	kern_return_t result;
393 
394 	if (thread == THREAD_NULL) {
395 		return KERN_INVALID_ARGUMENT;
396 	}
397 
398 	thread_mtx_lock(thread);
399 
400 	if (thread->active) {
401 		result = thread_depress_abort(thread);
402 	} else {
403 		result = KERN_TERMINATED;
404 	}
405 
406 	thread_mtx_unlock(thread);
407 
408 	return result;
409 }
410 
411 
/*
 * Indicate that the thread should run the AST_APC callback
 * to detect an abort condition.
 *
 * Called with thread mutex held.
 */
static void
act_abort(
	thread_t        thread)
{
	spl_t           s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* Already aborting: strip the "safely" bit so the abort is unconditional. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
437 
438 kern_return_t
thread_abort(thread_t thread)439 thread_abort(
440 	thread_t        thread)
441 {
442 	kern_return_t   result = KERN_SUCCESS;
443 
444 	if (thread == THREAD_NULL) {
445 		return KERN_INVALID_ARGUMENT;
446 	}
447 
448 	thread_mtx_lock(thread);
449 
450 	if (thread->active) {
451 		act_abort(thread);
452 		clear_wait(thread, THREAD_INTERRUPTED);
453 	} else {
454 		result = KERN_TERMINATED;
455 	}
456 
457 	thread_mtx_unlock(thread);
458 
459 	return result;
460 }
461 
/*
 * Abort the thread only at a "safe point": if it is not at one, or
 * its wait cannot be cleared, defer by flagging a safely-abort and
 * arming the AST_APC instead of interrupting immediately.
 */
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t           s = splsched();

		thread_lock(thread);
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			/* Only arm once; a pending unconditional abort subsumes this. */
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
496 
497 /*** backward compatibility hacks ***/
498 #include <mach/thread_info.h>
499 #include <mach/thread_special_ports.h>
500 #include <ipc/ipc_port.h>
501 
502 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)503 thread_info(
504 	thread_t                        thread,
505 	thread_flavor_t                 flavor,
506 	thread_info_t                   thread_info_out,
507 	mach_msg_type_number_t  *thread_info_count)
508 {
509 	kern_return_t                   result;
510 
511 	if (thread == THREAD_NULL) {
512 		return KERN_INVALID_ARGUMENT;
513 	}
514 
515 	thread_mtx_lock(thread);
516 
517 	if (thread->active || thread->inspection) {
518 		result = thread_info_internal(
519 			thread, flavor, thread_info_out, thread_info_count);
520 	} else {
521 		result = KERN_TERMINATED;
522 	}
523 
524 	thread_mtx_unlock(thread);
525 
526 	return result;
527 }
528 
/*
 * Fetch the machine-dependent state of a thread.
 *
 * A remote target is held and stopped so its register state is
 * stable while machine_thread_get_state() runs; the current thread
 * can be read directly.  When to_user is TRUE the result is
 * converted to the userspace representation (e.g. pointer
 * re-signing) before being returned.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	boolean_t                               to_user)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex while waiting for the target to stop. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* Stop was aborted; reacquire and report. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* Inactive but inspectable (e.g. corpse): state is frozen. */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count);
	}

	thread_mtx_unlock(thread);

	return result;
}
582 
583 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
584 
585 kern_return_t
586 thread_get_state(
587 	thread_t                thread,
588 	int                                             flavor,
589 	thread_state_t                  state,
590 	mach_msg_type_number_t  *state_count);
591 
592 kern_return_t
thread_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)593 thread_get_state(
594 	thread_t                thread,
595 	int                                             flavor,
596 	thread_state_t                  state,                  /* pointer to OUT array */
597 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
598 {
599 	return thread_get_state_internal(thread, flavor, state, state_count, FALSE);
600 }
601 
602 kern_return_t
thread_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)603 thread_get_state_to_user(
604 	thread_t                thread,
605 	int                                             flavor,
606 	thread_state_t                  state,                  /* pointer to OUT array */
607 	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
608 {
609 	return thread_get_state_internal(thread, flavor, state, state_count, TRUE);
610 }
611 
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 *
 *	When from_user is TRUE the incoming state is first converted
 *	from the userspace representation; a failed conversion bails
 *	out before any state is touched.  A remote target is held and
 *	stopped for the duration of machine_thread_set_state().
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count,
	boolean_t                               from_user)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count);
			if (result != KERN_SUCCESS) {
				/* Conversion failed: skip the set and the statistics bump. */
				goto out;
			}
		}
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex while waiting for the target to stop. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* Account external modification of this thread's state. */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
673 
674 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
675 kern_return_t
676 thread_set_state(
677 	thread_t                thread,
678 	int                                             flavor,
679 	thread_state_t                  state,
680 	mach_msg_type_number_t  state_count);
681 
682 kern_return_t
thread_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)683 thread_set_state(
684 	thread_t                thread,
685 	int                                             flavor,
686 	thread_state_t                  state,
687 	mach_msg_type_number_t  state_count)
688 {
689 	return thread_set_state_internal(thread, flavor, state, state_count, FALSE);
690 }
691 
692 kern_return_t
thread_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)693 thread_set_state_from_user(
694 	thread_t                thread,
695 	int                                             flavor,
696 	thread_state_t                  state,
697 	mach_msg_type_number_t  state_count)
698 {
699 	return thread_set_state_internal(thread, flavor, state, state_count, TRUE);
700 }
701 
/*
 * Convert a thread-state buffer between the user representations of
 * two threads (current thread and the target), passing through the
 * kernel representation in between.  direction selects which thread
 * is the source and which the destination.  in_state is modified in
 * place and then copied to out_state on success.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                     direction,
	thread_state_flavor_t   flavor,
	thread_state_t          in_state,          /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t          out_state,         /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count)   /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count);

	if (kr == KERN_SUCCESS) {
		/* Conversion may have changed the count; re-check the output buffer. */
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
762 
/*
 * Kernel-internal "thread" interfaces used outside this file:
 */

/* Initialize (or re-initialize) a thread state.  Called from execve
 * with nothing locked, returns same way.
 *
 * A remote target is held and stopped while its machine state is
 * reset; the current thread resets its own state directly.
 */
kern_return_t
thread_state_initialize(
	thread_t                thread)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex while waiting for the target to stop. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				machine_thread_state_initialize( thread );
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			machine_thread_state_initialize( thread );
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
809 
/*
 * Duplicate the current thread's machine state (and affinity, if
 * any) into target.  Used by fork-like paths.  The target is held
 * and stopped for the duration of the copy.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t                        self = current_thread();
	kern_return_t           result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		/* Drop the mutex while waiting for the target to stop. */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
850 
851 
/*
 * Copy machine state (and affinity) from an arbitrary source thread
 * into target.  Unlike thread_dup(), the source need not be the
 * current thread, and an inspection-only target is permitted.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t           result = KERN_SUCCESS;
	uint32_t                active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Snapshot the source's liveness; it is not held across the copy. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		/* Drop the mutex while waiting for the target to stop. */
		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
900 
901 /*
902  *	thread_setstatus:
903  *
904  *	Set the status of the specified thread.
905  *	Called with (and returns with) no locks held.
906  */
907 kern_return_t
thread_setstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count)908 thread_setstatus(
909 	thread_t                thread,
910 	int                                             flavor,
911 	thread_state_t                  tstate,
912 	mach_msg_type_number_t  count)
913 {
914 	return thread_set_state(thread, flavor, tstate, count);
915 }
916 
917 kern_return_t
thread_setstatus_from_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count)918 thread_setstatus_from_user(
919 	thread_t                thread,
920 	int                                             flavor,
921 	thread_state_t                  tstate,
922 	mach_msg_type_number_t  count)
923 {
924 	return thread_set_state_from_user(thread, flavor, tstate, count);
925 }
926 
927 /*
928  *	thread_getstatus:
929  *
930  *	Get the status of the specified thread.
931  */
932 kern_return_t
thread_getstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count)933 thread_getstatus(
934 	thread_t                thread,
935 	int                                             flavor,
936 	thread_state_t                  tstate,
937 	mach_msg_type_number_t  *count)
938 {
939 	return thread_get_state(thread, flavor, tstate, count);
940 }
941 
942 kern_return_t
thread_getstatus_to_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count)943 thread_getstatus_to_user(
944 	thread_t                thread,
945 	int                                             flavor,
946 	thread_state_t                  tstate,
947 	mach_msg_type_number_t  *count)
948 {
949 	return thread_get_state_to_user(thread, flavor, tstate, count);
950 }
951 
/*
 *	Change thread's machine-dependent userspace TSD base.
 *  Called with nothing locked.  Returns same way.
 *
 *  A remote target is held and stopped while the base is updated;
 *  the current thread updates its own directly.
 */
kern_return_t
thread_set_tsd_base(
	thread_t                        thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			thread_hold(thread);

			/* Drop the mutex while waiting for the target to stop. */
			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
996 
/*
 * thread_set_apc_ast:
 *
 * Register the AST_APC callback that handles suspension and
 * termination, if it hasn't been installed already.
 *
 * Called with the thread mutex held.
 */
static void
thread_set_apc_ast(thread_t thread)
{
	/* Take the scheduling lock before touching AST state. */
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
1016 
/*
 * thread_set_apc_ast_locked:
 *
 * Do the work of registering for the AST_APC callback.
 *
 * Called with the thread mutex and scheduling lock held.
 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* Self-targeted: propagate so the AST fires on kernel exit. */
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		/*
		 * If the target is currently running on another processor,
		 * poke that processor so it notices the pending AST.
		 */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1041 
1042 /*
1043  * Activation control support routines internal to this file:
1044  *
1045  */
1046 
/*
 * thread_suspended
 *
 * Continuation routine for thread suspension.  It checks
 * to see whether there has been any new suspensions.  If so, it
 * installs the AST_APC handler again.
 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		/* Wait was aborted: we are no longer parked. */
		thread->suspend_parked = FALSE;
	} else {
		assert(thread->suspend_parked == FALSE);
	}

	/* Still suspended: re-arm the APC so we park again on the way out. */
	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
1077 
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Consume any pending abort flags now that the APC has fired. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Resumes in thread_suspended(); does not return here. */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1120 
1121 
/*
 * Debug-build (MACH_ASSERT) check on the return-to-user path: panic if
 * the thread would re-enter userspace while still carrying a kernel
 * RW-lock promotion or a priority-floor boost.
 */
void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	/* No reader-writer lock hold or promotion may leak to userspace. */
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	/* No priority-floor boost may leak to userspace either. */
	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}
#endif /* MACH_ASSERT */
}
1139 
1140 
1141 /* Prototype, see justification above */
1142 kern_return_t
1143 act_set_state(
1144 	thread_t                                thread,
1145 	int                                             flavor,
1146 	thread_state_t                  state,
1147 	mach_msg_type_number_t  count);
1148 
1149 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1150 act_set_state(
1151 	thread_t                                thread,
1152 	int                                             flavor,
1153 	thread_state_t                  state,
1154 	mach_msg_type_number_t  count)
1155 {
1156 	if (thread == current_thread()) {
1157 		return KERN_INVALID_ARGUMENT;
1158 	}
1159 
1160 	return thread_set_state(thread, flavor, state, count);
1161 }
1162 
1163 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1164 act_set_state_from_user(
1165 	thread_t                                thread,
1166 	int                                             flavor,
1167 	thread_state_t                  state,
1168 	mach_msg_type_number_t  count)
1169 {
1170 	if (thread == current_thread()) {
1171 		return KERN_INVALID_ARGUMENT;
1172 	}
1173 
1174 	return thread_set_state_from_user(thread, flavor, state, count);
1175 }
1176 
1177 /* Prototype, see justification above */
1178 kern_return_t
1179 act_get_state(
1180 	thread_t                                thread,
1181 	int                                             flavor,
1182 	thread_state_t                  state,
1183 	mach_msg_type_number_t  *count);
1184 
1185 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1186 act_get_state(
1187 	thread_t                                thread,
1188 	int                                             flavor,
1189 	thread_state_t                  state,
1190 	mach_msg_type_number_t  *count)
1191 {
1192 	if (thread == current_thread()) {
1193 		return KERN_INVALID_ARGUMENT;
1194 	}
1195 
1196 	return thread_get_state(thread, flavor, state, count);
1197 }
1198 
1199 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1200 act_get_state_to_user(
1201 	thread_t                                thread,
1202 	int                                             flavor,
1203 	thread_state_t                  state,
1204 	mach_msg_type_number_t  *count)
1205 {
1206 	if (thread == current_thread()) {
1207 		return KERN_INVALID_ARGUMENT;
1208 	}
1209 
1210 	return thread_get_state_to_user(thread, flavor, state, count);
1211 }
1212 
1213 static void
act_set_ast(thread_t thread,ast_t ast,ast_gen_t * gens)1214 act_set_ast(
1215 	thread_t   thread,
1216 	ast_t      ast,
1217 	ast_gen_t *gens)
1218 {
1219 	spl_t s = splsched();
1220 
1221 	if (thread == current_thread()) {
1222 		thread_ast_set(thread, ast);
1223 		ast_propagate(thread);
1224 	} else {
1225 		processor_t processor;
1226 
1227 		thread_lock(thread);
1228 		thread_ast_set(thread, ast);
1229 		processor = thread->last_processor;
1230 		if (processor != PROCESSOR_NULL &&
1231 		    processor->state == PROCESSOR_RUNNING &&
1232 		    processor->active_thread == thread) {
1233 			if (gens) {
1234 				ast_generation_get(processor, gens);
1235 			}
1236 			cause_ast_check(processor);
1237 		}
1238 		thread_unlock(thread);
1239 	}
1240 
1241 	splx(s);
1242 }
1243 
1244 /*
1245  * set AST on thread without causing an AST check
1246  * and without taking the thread lock
1247  *
1248  * If thread is not the current thread, then it may take
1249  * up until the next context switch or quantum expiration
1250  * on that thread for it to notice the AST.
1251  */
1252 static void
act_set_ast_async(thread_t thread,ast_t ast)1253 act_set_ast_async(thread_t  thread,
1254     ast_t     ast)
1255 {
1256 	thread_ast_set(thread, ast);
1257 
1258 	if (thread == current_thread()) {
1259 		spl_t s = splsched();
1260 		ast_propagate(thread);
1261 		splx(s);
1262 	}
1263 }
1264 
1265 void
act_set_debug_assert(void)1266 act_set_debug_assert(void)
1267 {
1268 	thread_t thread = current_thread();
1269 	if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1270 		thread_ast_set(thread, AST_DEBUG_ASSERT);
1271 	}
1272 	if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1273 		spl_t s = splsched();
1274 		ast_propagate(thread);
1275 		splx(s);
1276 	}
1277 }
1278 
1279 void
act_set_astbsd(thread_t thread)1280 act_set_astbsd(thread_t thread)
1281 {
1282 	act_set_ast(thread, AST_BSD, NULL);
1283 }
1284 
1285 void
act_set_astkevent(thread_t thread,uint16_t bits)1286 act_set_astkevent(thread_t thread, uint16_t bits)
1287 {
1288 	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);
1289 
1290 	/* kevent AST shouldn't send immediate IPIs */
1291 	act_set_ast_async(thread, AST_KEVENT);
1292 }
1293 
1294 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1295 act_clear_astkevent(thread_t thread, uint16_t bits)
1296 {
1297 	/*
1298 	 * avoid the atomic operation if none of the bits is set,
1299 	 * which will be the common case.
1300 	 */
1301 	uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1302 	if (cur & bits) {
1303 		cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1304 	}
1305 	return cur & bits;
1306 }
1307 
1308 void
act_set_ast_reset_pcs(thread_t thread,ast_gen_t gens[])1309 act_set_ast_reset_pcs(thread_t thread, ast_gen_t gens[])
1310 {
1311 	act_set_ast(thread, AST_RESET_PCS, gens);
1312 }
1313 
1314 void
act_set_kperf(thread_t thread)1315 act_set_kperf(thread_t thread)
1316 {
1317 	/* safety check */
1318 	if (thread != current_thread()) {
1319 		if (!ml_get_interrupts_enabled()) {
1320 			panic("unsafe act_set_kperf operation");
1321 		}
1322 	}
1323 
1324 	act_set_ast(thread, AST_KPERF, NULL);
1325 }
1326 
#if CONFIG_MACF
/* Request the MAC framework AST on the target thread. */
void
act_set_astmacf(
	thread_t        thread)
{
	act_set_ast(thread, AST_MACF, NULL);
}
#endif
1335 
1336 void
act_set_astledger(thread_t thread)1337 act_set_astledger(thread_t thread)
1338 {
1339 	act_set_ast(thread, AST_LEDGER, NULL);
1340 }
1341 
1342 /*
1343  * The ledger AST may need to be set while already holding
1344  * the thread lock.  This routine skips sending the IPI,
1345  * allowing us to avoid the lock hold.
1346  *
1347  * However, it means the targeted thread must context switch
1348  * to recognize the ledger AST.
1349  */
1350 void
act_set_astledger_async(thread_t thread)1351 act_set_astledger_async(thread_t thread)
1352 {
1353 	act_set_ast_async(thread, AST_LEDGER);
1354 }
1355 
1356 void
act_set_io_telemetry_ast(thread_t thread)1357 act_set_io_telemetry_ast(thread_t thread)
1358 {
1359 	act_set_ast(thread, AST_TELEMETRY_IO, NULL);
1360 }
1361 
1362 void
act_set_macf_telemetry_ast(thread_t thread)1363 act_set_macf_telemetry_ast(thread_t thread)
1364 {
1365 	act_set_ast(thread, AST_TELEMETRY_MACF, NULL);
1366 }
1367 
1368 void
act_set_astproc_resource(thread_t thread)1369 act_set_astproc_resource(thread_t thread)
1370 {
1371 	act_set_ast(thread, AST_PROC_RESOURCE, NULL);
1372 }
1373