xref: /xnu-8020.140.41/osfmk/kern/thread_act.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Copyright (c) 1993 The University of Utah and
33  * the Center for Software Science (CSS).  All rights reserved.
34  *
35  * Permission to use, copy, modify and distribute this software and its
36  * documentation is hereby granted, provided that both the copyright
37  * notice and this permission notice appear in all copies of the
38  * software, derivative works or modified versions, and any portions
39  * thereof, and that both notices appear in supporting documentation.
40  *
41  * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42  * IS" CONDITION.  THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43  * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44  *
45  * CSS requests users of this software to return to [email protected] any
46  * improvements that they make and grant CSS redistribution rights.
47  *
48  *	Author:	Bryan Ford, University of Utah CSS
49  *
50  *	Thread management routines
51  */
52 
53 #include <mach/mach_types.h>
54 #include <mach/kern_return.h>
55 #include <mach/thread_act_server.h>
56 #include <mach/thread_act.h>
57 
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/processor.h>
75 #include <kern/restartable.h>
76 #include <kern/timer.h>
77 #include <kern/affinity.h>
78 #include <kern/host.h>
79 
80 #include <stdatomic.h>
81 
82 #include <security/mac_mach_internal.h>
83 
84 static void act_abort(thread_t thread);
85 
86 static void thread_suspended(void *arg, wait_result_t result);
87 static void thread_set_apc_ast(thread_t thread);
88 static void thread_set_apc_ast_locked(thread_t thread);
89 
90 /*
91  * Internal routine to mark a thread as started.
92  * Always called with the thread mutex locked.
93  */
void
thread_start(
	thread_t                        thread)
{
	/*
	 * Pull the thread out of its creation wait (THREAD_AWAKENED =
	 * normal wakeup), then record that it has been started.
	 */
	clear_wait(thread, THREAD_AWAKENED);
	thread->started = TRUE;
}
101 
102 /*
103  * Internal routine to mark a thread as waiting
104  * right after it has been created.  The caller
105  * is responsible to call wakeup()/thread_wakeup()
106  * or thread_terminate() to get it going.
107  *
108  * Always called with the thread mutex locked.
109  *
110  * Task and task_threads mutexes also held
111  * (so nobody can set the thread running before
112  * this point)
113  *
114  * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
115  * to allow termination from this point forward.
116  */
void
thread_start_in_assert_wait(
	thread_t                        thread,
	event_t             event,
	wait_interrupt_t    interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	/* Take the waitq lock at splsched; thread lock nests inside it below. */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
154 
155 /*
156  * Internal routine to terminate a thread.
157  * Sometimes called with task already locked.
158  *
159  * If thread is on core, cause AST check immediately;
160  * Otherwise, let the thread continue running in kernel
161  * until it hits AST.
162  */
kern_return_t
thread_terminate_internal(
	thread_t                        thread)
{
	kern_return_t           result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		/* First terminator: mark inactive and request AST_APC teardown. */
		thread->active = FALSE;

		act_abort(thread);

		if (thread->started) {
			/* Kick the thread out of any interruptible wait. */
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* Never ran: start it so it can reach the APC and die. */
			thread_start(thread);
		}
	} else {
		/* Termination already initiated by someone else. */
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* If we won the race, wait (non-interruptibly=FALSE arg) for it to stop. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
200 
/*
 * Terminate a thread. For kernel threads, only self-termination is
 * permitted; the call then never returns.
 */
kern_return_t
thread_terminate(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast must not return for an inactive thread. */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
236 
237 /*
238  * [MIG Call] Terminate a thread.
239  *
240  * Cannot be used on threads managed by pthread.
241  */
242 kern_return_t
thread_terminate_from_user(thread_t thread)243 thread_terminate_from_user(
244 	thread_t                thread)
245 {
246 	if (thread == THREAD_NULL) {
247 		return KERN_INVALID_ARGUMENT;
248 	}
249 
250 	if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
251 		return KERN_DENIED;
252 	}
253 
254 	return thread_terminate(thread);
255 }
256 
257 /*
258  * Terminate a thread with pinned control port.
259  *
260  * Can only be used on threads managed by pthread. Exported in pthread_kern.
261  */
kern_return_t
thread_terminate_pinned(
	thread_t                thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);


	/* Only userspace pthread/main threads may come through this path. */
	assert(task != kernel_task);
	assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));

	thread_mtx_lock(thread);
	if (task_is_pinned(task) && thread->active) {
		/* Sanity: an active thread in a pinned task must have a pinned port. */
		assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
	}
	thread_mtx_unlock(thread);

	/* thread_terminate_internal unpins the control port unconditionally. */
	kern_return_t result = thread_terminate_internal(thread);
	return result;
}
287 
288 /*
289  * Suspend execution of the specified thread.
290  * This is a recursive-style suspension of the thread, a count of
291  * suspends is maintained.
292  *
293  * Called with thread mutex held.
294  */
void
thread_hold(thread_t thread)
{
	/* Only the 0 -> 1 transition needs to arm the AST_APC handler. */
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		/* Can't already be parked if it wasn't suspended before. */
		assert(thread->suspend_parked == FALSE);
	}
}
303 
304 /*
305  * Decrement internal suspension count, setting thread
306  * runnable when count falls to zero.
307  *
308  * Because the wait is abortsafe, we can't be guaranteed that the thread
309  * is currently actually waiting even if suspend_parked is set.
310  *
311  * Called with thread mutex held.
312  */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* Never ran: releasing acts as the initial start. */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/*
			 * Clear parked before waking; thread_suspended() re-checks
			 * suspend_count after wakeup, so a spurious wake is safe.
			 */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
332 
/*
 * User-level (recursive) suspension: first user_stop_count takes an
 * internal hold; kernel-task threads may not be suspended.
 */
kern_return_t
thread_suspend(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		/* Only the first user stop translates into an internal hold. */
		if (thread->user_stop_count++ == 0) {
			thread_hold(thread);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	/* Don't return until the target has actually stopped running. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
360 
/*
 * Undo one level of user suspension; the last resume releases the
 * internal hold. Over-resume returns KERN_FAILURE.
 */
kern_return_t
thread_resume(thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread->user_stop_count > 0) {
			/* Dropping the last user stop releases the internal hold. */
			if (--thread->user_stop_count == 0) {
				thread_release(thread);
			}
		} else {
			/* Resume without a matching suspend. */
			result = KERN_FAILURE;
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
388 
389 /*
390  *	thread_depress_abort_from_user:
391  *
392  *	Prematurely abort priority depression if there is one.
393  */
394 kern_return_t
thread_depress_abort_from_user(thread_t thread)395 thread_depress_abort_from_user(thread_t thread)
396 {
397 	kern_return_t result;
398 
399 	if (thread == THREAD_NULL) {
400 		return KERN_INVALID_ARGUMENT;
401 	}
402 
403 	thread_mtx_lock(thread);
404 
405 	if (thread->active) {
406 		result = thread_depress_abort(thread);
407 	} else {
408 		result = KERN_TERMINATED;
409 	}
410 
411 	thread_mtx_unlock(thread);
412 
413 	return result;
414 }
415 
416 
417 /*
418  * Indicate that the thread should run the AST_APC callback
419  * to detect an abort condition.
420  *
421  * Called with thread mutex held.
422  */
static void
act_abort(
	thread_t        thread)
{
	spl_t           s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* First abort: flag it, arm AST_APC, cancel any priority depression. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* Already aborting: upgrade a safe abort to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
442 
/*
 * Unconditionally abort the thread: mark it aborted and interrupt
 * any wait it is in. Called with nothing locked.
 */
kern_return_t
thread_abort(
	thread_t        thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		/* Set abort state first, then knock the thread out of its wait. */
		act_abort(thread);
		clear_wait(thread, THREAD_INTERRUPTED);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
466 
/*
 * Abort the thread only at a safe point: if it is waiting at a safe
 * point, interrupt it now; otherwise record a deferred (safely) abort
 * to be delivered via AST_APC.
 */
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t   result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t           s = splsched();

		thread_lock(thread);
		/* If not at a safe point, or the wait can't be cleared, defer. */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				/* ABORTED_MASK = ABORT | ABORTSAFELY: a deferred, safe abort. */
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
501 
502 /*** backward compatibility hacks ***/
503 #include <mach/thread_info.h>
504 #include <mach/thread_special_ports.h>
505 #include <ipc/ipc_port.h>
506 
507 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)508 thread_info(
509 	thread_t                        thread,
510 	thread_flavor_t                 flavor,
511 	thread_info_t                   thread_info_out,
512 	mach_msg_type_number_t  *thread_info_count)
513 {
514 	kern_return_t                   result;
515 
516 	if (thread == THREAD_NULL) {
517 		return KERN_INVALID_ARGUMENT;
518 	}
519 
520 	thread_mtx_lock(thread);
521 
522 	if (thread->active || thread->inspection) {
523 		result = thread_info_internal(
524 			thread, flavor, thread_info_out, thread_info_count);
525 	} else {
526 		result = KERN_TERMINATED;
527 	}
528 
529 	thread_mtx_unlock(thread);
530 
531 	return result;
532 }
533 
/*
 * Fetch the machine-dependent state of a thread. A remote target is
 * held and stopped so the state snapshot is consistent; with
 * TSSF_TRANSLATE_TO_USER the result is converted to the user-visible
 * representation (e.g. pointer re-signing on arm64e).
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	thread_set_status_flags_t  flags)
{
	kern_return_t           result = KERN_SUCCESS;
	boolean_t               to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold the target, then drop the mutex before stopping it. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* The stop was aborted; no state was read. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* Reading our own state needs no stop. */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* Inactive but inspectable (e.g. corpse): read directly. */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
588 
/* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */

kern_return_t
thread_get_state(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  *state_count);

/*
 * Kernel-internal get-state: no user-representation translation.
 */
kern_return_t
thread_get_state(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
}
607 
/*
 * Get-state variant that converts the result to the target's
 * user-visible representation before returning it.
 */
kern_return_t
thread_get_state_to_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,                  /* pointer to OUT array */
	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
}
617 
618 /*
619  *	Change thread's machine-dependent state.  Called with nothing
620  *	locked.  Returns same way.
621  */
/*
 *	Change thread's machine-dependent state.  Called with nothing
 *	locked.  Returns same way.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                        thread,
	int                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t          state_count,
	thread_state_t                  old_state,
	mach_msg_type_number_t          old_state_count,
	thread_set_status_flags_t       flags)
{
	kern_return_t           result = KERN_SUCCESS;
	/*
	 * NOTE(review): TSSF_TRANSLATE_TO_USER doubles as "state came from
	 * user" on the set path — hence the from_user name despite the flag's
	 * TO_USER spelling; confirm against thread_set_status_flags_t docs.
	 */
	boolean_t               from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/* Authenticate/convert user representation before applying. */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			/* Hold and stop a remote target before mutating its state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* Account externally-modified state for security telemetry. */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
682 
/* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count);

/*
 * Kernel-internal set-state: no user-representation conversion.
 */
kern_return_t
thread_set_state(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
}
700 
/*
 * Set-state variant for user-supplied state: converts/authenticates
 * the incoming representation before applying it.
 */
kern_return_t
thread_set_state_from_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  state,
	mach_msg_type_number_t  state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_TRANSLATE_TO_USER);
}
710 
/*
 * Convert a thread state blob between the user representations of two
 * threads (current thread and `thread`), in either direction. Used to
 * hand signed/diversified state (e.g. arm64e pointer auth) across
 * threads safely: authenticate against the source, re-sign for the
 * destination. Conversion is done in place in `in_state`, then copied
 * to `out_state`.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                     direction,
	thread_state_flavor_t   flavor,
	thread_state_t          in_state,          /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t          out_state,         /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count)   /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* FROM_SELF: current -> thread; TO_SELF: thread -> current. */
	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		/* Re-check: conversion may have changed the state count. */
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
771 
772 /*
773  * Kernel-internal "thread" interfaces used outside this file:
774  */
775 
776 /* Initialize (or re-initialize) a thread state.  Called from execve
777  * with nothing locked, returns same way.
778  */
kern_return_t
thread_state_initialize(
	thread_t                thread)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop a remote target before resetting its state. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				machine_thread_state_initialize( thread );
				thread_unstop(thread);
			} else {
				/* Stop aborted: state left untouched. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			machine_thread_state_initialize( thread );
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
818 
/*
 * Copy the current thread's machine state (and affinity, if any) onto
 * `target`. The target is held and stopped for the duration; it must
 * not be the calling thread.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t                        self = current_thread();
	kern_return_t           result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			/* Propagate the caller's affinity set to the copy. */
			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
859 
860 
/*
 * Copy machine state from an arbitrary `source` thread onto `target`
 * (corpse-style duplication: machine_thread_dup is called with
 * is_corpse == TRUE). The source must be active; the target may be
 * active or merely inspectable.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t           result = KERN_SUCCESS;
	uint32_t                active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Snapshot source liveness; it is not held across the copy. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
909 
910 /*
911  *	thread_setstatus:
912  *
913  *	Set the status of the specified thread.
914  *	Called with (and returns with) no locks held.
915  */
/*
 *	thread_setstatus:
 *
 *	Set the status of the specified thread.
 *	Called with (and returns with) no locks held.
 *
 *	Thin alias for thread_set_state (kernel-internal representation).
 */
kern_return_t
thread_setstatus(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count)
{
	return thread_set_state(thread, flavor, tstate, count);
}
925 
/*
 * Set thread status from a user-supplied representation; forces
 * TSSF_TRANSLATE_TO_USER on top of any caller flags so the incoming
 * state is converted/authenticated first.
 */
kern_return_t
thread_setstatus_from_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  count,
	thread_state_t                  old_tstate,
	mach_msg_type_number_t  old_count,
	thread_set_status_flags_t flags)
{
	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
	           old_count, flags | TSSF_TRANSLATE_TO_USER);
}
939 
940 /*
941  *	thread_getstatus:
942  *
943  *	Get the status of the specified thread.
944  */
/*
 *	thread_getstatus:
 *
 *	Get the status of the specified thread.
 *
 *	Thin alias for thread_get_state (kernel-internal representation).
 */
kern_return_t
thread_getstatus(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count)
{
	return thread_get_state(thread, flavor, tstate, count);
}
954 
/*
 * Get thread status converted to the user-visible representation;
 * forces TSSF_TRANSLATE_TO_USER on top of any caller flags.
 */
kern_return_t
thread_getstatus_to_user(
	thread_t                thread,
	int                                             flavor,
	thread_state_t                  tstate,
	mach_msg_type_number_t  *count,
	thread_set_status_flags_t flags)
{
	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
}
965 
966 /*
967  *	Change thread's machine-dependent userspace TSD base.
968  *  Called with nothing locked.  Returns same way.
969  */
/*
 *	Change thread's machine-dependent userspace TSD base.
 *  Called with nothing locked.  Returns same way.
 */
kern_return_t
thread_set_tsd_base(
	thread_t                        thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t           result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold and stop a remote target before changing its TSD base. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
1010 
1011 /*
1012  * thread_set_apc_ast:
1013  *
1014  * Register the AST_APC callback that handles suspension and
1015  * termination, if it hasn't been installed already.
1016  *
1017  * Called with the thread mutex held.
1018  */
static void
thread_set_apc_ast(thread_t thread)
{
	/* Take the scheduling lock (at splsched) around the locked variant. */
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
1030 
1031 /*
1032  * thread_set_apc_ast_locked:
1033  *
1034  * Do the work of registering for the AST_APC callback.
1035  *
1036  * Called with the thread mutex and scheduling lock held.
1037  */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* We'll notice the AST ourselves on the way out of the kernel. */
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		/*
		 * If the target is currently on-core, poke its processor so it
		 * checks for the AST promptly; otherwise the AST is noticed at
		 * the next dispatch.
		 */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1055 
1056 /*
1057  * Activation control support routines internal to this file:
1058  *
1059  */
1060 
1061 /*
1062  * thread_suspended
1063  *
1064  * Continuation routine for thread suspension.  It checks
1065  * to see whether there has been any new suspensions.  If so, it
1066  * installs the AST_APC handler again.
1067  */
1068 __attribute__((noreturn))
1069 static void
thread_suspended(__unused void * parameter,wait_result_t result)1070 thread_suspended(__unused void *parameter, wait_result_t result)
1071 {
1072 	thread_t thread = current_thread();
1073 
1074 	thread_mtx_lock(thread);
1075 
1076 	if (result == THREAD_INTERRUPTED) {
1077 		thread->suspend_parked = FALSE;
1078 	} else {
1079 		assert(thread->suspend_parked == FALSE);
1080 	}
1081 
1082 	if (thread->suspend_count > 0) {
1083 		thread_set_apc_ast(thread);
1084 	}
1085 
1086 	thread_mtx_unlock(thread);
1087 
1088 	thread_exception_return();
1089 	/*NOTREACHED*/
1090 }
1091 
/*
 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
 * Called with nothing locked.  Returns (if it returns) the same way.
 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	/* Only the thread itself handles its AST_APC, so it can't be parked. */
	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* Whatever abort brought us here has been delivered; clear it. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Block; thread_suspended re-checks the suspend state on wakeup. */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1134 
1135 
1136 void
thread_debug_return_to_user_ast(thread_t thread)1137 thread_debug_return_to_user_ast(
1138 	thread_t thread)
1139 {
1140 #pragma unused(thread)
1141 #if MACH_ASSERT
1142 	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
1143 	    thread->rwlock_count > 0) {
1144 		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
1145 	}
1146 
1147 	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
1148 	    thread->priority_floor_count > 0) {
1149 		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
1150 	}
1151 #endif /* MACH_ASSERT */
1152 }
1153 
1154 
1155 /* Prototype, see justification above */
1156 kern_return_t
1157 act_set_state(
1158 	thread_t                                thread,
1159 	int                                             flavor,
1160 	thread_state_t                  state,
1161 	mach_msg_type_number_t  count);
1162 
1163 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1164 act_set_state(
1165 	thread_t                                thread,
1166 	int                                             flavor,
1167 	thread_state_t                  state,
1168 	mach_msg_type_number_t  count)
1169 {
1170 	if (thread == current_thread()) {
1171 		return KERN_INVALID_ARGUMENT;
1172 	}
1173 
1174 	return thread_set_state(thread, flavor, state, count);
1175 }
1176 
1177 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1178 act_set_state_from_user(
1179 	thread_t                                thread,
1180 	int                                             flavor,
1181 	thread_state_t                  state,
1182 	mach_msg_type_number_t  count)
1183 {
1184 	if (thread == current_thread()) {
1185 		return KERN_INVALID_ARGUMENT;
1186 	}
1187 
1188 	return thread_set_state_from_user(thread, flavor, state, count);
1189 }
1190 
1191 /* Prototype, see justification above */
1192 kern_return_t
1193 act_get_state(
1194 	thread_t                                thread,
1195 	int                                             flavor,
1196 	thread_state_t                  state,
1197 	mach_msg_type_number_t  *count);
1198 
1199 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1200 act_get_state(
1201 	thread_t                                thread,
1202 	int                                             flavor,
1203 	thread_state_t                  state,
1204 	mach_msg_type_number_t  *count)
1205 {
1206 	if (thread == current_thread()) {
1207 		return KERN_INVALID_ARGUMENT;
1208 	}
1209 
1210 	return thread_get_state(thread, flavor, state, count);
1211 }
1212 
1213 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1214 act_get_state_to_user(
1215 	thread_t                                thread,
1216 	int                                             flavor,
1217 	thread_state_t                  state,
1218 	mach_msg_type_number_t  *count)
1219 {
1220 	if (thread == current_thread()) {
1221 		return KERN_INVALID_ARGUMENT;
1222 	}
1223 
1224 	return thread_get_state_to_user(thread, flavor, state, count);
1225 }
1226 
1227 static void
act_set_ast(thread_t thread,ast_t ast)1228 act_set_ast(
1229 	thread_t   thread,
1230 	ast_t      ast)
1231 {
1232 	spl_t s = splsched();
1233 
1234 	if (thread == current_thread()) {
1235 		thread_ast_set(thread, ast);
1236 		ast_propagate(thread);
1237 	} else {
1238 		processor_t processor;
1239 
1240 		thread_lock(thread);
1241 		thread_ast_set(thread, ast);
1242 		processor = thread->last_processor;
1243 		if (processor != PROCESSOR_NULL &&
1244 		    processor->state == PROCESSOR_RUNNING &&
1245 		    processor->active_thread == thread) {
1246 			cause_ast_check(processor);
1247 		}
1248 		thread_unlock(thread);
1249 	}
1250 
1251 	splx(s);
1252 }
1253 
1254 /*
1255  * set AST on thread without causing an AST check
1256  * and without taking the thread lock
1257  *
1258  * If thread is not the current thread, then it may take
1259  * up until the next context switch or quantum expiration
1260  * on that thread for it to notice the AST.
1261  */
1262 static void
act_set_ast_async(thread_t thread,ast_t ast)1263 act_set_ast_async(thread_t  thread,
1264     ast_t     ast)
1265 {
1266 	thread_ast_set(thread, ast);
1267 
1268 	if (thread == current_thread()) {
1269 		spl_t s = splsched();
1270 		ast_propagate(thread);
1271 		splx(s);
1272 	}
1273 }
1274 
1275 void
act_set_debug_assert(void)1276 act_set_debug_assert(void)
1277 {
1278 	thread_t thread = current_thread();
1279 	if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1280 		thread_ast_set(thread, AST_DEBUG_ASSERT);
1281 	}
1282 	if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1283 		spl_t s = splsched();
1284 		ast_propagate(thread);
1285 		splx(s);
1286 	}
1287 }
1288 
/* Pend AST_BSD on the thread, IPIing it if on-core (see act_set_ast). */
void
act_set_astbsd(thread_t thread)
{
	act_set_ast(thread, AST_BSD);
}
1294 
/*
 * Accumulate kevent AST bits on the thread, then pend AST_KEVENT
 * without an IPI: the target notices at its next AST check or
 * context switch.
 */
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1303 
1304 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1305 act_clear_astkevent(thread_t thread, uint16_t bits)
1306 {
1307 	/*
1308 	 * avoid the atomic operation if none of the bits is set,
1309 	 * which will be the common case.
1310 	 */
1311 	uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1312 	if (cur & bits) {
1313 		cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1314 	}
1315 	return cur & bits;
1316 }
1317 
/*
 * Pend AST_RESET_PCS on a thread (restartable-ranges reset path;
 * caller holds the task lock — see comment below).
 *
 * Returns true when the caller must wait for the target to
 * acknowledge (an IPI ack, or an observed in-range fault) before
 * the reset is known to have taken effect.
 */
bool
act_set_ast_reset_pcs(task_t task, thread_t thread)
{
	processor_t processor;
	bool needs_wait = false;
	spl_t s;

	s = splsched();

	if (thread == current_thread()) {
		/*
		 * this is called from the signal code,
		 * just set the AST and move on
		 */
		thread_ast_set(thread, AST_RESET_PCS);
		ast_propagate(thread);
	} else {
		thread_lock(thread);

		/* No ack or sync-wait may already be outstanding. */
		assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
		assert(thread->t_rr_state.trr_sync_waiting == 0);

		processor = thread->last_processor;
		if (!thread->active) {
			/*
			 * ->active is being set before the thread is added
			 * to the thread list (under the task lock which
			 * the caller holds), and is reset before the thread
			 * lock is being taken by thread_terminate_self().
			 *
			 * The result is that this will never fail to
			 * set the AST on an thread that is active,
			 * but will not set it past thread_terminate_self().
			 */
		} else if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			/* Target is on-core elsewhere: IPI it and wait for the ack. */
			thread->t_rr_state.trr_ipi_ack_pending = true;
			needs_wait = true;
			thread_ast_set(thread, AST_RESET_PCS);
			cause_ast_check(processor);
		} else if (thread_reset_pcs_in_range(task, thread)) {
			if (thread->t_rr_state.trr_fault_state) {
				/* Mid-fault inside the range: wait for it to resolve. */
				thread->t_rr_state.trr_fault_state =
				    TRR_FAULT_OBSERVED;
				needs_wait = true;
			}
			thread_ast_set(thread, AST_RESET_PCS);
		}
		thread_unlock(thread);
	}

	splx(s);

	return needs_wait;
}
1374 
1375 void
act_set_kperf(thread_t thread)1376 act_set_kperf(thread_t thread)
1377 {
1378 	/* safety check */
1379 	if (thread != current_thread()) {
1380 		if (!ml_get_interrupts_enabled()) {
1381 			panic("unsafe act_set_kperf operation");
1382 		}
1383 	}
1384 
1385 	act_set_ast(thread, AST_KPERF);
1386 }
1387 
1388 #if CONFIG_MACF
1389 void
act_set_astmacf(thread_t thread)1390 act_set_astmacf(
1391 	thread_t        thread)
1392 {
1393 	act_set_ast( thread, AST_MACF);
1394 }
1395 #endif
1396 
/* Pend AST_LEDGER on the thread, IPIing it if on-core (see act_set_ast). */
void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}
1402 
1403 /*
1404  * The ledger AST may need to be set while already holding
1405  * the thread lock.  This routine skips sending the IPI,
1406  * allowing us to avoid the lock hold.
1407  *
1408  * However, it means the targeted thread must context switch
1409  * to recognize the ledger AST.
1410  */
void
act_set_astledger_async(thread_t thread)
{
	/* No IPI: the target notices AST_LEDGER at its next context switch. */
	act_set_ast_async(thread, AST_LEDGER);
}
1416 
/* Pend AST_TELEMETRY_IO on the thread, IPIing it if on-core. */
void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}
1422 
/* Pend AST_TELEMETRY_MACF on the thread, IPIing it if on-core. */
void
act_set_macf_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_MACF);
}
1428 
/* Pend AST_PROC_RESOURCE on the thread, IPIing it if on-core. */
void
act_set_astproc_resource(thread_t thread)
{
	act_set_ast(thread, AST_PROC_RESOURCE);
}
1434