1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to [email protected] any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52
53 #include <mach/mach_types.h>
54 #include <mach/kern_return.h>
55 #include <mach/thread_act_server.h>
56 #include <mach/thread_act.h>
57
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/processor.h>
75 #include <kern/restartable.h>
76 #include <kern/timer.h>
77 #include <kern/affinity.h>
78 #include <kern/host.h>
79
80 #include <stdatomic.h>
81
82 #include <security/mac_mach_internal.h>
83
84 static void act_abort(thread_t thread);
85
86 static void thread_suspended(void *arg, wait_result_t result);
87 static void thread_set_apc_ast(thread_t thread);
88 static void thread_set_apc_ast_locked(thread_t thread);
89
90 /*
91 * Internal routine to mark a thread as started.
92 * Always called with the thread mutex locked.
93 */
void
thread_start(
	thread_t                thread)
{
	/* Pull the thread out of its creation wait and make it runnable. */
	clear_wait(thread, THREAD_AWAKENED);
	/* From here on the thread is considered started. */
	thread->started = TRUE;
}
101
102 /*
103 * Internal routine to mark a thread as waiting
104 * right after it has been created. The caller
105 * is responsible to call wakeup()/thread_wakeup()
106 * or thread_terminate() to get it going.
107 *
108 * Always called with the thread mutex locked.
109 *
110 * Task and task_threads mutexes also held
111 * (so nobody can set the thread running before
112 * this point)
113 *
114 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
115 * to allow termination from this point forward.
116 */
void
thread_start_in_assert_wait(
	thread_t                thread,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	/* Take the waitq lock at splsched so the wait and the start are atomic. */
	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
154
155 /*
156 * Internal routine to terminate a thread.
157 * Sometimes called with task already locked.
158 *
159 * If thread is on core, cause AST check immediately;
160 * Otherwise, let the thread continue running in kernel
161 * until it hits AST.
162 */
kern_return_t
thread_terminate_internal(
	thread_t                thread)
{
	kern_return_t result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		thread->active = FALSE;

		/* Request the AST_APC abort that drives self-termination. */
		act_abort(thread);

		if (thread->started) {
			/* Knock it out of any interruptible wait. */
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* Never ran: start it so it can process the APC and terminate. */
			thread_start(thread);
		}
	} else {
		/* Someone else already initiated termination. */
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* Wait for the target to stop running; never wait on ourselves. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
200
201 kern_return_t
thread_terminate(thread_t thread)202 thread_terminate(
203 thread_t thread)
204 {
205 task_t task;
206
207 if (thread == THREAD_NULL) {
208 return KERN_INVALID_ARGUMENT;
209 }
210
211 task = get_threadtask(thread);
212
213 /* Kernel threads can't be terminated without their own cooperation */
214 if (task == kernel_task && thread != current_thread()) {
215 return KERN_FAILURE;
216 }
217
218 kern_return_t result = thread_terminate_internal(thread);
219
220 /*
221 * If a kernel thread is terminating itself, force handle the APC_AST here.
222 * Kernel threads don't pass through the return-to-user AST checking code,
223 * but all threads must finish their own termination in thread_apc_ast.
224 */
225 if (task == kernel_task) {
226 assert(thread->active == FALSE);
227 thread_ast_clear(thread, AST_APC);
228 thread_apc_ast(thread);
229
230 panic("thread_terminate");
231 /* NOTREACHED */
232 }
233
234 return result;
235 }
236
237 /*
238 * [MIG Call] Terminate a thread.
239 *
240 * Cannot be used on threads managed by pthread.
241 */
242 kern_return_t
thread_terminate_from_user(thread_t thread)243 thread_terminate_from_user(
244 thread_t thread)
245 {
246 if (thread == THREAD_NULL) {
247 return KERN_INVALID_ARGUMENT;
248 }
249
250 if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
251 return KERN_DENIED;
252 }
253
254 return thread_terminate(thread);
255 }
256
257 /*
258 * Terminate a thread with pinned control port.
259 *
260 * Can only be used on threads managed by pthread. Exported in pthread_kern.
261 */
262 kern_return_t
thread_terminate_pinned(thread_t thread)263 thread_terminate_pinned(
264 thread_t thread)
265 {
266 task_t task;
267
268 if (thread == THREAD_NULL) {
269 return KERN_INVALID_ARGUMENT;
270 }
271
272 task = get_threadtask(thread);
273
274
275 assert(task != kernel_task);
276 assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));
277
278 thread_mtx_lock(thread);
279 if (task_is_pinned(task) && thread->active) {
280 assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
281 }
282 thread_mtx_unlock(thread);
283
284 kern_return_t result = thread_terminate_internal(thread);
285 return result;
286 }
287
288 /*
289 * Suspend execution of the specified thread.
290 * This is a recursive-style suspension of the thread, a count of
291 * suspends is maintained.
292 *
293 * Called with thread mutex held.
294 */
void
thread_hold(thread_t thread)
{
	/* Only the first hold installs the AST_APC that parks the thread. */
	if (thread->suspend_count++ == 0) {
		thread_set_apc_ast(thread);
		/* A thread can't already be parked if it had no holds. */
		assert(thread->suspend_parked == FALSE);
	}
}
303
304 /*
305 * Decrement internal suspension count, setting thread
306 * runnable when count falls to zero.
307 *
308 * Because the wait is abortsafe, we can't be guaranteed that the thread
309 * is currently actually waiting even if suspend_parked is set.
310 *
311 * Called with thread mutex held.
312 */
void
thread_release(thread_t thread)
{
	assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);

	/* fail-safe on non-assert builds */
	if (thread->suspend_count == 0) {
		return;
	}

	if (--thread->suspend_count == 0) {
		if (!thread->started) {
			/* Never ran: get it going now that nothing holds it. */
			thread_start(thread);
		} else if (thread->suspend_parked) {
			/* Wake the thread parked in thread_suspended(). */
			thread->suspend_parked = FALSE;
			thread_wakeup_thread(&thread->suspend_count, thread);
		}
	}
}
332
333 kern_return_t
thread_suspend(thread_t thread)334 thread_suspend(thread_t thread)
335 {
336 kern_return_t result = KERN_SUCCESS;
337
338 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
339 return KERN_INVALID_ARGUMENT;
340 }
341
342 thread_mtx_lock(thread);
343
344 if (thread->active) {
345 if (thread->user_stop_count++ == 0) {
346 thread_hold(thread);
347 }
348 } else {
349 result = KERN_TERMINATED;
350 }
351
352 thread_mtx_unlock(thread);
353
354 if (thread != current_thread() && result == KERN_SUCCESS) {
355 thread_wait(thread, FALSE);
356 }
357
358 return result;
359 }
360
361 kern_return_t
thread_resume(thread_t thread)362 thread_resume(thread_t thread)
363 {
364 kern_return_t result = KERN_SUCCESS;
365
366 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
367 return KERN_INVALID_ARGUMENT;
368 }
369
370 thread_mtx_lock(thread);
371
372 if (thread->active) {
373 if (thread->user_stop_count > 0) {
374 if (--thread->user_stop_count == 0) {
375 thread_release(thread);
376 }
377 } else {
378 result = KERN_FAILURE;
379 }
380 } else {
381 result = KERN_TERMINATED;
382 }
383
384 thread_mtx_unlock(thread);
385
386 return result;
387 }
388
389 /*
390 * thread_depress_abort_from_user:
391 *
392 * Prematurely abort priority depression if there is one.
393 */
394 kern_return_t
thread_depress_abort_from_user(thread_t thread)395 thread_depress_abort_from_user(thread_t thread)
396 {
397 kern_return_t result;
398
399 if (thread == THREAD_NULL) {
400 return KERN_INVALID_ARGUMENT;
401 }
402
403 thread_mtx_lock(thread);
404
405 if (thread->active) {
406 result = thread_depress_abort(thread);
407 } else {
408 result = KERN_TERMINATED;
409 }
410
411 thread_mtx_unlock(thread);
412
413 return result;
414 }
415
416
417 /*
418 * Indicate that the thread should run the AST_APC callback
419 * to detect an abort condition.
420 *
421 * Called with thread mutex held.
422 */
static void
act_abort(
	thread_t                thread)
{
	spl_t s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* First abort: flag it, kick the APC AST, and cancel any depression. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/* Already aborting: upgrade a safe abort to an unconditional one. */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
442
443 kern_return_t
thread_abort(thread_t thread)444 thread_abort(
445 thread_t thread)
446 {
447 kern_return_t result = KERN_SUCCESS;
448
449 if (thread == THREAD_NULL) {
450 return KERN_INVALID_ARGUMENT;
451 }
452
453 thread_mtx_lock(thread);
454
455 if (thread->active) {
456 act_abort(thread);
457 clear_wait(thread, THREAD_INTERRUPTED);
458 } else {
459 result = KERN_TERMINATED;
460 }
461
462 thread_mtx_unlock(thread);
463
464 return result;
465 }
466
kern_return_t
thread_abort_safely(
	thread_t                thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		/*
		 * If the thread is at a safe point and we can interrupt its wait
		 * directly, nothing more is needed. Otherwise, mark a deferred
		 * (safe) abort and arm the APC AST to deliver it later.
		 */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
501
502 /*** backward compatibility hacks ***/
503 #include <mach/thread_info.h>
504 #include <mach/thread_special_ports.h>
505 #include <ipc/ipc_port.h>
506
507 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)508 thread_info(
509 thread_t thread,
510 thread_flavor_t flavor,
511 thread_info_t thread_info_out,
512 mach_msg_type_number_t *thread_info_count)
513 {
514 kern_return_t result;
515
516 if (thread == THREAD_NULL) {
517 return KERN_INVALID_ARGUMENT;
518 }
519
520 thread_mtx_lock(thread);
521
522 if (thread->active || thread->inspection) {
523 result = thread_info_internal(
524 thread, flavor, thread_info_out, thread_info_count);
525 } else {
526 result = KERN_TERMINATED;
527 }
528
529 thread_mtx_unlock(thread);
530
531 return result;
532 }
533
/*
 * Fetch the machine-dependent state of a thread, optionally converting
 * it to the user-visible representation (TSSF_TRANSLATE_TO_USER).
 * A remote target is held and stopped so its state is stable to read.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,          /* pointer to OUT array */
	mach_msg_type_number_t  *state_count,   /*IN/OUT*/
	thread_set_status_flags_t flags)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold the target, then stop it with the mutex dropped. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* Stop was aborted; nothing was read. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* Reading our own state needs no stop. */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* Inactive but open for inspection: state may still be read. */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		/* Convert kernel representation to the caller-visible format. */
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
588
589 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
590
kern_return_t
thread_get_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  *state_count);

/* Kernel-internal fetch: no translation to the user representation. */
kern_return_t
thread_get_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,          /* pointer to OUT array */
	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
}
607
/* Fetch thread state and translate it to the user-visible representation. */
kern_return_t
thread_get_state_to_user(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,          /* pointer to OUT array */
	mach_msg_type_number_t  *state_count)   /*IN/OUT*/
{
	return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
}
617
618 /*
619 * Change thread's machine-dependent state. Called with nothing
620 * locked. Returns same way.
621 */
/*
 * Change thread's machine-dependent state. Called with nothing
 * locked. Returns same way.
 *
 * With TSSF_TRANSLATE_TO_USER the incoming state is first converted
 * (and authenticated) from the user representation. A remote target
 * is held and stopped before its state is overwritten.
 */
static inline kern_return_t
thread_set_state_internal(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  state_count,
	thread_state_t          old_state,
	mach_msg_type_number_t  old_state_count,
	thread_set_status_flags_t flags)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/* Validate/convert the user-format state before applying it. */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			/* Hold the target, then stop it with the mutex dropped. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* Stop was aborted; nothing was written. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	if ((result == KERN_SUCCESS) && from_user) {
		/* Account a successful external modification of this thread. */
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
682
683 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
kern_return_t
thread_set_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  state_count);

/* Kernel-internal set: state is already in kernel representation. */
kern_return_t
thread_set_state(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
}
700
/* Set thread state supplied in the user representation (converted first). */
kern_return_t
thread_set_state_from_user(
	thread_t                thread,
	int                     flavor,
	thread_state_t          state,
	mach_msg_type_number_t  state_count)
{
	return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_TRANSLATE_TO_USER);
}
710
/*
 * Convert a user thread-state between two threads' user representations:
 * authenticate/convert from the source thread's format into the kernel
 * form, then re-emit in the destination thread's user format.
 * `direction` selects whether `thread` is the source or the destination
 * relative to the calling thread.
 */
kern_return_t
thread_convert_thread_state(
	thread_t                thread,
	int                     direction,
	thread_state_flavor_t   flavor,
	thread_state_t          in_state,       /* pointer to IN array */
	mach_msg_type_number_t  in_state_count,
	thread_state_t          out_state,      /* pointer to OUT array */
	mach_msg_type_number_t  *out_state_count) /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		/* Re-check: conversion may have changed the state size. */
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
771
772 /*
773 * Kernel-internal "thread" interfaces used outside this file:
774 */
775
776 /* Initialize (or re-initialize) a thread state. Called from execve
777 * with nothing locked, returns same way.
778 */
779 kern_return_t
thread_state_initialize(thread_t thread)780 thread_state_initialize(
781 thread_t thread)
782 {
783 kern_return_t result = KERN_SUCCESS;
784
785 if (thread == THREAD_NULL) {
786 return KERN_INVALID_ARGUMENT;
787 }
788
789 thread_mtx_lock(thread);
790
791 if (thread->active) {
792 if (thread != current_thread()) {
793 /* Thread created in exec should be blocked in UNINT wait */
794 assert(!(thread->state & TH_RUN));
795 }
796 machine_thread_state_initialize( thread );
797 } else {
798 result = KERN_TERMINATED;
799 }
800
801 thread_mtx_unlock(thread);
802
803 return result;
804 }
805
/*
 * Copy the calling thread's machine state (and affinity, if any)
 * onto the target thread. The target is held and stopped while
 * its state is overwritten.
 */
kern_return_t
thread_dup(
	thread_t        target)
{
	thread_t self = current_thread();
	kern_return_t result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		/* Hold the target, then stop it with the mutex dropped. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			/* Stop was aborted; nothing was copied. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
846
847
/*
 * Copy machine state (and affinity, if any) from an arbitrary
 * source thread onto the target thread. Unlike thread_dup(),
 * the source need not be the calling thread.
 */
kern_return_t
thread_dup2(
	thread_t        source,
	thread_t        target)
{
	kern_return_t result = KERN_SUCCESS;
	uint32_t active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Snapshot source liveness; it is not held across the copy. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	/* Inspection-only targets (not active) may still receive state. */
	if (target->active || target->inspection) {
		/* Hold the target, then stop it with the mutex dropped. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			/* Stop was aborted; nothing was copied. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
896
897 /*
898 * thread_setstatus:
899 *
900 * Set the status of the specified thread.
901 * Called with (and returns with) no locks held.
902 */
/*
 * thread_setstatus:
 *
 * Set the status of the specified thread.
 * Called with (and returns with) no locks held.
 * Thin wrapper over thread_set_state() (kernel representation).
 */
kern_return_t
thread_setstatus(
	thread_t                thread,
	int                     flavor,
	thread_state_t          tstate,
	mach_msg_type_number_t  count)
{
	return thread_set_state(thread, flavor, tstate, count);
}
912
/*
 * Set the status of the specified thread from a user-format state
 * (forces TSSF_TRANSLATE_TO_USER on top of the caller's flags).
 */
kern_return_t
thread_setstatus_from_user(
	thread_t                thread,
	int                     flavor,
	thread_state_t          tstate,
	mach_msg_type_number_t  count,
	thread_state_t          old_tstate,
	mach_msg_type_number_t  old_count,
	thread_set_status_flags_t flags)
{
	return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
	           old_count, flags | TSSF_TRANSLATE_TO_USER);
}
926
927 /*
928 * thread_getstatus:
929 *
930 * Get the status of the specified thread.
931 */
/*
 * thread_getstatus:
 *
 * Get the status of the specified thread.
 * Thin wrapper over thread_get_state() (kernel representation).
 */
kern_return_t
thread_getstatus(
	thread_t                thread,
	int                     flavor,
	thread_state_t          tstate,
	mach_msg_type_number_t  *count)
{
	return thread_get_state(thread, flavor, tstate, count);
}
941
/*
 * Get the status of the specified thread, translated to the user
 * representation (forces TSSF_TRANSLATE_TO_USER on the caller's flags).
 */
kern_return_t
thread_getstatus_to_user(
	thread_t                thread,
	int                     flavor,
	thread_state_t          tstate,
	mach_msg_type_number_t  *count,
	thread_set_status_flags_t flags)
{
	return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
}
952
953 /*
954 * Change thread's machine-dependent userspace TSD base.
955 * Called with nothing locked. Returns same way.
956 */
/*
 * Change thread's machine-dependent userspace TSD base.
 * Called with nothing locked. Returns same way.
 * A remote target is held and stopped before the base is changed.
 */
kern_return_t
thread_set_tsd_base(
	thread_t                thread,
	mach_vm_offset_t        tsd_base)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/* Hold the target, then stop it with the mutex dropped. */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				/* Stop was aborted; nothing was changed. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
997
998 /*
999 * thread_set_apc_ast:
1000 *
1001 * Register the AST_APC callback that handles suspension and
1002 * termination, if it hasn't been installed already.
1003 *
1004 * Called with the thread mutex held.
1005 */
static void
thread_set_apc_ast(thread_t thread)
{
	/* Take the scheduling lock at splsched to do the real work. */
	spl_t s = splsched();

	thread_lock(thread);
	thread_set_apc_ast_locked(thread);
	thread_unlock(thread);

	splx(s);
}
1017
1018 /*
1019 * thread_set_apc_ast_locked:
1020 *
1021 * Do the work of registering for the AST_APC callback.
1022 *
1023 * Called with the thread mutex and scheduling lock held.
1024 */
static void
thread_set_apc_ast_locked(thread_t thread)
{
	thread_ast_set(thread, AST_APC);

	if (thread == current_thread()) {
		/* We'll notice the AST ourselves on the way out. */
		ast_propagate(thread);
	} else {
		processor_t processor = thread->last_processor;

		/* If the target is on-core right now, poke its processor. */
		if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			cause_ast_check(processor);
		}
	}
}
1042
1043 /*
1044 * Activation control support routines internal to this file:
1045 *
1046 */
1047
1048 /*
1049 * thread_suspended
1050 *
1051 * Continuation routine for thread suspension. It checks
1052 * to see whether there has been any new suspensions. If so, it
1053 * installs the AST_APC handler again.
1054 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		/* The wait was aborted, so we were never properly woken. */
		thread->suspend_parked = FALSE;
	} else {
		/* A real wakeup should have cleared the parked flag already. */
		assert(thread->suspend_parked == FALSE);
	}

	/* Re-arm the APC if new suspensions arrived while we slept. */
	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	thread_exception_return();
	/*NOTREACHED*/
}
1078
1079 /*
1080 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
1081 * Called with nothing locked. Returns (if it returns) the same way.
1082 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* The abort that got us here has been delivered; clear it. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Block with a continuation; resumes in thread_suspended(). */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1121
#if CONFIG_ROSETTA
extern kern_return_t
exception_deliver(
	thread_t thread,
	exception_type_t exception,
	mach_exception_data_t code,
	mach_msg_type_number_t codeCnt,
	struct exception_action *excp,
	lck_mtx_t *mutex);

/*
 * Deliver a caller-supplied exception on a thread of the current task.
 * Only allowed for translated (Rosetta) tasks, and EXC_CRASH may not
 * be raised this way. On successful delivery, the thread is resumed.
 */
kern_return_t
thread_raise_exception(
	thread_t        thread,
	exception_type_t exception,
	natural_t       code_count,
	int64_t         code,
	int64_t         sub_code)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Only threads of the calling task may be targeted. */
	if (task != current_task()) {
		return KERN_FAILURE;
	}

	if (!task_is_translated(task)) {
		return KERN_FAILURE;
	}

	if (exception == EXC_CRASH) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Deliver through the host-level exception actions. */
	int64_t codes[] = { code, sub_code };
	host_priv_t host_priv = host_priv_self();
	kern_return_t kr = exception_deliver(thread, exception, codes, code_count, host_priv->exc_actions, &host_priv->lock);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return thread_resume(thread);
}
#endif
1170
/*
 * Debug check run on the way back to userspace: panic (on MACH_ASSERT
 * builds) if the thread still holds an RW-lock promotion or a priority
 * floor boost that should have been dropped in the kernel.
 */
void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}
#endif /* MACH_ASSERT */
}
1188
1189
1190 /* Prototype, see justification above */
1191 kern_return_t
1192 act_set_state(
1193 thread_t thread,
1194 int flavor,
1195 thread_state_t state,
1196 mach_msg_type_number_t count);
1197
1198 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1199 act_set_state(
1200 thread_t thread,
1201 int flavor,
1202 thread_state_t state,
1203 mach_msg_type_number_t count)
1204 {
1205 if (thread == current_thread()) {
1206 return KERN_INVALID_ARGUMENT;
1207 }
1208
1209 return thread_set_state(thread, flavor, state, count);
1210 }
1211
1212 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1213 act_set_state_from_user(
1214 thread_t thread,
1215 int flavor,
1216 thread_state_t state,
1217 mach_msg_type_number_t count)
1218 {
1219 if (thread == current_thread()) {
1220 return KERN_INVALID_ARGUMENT;
1221 }
1222
1223 return thread_set_state_from_user(thread, flavor, state, count);
1224 }
1225
1226 /* Prototype, see justification above */
1227 kern_return_t
1228 act_get_state(
1229 thread_t thread,
1230 int flavor,
1231 thread_state_t state,
1232 mach_msg_type_number_t *count);
1233
1234 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1235 act_get_state(
1236 thread_t thread,
1237 int flavor,
1238 thread_state_t state,
1239 mach_msg_type_number_t *count)
1240 {
1241 if (thread == current_thread()) {
1242 return KERN_INVALID_ARGUMENT;
1243 }
1244
1245 return thread_get_state(thread, flavor, state, count);
1246 }
1247
1248 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1249 act_get_state_to_user(
1250 thread_t thread,
1251 int flavor,
1252 thread_state_t state,
1253 mach_msg_type_number_t *count)
1254 {
1255 if (thread == current_thread()) {
1256 return KERN_INVALID_ARGUMENT;
1257 }
1258
1259 return thread_get_state_to_user(thread, flavor, state, count);
1260 }
1261
1262 static void
act_set_ast(thread_t thread,ast_t ast)1263 act_set_ast(
1264 thread_t thread,
1265 ast_t ast)
1266 {
1267 spl_t s = splsched();
1268
1269 if (thread == current_thread()) {
1270 thread_ast_set(thread, ast);
1271 ast_propagate(thread);
1272 } else {
1273 processor_t processor;
1274
1275 thread_lock(thread);
1276 thread_ast_set(thread, ast);
1277 processor = thread->last_processor;
1278 if (processor != PROCESSOR_NULL &&
1279 processor->state == PROCESSOR_RUNNING &&
1280 processor->active_thread == thread) {
1281 cause_ast_check(processor);
1282 }
1283 thread_unlock(thread);
1284 }
1285
1286 splx(s);
1287 }
1288
1289 /*
1290 * set AST on thread without causing an AST check
1291 * and without taking the thread lock
1292 *
1293 * If thread is not the current thread, then it may take
1294 * up until the next context switch or quantum expiration
1295 * on that thread for it to notice the AST.
1296 */
1297 static void
act_set_ast_async(thread_t thread,ast_t ast)1298 act_set_ast_async(thread_t thread,
1299 ast_t ast)
1300 {
1301 thread_ast_set(thread, ast);
1302
1303 if (thread == current_thread()) {
1304 spl_t s = splsched();
1305 ast_propagate(thread);
1306 splx(s);
1307 }
1308 }
1309
1310 void
act_set_debug_assert(void)1311 act_set_debug_assert(void)
1312 {
1313 thread_t thread = current_thread();
1314 if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1315 thread_ast_set(thread, AST_DEBUG_ASSERT);
1316 }
1317 if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1318 spl_t s = splsched();
1319 ast_propagate(thread);
1320 splx(s);
1321 }
1322 }
1323
/* Request an AST_BSD on the target thread (IPIs the thread if on-core). */
void
act_set_astbsd(thread_t thread)
{
	act_set_ast(thread, AST_BSD);
}
1329
/*
 * Accumulate kevent AST bits on the thread, then request AST_KEVENT.
 * The bits are published (atomically) before the AST is set so the
 * handler is guaranteed to observe them.
 */
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1338
1339 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1340 act_clear_astkevent(thread_t thread, uint16_t bits)
1341 {
1342 /*
1343 * avoid the atomic operation if none of the bits is set,
1344 * which will be the common case.
1345 */
1346 uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1347 if (cur & bits) {
1348 cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1349 }
1350 return cur & bits;
1351 }
1352
/*
 * Request an AST_RESET_PCS on @thread.
 *
 * Returns true when the caller must wait for the target thread to
 * acknowledge: either the target is on-core and has been IPI'd
 * (trr_ipi_ack_pending), or it is currently observed in a fault inside
 * the restartable range (trr_fault_state).
 *
 * NOTE(review): the ->active comment below implies the caller holds the
 * task lock — confirm at call sites.
 */
bool
act_set_ast_reset_pcs(task_t task, thread_t thread)
{
	processor_t processor;
	bool needs_wait = false;
	spl_t s;

	s = splsched();

	if (thread == current_thread()) {
		/*
		 * this is called from the signal code,
		 * just set the AST and move on
		 */
		thread_ast_set(thread, AST_RESET_PCS);
		ast_propagate(thread);
	} else {
		thread_lock(thread);

		/* No reset-pcs handshake may already be in flight for this thread. */
		assert(thread->t_rr_state.trr_ipi_ack_pending == 0);
		assert(thread->t_rr_state.trr_sync_waiting == 0);

		processor = thread->last_processor;
		if (!thread->active) {
			/*
			 * ->active is being set before the thread is added
			 * to the thread list (under the task lock which
			 * the caller holds), and is reset before the thread
			 * lock is being taken by thread_terminate_self().
			 *
			 * The result is that this will never fail to
			 * set the AST on an thread that is active,
			 * but will not set it past thread_terminate_self().
			 */
		} else if (processor != PROCESSOR_NULL &&
		    processor->state == PROCESSOR_RUNNING &&
		    processor->active_thread == thread) {
			/* On-core: IPI the processor and wait for the ack. */
			thread->t_rr_state.trr_ipi_ack_pending = true;
			needs_wait = true;
			thread_ast_set(thread, AST_RESET_PCS);
			cause_ast_check(processor);
		} else if (thread_reset_pcs_in_range(task, thread)) {
			/* Off-core but its PC lies in the range of interest. */
			if (thread->t_rr_state.trr_fault_state) {
				/* Mid-fault in the range: caller must wait it out. */
				thread->t_rr_state.trr_fault_state =
				    TRR_FAULT_OBSERVED;
				needs_wait = true;
			}
			thread_ast_set(thread, AST_RESET_PCS);
		}
		thread_unlock(thread);
	}

	splx(s);

	return needs_wait;
}
1409
1410 void
act_set_kperf(thread_t thread)1411 act_set_kperf(thread_t thread)
1412 {
1413 /* safety check */
1414 if (thread != current_thread()) {
1415 if (!ml_get_interrupts_enabled()) {
1416 panic("unsafe act_set_kperf operation");
1417 }
1418 }
1419
1420 act_set_ast(thread, AST_KPERF);
1421 }
1422
#if CONFIG_MACF
/* Request an AST_MACF (MAC framework callback) on the target thread. */
void
act_set_astmacf(
	thread_t                thread)
{
	act_set_ast(thread, AST_MACF);
}
#endif
1431
/* Request an AST_LEDGER on the target thread (IPIs the thread if on-core). */
void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER);
}
1437
1438 /*
1439 * The ledger AST may need to be set while already holding
1440 * the thread lock. This routine skips sending the IPI,
1441 * allowing us to avoid the lock hold.
1442 *
1443 * However, it means the targeted thread must context switch
1444 * to recognize the ledger AST.
1445 */
1446 void
act_set_astledger_async(thread_t thread)1447 act_set_astledger_async(thread_t thread)
1448 {
1449 act_set_ast_async(thread, AST_LEDGER);
1450 }
1451
/* Request an AST_TELEMETRY_IO on the target thread (IPIs it if on-core). */
void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO);
}
1457
/* Request an AST_TELEMETRY_MACF on the target thread (IPIs it if on-core). */
void
act_set_macf_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_MACF);
}
1463
/* Request an AST_PROC_RESOURCE on the target thread (IPIs it if on-core). */
void
act_set_astproc_resource(thread_t thread)
{
	act_set_ast(thread, AST_PROC_RESOURCE);
}
1469