1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Copyright (c) 1993 The University of Utah and
33 * the Center for Software Science (CSS). All rights reserved.
34 *
35 * Permission to use, copy, modify and distribute this software and its
36 * documentation is hereby granted, provided that both the copyright
37 * notice and this permission notice appear in all copies of the
38 * software, derivative works or modified versions, and any portions
39 * thereof, and that both notices appear in supporting documentation.
40 *
41 * THE UNIVERSITY OF UTAH AND CSS ALLOW FREE USE OF THIS SOFTWARE IN ITS "AS
42 * IS" CONDITION. THE UNIVERSITY OF UTAH AND CSS DISCLAIM ANY LIABILITY OF
43 * ANY KIND FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
44 *
45 * CSS requests users of this software to return to [email protected] any
46 * improvements that they make and grant CSS redistribution rights.
47 *
48 * Author: Bryan Ford, University of Utah CSS
49 *
50 * Thread management routines
51 */
52
53 #include <mach/mach_types.h>
54 #include <mach/kern_return.h>
55 #include <mach/thread_act_server.h>
56 #include <mach/thread_act.h>
57
58 #include <kern/kern_types.h>
59 #include <kern/ast.h>
60 #include <kern/mach_param.h>
61 #include <kern/zalloc.h>
62 #include <kern/extmod_statistics.h>
63 #include <kern/thread.h>
64 #include <kern/task.h>
65 #include <kern/sched_prim.h>
66 #include <kern/misc_protos.h>
67 #include <kern/assert.h>
68 #include <kern/exception.h>
69 #include <kern/ipc_mig.h>
70 #include <kern/ipc_tt.h>
71 #include <kern/machine.h>
72 #include <kern/spl.h>
73 #include <kern/syscall_subr.h>
74 #include <kern/processor.h>
75 #include <kern/timer.h>
76 #include <kern/affinity.h>
77 #include <kern/host.h>
78
79 #include <stdatomic.h>
80
81 #include <security/mac_mach_internal.h>
82
83 static void act_abort(thread_t thread);
84
85 static void thread_suspended(void *arg, wait_result_t result);
86 static void thread_set_apc_ast(thread_t thread);
87 static void thread_set_apc_ast_locked(thread_t thread);
88
89 /*
90 * Internal routine to mark a thread as started.
91 * Always called with the thread mutex locked.
92 */
93 void
thread_start(thread_t thread)94 thread_start(
95 thread_t thread)
96 {
97 clear_wait(thread, THREAD_AWAKENED);
98 thread->started = TRUE;
99 }
100
101 /*
102 * Internal routine to mark a thread as waiting
103 * right after it has been created. The caller
104 * is responsible to call wakeup()/thread_wakeup()
105 * or thread_terminate() to get it going.
106 *
107 * Always called with the thread mutex locked.
108 *
109 * Task and task_threads mutexes also held
110 * (so nobody can set the thread running before
111 * this point)
112 *
113 * Converts TH_UNINT wait to THREAD_INTERRUPTIBLE
114 * to allow termination from this point forward.
115 */
void
thread_start_in_assert_wait(
	thread_t thread,
	event_t event,
	wait_interrupt_t interruptible)
{
	/*
	 * Lock ordering here is waitq lock -> thread lock; the thread lock
	 * is taken and dropped twice under the waitq lock.
	 */
	struct waitq *waitq = assert_wait_queue(event);
	wait_result_t wait_result;
	spl_t spl;

	spl = splsched();
	waitq_lock(waitq);

	/* clear out startup condition (safe because thread not started yet) */
	thread_lock(thread);
	assert(!thread->started);
	assert((thread->state & (TH_WAIT | TH_UNINT)) == (TH_WAIT | TH_UNINT));
	thread->state &= ~(TH_WAIT | TH_UNINT);
	thread_unlock(thread);

	/* assert wait interruptibly forever */
	wait_result = waitq_assert_wait64_locked(waitq, CAST_EVENT64_T(event),
	    interruptible,
	    TIMEOUT_URGENCY_SYS_NORMAL,
	    TIMEOUT_WAIT_FOREVER,
	    TIMEOUT_NO_LEEWAY,
	    thread);
	assert(wait_result == THREAD_WAITING);

	/* mark thread started while we still hold the waitq lock */
	thread_lock(thread);
	thread->started = TRUE;
	thread_unlock(thread);

	waitq_unlock(waitq);
	splx(spl);
}
153
154 /*
155 * Internal routine to terminate a thread.
156 * Sometimes called with task already locked.
157 *
158 * If thread is on core, cause AST check immediately;
159 * Otherwise, let the thread continue running in kernel
160 * until it hits AST.
161 */
kern_return_t
thread_terminate_internal(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	thread_mtx_lock(thread);

	if (thread->active) {
		/* First terminator wins: mark inactive under the mutex. */
		thread->active = FALSE;

		/* Post AST_APC so the target tears itself down at the AST. */
		act_abort(thread);

		if (thread->started) {
			/* Kick the target out of any interruptible wait. */
			clear_wait(thread, THREAD_INTERRUPTED);
		} else {
			/* Never-started thread: start it so it can reach the APC. */
			thread_start(thread);
		}
	} else {
		/* Someone already terminated this thread. */
		result = KERN_TERMINATED;
	}

	if (thread->affinity_set != NULL) {
		thread_affinity_terminate(thread);
	}

	/* unconditionally unpin the thread in internal termination */
	ipc_thread_port_unpin(get_thread_ro(thread)->tro_self_port);

	thread_mtx_unlock(thread);

	/* For cross-thread termination, wait until the target is off-CPU. */
	if (thread != current_thread() && result == KERN_SUCCESS) {
		thread_wait(thread, FALSE);
	}

	return result;
}
199
/*
 * Terminate a thread. For a kernel thread terminating itself this
 * does not return; other callers get a kern_return_t.
 */
kern_return_t
thread_terminate(
	thread_t thread)
{
	task_t task;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task = get_threadtask(thread);

	/* Kernel threads can't be terminated without their own cooperation */
	if (task == kernel_task && thread != current_thread()) {
		return KERN_FAILURE;
	}

	kern_return_t result = thread_terminate_internal(thread);

	/*
	 * If a kernel thread is terminating itself, force handle the APC_AST here.
	 * Kernel threads don't pass through the return-to-user AST checking code,
	 * but all threads must finish their own termination in thread_apc_ast.
	 */
	if (task == kernel_task) {
		assert(thread->active == FALSE);
		thread_ast_clear(thread, AST_APC);
		thread_apc_ast(thread);

		/* thread_apc_ast() must not return for an inactive thread */
		panic("thread_terminate");
		/* NOTREACHED */
	}

	return result;
}
235
236 /*
237 * [MIG Call] Terminate a thread.
238 *
239 * Cannot be used on threads managed by pthread.
240 */
241 kern_return_t
thread_terminate_from_user(thread_t thread)242 thread_terminate_from_user(
243 thread_t thread)
244 {
245 if (thread == THREAD_NULL) {
246 return KERN_INVALID_ARGUMENT;
247 }
248
249 if (thread_get_tag(thread) & THREAD_TAG_PTHREAD) {
250 return KERN_DENIED;
251 }
252
253 return thread_terminate(thread);
254 }
255
256 /*
257 * Terminate a thread with pinned control port.
258 *
259 * Can only be used on threads managed by pthread. Exported in pthread_kern.
260 */
261 kern_return_t
thread_terminate_pinned(thread_t thread)262 thread_terminate_pinned(
263 thread_t thread)
264 {
265 task_t task;
266
267 if (thread == THREAD_NULL) {
268 return KERN_INVALID_ARGUMENT;
269 }
270
271 task = get_threadtask(thread);
272
273
274 assert(task != kernel_task);
275 assert(thread_get_tag(thread) & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD));
276
277 thread_mtx_lock(thread);
278 if (task_is_pinned(task) && thread->active) {
279 assert(get_thread_ro(thread)->tro_self_port->ip_pinned == 1);
280 }
281 thread_mtx_unlock(thread);
282
283 kern_return_t result = thread_terminate_internal(thread);
284 return result;
285 }
286
287 /*
288 * Suspend execution of the specified thread.
289 * This is a recursive-style suspension of the thread, a count of
290 * suspends is maintained.
291 *
292 * Called with thread mutex held.
293 */
294 void
thread_hold(thread_t thread)295 thread_hold(thread_t thread)
296 {
297 if (thread->suspend_count++ == 0) {
298 thread_set_apc_ast(thread);
299 assert(thread->suspend_parked == FALSE);
300 }
301 }
302
303 /*
304 * Decrement internal suspension count, setting thread
305 * runnable when count falls to zero.
306 *
307 * Because the wait is abortsafe, we can't be guaranteed that the thread
308 * is currently actually waiting even if suspend_parked is set.
309 *
310 * Called with thread mutex held.
311 */
312 void
thread_release(thread_t thread)313 thread_release(thread_t thread)
314 {
315 assertf(thread->suspend_count > 0, "thread %p over-resumed", thread);
316
317 /* fail-safe on non-assert builds */
318 if (thread->suspend_count == 0) {
319 return;
320 }
321
322 if (--thread->suspend_count == 0) {
323 if (!thread->started) {
324 thread_start(thread);
325 } else if (thread->suspend_parked) {
326 thread->suspend_parked = FALSE;
327 thread_wakeup_thread(&thread->suspend_count, thread);
328 }
329 }
330 }
331
332 kern_return_t
thread_suspend(thread_t thread)333 thread_suspend(thread_t thread)
334 {
335 kern_return_t result = KERN_SUCCESS;
336
337 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
338 return KERN_INVALID_ARGUMENT;
339 }
340
341 thread_mtx_lock(thread);
342
343 if (thread->active) {
344 if (thread->user_stop_count++ == 0) {
345 thread_hold(thread);
346 }
347 } else {
348 result = KERN_TERMINATED;
349 }
350
351 thread_mtx_unlock(thread);
352
353 if (thread != current_thread() && result == KERN_SUCCESS) {
354 thread_wait(thread, FALSE);
355 }
356
357 return result;
358 }
359
360 kern_return_t
thread_resume(thread_t thread)361 thread_resume(thread_t thread)
362 {
363 kern_return_t result = KERN_SUCCESS;
364
365 if (thread == THREAD_NULL || get_threadtask(thread) == kernel_task) {
366 return KERN_INVALID_ARGUMENT;
367 }
368
369 thread_mtx_lock(thread);
370
371 if (thread->active) {
372 if (thread->user_stop_count > 0) {
373 if (--thread->user_stop_count == 0) {
374 thread_release(thread);
375 }
376 } else {
377 result = KERN_FAILURE;
378 }
379 } else {
380 result = KERN_TERMINATED;
381 }
382
383 thread_mtx_unlock(thread);
384
385 return result;
386 }
387
388 /*
389 * thread_depress_abort_from_user:
390 *
391 * Prematurely abort priority depression if there is one.
392 */
393 kern_return_t
thread_depress_abort_from_user(thread_t thread)394 thread_depress_abort_from_user(thread_t thread)
395 {
396 kern_return_t result;
397
398 if (thread == THREAD_NULL) {
399 return KERN_INVALID_ARGUMENT;
400 }
401
402 thread_mtx_lock(thread);
403
404 if (thread->active) {
405 result = thread_depress_abort(thread);
406 } else {
407 result = KERN_TERMINATED;
408 }
409
410 thread_mtx_unlock(thread);
411
412 return result;
413 }
414
415
416 /*
417 * Indicate that the thread should run the AST_APC callback
418 * to detect an abort condition.
419 *
420 * Called with thread mutex held.
421 */
static void
act_abort(
	thread_t thread)
{
	spl_t s = splsched();

	thread_lock(thread);

	if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
		/* No abort pending yet: post one and kill any depression. */
		thread->sched_flags |= TH_SFLAG_ABORT;
		thread_set_apc_ast_locked(thread);
		thread_depress_abort_locked(thread);
	} else {
		/*
		 * Abort already pending: clearing ABORTSAFELY upgrades a
		 * safe-point-only abort to an unconditional one.
		 */
		thread->sched_flags &= ~TH_SFLAG_ABORTSAFELY;
	}

	thread_unlock(thread);
	splx(s);
}
441
442 kern_return_t
thread_abort(thread_t thread)443 thread_abort(
444 thread_t thread)
445 {
446 kern_return_t result = KERN_SUCCESS;
447
448 if (thread == THREAD_NULL) {
449 return KERN_INVALID_ARGUMENT;
450 }
451
452 thread_mtx_lock(thread);
453
454 if (thread->active) {
455 act_abort(thread);
456 clear_wait(thread, THREAD_INTERRUPTED);
457 } else {
458 result = KERN_TERMINATED;
459 }
460
461 thread_mtx_unlock(thread);
462
463 return result;
464 }
465
kern_return_t
thread_abort_safely(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		spl_t s = splsched();

		thread_lock(thread);
		/*
		 * If the target is waiting at a safe point, try to interrupt
		 * the wait directly; otherwise fall back to posting a
		 * deferred, safe-point-only abort (ABORTED_MASK includes
		 * ABORTSAFELY) via the APC AST.
		 */
		if (!thread->at_safe_point ||
		    clear_wait_internal(thread, THREAD_INTERRUPTED) != KERN_SUCCESS) {
			if (!(thread->sched_flags & TH_SFLAG_ABORT)) {
				thread->sched_flags |= TH_SFLAG_ABORTED_MASK;
				thread_set_apc_ast_locked(thread);
				thread_depress_abort_locked(thread);
			}
		}
		thread_unlock(thread);
		splx(s);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
500
501 /*** backward compatibility hacks ***/
502 #include <mach/thread_info.h>
503 #include <mach/thread_special_ports.h>
504 #include <ipc/ipc_port.h>
505
506 kern_return_t
thread_info(thread_t thread,thread_flavor_t flavor,thread_info_t thread_info_out,mach_msg_type_number_t * thread_info_count)507 thread_info(
508 thread_t thread,
509 thread_flavor_t flavor,
510 thread_info_t thread_info_out,
511 mach_msg_type_number_t *thread_info_count)
512 {
513 kern_return_t result;
514
515 if (thread == THREAD_NULL) {
516 return KERN_INVALID_ARGUMENT;
517 }
518
519 thread_mtx_lock(thread);
520
521 if (thread->active || thread->inspection) {
522 result = thread_info_internal(
523 thread, flavor, thread_info_out, thread_info_count);
524 } else {
525 result = KERN_TERMINATED;
526 }
527
528 thread_mtx_unlock(thread);
529
530 return result;
531 }
532
/*
 * Fetch a thread's machine-dependent state, optionally converting it
 * to the userspace representation (TSSF_TRANSLATE_TO_USER).
 * Called with nothing locked; returns the same way.
 */
static inline kern_return_t
thread_get_state_internal(
	thread_t thread,
	int flavor,
	thread_state_t state, /* pointer to OUT array */
	mach_msg_type_number_t *state_count, /*IN/OUT*/
	thread_set_status_flags_t flags)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t to_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/*
			 * Cross-thread read: hold (suspend) the target, drop
			 * the mutex, then stop it so its state is stable
			 * while we read it.
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, FALSE)) {
				thread_mtx_lock(thread);
				result = machine_thread_get_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* Stop was interrupted; give up. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			/* Own state is trivially stable. */
			result = machine_thread_get_state(
				thread, flavor, state, state_count);
		}
	} else if (thread->inspection) {
		/* Inactive but being inspected (e.g. corpse): still readable. */
		result = machine_thread_get_state(
			thread, flavor, state, state_count);
	} else {
		result = KERN_TERMINATED;
	}

	if (to_user && result == KERN_SUCCESS) {
		result = machine_thread_state_convert_to_user(thread, flavor, state,
		    state_count, flags);
	}

	thread_mtx_unlock(thread);

	return result;
}
587
588 /* No prototype, since thread_act_server.h has the _to_user version if KERNEL_SERVER */
589
590 kern_return_t
591 thread_get_state(
592 thread_t thread,
593 int flavor,
594 thread_state_t state,
595 mach_msg_type_number_t *state_count);
596
597 kern_return_t
thread_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)598 thread_get_state(
599 thread_t thread,
600 int flavor,
601 thread_state_t state, /* pointer to OUT array */
602 mach_msg_type_number_t *state_count) /*IN/OUT*/
603 {
604 return thread_get_state_internal(thread, flavor, state, state_count, TSSF_FLAGS_NONE);
605 }
606
607 kern_return_t
thread_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * state_count)608 thread_get_state_to_user(
609 thread_t thread,
610 int flavor,
611 thread_state_t state, /* pointer to OUT array */
612 mach_msg_type_number_t *state_count) /*IN/OUT*/
613 {
614 return thread_get_state_internal(thread, flavor, state, state_count, TSSF_TRANSLATE_TO_USER);
615 }
616
617 /*
618 * Change thread's machine-dependent state. Called with nothing
619 * locked. Returns same way.
620 */
static inline kern_return_t
thread_set_state_internal(
	thread_t thread,
	int flavor,
	thread_state_t state,
	mach_msg_type_number_t state_count,
	thread_state_t old_state,
	mach_msg_type_number_t old_state_count,
	thread_set_status_flags_t flags)
{
	kern_return_t result = KERN_SUCCESS;
	boolean_t from_user = !!(flags & TSSF_TRANSLATE_TO_USER);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (from_user) {
			/*
			 * Authenticate/convert the user-supplied state first;
			 * a failed conversion bails out without touching the
			 * target's machine state.
			 */
			result = machine_thread_state_convert_from_user(thread, flavor,
			    state, state_count, old_state, old_state_count, flags);
			if (result != KERN_SUCCESS) {
				goto out;
			}
		}
		if (thread != current_thread()) {
			/*
			 * Cross-thread write: hold and stop the target so the
			 * state swap is atomic with respect to its execution.
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_state(
					thread, flavor, state, state_count);
				thread_unstop(thread);
			} else {
				/* Stop was interrupted; give up. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_state(
				thread, flavor, state, state_count);
		}
	} else {
		result = KERN_TERMINATED;
	}

	/* Account external modification of another thread's state. */
	if ((result == KERN_SUCCESS) && from_user) {
		extmod_statistics_incr_thread_set_state(thread);
	}

out:
	thread_mtx_unlock(thread);

	return result;
}
681
682 /* No prototype, since thread_act_server.h has the _from_user version if KERNEL_SERVER */
683 kern_return_t
684 thread_set_state(
685 thread_t thread,
686 int flavor,
687 thread_state_t state,
688 mach_msg_type_number_t state_count);
689
690 kern_return_t
thread_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)691 thread_set_state(
692 thread_t thread,
693 int flavor,
694 thread_state_t state,
695 mach_msg_type_number_t state_count)
696 {
697 return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_FLAGS_NONE);
698 }
699
700 kern_return_t
thread_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t state_count)701 thread_set_state_from_user(
702 thread_t thread,
703 int flavor,
704 thread_state_t state,
705 mach_msg_type_number_t state_count)
706 {
707 return thread_set_state_internal(thread, flavor, state, state_count, NULL, 0, TSSF_TRANSLATE_TO_USER);
708 }
709
/*
 * Re-target a user thread-state blob between the calling thread and
 * another thread: authenticate it against the source thread, then
 * re-encode it for the destination thread.
 */
kern_return_t
thread_convert_thread_state(
	thread_t thread,
	int direction,
	thread_state_flavor_t flavor,
	thread_state_t in_state, /* pointer to IN array */
	mach_msg_type_number_t in_state_count,
	thread_state_t out_state, /* pointer to OUT array */
	mach_msg_type_number_t *out_state_count) /*IN/OUT*/
{
	kern_return_t kr;
	thread_t to_thread = THREAD_NULL;
	thread_t from_thread = THREAD_NULL;
	mach_msg_type_number_t state_count = in_state_count;

	if (direction != THREAD_CONVERT_THREAD_STATE_TO_SELF &&
	    direction != THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		return KERN_INVALID_ARGUMENT;
	}

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Early reject: output buffer can't hold the input-sized state. */
	if (state_count > *out_state_count) {
		return KERN_INSUFFICIENT_BUFFER_SIZE;
	}

	/* Direction selects which thread authenticates and which re-encodes. */
	if (direction == THREAD_CONVERT_THREAD_STATE_FROM_SELF) {
		to_thread = thread;
		from_thread = current_thread();
	} else {
		to_thread = current_thread();
		from_thread = thread;
	}

	/* Authenticate and convert thread state to kernel representation */
	kr = machine_thread_state_convert_from_user(from_thread, flavor,
	    in_state, state_count, NULL, 0, TSSF_FLAGS_NONE);

	/* Return early if one of the thread was jop disabled while other wasn't */
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Convert thread state to target thread user representation */
	kr = machine_thread_state_convert_to_user(to_thread, flavor,
	    in_state, &state_count, TSSF_PRESERVE_FLAGS);

	if (kr == KERN_SUCCESS) {
		/* Re-check: conversion may have changed state_count. */
		if (state_count <= *out_state_count) {
			memcpy(out_state, in_state, state_count * sizeof(uint32_t));
			*out_state_count = state_count;
		} else {
			kr = KERN_INSUFFICIENT_BUFFER_SIZE;
		}
	}

	return kr;
}
770
771 /*
772 * Kernel-internal "thread" interfaces used outside this file:
773 */
774
775 /* Initialize (or re-initialize) a thread state. Called from execve
776 * with nothing locked, returns same way.
777 */
kern_return_t
thread_state_initialize(
	thread_t thread)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/*
			 * Cross-thread reset: hold and stop the target so the
			 * machine state is reinitialized while it is off-CPU.
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				machine_thread_state_initialize( thread );
				thread_unstop(thread);
			} else {
				/* Stop was interrupted; give up. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			machine_thread_state_initialize( thread );
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
817
/*
 * Copy the calling thread's machine state (and affinity, if any) into
 * 'target'. Called with nothing locked; returns the same way.
 */
kern_return_t
thread_dup(
	thread_t target)
{
	thread_t self = current_thread();
	kern_return_t result = KERN_SUCCESS;

	if (target == THREAD_NULL || target == self) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(target);

	if (target->active) {
		/* Stop the target so its state is overwritten while off-CPU. */
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(self, target, FALSE);

			if (self->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(self, target);
			}
			thread_unstop(target);
		} else {
			/* Stop was interrupted; give up. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
858
859
/*
 * Copy 'source' thread's machine state into 'target' (corpse-style dup:
 * target may be inactive-but-inspected). Called with nothing locked.
 */
kern_return_t
thread_dup2(
	thread_t source,
	thread_t target)
{
	kern_return_t result = KERN_SUCCESS;
	uint32_t active = 0;

	if (source == THREAD_NULL || target == THREAD_NULL || target == source) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Snapshot source liveness; it is not held across the copy below. */
	thread_mtx_lock(source);
	active = source->active;
	thread_mtx_unlock(source);

	if (!active) {
		return KERN_TERMINATED;
	}

	thread_mtx_lock(target);

	/* Unlike thread_dup(), an inspected (corpse) target is acceptable. */
	if (target->active || target->inspection) {
		thread_hold(target);

		thread_mtx_unlock(target);

		if (thread_stop(target, TRUE)) {
			thread_mtx_lock(target);
			result = machine_thread_dup(source, target, TRUE);
			if (source->affinity_set != AFFINITY_SET_NULL) {
				thread_affinity_dup(source, target);
			}
			thread_unstop(target);
		} else {
			/* Stop was interrupted; give up. */
			thread_mtx_lock(target);
			result = KERN_ABORTED;
		}

		thread_release(target);
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(target);

	return result;
}
908
909 /*
910 * thread_setstatus:
911 *
912 * Set the status of the specified thread.
913 * Called with (and returns with) no locks held.
914 */
915 kern_return_t
thread_setstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count)916 thread_setstatus(
917 thread_t thread,
918 int flavor,
919 thread_state_t tstate,
920 mach_msg_type_number_t count)
921 {
922 return thread_set_state(thread, flavor, tstate, count);
923 }
924
925 kern_return_t
thread_setstatus_from_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t count,thread_state_t old_tstate,mach_msg_type_number_t old_count,thread_set_status_flags_t flags)926 thread_setstatus_from_user(
927 thread_t thread,
928 int flavor,
929 thread_state_t tstate,
930 mach_msg_type_number_t count,
931 thread_state_t old_tstate,
932 mach_msg_type_number_t old_count,
933 thread_set_status_flags_t flags)
934 {
935 return thread_set_state_internal(thread, flavor, tstate, count, old_tstate,
936 old_count, flags | TSSF_TRANSLATE_TO_USER);
937 }
938
939 /*
940 * thread_getstatus:
941 *
942 * Get the status of the specified thread.
943 */
944 kern_return_t
thread_getstatus(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count)945 thread_getstatus(
946 thread_t thread,
947 int flavor,
948 thread_state_t tstate,
949 mach_msg_type_number_t *count)
950 {
951 return thread_get_state(thread, flavor, tstate, count);
952 }
953
954 kern_return_t
thread_getstatus_to_user(thread_t thread,int flavor,thread_state_t tstate,mach_msg_type_number_t * count,thread_set_status_flags_t flags)955 thread_getstatus_to_user(
956 thread_t thread,
957 int flavor,
958 thread_state_t tstate,
959 mach_msg_type_number_t *count,
960 thread_set_status_flags_t flags)
961 {
962 return thread_get_state_internal(thread, flavor, tstate, count, flags | TSSF_TRANSLATE_TO_USER);
963 }
964
965 /*
966 * Change thread's machine-dependent userspace TSD base.
967 * Called with nothing locked. Returns same way.
968 */
kern_return_t
thread_set_tsd_base(
	thread_t thread,
	mach_vm_offset_t tsd_base)
{
	kern_return_t result = KERN_SUCCESS;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	thread_mtx_lock(thread);

	if (thread->active) {
		if (thread != current_thread()) {
			/*
			 * Cross-thread update: hold and stop the target so the
			 * TSD base swap happens while it is off-CPU.
			 */
			thread_hold(thread);

			thread_mtx_unlock(thread);

			if (thread_stop(thread, TRUE)) {
				thread_mtx_lock(thread);
				result = machine_thread_set_tsd_base(thread, tsd_base);
				thread_unstop(thread);
			} else {
				/* Stop was interrupted; give up. */
				thread_mtx_lock(thread);
				result = KERN_ABORTED;
			}

			thread_release(thread);
		} else {
			result = machine_thread_set_tsd_base(thread, tsd_base);
		}
	} else {
		result = KERN_TERMINATED;
	}

	thread_mtx_unlock(thread);

	return result;
}
1009
1010 /*
1011 * thread_set_apc_ast:
1012 *
1013 * Register the AST_APC callback that handles suspension and
1014 * termination, if it hasn't been installed already.
1015 *
1016 * Called with the thread mutex held.
1017 */
1018 static void
thread_set_apc_ast(thread_t thread)1019 thread_set_apc_ast(thread_t thread)
1020 {
1021 spl_t s = splsched();
1022
1023 thread_lock(thread);
1024 thread_set_apc_ast_locked(thread);
1025 thread_unlock(thread);
1026
1027 splx(s);
1028 }
1029
1030 /*
1031 * thread_set_apc_ast_locked:
1032 *
1033 * Do the work of registering for the AST_APC callback.
1034 *
1035 * Called with the thread mutex and scheduling lock held.
1036 */
1037 static void
thread_set_apc_ast_locked(thread_t thread)1038 thread_set_apc_ast_locked(thread_t thread)
1039 {
1040 thread_ast_set(thread, AST_APC);
1041
1042 if (thread == current_thread()) {
1043 ast_propagate(thread);
1044 } else {
1045 processor_t processor = thread->last_processor;
1046
1047 if (processor != PROCESSOR_NULL &&
1048 processor->state == PROCESSOR_RUNNING &&
1049 processor->active_thread == thread) {
1050 cause_ast_check(processor);
1051 }
1052 }
1053 }
1054
1055 /*
1056 * Activation control support routines internal to this file:
1057 *
1058 */
1059
1060 /*
1061 * thread_suspended
1062 *
1063 * Continuation routine for thread suspension. It checks
1064 * to see whether there has been any new suspensions. If so, it
1065 * installs the AST_APC handler again.
1066 */
__attribute__((noreturn))
static void
thread_suspended(__unused void *parameter, wait_result_t result)
{
	thread_t thread = current_thread();

	thread_mtx_lock(thread);

	if (result == THREAD_INTERRUPTED) {
		/* The abortsafe wait was broken; we are no longer parked. */
		thread->suspend_parked = FALSE;
	} else {
		/* A real wakeup must have cleared suspend_parked already. */
		assert(thread->suspend_parked == FALSE);
	}

	/* Re-arm the APC if new suspends arrived while we were blocked. */
	if (thread->suspend_count > 0) {
		thread_set_apc_ast(thread);
	}

	thread_mtx_unlock(thread);

	/* Continuation: resume userspace return rather than unwinding. */
	thread_exception_return();
	/*NOTREACHED*/
}
1090
1091 /*
1092 * thread_apc_ast - handles AST_APC and drives thread suspension and termination.
1093 * Called with nothing locked. Returns (if it returns) the same way.
1094 */
void
thread_apc_ast(thread_t thread)
{
	thread_mtx_lock(thread);

	assert(thread->suspend_parked == FALSE);

	spl_t s = splsched();
	thread_lock(thread);

	/* TH_SFLAG_POLLDEPRESS is OK to have here */
	assert((thread->sched_flags & TH_SFLAG_DEPRESS) == 0);

	/* The abort has been delivered; clear the pending-abort flags. */
	thread->sched_flags &= ~TH_SFLAG_ABORTED_MASK;
	thread_unlock(thread);
	splx(s);

	if (!thread->active) {
		/* Thread is ready to terminate, time to tear it down */
		thread_mtx_unlock(thread);

		thread_terminate_self();
		/*NOTREACHED*/
	}

	/* If we're suspended, go to sleep and wait for someone to wake us up. */
	if (thread->suspend_count > 0) {
		thread->suspend_parked = TRUE;
		assert_wait(&thread->suspend_count,
		    THREAD_ABORTSAFE | THREAD_WAIT_NOREPORT_USER);
		thread_mtx_unlock(thread);

		/* Block with a continuation; resumes in thread_suspended(). */
		thread_block(thread_suspended);
		/*NOTREACHED*/
	}

	thread_mtx_unlock(thread);
}
1133
1134
/*
 * Debug-build sanity checks run when a thread returns to userspace:
 * panic if it still holds kernel rwlock promotions or priority floors.
 * No-op on non-MACH_ASSERT builds.
 */
void
thread_debug_return_to_user_ast(
	thread_t thread)
{
#pragma unused(thread)
#if MACH_ASSERT
	if ((thread->sched_flags & TH_SFLAG_RW_PROMOTED) ||
	    thread->rwlock_count > 0) {
		panic("Returning to userspace with rw lock held, thread %p sched_flag %u rwlock_count %d", thread, thread->sched_flags, thread->rwlock_count);
	}

	if ((thread->sched_flags & TH_SFLAG_FLOOR_PROMOTED) ||
	    thread->priority_floor_count > 0) {
		panic("Returning to userspace with floor boost set, thread %p sched_flag %u priority_floor_count %d", thread, thread->sched_flags, thread->priority_floor_count);
	}
#endif /* MACH_ASSERT */
}
1152
1153
1154 /* Prototype, see justification above */
1155 kern_return_t
1156 act_set_state(
1157 thread_t thread,
1158 int flavor,
1159 thread_state_t state,
1160 mach_msg_type_number_t count);
1161
1162 kern_return_t
act_set_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1163 act_set_state(
1164 thread_t thread,
1165 int flavor,
1166 thread_state_t state,
1167 mach_msg_type_number_t count)
1168 {
1169 if (thread == current_thread()) {
1170 return KERN_INVALID_ARGUMENT;
1171 }
1172
1173 return thread_set_state(thread, flavor, state, count);
1174 }
1175
1176 kern_return_t
act_set_state_from_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t count)1177 act_set_state_from_user(
1178 thread_t thread,
1179 int flavor,
1180 thread_state_t state,
1181 mach_msg_type_number_t count)
1182 {
1183 if (thread == current_thread()) {
1184 return KERN_INVALID_ARGUMENT;
1185 }
1186
1187 return thread_set_state_from_user(thread, flavor, state, count);
1188 }
1189
1190 /* Prototype, see justification above */
1191 kern_return_t
1192 act_get_state(
1193 thread_t thread,
1194 int flavor,
1195 thread_state_t state,
1196 mach_msg_type_number_t *count);
1197
1198 kern_return_t
act_get_state(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1199 act_get_state(
1200 thread_t thread,
1201 int flavor,
1202 thread_state_t state,
1203 mach_msg_type_number_t *count)
1204 {
1205 if (thread == current_thread()) {
1206 return KERN_INVALID_ARGUMENT;
1207 }
1208
1209 return thread_get_state(thread, flavor, state, count);
1210 }
1211
1212 kern_return_t
act_get_state_to_user(thread_t thread,int flavor,thread_state_t state,mach_msg_type_number_t * count)1213 act_get_state_to_user(
1214 thread_t thread,
1215 int flavor,
1216 thread_state_t state,
1217 mach_msg_type_number_t *count)
1218 {
1219 if (thread == current_thread()) {
1220 return KERN_INVALID_ARGUMENT;
1221 }
1222
1223 return thread_get_state_to_user(thread, flavor, state, count);
1224 }
1225
1226 static void
act_set_ast(thread_t thread,ast_t ast,ast_gen_t * gens)1227 act_set_ast(
1228 thread_t thread,
1229 ast_t ast,
1230 ast_gen_t *gens)
1231 {
1232 spl_t s = splsched();
1233
1234 if (thread == current_thread()) {
1235 thread_ast_set(thread, ast);
1236 ast_propagate(thread);
1237 } else {
1238 processor_t processor;
1239
1240 thread_lock(thread);
1241 thread_ast_set(thread, ast);
1242 processor = thread->last_processor;
1243 if (processor != PROCESSOR_NULL &&
1244 processor->state == PROCESSOR_RUNNING &&
1245 processor->active_thread == thread) {
1246 if (gens) {
1247 ast_generation_get(processor, gens);
1248 }
1249 cause_ast_check(processor);
1250 }
1251 thread_unlock(thread);
1252 }
1253
1254 splx(s);
1255 }
1256
1257 /*
1258 * set AST on thread without causing an AST check
1259 * and without taking the thread lock
1260 *
1261 * If thread is not the current thread, then it may take
1262 * up until the next context switch or quantum expiration
1263 * on that thread for it to notice the AST.
1264 */
1265 static void
act_set_ast_async(thread_t thread,ast_t ast)1266 act_set_ast_async(thread_t thread,
1267 ast_t ast)
1268 {
1269 thread_ast_set(thread, ast);
1270
1271 if (thread == current_thread()) {
1272 spl_t s = splsched();
1273 ast_propagate(thread);
1274 splx(s);
1275 }
1276 }
1277
1278 void
act_set_debug_assert(void)1279 act_set_debug_assert(void)
1280 {
1281 thread_t thread = current_thread();
1282 if (thread_ast_peek(thread, AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1283 thread_ast_set(thread, AST_DEBUG_ASSERT);
1284 }
1285 if (ast_peek(AST_DEBUG_ASSERT) != AST_DEBUG_ASSERT) {
1286 spl_t s = splsched();
1287 ast_propagate(thread);
1288 splx(s);
1289 }
1290 }
1291
/* Request an AST_BSD on the target thread (IPIs it if running remotely). */
void
act_set_astbsd(thread_t thread)
{
	act_set_ast(thread, AST_BSD, NULL);
}
1297
/*
 * Accumulate kevent AST bits on the thread, then request AST_KEVENT
 * asynchronously (no thread lock, no IPI — see act_set_ast_async).
 */
void
act_set_astkevent(thread_t thread, uint16_t bits)
{
	/* Record the requested bits before the AST flag is raised. */
	os_atomic_or(&thread->kevent_ast_bits, bits, relaxed);

	/* kevent AST shouldn't send immediate IPIs */
	act_set_ast_async(thread, AST_KEVENT);
}
1306
1307 uint16_t
act_clear_astkevent(thread_t thread,uint16_t bits)1308 act_clear_astkevent(thread_t thread, uint16_t bits)
1309 {
1310 /*
1311 * avoid the atomic operation if none of the bits is set,
1312 * which will be the common case.
1313 */
1314 uint16_t cur = os_atomic_load(&thread->kevent_ast_bits, relaxed);
1315 if (cur & bits) {
1316 cur = os_atomic_andnot_orig(&thread->kevent_ast_bits, bits, relaxed);
1317 }
1318 return cur & bits;
1319 }
1320
/*
 * Request an AST_RESET_PCS on the target thread; `gens` receives the
 * AST generation captured when an on-core target is IPI'd (see
 * act_set_ast).
 */
void
act_set_ast_reset_pcs(thread_t thread, ast_gen_t gens[])
{
	act_set_ast(thread, AST_RESET_PCS, gens);
}
1326
1327 void
act_set_kperf(thread_t thread)1328 act_set_kperf(thread_t thread)
1329 {
1330 /* safety check */
1331 if (thread != current_thread()) {
1332 if (!ml_get_interrupts_enabled()) {
1333 panic("unsafe act_set_kperf operation");
1334 }
1335 }
1336
1337 act_set_ast(thread, AST_KPERF, NULL);
1338 }
1339
#if CONFIG_MACF
/* Request an AST_MACF on the target thread. */
void
act_set_astmacf(thread_t thread)
{
	act_set_ast(thread, AST_MACF, NULL);
}
#endif
1348
/*
 * Request an AST_LEDGER on the target thread, IPI'ing it if it is
 * running remotely (contrast with act_set_astledger_async below).
 */
void
act_set_astledger(thread_t thread)
{
	act_set_ast(thread, AST_LEDGER, NULL);
}
1354
1355 /*
1356 * The ledger AST may need to be set while already holding
1357 * the thread lock. This routine skips sending the IPI,
1358 * allowing us to avoid the lock hold.
1359 *
1360 * However, it means the targeted thread must context switch
1361 * to recognize the ledger AST.
1362 */
1363 void
act_set_astledger_async(thread_t thread)1364 act_set_astledger_async(thread_t thread)
1365 {
1366 act_set_ast_async(thread, AST_LEDGER);
1367 }
1368
/* Request an AST_TELEMETRY_IO on the target thread. */
void
act_set_io_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_IO, NULL);
}
1374
/* Request an AST_TELEMETRY_MACF on the target thread. */
void
act_set_macf_telemetry_ast(thread_t thread)
{
	act_set_ast(thread, AST_TELEMETRY_MACF, NULL);
}
1380
/* Request an AST_PROC_RESOURCE on the target thread. */
void
act_set_astproc_resource(thread_t thread)
{
	act_set_ast(thread, AST_PROC_RESOURCE, NULL);
}
1386