1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94 #include <kdp/kdp_dyld.h>
95
96 #include <vm/vm_map.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_protos.h>
99
100 #include <security/mac_mach_internal.h>
101
102 #if CONFIG_CSR
103 #include <sys/csr.h>
104 #endif
105
106 #include <sys/code_signing.h> /* for developer mode state */
107
108 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
109 extern int cs_relax_platform_task_ports;
110 #endif
111
112 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
113
114 __options_decl(ipc_reply_port_type_t, uint32_t, {
115 IRPT_NONE = 0x00,
116 IRPT_USER = 0x01,
117 IRPT_KERNEL = 0x02,
118 });
119
120 /* forward declarations */
121 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
122 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
123 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
124 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
125 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
126 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
127 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
128 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
129 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
130 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
131
132 /*
133 * Routine: ipc_task_init
134 * Purpose:
135 * Initialize a task's IPC state.
136 *
137 * If non-null, some state will be inherited from the parent.
138 * The parent must be appropriately initialized.
139 * Conditions:
140 * Nothing locked.
141 */
142
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t pport;
	kern_return_t kr;
	int i;

	/* Create the task's IPC space; there is no recovery from failure here. */
	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/* Allocate the control (kport) and name (nport) kobject ports. */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	/* itk_self (pport) starts out as the same port as the control port */
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not work
		 * with a naked send right held in the kernel, so a corpse
		 * fork gets no settable-self send right.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Labels exist only for the used slots [FIRST_EXCEPTION, EXC_TYPES_COUNT) */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* No parent: start with empty exception/bootstrap state. */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* Inherit IPC state from the (already initialized) parent. */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
266
267 /*
268 * Routine: ipc_task_set_immovable_pinned
269 * Purpose:
270 * Make a task's control port immovable and/or pinned
271 * according to its control port options. If control port
272 * is immovable, allocate an immovable control port for the
273 * task and optionally pin it.
274 * Conditions:
275 * Task's control port is movable and not pinned.
276 */
277 void
ipc_task_set_immovable_pinned(task_t task)278 ipc_task_set_immovable_pinned(
279 task_t task)
280 {
281 ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
282 ipc_port_t new_pport;
283
284 /* pport is the same as kport at ipc_task_init() time */
285 assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
286 assert(task->itk_self == task->itk_settable_self);
287 assert(!task_is_a_corpse(task));
288
289 /* only tasks opt in immovable control port can have pinned control port */
290 if (task_is_immovable(task)) {
291 ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
292
293 if (task_is_pinned(task)) {
294 options |= IPC_KOBJECT_ALLOC_PINNED;
295 }
296
297 new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);
298
299 assert(kport != IP_NULL);
300 ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
301 kport->ip_kolabel->ikol_alt_port = new_pport;
302
303 itk_lock(task);
304 task->itk_self = new_pport;
305 itk_unlock(task);
306
307 /* enable the pinned port */
308 ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
309 }
310 }
311
312 /*
313 * Routine: ipc_task_enable
314 * Purpose:
315 * Enable a task for IPC access.
316 * Conditions:
317 * Nothing locked.
318 */
void
ipc_task_enable(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t pport;

	itk_lock(task);
	if (!task->active) {
		/*
		 * task has been terminated before we can enable IPC access.
		 * The check is to make sure we don't accidentally re-enable
		 * the task ports _after_ they've been disabled during
		 * task_terminate_internal(), in which case we will hit the
		 * !task->ipc_active assertion in ipc_task_terminate().
		 *
		 * Technically we should grab task lock when checking task
		 * active bit, but since task termination unsets task->active
		 * _before_ calling ipc_task_disable(), we can always see the
		 * truth with just itk_lock() and bail if disable has been called.
		 */
		itk_unlock(task);
		return;
	}

	/* a corpse may be re-enabled; a live task must not already be active */
	assert(!task->ipc_active || task_is_a_corpse(task));
	task->ipc_active = true;

	/* hook each already-allocated port flavor back up to the task */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
	}
	/* itk_self may be a distinct immovable port (see ipc_task_set_immovable_pinned) */
	pport = task->itk_self;
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(task));
		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
	}

	itk_unlock(task);
}
374
375 /*
376 * Routine: ipc_task_disable
377 * Purpose:
378 * Disable IPC access to a task.
379 * Conditions:
380 * Nothing locked.
381 */
382
void
ipc_task_disable(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* detach every port flavor from the task kobject */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			/* itk_self diverged: it must be the immovable control port */
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
453
454 /*
455 * Routine: ipc_task_terminate
456 * Purpose:
457 * Clean up and destroy a task's IPC state.
458 * Conditions:
459 * Nothing locked. The task must be suspended.
460 * (Or the current thread must be in the task.)
461 */
462
void
ipc_task_terminate(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;
	ipc_port_t sself;
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	/* strip every port pointer out of the task while the lock is held */
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* take ownership of the dyld notifier array; released below, unlocked */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	/* release exception ports and free their MACF labels */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		/* the separate immovable control port (see ipc_task_set_immovable_pinned) */
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
592
593 /*
594 * Routine: ipc_task_reset
595 * Purpose:
596 * Reset a task's IPC state to protect it when
597 * it enters an elevated security context. The
598 * task name port can remain the same - since it
599 * represents no specific privilege.
600 * Conditions:
601 * Nothing locked. The task must be suspended.
602 * (Or the current thread must be in the task.)
603 */
604
void
ipc_task_reset(
	task_t task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	/* allocate the replacement control port before taking the lock */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port has not been enabled, and is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	/* swap in the fresh control port */
	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for a corpse; needed to arm the no-senders notification */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(new_kport);
	}

	/* Set the old kport to IKOT_NONE */
	/* clears ikol_alt_port */
	ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_disable(old_pport, IKOT_TASK_CONTROL);
	}

	/* detach non-privileged exception ports; they are released below, unlocked */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			/* keep the corpse-notify port while a report is pending */
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	if (old_rdport != IP_NULL) {
		/* read port ikol_alt_port may point to kport, dealloc first */
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
	}
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
	}
}
751
752 /*
753 * Routine: ipc_thread_init
754 * Purpose:
755 * Initialize a thread's IPC state.
756 * Conditions:
757 * Nothing locked.
758 */
759
void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t kport;
	ipc_port_t pport;
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		/* pport: the immovable control port, enabled immediately */
		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/* kport: a labeled movable alias that substitutes to pport */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
813
/*
 * Make the main thread's control port immovable (and pinned if the owning
 * task is pinned), mirroring ipc_task_set_immovable_pinned() for threads.
 * Deferred to here because task_control_port_options is not known at main
 * thread creation time (see comment in ipc_thread_init()).
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		/* the movable kport now substitutes to the immovable new_pport */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		/* tro lives in read-only memory; update through the RO zone API */
		thread_mtx_lock(thread);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
853
/*
 * Wrapper type so the per-thread exception action table can be allocated
 * and freed as one typed object via kalloc_type()/kfree_type()
 * (see ipc_thread_init_exc_actions() / ipc_thread_destroy_exc_actions()).
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
857
858 static void
ipc_thread_init_exc_actions(thread_ro_t tro)859 ipc_thread_init_exc_actions(thread_ro_t tro)
860 {
861 struct exception_action *actions;
862
863 actions = kalloc_type(struct thread_init_exc_actions,
864 Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
865
866 #if CONFIG_MACF
867 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
868 mac_exc_associate_action_label(&actions[i],
869 mac_exc_create_label(&actions[i]));
870 }
871 #endif
872
873 zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
874 }
875
876 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)877 ipc_thread_destroy_exc_actions(thread_ro_t tro)
878 {
879 struct exception_action *actions = tro->tro_exc_actions;
880
881 if (actions) {
882 #if CONFIG_MACF
883 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
884 mac_exc_free_action_label(actions + i);
885 }
886 #endif
887
888 zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
889 struct thread_init_exc_actions *tr_actions =
890 (struct thread_init_exc_actions *)actions;
891 kfree_type(struct thread_init_exc_actions, tr_actions);
892 }
893 }
894
/*
 * Overwrite the three consecutive port fields of a thread_ro
 * (tro_self_port, tro_settable_self_port, tro_ports[]) with the values
 * from tro_tpl, in a single RO-zone mutation.  The static_asserts pin
 * the assumed field layout.
 */
static void
ipc_thread_ro_update_ports(
	thread_ro_t             tro,
	const struct thread_ro *tro_tpl)
{
	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
	/* two scalar port fields plus the tro_ports array */
	vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);

	/*
	 * NOTE(review): `struct ipc_port_t *` below names a distinct
	 * (incomplete) struct tag, not the `ipc_port_t` typedef; it still has
	 * pointer size so the asserts hold, but `struct ipc_port *` was
	 * presumably intended — confirm upstream.
	 */
	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
	    offsetof(struct thread_ro, tro_self_port) +
	    sizeof(struct ipc_port_t *));
	static_assert(offsetof(struct thread_ro, tro_ports) ==
	    offsetof(struct thread_ro, tro_self_port) +
	    2 * sizeof(struct ipc_port_t *));
	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
	    offs, &tro_tpl->tro_self_port, size);
}
912
913 /*
914 * Routine: ipc_thread_disable
915 * Purpose:
916 * Clean up and destroy a thread's IPC state.
917 * Conditions:
918 * Thread locked.
919 */
void
ipc_thread_disable(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	if (pport != kport && pport != IP_NULL) {
		/* the distinct immovable control port (see ipc_thread_init) */
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
965
966 /*
967 * Routine: ipc_thread_terminate
968 * Purpose:
969 * Clean up and destroy a thread's IPC state.
970 * Conditions:
971 * Nothing locked.
972 */
973
void
ipc_thread_terminate(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
	ipc_port_t pport = IP_NULL;
	ipc_port_t sport = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
	sport = tro->tro_settable_self_port;

	if (kport != IP_NULL) {
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}

		/* zero all port fields in the read-only struct in one shot */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
	}
	/* control port can also have ikol_alt_port */
	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
	}
	if (pport != kport && pport != IP_NULL) {
		/* the distinct immovable control port (see ipc_thread_init) */
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1046
1047 /*
1048 * Routine: ipc_thread_reset
1049 * Purpose:
1050 * Reset the IPC state for a given Mach thread when
1051 * its task enters an elevated security context.
1052 * All flavors of thread port and its exception ports have
1053 * to be reset. Its RPC reply port cannot have any
1054 * rights outstanding, so it should be fine. The thread
1055 * inspect and read port are set to NULL.
1056 * Conditions:
1057 * Nothing locked.
1058 */
1059
void
ipc_thread_reset(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport, old_pport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t has_old_exc_actions = FALSE;
	boolean_t thread_is_immovable;
	int i;

#if CONFIG_MACF
	/* fresh label applied to surviving exception actions below */
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);

	/* allocate the replacement control port before taking the mutex */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
	 * port is still movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * thread port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	thread_mtx_lock(thread);

	/* snapshot the old ports under the thread mutex */
	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

	old_sself = tro->tro_settable_self_port;
	old_pport = tro->tro_self_port;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
		/*
		 * NOTE(review): new_pport always aliases new_kport here (see
		 * comment above), so this second dealloc looks like a
		 * double-free leftover from when immovable threads got a
		 * distinct pinned pport — verify against upstream history.
		 */
		if (thread_is_immovable) {
			ipc_kobject_dealloc_port(new_pport, 0,
			    IKOT_THREAD_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ipc_active = true;

	/* install the replacement self/control ports in thread_ro */
	struct thread_ro tpl = {
		.tro_self_port = new_pport,
		/* we just made the port, no need to triple check */
		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
	};

	ipc_thread_ro_update_ports(tro, &tpl);

	if (old_kport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}
	if (thread_is_immovable && old_pport != IP_NULL) {
		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* privileged actions survive; nothing to release */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (has_old_exc_actions) {
		/* IP_NULL entries (privileged actions) are skipped by release */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel ports */
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
	}
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
	}
	if (old_pport != old_kport && old_pport != IP_NULL) {
		assert(thread_is_immovable);
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1202
1203 /*
1204 * Routine: retrieve_task_self_fast
1205 * Purpose:
1206 * Optimized version of retrieve_task_self,
1207 * that only works for the current task.
1208 *
1209 * Return a send right (possibly null/dead)
1210 * for the task's user-visible self port.
1211 * Conditions:
1212 * Nothing locked.
1213 */
1214
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	/* fast path is only valid for the calling task */
	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

#if CONFIG_CSR
	/* a settable-self differing from the control port means userspace interposed */
	if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		/* no interposing, return the IMMOVABLE port */
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
#if (DEBUG || DEVELOPMENT)
		if (task_is_immovable(task)) {
			assert(ip_is_immovable_send(port));
			if (task_is_pinned(task)) {
				/* pinned port is also immovable */
				assert(ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	itk_unlock(task);

	return port;
}
1253
1254 /*
1255 * Routine: mach_task_is_self
1256 * Purpose:
1257 * [MIG call] Checks if the task (control/read/inspect/name/movable)
1258 * port is pointing to current_task.
1259 */
1260 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1261 mach_task_is_self(
1262 task_t task,
1263 boolean_t *is_self)
1264 {
1265 if (task == TASK_NULL) {
1266 return KERN_INVALID_ARGUMENT;
1267 }
1268
1269 *is_self = (task == current_task());
1270
1271 return KERN_SUCCESS;
1272 }
1273
1274 /*
1275 * Routine: retrieve_thread_self_fast
1276 * Purpose:
1277 * Return a send right (possibly null/dead)
1278 * for the thread's user-visible self port.
1279 *
1280 * Only works for the current thread.
1281 *
1282 * Conditions:
1283 * Nothing locked.
1284 */
1285
ipc_port_t
retrieve_thread_self_fast(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	/* fast path is only valid for the calling thread */
	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(tro->tro_self_port != IP_NULL);

#if CONFIG_CSR
	/* a settable-self differing from the control port means userspace interposed */
	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		/* no interposing, return IMMOVABLE_PORT */
		port = ipc_kobject_make_send(tro->tro_self_port, thread,
		    IKOT_THREAD_CONTROL);
#if (DEBUG || DEVELOPMENT)
		if (task_is_immovable(tro->tro_task)) {
			assert(ip_is_immovable_send(port));
			uint16_t tag = thread_get_tag(thread);
			/* terminated threads are unpinned */
			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
				assert(ip_is_pinned(port));
			} else {
				assert(!ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	thread_mtx_unlock(thread);

	return port;
}
1329
1330 /*
1331 * Routine: task_self_trap [mach trap]
1332 * Purpose:
1333 * Give the caller send rights for his own task port.
1334 * Conditions:
1335 * Nothing locked.
1336 * Returns:
1337 * MACH_PORT_NULL if there are any resource failures
1338 * or other errors.
1339 */
1340
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	/* grab a send right for the self port and copy it into our space */
	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);

	/*
	 * When the right is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
	 * when it is pinned.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	if (sright == task->itk_self && sright->ip_pinned &&
	    MACH_PORT_VALID(name)) {
		itk_lock(task);
		/* revalidate under the task lock before writing the name */
		if (task->ipc_active) {
			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
				sright->ip_receiver_name = name;
			} else if (ip_get_receiver_name(sright) != name) {
				/* a pinned right must keep a single stable name */
				panic("mach_task_self() name changed");
			}
		}
		itk_unlock(task);
	}
	return name;
}
1377
1378 /*
1379 * Routine: thread_self_trap [mach trap]
1380 * Purpose:
1381 * Give the caller send rights for his own thread port.
1382 * Conditions:
1383 * Nothing locked.
1384 * Returns:
1385 * MACH_PORT_NULL if there are any resource failures
1386 * or other errors.
1387 */
1388
1389 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1390 thread_self_trap(
1391 __unused struct thread_self_trap_args *args)
1392 {
1393 thread_t thread = current_thread();
1394 ipc_space_t space = current_space();
1395 ipc_port_t sright;
1396 mach_port_name_t name;
1397
1398 sright = retrieve_thread_self_fast(thread);
1399 name = ipc_port_copyout_send(sright, space);
1400 return name;
1401 }
1402
1403 /*
1404 * Routine: mach_reply_port [mach trap]
1405 * Purpose:
1406 * Allocate a port for the caller.
1407 * Conditions:
1408 * Nothing locked.
1409 * Returns:
1410 * MACH_PORT_NULL if there are any resource failures
1411 * or other errors.
1412 */
1413
1414 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1415 mach_reply_port(
1416 __unused struct mach_reply_port_args *args)
1417 {
1418 ipc_port_t port;
1419 mach_port_name_t name;
1420 kern_return_t kr;
1421
1422 kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1423 &name, &port);
1424 if (kr == KERN_SUCCESS) {
1425 ip_mq_unlock(port);
1426 } else {
1427 name = MACH_PORT_NULL;
1428 }
1429 return name;
1430 }
1431
1432 /*
1433 * Routine: thread_get_special_reply_port [mach trap]
1434 * Purpose:
1435 * Allocate a special reply port for the calling thread.
1436 * Conditions:
1437 * Nothing locked.
1438 * Returns:
1439 * mach_port_name_t: send right & receive right for special reply port.
1440 * MACH_PORT_NULL if there are any resource failures
1441 * or other errors.
1442 */
1443
1444 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1445 thread_get_special_reply_port(
1446 __unused struct thread_get_special_reply_port_args *args)
1447 {
1448 ipc_port_t port;
1449 mach_port_name_t name;
1450 kern_return_t kr;
1451 thread_t thread = current_thread();
1452 ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
1453 IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
1454
1455 /* unbind the thread special reply port */
1456 if (IP_VALID(thread->ith_special_reply_port)) {
1457 ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1458 }
1459
1460 kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
1461 if (kr == KERN_SUCCESS) {
1462 ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1463 ip_mq_unlock(port);
1464 } else {
1465 name = MACH_PORT_NULL;
1466 }
1467 return name;
1468 }
1469
1470 /*
1471 * Routine: thread_get_kernel_special_reply_port
1472 * Purpose:
1473 * Allocate a kernel special reply port for the calling thread.
1474 * Conditions:
1475 * Nothing locked.
1476 * Returns:
1477 * Creates and sets kernel special reply port.
1478 * KERN_SUCCESS on Success.
1479 * KERN_FAILURE on Failure.
1480 */
1481
1482 kern_return_t
thread_get_kernel_special_reply_port(void)1483 thread_get_kernel_special_reply_port(void)
1484 {
1485 ipc_port_t port = IP_NULL;
1486 thread_t thread = current_thread();
1487
1488 /* unbind the thread special reply port */
1489 if (IP_VALID(thread->ith_kernel_reply_port)) {
1490 ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1491 }
1492
1493 port = ipc_port_alloc_reply(); /*returns a reference on the port */
1494 if (port != IPC_PORT_NULL) {
1495 ip_mq_lock(port);
1496 ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
1497 ip_mq_unlock(port);
1498 ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
1499 }
1500 return KERN_SUCCESS;
1501 }
1502
1503 /*
1504 * Routine: ipc_port_bind_special_reply_port_locked
1505 * Purpose:
1506 * Bind the given port to current thread as a special reply port.
1507 * Conditions:
1508 * Port locked.
1509 * Returns:
1510 * None.
1511 */
1512
1513 static void
ipc_port_bind_special_reply_port_locked(ipc_port_t port,ipc_reply_port_type_t reply_type)1514 ipc_port_bind_special_reply_port_locked(
1515 ipc_port_t port,
1516 ipc_reply_port_type_t reply_type)
1517 {
1518 thread_t thread = current_thread();
1519 ipc_port_t *reply_portp;
1520
1521 if (reply_type == IRPT_USER) {
1522 reply_portp = &thread->ith_special_reply_port;
1523 } else {
1524 reply_portp = &thread->ith_kernel_reply_port;
1525 }
1526
1527 assert(*reply_portp == NULL);
1528 assert(port->ip_specialreply);
1529 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1530
1531 ip_reference(port);
1532 *reply_portp = port;
1533 port->ip_messages.imq_srp_owner_thread = thread;
1534
1535 ipc_special_reply_port_bits_reset(port);
1536 }
1537
1538 /*
1539 * Routine: ipc_port_unbind_special_reply_port
1540 * Purpose:
1541 * Unbind the thread's special reply port.
1542 * If the special port has threads waiting on turnstile,
 * update its inheritor.
1544 * Condition:
1545 * Nothing locked.
1546 * Returns:
1547 * None.
1548 */
1549 static void
ipc_port_unbind_special_reply_port(thread_t thread,ipc_reply_port_type_t reply_type)1550 ipc_port_unbind_special_reply_port(
1551 thread_t thread,
1552 ipc_reply_port_type_t reply_type)
1553 {
1554 ipc_port_t *reply_portp;
1555
1556 if (reply_type == IRPT_USER) {
1557 reply_portp = &thread->ith_special_reply_port;
1558 } else {
1559 reply_portp = &thread->ith_kernel_reply_port;
1560 }
1561
1562 ipc_port_t special_reply_port = *reply_portp;
1563
1564 ip_mq_lock(special_reply_port);
1565
1566 *reply_portp = NULL;
1567 ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
1568 IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
1569 /* port unlocked */
1570
1571 /* Destroy the port if its kernel special reply, else just release a ref */
1572 if (reply_type == IRPT_USER) {
1573 ip_release(special_reply_port);
1574 } else {
1575 ipc_port_dealloc_reply(special_reply_port);
1576 }
1577 return;
1578 }
1579
1580 /*
1581 * Routine: thread_dealloc_kernel_special_reply_port
1582 * Purpose:
1583 * Unbind the thread's kernel special reply port.
1584 * If the special port has threads waiting on turnstile,
 * update its inheritor.
1586 * Condition:
1587 * Called on current thread or a terminated thread.
1588 * Returns:
1589 * None.
1590 */
1591
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* forwards to the common unbind path for the kernel reply port */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1597
1598 /*
1599 * Routine: thread_get_special_port [kernel call]
1600 * Purpose:
1601 * Clones a send right for one of the thread's
1602 * special ports.
1603 * Conditions:
1604 * Nothing locked.
1605 * Returns:
1606 * KERN_SUCCESS Extracted a send right.
1607 * KERN_INVALID_ARGUMENT The thread is null.
1608 * KERN_FAILURE The thread is dead.
1609 * KERN_INVALID_ARGUMENT Invalid special port.
1610 */
1611
/* forward declaration; the definition follows the internal helper below */
kern_return_t
thread_get_special_port(
	thread_inspect_t        thread,
	int                     which,
	ipc_port_t              *portp);
1617
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t        thread,
	thread_ro_t             tro,
	int                     which,
	ipc_port_t              *portp,
	mach_thread_flavor_t    flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	/* reject requests the caller's port flavor is not allowed to make */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	/* each branch is responsible for dropping the thread mutex */
	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* settable-self differing from control means userspace interposed */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1671
1672 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1673 thread_get_special_port(
1674 thread_inspect_t thread,
1675 int which,
1676 ipc_port_t *portp)
1677 {
1678 if (thread == THREAD_NULL) {
1679 return KERN_INVALID_ARGUMENT;
1680 }
1681
1682 return thread_get_special_port_internal(thread, get_thread_ro(thread),
1683 which, portp, THREAD_FLAVOR_CONTROL);
1684 }
1685
/* Returns a once-substitutable wrapper around the thread's (possibly interposed) self port. */
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);
	port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* settable-self differing from control means userspace interposed */
	if (tro->tro_settable_self_port != port) {
		port = ipc_port_make_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	}
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1706
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* takes a thread reference that must be dropped at "out" */
	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belonging to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the supplied port's kobject type to the request flavor */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_thread_inspect_no_eval only yields the above */
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	thread_deallocate(thread);
	return kr;
}
1763
1764 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1765 special_port_allowed_with_thread_flavor(
1766 int which,
1767 mach_thread_flavor_t flavor)
1768 {
1769 switch (flavor) {
1770 case THREAD_FLAVOR_CONTROL:
1771 return KERN_SUCCESS;
1772
1773 case THREAD_FLAVOR_READ:
1774
1775 switch (which) {
1776 case THREAD_READ_PORT:
1777 case THREAD_INSPECT_PORT:
1778 return KERN_SUCCESS;
1779 default:
1780 return KERN_INVALID_CAPABILITY;
1781 }
1782
1783 case THREAD_FLAVOR_INSPECT:
1784
1785 switch (which) {
1786 case THREAD_INSPECT_PORT:
1787 return KERN_SUCCESS;
1788 default:
1789 return KERN_INVALID_CAPABILITY;
1790 }
1791
1792 default:
1793 return KERN_INVALID_CAPABILITY;
1794 }
1795 }
1796
1797 /*
1798 * Routine: thread_set_special_port [kernel call]
1799 * Purpose:
1800 * Changes one of the thread's special ports,
1801 * setting it to the supplied send right.
1802 * Conditions:
1803 * Nothing locked. If successful, consumes
1804 * the supplied send right.
1805 * Returns:
1806 * KERN_SUCCESS Changed the special port.
1807 * KERN_INVALID_ARGUMENT The thread is null.
1808 * KERN_INVALID_RIGHT Port is marked as immovable.
1809 * KERN_FAILURE The thread is dead.
1810 * KERN_INVALID_ARGUMENT Invalid special port.
1811 * KERN_NO_ACCESS Restricted access to set port.
1812 */
1813
kern_return_t
thread_set_special_port(
	thread_t        thread,
	int             which,
	ipc_port_t      port)
{
	kern_return_t result = KERN_SUCCESS;
	thread_ro_t tro = NULL;
	ipc_port_t old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* immovable send rights may never be stashed as special ports */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* swap the settable-self right under the mutex */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			/* release the displaced right outside the mutex */
			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* silence unused-variable warnings when CSR is compiled out */
		(void)old;
		(void)result;
		(void)tro;
#endif
		return KERN_NO_ACCESS;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1870
1871 /*
1872 * Routine: task_get_special_port [kernel call]
1873 * Purpose:
1874 * Clones a send right for one of the task's
1875 * special ports.
1876 * Conditions:
1877 * Nothing locked.
1878 * Returns:
1879 * KERN_SUCCESS Extracted a send right.
1880 * KERN_INVALID_ARGUMENT The task is null.
1881 * KERN_FAILURE The task/space is dead.
1882 * KERN_INVALID_ARGUMENT Invalid special port.
1883 */
1884
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t      flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* reject requests the caller's port flavor is not allowed to make */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* each branch is responsible for dropping the itk lock */
	switch (which) {
	case TASK_KERNEL_PORT:
		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* settable-self differing from control means userspace interposed */
		if (task->itk_settable_self != port) {
			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
		} else
#endif
		{
			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
		}
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		itk_unlock(task);
		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
		    task, IKOT_TASK_NAME);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = host_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1974
1975 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
1976 kern_return_t
task_get_special_port(task_t task,int which,ipc_port_t * portp)1977 task_get_special_port(
1978 task_t task,
1979 int which,
1980 ipc_port_t *portp)
1981 {
1982 return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
1983 }
1984
/* Returns a once-substitutable wrapper around the task's (possibly interposed) self port. */
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	itk_lock(task);
	port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* settable-self differing from control means userspace interposed */
	if (task->itk_settable_self != port) {
		port = ipc_port_make_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, task, IKOT_TASK_CONTROL);
	}
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
2005
2006 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* takes a task reference that must be dropped at "out" */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	/* MAC policy may veto reading this special port */
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* map the supplied port's kobject type to the request flavor */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_task_inspect_no_eval only yields the above */
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	task_deallocate(task);
	return kr;
}
2068
2069 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2070 special_port_allowed_with_task_flavor(
2071 int which,
2072 mach_task_flavor_t flavor)
2073 {
2074 switch (flavor) {
2075 case TASK_FLAVOR_CONTROL:
2076 return KERN_SUCCESS;
2077
2078 case TASK_FLAVOR_READ:
2079
2080 switch (which) {
2081 case TASK_READ_PORT:
2082 case TASK_INSPECT_PORT:
2083 case TASK_NAME_PORT:
2084 return KERN_SUCCESS;
2085 default:
2086 return KERN_INVALID_CAPABILITY;
2087 }
2088
2089 case TASK_FLAVOR_INSPECT:
2090
2091 switch (which) {
2092 case TASK_INSPECT_PORT:
2093 case TASK_NAME_PORT:
2094 return KERN_SUCCESS;
2095 default:
2096 return KERN_INVALID_CAPABILITY;
2097 }
2098
2099 default:
2100 return KERN_INVALID_CAPABILITY;
2101 }
2102 }
2103
2104 /*
2105 * Routine: task_set_special_port [MIG call]
2106 * Purpose:
2107 * Changes one of the task's special ports,
2108 * setting it to the supplied send right.
2109 * Conditions:
2110 * Nothing locked. If successful, consumes
2111 * the supplied send right.
2112 * Returns:
2113 * KERN_SUCCESS Changed the special port.
2114 * KERN_INVALID_ARGUMENT The task is null.
2115 * KERN_INVALID_RIGHT Port is marked as immovable.
2116 * KERN_FAILURE The task/space is dead.
2117 * KERN_INVALID_ARGUMENT Invalid special port.
2118 * KERN_NO_ACCESS Restricted access to set port.
2119 */
2120
kern_return_t
task_set_special_port_from_user(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MAC policy may veto the set before delegating to the kernel path */
	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
		return KERN_DENIED;
	}
#endif

	return task_set_special_port(task, which, port);
}
2139
/* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* driver tasks are never allowed to replace special ports */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/* immovable send rights may never be stashed as special ports */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2179
2180 /*
2181 * Routine: task_set_special_port_internal
2182 * Purpose:
2183 * Changes one of the task's special ports,
2184 * setting it to the supplied send right.
2185 * Conditions:
2186 * Nothing locked. If successful, consumes
2187 * the supplied send right.
2188 * Returns:
2189 * KERN_SUCCESS Changed the special port.
2190 * KERN_INVALID_ARGUMENT The task is null.
2191 * KERN_FAILURE The task/space is dead.
2192 * KERN_INVALID_ARGUMENT Invalid special port.
2193 * KERN_NO_ACCESS Restricted access to overwrite port.
2194 */
2195
kern_return_t
task_set_special_port_internal(
	task_t                  task,
	int                     which,
	ipc_port_t              port)
{
	ipc_port_t old = IP_NULL;       /* displaced right, released after unlock */
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Drop the displaced send right outside the itk lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2273 /*
2274 * Routine: mach_ports_register [kernel call]
2275 * Purpose:
2276 * Stash a handful of port send rights in the task.
2277 * Child tasks will inherit these rights, but they
2278 * must use mach_ports_lookup to acquire them.
2279 *
2280 * The rights are supplied in a (wired) kalloc'd segment.
2281 * Rights which aren't supplied are assumed to be null.
2282 * Conditions:
2283 * Nothing locked. If successful, consumes
2284 * the supplied rights and memory.
2285 * Returns:
2286 * KERN_SUCCESS Stashed the port rights.
2287 * KERN_INVALID_RIGHT Port in array is marked immovable.
2288 * KERN_INVALID_ARGUMENT The task is null.
2289 * KERN_INVALID_ARGUMENT The task is dead.
2290 * KERN_INVALID_ARGUMENT The memory param is null.
2291 * KERN_INVALID_ARGUMENT Too many port rights supplied.
2292 */
2293
2294 kern_return_t
mach_ports_register(task_t task,mach_port_array_t memory,mach_msg_type_number_t portsCnt)2295 mach_ports_register(
2296 task_t task,
2297 mach_port_array_t memory,
2298 mach_msg_type_number_t portsCnt)
2299 {
2300 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
2301 unsigned int i;
2302
2303 if ((task == TASK_NULL) ||
2304 (portsCnt > TASK_PORT_REGISTER_MAX) ||
2305 (portsCnt && memory == NULL)) {
2306 return KERN_INVALID_ARGUMENT;
2307 }
2308
2309 /*
2310 * Pad the port rights with nulls.
2311 */
2312
2313 for (i = 0; i < portsCnt; i++) {
2314 ports[i] = memory[i];
2315 if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
2316 return KERN_INVALID_RIGHT;
2317 }
2318 }
2319 for (; i < TASK_PORT_REGISTER_MAX; i++) {
2320 ports[i] = IP_NULL;
2321 }
2322
2323 itk_lock(task);
2324 if (!task->ipc_active) {
2325 itk_unlock(task);
2326 return KERN_INVALID_ARGUMENT;
2327 }
2328
2329 /*
2330 * Replace the old send rights with the new.
2331 * Release the old rights after unlocking.
2332 */
2333
2334 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2335 ipc_port_t old;
2336
2337 old = task->itk_registered[i];
2338 task->itk_registered[i] = ports[i];
2339 ports[i] = old;
2340 }
2341
2342 itk_unlock(task);
2343
2344 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2345 if (IP_VALID(ports[i])) {
2346 ipc_port_release_send(ports[i]);
2347 }
2348 }
2349
2350 /*
2351 * Now that the operation is known to be successful,
2352 * we can free the memory.
2353 */
2354
2355 if (portsCnt != 0) {
2356 kfree_type(mach_port_t, portsCnt, memory);
2357 }
2358
2359 return KERN_SUCCESS;
2360 }
2361
2362 /*
2363 * Routine: mach_ports_lookup [kernel call]
2364 * Purpose:
2365 * Retrieves (clones) the stashed port send rights.
2366 * Conditions:
2367 * Nothing locked. If successful, the caller gets
2368 * rights and memory.
2369 * Returns:
2370 * KERN_SUCCESS Retrieved the send rights.
2371 * KERN_INVALID_ARGUMENT The task is null.
2372 * KERN_INVALID_ARGUMENT The task is dead.
2373 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
2374 */
2375
2376 kern_return_t
mach_ports_lookup(task_t task,mach_port_array_t * portsp,mach_msg_type_number_t * portsCnt)2377 mach_ports_lookup(
2378 task_t task,
2379 mach_port_array_t *portsp,
2380 mach_msg_type_number_t *portsCnt)
2381 {
2382 ipc_port_t *ports;
2383
2384 if (task == TASK_NULL) {
2385 return KERN_INVALID_ARGUMENT;
2386 }
2387
2388 ports = kalloc_type(ipc_port_t, TASK_PORT_REGISTER_MAX,
2389 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2390
2391 itk_lock(task);
2392 if (!task->ipc_active) {
2393 itk_unlock(task);
2394 kfree_type(ipc_port_t, TASK_PORT_REGISTER_MAX, ports);
2395
2396 return KERN_INVALID_ARGUMENT;
2397 }
2398
2399 for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2400 ports[i] = ipc_port_copy_send_any(task->itk_registered[i]);
2401 }
2402
2403 itk_unlock(task);
2404
2405 *portsp = ports;
2406 *portsCnt = TASK_PORT_REGISTER_MAX;
2407 return KERN_SUCCESS;
2408 }
2409
/*
 * Core security policy deciding whether `caller` may translate a port
 * for `victim` at the given flavor (control or read). `out_trans`
 * distinguishes converting a task to a port (out-trans) from resolving
 * an incoming port (in-trans).
 */
static kern_return_t
task_conversion_eval_internal(
	task_t                  caller,
	task_t                  victim,
	boolean_t               out_trans,
	int                     flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/* Both asserts hold: task and thread flavor values coincide. */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* cs_relax_platform_task_ports is a debug escape hatch. */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2504
2505 kern_return_t
task_conversion_eval(task_t caller,task_t victim,int flavor)2506 task_conversion_eval(task_t caller, task_t victim, int flavor)
2507 {
2508 /* flavor is mach_task_flavor_t or mach_thread_flavor_t */
2509 static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
2510 static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
2511 return task_conversion_eval_internal(caller, victim, FALSE, flavor);
2512 }
2513
2514 static kern_return_t
task_conversion_eval_out_trans(task_t caller,task_t victim,int flavor)2515 task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
2516 {
2517 assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
2518 return task_conversion_eval_internal(caller, victim, TRUE, flavor);
2519 }
2520
2521 /*
2522 * Routine: task_port_kotype_valid_for_flavor
2523 * Purpose:
2524 * Check whether the kobject type of a mach port
2525 * is valid for conversion to a task of given flavor.
2526 */
2527 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2528 task_port_kotype_valid_for_flavor(
2529 natural_t kotype,
2530 mach_task_flavor_t flavor)
2531 {
2532 switch (flavor) {
2533 /* Ascending capability */
2534 case TASK_FLAVOR_NAME:
2535 if (kotype == IKOT_TASK_NAME) {
2536 return TRUE;
2537 }
2538 OS_FALLTHROUGH;
2539 case TASK_FLAVOR_INSPECT:
2540 if (kotype == IKOT_TASK_INSPECT) {
2541 return TRUE;
2542 }
2543 OS_FALLTHROUGH;
2544 case TASK_FLAVOR_READ:
2545 if (kotype == IKOT_TASK_READ) {
2546 return TRUE;
2547 }
2548 OS_FALLTHROUGH;
2549 case TASK_FLAVOR_CONTROL:
2550 if (kotype == IKOT_TASK_CONTROL) {
2551 return TRUE;
2552 }
2553 break;
2554 default:
2555 panic("strange task flavor");
2556 }
2557
2558 return FALSE;
2559 }
2560
2561 /*
2562 * Routine: convert_port_to_task_with_flavor_locked_noref
2563 * Purpose:
2564 * Internal helper routine to convert from a locked port to a task.
2565 * Args:
2566 * port - target port
2567 * flavor - requested task port flavor
2568 * options - port translation options
2569 * Conditions:
2570 * Port is locked and active.
2571 */
2572 static task_t
convert_port_to_task_with_flavor_locked_noref(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2573 convert_port_to_task_with_flavor_locked_noref(
2574 ipc_port_t port,
2575 mach_task_flavor_t flavor,
2576 port_intrans_options_t options)
2577 {
2578 ipc_kobject_type_t type = ip_kotype(port);
2579 task_t task;
2580
2581 ip_mq_lock_held(port);
2582 require_ip_active(port);
2583
2584 if (!task_port_kotype_valid_for_flavor(type, flavor)) {
2585 return TASK_NULL;
2586 }
2587
2588 task = ipc_kobject_get_locked(port, type);
2589 if (task == TASK_NULL) {
2590 return TASK_NULL;
2591 }
2592
2593 if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
2594 assert(flavor == TASK_FLAVOR_CONTROL);
2595 return TASK_NULL;
2596 }
2597
2598 /* TODO: rdar://42389187 */
2599 if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
2600 assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
2601 }
2602
2603 if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
2604 task_conversion_eval(current_task(), task, flavor)) {
2605 return TASK_NULL;
2606 }
2607
2608 return task;
2609 }
2610
2611 /*
2612 * Routine: convert_port_to_task_with_flavor_locked
2613 * Purpose:
2614 * Internal helper routine to convert from a locked port to a task.
2615 * Args:
2616 * port - target port
2617 * flavor - requested task port flavor
2618 * options - port translation options
2619 * grp - task reference group
2620 * Conditions:
2621 * Port is locked and active.
2622 * Produces task ref or TASK_NULL.
2623 */
2624 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2625 convert_port_to_task_with_flavor_locked(
2626 ipc_port_t port,
2627 mach_task_flavor_t flavor,
2628 port_intrans_options_t options,
2629 task_grp_t grp)
2630 {
2631 task_t task;
2632
2633 task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2634 options);
2635
2636 if (task != TASK_NULL) {
2637 task_reference_grp(task, grp);
2638 }
2639
2640 return task;
2641 }
2642
2643 /*
2644 * Routine: convert_port_to_task_with_flavor
2645 * Purpose:
2646 * Internal helper for converting from a port to a task.
2647 * Doesn't consume the port ref; produces a task ref,
2648 * which may be null.
2649 * Args:
2650 * port - target port
2651 * flavor - requested task port flavor
2652 * options - port translation options
2653 * grp - task reference group
2654 * Conditions:
2655 * Nothing locked.
2656 */
2657 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2658 convert_port_to_task_with_flavor(
2659 ipc_port_t port,
2660 mach_task_flavor_t flavor,
2661 port_intrans_options_t options,
2662 task_grp_t grp)
2663 {
2664 task_t task = TASK_NULL;
2665 task_t self = current_task();
2666
2667 if (IP_VALID(port)) {
2668 if (port == self->itk_self) {
2669 task_reference_grp(self, grp);
2670 return self;
2671 }
2672
2673 ip_mq_lock(port);
2674 if (ip_active(port)) {
2675 task = convert_port_to_task_with_flavor_locked(port,
2676 flavor, options, grp);
2677 }
2678 ip_mq_unlock(port);
2679 }
2680
2681 return task;
2682 }
2683
2684 task_t
convert_port_to_task(ipc_port_t port)2685 convert_port_to_task(
2686 ipc_port_t port)
2687 {
2688 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
2689 PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
2690 }
2691
2692 task_t
convert_port_to_task_mig(ipc_port_t port)2693 convert_port_to_task_mig(
2694 ipc_port_t port)
2695 {
2696 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
2697 PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
2698 }
2699
2700 task_read_t
convert_port_to_task_read(ipc_port_t port)2701 convert_port_to_task_read(
2702 ipc_port_t port)
2703 {
2704 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2705 PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2706 }
2707
2708 static task_read_t
convert_port_to_task_read_no_eval(ipc_port_t port)2709 convert_port_to_task_read_no_eval(
2710 ipc_port_t port)
2711 {
2712 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2713 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2714 }
2715
2716 task_read_t
convert_port_to_task_read_mig(ipc_port_t port)2717 convert_port_to_task_read_mig(
2718 ipc_port_t port)
2719 {
2720 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2721 PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2722 }
2723
2724 task_inspect_t
convert_port_to_task_inspect(ipc_port_t port)2725 convert_port_to_task_inspect(
2726 ipc_port_t port)
2727 {
2728 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2729 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2730 }
2731
2732 task_inspect_t
convert_port_to_task_inspect_no_eval(ipc_port_t port)2733 convert_port_to_task_inspect_no_eval(
2734 ipc_port_t port)
2735 {
2736 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2737 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2738 }
2739
2740 task_inspect_t
convert_port_to_task_inspect_mig(ipc_port_t port)2741 convert_port_to_task_inspect_mig(
2742 ipc_port_t port)
2743 {
2744 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2745 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2746 }
2747
2748 task_name_t
convert_port_to_task_name(ipc_port_t port)2749 convert_port_to_task_name(
2750 ipc_port_t port)
2751 {
2752 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
2753 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2754 }
2755
2756 task_name_t
convert_port_to_task_name_mig(ipc_port_t port)2757 convert_port_to_task_name_mig(
2758 ipc_port_t port)
2759 {
2760 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
2761 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2762 }
2763
2764 /*
2765 * Routine: convert_port_to_task_policy
2766 * Purpose:
2767 * Convert from a port to a task.
2768 * Doesn't consume the port ref; produces a task ref,
2769 * which may be null.
2770 * If the port is being used with task_port_set(), any task port
2771 * type other than TASK_CONTROL requires an entitlement. If the
2772 * port is being used with task_port_get(), TASK_NAME requires an
2773 * entitlement.
2774 * Conditions:
2775 * Nothing locked.
2776 */
2777 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2778 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2779 {
2780 task_t task = TASK_NULL;
2781
2782 if (!IP_VALID(port)) {
2783 return TASK_NULL;
2784 }
2785
2786 task = set ?
2787 convert_port_to_task_mig(port) :
2788 convert_port_to_task_inspect_mig(port);
2789
2790 if (task == TASK_NULL &&
2791 IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2792 task = convert_port_to_task_name_mig(port);
2793 }
2794
2795 return task;
2796 }
2797
2798 task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)2799 convert_port_to_task_policy_set_mig(ipc_port_t port)
2800 {
2801 return convert_port_to_task_policy_mig(port, true);
2802 }
2803
2804 task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)2805 convert_port_to_task_policy_get_mig(ipc_port_t port)
2806 {
2807 return convert_port_to_task_policy_mig(port, false);
2808 }
2809
2810 /*
2811 * Routine: convert_port_to_task_suspension_token
2812 * Purpose:
2813 * Convert from a port to a task suspension token.
2814 * Doesn't consume the port ref; produces a suspension token ref,
2815 * which may be null.
2816 * Conditions:
2817 * Nothing locked.
2818 */
2819 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2820 convert_port_to_task_suspension_token_grp(
2821 ipc_port_t port,
2822 task_grp_t grp)
2823 {
2824 task_suspension_token_t task = TASK_NULL;
2825
2826 if (IP_VALID(port)) {
2827 ip_mq_lock(port);
2828 task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2829 if (task != TASK_NULL) {
2830 task_reference_grp(task, grp);
2831 }
2832 ip_mq_unlock(port);
2833 }
2834
2835 return task;
2836 }
2837
2838 task_suspension_token_t
convert_port_to_task_suspension_token_external(ipc_port_t port)2839 convert_port_to_task_suspension_token_external(
2840 ipc_port_t port)
2841 {
2842 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
2843 }
2844
2845 task_suspension_token_t
convert_port_to_task_suspension_token_mig(ipc_port_t port)2846 convert_port_to_task_suspension_token_mig(
2847 ipc_port_t port)
2848 {
2849 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
2850 }
2851
2852 task_suspension_token_t
convert_port_to_task_suspension_token_kernel(ipc_port_t port)2853 convert_port_to_task_suspension_token_kernel(
2854 ipc_port_t port)
2855 {
2856 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
2857 }
2858
2859 /*
2860 * Routine: convert_port_to_space_with_flavor
2861 * Purpose:
2862 * Internal helper for converting from a port to a space.
2863 * Doesn't consume the port ref; produces a space ref,
2864 * which may be null.
2865 * Args:
2866 * port - target port
2867 * flavor - requested ipc space flavor
2868 * options - port translation options
2869 * Conditions:
2870 * Nothing locked.
2871 */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* Name ports carry no space capability. */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* Borrowed task pointer, valid while the port lock is held. */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2910
2911 ipc_space_t
convert_port_to_space(ipc_port_t port)2912 convert_port_to_space(
2913 ipc_port_t port)
2914 {
2915 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
2916 PORT_INTRANS_OPTIONS_NONE);
2917 }
2918
2919 ipc_space_read_t
convert_port_to_space_read(ipc_port_t port)2920 convert_port_to_space_read(
2921 ipc_port_t port)
2922 {
2923 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
2924 PORT_INTRANS_ALLOW_CORPSE_TASK);
2925 }
2926
2927 ipc_space_read_t
convert_port_to_space_read_no_eval(ipc_port_t port)2928 convert_port_to_space_read_no_eval(
2929 ipc_port_t port)
2930 {
2931 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
2932 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
2933 }
2934
2935 ipc_space_inspect_t
convert_port_to_space_inspect(ipc_port_t port)2936 convert_port_to_space_inspect(
2937 ipc_port_t port)
2938 {
2939 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
2940 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
2941 }
2942
2943 /*
2944 * Routine: convert_port_to_map_with_flavor
2945 * Purpose:
2946 * Internal helper for converting from a port to a map.
2947 * Doesn't consume the port ref; produces a map ref,
2948 * which may be null.
2949 * Args:
2950 * port - target port
2951 * flavor - requested vm map flavor
2952 * options - port translation options
2953 * Conditions:
2954 * Nothing locked.
2955 */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* Map translation always runs the full conversion policy. */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			/* Borrowed task pointer, valid while the port lock is held. */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* Defense in depth: userspace must never reach a kernel map. */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
3005
3006 vm_map_t
convert_port_to_map(ipc_port_t port)3007 convert_port_to_map(
3008 ipc_port_t port)
3009 {
3010 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
3011 PORT_INTRANS_OPTIONS_NONE);
3012 }
3013
3014 vm_map_read_t
convert_port_to_map_read(ipc_port_t port)3015 convert_port_to_map_read(
3016 ipc_port_t port)
3017 {
3018 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
3019 PORT_INTRANS_ALLOW_CORPSE_TASK);
3020 }
3021
3022 vm_map_inspect_t
convert_port_to_map_inspect(__unused ipc_port_t port)3023 convert_port_to_map_inspect(
3024 __unused ipc_port_t port)
3025 {
3026 /* there is no vm_map_inspect_t routines at the moment. */
3027 return VM_MAP_INSPECT_NULL;
3028 }
3029
3030 /*
3031 * Routine: thread_port_kotype_valid_for_flavor
3032 * Purpose:
3033 * Check whether the kobject type of a mach port
3034 * is valid for conversion to a thread of given flavor.
3035 */
3036 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)3037 thread_port_kotype_valid_for_flavor(
3038 natural_t kotype,
3039 mach_thread_flavor_t flavor)
3040 {
3041 switch (flavor) {
3042 /* Ascending capability */
3043 case THREAD_FLAVOR_INSPECT:
3044 if (kotype == IKOT_THREAD_INSPECT) {
3045 return TRUE;
3046 }
3047 OS_FALLTHROUGH;
3048 case THREAD_FLAVOR_READ:
3049 if (kotype == IKOT_THREAD_READ) {
3050 return TRUE;
3051 }
3052 OS_FALLTHROUGH;
3053 case THREAD_FLAVOR_CONTROL:
3054 if (kotype == IKOT_THREAD_CONTROL) {
3055 return TRUE;
3056 }
3057 break;
3058 default:
3059 panic("strange thread flavor");
3060 }
3061
3062 return FALSE;
3063 }
3064
3065 /*
3066 * Routine: convert_port_to_thread_with_flavor_locked
3067 * Purpose:
3068 * Internal helper routine to convert from a locked port to a thread.
3069 * Args:
3070 * port - target port
3071 * flavor - requested thread port flavor
3072 * options - port translation options
3073 * Conditions:
3074 * Port is locked and active.
3075 * Produces a thread ref or THREAD_NULL.
3076 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t              port,
	mach_thread_flavor_t    flavor,
	port_intrans_options_t  options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* The port's kobject type must grant at least `flavor` capability. */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* Some callers require a target other than the calling thread. */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* Restricted to the caller's own task; skips the policy eval. */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* Corpse-task threads translate only when explicitly allowed. */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	/* Produce the thread reference promised to the caller. */
	thread_reference(thread);
	return thread;
}
3131
3132 /*
3133 * Routine: convert_port_to_thread_with_flavor
3134 * Purpose:
3135 * Internal helper for converting from a port to a thread.
3136 * Doesn't consume the port ref; produces a thread ref,
3137 * which may be null.
3138 * Args:
3139 * port - target port
3140 * flavor - requested thread port flavor
3141 * options - port translation options
3142 * Conditions:
3143 * Nothing locked.
3144 */
3145 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3146 convert_port_to_thread_with_flavor(
3147 ipc_port_t port,
3148 mach_thread_flavor_t flavor,
3149 port_intrans_options_t options)
3150 {
3151 thread_t thread = THREAD_NULL;
3152
3153 if (IP_VALID(port)) {
3154 ip_mq_lock(port);
3155 if (ip_active(port)) {
3156 thread = convert_port_to_thread_with_flavor_locked(port,
3157 flavor, options);
3158 }
3159 ip_mq_unlock(port);
3160 }
3161
3162 return thread;
3163 }
3164
3165 thread_t
convert_port_to_thread(ipc_port_t port)3166 convert_port_to_thread(
3167 ipc_port_t port)
3168 {
3169 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3170 PORT_INTRANS_OPTIONS_NONE);
3171 }
3172
3173 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3174 convert_port_to_thread_read(
3175 ipc_port_t port)
3176 {
3177 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3178 PORT_INTRANS_ALLOW_CORPSE_TASK);
3179 }
3180
3181 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3182 convert_port_to_thread_read_no_eval(
3183 ipc_port_t port)
3184 {
3185 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3186 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3187 }
3188
3189 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3190 convert_port_to_thread_inspect(
3191 ipc_port_t port)
3192 {
3193 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3194 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3195 }
3196
3197 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3198 convert_port_to_thread_inspect_no_eval(
3199 ipc_port_t port)
3200 {
3201 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3202 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3203 }
3204
3205 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3206 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3207 {
3208 switch (flavor) {
3209 case THREAD_FLAVOR_CONTROL:
3210 return IKOT_THREAD_CONTROL;
3211 case THREAD_FLAVOR_READ:
3212 return IKOT_THREAD_READ;
3213 default:
3214 return IKOT_THREAD_INSPECT;
3215 }
3216 }
3217
3218 /*
3219 * Routine: convert_thread_to_port_with_flavor
3220 * Purpose:
3221 * Convert from a thread to a port of given flavor.
3222 * Consumes a thread ref; produces a naked send right
3223 * which may be invalid.
3224 * Conditions:
3225 * Nothing locked.
3226 */
3227 static ipc_port_t
convert_thread_to_port_with_flavor(thread_t thread,thread_ro_t tro,mach_thread_flavor_t flavor)3228 convert_thread_to_port_with_flavor(
3229 thread_t thread,
3230 thread_ro_t tro,
3231 mach_thread_flavor_t flavor)
3232 {
3233 ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
3234 ipc_port_t port = IP_NULL;
3235
3236 thread_mtx_lock(thread);
3237
3238 /*
3239 * out-trans of weaker flavors are still permitted, but in-trans
3240 * is separately enforced.
3241 */
3242 if (flavor == THREAD_FLAVOR_CONTROL &&
3243 task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
3244 /* denied by security policy, make the port appear dead */
3245 port = IP_DEAD;
3246 goto exit;
3247 }
3248
3249 if (!thread->ipc_active) {
3250 goto exit;
3251 }
3252
3253 port = tro->tro_ports[flavor];
3254 if (flavor == THREAD_FLAVOR_CONTROL) {
3255 port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
3256 } else if (IP_VALID(port)) {
3257 (void)ipc_kobject_make_send_nsrequest(port, thread, kotype);
3258 } else {
3259 /*
3260 * Claim a send right on the thread read/inspect port, and request a no-senders
3261 * notification on that port (if none outstanding). A thread reference is not
3262 * donated here even though the ports are created lazily because it doesn't own the
3263 * kobject that it points to. Threads manage their lifetime explicitly and
3264 * have to synchronize with each other, between the task/thread terminating and the
3265 * send-once notification firing, and this is done under the thread mutex
3266 * rather than with atomics.
3267 */
3268 port = ipc_kobject_alloc_port(thread, kotype,
3269 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
3270 IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3271 /*
3272 * If Developer Mode is off, substitute read port for control
3273 * port if copying out to owning task's space, for the sake of
3274 * in-process exception handler.
3275 *
3276 * Also see: exception_deliver().
3277 */
3278 if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
3279 ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
3280 port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
3281 }
3282 zalloc_ro_update_field(ZONE_ID_THREAD_RO,
3283 tro, tro_ports[flavor], &port);
3284 }
3285
3286 exit:
3287 thread_mtx_unlock(thread);
3288 thread_deallocate(thread);
3289 return port;
3290 }
3291
3292 ipc_port_t
convert_thread_to_port(thread_t thread)3293 convert_thread_to_port(
3294 thread_t thread)
3295 {
3296 thread_ro_t tro = get_thread_ro(thread);
3297 return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3298 }
3299
3300 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3301 convert_thread_read_to_port(thread_read_t thread)
3302 {
3303 thread_ro_t tro = get_thread_ro(thread);
3304 return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3305 }
3306
3307 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3308 convert_thread_inspect_to_port(thread_inspect_t thread)
3309 {
3310 thread_ro_t tro = get_thread_ro(thread);
3311 return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3312 }
3313
3314
3315 /*
3316 * Routine: port_name_to_thread
3317 * Purpose:
3318 * Convert from a port name to a thread reference
3319 * A name of MACH_PORT_NULL is valid for the null thread.
3320 * Conditions:
3321 * Nothing locked.
3322 */
3323 thread_t
port_name_to_thread(mach_port_name_t name,port_intrans_options_t options)3324 port_name_to_thread(
3325 mach_port_name_t name,
3326 port_intrans_options_t options)
3327 {
3328 thread_t thread = THREAD_NULL;
3329 ipc_port_t kport;
3330 kern_return_t kr;
3331
3332 if (MACH_PORT_VALID(name)) {
3333 kr = ipc_port_translate_send(current_space(), name, &kport);
3334 if (kr == KERN_SUCCESS) {
3335 /* port is locked and active */
3336 assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
3337 !(options & PORT_INTRANS_SKIP_TASK_EVAL));
3338 thread = convert_port_to_thread_with_flavor_locked(kport,
3339 THREAD_FLAVOR_CONTROL, options);
3340 ip_mq_unlock(kport);
3341 }
3342 }
3343
3344 return thread;
3345 }
3346
3347 /*
3348 * Routine: port_name_is_pinned_itk_self
3349 * Purpose:
3350 * Returns whether this port name is for the pinned
3351 * mach_task_self (if it exists).
3352 *
3353 * task_self_trap() when the task port is pinned,
3354 * will memorize the name the port has in the space
3355 * in ip_receiver_name, which we can use to fast-track
3356 * this answer without taking any lock.
3357 *
3358 * ipc_task_disable() will set `ip_receiver_name` back to
3359 * MACH_PORT_SPECIAL_DEFAULT.
3360 *
3361 * Conditions:
3362 * self must be current_task()
3363 * Nothing locked.
3364 */
3365 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3366 port_name_is_pinned_itk_self(
3367 task_t self,
3368 mach_port_name_t name)
3369 {
3370 ipc_port_t kport = self->itk_self;
3371 return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3372 kport->ip_pinned && ip_get_receiver_name(kport) == name;
3373 }
3374
3375 /*
3376 * Routine: port_name_to_current_task*_noref
3377 * Purpose:
3378 * Convert from a port name to current_task()
3379 * A name of MACH_PORT_NULL is valid for the null task.
3380 *
3381 * If current_task() is in the process of being terminated,
3382 * this might return a non NULL task even when port_name_to_task()
3383 * would.
3384 *
3385 * However, this is an acceptable race that can't be controlled by
3386 * userspace, and that downstream code using the returned task
3387 * has to handle anyway.
3388 *
3389 * ipc_space_disable() does try to narrow this race,
3390 * by causing port_name_is_pinned_itk_self() to fail.
3391 *
3392 * Returns:
3393 * current_task() if the port name was for current_task()
3394 * at the appropriate flavor.
3395 *
3396 * TASK_NULL otherwise.
3397 *
3398 * Conditions:
3399 * Nothing locked.
3400 */
3401 static task_t
port_name_to_current_task_internal_noref(mach_port_name_t name,mach_task_flavor_t flavor)3402 port_name_to_current_task_internal_noref(
3403 mach_port_name_t name,
3404 mach_task_flavor_t flavor)
3405 {
3406 ipc_port_t kport;
3407 kern_return_t kr;
3408 task_t task = TASK_NULL;
3409 task_t self = current_task();
3410
3411 if (port_name_is_pinned_itk_self(self, name)) {
3412 return self;
3413 }
3414
3415 if (MACH_PORT_VALID(name)) {
3416 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3417 if (kr == KERN_SUCCESS) {
3418 ipc_kobject_type_t type = ip_kotype(kport);
3419 if (task_port_kotype_valid_for_flavor(type, flavor)) {
3420 task = ipc_kobject_get_locked(kport, type);
3421 }
3422 ip_mq_unlock(kport);
3423 if (task != self) {
3424 task = TASK_NULL;
3425 }
3426 }
3427 }
3428
3429 return task;
3430 }
3431
3432 task_t
port_name_to_current_task_noref(mach_port_name_t name)3433 port_name_to_current_task_noref(
3434 mach_port_name_t name)
3435 {
3436 return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3437 }
3438
3439 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3440 port_name_to_current_task_read_noref(
3441 mach_port_name_t name)
3442 {
3443 return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3444 }
3445
3446 /*
3447 * Routine: port_name_to_task
3448 * Purpose:
3449 * Convert from a port name to a task reference
3450 * A name of MACH_PORT_NULL is valid for the null task.
3451 * Conditions:
3452 * Nothing locked.
3453 */
3454 static task_t
port_name_to_task_grp(mach_port_name_t name,task_grp_t grp)3455 port_name_to_task_grp(
3456 mach_port_name_t name,
3457 task_grp_t grp)
3458 {
3459 ipc_port_t kport;
3460 kern_return_t kr;
3461 task_t task = TASK_NULL;
3462 task_t self = current_task();
3463
3464 if (port_name_is_pinned_itk_self(self, name)) {
3465 task_reference_grp(self, grp);
3466 return self;
3467 }
3468
3469 if (MACH_PORT_VALID(name)) {
3470 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3471 if (kr == KERN_SUCCESS) {
3472 /* port is locked and active */
3473 task = convert_port_to_task_with_flavor_locked(kport,
3474 TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, grp);
3475 ip_mq_unlock(kport);
3476 }
3477 }
3478 return task;
3479 }
3480
3481 task_t
port_name_to_task_external(mach_port_name_t name)3482 port_name_to_task_external(
3483 mach_port_name_t name)
3484 {
3485 return port_name_to_task_grp(name, TASK_GRP_EXTERNAL);
3486 }
3487
3488 task_t
port_name_to_task_kernel(mach_port_name_t name)3489 port_name_to_task_kernel(
3490 mach_port_name_t name)
3491 {
3492 return port_name_to_task_grp(name, TASK_GRP_KERNEL);
3493 }
3494
3495 /*
3496 * Routine: port_name_to_task_read
3497 * Purpose:
3498 * Convert from a port name to a task reference
3499 * A name of MACH_PORT_NULL is valid for the null task.
3500 * Conditions:
3501 * Nothing locked.
3502 */
3503 task_read_t
port_name_to_task_read(mach_port_name_t name)3504 port_name_to_task_read(
3505 mach_port_name_t name)
3506 {
3507 ipc_port_t kport;
3508 kern_return_t kr;
3509 task_read_t tr = TASK_READ_NULL;
3510 task_t self = current_task();
3511
3512 if (port_name_is_pinned_itk_self(self, name)) {
3513 task_reference_grp(self, TASK_GRP_KERNEL);
3514 return self;
3515 }
3516
3517 if (MACH_PORT_VALID(name)) {
3518 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3519 if (kr == KERN_SUCCESS) {
3520 /* port is locked and active */
3521 tr = convert_port_to_task_with_flavor_locked(kport,
3522 TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK,
3523 TASK_GRP_KERNEL);
3524 ip_mq_unlock(kport);
3525 }
3526 }
3527 return tr;
3528 }
3529
3530 /*
3531 * Routine: port_name_to_task_read_no_eval
3532 * Purpose:
3533 * Convert from a port name to a task reference
3534 * A name of MACH_PORT_NULL is valid for the null task.
3535 * Skips task_conversion_eval() during conversion.
3536 * Conditions:
3537 * Nothing locked.
3538 */
3539 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3540 port_name_to_task_read_no_eval(
3541 mach_port_name_t name)
3542 {
3543 ipc_port_t kport;
3544 kern_return_t kr;
3545 task_read_t tr = TASK_READ_NULL;
3546 task_t self = current_task();
3547
3548 if (port_name_is_pinned_itk_self(self, name)) {
3549 task_reference_grp(self, TASK_GRP_KERNEL);
3550 return self;
3551 }
3552
3553 if (MACH_PORT_VALID(name)) {
3554 port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3555 PORT_INTRANS_ALLOW_CORPSE_TASK;
3556
3557 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3558 if (kr == KERN_SUCCESS) {
3559 /* port is locked and active */
3560 tr = convert_port_to_task_with_flavor_locked(kport,
3561 TASK_FLAVOR_READ, options, TASK_GRP_KERNEL);
3562 ip_mq_unlock(kport);
3563 }
3564 }
3565 return tr;
3566 }
3567
3568 /*
3569 * Routine: port_name_to_task_name
3570 * Purpose:
3571 * Convert from a port name to a task reference
3572 * A name of MACH_PORT_NULL is valid for the null task.
3573 * Conditions:
3574 * Nothing locked.
3575 */
3576 task_name_t
port_name_to_task_name(mach_port_name_t name)3577 port_name_to_task_name(
3578 mach_port_name_t name)
3579 {
3580 ipc_port_t kport;
3581 kern_return_t kr;
3582 task_name_t tn = TASK_NAME_NULL;
3583 task_t self = current_task();
3584
3585 if (port_name_is_pinned_itk_self(self, name)) {
3586 task_reference_grp(self, TASK_GRP_KERNEL);
3587 return self;
3588 }
3589
3590 if (MACH_PORT_VALID(name)) {
3591 port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3592 PORT_INTRANS_ALLOW_CORPSE_TASK;
3593
3594 kr = ipc_port_translate_send(current_space(), name, &kport);
3595 if (kr == KERN_SUCCESS) {
3596 /* port is locked and active */
3597 tn = convert_port_to_task_with_flavor_locked(kport,
3598 TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL);
3599 ip_mq_unlock(kport);
3600 }
3601 }
3602 return tn;
3603 }
3604
3605 /*
3606 * Routine: port_name_to_task_id_token
3607 * Purpose:
3608 * Convert from a port name to a task identity token reference
3609 * Conditions:
3610 * Nothing locked.
3611 */
3612 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3613 port_name_to_task_id_token(
3614 mach_port_name_t name)
3615 {
3616 ipc_port_t port;
3617 kern_return_t kr;
3618 task_id_token_t token = TASK_ID_TOKEN_NULL;
3619
3620 if (MACH_PORT_VALID(name)) {
3621 kr = ipc_port_translate_send(current_space(), name, &port);
3622 if (kr == KERN_SUCCESS) {
3623 token = convert_port_to_task_id_token(port);
3624 ip_mq_unlock(port);
3625 }
3626 }
3627 return token;
3628 }
3629
3630 /*
3631 * Routine: port_name_to_host
3632 * Purpose:
3633 * Convert from a port name to a host pointer.
3634 * NOTE: This does _not_ return a +1 reference to the host_t
3635 * Conditions:
3636 * Nothing locked.
3637 */
3638 host_t
port_name_to_host(mach_port_name_t name)3639 port_name_to_host(
3640 mach_port_name_t name)
3641 {
3642 host_t host = HOST_NULL;
3643 kern_return_t kr;
3644 ipc_port_t port;
3645
3646 if (MACH_PORT_VALID(name)) {
3647 kr = ipc_port_translate_send(current_space(), name, &port);
3648 if (kr == KERN_SUCCESS) {
3649 host = convert_port_to_host(port);
3650 ip_mq_unlock(port);
3651 }
3652 }
3653 return host;
3654 }
3655
3656 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3657 task_flavor_to_kotype(mach_task_flavor_t flavor)
3658 {
3659 switch (flavor) {
3660 case TASK_FLAVOR_CONTROL:
3661 return IKOT_TASK_CONTROL;
3662 case TASK_FLAVOR_READ:
3663 return IKOT_TASK_READ;
3664 case TASK_FLAVOR_INSPECT:
3665 return IKOT_TASK_INSPECT;
3666 default:
3667 return IKOT_TASK_NAME;
3668 }
3669 }
3670
3671 /*
3672 * Routine: convert_task_to_port_with_flavor
3673 * Purpose:
3674 * Convert from a task to a port of given flavor.
3675 * Consumes a task ref; produces a naked send right
3676 * which may be invalid.
3677 * Conditions:
3678 * Nothing locked.
3679 */
3680 ipc_port_t
convert_task_to_port_with_flavor(task_t task,mach_task_flavor_t flavor,task_grp_t grp)3681 convert_task_to_port_with_flavor(
3682 task_t task,
3683 mach_task_flavor_t flavor,
3684 task_grp_t grp)
3685 {
3686 ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
3687 ipc_port_t port = IP_NULL;
3688
3689 itk_lock(task);
3690
3691 if (!task->ipc_active) {
3692 goto exit;
3693 }
3694
3695 /*
3696 * out-trans of weaker flavors are still permitted, but in-trans
3697 * is separately enforced.
3698 */
3699 if (flavor == TASK_FLAVOR_CONTROL &&
3700 task_conversion_eval_out_trans(current_task(), task, flavor)) {
3701 /* denied by security policy, make the port appear dead */
3702 port = IP_DEAD;
3703 goto exit;
3704 }
3705
3706 switch (flavor) {
3707 case TASK_FLAVOR_CONTROL:
3708 case TASK_FLAVOR_NAME:
3709 port = ipc_kobject_make_send(task->itk_task_ports[flavor],
3710 task, kotype);
3711 break;
3712 /*
3713 * Claim a send right on the task read/inspect port,
3714 * and request a no-senders notification on that port
3715 * (if none outstanding).
3716 *
3717 * The task's itk_lock is used to synchronize the handling
3718 * of the no-senders notification with the task termination.
3719 */
3720 case TASK_FLAVOR_READ:
3721 case TASK_FLAVOR_INSPECT:
3722 port = task->itk_task_ports[flavor];
3723 if (IP_VALID(port)) {
3724 (void)ipc_kobject_make_send_nsrequest(port,
3725 task, kotype);
3726 } else {
3727 port = ipc_kobject_alloc_port(task, kotype,
3728 IPC_KOBJECT_ALLOC_MAKE_SEND |
3729 IPC_KOBJECT_ALLOC_NSREQUEST |
3730 IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3731 /*
3732 * If Developer Mode is off, substitute read port for control port if
3733 * copying out to owning task's space, for the sake of in-process
3734 * exception handler.
3735 *
3736 * Also see: exception_deliver().
3737 */
3738 if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
3739 ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
3740 port->ip_kolabel->ikol_alt_port = task->itk_self;
3741 }
3742
3743 task->itk_task_ports[flavor] = port;
3744 }
3745 break;
3746 }
3747
3748 exit:
3749 itk_unlock(task);
3750 task_deallocate_grp(task, grp);
3751 return port;
3752 }
3753
/*
 * Produce a send right for a corpse's control port and arm a no-senders
 * notification on it. Consumes a task (corpse) ref.
 */
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	/* The corpse control port must not have been handed out yet. */
	assert(port->ip_srights == 0);
	kr = ipc_kobject_make_send_nsrequest(port, corpse, IKOT_TASK_CONTROL);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	task_deallocate(corpse);
	return port;
}
3772
3773 ipc_port_t
convert_task_to_port(task_t task)3774 convert_task_to_port(
3775 task_t task)
3776 {
3777 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3778 }
3779
3780 ipc_port_t
convert_task_read_to_port(task_read_t task)3781 convert_task_read_to_port(
3782 task_read_t task)
3783 {
3784 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3785 }
3786
3787 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3788 convert_task_inspect_to_port(
3789 task_inspect_t task)
3790 {
3791 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3792 }
3793
3794 ipc_port_t
convert_task_name_to_port(task_name_t task)3795 convert_task_name_to_port(
3796 task_name_t task)
3797 {
3798 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3799 }
3800
3801 ipc_port_t
convert_task_to_port_external(task_t task)3802 convert_task_to_port_external(task_t task)
3803 {
3804 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3805 }
3806
3807 ipc_port_t
convert_task_read_to_port_external(task_t task)3808 convert_task_read_to_port_external(task_t task)
3809 {
3810 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3811 }
3812
/*
 * Produce a send right on the task's own (possibly pinned) control port.
 * Consumes a task ref. May only be called for current_task().
 */
ipc_port_t
convert_task_to_port_pinned(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);

	/* Only hand out a right while task IPC is still active. */
	if (task->ipc_active) {
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
	}

	/* Immovable tasks must hand out pinned, immovable control ports. */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	task_deallocate(task);
	return port;
}
3837 /*
3838 * Routine: convert_task_suspend_token_to_port
3839 * Purpose:
3840 * Convert from a task suspension token to a port.
3841 * Consumes a task suspension token ref; produces a naked send-once right
3842 * which may be invalid.
3843 * Conditions:
3844 * Nothing locked.
3845 */
3846 static ipc_port_t
convert_task_suspension_token_to_port_grp(task_suspension_token_t task,task_grp_t grp)3847 convert_task_suspension_token_to_port_grp(
3848 task_suspension_token_t task,
3849 task_grp_t grp)
3850 {
3851 ipc_port_t port;
3852
3853 task_lock(task);
3854 if (task->active) {
3855 itk_lock(task);
3856 if (task->itk_resume == IP_NULL) {
3857 task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
3858 IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
3859 }
3860
3861 /*
3862 * Create a send-once right for each instance of a direct user-called
3863 * task_suspend2 call. Each time one of these send-once rights is abandoned,
3864 * the notification handler will resume the target task.
3865 */
3866 port = task->itk_resume;
3867 ipc_kobject_require(port, task, IKOT_TASK_RESUME);
3868 port = ipc_port_make_sonce(port);
3869 itk_unlock(task);
3870 assert(IP_VALID(port));
3871 } else {
3872 port = IP_NULL;
3873 }
3874
3875 task_unlock(task);
3876 task_suspension_token_deallocate_grp(task, grp);
3877
3878 return port;
3879 }
3880
3881 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3882 convert_task_suspension_token_to_port_external(
3883 task_suspension_token_t task)
3884 {
3885 return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3886 }
3887
3888 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3889 convert_task_suspension_token_to_port_mig(
3890 task_suspension_token_t task)
3891 {
3892 return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3893 }
3894
/*
 * Produce a send right on the thread's own (possibly pinned) control port.
 * Consumes a thread ref; produces a naked send right which may be invalid.
 */
ipc_port_t
convert_thread_to_port_pinned(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/* Only hand out a right while thread IPC is still active. */
	if (thread->ipc_active) {
		port = ipc_kobject_make_send(tro->tro_self_port,
		    thread, IKOT_THREAD_CONTROL);
	}

	/* Immovable tasks must hand out immovable thread control ports. */
	if (port && task_is_immovable(tro->tro_task)) {
		assert(ip_is_immovable_send(port));
	}

	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3917 /*
3918 * Routine: space_deallocate
3919 * Purpose:
3920 * Deallocate a space ref produced by convert_port_to_space.
3921 * Conditions:
3922 * Nothing locked.
3923 */
3924
3925 void
space_deallocate(ipc_space_t space)3926 space_deallocate(
3927 ipc_space_t space)
3928 {
3929 if (space != IS_NULL) {
3930 is_release(space);
3931 }
3932 }
3933
3934 /*
3935 * Routine: space_read_deallocate
3936 * Purpose:
3937 * Deallocate a space read ref produced by convert_port_to_space_read.
3938 * Conditions:
3939 * Nothing locked.
3940 */
3941
3942 void
space_read_deallocate(ipc_space_read_t space)3943 space_read_deallocate(
3944 ipc_space_read_t space)
3945 {
3946 if (space != IS_INSPECT_NULL) {
3947 is_release((ipc_space_t)space);
3948 }
3949 }
3950
3951 /*
3952 * Routine: space_inspect_deallocate
3953 * Purpose:
3954 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3955 * Conditions:
3956 * Nothing locked.
3957 */
3958
3959 void
space_inspect_deallocate(ipc_space_inspect_t space)3960 space_inspect_deallocate(
3961 ipc_space_inspect_t space)
3962 {
3963 if (space != IS_INSPECT_NULL) {
3964 is_release((ipc_space_t)space);
3965 }
3966 }
3967
3968
3969 /*
3970 * Routine: thread/task_set_exception_ports [kernel call]
3971 * Purpose:
3972 * Sets the thread/task exception port, flavor and
3973 * behavior for the exception types specified by the mask.
3974 * There will be one send right per exception per valid
3975 * port.
3976 * Conditions:
3977 * Nothing locked. If successful, consumes
3978 * the supplied send right.
3979 * Returns:
3980 * KERN_SUCCESS Changed the special port.
3981 * KERN_INVALID_ARGUMENT The thread is null,
3982 * Illegal mask bit set.
3983 * Illegal exception behavior
3984 * KERN_FAILURE The thread is dead.
3985 */
3986
3987 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)3988 thread_set_exception_ports(
3989 thread_t thread,
3990 exception_mask_t exception_mask,
3991 ipc_port_t new_port,
3992 exception_behavior_t new_behavior,
3993 thread_state_flavor_t new_flavor)
3994 {
3995 ipc_port_t old_port[EXC_TYPES_COUNT];
3996 thread_ro_t tro;
3997 boolean_t privileged = task_is_privileged(current_task());
3998
3999 #if CONFIG_MACF
4000 struct label *new_label;
4001 #endif
4002
4003 if (thread == THREAD_NULL) {
4004 return KERN_INVALID_ARGUMENT;
4005 }
4006
4007 if (exception_mask & ~EXC_MASK_VALID) {
4008 return KERN_INVALID_ARGUMENT;
4009 }
4010
4011 if (IP_VALID(new_port)) {
4012 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4013 case EXCEPTION_DEFAULT:
4014 case EXCEPTION_STATE:
4015 case EXCEPTION_STATE_IDENTITY:
4016 case EXCEPTION_IDENTITY_PROTECTED:
4017 break;
4018
4019 default:
4020 return KERN_INVALID_ARGUMENT;
4021 }
4022 }
4023
4024 if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4025 return KERN_INVALID_RIGHT;
4026 }
4027
4028
4029 /*
4030 * Check the validity of the thread_state_flavor by calling the
4031 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4032 * osfmk/mach/ARCHITECTURE/thread_status.h
4033 */
4034 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4035 return KERN_INVALID_ARGUMENT;
4036 }
4037
4038 if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4039 (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4040 && !(new_behavior & MACH_EXCEPTION_CODES)) {
4041 return KERN_INVALID_ARGUMENT;
4042 }
4043
4044 #if CONFIG_MACF
4045 new_label = mac_exc_create_label_for_current_proc();
4046 #endif
4047
4048 tro = get_thread_ro(thread);
4049 thread_mtx_lock(thread);
4050
4051 if (!thread->active) {
4052 thread_mtx_unlock(thread);
4053 #if CONFIG_MACF
4054 mac_exc_free_label(new_label);
4055 #endif
4056 return KERN_FAILURE;
4057 }
4058
4059 if (tro->tro_exc_actions == NULL) {
4060 ipc_thread_init_exc_actions(tro);
4061 }
4062 for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4063 struct exception_action *action = &tro->tro_exc_actions[i];
4064
4065 if ((exception_mask & (1 << i))
4066 #if CONFIG_MACF
4067 && mac_exc_update_action_label(action, new_label) == 0
4068 #endif
4069 ) {
4070 old_port[i] = action->port;
4071 action->port = exception_port_copy_send(new_port);
4072 action->behavior = new_behavior;
4073 action->flavor = new_flavor;
4074 action->privileged = privileged;
4075 } else {
4076 old_port[i] = IP_NULL;
4077 }
4078 }
4079
4080 thread_mtx_unlock(thread);
4081
4082 #if CONFIG_MACF
4083 mac_exc_free_label(new_label);
4084 #endif
4085
4086 for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4087 if (IP_VALID(old_port[i])) {
4088 ipc_port_release_send(old_port[i]);
4089 }
4090 }
4091
4092 if (IP_VALID(new_port)) { /* consume send right */
4093 ipc_port_release_send(new_port);
4094 }
4095
4096 return KERN_SUCCESS;
4097 }
4098
4099 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4100 task_set_exception_ports(
4101 task_t task,
4102 exception_mask_t exception_mask,
4103 ipc_port_t new_port,
4104 exception_behavior_t new_behavior,
4105 thread_state_flavor_t new_flavor)
4106 {
4107 ipc_port_t old_port[EXC_TYPES_COUNT];
4108 boolean_t privileged = task_is_privileged(current_task());
4109 register int i;
4110
4111 #if CONFIG_MACF
4112 struct label *new_label;
4113 #endif
4114
4115 if (task == TASK_NULL) {
4116 return KERN_INVALID_ARGUMENT;
4117 }
4118
4119 if (exception_mask & ~EXC_MASK_VALID) {
4120 return KERN_INVALID_ARGUMENT;
4121 }
4122
4123 if (IP_VALID(new_port)) {
4124 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4125 case EXCEPTION_DEFAULT:
4126 case EXCEPTION_STATE:
4127 case EXCEPTION_STATE_IDENTITY:
4128 case EXCEPTION_IDENTITY_PROTECTED:
4129 break;
4130
4131 default:
4132 return KERN_INVALID_ARGUMENT;
4133 }
4134 }
4135
4136 if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4137 return KERN_INVALID_RIGHT;
4138 }
4139
4140
4141 /*
4142 * Check the validity of the thread_state_flavor by calling the
4143 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4144 * osfmk/mach/ARCHITECTURE/thread_status.h
4145 */
4146 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4147 return KERN_INVALID_ARGUMENT;
4148 }
4149
4150 if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4151 (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4152 && !(new_behavior & MACH_EXCEPTION_CODES)) {
4153 return KERN_INVALID_ARGUMENT;
4154 }
4155
4156 #if CONFIG_MACF
4157 new_label = mac_exc_create_label_for_current_proc();
4158 #endif
4159
4160 itk_lock(task);
4161
4162 /*
4163 * Allow setting exception port during the span of ipc_task_init() to
4164 * ipc_task_terminate(). posix_spawn() port actions can set exception
4165 * ports on target task _before_ task IPC access is enabled.
4166 */
4167 if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4168 itk_unlock(task);
4169 #if CONFIG_MACF
4170 mac_exc_free_label(new_label);
4171 #endif
4172 return KERN_FAILURE;
4173 }
4174
4175 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4176 if ((exception_mask & (1 << i))
4177 #if CONFIG_MACF
4178 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4179 #endif
4180 ) {
4181 old_port[i] = task->exc_actions[i].port;
4182 task->exc_actions[i].port =
4183 exception_port_copy_send(new_port);
4184 task->exc_actions[i].behavior = new_behavior;
4185 task->exc_actions[i].flavor = new_flavor;
4186 task->exc_actions[i].privileged = privileged;
4187 } else {
4188 old_port[i] = IP_NULL;
4189 }
4190 }
4191
4192 itk_unlock(task);
4193
4194 #if CONFIG_MACF
4195 mac_exc_free_label(new_label);
4196 #endif
4197
4198 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4199 if (IP_VALID(old_port[i])) {
4200 ipc_port_release_send(old_port[i]);
4201 }
4202 }
4203
4204 if (IP_VALID(new_port)) { /* consume send right */
4205 ipc_port_release_send(new_port);
4206 }
4207
4208 return KERN_SUCCESS;
4209 }
4210
4211 /*
4212 * Routine: thread/task_swap_exception_ports [kernel call]
4213 * Purpose:
4214 * Sets the thread/task exception port, flavor and
4215 * behavior for the exception types specified by the
4216 * mask.
4217 *
4218 * The old ports, behavior and flavors are returned
4219 * Count specifies the array sizes on input and
4220 * the number of returned ports etc. on output. The
4221 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
4223 * array specifies the corresponding exception type(s).
4224 *
4225 * Conditions:
4226 * Nothing locked. If successful, consumes
4227 * the supplied send right.
4228 *
 * Returns up to [in] CountCnt elements.
4230 * Returns:
4231 * KERN_SUCCESS Changed the special port.
4232 * KERN_INVALID_ARGUMENT The thread is null,
4233 * Illegal mask bit set.
4234 * Illegal exception behavior
4235 * KERN_FAILURE The thread is dead.
4236 */
4237
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	/* ---- Argument validation, no locks held ---- */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable ports cannot be registered as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* IDENTITY_PROTECTED and BACKTRACE_PREFERRED require MACH_EXCEPTION_CODES. */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	/* Exception actions are allocated lazily on first registration. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * Return the old actions, deduplicated: identical (port, behavior,
	 * flavor) triples share one output slot whose mask accumulates the
	 * exception bits. Stops early once the caller's arrays are full.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: append a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Stash the old port; released below after unlocking. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release the displaced send rights recorded above. Only slots the
	 * loop reached (indices below the final `i') were initialized.
	 * NOTE(review): the unsigned decrement terminates only because
	 * FIRST_EXCEPTION >= 1 — confirm against the exception_types header.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4376
4377 kern_return_t
task_swap_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4378 task_swap_exception_ports(
4379 task_t task,
4380 exception_mask_t exception_mask,
4381 ipc_port_t new_port,
4382 exception_behavior_t new_behavior,
4383 thread_state_flavor_t new_flavor,
4384 exception_mask_array_t masks,
4385 mach_msg_type_number_t *CountCnt,
4386 exception_port_array_t ports,
4387 exception_behavior_array_t behaviors,
4388 thread_state_flavor_array_t flavors)
4389 {
4390 ipc_port_t old_port[EXC_TYPES_COUNT];
4391 boolean_t privileged = task_is_privileged(current_task());
4392 unsigned int i, j, count;
4393
4394 #if CONFIG_MACF
4395 struct label *new_label;
4396 #endif
4397
4398 if (task == TASK_NULL) {
4399 return KERN_INVALID_ARGUMENT;
4400 }
4401
4402 if (exception_mask & ~EXC_MASK_VALID) {
4403 return KERN_INVALID_ARGUMENT;
4404 }
4405
4406 if (IP_VALID(new_port)) {
4407 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4408 case EXCEPTION_DEFAULT:
4409 case EXCEPTION_STATE:
4410 case EXCEPTION_STATE_IDENTITY:
4411 case EXCEPTION_IDENTITY_PROTECTED:
4412 break;
4413
4414 default:
4415 return KERN_INVALID_ARGUMENT;
4416 }
4417 }
4418
4419 if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4420 return KERN_INVALID_RIGHT;
4421 }
4422
4423
4424 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4425 return KERN_INVALID_ARGUMENT;
4426 }
4427
4428 if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4429 (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4430 && !(new_behavior & MACH_EXCEPTION_CODES)) {
4431 return KERN_INVALID_ARGUMENT;
4432 }
4433
4434 #if CONFIG_MACF
4435 new_label = mac_exc_create_label_for_current_proc();
4436 #endif
4437
4438 itk_lock(task);
4439
4440 if (!task->ipc_active) {
4441 itk_unlock(task);
4442 #if CONFIG_MACF
4443 mac_exc_free_label(new_label);
4444 #endif
4445 return KERN_FAILURE;
4446 }
4447
4448 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
4449 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
4450 if ((exception_mask & (1 << i))
4451 #if CONFIG_MACF
4452 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4453 #endif
4454 ) {
4455 for (j = 0; j < count; j++) {
4456 /*
4457 * search for an identical entry, if found
4458 * set corresponding mask for this exception.
4459 */
4460 if (task->exc_actions[i].port == ports[j] &&
4461 task->exc_actions[i].behavior == behaviors[j] &&
4462 task->exc_actions[i].flavor == flavors[j]) {
4463 masks[j] |= (1 << i);
4464 break;
4465 }
4466 }
4467
4468 if (j == count) {
4469 masks[j] = (1 << i);
4470 ports[j] = exception_port_copy_send(task->exc_actions[i].port);
4471 behaviors[j] = task->exc_actions[i].behavior;
4472 flavors[j] = task->exc_actions[i].flavor;
4473 ++count;
4474 }
4475
4476 old_port[i] = task->exc_actions[i].port;
4477
4478 task->exc_actions[i].port = exception_port_copy_send(new_port);
4479 task->exc_actions[i].behavior = new_behavior;
4480 task->exc_actions[i].flavor = new_flavor;
4481 task->exc_actions[i].privileged = privileged;
4482 } else {
4483 old_port[i] = IP_NULL;
4484 }
4485 }
4486
4487 itk_unlock(task);
4488
4489 #if CONFIG_MACF
4490 mac_exc_free_label(new_label);
4491 #endif
4492
4493 while (--i >= FIRST_EXCEPTION) {
4494 if (IP_VALID(old_port[i])) {
4495 ipc_port_release_send(old_port[i]);
4496 }
4497 }
4498
4499 if (IP_VALID(new_port)) { /* consume send right */
4500 ipc_port_release_send(new_port);
4501 }
4502
4503 *CountCnt = count;
4504
4505 return KERN_SUCCESS;
4506 }
4507
4508 /*
4509 * Routine: thread/task_get_exception_ports [kernel call]
4510 * Purpose:
4511 * Clones a send right for each of the thread/task's exception
4512 * ports specified in the mask and returns the behaviour
4513 * and flavor of said port.
4514 *
 4515  *	Returns up to [in] CountCnt elements.
4516 *
4517 * Conditions:
4518 * Nothing locked.
4519 * Returns:
4520 * KERN_SUCCESS Extracted a send right.
4521 * KERN_INVALID_ARGUMENT The thread is null,
4522 * Invalid special port,
4523 * Illegal mask bit set.
4524 * KERN_FAILURE The thread is dead.
4525 */
4526 static kern_return_t
thread_get_exception_ports_internal(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4527 thread_get_exception_ports_internal(
4528 thread_t thread,
4529 exception_mask_t exception_mask,
4530 exception_mask_array_t masks,
4531 mach_msg_type_number_t *CountCnt,
4532 exception_port_info_array_t ports_info,
4533 exception_port_array_t ports,
4534 exception_behavior_array_t behaviors,
4535 thread_state_flavor_array_t flavors)
4536 {
4537 unsigned int count;
4538 boolean_t info_only = (ports_info != NULL);
4539 thread_ro_t tro;
4540 ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */
4541
4542 if (thread == THREAD_NULL) {
4543 return KERN_INVALID_ARGUMENT;
4544 }
4545
4546 if (exception_mask & ~EXC_MASK_VALID) {
4547 return KERN_INVALID_ARGUMENT;
4548 }
4549
4550 if (!info_only && !ports) {
4551 return KERN_INVALID_ARGUMENT;
4552 }
4553
4554 tro = get_thread_ro(thread);
4555 thread_mtx_lock(thread);
4556
4557 if (!thread->active) {
4558 thread_mtx_unlock(thread);
4559
4560 return KERN_FAILURE;
4561 }
4562
4563 count = 0;
4564
4565 if (tro->tro_exc_actions == NULL) {
4566 goto done;
4567 }
4568
4569 for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
4570 if (exception_mask & (1 << i)) {
4571 ipc_port_t exc_port = tro->tro_exc_actions[i].port;
4572 exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
4573 thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;
4574
4575 for (j = 0; j < count; ++j) {
4576 /*
4577 * search for an identical entry, if found
4578 * set corresponding mask for this exception.
4579 */
4580 if (exc_port == port_ptrs[j] &&
4581 exc_behavior == behaviors[j] &&
4582 exc_flavor == flavors[j]) {
4583 masks[j] |= (1 << i);
4584 break;
4585 }
4586 }
4587
4588 if (j == count && count < *CountCnt) {
4589 masks[j] = (1 << i);
4590 port_ptrs[j] = exc_port;
4591
4592 if (info_only) {
4593 if (!IP_VALID(exc_port)) {
4594 ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
4595 } else {
4596 uintptr_t receiver;
4597 (void)ipc_port_get_receiver_task(exc_port, &receiver);
4598 ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
4599 ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
4600 }
4601 } else {
4602 ports[j] = exception_port_copy_send(exc_port);
4603 }
4604 behaviors[j] = exc_behavior;
4605 flavors[j] = exc_flavor;
4606 ++count;
4607 }
4608 }
4609 }
4610
4611 done:
4612 thread_mtx_unlock(thread);
4613
4614 *CountCnt = count;
4615
4616 return KERN_SUCCESS;
4617 }
4618
4619 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4620 thread_get_exception_ports(
4621 thread_t thread,
4622 exception_mask_t exception_mask,
4623 exception_mask_array_t masks,
4624 mach_msg_type_number_t *CountCnt,
4625 exception_port_array_t ports,
4626 exception_behavior_array_t behaviors,
4627 thread_state_flavor_array_t flavors)
4628 {
4629 return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4630 NULL, ports, behaviors, flavors);
4631 }
4632
4633 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4634 thread_get_exception_ports_info(
4635 mach_port_t port,
4636 exception_mask_t exception_mask,
4637 exception_mask_array_t masks,
4638 mach_msg_type_number_t *CountCnt,
4639 exception_port_info_array_t ports_info,
4640 exception_behavior_array_t behaviors,
4641 thread_state_flavor_array_t flavors)
4642 {
4643 kern_return_t kr;
4644
4645 thread_t thread = convert_port_to_thread_read_no_eval(port);
4646
4647 if (thread == THREAD_NULL) {
4648 return KERN_INVALID_ARGUMENT;
4649 }
4650
4651 kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4652 ports_info, NULL, behaviors, flavors);
4653
4654 thread_deallocate(thread);
4655 return kr;
4656 }
4657
4658 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4659 thread_get_exception_ports_from_user(
4660 mach_port_t port,
4661 exception_mask_t exception_mask,
4662 exception_mask_array_t masks,
4663 mach_msg_type_number_t *CountCnt,
4664 exception_port_array_t ports,
4665 exception_behavior_array_t behaviors,
4666 thread_state_flavor_array_t flavors)
4667 {
4668 kern_return_t kr;
4669
4670 thread_t thread = convert_port_to_thread(port);
4671
4672 if (thread == THREAD_NULL) {
4673 return KERN_INVALID_ARGUMENT;
4674 }
4675
4676 kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4677
4678 thread_deallocate(thread);
4679 return kr;
4680 }
4681
4682 static kern_return_t
task_get_exception_ports_internal(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4683 task_get_exception_ports_internal(
4684 task_t task,
4685 exception_mask_t exception_mask,
4686 exception_mask_array_t masks,
4687 mach_msg_type_number_t *CountCnt,
4688 exception_port_info_array_t ports_info,
4689 exception_port_array_t ports,
4690 exception_behavior_array_t behaviors,
4691 thread_state_flavor_array_t flavors)
4692 {
4693 unsigned int count;
4694 boolean_t info_only = (ports_info != NULL);
4695 ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */
4696
4697 if (task == TASK_NULL) {
4698 return KERN_INVALID_ARGUMENT;
4699 }
4700
4701 if (exception_mask & ~EXC_MASK_VALID) {
4702 return KERN_INVALID_ARGUMENT;
4703 }
4704
4705 if (!info_only && !ports) {
4706 return KERN_INVALID_ARGUMENT;
4707 }
4708
4709 itk_lock(task);
4710
4711 if (!task->ipc_active) {
4712 itk_unlock(task);
4713 return KERN_FAILURE;
4714 }
4715
4716 count = 0;
4717
4718 for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
4719 if (exception_mask & (1 << i)) {
4720 ipc_port_t exc_port = task->exc_actions[i].port;
4721 exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
4722 thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;
4723
4724 for (j = 0; j < count; ++j) {
4725 /*
4726 * search for an identical entry, if found
4727 * set corresponding mask for this exception.
4728 */
4729 if (exc_port == port_ptrs[j] &&
4730 exc_behavior == behaviors[j] &&
4731 exc_flavor == flavors[j]) {
4732 masks[j] |= (1 << i);
4733 break;
4734 }
4735 }
4736
4737 if (j == count && count < *CountCnt) {
4738 masks[j] = (1 << i);
4739 port_ptrs[j] = exc_port;
4740
4741 if (info_only) {
4742 if (!IP_VALID(exc_port)) {
4743 ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
4744 } else {
4745 uintptr_t receiver;
4746 (void)ipc_port_get_receiver_task(exc_port, &receiver);
4747 ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
4748 ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
4749 }
4750 } else {
4751 ports[j] = exception_port_copy_send(exc_port);
4752 }
4753 behaviors[j] = exc_behavior;
4754 flavors[j] = exc_flavor;
4755 ++count;
4756 }
4757 }
4758 }
4759
4760 itk_unlock(task);
4761
4762 *CountCnt = count;
4763
4764 return KERN_SUCCESS;
4765 }
4766
4767 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4768 task_get_exception_ports(
4769 task_t task,
4770 exception_mask_t exception_mask,
4771 exception_mask_array_t masks,
4772 mach_msg_type_number_t *CountCnt,
4773 exception_port_array_t ports,
4774 exception_behavior_array_t behaviors,
4775 thread_state_flavor_array_t flavors)
4776 {
4777 return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4778 NULL, ports, behaviors, flavors);
4779 }
4780
4781 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4782 task_get_exception_ports_info(
4783 mach_port_t port,
4784 exception_mask_t exception_mask,
4785 exception_mask_array_t masks,
4786 mach_msg_type_number_t *CountCnt,
4787 exception_port_info_array_t ports_info,
4788 exception_behavior_array_t behaviors,
4789 thread_state_flavor_array_t flavors)
4790 {
4791 kern_return_t kr;
4792
4793 task_t task = convert_port_to_task_read_no_eval(port);
4794
4795 if (task == TASK_NULL) {
4796 return KERN_INVALID_ARGUMENT;
4797 }
4798
4799 kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4800 ports_info, NULL, behaviors, flavors);
4801
4802 task_deallocate(task);
4803 return kr;
4804 }
4805
4806 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4807 task_get_exception_ports_from_user(
4808 mach_port_t port,
4809 exception_mask_t exception_mask,
4810 exception_mask_array_t masks,
4811 mach_msg_type_number_t *CountCnt,
4812 exception_port_array_t ports,
4813 exception_behavior_array_t behaviors,
4814 thread_state_flavor_array_t flavors)
4815 {
4816 kern_return_t kr;
4817
4818 task_t task = convert_port_to_task(port);
4819
4820 if (task == TASK_NULL) {
4821 return KERN_INVALID_ARGUMENT;
4822 }
4823
4824 kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4825
4826 task_deallocate(task);
4827 return kr;
4828 }
4829
4830 /*
4831 * Routine: ipc_thread_port_unpin
4832 * Purpose:
4833 *
4834 * Called on the thread when it's terminating so that the last ref
4835 * can be deallocated without a guard exception.
4836 * Conditions:
4837 * Thread mutex lock is held.
4838 */
4839 void
ipc_thread_port_unpin(ipc_port_t port)4840 ipc_thread_port_unpin(
4841 ipc_port_t port)
4842 {
4843 if (port == IP_NULL) {
4844 return;
4845 }
4846 ip_mq_lock(port);
4847 port->ip_pinned = 0;
4848 ip_mq_unlock(port);
4849 }
4850