1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94 #include <kdp/kdp_dyld.h>
95
96 #include <vm/vm_map.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_protos.h>
99
100 #include <security/mac_mach_internal.h>
101
102 #if CONFIG_CSR
103 #include <sys/csr.h>
104 #endif
105
106 #include <sys/code_signing.h> /* for developer mode state */
107
108 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
109 extern int cs_relax_platform_task_ports;
110 #endif
111
112 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
113
114 __options_decl(ipc_reply_port_type_t, uint32_t, {
115 IRPT_NONE = 0x00,
116 IRPT_USER = 0x01,
117 IRPT_KERNEL = 0x02,
118 });
119
120 /* forward declarations */
121 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
122 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
123 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
124 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
125 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
126 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
127 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
128 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
129 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
130 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
131
132 /*
133 * Routine: ipc_task_init
134 * Purpose:
135 * Initialize a task's IPC state.
136 *
137 * If non-null, some state will be inherited from the parent.
138 * The parent must be appropriately initialized.
139 * Conditions:
140 * Nothing locked.
141 */
142
void
ipc_task_init(
	task_t task,
	task_t parent)
{
	ipc_space_t space;
	ipc_port_t kport;	/* task control port */
	ipc_port_t nport;	/* task name port */
	ipc_port_t pport;	/* port published as itk_self */
	kern_return_t kr;
	int i;


	/* Every task owns a private IPC space; failure to create one is fatal. */
	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/*
	 * Allocate the control and name ports with no kobject attached yet
	 * (IKO_NULL); they are wired to the task in ipc_task_enable().
	 * At init time the pport (itk_self) is simply the control port;
	 * ipc_task_set_immovable_pinned() may replace it later.
	 */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Attach MAC labels to the usable exception slots; slot 0 is unused. */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* No parent: start with empty exception/registered/bootstrap state. */
		ipc_port_t port;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/*
		 * Inherit send rights from the (already initialized) parent,
		 * under the parent's itk lock so the snapshot is consistent.
		 */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
266
267 /*
268 * Routine: ipc_task_set_immovable_pinned
269 * Purpose:
270 * Make a task's control port immovable and/or pinned
271 * according to its control port options. If control port
272 * is immovable, allocate an immovable control port for the
273 * task and optionally pin it.
274 * Conditions:
275 * Task's control port is movable and not pinned.
276 */
void
ipc_task_set_immovable_pinned(
	task_t task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	ipc_port_t new_pport;

	/* pport is the same as kport at ipc_task_init() time */
	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
	assert(task->itk_self == task->itk_settable_self);
	assert(!task_is_a_corpse(task));

	/* only tasks opt in immovable control port can have pinned control port */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocate the immovable (and optionally pinned) replacement port */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);

		/*
		 * Relabel the old movable kport with IPC_LABEL_SUBST_TASK and
		 * point its ikol_alt_port at the new immovable port.
		 */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		/* publish the new port as itk_self under the itk lock */
		itk_lock(task);
		task->itk_self = new_pport;
		itk_unlock(task);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
	}
}
311
312 /*
313 * Routine: ipc_task_enable
314 * Purpose:
315 * Enable a task for IPC access.
316 * Conditions:
317 * Nothing locked.
318 */
void
ipc_task_enable(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t pport;

	itk_lock(task);
	if (!task->active) {
		/*
		 * task has been terminated before we can enable IPC access.
		 * The check is to make sure we don't accidentally re-enable
		 * the task ports _after_ they've been disabled during
		 * task_terminate_internal(), in which case we will hit the
		 * !task->ipc_active assertion in ipc_task_terminate().
		 *
		 * Technically we should grab task lock when checking task
		 * active bit, but since task termination unsets task->active
		 * _before_ calling ipc_task_disable(), we can always see the
		 * truth with just itk_lock() and bail if disable has been called.
		 */
		itk_unlock(task);
		return;
	}

	assert(!task->ipc_active || task_is_a_corpse(task));
	task->ipc_active = true;

	/*
	 * Attach the task kobject to every port flavor that has been
	 * allocated so far (inspect/read are lazily made and may be null).
	 */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
	}
	/* a pport distinct from kport only exists for immovable-port tasks */
	pport = task->itk_self;
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(task));
		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
	}

	itk_unlock(task);
}
374
375 /*
376 * Routine: ipc_task_disable
377 * Purpose:
378 * Disable IPC access to a task.
379 * Conditions:
380 * Nothing locked.
381 */
382
void
ipc_task_disable(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* Detach the task kobject from every port flavor that exists. */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
453
454 /*
455 * Routine: ipc_task_terminate
456 * Purpose:
457 * Clean up and destroy a task's IPC state.
458 * Conditions:
459 * Nothing locked. The task must be suspended.
460 * (Or the current thread must be in the task.)
461 */
462
void
ipc_task_terminate(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;
	ipc_port_t sself;
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	/* snapshot and clear every port flavor under the itk lock */
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* take ownership of the dyld notifier array, freed after unlock */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
592
593 /*
594 * Routine: ipc_task_reset
595 * Purpose:
596 * Reset a task's IPC state to protect it when
597 * it enters an elevated security context. The
598 * task name port can remain the same - since it
599 * represents no specific privilege.
600 * Conditions:
601 * Nothing locked. The task must be suspended.
602 * (Or the current thread must be in the task.)
603 */
604
void
ipc_task_reset(
	task_t task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	/* allocate the replacement control port before taking any locks */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port has not been enabled, and is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		/* undo the speculative allocation above */
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for corpse, needed to arm no-senders notification */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(new_kport);
	}

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_mq_lock(old_kport);
	/* clears ikol_alt_port */
	ipc_kobject_disable_locked(old_kport, IKOT_TASK_CONTROL);
	task->exec_token += 1;
	ip_mq_unlock(old_kport);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ip_mq_lock(old_pport);
		ipc_kobject_disable_locked(old_pport, IKOT_TASK_CONTROL);
		task->exec_token += 1;
		ip_mq_unlock(old_pport);
	}

	/*
	 * Strip non-privileged exception ports; their send rights are
	 * collected in old_exc_actions and released after unlocking.
	 */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	if (old_rdport != IP_NULL) {
		/* read port ikol_alt_port may point to kport, dealloc first */
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
	}
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
	}
}
757
758 /*
759 * Routine: ipc_thread_init
760 * Purpose:
761 * Initialize a thread's IPC state.
762 * Conditions:
763 * Nothing locked.
764 */
765
void
ipc_thread_init(
	task_t task,
	thread_t thread,
	thread_ro_t tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t kport;	/* thread control port */
	ipc_port_t pport;	/* port stored in tro_self_port */
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/*
		 * The movable kport is labeled for substitution and points at
		 * the immovable pport via ikol_alt_port.
		 */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	/* publish the ports into the read-only thread structure */
	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
819
/*
 * Make the main thread's control port immovable (and possibly pinned)
 * once the owning task's control port options are known; mirrors
 * ipc_task_set_immovable_pinned() for the thread case.
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		/* relabel old kport for substitution, pointing at the new port */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		/* tro_self_port lives in read-only memory; update via zalloc_ro */
		thread_mtx_lock(thread);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
859
/*
 * Wrapper type so the per-thread exception action array can be
 * allocated and freed as one typed kalloc_type() object.
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
863
864 static void
ipc_thread_init_exc_actions(thread_ro_t tro)865 ipc_thread_init_exc_actions(thread_ro_t tro)
866 {
867 struct exception_action *actions;
868
869 actions = kalloc_type(struct thread_init_exc_actions,
870 Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
871
872 #if CONFIG_MACF
873 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
874 mac_exc_associate_action_label(&actions[i],
875 mac_exc_create_label(&actions[i]));
876 }
877 #endif
878
879 zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
880 }
881
882 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)883 ipc_thread_destroy_exc_actions(thread_ro_t tro)
884 {
885 struct exception_action *actions = tro->tro_exc_actions;
886
887 if (actions) {
888 #if CONFIG_MACF
889 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
890 mac_exc_free_action_label(actions + i);
891 }
892 #endif
893
894 zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
895 struct thread_init_exc_actions *tr_actions =
896 (struct thread_init_exc_actions *)actions;
897 kfree_type(struct thread_init_exc_actions, tr_actions);
898 }
899 }
900
/*
 * Write the three consecutive port fields (tro_self_port,
 * tro_settable_self_port, tro_ports[]) from the template into the
 * read-only thread structure with a single ranged zalloc_ro_mut().
 */
static void
ipc_thread_ro_update_ports(
	thread_ro_t tro,
	const struct thread_ro *tro_tpl)
{
	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
	vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);

	/*
	 * The single ranged write is only valid if the three fields are laid
	 * out back to back; these asserts pin the expected layout.
	 * NOTE(review): `struct ipc_port_t *` names an otherwise undeclared
	 * struct tag (elsewhere the tag is `ipc_port`); harmless here since
	 * only the pointer size is used, but presumably a typo — confirm.
	 */
	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
	    offsetof(struct thread_ro, tro_self_port) +
	    sizeof(struct ipc_port_t *));
	static_assert(offsetof(struct thread_ro, tro_ports) ==
	    offsetof(struct thread_ro, tro_self_port) +
	    2 * sizeof(struct ipc_port_t *));
	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
	    offs, &tro_tpl->tro_self_port, size);
}
918
919 /*
920 * Routine: ipc_thread_disable
921 * Purpose:
922 * Clean up and destroy a thread's IPC state.
923 * Conditions:
924 * Thread locked.
925 */
void
ipc_thread_disable(
	thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	/* a distinct pport only exists for immovable-port tasks */
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
971
972 /*
973 * Routine: ipc_thread_terminate
974 * Purpose:
975 * Clean up and destroy a thread's IPC state.
976 * Conditions:
977 * Nothing locked.
978 */
979
void
ipc_thread_terminate(
	thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
	ipc_port_t pport = IP_NULL;
	ipc_port_t sport = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	/* snapshot all port fields under the thread mutex */
	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
	sport = tro->tro_settable_self_port;

	if (kport != IP_NULL) {
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}

		/* zero out all the RO port fields in one write */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
	}
	/* control port can also have ikol_alt_port */
	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
	}
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1052
1053 /*
1054 * Routine: ipc_thread_reset
1055 * Purpose:
1056 * Reset the IPC state for a given Mach thread when
1057 * its task enters an elevated security context.
1058 * All flavors of thread port and its exception ports have
1059 * to be reset. Its RPC reply port cannot have any
1060 * rights outstanding, so it should be fine. The thread
1061 * inspect and read port are set to NULL.
1062 * Conditions:
1063 * Nothing locked.
1064 */
1065
void
ipc_thread_reset(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport, old_pport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t has_old_exc_actions = FALSE;
	boolean_t thread_is_immovable;
	int i;

#if CONFIG_MACF
	/* fresh MAC label applied to the exception actions that do not survive */
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);

	/* allocate the replacement control port before taking the thread mutex */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
	 * port is still movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * thread port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	thread_mtx_lock(thread);

	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

	old_sself = tro->tro_settable_self_port;
	old_pport = tro->tro_self_port;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
		/*
		 * NOTE(review): new_pport aliases new_kport above, so this
		 * second dealloc would target the already-deallocated port —
		 * confirm this branch is unreachable when thread_is_immovable
		 * is set.
		 */
		if (thread_is_immovable) {
			ipc_kobject_dealloc_port(new_pport, 0,
			    IKOT_THREAD_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ipc_active = true;

	/* publish the replacement self/control ports in the RO struct */
	struct thread_ro tpl = {
		.tro_self_port = new_pport,
		/* we just made the port, no need to triple check */
		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
	};

	ipc_thread_ro_update_ports(tro, &tpl);

	if (old_kport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}
	if (thread_is_immovable && old_pport != IP_NULL) {
		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* privileged action survives; nothing to release */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel ports */
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
	}
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
	}
	if (old_pport != old_kport && old_pport != IP_NULL) {
		assert(thread_is_immovable);
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1208
1209 /*
1210 * Routine: retrieve_task_self_fast
1211 * Purpose:
1212 * Optimized version of retrieve_task_self,
1213 * that only works for the current task.
1214 *
1215 * Return a send right (possibly null/dead)
1216 * for the task's user-visible self port.
1217 * Conditions:
1218 * Nothing locked.
1219 */
1220
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

#if CONFIG_CSR
	/*
	 * A settable-self distinct from the control port means user-space
	 * interposed on task-self; hand out a copy of that right instead.
	 */
	if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		/* no interposing, return the IMMOVABLE port */
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
#if (DEBUG || DEVELOPMENT)
		/* sanity: immovability/pinning of the right must match the task's policy */
		if (task_is_immovable(task)) {
			assert(ip_is_immovable_send(port));
			if (task_is_pinned(task)) {
				/* pinned port is also immovable */
				assert(ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	itk_unlock(task);

	return port;
}
1259
1260 /*
1261 * Routine: mach_task_is_self
1262 * Purpose:
1263 * [MIG call] Checks if the task (control/read/inspect/name/movable)
1264 * port is pointing to current_task.
1265 */
1266 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1267 mach_task_is_self(
1268 task_t task,
1269 boolean_t *is_self)
1270 {
1271 if (task == TASK_NULL) {
1272 return KERN_INVALID_ARGUMENT;
1273 }
1274
1275 *is_self = (task == current_task());
1276
1277 return KERN_SUCCESS;
1278 }
1279
1280 /*
1281 * Routine: retrieve_thread_self_fast
1282 * Purpose:
1283 * Return a send right (possibly null/dead)
1284 * for the thread's user-visible self port.
1285 *
1286 * Only works for the current thread.
1287 *
1288 * Conditions:
1289 * Nothing locked.
1290 */
1291
ipc_port_t
retrieve_thread_self_fast(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(tro->tro_self_port != IP_NULL);

#if CONFIG_CSR
	/*
	 * A settable-self distinct from the control port means user-space
	 * interposed on thread-self; hand out a copy of that right instead.
	 */
	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		/* no interposing, return IMMOVABLE_PORT */
		port = ipc_kobject_make_send(tro->tro_self_port, thread,
		    IKOT_THREAD_CONTROL);
#if (DEBUG || DEVELOPMENT)
		/* sanity: immovability/pinning of the right must match task policy */
		if (task_is_immovable(tro->tro_task)) {
			assert(ip_is_immovable_send(port));
			uint16_t tag = thread_get_tag(thread);
			/* terminated threads are unpinned */
			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
				assert(ip_is_pinned(port));
			} else {
				assert(!ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	thread_mtx_unlock(thread);

	return port;
}
1335
1336 /*
1337 * Routine: task_self_trap [mach trap]
1338 * Purpose:
1339 * Give the caller send rights for his own task port.
1340 * Conditions:
1341 * Nothing locked.
1342 * Returns:
1343 * MACH_PORT_NULL if there are any resource failures
1344 * or other errors.
1345 */
1346
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	/* copyout consumes the send right obtained above */
	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);

	/*
	 * When the right is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
	 * when it is pinned.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	if (sright == task->itk_self && sright->ip_pinned &&
	    MACH_PORT_VALID(name)) {
		itk_lock(task);
		if (task->ipc_active) {
			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
				/* first copyout: record the chosen name */
				sright->ip_receiver_name = name;
			} else if (ip_get_receiver_name(sright) != name) {
				/* the pinned name must stay stable for the task's lifetime */
				panic("mach_task_self() name changed");
			}
		}
		itk_unlock(task);
	}
	return name;
}
1383
1384 /*
1385 * Routine: thread_self_trap [mach trap]
1386 * Purpose:
1387 * Give the caller send rights for his own thread port.
1388 * Conditions:
1389 * Nothing locked.
1390 * Returns:
1391 * MACH_PORT_NULL if there are any resource failures
1392 * or other errors.
1393 */
1394
1395 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1396 thread_self_trap(
1397 __unused struct thread_self_trap_args *args)
1398 {
1399 thread_t thread = current_thread();
1400 ipc_space_t space = current_space();
1401 ipc_port_t sright;
1402 mach_port_name_t name;
1403
1404 sright = retrieve_thread_self_fast(thread);
1405 name = ipc_port_copyout_send(sright, space);
1406 return name;
1407 }
1408
1409 /*
1410 * Routine: mach_reply_port [mach trap]
1411 * Purpose:
1412 * Allocate a port for the caller.
1413 * Conditions:
1414 * Nothing locked.
1415 * Returns:
1416 * MACH_PORT_NULL if there are any resource failures
1417 * or other errors.
1418 */
1419
1420 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1421 mach_reply_port(
1422 __unused struct mach_reply_port_args *args)
1423 {
1424 ipc_port_t port;
1425 mach_port_name_t name;
1426 kern_return_t kr;
1427
1428 kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1429 &name, &port);
1430 if (kr == KERN_SUCCESS) {
1431 ip_mq_unlock(port);
1432 } else {
1433 name = MACH_PORT_NULL;
1434 }
1435 return name;
1436 }
1437
1438 /*
1439 * Routine: thread_get_special_reply_port [mach trap]
1440 * Purpose:
1441 * Allocate a special reply port for the calling thread.
1442 * Conditions:
1443 * Nothing locked.
1444 * Returns:
1445 * mach_port_name_t: send right & receive right for special reply port.
1446 * MACH_PORT_NULL if there are any resource failures
1447 * or other errors.
1448 */
1449
mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	thread_t thread = current_thread();
	/* special-reply port with a send right made at allocation time */
	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

	/* unbind the previous thread special reply port, if any */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}

	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
	if (kr == KERN_SUCCESS) {
		/* port comes back locked; bind it to this thread before unlocking */
		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
		ip_mq_unlock(port);
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
1475
1476 /*
1477 * Routine: thread_get_kernel_special_reply_port
1478 * Purpose:
1479 * Allocate a kernel special reply port for the calling thread.
1480 * Conditions:
1481 * Nothing locked.
1482 * Returns:
1483 * Creates and sets kernel special reply port.
1484 * KERN_SUCCESS on Success.
1485 * KERN_FAILURE on Failure.
1486 */
1487
kern_return_t
thread_get_kernel_special_reply_port(void)
{
	ipc_port_t port = IPC_PORT_NULL;
	thread_t thread = current_thread();

	/* unbind (and destroy) the previous kernel special reply port, if any */
	if (IP_VALID(thread->ith_kernel_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
	}

	port = ipc_port_alloc_reply(); /* returns a reference on the port */
	if (port != IPC_PORT_NULL) {
		ip_mq_lock(port);
		/* binding takes its own reference on the port */
		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
		ip_mq_unlock(port);
		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
	}
	/* allocation failure is tolerated: the thread just has no kernel reply port */
	return KERN_SUCCESS;
}
1508
1509 /*
1510 * Routine: ipc_port_bind_special_reply_port_locked
1511 * Purpose:
1512 * Bind the given port to current thread as a special reply port.
1513 * Conditions:
1514 * Port locked.
1515 * Returns:
1516 * None.
1517 */
1518
static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t              port,
	ipc_reply_port_type_t   reply_type)
{
	thread_t thread = current_thread();
	ipc_port_t *reply_portp;

	/* select the user-visible or kernel-internal binding slot */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	/* caller must have unbound any previous port for this slot */
	assert(*reply_portp == NULL);
	assert(port->ip_specialreply);
	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

	/* the binding itself holds a port reference */
	ip_reference(port);
	*reply_portp = port;
	port->ip_messages.imq_srp_owner_thread = thread;

	ipc_special_reply_port_bits_reset(port);
}
1543
1544 /*
1545 * Routine: ipc_port_unbind_special_reply_port
1546 * Purpose:
1547 * Unbind the thread's special reply port.
1548 * If the special port has threads waiting on turnstile,
1549 * update it's inheritor.
1550 * Condition:
1551 * Nothing locked.
1552 * Returns:
1553 * None.
1554 */
static void
ipc_port_unbind_special_reply_port(
	thread_t                thread,
	ipc_reply_port_type_t   reply_type)
{
	ipc_port_t *reply_portp;

	/* select the user-visible or kernel-internal binding slot */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	ipc_port_t special_reply_port = *reply_portp;

	/* callers guarantee a port is currently bound in this slot */
	ip_mq_lock(special_reply_port);

	*reply_portp = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/*
	 * User reply ports: drop the reference the binding held.
	 * Kernel reply ports: destroy the receive right outright.
	 */
	if (reply_type == IRPT_USER) {
		ip_release(special_reply_port);
	} else {
		ipc_port_dealloc_reply(special_reply_port);
	}
	return;
}
1585
1586 /*
1587 * Routine: thread_dealloc_kernel_special_reply_port
1588 * Purpose:
1589 * Unbind the thread's kernel special reply port.
1590 * If the special port has threads waiting on turnstile,
1591 * update it's inheritor.
1592 * Condition:
1593 * Called on current thread or a terminated thread.
1594 * Returns:
1595 * None.
1596 */
1597
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* kernel special reply ports are destroyed on unbind, not just released */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1603
1604 /*
1605 * Routine: thread_get_special_port [kernel call]
1606 * Purpose:
1607 * Clones a send right for one of the thread's
1608 * special ports.
1609 * Conditions:
1610 * Nothing locked.
1611 * Returns:
1612 * KERN_SUCCESS Extracted a send right.
1613 * KERN_INVALID_ARGUMENT The thread is null.
1614 * KERN_FAILURE The thread is dead.
1615 * KERN_INVALID_ARGUMENT Invalid special port.
1616 */
1617
/* forward declaration; the definition follows the internal helper below */
kern_return_t
thread_get_special_port(
	thread_inspect_t thread,
	int which,
	ipc_port_t *portp);
1623
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t        thread,
	thread_ro_t             tro,
	int                     which,
	ipc_port_t             *portp,
	mach_thread_flavor_t    flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	/* reject requests the caller's port flavor is not allowed to make */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		/* dead thread: no special ports to hand out */
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* if user-space interposed on thread-self, return that right */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1677
1678 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1679 thread_get_special_port(
1680 thread_inspect_t thread,
1681 int which,
1682 ipc_port_t *portp)
1683 {
1684 if (thread == THREAD_NULL) {
1685 return KERN_INVALID_ARGUMENT;
1686 }
1687
1688 return thread_get_special_port_internal(thread, get_thread_ro(thread),
1689 which, portp, THREAD_FLAVOR_CONTROL);
1690 }
1691
/*
 * Make a send right for the caller's own thread control port without
 * port substitution (see ipc_kobject_alloc_subst_once).
 */
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);
	port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* if user-space interposed on thread-self, hand out that right */
	if (tro->tro_settable_self_port != port) {
		port = ipc_port_make_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	}
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1712
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t     *portp)
{
	thread_ro_t             tro;
	ipc_kobject_type_t      kotype;
	mach_thread_flavor_t    flavor;
	kern_return_t           kr = KERN_SUCCESS;

	/* takes a thread reference that must be dropped on every exit path */
	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the supplied port's kobject type to an access flavor */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_thread_inspect_no_eval only accepts the above */
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	thread_deallocate(thread);
	return kr;
}
1769
1770 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1771 special_port_allowed_with_thread_flavor(
1772 int which,
1773 mach_thread_flavor_t flavor)
1774 {
1775 switch (flavor) {
1776 case THREAD_FLAVOR_CONTROL:
1777 return KERN_SUCCESS;
1778
1779 case THREAD_FLAVOR_READ:
1780
1781 switch (which) {
1782 case THREAD_READ_PORT:
1783 case THREAD_INSPECT_PORT:
1784 return KERN_SUCCESS;
1785 default:
1786 return KERN_INVALID_CAPABILITY;
1787 }
1788
1789 case THREAD_FLAVOR_INSPECT:
1790
1791 switch (which) {
1792 case THREAD_INSPECT_PORT:
1793 return KERN_SUCCESS;
1794 default:
1795 return KERN_INVALID_CAPABILITY;
1796 }
1797
1798 default:
1799 return KERN_INVALID_CAPABILITY;
1800 }
1801 }
1802
1803 /*
1804 * Routine: thread_set_special_port [kernel call]
1805 * Purpose:
1806 * Changes one of the thread's special ports,
1807 * setting it to the supplied send right.
1808 * Conditions:
1809 * Nothing locked. If successful, consumes
1810 * the supplied send right.
1811 * Returns:
1812 * KERN_SUCCESS Changed the special port.
1813 * KERN_INVALID_ARGUMENT The thread is null.
1814 * KERN_INVALID_RIGHT Port is marked as immovable.
1815 * KERN_FAILURE The thread is dead.
1816 * KERN_INVALID_ARGUMENT Invalid special port.
1817 * KERN_NO_ACCESS Restricted access to set port.
1818 */
1819
kern_return_t
thread_set_special_port(
	thread_t        thread,
	int             which,
	ipc_port_t      port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro = NULL;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* immovable rights cannot be stashed as a settable special port */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* swap in the new right; old one released below, outside the lock */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* CSR disabled at build time: locals are intentionally unused */
		(void)old;
		(void)result;
		(void)tro;
#endif
		return KERN_NO_ACCESS;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1876
1877 /*
1878 * Routine: task_get_special_port [kernel call]
1879 * Purpose:
1880 * Clones a send right for one of the task's
1881 * special ports.
1882 * Conditions:
1883 * Nothing locked.
1884 * Returns:
1885 * KERN_SUCCESS Extracted a send right.
1886 * KERN_INVALID_ARGUMENT The task is null.
1887 * KERN_FAILURE The task/space is dead.
1888 * KERN_INVALID_ARGUMENT Invalid special port.
1889 */
1890
static kern_return_t
task_get_special_port_internal(
	task_t                  task,
	int                     which,
	ipc_port_t             *portp,
	mach_task_flavor_t      flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* reject requests the caller's port flavor is not allowed to make */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		/* task IPC already torn down */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* if user-space interposed on task-self, return that right */
		if (task->itk_settable_self != port) {
			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
		} else
#endif
		{
			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
		}
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		itk_unlock(task);
		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
		    task, IKOT_TASK_NAME);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = host_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1980
1981 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t     *portp)
{
	/* kernel callers get full control-flavor access (no MACF checks here) */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
1990
/*
 * Make a send right for the caller's own task control port without
 * port substitution (see ipc_kobject_alloc_subst_once).
 */
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	itk_lock(task);
	port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* if user-space interposed on task-self, hand out that right */
	if (task->itk_settable_self != port) {
		port = ipc_port_make_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, task, IKOT_TASK_CONTROL);
	}
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
2011
2012 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t     *portp)
{
	ipc_kobject_type_t      kotype;
	mach_task_flavor_t      flavor;
	kern_return_t           kr = KERN_SUCCESS;

	/* takes a task reference that must be dropped on every exit path */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* map the supplied port's kobject type to an access flavor */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_task_inspect_no_eval only accepts the above */
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	task_deallocate(task);
	return kr;
}
2074
2075 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2076 special_port_allowed_with_task_flavor(
2077 int which,
2078 mach_task_flavor_t flavor)
2079 {
2080 switch (flavor) {
2081 case TASK_FLAVOR_CONTROL:
2082 return KERN_SUCCESS;
2083
2084 case TASK_FLAVOR_READ:
2085
2086 switch (which) {
2087 case TASK_READ_PORT:
2088 case TASK_INSPECT_PORT:
2089 case TASK_NAME_PORT:
2090 return KERN_SUCCESS;
2091 default:
2092 return KERN_INVALID_CAPABILITY;
2093 }
2094
2095 case TASK_FLAVOR_INSPECT:
2096
2097 switch (which) {
2098 case TASK_INSPECT_PORT:
2099 case TASK_NAME_PORT:
2100 return KERN_SUCCESS;
2101 default:
2102 return KERN_INVALID_CAPABILITY;
2103 }
2104
2105 default:
2106 return KERN_INVALID_CAPABILITY;
2107 }
2108 }
2109
2110 /*
2111 * Routine: task_set_special_port [MIG call]
2112 * Purpose:
2113 * Changes one of the task's special ports,
2114 * setting it to the supplied send right.
2115 * Conditions:
2116 * Nothing locked. If successful, consumes
2117 * the supplied send right.
2118 * Returns:
2119 * KERN_SUCCESS Changed the special port.
2120 * KERN_INVALID_ARGUMENT The task is null.
2121 * KERN_INVALID_RIGHT Port is marked as immovable.
2122 * KERN_FAILURE The task/space is dead.
2123 * KERN_INVALID_ARGUMENT Invalid special port.
2124 * KERN_NO_ACCESS Restricted access to set port.
2125 */
2126
kern_return_t
task_set_special_port_from_user(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MACF policy may veto user-space changes to this special port */
	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
		return KERN_DENIED;
	}
#endif

	return task_set_special_port(task, which, port);
}
2145
2146 /* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* driver tasks may never rewire special ports */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/* immovable rights cannot be stashed as a settable special port */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		/* all other special ports: no extra gating here */
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2185
2186 /*
2187 * Routine: task_set_special_port_internal
2188 * Purpose:
2189 * Changes one of the task's special ports,
2190 * setting it to the supplied send right.
2191 * Conditions:
2192 * Nothing locked. If successful, consumes
2193 * the supplied send right.
2194 * Returns:
2195 * KERN_SUCCESS Changed the special port.
2196 * KERN_INVALID_ARGUMENT The task is null.
2197 * KERN_FAILURE The task/space is dead.
2198 * KERN_INVALID_ARGUMENT Invalid special port.
2199 * KERN_NO_ACCESS Restricted access to overwrite port.
2200 */
2201
kern_return_t
task_set_special_port_internal(
	task_t                  task,
	int                     which,
	ipc_port_t              port)
{
	/* Right displaced by the new port; released after dropping the lock. */
	ipc_port_t old = IP_NULL;
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		/* `old` stays IP_NULL: nothing to release for first-time set. */
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Release the displaced right outside the itk lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2279 /*
2280 * Routine: mach_ports_register [kernel call]
2281 * Purpose:
2282 * Stash a handful of port send rights in the task.
2283 * Child tasks will inherit these rights, but they
2284 * must use mach_ports_lookup to acquire them.
2285 *
2286 * The rights are supplied in a (wired) kalloc'd segment.
2287 * Rights which aren't supplied are assumed to be null.
2288 * Conditions:
2289 * Nothing locked. If successful, consumes
2290 * the supplied rights and memory.
2291 * Returns:
2292 * KERN_SUCCESS Stashed the port rights.
2293 * KERN_INVALID_RIGHT Port in array is marked immovable.
2294 * KERN_INVALID_ARGUMENT The task is null.
2295 * KERN_INVALID_ARGUMENT The task is dead.
2296 * KERN_INVALID_ARGUMENT The memory param is null.
2297 * KERN_INVALID_ARGUMENT Too many port rights supplied.
2298 */
2299
2300 kern_return_t
mach_ports_register(task_t task,mach_port_array_t memory,mach_msg_type_number_t portsCnt)2301 mach_ports_register(
2302 task_t task,
2303 mach_port_array_t memory,
2304 mach_msg_type_number_t portsCnt)
2305 {
2306 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
2307 unsigned int i;
2308
2309 if ((task == TASK_NULL) ||
2310 (portsCnt > TASK_PORT_REGISTER_MAX) ||
2311 (portsCnt && memory == NULL)) {
2312 return KERN_INVALID_ARGUMENT;
2313 }
2314
2315 /*
2316 * Pad the port rights with nulls.
2317 */
2318
2319 for (i = 0; i < portsCnt; i++) {
2320 ports[i] = memory[i];
2321 if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
2322 return KERN_INVALID_RIGHT;
2323 }
2324 }
2325 for (; i < TASK_PORT_REGISTER_MAX; i++) {
2326 ports[i] = IP_NULL;
2327 }
2328
2329 itk_lock(task);
2330 if (!task->ipc_active) {
2331 itk_unlock(task);
2332 return KERN_INVALID_ARGUMENT;
2333 }
2334
2335 /*
2336 * Replace the old send rights with the new.
2337 * Release the old rights after unlocking.
2338 */
2339
2340 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2341 ipc_port_t old;
2342
2343 old = task->itk_registered[i];
2344 task->itk_registered[i] = ports[i];
2345 ports[i] = old;
2346 }
2347
2348 itk_unlock(task);
2349
2350 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2351 if (IP_VALID(ports[i])) {
2352 ipc_port_release_send(ports[i]);
2353 }
2354 }
2355
2356 /*
2357 * Now that the operation is known to be successful,
2358 * we can free the memory.
2359 */
2360
2361 if (portsCnt != 0) {
2362 kfree_type(mach_port_t, portsCnt, memory);
2363 }
2364
2365 return KERN_SUCCESS;
2366 }
2367
2368 /*
2369 * Routine: mach_ports_lookup [kernel call]
2370 * Purpose:
2371 * Retrieves (clones) the stashed port send rights.
2372 * Conditions:
2373 * Nothing locked. If successful, the caller gets
2374 * rights and memory.
2375 * Returns:
2376 * KERN_SUCCESS Retrieved the send rights.
2377 * KERN_INVALID_ARGUMENT The task is null.
2378 * KERN_INVALID_ARGUMENT The task is dead.
2379 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
2380 */
2381
2382 kern_return_t
mach_ports_lookup(task_t task,mach_port_array_t * portsp,mach_msg_type_number_t * portsCnt)2383 mach_ports_lookup(
2384 task_t task,
2385 mach_port_array_t *portsp,
2386 mach_msg_type_number_t *portsCnt)
2387 {
2388 ipc_port_t *ports;
2389
2390 if (task == TASK_NULL) {
2391 return KERN_INVALID_ARGUMENT;
2392 }
2393
2394 ports = kalloc_type(ipc_port_t, TASK_PORT_REGISTER_MAX,
2395 Z_WAITOK | Z_ZERO | Z_NOFAIL);
2396
2397 itk_lock(task);
2398 if (!task->ipc_active) {
2399 itk_unlock(task);
2400 kfree_type(ipc_port_t, TASK_PORT_REGISTER_MAX, ports);
2401
2402 return KERN_INVALID_ARGUMENT;
2403 }
2404
2405 for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2406 ports[i] = ipc_port_copy_send_any(task->itk_registered[i]);
2407 }
2408
2409 itk_unlock(task);
2410
2411 *portsp = ports;
2412 *portsCnt = TASK_PORT_REGISTER_MAX;
2413 return KERN_SUCCESS;
2414 }
2415
/*
 * Core policy check deciding whether `caller` may translate a port
 * into `victim`'s task (or thread) of the given flavor.  Returns
 * KERN_SUCCESS when the conversion is permitted.
 */
static kern_return_t
task_conversion_eval_internal(
	task_t                  caller,
	task_t                  victim,
	boolean_t               out_trans,
	int                     flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/*
	 * `flavor` may come from either the task or the thread constant
	 * namespace; the two are numerically aliased (see the
	 * static_asserts in task_conversion_eval()), so both asserts
	 * check the same condition.
	 */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	/* Kernel-task translation is only ever allowed for out-trans. */
	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* cs_relax_platform_task_ports is a development escape hatch. */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2510
2511 kern_return_t
task_conversion_eval(task_t caller,task_t victim,int flavor)2512 task_conversion_eval(task_t caller, task_t victim, int flavor)
2513 {
2514 /* flavor is mach_task_flavor_t or mach_thread_flavor_t */
2515 static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
2516 static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
2517 return task_conversion_eval_internal(caller, victim, FALSE, flavor);
2518 }
2519
2520 static kern_return_t
task_conversion_eval_out_trans(task_t caller,task_t victim,int flavor)2521 task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
2522 {
2523 assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
2524 return task_conversion_eval_internal(caller, victim, TRUE, flavor);
2525 }
2526
2527 /*
2528 * Routine: task_port_kotype_valid_for_flavor
2529 * Purpose:
2530 * Check whether the kobject type of a mach port
2531 * is valid for conversion to a task of given flavor.
2532 */
2533 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2534 task_port_kotype_valid_for_flavor(
2535 natural_t kotype,
2536 mach_task_flavor_t flavor)
2537 {
2538 switch (flavor) {
2539 /* Ascending capability */
2540 case TASK_FLAVOR_NAME:
2541 if (kotype == IKOT_TASK_NAME) {
2542 return TRUE;
2543 }
2544 OS_FALLTHROUGH;
2545 case TASK_FLAVOR_INSPECT:
2546 if (kotype == IKOT_TASK_INSPECT) {
2547 return TRUE;
2548 }
2549 OS_FALLTHROUGH;
2550 case TASK_FLAVOR_READ:
2551 if (kotype == IKOT_TASK_READ) {
2552 return TRUE;
2553 }
2554 OS_FALLTHROUGH;
2555 case TASK_FLAVOR_CONTROL:
2556 if (kotype == IKOT_TASK_CONTROL) {
2557 return TRUE;
2558 }
2559 break;
2560 default:
2561 panic("strange task flavor");
2562 }
2563
2564 return FALSE;
2565 }
2566
2567 /*
2568 * Routine: convert_port_to_task_with_flavor_locked_noref
2569 * Purpose:
2570 * Internal helper routine to convert from a locked port to a task.
2571 * Args:
2572 * port - target port
2573 * flavor - requested task port flavor
2574 * options - port translation options
2575 * Conditions:
2576 * Port is locked and active.
2577 */
static task_t
convert_port_to_task_with_flavor_locked_noref(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_kobject_type_t type = ip_kotype(port);
	task_t task;

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* The port's kobject type must grant at least `flavor` capability. */
	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
		return TASK_NULL;
	}

	task = ipc_kobject_get_locked(port, type);
	if (task == TASK_NULL) {
		return TASK_NULL;
	}

	/* Corpse tasks only translate when the caller explicitly allows it. */
	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
		assert(flavor == TASK_FLAVOR_CONTROL);
		return TASK_NULL;
	}

	/* TODO: rdar://42389187 */
	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
	}

	/*
	 * Apply the conversion policy unless the caller opted out.
	 * KERN_SUCCESS is 0, so a non-zero return means "denied".
	 */
	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
	    task_conversion_eval(current_task(), task, flavor)) {
		return TASK_NULL;
	}

	/* Borrowed pointer only: the caller takes no reference here. */
	return task;
}
2616
2617 /*
2618 * Routine: convert_port_to_task_with_flavor_locked
2619 * Purpose:
2620 * Internal helper routine to convert from a locked port to a task.
2621 * Args:
2622 * port - target port
2623 * flavor - requested task port flavor
2624 * options - port translation options
2625 * grp - task reference group
2626 * Conditions:
2627 * Port is locked and active.
2628 * Produces task ref or TASK_NULL.
2629 */
2630 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2631 convert_port_to_task_with_flavor_locked(
2632 ipc_port_t port,
2633 mach_task_flavor_t flavor,
2634 port_intrans_options_t options,
2635 task_grp_t grp)
2636 {
2637 task_t task;
2638
2639 task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2640 options);
2641
2642 if (task != TASK_NULL) {
2643 task_reference_grp(task, grp);
2644 }
2645
2646 return task;
2647 }
2648
2649 /*
2650 * Routine: convert_port_to_task_with_exec_token
2651 * Purpose:
2652 * Convert from a port to a task and return
2653 * the exec token stored in the task.
2654 * Doesn't consume the port ref; produces a task ref,
2655 * which may be null.
2656 * Conditions:
2657 * Nothing locked.
2658 */
2659 task_t
convert_port_to_task_with_exec_token(ipc_port_t port,uint32_t * exec_token)2660 convert_port_to_task_with_exec_token(
2661 ipc_port_t port,
2662 uint32_t *exec_token)
2663 {
2664 task_t task = TASK_NULL;
2665 task_t self = current_task();
2666
2667 if (IP_VALID(port)) {
2668 if (port == self->itk_self) {
2669 if (exec_token) {
2670 /*
2671 * This is ok to do without a lock,
2672 * from the perspective of `current_task()`
2673 * this token never changes, except
2674 * for the thread doing the exec.
2675 */
2676 *exec_token = self->exec_token;
2677 }
2678 task_reference_grp(self, TASK_GRP_KERNEL);
2679 return self;
2680 }
2681
2682 ip_mq_lock(port);
2683 if (ip_active(port)) {
2684 task = convert_port_to_task_with_flavor_locked(port,
2685 TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE,
2686 TASK_GRP_KERNEL);
2687 }
2688 ip_mq_unlock(port);
2689 }
2690
2691 if (task) {
2692 *exec_token = task->exec_token;
2693 }
2694
2695 return task;
2696 }
2697
2698 /*
2699 * Routine: convert_port_to_task_with_flavor
2700 * Purpose:
2701 * Internal helper for converting from a port to a task.
2702 * Doesn't consume the port ref; produces a task ref,
2703 * which may be null.
2704 * Args:
2705 * port - target port
2706 * flavor - requested task port flavor
2707 * options - port translation options
2708 * grp - task reference group
2709 * Conditions:
2710 * Nothing locked.
2711 */
2712 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2713 convert_port_to_task_with_flavor(
2714 ipc_port_t port,
2715 mach_task_flavor_t flavor,
2716 port_intrans_options_t options,
2717 task_grp_t grp)
2718 {
2719 task_t task = TASK_NULL;
2720 task_t self = current_task();
2721
2722 if (IP_VALID(port)) {
2723 if (port == self->itk_self) {
2724 task_reference_grp(self, grp);
2725 return self;
2726 }
2727
2728 ip_mq_lock(port);
2729 if (ip_active(port)) {
2730 task = convert_port_to_task_with_flavor_locked(port,
2731 flavor, options, grp);
2732 }
2733 ip_mq_unlock(port);
2734 }
2735
2736 return task;
2737 }
2738
2739 task_t
convert_port_to_task(ipc_port_t port)2740 convert_port_to_task(
2741 ipc_port_t port)
2742 {
2743 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
2744 PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
2745 }
2746
2747 task_t
convert_port_to_task_mig(ipc_port_t port)2748 convert_port_to_task_mig(
2749 ipc_port_t port)
2750 {
2751 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
2752 PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
2753 }
2754
2755 task_read_t
convert_port_to_task_read(ipc_port_t port)2756 convert_port_to_task_read(
2757 ipc_port_t port)
2758 {
2759 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2760 PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2761 }
2762
2763 static task_read_t
convert_port_to_task_read_no_eval(ipc_port_t port)2764 convert_port_to_task_read_no_eval(
2765 ipc_port_t port)
2766 {
2767 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2768 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2769 }
2770
2771 task_read_t
convert_port_to_task_read_mig(ipc_port_t port)2772 convert_port_to_task_read_mig(
2773 ipc_port_t port)
2774 {
2775 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2776 PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2777 }
2778
2779 task_inspect_t
convert_port_to_task_inspect(ipc_port_t port)2780 convert_port_to_task_inspect(
2781 ipc_port_t port)
2782 {
2783 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2784 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2785 }
2786
2787 task_inspect_t
convert_port_to_task_inspect_no_eval(ipc_port_t port)2788 convert_port_to_task_inspect_no_eval(
2789 ipc_port_t port)
2790 {
2791 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2792 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2793 }
2794
2795 task_inspect_t
convert_port_to_task_inspect_mig(ipc_port_t port)2796 convert_port_to_task_inspect_mig(
2797 ipc_port_t port)
2798 {
2799 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2800 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2801 }
2802
2803 task_name_t
convert_port_to_task_name(ipc_port_t port)2804 convert_port_to_task_name(
2805 ipc_port_t port)
2806 {
2807 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
2808 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2809 }
2810
2811 task_name_t
convert_port_to_task_name_mig(ipc_port_t port)2812 convert_port_to_task_name_mig(
2813 ipc_port_t port)
2814 {
2815 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
2816 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2817 }
2818
2819 /*
2820 * Routine: convert_port_to_task_policy
2821 * Purpose:
2822 * Convert from a port to a task.
2823 * Doesn't consume the port ref; produces a task ref,
2824 * which may be null.
2825 * If the port is being used with task_port_set(), any task port
2826 * type other than TASK_CONTROL requires an entitlement. If the
2827 * port is being used with task_port_get(), TASK_NAME requires an
2828 * entitlement.
2829 * Conditions:
2830 * Nothing locked.
2831 */
2832 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2833 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2834 {
2835 task_t task = TASK_NULL;
2836
2837 if (!IP_VALID(port)) {
2838 return TASK_NULL;
2839 }
2840
2841 task = set ?
2842 convert_port_to_task_mig(port) :
2843 convert_port_to_task_inspect_mig(port);
2844
2845 if (task == TASK_NULL &&
2846 IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2847 task = convert_port_to_task_name_mig(port);
2848 }
2849
2850 return task;
2851 }
2852
2853 task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)2854 convert_port_to_task_policy_set_mig(ipc_port_t port)
2855 {
2856 return convert_port_to_task_policy_mig(port, true);
2857 }
2858
2859 task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)2860 convert_port_to_task_policy_get_mig(ipc_port_t port)
2861 {
2862 return convert_port_to_task_policy_mig(port, false);
2863 }
2864
2865 /*
2866 * Routine: convert_port_to_task_suspension_token
2867 * Purpose:
2868 * Convert from a port to a task suspension token.
2869 * Doesn't consume the port ref; produces a suspension token ref,
2870 * which may be null.
2871 * Conditions:
2872 * Nothing locked.
2873 */
2874 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2875 convert_port_to_task_suspension_token_grp(
2876 ipc_port_t port,
2877 task_grp_t grp)
2878 {
2879 task_suspension_token_t task = TASK_NULL;
2880
2881 if (IP_VALID(port)) {
2882 ip_mq_lock(port);
2883 task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2884 if (task != TASK_NULL) {
2885 task_reference_grp(task, grp);
2886 }
2887 ip_mq_unlock(port);
2888 }
2889
2890 return task;
2891 }
2892
2893 task_suspension_token_t
convert_port_to_task_suspension_token_external(ipc_port_t port)2894 convert_port_to_task_suspension_token_external(
2895 ipc_port_t port)
2896 {
2897 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
2898 }
2899
2900 task_suspension_token_t
convert_port_to_task_suspension_token_mig(ipc_port_t port)2901 convert_port_to_task_suspension_token_mig(
2902 ipc_port_t port)
2903 {
2904 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
2905 }
2906
2907 task_suspension_token_t
convert_port_to_task_suspension_token_kernel(ipc_port_t port)2908 convert_port_to_task_suspension_token_kernel(
2909 ipc_port_t port)
2910 {
2911 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
2912 }
2913
2914 /*
2915 * Routine: convert_port_to_space_with_flavor
2916 * Purpose:
2917 * Internal helper for converting from a port to a space.
2918 * Doesn't consume the port ref; produces a space ref,
2919 * which may be null.
2920 * Args:
2921 * port - target port
2922 * flavor - requested ipc space flavor
2923 * options - port translation options
2924 * Conditions:
2925 * Nothing locked.
2926 */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* There is no "name" flavor for spaces. */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* Borrowed task pointer; no task reference is taken. */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2965
2966 ipc_space_t
convert_port_to_space(ipc_port_t port)2967 convert_port_to_space(
2968 ipc_port_t port)
2969 {
2970 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
2971 PORT_INTRANS_OPTIONS_NONE);
2972 }
2973
2974 ipc_space_read_t
convert_port_to_space_read(ipc_port_t port)2975 convert_port_to_space_read(
2976 ipc_port_t port)
2977 {
2978 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
2979 PORT_INTRANS_ALLOW_CORPSE_TASK);
2980 }
2981
2982 ipc_space_read_t
convert_port_to_space_read_no_eval(ipc_port_t port)2983 convert_port_to_space_read_no_eval(
2984 ipc_port_t port)
2985 {
2986 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
2987 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
2988 }
2989
2990 ipc_space_inspect_t
convert_port_to_space_inspect(ipc_port_t port)2991 convert_port_to_space_inspect(
2992 ipc_port_t port)
2993 {
2994 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
2995 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
2996 }
2997
2998 /*
2999 * Routine: convert_port_to_map_with_flavor
3000 * Purpose:
3001 * Internal helper for converting from a port to a map.
3002 * Doesn't consume the port ref; produces a map ref,
3003 * which may be null.
3004 * Args:
3005 * port - target port
3006 * flavor - requested vm map flavor
3007 * options - port translation options
3008 * Conditions:
3009 * Nothing locked.
3010 */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* vm map access always goes through the full policy evaluation. */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			/* Borrowed task pointer; no task reference is taken. */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* A userspace-reachable kernel map would be a security hole. */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
3060
3061 vm_map_t
convert_port_to_map(ipc_port_t port)3062 convert_port_to_map(
3063 ipc_port_t port)
3064 {
3065 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
3066 PORT_INTRANS_OPTIONS_NONE);
3067 }
3068
3069 vm_map_read_t
convert_port_to_map_read(ipc_port_t port)3070 convert_port_to_map_read(
3071 ipc_port_t port)
3072 {
3073 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
3074 PORT_INTRANS_ALLOW_CORPSE_TASK);
3075 }
3076
3077 vm_map_inspect_t
convert_port_to_map_inspect(__unused ipc_port_t port)3078 convert_port_to_map_inspect(
3079 __unused ipc_port_t port)
3080 {
3081 /* there is no vm_map_inspect_t routines at the moment. */
3082 return VM_MAP_INSPECT_NULL;
3083 }
3084
3085 /*
3086 * Routine: thread_port_kotype_valid_for_flavor
3087 * Purpose:
3088 * Check whether the kobject type of a mach port
3089 * is valid for conversion to a thread of given flavor.
3090 */
3091 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)3092 thread_port_kotype_valid_for_flavor(
3093 natural_t kotype,
3094 mach_thread_flavor_t flavor)
3095 {
3096 switch (flavor) {
3097 /* Ascending capability */
3098 case THREAD_FLAVOR_INSPECT:
3099 if (kotype == IKOT_THREAD_INSPECT) {
3100 return TRUE;
3101 }
3102 OS_FALLTHROUGH;
3103 case THREAD_FLAVOR_READ:
3104 if (kotype == IKOT_THREAD_READ) {
3105 return TRUE;
3106 }
3107 OS_FALLTHROUGH;
3108 case THREAD_FLAVOR_CONTROL:
3109 if (kotype == IKOT_THREAD_CONTROL) {
3110 return TRUE;
3111 }
3112 break;
3113 default:
3114 panic("strange thread flavor");
3115 }
3116
3117 return FALSE;
3118 }
3119
3120 /*
3121 * Routine: convert_port_to_thread_with_flavor_locked
3122 * Purpose:
3123 * Internal helper routine to convert from a locked port to a thread.
3124 * Args:
3125 * port - target port
3126 * flavor - requested thread port flavor
3127 * options - port translation options
3128 * Conditions:
3129 * Port is locked and active.
3130 * Produces a thread ref or THREAD_NULL.
3131 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t              port,
	mach_thread_flavor_t    flavor,
	port_intrans_options_t  options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* The port's kobject type must grant at least `flavor` capability. */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* Some callers explicitly refuse to resolve their own thread. */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* Caller requires the thread to live in its own task. */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* Corpse threads only translate when explicitly allowed. */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* Conversion policy applies unless the caller opted out. */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	/* All checks passed: hand back a referenced thread. */
	thread_reference(thread);
	return thread;
}
3186
3187 /*
3188 * Routine: convert_port_to_thread_with_flavor
3189 * Purpose:
3190 * Internal helper for converting from a port to a thread.
3191 * Doesn't consume the port ref; produces a thread ref,
3192 * which may be null.
3193 * Args:
3194 * port - target port
3195 * flavor - requested thread port flavor
3196 * options - port translation options
3197 * Conditions:
3198 * Nothing locked.
3199 */
3200 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3201 convert_port_to_thread_with_flavor(
3202 ipc_port_t port,
3203 mach_thread_flavor_t flavor,
3204 port_intrans_options_t options)
3205 {
3206 thread_t thread = THREAD_NULL;
3207
3208 if (IP_VALID(port)) {
3209 ip_mq_lock(port);
3210 if (ip_active(port)) {
3211 thread = convert_port_to_thread_with_flavor_locked(port,
3212 flavor, options);
3213 }
3214 ip_mq_unlock(port);
3215 }
3216
3217 return thread;
3218 }
3219
3220 thread_t
convert_port_to_thread(ipc_port_t port)3221 convert_port_to_thread(
3222 ipc_port_t port)
3223 {
3224 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3225 PORT_INTRANS_OPTIONS_NONE);
3226 }
3227
3228 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3229 convert_port_to_thread_read(
3230 ipc_port_t port)
3231 {
3232 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3233 PORT_INTRANS_ALLOW_CORPSE_TASK);
3234 }
3235
3236 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3237 convert_port_to_thread_read_no_eval(
3238 ipc_port_t port)
3239 {
3240 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3241 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3242 }
3243
3244 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3245 convert_port_to_thread_inspect(
3246 ipc_port_t port)
3247 {
3248 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3249 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3250 }
3251
3252 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3253 convert_port_to_thread_inspect_no_eval(
3254 ipc_port_t port)
3255 {
3256 return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3257 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3258 }
3259
3260 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3261 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3262 {
3263 switch (flavor) {
3264 case THREAD_FLAVOR_CONTROL:
3265 return IKOT_THREAD_CONTROL;
3266 case THREAD_FLAVOR_READ:
3267 return IKOT_THREAD_READ;
3268 default:
3269 return IKOT_THREAD_INSPECT;
3270 }
3271 }
3272
3273 /*
3274 * Routine: convert_thread_to_port_with_flavor
3275 * Purpose:
3276 * Convert from a thread to a port of given flavor.
3277 * Consumes a thread ref; produces a naked send right
3278 * which may be invalid.
3279 * Conditions:
3280 * Nothing locked.
3281 */
3282 static ipc_port_t
convert_thread_to_port_with_flavor(thread_t thread,thread_ro_t tro,mach_thread_flavor_t flavor)3283 convert_thread_to_port_with_flavor(
3284 thread_t thread,
3285 thread_ro_t tro,
3286 mach_thread_flavor_t flavor)
3287 {
3288 ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
3289 ipc_port_t port = IP_NULL;
3290
3291 thread_mtx_lock(thread);
3292
3293 /*
3294 * out-trans of weaker flavors are still permitted, but in-trans
3295 * is separately enforced.
3296 */
3297 if (flavor == THREAD_FLAVOR_CONTROL &&
3298 task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
3299 /* denied by security policy, make the port appear dead */
3300 port = IP_DEAD;
3301 goto exit;
3302 }
3303
3304 if (!thread->ipc_active) {
3305 goto exit;
3306 }
3307
3308 port = tro->tro_ports[flavor];
3309 if (flavor == THREAD_FLAVOR_CONTROL) {
3310 port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
3311 } else if (IP_VALID(port)) {
3312 (void)ipc_kobject_make_send_nsrequest(port, thread, kotype);
3313 } else {
3314 /*
3315 * Claim a send right on the thread read/inspect port, and request a no-senders
3316 * notification on that port (if none outstanding). A thread reference is not
3317 * donated here even though the ports are created lazily because it doesn't own the
3318 * kobject that it points to. Threads manage their lifetime explicitly and
3319 * have to synchronize with each other, between the task/thread terminating and the
3320 * send-once notification firing, and this is done under the thread mutex
3321 * rather than with atomics.
3322 */
3323 port = ipc_kobject_alloc_port(thread, kotype,
3324 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
3325 IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3326 /*
3327 * If Developer Mode is off, substitute read port for control
3328 * port if copying out to owning task's space, for the sake of
3329 * in-process exception handler.
3330 *
3331 * Also see: exception_deliver().
3332 */
3333 if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
3334 ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
3335 port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
3336 }
3337 zalloc_ro_update_field(ZONE_ID_THREAD_RO,
3338 tro, tro_ports[flavor], &port);
3339 }
3340
3341 exit:
3342 thread_mtx_unlock(thread);
3343 thread_deallocate(thread);
3344 return port;
3345 }
3346
3347 ipc_port_t
convert_thread_to_port(thread_t thread)3348 convert_thread_to_port(
3349 thread_t thread)
3350 {
3351 thread_ro_t tro = get_thread_ro(thread);
3352 return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3353 }
3354
3355 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3356 convert_thread_read_to_port(thread_read_t thread)
3357 {
3358 thread_ro_t tro = get_thread_ro(thread);
3359 return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3360 }
3361
3362 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3363 convert_thread_inspect_to_port(thread_inspect_t thread)
3364 {
3365 thread_ro_t tro = get_thread_ro(thread);
3366 return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3367 }
3368
3369
3370 /*
3371 * Routine: port_name_to_thread
3372 * Purpose:
3373 * Convert from a port name to a thread reference
3374 * A name of MACH_PORT_NULL is valid for the null thread.
3375 * Conditions:
3376 * Nothing locked.
3377 */
3378 thread_t
port_name_to_thread(mach_port_name_t name,port_intrans_options_t options)3379 port_name_to_thread(
3380 mach_port_name_t name,
3381 port_intrans_options_t options)
3382 {
3383 thread_t thread = THREAD_NULL;
3384 ipc_port_t kport;
3385 kern_return_t kr;
3386
3387 if (MACH_PORT_VALID(name)) {
3388 kr = ipc_port_translate_send(current_space(), name, &kport);
3389 if (kr == KERN_SUCCESS) {
3390 /* port is locked and active */
3391 assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
3392 !(options & PORT_INTRANS_SKIP_TASK_EVAL));
3393 thread = convert_port_to_thread_with_flavor_locked(kport,
3394 THREAD_FLAVOR_CONTROL, options);
3395 ip_mq_unlock(kport);
3396 }
3397 }
3398
3399 return thread;
3400 }
3401
3402 /*
3403 * Routine: port_name_is_pinned_itk_self
3404 * Purpose:
3405 * Returns whether this port name is for the pinned
3406 * mach_task_self (if it exists).
3407 *
3408 * task_self_trap() when the task port is pinned,
3409 * will memorize the name the port has in the space
3410 * in ip_receiver_name, which we can use to fast-track
3411 * this answer without taking any lock.
3412 *
3413 * ipc_task_disable() will set `ip_receiver_name` back to
3414 * MACH_PORT_SPECIAL_DEFAULT.
3415 *
3416 * Conditions:
3417 * self must be current_task()
3418 * Nothing locked.
3419 */
3420 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3421 port_name_is_pinned_itk_self(
3422 task_t self,
3423 mach_port_name_t name)
3424 {
3425 ipc_port_t kport = self->itk_self;
3426 return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3427 kport->ip_pinned && ip_get_receiver_name(kport) == name;
3428 }
3429
3430 /*
3431 * Routine: port_name_to_current_task*_noref
3432 * Purpose:
3433 * Convert from a port name to current_task()
3434 * A name of MACH_PORT_NULL is valid for the null task.
3435 *
3436 * If current_task() is in the process of being terminated,
3437 * this might return a non NULL task even when port_name_to_task()
3438 * would.
3439 *
3440 * However, this is an acceptable race that can't be controlled by
3441 * userspace, and that downstream code using the returned task
3442 * has to handle anyway.
3443 *
3444 * ipc_space_disable() does try to narrow this race,
3445 * by causing port_name_is_pinned_itk_self() to fail.
3446 *
3447 * Returns:
3448 * current_task() if the port name was for current_task()
3449 * at the appropriate flavor.
3450 *
3451 * TASK_NULL otherwise.
3452 *
3453 * Conditions:
3454 * Nothing locked.
3455 */
3456 static task_t
port_name_to_current_task_internal_noref(mach_port_name_t name,mach_task_flavor_t flavor)3457 port_name_to_current_task_internal_noref(
3458 mach_port_name_t name,
3459 mach_task_flavor_t flavor)
3460 {
3461 ipc_port_t kport;
3462 kern_return_t kr;
3463 task_t task = TASK_NULL;
3464 task_t self = current_task();
3465
3466 if (port_name_is_pinned_itk_self(self, name)) {
3467 return self;
3468 }
3469
3470 if (MACH_PORT_VALID(name)) {
3471 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3472 if (kr == KERN_SUCCESS) {
3473 ipc_kobject_type_t type = ip_kotype(kport);
3474 if (task_port_kotype_valid_for_flavor(type, flavor)) {
3475 task = ipc_kobject_get_locked(kport, type);
3476 }
3477 ip_mq_unlock(kport);
3478 if (task != self) {
3479 task = TASK_NULL;
3480 }
3481 }
3482 }
3483
3484 return task;
3485 }
3486
3487 task_t
port_name_to_current_task_noref(mach_port_name_t name)3488 port_name_to_current_task_noref(
3489 mach_port_name_t name)
3490 {
3491 return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3492 }
3493
3494 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3495 port_name_to_current_task_read_noref(
3496 mach_port_name_t name)
3497 {
3498 return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3499 }
3500
3501 /*
3502 * Routine: port_name_to_task
3503 * Purpose:
3504 * Convert from a port name to a task reference
3505 * A name of MACH_PORT_NULL is valid for the null task.
3506 * Conditions:
3507 * Nothing locked.
3508 */
3509 static task_t
port_name_to_task_grp(mach_port_name_t name,task_grp_t grp)3510 port_name_to_task_grp(
3511 mach_port_name_t name,
3512 task_grp_t grp)
3513 {
3514 ipc_port_t kport;
3515 kern_return_t kr;
3516 task_t task = TASK_NULL;
3517 task_t self = current_task();
3518
3519 if (port_name_is_pinned_itk_self(self, name)) {
3520 task_reference_grp(self, grp);
3521 return self;
3522 }
3523
3524 if (MACH_PORT_VALID(name)) {
3525 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3526 if (kr == KERN_SUCCESS) {
3527 /* port is locked and active */
3528 task = convert_port_to_task_with_flavor_locked(kport,
3529 TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, grp);
3530 ip_mq_unlock(kport);
3531 }
3532 }
3533 return task;
3534 }
3535
3536 task_t
port_name_to_task_external(mach_port_name_t name)3537 port_name_to_task_external(
3538 mach_port_name_t name)
3539 {
3540 return port_name_to_task_grp(name, TASK_GRP_EXTERNAL);
3541 }
3542
3543 task_t
port_name_to_task_kernel(mach_port_name_t name)3544 port_name_to_task_kernel(
3545 mach_port_name_t name)
3546 {
3547 return port_name_to_task_grp(name, TASK_GRP_KERNEL);
3548 }
3549
3550 /*
3551 * Routine: port_name_to_task_read
3552 * Purpose:
3553 * Convert from a port name to a task reference
3554 * A name of MACH_PORT_NULL is valid for the null task.
3555 * Conditions:
3556 * Nothing locked.
3557 */
3558 task_read_t
port_name_to_task_read(mach_port_name_t name)3559 port_name_to_task_read(
3560 mach_port_name_t name)
3561 {
3562 ipc_port_t kport;
3563 kern_return_t kr;
3564 task_read_t tr = TASK_READ_NULL;
3565 task_t self = current_task();
3566
3567 if (port_name_is_pinned_itk_self(self, name)) {
3568 task_reference_grp(self, TASK_GRP_KERNEL);
3569 return self;
3570 }
3571
3572 if (MACH_PORT_VALID(name)) {
3573 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3574 if (kr == KERN_SUCCESS) {
3575 /* port is locked and active */
3576 tr = convert_port_to_task_with_flavor_locked(kport,
3577 TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK,
3578 TASK_GRP_KERNEL);
3579 ip_mq_unlock(kport);
3580 }
3581 }
3582 return tr;
3583 }
3584
3585 /*
3586 * Routine: port_name_to_task_read_no_eval
3587 * Purpose:
3588 * Convert from a port name to a task reference
3589 * A name of MACH_PORT_NULL is valid for the null task.
3590 * Skips task_conversion_eval() during conversion.
3591 * Conditions:
3592 * Nothing locked.
3593 */
3594 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3595 port_name_to_task_read_no_eval(
3596 mach_port_name_t name)
3597 {
3598 ipc_port_t kport;
3599 kern_return_t kr;
3600 task_read_t tr = TASK_READ_NULL;
3601 task_t self = current_task();
3602
3603 if (port_name_is_pinned_itk_self(self, name)) {
3604 task_reference_grp(self, TASK_GRP_KERNEL);
3605 return self;
3606 }
3607
3608 if (MACH_PORT_VALID(name)) {
3609 port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3610 PORT_INTRANS_ALLOW_CORPSE_TASK;
3611
3612 kr = ipc_port_translate_send(self->itk_space, name, &kport);
3613 if (kr == KERN_SUCCESS) {
3614 /* port is locked and active */
3615 tr = convert_port_to_task_with_flavor_locked(kport,
3616 TASK_FLAVOR_READ, options, TASK_GRP_KERNEL);
3617 ip_mq_unlock(kport);
3618 }
3619 }
3620 return tr;
3621 }
3622
3623 /*
3624 * Routine: port_name_to_task_name
3625 * Purpose:
3626 * Convert from a port name to a task reference
3627 * A name of MACH_PORT_NULL is valid for the null task.
3628 * Conditions:
3629 * Nothing locked.
3630 */
3631 task_name_t
port_name_to_task_name(mach_port_name_t name)3632 port_name_to_task_name(
3633 mach_port_name_t name)
3634 {
3635 ipc_port_t kport;
3636 kern_return_t kr;
3637 task_name_t tn = TASK_NAME_NULL;
3638 task_t self = current_task();
3639
3640 if (port_name_is_pinned_itk_self(self, name)) {
3641 task_reference_grp(self, TASK_GRP_KERNEL);
3642 return self;
3643 }
3644
3645 if (MACH_PORT_VALID(name)) {
3646 port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3647 PORT_INTRANS_ALLOW_CORPSE_TASK;
3648
3649 kr = ipc_port_translate_send(current_space(), name, &kport);
3650 if (kr == KERN_SUCCESS) {
3651 /* port is locked and active */
3652 tn = convert_port_to_task_with_flavor_locked(kport,
3653 TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL);
3654 ip_mq_unlock(kport);
3655 }
3656 }
3657 return tn;
3658 }
3659
3660 /*
3661 * Routine: port_name_to_task_id_token
3662 * Purpose:
3663 * Convert from a port name to a task identity token reference
3664 * Conditions:
3665 * Nothing locked.
3666 */
3667 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3668 port_name_to_task_id_token(
3669 mach_port_name_t name)
3670 {
3671 ipc_port_t port;
3672 kern_return_t kr;
3673 task_id_token_t token = TASK_ID_TOKEN_NULL;
3674
3675 if (MACH_PORT_VALID(name)) {
3676 kr = ipc_port_translate_send(current_space(), name, &port);
3677 if (kr == KERN_SUCCESS) {
3678 token = convert_port_to_task_id_token(port);
3679 ip_mq_unlock(port);
3680 }
3681 }
3682 return token;
3683 }
3684
3685 /*
3686 * Routine: port_name_to_host
3687 * Purpose:
3688 * Convert from a port name to a host pointer.
3689 * NOTE: This does _not_ return a +1 reference to the host_t
3690 * Conditions:
3691 * Nothing locked.
3692 */
3693 host_t
port_name_to_host(mach_port_name_t name)3694 port_name_to_host(
3695 mach_port_name_t name)
3696 {
3697 host_t host = HOST_NULL;
3698 kern_return_t kr;
3699 ipc_port_t port;
3700
3701 if (MACH_PORT_VALID(name)) {
3702 kr = ipc_port_translate_send(current_space(), name, &port);
3703 if (kr == KERN_SUCCESS) {
3704 host = convert_port_to_host(port);
3705 ip_mq_unlock(port);
3706 }
3707 }
3708 return host;
3709 }
3710
3711 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3712 task_flavor_to_kotype(mach_task_flavor_t flavor)
3713 {
3714 switch (flavor) {
3715 case TASK_FLAVOR_CONTROL:
3716 return IKOT_TASK_CONTROL;
3717 case TASK_FLAVOR_READ:
3718 return IKOT_TASK_READ;
3719 case TASK_FLAVOR_INSPECT:
3720 return IKOT_TASK_INSPECT;
3721 default:
3722 return IKOT_TASK_NAME;
3723 }
3724 }
3725
3726 /*
3727 * Routine: convert_task_to_port_with_flavor
3728 * Purpose:
3729 * Convert from a task to a port of given flavor.
3730 * Consumes a task ref; produces a naked send right
3731 * which may be invalid.
3732 * Conditions:
3733 * Nothing locked.
3734 */
3735 ipc_port_t
convert_task_to_port_with_flavor(task_t task,mach_task_flavor_t flavor,task_grp_t grp)3736 convert_task_to_port_with_flavor(
3737 task_t task,
3738 mach_task_flavor_t flavor,
3739 task_grp_t grp)
3740 {
3741 ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
3742 ipc_port_t port = IP_NULL;
3743
3744 itk_lock(task);
3745
3746 if (!task->ipc_active) {
3747 goto exit;
3748 }
3749
3750 /*
3751 * out-trans of weaker flavors are still permitted, but in-trans
3752 * is separately enforced.
3753 */
3754 if (flavor == TASK_FLAVOR_CONTROL &&
3755 task_conversion_eval_out_trans(current_task(), task, flavor)) {
3756 /* denied by security policy, make the port appear dead */
3757 port = IP_DEAD;
3758 goto exit;
3759 }
3760
3761 switch (flavor) {
3762 case TASK_FLAVOR_CONTROL:
3763 case TASK_FLAVOR_NAME:
3764 port = ipc_kobject_make_send(task->itk_task_ports[flavor],
3765 task, kotype);
3766 break;
3767 /*
3768 * Claim a send right on the task read/inspect port,
3769 * and request a no-senders notification on that port
3770 * (if none outstanding).
3771 *
3772 * The task's itk_lock is used to synchronize the handling
3773 * of the no-senders notification with the task termination.
3774 */
3775 case TASK_FLAVOR_READ:
3776 case TASK_FLAVOR_INSPECT:
3777 port = task->itk_task_ports[flavor];
3778 if (IP_VALID(port)) {
3779 (void)ipc_kobject_make_send_nsrequest(port,
3780 task, kotype);
3781 } else {
3782 port = ipc_kobject_alloc_port(task, kotype,
3783 IPC_KOBJECT_ALLOC_MAKE_SEND |
3784 IPC_KOBJECT_ALLOC_NSREQUEST |
3785 IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3786 /*
3787 * If Developer Mode is off, substitute read port for control port if
3788 * copying out to owning task's space, for the sake of in-process
3789 * exception handler.
3790 *
3791 * Also see: exception_deliver().
3792 */
3793 if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
3794 ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
3795 port->ip_kolabel->ikol_alt_port = task->itk_self;
3796 }
3797
3798 task->itk_task_ports[flavor] = port;
3799 }
3800 break;
3801 }
3802
3803 exit:
3804 itk_unlock(task);
3805 task_deallocate_grp(task, grp);
3806 return port;
3807 }
3808
/*
 *	Routine:	convert_corpse_to_port_and_nsrequest
 *	Purpose:
 *		Produce a naked send right for the corpse's control port
 *		and arm a no-senders notification on it.
 *		Consumes a corpse task ref.
 *	Conditions:
 *		Nothing locked. corpse must be a corpse task.
 */
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	/* no send rights should exist yet on the corpse control port */
	assert(port->ip_srights == 0);
	kr = ipc_kobject_make_send_nsrequest(port, corpse, IKOT_TASK_CONTROL);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	/* consume the caller's corpse reference */
	task_deallocate(corpse);
	return port;
}
3827
3828 ipc_port_t
convert_task_to_port(task_t task)3829 convert_task_to_port(
3830 task_t task)
3831 {
3832 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3833 }
3834
3835 ipc_port_t
convert_task_read_to_port(task_read_t task)3836 convert_task_read_to_port(
3837 task_read_t task)
3838 {
3839 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3840 }
3841
3842 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3843 convert_task_inspect_to_port(
3844 task_inspect_t task)
3845 {
3846 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3847 }
3848
3849 ipc_port_t
convert_task_name_to_port(task_name_t task)3850 convert_task_name_to_port(
3851 task_name_t task)
3852 {
3853 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3854 }
3855
3856 ipc_port_t
convert_task_to_port_external(task_t task)3857 convert_task_to_port_external(task_t task)
3858 {
3859 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3860 }
3861
3862 ipc_port_t
convert_task_read_to_port_external(task_t task)3863 convert_task_read_to_port_external(task_t task)
3864 {
3865 return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3866 }
3867
/*
 *	Routine:	convert_task_to_port_pinned
 *	Purpose:
 *		Produce a naked send right for the current task's pinned
 *		self port. Consumes a task ref.
 *	Conditions:
 *		Nothing locked. task must be current_task().
 */
ipc_port_t
convert_task_to_port_pinned(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);

	if (task->ipc_active) {
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
	}

	/* immovable tasks hand out pinned, immovable-send self ports */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	/* consume the caller's task reference */
	task_deallocate(task);
	return port;
}
3892 /*
3893 * Routine: convert_task_suspend_token_to_port
3894 * Purpose:
3895 * Convert from a task suspension token to a port.
3896 * Consumes a task suspension token ref; produces a naked send-once right
3897 * which may be invalid.
3898 * Conditions:
3899 * Nothing locked.
3900 */
3901 static ipc_port_t
convert_task_suspension_token_to_port_grp(task_suspension_token_t task,task_grp_t grp)3902 convert_task_suspension_token_to_port_grp(
3903 task_suspension_token_t task,
3904 task_grp_t grp)
3905 {
3906 ipc_port_t port;
3907
3908 task_lock(task);
3909 if (task->active) {
3910 itk_lock(task);
3911 if (task->itk_resume == IP_NULL) {
3912 task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
3913 IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
3914 }
3915
3916 /*
3917 * Create a send-once right for each instance of a direct user-called
3918 * task_suspend2 call. Each time one of these send-once rights is abandoned,
3919 * the notification handler will resume the target task.
3920 */
3921 port = task->itk_resume;
3922 ipc_kobject_require(port, task, IKOT_TASK_RESUME);
3923 port = ipc_port_make_sonce(port);
3924 itk_unlock(task);
3925 assert(IP_VALID(port));
3926 } else {
3927 port = IP_NULL;
3928 }
3929
3930 task_unlock(task);
3931 task_suspension_token_deallocate_grp(task, grp);
3932
3933 return port;
3934 }
3935
3936 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3937 convert_task_suspension_token_to_port_external(
3938 task_suspension_token_t task)
3939 {
3940 return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3941 }
3942
3943 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3944 convert_task_suspension_token_to_port_mig(
3945 task_suspension_token_t task)
3946 {
3947 return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3948 }
3949
/*
 *	Routine:	convert_thread_to_port_pinned
 *	Purpose:
 *		Produce a naked send right for the thread's pinned
 *		self port. Consumes a thread ref.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_thread_to_port_pinned(
	thread_t                thread)
{
	thread_ro_t     tro = get_thread_ro(thread);
	ipc_port_t      port = IP_NULL;

	thread_mtx_lock(thread);

	if (thread->ipc_active) {
		port = ipc_kobject_make_send(tro->tro_self_port,
		    thread, IKOT_THREAD_CONTROL);
	}

	/* immovable tasks hand out immovable-send thread self ports */
	if (port && task_is_immovable(tro->tro_task)) {
		assert(ip_is_immovable_send(port));
	}

	thread_mtx_unlock(thread);
	/* consume the caller's thread reference */
	thread_deallocate(thread);
	return port;
}
3972 /*
3973 * Routine: space_deallocate
3974 * Purpose:
3975 * Deallocate a space ref produced by convert_port_to_space.
3976 * Conditions:
3977 * Nothing locked.
3978 */
3979
3980 void
space_deallocate(ipc_space_t space)3981 space_deallocate(
3982 ipc_space_t space)
3983 {
3984 if (space != IS_NULL) {
3985 is_release(space);
3986 }
3987 }
3988
3989 /*
3990 * Routine: space_read_deallocate
3991 * Purpose:
3992 * Deallocate a space read ref produced by convert_port_to_space_read.
3993 * Conditions:
3994 * Nothing locked.
3995 */
3996
3997 void
space_read_deallocate(ipc_space_read_t space)3998 space_read_deallocate(
3999 ipc_space_read_t space)
4000 {
4001 if (space != IS_INSPECT_NULL) {
4002 is_release((ipc_space_t)space);
4003 }
4004 }
4005
4006 /*
4007 * Routine: space_inspect_deallocate
4008 * Purpose:
4009 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
4010 * Conditions:
4011 * Nothing locked.
4012 */
4013
4014 void
space_inspect_deallocate(ipc_space_inspect_t space)4015 space_inspect_deallocate(
4016 ipc_space_inspect_t space)
4017 {
4018 if (space != IS_INSPECT_NULL) {
4019 is_release((ipc_space_t)space);
4020 }
4021 }
4022
4023
4024 /*
4025 * Routine: thread/task_set_exception_ports [kernel call]
4026 * Purpose:
4027 * Sets the thread/task exception port, flavor and
4028 * behavior for the exception types specified by the mask.
4029 * There will be one send right per exception per valid
4030 * port.
4031 * Conditions:
4032 * Nothing locked. If successful, consumes
4033 * the supplied send right.
4034 * Returns:
4035 * KERN_SUCCESS Changed the special port.
4036 * KERN_INVALID_ARGUMENT The thread is null,
4037 * Illegal mask bit set.
4038 * Illegal exception behavior
4039 * KERN_FAILURE The thread is dead.
4040 */
4041
kern_return_t
thread_set_exception_ports(
	thread_t                thread,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(current_task());

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* with a valid port, the behavior must be one of the known set */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* exception ports must be movable send rights */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* identity-protected / backtrace behaviors require MACH_EXCEPTION_CODES */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* exception actions are allocated lazily on first use */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/* remember the displaced port; release it after unlock */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release displaced ports outside the thread mutex */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4153
4154 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4155 task_set_exception_ports(
4156 task_t task,
4157 exception_mask_t exception_mask,
4158 ipc_port_t new_port,
4159 exception_behavior_t new_behavior,
4160 thread_state_flavor_t new_flavor)
4161 {
4162 ipc_port_t old_port[EXC_TYPES_COUNT];
4163 boolean_t privileged = task_is_privileged(current_task());
4164 register int i;
4165
4166 #if CONFIG_MACF
4167 struct label *new_label;
4168 #endif
4169
4170 if (task == TASK_NULL) {
4171 return KERN_INVALID_ARGUMENT;
4172 }
4173
4174 if (exception_mask & ~EXC_MASK_VALID) {
4175 return KERN_INVALID_ARGUMENT;
4176 }
4177
4178 if (IP_VALID(new_port)) {
4179 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4180 case EXCEPTION_DEFAULT:
4181 case EXCEPTION_STATE:
4182 case EXCEPTION_STATE_IDENTITY:
4183 case EXCEPTION_IDENTITY_PROTECTED:
4184 break;
4185
4186 default:
4187 return KERN_INVALID_ARGUMENT;
4188 }
4189 }
4190
4191 if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4192 return KERN_INVALID_RIGHT;
4193 }
4194
4195
4196 /*
4197 * Check the validity of the thread_state_flavor by calling the
4198 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4199 * osfmk/mach/ARCHITECTURE/thread_status.h
4200 */
4201 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4202 return KERN_INVALID_ARGUMENT;
4203 }
4204
4205 if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4206 (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4207 && !(new_behavior & MACH_EXCEPTION_CODES)) {
4208 return KERN_INVALID_ARGUMENT;
4209 }
4210
4211 #if CONFIG_MACF
4212 new_label = mac_exc_create_label_for_current_proc();
4213 #endif
4214
4215 itk_lock(task);
4216
4217 /*
4218 * Allow setting exception port during the span of ipc_task_init() to
4219 * ipc_task_terminate(). posix_spawn() port actions can set exception
4220 * ports on target task _before_ task IPC access is enabled.
4221 */
4222 if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4223 itk_unlock(task);
4224 #if CONFIG_MACF
4225 mac_exc_free_label(new_label);
4226 #endif
4227 return KERN_FAILURE;
4228 }
4229
4230 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4231 if ((exception_mask & (1 << i))
4232 #if CONFIG_MACF
4233 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4234 #endif
4235 ) {
4236 old_port[i] = task->exc_actions[i].port;
4237 task->exc_actions[i].port =
4238 exception_port_copy_send(new_port);
4239 task->exc_actions[i].behavior = new_behavior;
4240 task->exc_actions[i].flavor = new_flavor;
4241 task->exc_actions[i].privileged = privileged;
4242 } else {
4243 old_port[i] = IP_NULL;
4244 }
4245 }
4246
4247 itk_unlock(task);
4248
4249 #if CONFIG_MACF
4250 mac_exc_free_label(new_label);
4251 #endif
4252
4253 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4254 if (IP_VALID(old_port[i])) {
4255 ipc_port_release_send(old_port[i]);
4256 }
4257 }
4258
4259 if (IP_VALID(new_port)) { /* consume send right */
4260 ipc_port_release_send(new_port);
4261 }
4262
4263 return KERN_SUCCESS;
4264 }
4265
4266 /*
4267 * Routine: thread/task_swap_exception_ports [kernel call]
4268 * Purpose:
4269 * Sets the thread/task exception port, flavor and
4270 * behavior for the exception types specified by the
4271 * mask.
4272 *
 * The old ports, behavior and flavors are returned.
 * Count specifies the array sizes on input and
 * the number of returned ports etc. on output. The
 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
 * array specifies the corresponding exception type(s).
 *
 * Conditions:
 * Nothing locked. If successful, consumes
 * the supplied send right.
 *
 * Returns up to [in] CountCnt elements.
4285 * Returns:
4286 * KERN_SUCCESS Changed the special port.
4287 * KERN_INVALID_ARGUMENT The thread is null,
4288 * Illegal mask bit set.
4289 * Illegal exception behavior
4290 * KERN_FAILURE The thread is dead.
4291 */
4292
kern_return_t
thread_swap_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	/* Old ports displaced per exception type; released after unlocking. */
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the valid exception set. */
	if (exception_mask & (exception_mask & ~EXC_MASK_VALID ? ~EXC_MASK_VALID : 0)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable ports may not be registered as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and the backtrace-preferred flag
	 * both require MACH_EXCEPTION_CODES (64-bit exception codes).
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	if (tro->tro_exc_actions == NULL) {
		/* Lazily allocate the per-thread exception action table. */
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* New distinct (port, behavior, flavor) tuple: fill slot. */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Stash the displaced port and install the new action. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release replaced send rights outside the thread mutex. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4431
kern_return_t
task_swap_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	/* Old ports displaced per exception type; released after unlocking. */
	ipc_port_t old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the valid exception set. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable ports may not be registered as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and the backtrace-preferred flag
	 * both require MACH_EXCEPTION_CODES (64-bit exception codes).
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* New distinct (port, behavior, flavor) tuple: fill slot. */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced port and install the new action. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release replaced send rights outside the task itk lock. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4562
4563 /*
4564 * Routine: thread/task_get_exception_ports [kernel call]
4565 * Purpose:
4566 * Clones a send right for each of the thread/task's exception
4567 * ports specified in the mask and returns the behaviour
4568 * and flavor of said port.
4569 *
 * Returns up to [in] CountCnt elements.
4571 *
4572 * Conditions:
4573 * Nothing locked.
4574 * Returns:
4575 * KERN_SUCCESS Extracted a send right.
4576 * KERN_INVALID_ARGUMENT The thread is null,
4577 * Invalid special port,
4578 * Illegal mask bit set.
4579 * KERN_FAILURE The thread is dead.
4580 */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t thread,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_info_array_t ports_info,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int count;
	/* Non-NULL ports_info selects the info-only path (no rights copied). */
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must supply either an info array or a port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	if (tro->tro_exc_actions == NULL) {
		/* No actions ever installed; report zero entries. */
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count && count < *CountCnt) {
				/* New distinct entry, and room remains in the output arrays. */
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						/* Null/dead port: zeroed info entry. */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Addresses are permuted before export to userspace. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4673
4674 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4675 thread_get_exception_ports(
4676 thread_t thread,
4677 exception_mask_t exception_mask,
4678 exception_mask_array_t masks,
4679 mach_msg_type_number_t *CountCnt,
4680 exception_port_array_t ports,
4681 exception_behavior_array_t behaviors,
4682 thread_state_flavor_array_t flavors)
4683 {
4684 return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4685 NULL, ports, behaviors, flavors);
4686 }
4687
4688 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4689 thread_get_exception_ports_info(
4690 mach_port_t port,
4691 exception_mask_t exception_mask,
4692 exception_mask_array_t masks,
4693 mach_msg_type_number_t *CountCnt,
4694 exception_port_info_array_t ports_info,
4695 exception_behavior_array_t behaviors,
4696 thread_state_flavor_array_t flavors)
4697 {
4698 kern_return_t kr;
4699
4700 thread_t thread = convert_port_to_thread_read_no_eval(port);
4701
4702 if (thread == THREAD_NULL) {
4703 return KERN_INVALID_ARGUMENT;
4704 }
4705
4706 kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4707 ports_info, NULL, behaviors, flavors);
4708
4709 thread_deallocate(thread);
4710 return kr;
4711 }
4712
4713 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4714 thread_get_exception_ports_from_user(
4715 mach_port_t port,
4716 exception_mask_t exception_mask,
4717 exception_mask_array_t masks,
4718 mach_msg_type_number_t *CountCnt,
4719 exception_port_array_t ports,
4720 exception_behavior_array_t behaviors,
4721 thread_state_flavor_array_t flavors)
4722 {
4723 kern_return_t kr;
4724
4725 thread_t thread = convert_port_to_thread(port);
4726
4727 if (thread == THREAD_NULL) {
4728 return KERN_INVALID_ARGUMENT;
4729 }
4730
4731 kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4732
4733 thread_deallocate(thread);
4734 return kr;
4735 }
4736
static kern_return_t
task_get_exception_ports_internal(
	task_t task,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_info_array_t ports_info,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int count;
	/* Non-NULL ports_info selects the info-only path (no rights copied). */
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must supply either an info array or a port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count && count < *CountCnt) {
				/* New distinct entry, and room remains in the output arrays. */
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						/* Null/dead port: zeroed info entry. */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Addresses are permuted before export to userspace. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4821
4822 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4823 task_get_exception_ports(
4824 task_t task,
4825 exception_mask_t exception_mask,
4826 exception_mask_array_t masks,
4827 mach_msg_type_number_t *CountCnt,
4828 exception_port_array_t ports,
4829 exception_behavior_array_t behaviors,
4830 thread_state_flavor_array_t flavors)
4831 {
4832 return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4833 NULL, ports, behaviors, flavors);
4834 }
4835
4836 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4837 task_get_exception_ports_info(
4838 mach_port_t port,
4839 exception_mask_t exception_mask,
4840 exception_mask_array_t masks,
4841 mach_msg_type_number_t *CountCnt,
4842 exception_port_info_array_t ports_info,
4843 exception_behavior_array_t behaviors,
4844 thread_state_flavor_array_t flavors)
4845 {
4846 kern_return_t kr;
4847
4848 task_t task = convert_port_to_task_read_no_eval(port);
4849
4850 if (task == TASK_NULL) {
4851 return KERN_INVALID_ARGUMENT;
4852 }
4853
4854 kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4855 ports_info, NULL, behaviors, flavors);
4856
4857 task_deallocate(task);
4858 return kr;
4859 }
4860
4861 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4862 task_get_exception_ports_from_user(
4863 mach_port_t port,
4864 exception_mask_t exception_mask,
4865 exception_mask_array_t masks,
4866 mach_msg_type_number_t *CountCnt,
4867 exception_port_array_t ports,
4868 exception_behavior_array_t behaviors,
4869 thread_state_flavor_array_t flavors)
4870 {
4871 kern_return_t kr;
4872
4873 task_t task = convert_port_to_task(port);
4874
4875 if (task == TASK_NULL) {
4876 return KERN_INVALID_ARGUMENT;
4877 }
4878
4879 kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4880
4881 task_deallocate(task);
4882 return kr;
4883 }
4884
4885 /*
4886 * Routine: ipc_thread_port_unpin
4887 * Purpose:
4888 *
4889 * Called on the thread when it's terminating so that the last ref
4890 * can be deallocated without a guard exception.
4891 * Conditions:
4892 * Thread mutex lock is held.
4893 */
4894 void
ipc_thread_port_unpin(ipc_port_t port)4895 ipc_thread_port_unpin(
4896 ipc_port_t port)
4897 {
4898 if (port == IP_NULL) {
4899 return;
4900 }
4901 ip_mq_lock(port);
4902 port->ip_pinned = 0;
4903 ip_mq_unlock(port);
4904 }
4905