xref: /xnu-8020.140.41/osfmk/kern/ipc_tt.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86 
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94 #include <kdp/kdp_dyld.h>
95 
96 #include <vm/vm_map.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_protos.h>
99 
100 #include <security/mac_mach_internal.h>
101 
102 #if CONFIG_CSR
103 #include <sys/csr.h>
104 #endif
105 
106 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
107 extern int cs_relax_platform_task_ports;
108 #endif
109 
110 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
111 
112 __options_decl(ipc_reply_port_type_t, uint32_t, {
113 	IRPT_NONE        = 0x00,
114 	IRPT_USER        = 0x01,
115 	IRPT_KERNEL      = 0x02,
116 });
117 
118 /* forward declarations */
119 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
120 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
121 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
122 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
123 extern kern_return_t task_conversion_eval(task_t caller, task_t victim);
124 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
125 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
126 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
127 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
128 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
129 
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;       /* movable control port */
	ipc_port_t nport;       /* name port */
	ipc_port_t pport;       /* port stored in itk_self; same as kport here */
	kern_return_t kr;
	int i;


	kr = ipc_space_create(&ipc_table_entries[0], IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/*
	 * Control port starts out movable; pport is identical to kport at
	 * init time and may be replaced by an immovable port later in
	 * ipc_task_set_immovable_pinned().
	 */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A corpse fork keeps no naked send right in the kernel:
		 * holding one would prevent the no-senders notification
		 * from ever firing.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		task->itk_settable_self = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* slot 0 is unused; only FIRST_EXCEPTION..EXC_TYPES_COUNT get labels */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* no parent: start with empty exception/registered/bootstrap state */
		ipc_port_t port;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* copy send rights from the parent, under the parent's itk lock */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
		    ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
263 
/*
 *	Routine:	ipc_task_set_immovable_pinned
 *	Purpose:
 *		Make a task's control port immovable and/or pinned
 *		according to its control port options. If the control port
 *		is immovable, allocate an immovable control port for the
 *		task and optionally pin it. The movable kport is relabeled
 *		to substitute to the new immovable port.
 *	Conditions:
 *		Task's control port is movable and not pinned.
 */
void
ipc_task_set_immovable_pinned(
	task_t            task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	ipc_port_t new_pport;

	/* pport is the same as kport at ipc_task_init() time */
	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
	assert(task->itk_self == task->itk_settable_self);
	assert(!task_is_a_corpse(task));

	/* only tasks opt in immovable control port can have pinned control port */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated disabled; enabled only after itk_self is switched over */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);

		/* label the old movable port so it substitutes to new_pport */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		itk_lock(task);
		task->itk_self = new_pport;
		itk_unlock(task);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
	}
}
308 
309 /*
310  *	Routine:	ipc_task_enable
311  *	Purpose:
312  *		Enable a task for IPC access.
313  *	Conditions:
314  *		Nothing locked.
315  */
316 void
ipc_task_enable(task_t task)317 ipc_task_enable(
318 	task_t          task)
319 {
320 	ipc_port_t kport;
321 	ipc_port_t nport;
322 	ipc_port_t iport;
323 	ipc_port_t rdport;
324 	ipc_port_t pport;
325 
326 	itk_lock(task);
327 
328 	assert(!task->ipc_active || task_is_a_corpse(task));
329 	task->ipc_active = true;
330 
331 	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
332 	if (kport != IP_NULL) {
333 		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
334 	}
335 	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
336 	if (nport != IP_NULL) {
337 		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
338 	}
339 	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
340 	if (iport != IP_NULL) {
341 		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
342 	}
343 	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
344 	if (rdport != IP_NULL) {
345 		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
346 	}
347 	pport = task->itk_self;
348 	if (pport != kport && pport != IP_NULL) {
349 		assert(task_is_immovable(task));
350 		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
351 	}
352 
353 	itk_unlock(task);
354 }
355 
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* detach the task from every port flavor that exists */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			/* distinct pport implies immovable control port */
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
433 
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state: strip all port
 *		fields under the itk lock, then release send rights and
 *		destroy the kobject ports outside the lock.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;
	ipc_port_t sself;
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* take ownership of the dyld notifier array; freed below */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears ikol_alt_port, must be done first */
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);

	/* destroy the kernel ports */
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
571 
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context. The
 *		task name port can remain the same - since it
 *		represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t          task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	/* allocate the replacement control port before taking the itk lock */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for corpse, needed to arm no-sender notification */
		task->itk_settable_self = IP_NULL;
	} else {
		task->itk_settable_self = ipc_port_make_send(new_kport);
	}

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_mq_lock(old_kport);
	/* clears ikol_alt_port */
	ipc_kobject_disable_locked(old_kport, IKOT_TASK_CONTROL);
	task->exec_token += 1;
	ip_mq_unlock(old_kport);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ip_mq_lock(old_pport);
		ipc_kobject_disable_locked(old_pport, IKOT_TASK_CONTROL);
		task->exec_token += 1;
		ip_mq_unlock(old_pport);
	}

	/* strip non-privileged exception ports; release their rights below */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			/* keep the corpse-notify port while a report is pending */
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	/* take ownership of the dyld notifier array; freed below */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
	}
	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
	}
}
732 
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state: allocate its control
 *		port (immovable for non-main threads of immovable tasks)
 *		and populate the read-only thread structure.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t      kport;
	ipc_port_t      pport;
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/* movable kport substitutes to the immovable pport via its label */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* naked send right; released in ipc_thread_terminate() */
	tro->tro_settable_self_port = ipc_port_make_send(kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
793 
/*
 *	Routine:	ipc_main_thread_set_immovable_pinned
 *	Purpose:
 *		Make the main thread's control port immovable and/or
 *		pinned, mirroring the owning task's control port options
 *		(which are not yet known at main thread creation time).
 *	Conditions:
 *		Thread is the main thread; its self port is still the
 *		movable control port from ipc_thread_init().
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated disabled; enabled after tro_self_port is switched over */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		/* label the old movable port so it substitutes to new_pport */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		thread_mtx_lock(thread);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
833 
/*
 * Wrapper type so the per-thread exception action table can be
 * allocated and freed as a single typed element (see
 * ipc_thread_init_exc_actions() / ipc_thread_destroy_exc_actions()).
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
837 
838 static void
ipc_thread_init_exc_actions(thread_ro_t tro)839 ipc_thread_init_exc_actions(thread_ro_t tro)
840 {
841 	struct exception_action *actions;
842 
843 	actions = kalloc_type(struct thread_init_exc_actions,
844 	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
845 
846 #if CONFIG_MACF
847 	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
848 		mac_exc_associate_action_label(&actions[i],
849 		    mac_exc_create_label(&actions[i]));
850 	}
851 #endif
852 
853 	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
854 }
855 
856 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)857 ipc_thread_destroy_exc_actions(thread_ro_t tro)
858 {
859 	struct exception_action *actions = tro->tro_exc_actions;
860 
861 	if (actions) {
862 #if CONFIG_MACF
863 		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
864 			mac_exc_free_action_label(actions + i);
865 		}
866 #endif
867 
868 		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
869 		struct thread_init_exc_actions *tr_actions =
870 		    (struct thread_init_exc_actions *)actions;
871 		kfree_type(struct thread_init_exc_actions, tr_actions);
872 	}
873 }
874 
875 static void
ipc_thread_ro_update_ports(thread_ro_t tro,const struct thread_ro * tro_tpl)876 ipc_thread_ro_update_ports(
877 	thread_ro_t             tro,
878 	const struct thread_ro *tro_tpl)
879 {
880 	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
881 	vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);
882 
883 	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
884 	    offsetof(struct thread_ro, tro_self_port) +
885 	    sizeof(struct ipc_port_t *));
886 	static_assert(offsetof(struct thread_ro, tro_ports) ==
887 	    offsetof(struct thread_ro, tro_self_port) +
888 	    2 * sizeof(struct ipc_port_t *));
889 	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
890 	    offs, &tro_tpl->tro_self_port, size);
891 }
892 
/*
 *	Routine:	ipc_thread_disable
 *	Purpose:
 *		Disable IPC access to a thread (the ports themselves are
 *		destroyed later, in ipc_thread_terminate()).
 *	Conditions:
 *		Thread locked.
 */
void
ipc_thread_disable(
	thread_t        thread)
{
	thread_ro_t     tro = get_thread_ro(thread);
	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t      pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	if (pport != kport && pport != IP_NULL) {
		/* distinct pport implies an immovable thread control port */
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
944 
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state: release send
 *		rights and exception actions under the thread mutex, then
 *		destroy the kobject ports outside it.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_terminate(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
	ipc_port_t pport = IP_NULL;
	ipc_port_t sport = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
	sport = tro->tro_settable_self_port;

	if (kport != IP_NULL) {
		/* release the naked send right made in ipc_thread_init() */
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}

		/* zero all port fields in the read-only thread structure */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
	}

	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
	}
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
	}
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1023 
1024 /*
1025  *	Routine:	ipc_thread_reset
1026  *	Purpose:
1027  *		Reset the IPC state for a given Mach thread when
1028  *		its task enters an elevated security context.
1029  *		All flavors of thread port and its exception ports have
1030  *		to be reset.  Its RPC reply port cannot have any
1031  *		rights outstanding, so it should be fine. The thread
1032  *		inspect and read port are set to NULL.
1033  *	Conditions:
1034  *		Nothing locked.
1035  */
1036 
void
ipc_thread_reset(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport, old_pport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	boolean_t thread_is_immovable;
	int i;

#if CONFIG_MACF
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);

	/* allocate the replacement control port up front, outside the mutex */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
	 * port is still movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * thread port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	thread_mtx_lock(thread);

	/* snapshot the ports being replaced */
	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

	old_sself = tro->tro_settable_self_port;
	old_pport = tro->tro_self_port;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
		if (thread_is_immovable) {
			ipc_kobject_dealloc_port(new_pport, 0,
			    IKOT_THREAD_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	/* re-enable lazy port creation for the freshly reset identity */
	thread->ipc_active = true;

	/* install the new control/self ports; read and inspect become NULL */
	struct thread_ro tpl = {
		.tro_self_port = new_pport,
		.tro_settable_self_port = ipc_port_make_send(new_kport),
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
	};

	ipc_thread_ro_update_ports(tro, &tpl);

	/* detach the old kobject ports from the thread while still locked */
	if (old_kport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}
	if (thread_is_immovable && old_pport != IP_NULL) {
		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* privileged action survives; nothing to release later */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (has_old_exc_actions) {
		/*
		 * Privileged slots were left IP_NULL above.
		 * NOTE(review): this relies on ipc_port_release_send tolerating
		 * invalid ports — confirm against its implementation.
		 */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
	}
	if (old_pport != old_kport && old_pport != IP_NULL) {
		assert(thread_is_immovable);
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1175 
1176 /*
1177  *	Routine:	retrieve_task_self_fast
1178  *	Purpose:
1179  *		Optimized version of retrieve_task_self,
1180  *		that only works for the current task.
1181  *
1182  *		Return a send right (possibly null/dead)
1183  *		for the task's user-visible self port.
1184  *	Conditions:
1185  *		Nothing locked.
1186  */
1187 
1188 static ipc_port_t
retrieve_task_self_fast(task_t task)1189 retrieve_task_self_fast(
1190 	task_t          task)
1191 {
1192 	ipc_port_t port = IP_NULL;
1193 
1194 	assert(task == current_task());
1195 
1196 	itk_lock(task);
1197 	assert(task->itk_self != IP_NULL);
1198 
1199 	if (task->itk_settable_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
1200 		/* no interposing, return the IMMOVABLE port */
1201 		port = ipc_port_make_send(task->itk_self);
1202 #if (DEBUG || DEVELOPMENT)
1203 		if (task_is_immovable(task)) {
1204 			assert(ip_is_immovable_send(port));
1205 			if (task_is_pinned(task)) {
1206 				/* pinned port is also immovable */
1207 				assert(ip_is_pinned(port));
1208 			}
1209 		} else {
1210 			assert(!ip_is_immovable_send(port));
1211 			assert(!ip_is_pinned(port));
1212 		}
1213 #endif
1214 	} else {
1215 		port = ipc_port_copy_send(task->itk_settable_self);
1216 	}
1217 	itk_unlock(task);
1218 
1219 	return port;
1220 }
1221 
1222 /*
1223  *	Routine:	mach_task_is_self
1224  *	Purpose:
1225  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1226  *      port is pointing to current_task.
1227  */
1228 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1229 mach_task_is_self(
1230 	task_t         task,
1231 	boolean_t     *is_self)
1232 {
1233 	if (task == TASK_NULL) {
1234 		return KERN_INVALID_ARGUMENT;
1235 	}
1236 
1237 	*is_self = (task == current_task());
1238 
1239 	return KERN_SUCCESS;
1240 }
1241 
1242 /*
1243  *	Routine:	retrieve_thread_self_fast
1244  *	Purpose:
1245  *		Return a send right (possibly null/dead)
1246  *		for the thread's user-visible self port.
1247  *
1248  *		Only works for the current thread.
1249  *
1250  *	Conditions:
1251  *		Nothing locked.
1252  */
1253 
ipc_port_t
retrieve_thread_self_fast(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(tro->tro_self_port != IP_NULL);

	if (tro->tro_settable_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		/* no interposing, return IMMOVABLE_PORT */
		port = ipc_port_make_send(tro->tro_self_port);
#if (DEBUG || DEVELOPMENT)
		/* sanity-check the immovable/pinned bits against the task policy */
		if (task_is_immovable(tro->tro_task)) {
			assert(ip_is_immovable_send(port));
			uint16_t tag = thread_get_tag(thread);
			/* terminated threads are unpinned */
			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
				assert(ip_is_pinned(port));
			} else {
				assert(!ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	} else {
		/* interposed: hand out a copy of the settable self port right */
		port = ipc_port_copy_send(tro->tro_settable_self_port);
	}

	thread_mtx_unlock(thread);

	return port;
}
1293 
1294 /*
1295  *	Routine:	task_self_trap [mach trap]
1296  *	Purpose:
1297  *		Give the caller send rights for his own task port.
1298  *	Conditions:
1299  *		Nothing locked.
1300  *	Returns:
1301  *		MACH_PORT_NULL if there are any resource failures
1302  *		or other errors.
1303  */
1304 
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	/* get a send right for the caller's self port and copy it out */
	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);

	/*
	 * When the right is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
	 * when it is pinned.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	if (sright == task->itk_self && sright->ip_pinned &&
	    MACH_PORT_VALID(name)) {
		itk_lock(task);
		if (task->ipc_active) {
			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
				/* first copyout: record the name */
				sright->ip_receiver_name = name;
			} else if (ip_get_receiver_name(sright) != name) {
				/* a pinned self port must always copy out to the same name */
				panic("mach_task_self() name changed");
			}
		}
		itk_unlock(task);
	}
	return name;
}
1341 
1342 /*
1343  *	Routine:	thread_self_trap [mach trap]
1344  *	Purpose:
1345  *		Give the caller send rights for his own thread port.
1346  *	Conditions:
1347  *		Nothing locked.
1348  *	Returns:
1349  *		MACH_PORT_NULL if there are any resource failures
1350  *		or other errors.
1351  */
1352 
1353 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1354 thread_self_trap(
1355 	__unused struct thread_self_trap_args *args)
1356 {
1357 	thread_t thread = current_thread();
1358 	ipc_space_t space = current_space();
1359 	ipc_port_t sright;
1360 	mach_port_name_t name;
1361 
1362 	sright = retrieve_thread_self_fast(thread);
1363 	name = ipc_port_copyout_send(sright, space);
1364 	return name;
1365 }
1366 
1367 /*
1368  *	Routine:	mach_reply_port [mach trap]
1369  *	Purpose:
1370  *		Allocate a port for the caller.
1371  *	Conditions:
1372  *		Nothing locked.
1373  *	Returns:
1374  *		MACH_PORT_NULL if there are any resource failures
1375  *		or other errors.
1376  */
1377 
1378 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1379 mach_reply_port(
1380 	__unused struct mach_reply_port_args *args)
1381 {
1382 	ipc_port_t port;
1383 	mach_port_name_t name;
1384 	kern_return_t kr;
1385 
1386 	kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1387 	    &name, &port);
1388 	if (kr == KERN_SUCCESS) {
1389 		ip_mq_unlock(port);
1390 	} else {
1391 		name = MACH_PORT_NULL;
1392 	}
1393 	return name;
1394 }
1395 
1396 /*
1397  *	Routine:	thread_get_special_reply_port [mach trap]
1398  *	Purpose:
1399  *		Allocate a special reply port for the calling thread.
1400  *	Conditions:
1401  *		Nothing locked.
1402  *	Returns:
1403  *		mach_port_name_t: send right & receive right for special reply port.
1404  *		MACH_PORT_NULL if there are any resource failures
1405  *		or other errors.
1406  */
1407 
1408 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1409 thread_get_special_reply_port(
1410 	__unused struct thread_get_special_reply_port_args *args)
1411 {
1412 	ipc_port_t port;
1413 	mach_port_name_t name;
1414 	kern_return_t kr;
1415 	thread_t thread = current_thread();
1416 	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
1417 	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
1418 
1419 	/* unbind the thread special reply port */
1420 	if (IP_VALID(thread->ith_special_reply_port)) {
1421 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1422 	}
1423 
1424 	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
1425 	if (kr == KERN_SUCCESS) {
1426 		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1427 		ip_mq_unlock(port);
1428 	} else {
1429 		name = MACH_PORT_NULL;
1430 	}
1431 	return name;
1432 }
1433 
1434 /*
1435  *	Routine:	thread_get_kernel_special_reply_port
1436  *	Purpose:
1437  *		Allocate a kernel special reply port for the calling thread.
1438  *	Conditions:
1439  *		Nothing locked.
1440  *	Returns:
1441  *		Creates and sets kernel special reply port.
1442  *		KERN_SUCCESS on Success.
1443  *		KERN_FAILURE on Failure.
1444  */
1445 
1446 kern_return_t
thread_get_kernel_special_reply_port(void)1447 thread_get_kernel_special_reply_port(void)
1448 {
1449 	ipc_port_t port = IPC_PORT_NULL;
1450 	thread_t thread = current_thread();
1451 
1452 	/* unbind the thread special reply port */
1453 	if (IP_VALID(thread->ith_kernel_reply_port)) {
1454 		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1455 	}
1456 
1457 	port = ipc_port_alloc_reply(); /*returns a reference on the port */
1458 	if (port != IPC_PORT_NULL) {
1459 		ip_mq_lock(port);
1460 		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
1461 		ip_mq_unlock(port);
1462 		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
1463 	}
1464 	return KERN_SUCCESS;
1465 }
1466 
1467 /*
1468  *	Routine:	ipc_port_bind_special_reply_port_locked
1469  *	Purpose:
1470  *		Bind the given port to current thread as a special reply port.
1471  *	Conditions:
1472  *		Port locked.
1473  *	Returns:
1474  *		None.
1475  */
1476 
1477 static void
ipc_port_bind_special_reply_port_locked(ipc_port_t port,ipc_reply_port_type_t reply_type)1478 ipc_port_bind_special_reply_port_locked(
1479 	ipc_port_t            port,
1480 	ipc_reply_port_type_t reply_type)
1481 {
1482 	thread_t thread = current_thread();
1483 	ipc_port_t *reply_portp;
1484 
1485 	if (reply_type == IRPT_USER) {
1486 		reply_portp = &thread->ith_special_reply_port;
1487 	} else {
1488 		reply_portp = &thread->ith_kernel_reply_port;
1489 	}
1490 
1491 	assert(*reply_portp == NULL);
1492 	assert(port->ip_specialreply);
1493 	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1494 
1495 	ip_reference(port);
1496 	*reply_portp = port;
1497 	port->ip_messages.imq_srp_owner_thread = thread;
1498 
1499 	ipc_special_reply_port_bits_reset(port);
1500 }
1501 
1502 /*
1503  *	Routine:	ipc_port_unbind_special_reply_port
1504  *	Purpose:
1505  *		Unbind the thread's special reply port.
1506  *		If the special port has threads waiting on turnstile,
1507 	 *		update its inheritor.
1508  *	Condition:
1509  *		Nothing locked.
1510  *	Returns:
1511  *		None.
1512  */
1513 static void
ipc_port_unbind_special_reply_port(thread_t thread,ipc_reply_port_type_t reply_type)1514 ipc_port_unbind_special_reply_port(
1515 	thread_t              thread,
1516 	ipc_reply_port_type_t reply_type)
1517 {
1518 	ipc_port_t *reply_portp;
1519 
1520 	if (reply_type == IRPT_USER) {
1521 		reply_portp = &thread->ith_special_reply_port;
1522 	} else {
1523 		reply_portp = &thread->ith_kernel_reply_port;
1524 	}
1525 
1526 	ipc_port_t special_reply_port = *reply_portp;
1527 
1528 	ip_mq_lock(special_reply_port);
1529 
1530 	*reply_portp = NULL;
1531 	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
1532 	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
1533 	/* port unlocked */
1534 
1535 	/* Destroy the port if its kernel special reply, else just release a ref */
1536 	if (reply_type == IRPT_USER) {
1537 		ip_release(special_reply_port);
1538 	} else {
1539 		ipc_port_dealloc_reply(special_reply_port);
1540 	}
1541 	return;
1542 }
1543 
1544 /*
1545  *	Routine:	thread_dealloc_kernel_special_reply_port
1546  *	Purpose:
1547  *		Unbind the thread's kernel special reply port.
1548  *		If the special port has threads waiting on turnstile,
1549 	 *		update its inheritor.
1550  *	Condition:
1551  *		Called on current thread or a terminated thread.
1552  *	Returns:
1553  *		None.
1554  */
1555 
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* unbind (and destroy) the thread's kernel special reply port */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1561 
1562 /*
1563  *	Routine:	thread_get_special_port [kernel call]
1564  *	Purpose:
1565  *		Clones a send right for one of the thread's
1566  *		special ports.
1567  *	Conditions:
1568  *		Nothing locked.
1569  *	Returns:
1570  *		KERN_SUCCESS		Extracted a send right.
1571  *		KERN_INVALID_ARGUMENT	The thread is null.
1572  *		KERN_FAILURE		The thread is dead.
1573  *		KERN_INVALID_ARGUMENT	Invalid special port.
1574  */
1575 
1576 kern_return_t
1577 thread_get_special_port(
1578 	thread_inspect_t         thread,
1579 	int                      which,
1580 	ipc_port_t              *portp);
1581 
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	/* the requesting flavor caps which special ports may be fetched */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		/* copy the settable self send right under the mutex */
		port = ipc_port_copy_send(tro->tro_settable_self_port);
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		/* conversion may create the port lazily; done outside the mutex */
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1627 
kern_return_t
thread_get_special_port(
	thread_inspect_t         thread,
	int                      which,
	ipc_port_t              *portp)
{
	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* in-kernel callers always act with the full CONTROL flavor */
	return thread_get_special_port_internal(thread, get_thread_ro(thread),
	           which, portp, THREAD_FLAVOR_CONTROL);
}
1641 
1642 static ipc_port_t
thread_get_non_substituted_self(thread_t thread,thread_ro_t tro)1643 thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
1644 {
1645 	ipc_port_t port = IP_NULL;
1646 
1647 	thread_mtx_lock(thread);
1648 	port = ipc_port_make_send(tro->tro_settable_self_port);
1649 	thread_mtx_unlock(thread);
1650 
1651 	/* takes ownership of the send right */
1652 	return ipc_kobject_alloc_subst_once(port);
1653 }
1654 
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* takes a thread reference; released via thread_deallocate below */
	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			/* hand out a movable substitute-once wrapper, not the raw port */
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* derive the caller's privilege flavor from the supplied port's kobject type */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	thread_deallocate(thread);
	return kr;
}
1711 
1712 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1713 special_port_allowed_with_thread_flavor(
1714 	int                  which,
1715 	mach_thread_flavor_t flavor)
1716 {
1717 	switch (flavor) {
1718 	case THREAD_FLAVOR_CONTROL:
1719 		return KERN_SUCCESS;
1720 
1721 	case THREAD_FLAVOR_READ:
1722 
1723 		switch (which) {
1724 		case THREAD_READ_PORT:
1725 		case THREAD_INSPECT_PORT:
1726 			return KERN_SUCCESS;
1727 		default:
1728 			return KERN_INVALID_CAPABILITY;
1729 		}
1730 
1731 	case THREAD_FLAVOR_INSPECT:
1732 
1733 		switch (which) {
1734 		case THREAD_INSPECT_PORT:
1735 			return KERN_SUCCESS;
1736 		default:
1737 			return KERN_INVALID_CAPABILITY;
1738 		}
1739 
1740 	default:
1741 		return KERN_INVALID_CAPABILITY;
1742 	}
1743 }
1744 
1745 /*
1746  *	Routine:	thread_set_special_port [kernel call]
1747  *	Purpose:
1748  *		Changes one of the thread's special ports,
1749  *		setting it to the supplied send right.
1750  *	Conditions:
1751  *		Nothing locked.  If successful, consumes
1752  *		the supplied send right.
1753  *	Returns:
1754  *		KERN_SUCCESS            Changed the special port.
1755  *		KERN_INVALID_ARGUMENT   The thread is null.
1756  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1757  *		KERN_FAILURE            The thread is dead.
1758  *		KERN_INVALID_ARGUMENT   Invalid special port.
1759  *		KERN_NO_ACCESS          Restricted access to set port.
1760  */
1761 
kern_return_t
thread_set_special_port(
	thread_t                thread,
	int                     which,
	ipc_port_t              port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro = NULL;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* immovable rights may never be stashed as a special port */
	if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* swap in the new right; the old one is released below */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			/* drop the displaced send right outside the mutex */
			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* silence unused-variable warnings when CSR is compiled out */
		(void)old;
		(void)result;
		(void)tro;
#endif
		return KERN_NO_ACCESS;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1818 
1819 /*
1820  *	Routine:	task_get_special_port [kernel call]
1821  *	Purpose:
1822  *		Clones a send right for one of the task's
1823  *		special ports.
1824  *	Conditions:
1825  *		Nothing locked.
1826  *	Returns:
1827  *		KERN_SUCCESS		    Extracted a send right.
1828  *		KERN_INVALID_ARGUMENT	The task is null.
1829  *		KERN_FAILURE		    The task/space is dead.
1830  *		KERN_INVALID_ARGUMENT	Invalid special port.
1831  */
1832 
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t        flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* the requesting flavor caps which special ports may be fetched */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		/* copy the settable self send right under the itk lock */
		port = ipc_port_copy_send(task->itk_settable_self);
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		/* conversion may create the port lazily; done outside the lock */
		itk_unlock(task);
		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_port_make_send(task->itk_task_ports[TASK_FLAVOR_NAME]);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = ipc_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1913 
1914 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t      *portp)
{
	/* in-kernel callers always act with the full CONTROL flavor */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
1923 
1924 static ipc_port_t
task_get_non_substituted_self(task_t task)1925 task_get_non_substituted_self(task_t task)
1926 {
1927 	ipc_port_t port = IP_NULL;
1928 
1929 	itk_lock(task);
1930 	port = ipc_port_make_send(task->itk_settable_self);
1931 	itk_unlock(task);
1932 
1933 	/* takes ownership of the send right */
1934 	return ipc_kobject_alloc_subst_once(port);
1935 }
1936 
1937 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* takes a task reference; released via task_deallocate below */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	/* MACF hook may veto the fetch before any other work */
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			/* hand out a movable substitute-once wrapper, not the raw port */
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* derive the caller's privilege flavor from the supplied port's kobject type */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	task_deallocate(task);
	return kr;
}
1999 
2000 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2001 special_port_allowed_with_task_flavor(
2002 	int                which,
2003 	mach_task_flavor_t flavor)
2004 {
2005 	switch (flavor) {
2006 	case TASK_FLAVOR_CONTROL:
2007 		return KERN_SUCCESS;
2008 
2009 	case TASK_FLAVOR_READ:
2010 
2011 		switch (which) {
2012 		case TASK_READ_PORT:
2013 		case TASK_INSPECT_PORT:
2014 		case TASK_NAME_PORT:
2015 			return KERN_SUCCESS;
2016 		default:
2017 			return KERN_INVALID_CAPABILITY;
2018 		}
2019 
2020 	case TASK_FLAVOR_INSPECT:
2021 
2022 		switch (which) {
2023 		case TASK_INSPECT_PORT:
2024 		case TASK_NAME_PORT:
2025 			return KERN_SUCCESS;
2026 		default:
2027 			return KERN_INVALID_CAPABILITY;
2028 		}
2029 
2030 	default:
2031 		return KERN_INVALID_CAPABILITY;
2032 	}
2033 }
2034 
2035 /*
2036  *	Routine:	task_set_special_port [MIG call]
2037  *	Purpose:
2038  *		Changes one of the task's special ports,
2039  *		setting it to the supplied send right.
2040  *	Conditions:
2041  *		Nothing locked.  If successful, consumes
2042  *		the supplied send right.
2043  *	Returns:
2044  *		KERN_SUCCESS		    Changed the special port.
2045  *		KERN_INVALID_ARGUMENT	The task is null.
2046  *      KERN_INVALID_RIGHT      Port is marked as immovable.
2047  *		KERN_FAILURE		    The task/space is dead.
2048  *		KERN_INVALID_ARGUMENT	Invalid special port.
2049  *      KERN_NO_ACCESS		    Restricted access to set port.
2050  */
2051 
2052 kern_return_t
task_set_special_port_from_user(task_t task,int which,ipc_port_t port)2053 task_set_special_port_from_user(
2054 	task_t          task,
2055 	int             which,
2056 	ipc_port_t      port)
2057 {
2058 #if CONFIG_MACF
2059 	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
2060 		return KERN_DENIED;
2061 	}
2062 #endif
2063 
2064 	return task_set_special_port(task, which, port);
2065 }
2066 
2067 /* Kernel call only. MIG uses task_set_special_port_from_user() */
2068 kern_return_t
task_set_special_port(task_t task,int which,ipc_port_t port)2069 task_set_special_port(
2070 	task_t          task,
2071 	int             which,
2072 	ipc_port_t      port)
2073 {
2074 	if (task == TASK_NULL) {
2075 		return KERN_INVALID_ARGUMENT;
2076 	}
2077 
2078 	if (task_is_driver(current_task())) {
2079 		return KERN_NO_ACCESS;
2080 	}
2081 
2082 	if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
2083 		return KERN_INVALID_RIGHT;
2084 	}
2085 
2086 	switch (which) {
2087 	case TASK_KERNEL_PORT:
2088 	case TASK_HOST_PORT:
2089 #if CONFIG_CSR
2090 		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
2091 			/*
2092 			 * Only allow setting of task-self / task-host
2093 			 * special ports from user-space when SIP is
2094 			 * disabled (for Mach-on-Mach emulation).
2095 			 */
2096 			break;
2097 		}
2098 #endif
2099 		return KERN_NO_ACCESS;
2100 	default:
2101 		break;
2102 	}
2103 
2104 	return task_set_special_port_internal(task, which, port);
2105 }
2106 
2107 /*
2108  *	Routine:	task_set_special_port_internal
2109  *	Purpose:
2110  *		Changes one of the task's special ports,
2111  *		setting it to the supplied send right.
2112  *	Conditions:
2113  *		Nothing locked.  If successful, consumes
2114  *		the supplied send right.
2115  *	Returns:
2116  *		KERN_SUCCESS		Changed the special port.
2117  *		KERN_INVALID_ARGUMENT	The task is null.
2118  *		KERN_FAILURE		The task/space is dead.
2119  *		KERN_INVALID_ARGUMENT	Invalid special port.
2120  *      KERN_NO_ACCESS		Restricted access to overwrite port.
2121  */
2122 
2123 kern_return_t
task_set_special_port_internal(task_t task,int which,ipc_port_t port)2124 task_set_special_port_internal(
2125 	task_t          task,
2126 	int             which,
2127 	ipc_port_t      port)
2128 {
2129 	ipc_port_t old = IP_NULL;
2130 	kern_return_t rc = KERN_INVALID_ARGUMENT;
2131 
2132 	if (task == TASK_NULL) {
2133 		goto out;
2134 	}
2135 
2136 	itk_lock(task);
2137 	if (!task->ipc_active) {
2138 		rc = KERN_FAILURE;
2139 		goto out_unlock;
2140 	}
2141 
2142 	switch (which) {
2143 	case TASK_KERNEL_PORT:
2144 		old = task->itk_settable_self;
2145 		task->itk_settable_self = port;
2146 		break;
2147 
2148 	case TASK_HOST_PORT:
2149 		old = task->itk_host;
2150 		task->itk_host = port;
2151 		break;
2152 
2153 	case TASK_BOOTSTRAP_PORT:
2154 		old = task->itk_bootstrap;
2155 		task->itk_bootstrap = port;
2156 		break;
2157 
2158 	/* Never allow overwrite of the task access port */
2159 	case TASK_ACCESS_PORT:
2160 		if (IP_VALID(task->itk_task_access)) {
2161 			rc = KERN_NO_ACCESS;
2162 			goto out_unlock;
2163 		}
2164 		task->itk_task_access = port;
2165 		break;
2166 
2167 	case TASK_DEBUG_CONTROL_PORT:
2168 		old = task->itk_debug_control;
2169 		task->itk_debug_control = port;
2170 		break;
2171 
2172 #if CONFIG_PROC_RESOURCE_LIMITS
2173 	case TASK_RESOURCE_NOTIFY_PORT:
2174 		old = task->itk_resource_notify;
2175 		task->itk_resource_notify = port;
2176 		break;
2177 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
2178 
2179 	default:
2180 		rc = KERN_INVALID_ARGUMENT;
2181 		goto out_unlock;
2182 	}/* switch */
2183 
2184 	rc = KERN_SUCCESS;
2185 
2186 out_unlock:
2187 	itk_unlock(task);
2188 
2189 	if (IP_VALID(old)) {
2190 		ipc_port_release_send(old);
2191 	}
2192 out:
2193 	return rc;
2194 }
2195 /*
2196  *	Routine:	mach_ports_register [kernel call]
2197  *	Purpose:
2198  *		Stash a handful of port send rights in the task.
2199  *		Child tasks will inherit these rights, but they
2200  *		must use mach_ports_lookup to acquire them.
2201  *
2202  *		The rights are supplied in a (wired) kalloc'd segment.
2203  *		Rights which aren't supplied are assumed to be null.
2204  *	Conditions:
2205  *		Nothing locked.  If successful, consumes
2206  *		the supplied rights and memory.
2207  *	Returns:
2208  *		KERN_SUCCESS		    Stashed the port rights.
2209  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2210  *		KERN_INVALID_ARGUMENT	The task is null.
2211  *		KERN_INVALID_ARGUMENT	The task is dead.
2212  *		KERN_INVALID_ARGUMENT	The memory param is null.
2213  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2214  */
2215 
2216 kern_return_t
mach_ports_register(task_t task,mach_port_array_t memory,mach_msg_type_number_t portsCnt)2217 mach_ports_register(
2218 	task_t                  task,
2219 	mach_port_array_t       memory,
2220 	mach_msg_type_number_t  portsCnt)
2221 {
2222 	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
2223 	unsigned int i;
2224 
2225 	if ((task == TASK_NULL) ||
2226 	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
2227 	    (portsCnt && memory == NULL)) {
2228 		return KERN_INVALID_ARGUMENT;
2229 	}
2230 
2231 	/*
2232 	 *	Pad the port rights with nulls.
2233 	 */
2234 
2235 	for (i = 0; i < portsCnt; i++) {
2236 		ports[i] = memory[i];
2237 		if (IP_VALID(ports[i]) && (ports[i]->ip_immovable_receive || ports[i]->ip_immovable_send)) {
2238 			return KERN_INVALID_RIGHT;
2239 		}
2240 	}
2241 	for (; i < TASK_PORT_REGISTER_MAX; i++) {
2242 		ports[i] = IP_NULL;
2243 	}
2244 
2245 	itk_lock(task);
2246 	if (!task->ipc_active) {
2247 		itk_unlock(task);
2248 		return KERN_INVALID_ARGUMENT;
2249 	}
2250 
2251 	/*
2252 	 *	Replace the old send rights with the new.
2253 	 *	Release the old rights after unlocking.
2254 	 */
2255 
2256 	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2257 		ipc_port_t old;
2258 
2259 		old = task->itk_registered[i];
2260 		task->itk_registered[i] = ports[i];
2261 		ports[i] = old;
2262 	}
2263 
2264 	itk_unlock(task);
2265 
2266 	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2267 		if (IP_VALID(ports[i])) {
2268 			ipc_port_release_send(ports[i]);
2269 		}
2270 	}
2271 
2272 	/*
2273 	 *	Now that the operation is known to be successful,
2274 	 *	we can free the memory.
2275 	 */
2276 
2277 	if (portsCnt != 0) {
2278 		kfree_type(mach_port_t, portsCnt, memory);
2279 	}
2280 
2281 	return KERN_SUCCESS;
2282 }
2283 
2284 /*
2285  *	Routine:	mach_ports_lookup [kernel call]
2286  *	Purpose:
2287  *		Retrieves (clones) the stashed port send rights.
2288  *	Conditions:
2289  *		Nothing locked.  If successful, the caller gets
2290  *		rights and memory.
2291  *	Returns:
2292  *		KERN_SUCCESS		Retrieved the send rights.
2293  *		KERN_INVALID_ARGUMENT	The task is null.
2294  *		KERN_INVALID_ARGUMENT	The task is dead.
2295  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2296  */
2297 
2298 kern_return_t
mach_ports_lookup(task_t task,mach_port_array_t * portsp,mach_msg_type_number_t * portsCnt)2299 mach_ports_lookup(
2300 	task_t                  task,
2301 	mach_port_array_t       *portsp,
2302 	mach_msg_type_number_t  *portsCnt)
2303 {
2304 	ipc_port_t *ports;
2305 
2306 	if (task == TASK_NULL) {
2307 		return KERN_INVALID_ARGUMENT;
2308 	}
2309 
2310 	ports = kalloc_type(ipc_port_t, TASK_PORT_REGISTER_MAX,
2311 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
2312 
2313 	itk_lock(task);
2314 	if (!task->ipc_active) {
2315 		itk_unlock(task);
2316 		kfree_type(ipc_port_t, TASK_PORT_REGISTER_MAX, ports);
2317 
2318 		return KERN_INVALID_ARGUMENT;
2319 	}
2320 
2321 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2322 		ports[i] = ipc_port_copy_send(task->itk_registered[i]);
2323 	}
2324 
2325 	itk_unlock(task);
2326 
2327 	*portsp = ports;
2328 	*portsCnt = TASK_PORT_REGISTER_MAX;
2329 	return KERN_SUCCESS;
2330 }
2331 
2332 static kern_return_t
task_conversion_eval_internal(task_t caller,task_t victim,boolean_t out_trans)2333 task_conversion_eval_internal(task_t caller, task_t victim, boolean_t out_trans)
2334 {
2335 	boolean_t allow_kern_task_out_trans;
2336 	boolean_t allow_kern_task;
2337 
2338 #if defined(SECURE_KERNEL)
2339 	/*
2340 	 * On secure kernel platforms, reject converting kernel task/threads to port
2341 	 * and sending it to user space.
2342 	 */
2343 	allow_kern_task_out_trans = FALSE;
2344 #else
2345 	allow_kern_task_out_trans = TRUE;
2346 #endif
2347 
2348 	allow_kern_task = out_trans && allow_kern_task_out_trans;
2349 
2350 	/*
2351 	 * Tasks are allowed to resolve their own task ports, and the kernel is
2352 	 * allowed to resolve anyone's task port.
2353 	 */
2354 	if (caller == kernel_task) {
2355 		return KERN_SUCCESS;
2356 	}
2357 
2358 	if (caller == victim) {
2359 		return KERN_SUCCESS;
2360 	}
2361 
2362 	/*
2363 	 * Only the kernel can can resolve the kernel's task port. We've established
2364 	 * by this point that the caller is not kernel_task.
2365 	 */
2366 	if (victim == TASK_NULL || (victim == kernel_task && !allow_kern_task)) {
2367 		return KERN_INVALID_SECURITY;
2368 	}
2369 
2370 	task_require(victim);
2371 
2372 #if !defined(XNU_TARGET_OS_OSX)
2373 	/*
2374 	 * On platforms other than macOS, only a platform binary can resolve the task port
2375 	 * of another platform binary.
2376 	 */
2377 	if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
2378 #if SECURE_KERNEL
2379 		return KERN_INVALID_SECURITY;
2380 #else
2381 		if (cs_relax_platform_task_ports) {
2382 			return KERN_SUCCESS;
2383 		} else {
2384 			return KERN_INVALID_SECURITY;
2385 		}
2386 #endif /* SECURE_KERNEL */
2387 	}
2388 #endif /* !defined(XNU_TARGET_OS_OSX) */
2389 
2390 	return KERN_SUCCESS;
2391 }
2392 
/* Evaluate whether `caller` may resolve `victim`'s task port (no out-translation). */
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
	return task_conversion_eval_internal(caller, victim, FALSE);
}
2398 
/* Same evaluation, but for out-translation (sending a port to user space). */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim)
{
	return task_conversion_eval_internal(caller, victim, TRUE);
}
2404 
2405 /*
2406  *	Routine:	task_port_kotype_valid_for_flavor
2407  *	Purpose:
2408  *		Check whether the kobject type of a mach port
2409  *      is valid for conversion to a task of given flavor.
2410  */
2411 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2412 task_port_kotype_valid_for_flavor(
2413 	natural_t          kotype,
2414 	mach_task_flavor_t flavor)
2415 {
2416 	switch (flavor) {
2417 	/* Ascending capability */
2418 	case TASK_FLAVOR_NAME:
2419 		if (kotype == IKOT_TASK_NAME) {
2420 			return TRUE;
2421 		}
2422 		OS_FALLTHROUGH;
2423 	case TASK_FLAVOR_INSPECT:
2424 		if (kotype == IKOT_TASK_INSPECT) {
2425 			return TRUE;
2426 		}
2427 		OS_FALLTHROUGH;
2428 	case TASK_FLAVOR_READ:
2429 		if (kotype == IKOT_TASK_READ) {
2430 			return TRUE;
2431 		}
2432 		OS_FALLTHROUGH;
2433 	case TASK_FLAVOR_CONTROL:
2434 		if (kotype == IKOT_TASK_CONTROL) {
2435 			return TRUE;
2436 		}
2437 		break;
2438 	default:
2439 		panic("strange task flavor");
2440 	}
2441 
2442 	return FALSE;
2443 }
2444 
2445 /*
2446  *	Routine: convert_port_to_task_with_flavor_locked_noref
2447  *	Purpose:
2448  *		Internal helper routine to convert from a locked port to a task.
2449  *	Args:
2450  *		port   - target port
2451  *		flavor - requested task port flavor
2452  *		options - port translation options
2453  *	Conditions:
2454  *		Port is locked and active.
2455  */
2456 static task_t
convert_port_to_task_with_flavor_locked_noref(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2457 convert_port_to_task_with_flavor_locked_noref(
2458 	ipc_port_t              port,
2459 	mach_task_flavor_t      flavor,
2460 	port_intrans_options_t  options)
2461 {
2462 	ipc_kobject_type_t type = ip_kotype(port);
2463 	task_t task;
2464 
2465 	ip_mq_lock_held(port);
2466 	require_ip_active(port);
2467 
2468 	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
2469 		return TASK_NULL;
2470 	}
2471 
2472 	task = ipc_kobject_get_locked(port, type);
2473 	if (task == TASK_NULL) {
2474 		return TASK_NULL;
2475 	}
2476 
2477 	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
2478 		assert(flavor == TASK_FLAVOR_CONTROL);
2479 		return TASK_NULL;
2480 	}
2481 
2482 	/* TODO: rdar://42389187 */
2483 	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
2484 		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
2485 	}
2486 
2487 	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
2488 	    task_conversion_eval(current_task(), task)) {
2489 		return TASK_NULL;
2490 	}
2491 
2492 	return task;
2493 }
2494 
2495 /*
2496  *	Routine: convert_port_to_task_with_flavor_locked
2497  *	Purpose:
2498  *		Internal helper routine to convert from a locked port to a task.
2499  *	Args:
2500  *		port   - target port
2501  *		flavor - requested task port flavor
2502  *		options - port translation options
2503  *		grp    - task reference group
2504  *	Conditions:
2505  *		Port is locked and active.
2506  *		Produces task ref or TASK_NULL.
2507  */
2508 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2509 convert_port_to_task_with_flavor_locked(
2510 	ipc_port_t              port,
2511 	mach_task_flavor_t      flavor,
2512 	port_intrans_options_t  options,
2513 	task_grp_t              grp)
2514 {
2515 	task_t task;
2516 
2517 	task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2518 	    options);
2519 
2520 	if (task != TASK_NULL) {
2521 		task_reference_grp(task, grp);
2522 	}
2523 
2524 	return task;
2525 }
2526 
2527 /*
2528  *	Routine:	convert_port_to_task_with_exec_token
2529  *	Purpose:
2530  *		Convert from a port to a task and return
2531  *		the exec token stored in the task.
2532  *		Doesn't consume the port ref; produces a task ref,
2533  *		which may be null.
2534  *	Conditions:
2535  *		Nothing locked.
2536  */
2537 task_t
convert_port_to_task_with_exec_token(ipc_port_t port,uint32_t * exec_token)2538 convert_port_to_task_with_exec_token(
2539 	ipc_port_t              port,
2540 	uint32_t                *exec_token)
2541 {
2542 	task_t task = TASK_NULL;
2543 	task_t self = current_task();
2544 
2545 	if (IP_VALID(port)) {
2546 		if (port == self->itk_self) {
2547 			if (exec_token) {
2548 				/*
2549 				 * This is ok to do without a lock,
2550 				 * from the perspective of `current_task()`
2551 				 * this token never changes, except
2552 				 * for the thread doing the exec.
2553 				 */
2554 				*exec_token = self->exec_token;
2555 			}
2556 			task_reference_grp(self, TASK_GRP_KERNEL);
2557 			return self;
2558 		}
2559 
2560 		ip_mq_lock(port);
2561 		if (ip_active(port)) {
2562 			task = convert_port_to_task_with_flavor_locked(port,
2563 			    TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE,
2564 			    TASK_GRP_KERNEL);
2565 		}
2566 		ip_mq_unlock(port);
2567 	}
2568 
2569 	if (task) {
2570 		*exec_token = task->exec_token;
2571 	}
2572 
2573 	return task;
2574 }
2575 
2576 /*
2577  *	Routine:	convert_port_to_task_with_flavor
2578  *	Purpose:
2579  *		Internal helper for converting from a port to a task.
2580  *		Doesn't consume the port ref; produces a task ref,
2581  *		which may be null.
2582  *	Args:
2583  *		port   - target port
2584  *		flavor - requested task port flavor
2585  *		options - port translation options
2586  *		grp    - task reference group
2587  *	Conditions:
2588  *		Nothing locked.
2589  */
2590 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2591 convert_port_to_task_with_flavor(
2592 	ipc_port_t         port,
2593 	mach_task_flavor_t flavor,
2594 	port_intrans_options_t options,
2595 	task_grp_t         grp)
2596 {
2597 	task_t task = TASK_NULL;
2598 	task_t self = current_task();
2599 
2600 	if (IP_VALID(port)) {
2601 		if (port == self->itk_self) {
2602 			task_reference_grp(self, grp);
2603 			return self;
2604 		}
2605 
2606 		ip_mq_lock(port);
2607 		if (ip_active(port)) {
2608 			task = convert_port_to_task_with_flavor_locked(port,
2609 			    flavor, options, grp);
2610 		}
2611 		ip_mq_unlock(port);
2612 	}
2613 
2614 	return task;
2615 }
2616 
/* Control-port -> task ref, kernel accounting group. */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}
2624 
/* Control-port -> task ref, MIG accounting group. */
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}
2632 
/* Read-flavor port -> task ref; corpse tasks allowed. */
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2640 
/* Read-flavor port -> task ref; skips task_conversion_eval(); corpses allowed. */
static task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2648 
/* Read-flavor port -> task ref, MIG accounting group; corpses allowed. */
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2656 
/*
 * Inspect-flavor port -> task ref; corpses allowed.
 * Inspect flavor always skips task_conversion_eval() — the locked helper
 * asserts PORT_INTRANS_SKIP_TASK_EVAL for this flavor (see rdar://42389187).
 */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2664 
/* Same options as convert_port_to_task_inspect(); kept as a separate entry point. */
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2672 
/* Inspect-flavor port -> task ref, MIG accounting group. */
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2680 
/* Name-flavor port -> task ref; weakest flavor, no security eval. */
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2688 
/* Name-flavor port -> task ref, MIG accounting group. */
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2696 
2697 /*
2698  *	Routine:	convert_port_to_task_policy
2699  *	Purpose:
2700  *		Convert from a port to a task.
2701  *		Doesn't consume the port ref; produces a task ref,
2702  *		which may be null.
2703  *		If the port is being used with task_port_set(), any task port
2704  *		type other than TASK_CONTROL requires an entitlement. If the
2705  *		port is being used with task_port_get(), TASK_NAME requires an
2706  *		entitlement.
2707  *	Conditions:
2708  *		Nothing locked.
2709  */
2710 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2711 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2712 {
2713 	task_t task = TASK_NULL;
2714 	task_t ctask = current_task();
2715 
2716 	if (!IP_VALID(port)) {
2717 		return TASK_NULL;
2718 	}
2719 
2720 	task = set ?
2721 	    convert_port_to_task_mig(port) :
2722 	    convert_port_to_task_inspect_mig(port);
2723 
2724 	if (task == TASK_NULL &&
2725 	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2726 		task = convert_port_to_task_name_mig(port);
2727 	}
2728 
2729 	if (task_conversion_eval(ctask, task) != KERN_SUCCESS) {
2730 		task_deallocate_grp(task, TASK_GRP_MIG);
2731 		return TASK_NULL;
2732 	}
2733 
2734 	return task;
2735 }
2736 
/* Port -> task ref for task_policy_set() MIG calls. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}
2742 
/* Port -> task ref for task_policy_get() MIG calls. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2748 
2749 /*
2750  *	Routine:	convert_port_to_task_suspension_token
2751  *	Purpose:
2752  *		Convert from a port to a task suspension token.
2753  *		Doesn't consume the port ref; produces a suspension token ref,
2754  *		which may be null.
2755  *	Conditions:
2756  *		Nothing locked.
2757  */
2758 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2759 convert_port_to_task_suspension_token_grp(
2760 	ipc_port_t              port,
2761 	task_grp_t              grp)
2762 {
2763 	task_suspension_token_t task = TASK_NULL;
2764 
2765 	if (IP_VALID(port)) {
2766 		ip_mq_lock(port);
2767 		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2768 		if (task != TASK_NULL) {
2769 			task_reference_grp(task, grp);
2770 		}
2771 		ip_mq_unlock(port);
2772 	}
2773 
2774 	return task;
2775 }
2776 
/* Suspension-token translation, external accounting group. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}
2783 
/* Suspension-token translation, MIG accounting group. */
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}
2790 
/* Suspension-token translation, kernel accounting group. */
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2797 
2798 /*
2799  *	Routine:	convert_port_to_space_with_flavor
2800  *	Purpose:
2801  *		Internal helper for converting from a port to a space.
2802  *		Doesn't consume the port ref; produces a space ref,
2803  *		which may be null.
2804  *	Args:
2805  *		port   - target port
2806  *		flavor - requested ipc space flavor
2807  *		options - port translation options
2808  *	Conditions:
2809  *		Nothing locked.
2810  */
2811 static ipc_space_t
convert_port_to_space_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2812 convert_port_to_space_with_flavor(
2813 	ipc_port_t         port,
2814 	mach_task_flavor_t flavor,
2815 	port_intrans_options_t options)
2816 {
2817 	ipc_space_t space = IPC_SPACE_NULL;
2818 	task_t task = TASK_NULL;
2819 
2820 	assert(flavor != TASK_FLAVOR_NAME);
2821 
2822 	if (IP_VALID(port)) {
2823 		ip_mq_lock(port);
2824 		if (ip_active(port)) {
2825 			task = convert_port_to_task_with_flavor_locked_noref(port,
2826 			    flavor, options);
2827 		}
2828 
2829 		/*
2830 		 * Because we hold the port lock and we could resolve a task,
2831 		 * even if we're racing with task termination, we know that
2832 		 * ipc_task_disable() hasn't been called yet.
2833 		 *
2834 		 * We try to sniff if `task->active` flipped to accelerate
2835 		 * resolving the race, but this isn't load bearing.
2836 		 *
2837 		 * The space will be torn down _after_ ipc_task_disable() returns,
2838 		 * so it is valid to take a reference on it now.
2839 		 */
2840 		if (task && task->active) {
2841 			space = task->itk_space;
2842 			is_reference(space);
2843 		}
2844 		ip_mq_unlock(port);
2845 	}
2846 
2847 	return space;
2848 }
2849 
/* Control-flavor port -> IPC space ref. */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2857 
/* Read-flavor port -> IPC space ref; corpses allowed. */
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2865 
/* Read-flavor port -> IPC space ref; skips task_conversion_eval(). */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2873 
/* Inspect-flavor port -> IPC space ref; no security eval (inspect flavor). */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2881 
2882 /*
2883  *	Routine:	convert_port_to_map_with_flavor
2884  *	Purpose:
2885  *		Internal helper for converting from a port to a map.
2886  *		Doesn't consume the port ref; produces a map ref,
2887  *		which may be null.
2888  *	Args:
2889  *		port   - target port
2890  *		flavor - requested vm map flavor
2891  *		options - port translation options
2892  *	Conditions:
2893  *		Nothing locked.
2894  */
2895 static vm_map_t
convert_port_to_map_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2896 convert_port_to_map_with_flavor(
2897 	ipc_port_t         port,
2898 	mach_task_flavor_t flavor,
2899 	port_intrans_options_t options)
2900 {
2901 	task_t task = TASK_NULL;
2902 	vm_map_t map = VM_MAP_NULL;
2903 
2904 	/* there is no vm_map_inspect_t routines at the moment. */
2905 	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
2906 	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);
2907 
2908 	if (IP_VALID(port)) {
2909 		ip_mq_lock(port);
2910 
2911 		if (ip_active(port)) {
2912 			task = convert_port_to_task_with_flavor_locked_noref(port,
2913 			    flavor, options);
2914 		}
2915 
2916 		/*
2917 		 * Because we hold the port lock and we could resolve a task,
2918 		 * even if we're racing with task termination, we know that
2919 		 * ipc_task_disable() hasn't been called yet.
2920 		 *
2921 		 * We try to sniff if `task->active` flipped to accelerate
2922 		 * resolving the race, but this isn't load bearing.
2923 		 *
2924 		 * The vm map will be torn down _after_ ipc_task_disable() returns,
2925 		 * so it is valid to take a reference on it now.
2926 		 */
2927 		if (task && task->active) {
2928 			map = task->map;
2929 
2930 			if (map->pmap == kernel_pmap) {
2931 				panic("userspace has control access to a "
2932 				    "kernel map %p through task %p", map, task);
2933 			}
2934 
2935 			pmap_require(map->pmap);
2936 			vm_map_reference(map);
2937 		}
2938 
2939 		ip_mq_unlock(port);
2940 	}
2941 
2942 	return map;
2943 }
2944 
/* Control-flavor port -> vm_map ref. */
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2952 
/* Read-flavor port -> vm_map ref; corpses allowed. */
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2960 
/* Intentional stub: no inspect-flavor vm_map translation exists yet. */
vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* there is no vm_map_inspect_t routines at the moment. */
	return VM_MAP_INSPECT_NULL;
}
2968 
2969 /*
2970  *	Routine:	thread_port_kotype_valid_for_flavor
2971  *	Purpose:
2972  *		Check whether the kobject type of a mach port
2973  *      is valid for conversion to a thread of given flavor.
2974  */
2975 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)2976 thread_port_kotype_valid_for_flavor(
2977 	natural_t            kotype,
2978 	mach_thread_flavor_t flavor)
2979 {
2980 	switch (flavor) {
2981 	/* Ascending capability */
2982 	case THREAD_FLAVOR_INSPECT:
2983 		if (kotype == IKOT_THREAD_INSPECT) {
2984 			return TRUE;
2985 		}
2986 		OS_FALLTHROUGH;
2987 	case THREAD_FLAVOR_READ:
2988 		if (kotype == IKOT_THREAD_READ) {
2989 			return TRUE;
2990 		}
2991 		OS_FALLTHROUGH;
2992 	case THREAD_FLAVOR_CONTROL:
2993 		if (kotype == IKOT_THREAD_CONTROL) {
2994 			return TRUE;
2995 		}
2996 		break;
2997 	default:
2998 		panic("strange thread flavor");
2999 	}
3000 
3001 	return FALSE;
3002 }
3003 
3004 /*
3005  *	Routine: convert_port_to_thread_with_flavor_locked
3006  *	Purpose:
3007  *		Internal helper routine to convert from a locked port to a thread.
3008  *	Args:
3009  *		port   - target port
3010  *		flavor - requested thread port flavor
3011  *		options - port translation options
3012  *	Conditions:
3013  *		Port is locked and active.
3014  *		Produces a thread ref or THREAD_NULL.
3015  */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t               port,
	mach_thread_flavor_t     flavor,
	port_intrans_options_t   options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* the port's kobject type must be strong enough for the flavor */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* caller may require the thread not be the calling thread */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* restrict translation to threads of the calling task */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* corpse threads only translate when explicitly allowed */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* security policy check against the thread's owning task */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	thread_reference(thread);
	return thread;
}
3070 
3071 /*
3072  *	Routine:	convert_port_to_thread_with_flavor
3073  *	Purpose:
3074  *		Internal helper for converting from a port to a thread.
3075  *		Doesn't consume the port ref; produces a thread ref,
3076  *		which may be null.
3077  *	Args:
3078  *		port   - target port
3079  *		flavor - requested thread port flavor
3080  *		options - port translation options
3081  *	Conditions:
3082  *		Nothing locked.
3083  */
3084 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3085 convert_port_to_thread_with_flavor(
3086 	ipc_port_t           port,
3087 	mach_thread_flavor_t flavor,
3088 	port_intrans_options_t options)
3089 {
3090 	thread_t thread = THREAD_NULL;
3091 
3092 	if (IP_VALID(port)) {
3093 		ip_mq_lock(port);
3094 		if (ip_active(port)) {
3095 			thread = convert_port_to_thread_with_flavor_locked(port,
3096 			    flavor, options);
3097 		}
3098 		ip_mq_unlock(port);
3099 	}
3100 
3101 	return thread;
3102 }
3103 
3104 thread_t
convert_port_to_thread(ipc_port_t port)3105 convert_port_to_thread(
3106 	ipc_port_t              port)
3107 {
3108 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3109 	           PORT_INTRANS_OPTIONS_NONE);
3110 }
3111 
3112 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3113 convert_port_to_thread_read(
3114 	ipc_port_t              port)
3115 {
3116 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3117 	           PORT_INTRANS_ALLOW_CORPSE_TASK);
3118 }
3119 
3120 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3121 convert_port_to_thread_read_no_eval(
3122 	ipc_port_t              port)
3123 {
3124 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3125 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3126 }
3127 
3128 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3129 convert_port_to_thread_inspect(
3130 	ipc_port_t              port)
3131 {
3132 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3133 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3134 }
3135 
3136 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3137 convert_port_to_thread_inspect_no_eval(
3138 	ipc_port_t              port)
3139 {
3140 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3141 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3142 }
3143 
3144 /*
3145  *	Routine:	convert_thread_to_port_with_flavor
3146  *	Purpose:
3147  *		Convert from a thread to a port of given flavor.
3148  *		Consumes a thread ref; produces a naked send right
3149  *		which may be invalid.
3150  *	Conditions:
3151  *		Nothing locked.
3152  */
3153 static ipc_port_t
convert_thread_to_port_with_flavor(thread_t thread,thread_ro_t tro,mach_thread_flavor_t flavor)3154 convert_thread_to_port_with_flavor(
3155 	thread_t              thread,
3156 	thread_ro_t           tro,
3157 	mach_thread_flavor_t  flavor)
3158 {
3159 	ipc_port_t port = IP_NULL;
3160 
3161 	thread_mtx_lock(thread);
3162 
3163 	/*
3164 	 * out-trans of weaker flavors are still permitted, but in-trans
3165 	 * is separately enforced.
3166 	 */
3167 	if (flavor == THREAD_FLAVOR_CONTROL &&
3168 	    task_conversion_eval_out_trans(current_task(), tro->tro_task)) {
3169 		/* denied by security policy, make the port appear dead */
3170 		port = IP_DEAD;
3171 		goto exit;
3172 	}
3173 
3174 	if (!thread->ipc_active) {
3175 		goto exit;
3176 	}
3177 
3178 	port = tro->tro_ports[flavor];
3179 	if (flavor == THREAD_FLAVOR_CONTROL) {
3180 		port = ipc_port_make_send(port);
3181 	} else if (IP_VALID(port)) {
3182 		(void)ipc_kobject_make_send_nsrequest(port);
3183 	} else {
3184 		ipc_kobject_type_t kotype = (flavor == THREAD_FLAVOR_READ) ? IKOT_THREAD_READ : IKOT_THREAD_INSPECT;
3185 
3186 		/*
3187 		 * Claim a send right on the thread read/inspect port, and request a no-senders
3188 		 * notification on that port (if none outstanding). A thread reference is not
3189 		 * donated here even though the ports are created lazily because it doesn't own the
3190 		 * kobject that it points to. Threads manage their lifetime explicitly and
3191 		 * have to synchronize with each other, between the task/thread terminating and the
3192 		 * send-once notification firing, and this is done under the thread mutex
3193 		 * rather than with atomics.
3194 		 */
3195 		port = ipc_kobject_alloc_port(thread, kotype,
3196 		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
3197 		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3198 		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
3199 		    tro, tro_ports[flavor], &port);
3200 	}
3201 
3202 exit:
3203 	thread_mtx_unlock(thread);
3204 	thread_deallocate(thread);
3205 	return port;
3206 }
3207 
3208 ipc_port_t
convert_thread_to_port(thread_t thread)3209 convert_thread_to_port(
3210 	thread_t                thread)
3211 {
3212 	thread_ro_t tro = get_thread_ro(thread);
3213 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3214 }
3215 
3216 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3217 convert_thread_read_to_port(thread_read_t thread)
3218 {
3219 	thread_ro_t tro = get_thread_ro(thread);
3220 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3221 }
3222 
3223 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3224 convert_thread_inspect_to_port(thread_inspect_t thread)
3225 {
3226 	thread_ro_t tro = get_thread_ro(thread);
3227 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3228 }
3229 
3230 
3231 /*
3232  *	Routine:	port_name_to_thread
3233  *	Purpose:
3234  *		Convert from a port name to a thread reference
3235  *		A name of MACH_PORT_NULL is valid for the null thread.
3236  *	Conditions:
3237  *		Nothing locked.
3238  */
3239 thread_t
port_name_to_thread(mach_port_name_t name,port_intrans_options_t options)3240 port_name_to_thread(
3241 	mach_port_name_t         name,
3242 	port_intrans_options_t options)
3243 {
3244 	thread_t        thread = THREAD_NULL;
3245 	ipc_port_t      kport;
3246 	kern_return_t kr;
3247 
3248 	if (MACH_PORT_VALID(name)) {
3249 		kr = ipc_port_translate_send(current_space(), name, &kport);
3250 		if (kr == KERN_SUCCESS) {
3251 			/* port is locked and active */
3252 			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
3253 			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
3254 			thread = convert_port_to_thread_with_flavor_locked(kport,
3255 			    THREAD_FLAVOR_CONTROL, options);
3256 			ip_mq_unlock(kport);
3257 		}
3258 	}
3259 
3260 	return thread;
3261 }
3262 
3263 /*
3264  *	Routine:	port_name_is_pinned_itk_self
3265  *	Purpose:
3266  *		Returns whether this port name is for the pinned
3267  *		mach_task_self (if it exists).
3268  *
3269  *		task_self_trap() when the task port is pinned,
3270  *		will memorize the name the port has in the space
3271  *		in ip_receiver_name, which we can use to fast-track
3272  *		this answer without taking any lock.
3273  *
3274  *		ipc_task_disable() will set `ip_receiver_name` back to
3275  *		MACH_PORT_SPECIAL_DEFAULT.
3276  *
3277  *	Conditions:
3278  *		self must be current_task()
3279  *		Nothing locked.
3280  */
3281 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3282 port_name_is_pinned_itk_self(
3283 	task_t             self,
3284 	mach_port_name_t   name)
3285 {
3286 	ipc_port_t kport = self->itk_self;
3287 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3288 	       kport->ip_pinned && ip_get_receiver_name(kport) == name;
3289 }
3290 
3291 /*
3292  *	Routine:	port_name_to_current_task*_noref
3293  *	Purpose:
3294  *		Convert from a port name to current_task()
3295  *		A name of MACH_PORT_NULL is valid for the null task.
3296  *
3297  *		If current_task() is in the process of being terminated,
3298  *		this might return a non NULL task even when port_name_to_task()
3299  *		would return TASK_NULL.
3300  *
3301  *		However, this is an acceptable race that can't be controlled by
3302  *		userspace, and that downstream code using the returned task
3303  *		has to handle anyway.
3304  *
3305  *		ipc_space_disable() does try to narrow this race,
3306  *		by causing port_name_is_pinned_itk_self() to fail.
3307  *
3308  *	Returns:
3309  *		current_task() if the port name was for current_task()
3310  *		at the appropriate flavor.
3311  *
3312  *		TASK_NULL otherwise.
3313  *
3314  *	Conditions:
3315  *		Nothing locked.
3316  */
3317 static task_t
port_name_to_current_task_internal_noref(mach_port_name_t name,mach_task_flavor_t flavor)3318 port_name_to_current_task_internal_noref(
3319 	mach_port_name_t   name,
3320 	mach_task_flavor_t flavor)
3321 {
3322 	ipc_port_t kport;
3323 	kern_return_t kr;
3324 	task_t task = TASK_NULL;
3325 	task_t self = current_task();
3326 
3327 	if (port_name_is_pinned_itk_self(self, name)) {
3328 		return self;
3329 	}
3330 
3331 	if (MACH_PORT_VALID(name)) {
3332 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3333 		if (kr == KERN_SUCCESS) {
3334 			ipc_kobject_type_t type = ip_kotype(kport);
3335 			if (task_port_kotype_valid_for_flavor(type, flavor)) {
3336 				task = ipc_kobject_get_locked(kport, type);
3337 			}
3338 			ip_mq_unlock(kport);
3339 			if (task != self) {
3340 				task = TASK_NULL;
3341 			}
3342 		}
3343 	}
3344 
3345 	return task;
3346 }
3347 
3348 task_t
port_name_to_current_task_noref(mach_port_name_t name)3349 port_name_to_current_task_noref(
3350 	mach_port_name_t name)
3351 {
3352 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3353 }
3354 
3355 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3356 port_name_to_current_task_read_noref(
3357 	mach_port_name_t name)
3358 {
3359 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3360 }
3361 
3362 /*
3363  *	Routine:	port_name_to_task
3364  *	Purpose:
3365  *		Convert from a port name to a task reference
3366  *		A name of MACH_PORT_NULL is valid for the null task.
3367  *	Conditions:
3368  *		Nothing locked.
3369  */
3370 static task_t
port_name_to_task_grp(mach_port_name_t name,task_grp_t grp)3371 port_name_to_task_grp(
3372 	mach_port_name_t name,
3373 	task_grp_t       grp)
3374 {
3375 	ipc_port_t kport;
3376 	kern_return_t kr;
3377 	task_t task = TASK_NULL;
3378 	task_t self = current_task();
3379 
3380 	if (port_name_is_pinned_itk_self(self, name)) {
3381 		task_reference_grp(self, grp);
3382 		return self;
3383 	}
3384 
3385 	if (MACH_PORT_VALID(name)) {
3386 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3387 		if (kr == KERN_SUCCESS) {
3388 			/* port is locked and active */
3389 			task = convert_port_to_task_with_flavor_locked(kport,
3390 			    TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, grp);
3391 			ip_mq_unlock(kport);
3392 		}
3393 	}
3394 	return task;
3395 }
3396 
3397 task_t
port_name_to_task_external(mach_port_name_t name)3398 port_name_to_task_external(
3399 	mach_port_name_t name)
3400 {
3401 	return port_name_to_task_grp(name, TASK_GRP_EXTERNAL);
3402 }
3403 
3404 task_t
port_name_to_task_kernel(mach_port_name_t name)3405 port_name_to_task_kernel(
3406 	mach_port_name_t name)
3407 {
3408 	return port_name_to_task_grp(name, TASK_GRP_KERNEL);
3409 }
3410 
3411 /*
3412  *	Routine:	port_name_to_task_read
3413  *	Purpose:
3414  *		Convert from a port name to a task reference
3415  *		A name of MACH_PORT_NULL is valid for the null task.
3416  *	Conditions:
3417  *		Nothing locked.
3418  */
3419 task_read_t
port_name_to_task_read(mach_port_name_t name)3420 port_name_to_task_read(
3421 	mach_port_name_t name)
3422 {
3423 	ipc_port_t kport;
3424 	kern_return_t kr;
3425 	task_read_t tr = TASK_READ_NULL;
3426 	task_t self = current_task();
3427 
3428 	if (port_name_is_pinned_itk_self(self, name)) {
3429 		task_reference_grp(self, TASK_GRP_KERNEL);
3430 		return self;
3431 	}
3432 
3433 	if (MACH_PORT_VALID(name)) {
3434 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3435 		if (kr == KERN_SUCCESS) {
3436 			/* port is locked and active */
3437 			tr = convert_port_to_task_with_flavor_locked(kport,
3438 			    TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK,
3439 			    TASK_GRP_KERNEL);
3440 			ip_mq_unlock(kport);
3441 		}
3442 	}
3443 	return tr;
3444 }
3445 
3446 /*
3447  *	Routine:	port_name_to_task_read_no_eval
3448  *	Purpose:
3449  *		Convert from a port name to a task reference
3450  *		A name of MACH_PORT_NULL is valid for the null task.
3451  *		Skips task_conversion_eval() during conversion.
3452  *	Conditions:
3453  *		Nothing locked.
3454  */
3455 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3456 port_name_to_task_read_no_eval(
3457 	mach_port_name_t name)
3458 {
3459 	ipc_port_t kport;
3460 	kern_return_t kr;
3461 	task_read_t tr = TASK_READ_NULL;
3462 	task_t self = current_task();
3463 
3464 	if (port_name_is_pinned_itk_self(self, name)) {
3465 		task_reference_grp(self, TASK_GRP_KERNEL);
3466 		return self;
3467 	}
3468 
3469 	if (MACH_PORT_VALID(name)) {
3470 		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3471 		    PORT_INTRANS_ALLOW_CORPSE_TASK;
3472 
3473 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3474 		if (kr == KERN_SUCCESS) {
3475 			/* port is locked and active */
3476 			tr = convert_port_to_task_with_flavor_locked(kport,
3477 			    TASK_FLAVOR_READ, options, TASK_GRP_KERNEL);
3478 			ip_mq_unlock(kport);
3479 		}
3480 	}
3481 	return tr;
3482 }
3483 
3484 /*
3485  *	Routine:	port_name_to_task_name
3486  *	Purpose:
3487  *		Convert from a port name to a task reference
3488  *		A name of MACH_PORT_NULL is valid for the null task.
3489  *	Conditions:
3490  *		Nothing locked.
3491  */
3492 task_name_t
port_name_to_task_name(mach_port_name_t name)3493 port_name_to_task_name(
3494 	mach_port_name_t name)
3495 {
3496 	ipc_port_t kport;
3497 	kern_return_t kr;
3498 	task_name_t tn = TASK_NAME_NULL;
3499 	task_t self = current_task();
3500 
3501 	if (port_name_is_pinned_itk_self(self, name)) {
3502 		task_reference_grp(self, TASK_GRP_KERNEL);
3503 		return self;
3504 	}
3505 
3506 	if (MACH_PORT_VALID(name)) {
3507 		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3508 		    PORT_INTRANS_ALLOW_CORPSE_TASK;
3509 
3510 		kr = ipc_port_translate_send(current_space(), name, &kport);
3511 		if (kr == KERN_SUCCESS) {
3512 			/* port is locked and active */
3513 			tn = convert_port_to_task_with_flavor_locked(kport,
3514 			    TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL);
3515 			ip_mq_unlock(kport);
3516 		}
3517 	}
3518 	return tn;
3519 }
3520 
3521 /*
3522  *	Routine:	port_name_to_task_id_token
3523  *	Purpose:
3524  *		Convert from a port name to a task identity token reference
3525  *	Conditions:
3526  *		Nothing locked.
3527  */
3528 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3529 port_name_to_task_id_token(
3530 	mach_port_name_t name)
3531 {
3532 	ipc_port_t port;
3533 	kern_return_t kr;
3534 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3535 
3536 	if (MACH_PORT_VALID(name)) {
3537 		kr = ipc_port_translate_send(current_space(), name, &port);
3538 		if (kr == KERN_SUCCESS) {
3539 			token = convert_port_to_task_id_token(port);
3540 			ip_mq_unlock(port);
3541 		}
3542 	}
3543 	return token;
3544 }
3545 
3546 /*
3547  *	Routine:	port_name_to_host
3548  *	Purpose:
3549  *		Convert from a port name to a host pointer.
3550  *		NOTE: This does _not_ return a +1 reference to the host_t
3551  *	Conditions:
3552  *		Nothing locked.
3553  */
3554 host_t
port_name_to_host(mach_port_name_t name)3555 port_name_to_host(
3556 	mach_port_name_t name)
3557 {
3558 	host_t host = HOST_NULL;
3559 	kern_return_t kr;
3560 	ipc_port_t port;
3561 
3562 	if (MACH_PORT_VALID(name)) {
3563 		kr = ipc_port_translate_send(current_space(), name, &port);
3564 		if (kr == KERN_SUCCESS) {
3565 			host = convert_port_to_host(port);
3566 			ip_mq_unlock(port);
3567 		}
3568 	}
3569 	return host;
3570 }
3571 
3572 /*
3573  *	Routine:	convert_task_to_port_with_flavor
3574  *	Purpose:
3575  *		Convert from a task to a port of given flavor.
3576  *		Consumes a task ref; produces a naked send right
3577  *		which may be invalid.
3578  *	Conditions:
3579  *		Nothing locked.
3580  */
3581 ipc_port_t
convert_task_to_port_with_flavor(task_t task,mach_task_flavor_t flavor,task_grp_t grp)3582 convert_task_to_port_with_flavor(
3583 	task_t              task,
3584 	mach_task_flavor_t  flavor,
3585 	task_grp_t          grp)
3586 {
3587 	ipc_port_t port = IP_NULL;
3588 	ipc_kobject_type_t kotype = IKOT_NONE;
3589 
3590 	itk_lock(task);
3591 
3592 	if (!task->ipc_active) {
3593 		goto exit;
3594 	}
3595 
3596 	/*
3597 	 * out-trans of weaker flavors are still permitted, but in-trans
3598 	 * is separately enforced.
3599 	 */
3600 	if (flavor == TASK_FLAVOR_CONTROL &&
3601 	    task_conversion_eval_out_trans(current_task(), task)) {
3602 		/* denied by security policy, make the port appear dead */
3603 		port = IP_DEAD;
3604 		goto exit;
3605 	}
3606 
3607 	switch (flavor) {
3608 	case TASK_FLAVOR_CONTROL:
3609 	case TASK_FLAVOR_NAME:
3610 		port = ipc_port_make_send(task->itk_task_ports[flavor]);
3611 		break;
3612 	/*
3613 	 * Claim a send right on the task read/inspect port, and request a no-senders
3614 	 * notification on that port (if none outstanding). A task reference is
3615 	 * deliberately not donated here because ipc_kobject_make_send_lazy_alloc_port
3616 	 * is used only for convenience and these ports don't control the lifecycle of
3617 	 * the task kobject. Instead, the task's itk_lock is used to synchronize the
3618 	 * handling of the no-senders notification with the task termination.
3619 	 */
3620 	case TASK_FLAVOR_READ:
3621 	case TASK_FLAVOR_INSPECT:
3622 		kotype = (flavor == TASK_FLAVOR_READ) ? IKOT_TASK_READ : IKOT_TASK_INSPECT;
3623 		(void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *)&task->itk_task_ports[flavor],
3624 		    (ipc_kobject_t)task, kotype,
3625 		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND | IPC_KOBJECT_PTRAUTH_STORE,
3626 		    OS_PTRAUTH_DISCRIMINATOR("task.itk_task_ports"));
3627 		port = task->itk_task_ports[flavor];
3628 
3629 		break;
3630 	}
3631 
3632 exit:
3633 	itk_unlock(task);
3634 	task_deallocate_grp(task, grp);
3635 	return port;
3636 }
3637 
3638 ipc_port_t
convert_corpse_to_port_and_nsrequest(task_t corpse)3639 convert_corpse_to_port_and_nsrequest(
3640 	task_t          corpse)
3641 {
3642 	ipc_port_t port = IP_NULL;
3643 	__assert_only kern_return_t kr;
3644 
3645 	assert(task_is_a_corpse(corpse));
3646 	itk_lock(corpse);
3647 	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
3648 	assert(port->ip_srights == 0);
3649 	kr = ipc_kobject_make_send_nsrequest(port);
3650 	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
3651 	itk_unlock(corpse);
3652 
3653 	task_deallocate(corpse);
3654 	return port;
3655 }
3656 
3657 ipc_port_t
convert_task_to_port(task_t task)3658 convert_task_to_port(
3659 	task_t          task)
3660 {
3661 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3662 }
3663 
3664 ipc_port_t
convert_task_read_to_port(task_read_t task)3665 convert_task_read_to_port(
3666 	task_read_t          task)
3667 {
3668 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3669 }
3670 
3671 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3672 convert_task_inspect_to_port(
3673 	task_inspect_t          task)
3674 {
3675 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3676 }
3677 
3678 ipc_port_t
convert_task_name_to_port(task_name_t task)3679 convert_task_name_to_port(
3680 	task_name_t             task)
3681 {
3682 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3683 }
3684 
3685 extern ipc_port_t convert_task_to_port_external(task_t task);
3686 ipc_port_t
convert_task_to_port_external(task_t task)3687 convert_task_to_port_external(task_t task)
3688 {
3689 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3690 }
3691 
3692 ipc_port_t
convert_task_to_port_pinned(task_t task)3693 convert_task_to_port_pinned(
3694 	task_t          task)
3695 {
3696 	ipc_port_t port = IP_NULL;
3697 
3698 	assert(task == current_task());
3699 
3700 	itk_lock(task);
3701 
3702 	if (task->ipc_active && task->itk_self != IP_NULL) {
3703 		port = ipc_port_make_send(task->itk_self);
3704 	}
3705 
3706 	if (port && task_is_immovable(task)) {
3707 		assert(ip_is_pinned(port));
3708 		assert(ip_is_immovable_send(port));
3709 	}
3710 
3711 	itk_unlock(task);
3712 	task_deallocate(task);
3713 	return port;
3714 }
3715 /*
3716  *	Routine:	convert_task_suspension_token_to_port
3717  *	Purpose:
3718  *		Convert from a task suspension token to a port.
3719  *		Consumes a task suspension token ref; produces a naked send-once right
3720  *		which may be invalid.
3721  *	Conditions:
3722  *		Nothing locked.
3723  */
3724 static ipc_port_t
convert_task_suspension_token_to_port_grp(task_suspension_token_t task,task_grp_t grp)3725 convert_task_suspension_token_to_port_grp(
3726 	task_suspension_token_t         task,
3727 	task_grp_t                      grp)
3728 {
3729 	ipc_port_t port;
3730 
3731 	task_lock(task);
3732 	if (task->active) {
3733 		itk_lock(task);
3734 		if (task->itk_resume == IP_NULL) {
3735 			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
3736 			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
3737 		}
3738 
3739 		/*
3740 		 * Create a send-once right for each instance of a direct user-called
3741 		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
3742 		 * the notification handler will resume the target task.
3743 		 */
3744 		port = ipc_port_make_sonce(task->itk_resume);
3745 		itk_unlock(task);
3746 		assert(IP_VALID(port));
3747 	} else {
3748 		port = IP_NULL;
3749 	}
3750 
3751 	task_unlock(task);
3752 	task_suspension_token_deallocate_grp(task, grp);
3753 
3754 	return port;
3755 }
3756 
3757 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3758 convert_task_suspension_token_to_port_external(
3759 	task_suspension_token_t         task)
3760 {
3761 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3762 }
3763 
3764 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3765 convert_task_suspension_token_to_port_mig(
3766 	task_suspension_token_t         task)
3767 {
3768 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3769 }
3770 
3771 ipc_port_t
convert_thread_to_port_pinned(thread_t thread)3772 convert_thread_to_port_pinned(
3773 	thread_t                thread)
3774 {
3775 	thread_ro_t tro = get_thread_ro(thread);
3776 	ipc_port_t  port = IP_NULL;
3777 
3778 	thread_mtx_lock(thread);
3779 
3780 	if (thread->ipc_active && tro->tro_self_port != IP_NULL) {
3781 		port = ipc_port_make_send(tro->tro_self_port);
3782 	}
3783 
3784 	if (port && task_is_immovable(tro->tro_task)) {
3785 		assert(ip_is_immovable_send(port));
3786 	}
3787 
3788 	thread_mtx_unlock(thread);
3789 	thread_deallocate(thread);
3790 	return port;
3791 }
3792 /*
3793  *	Routine:	space_deallocate
3794  *	Purpose:
3795  *		Deallocate a space ref produced by convert_port_to_space.
3796  *	Conditions:
3797  *		Nothing locked.
3798  */
3799 
3800 void
space_deallocate(ipc_space_t space)3801 space_deallocate(
3802 	ipc_space_t     space)
3803 {
3804 	if (space != IS_NULL) {
3805 		is_release(space);
3806 	}
3807 }
3808 
3809 /*
3810  *	Routine:	space_read_deallocate
3811  *	Purpose:
3812  *		Deallocate a space read ref produced by convert_port_to_space_read.
3813  *	Conditions:
3814  *		Nothing locked.
3815  */
3816 
3817 void
space_read_deallocate(ipc_space_read_t space)3818 space_read_deallocate(
3819 	ipc_space_read_t     space)
3820 {
3821 	if (space != IS_INSPECT_NULL) {
3822 		is_release((ipc_space_t)space);
3823 	}
3824 }
3825 
3826 /*
3827  *	Routine:	space_inspect_deallocate
3828  *	Purpose:
3829  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3830  *	Conditions:
3831  *		Nothing locked.
3832  */
3833 
3834 void
space_inspect_deallocate(ipc_space_inspect_t space)3835 space_inspect_deallocate(
3836 	ipc_space_inspect_t     space)
3837 {
3838 	if (space != IS_INSPECT_NULL) {
3839 		is_release((ipc_space_t)space);
3840 	}
3841 }
3842 
3843 
3844 /*
3845  *	Routine:	thread/task_set_exception_ports [kernel call]
3846  *	Purpose:
3847  *			Sets the thread/task exception port, flavor and
3848  *			behavior for the exception types specified by the mask.
3849  *			There will be one send right per exception per valid
3850  *			port.
3851  *	Conditions:
3852  *		Nothing locked.  If successful, consumes
3853  *		the supplied send right.
3854  *	Returns:
3855  *		KERN_SUCCESS		Changed the special port.
3856  *		KERN_INVALID_ARGUMENT	The thread is null,
3857  *					Illegal mask bit set.
3858  *					Illegal exception behavior
3859  *		KERN_FAILURE		The thread is dead.
3860  */
3861 
3862 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)3863 thread_set_exception_ports(
3864 	thread_t                thread,
3865 	exception_mask_t        exception_mask,
3866 	ipc_port_t              new_port,
3867 	exception_behavior_t    new_behavior,
3868 	thread_state_flavor_t   new_flavor)
3869 {
3870 	ipc_port_t  old_port[EXC_TYPES_COUNT];
3871 	thread_ro_t tro;
3872 	boolean_t   privileged = task_is_privileged(current_task());
3873 
3874 #if CONFIG_MACF
3875 	struct label *new_label;
3876 #endif
3877 
3878 	if (thread == THREAD_NULL) {
3879 		return KERN_INVALID_ARGUMENT;
3880 	}
3881 
3882 	if (exception_mask & ~EXC_MASK_VALID) {
3883 		return KERN_INVALID_ARGUMENT;
3884 	}
3885 
3886 	if (IP_VALID(new_port)) {
3887 		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3888 		case EXCEPTION_DEFAULT:
3889 		case EXCEPTION_STATE:
3890 		case EXCEPTION_STATE_IDENTITY:
3891 		case EXCEPTION_IDENTITY_PROTECTED:
3892 			break;
3893 
3894 		default:
3895 			return KERN_INVALID_ARGUMENT;
3896 		}
3897 	}
3898 
3899 	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
3900 		return KERN_INVALID_RIGHT;
3901 	}
3902 
3903 
3904 	/*
3905 	 * Check the validity of the thread_state_flavor by calling the
3906 	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
3907 	 * osfmk/mach/ARCHITECTURE/thread_status.h
3908 	 */
3909 	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
3910 		return KERN_INVALID_ARGUMENT;
3911 	}
3912 
3913 	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED &&
3914 	    !(new_behavior & MACH_EXCEPTION_CODES)) {
3915 		return KERN_INVALID_ARGUMENT;
3916 	}
3917 
3918 #if CONFIG_MACF
3919 	new_label = mac_exc_create_label_for_current_proc();
3920 #endif
3921 
3922 	tro = get_thread_ro(thread);
3923 	thread_mtx_lock(thread);
3924 
3925 	if (!thread->active) {
3926 		thread_mtx_unlock(thread);
3927 #if CONFIG_MACF
3928 		mac_exc_free_label(new_label);
3929 #endif
3930 		return KERN_FAILURE;
3931 	}
3932 
3933 	if (tro->tro_exc_actions == NULL) {
3934 		ipc_thread_init_exc_actions(tro);
3935 	}
3936 	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3937 		struct exception_action *action = &tro->tro_exc_actions[i];
3938 
3939 		if ((exception_mask & (1 << i))
3940 #if CONFIG_MACF
3941 		    && mac_exc_update_action_label(action, new_label) == 0
3942 #endif
3943 		    ) {
3944 			old_port[i] = action->port;
3945 			action->port = ipc_port_copy_send(new_port);
3946 			action->behavior = new_behavior;
3947 			action->flavor = new_flavor;
3948 			action->privileged = privileged;
3949 		} else {
3950 			old_port[i] = IP_NULL;
3951 		}
3952 	}
3953 
3954 	thread_mtx_unlock(thread);
3955 
3956 #if CONFIG_MACF
3957 	mac_exc_free_label(new_label);
3958 #endif
3959 
3960 	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3961 		if (IP_VALID(old_port[i])) {
3962 			ipc_port_release_send(old_port[i]);
3963 		}
3964 	}
3965 
3966 	if (IP_VALID(new_port)) {         /* consume send right */
3967 		ipc_port_release_send(new_port);
3968 	}
3969 
3970 	return KERN_SUCCESS;
3971 }
3972 
3973 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)3974 task_set_exception_ports(
3975 	task_t                                  task,
3976 	exception_mask_t                exception_mask,
3977 	ipc_port_t                              new_port,
3978 	exception_behavior_t    new_behavior,
3979 	thread_state_flavor_t   new_flavor)
3980 {
3981 	ipc_port_t              old_port[EXC_TYPES_COUNT];
3982 	boolean_t privileged = task_is_privileged(current_task());
3983 	register int    i;
3984 
3985 #if CONFIG_MACF
3986 	struct label *new_label;
3987 #endif
3988 
3989 	if (task == TASK_NULL) {
3990 		return KERN_INVALID_ARGUMENT;
3991 	}
3992 
3993 	if (exception_mask & ~EXC_MASK_VALID) {
3994 		return KERN_INVALID_ARGUMENT;
3995 	}
3996 
3997 	if (IP_VALID(new_port)) {
3998 		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3999 		case EXCEPTION_DEFAULT:
4000 		case EXCEPTION_STATE:
4001 		case EXCEPTION_STATE_IDENTITY:
4002 		case EXCEPTION_IDENTITY_PROTECTED:
4003 			break;
4004 
4005 		default:
4006 			return KERN_INVALID_ARGUMENT;
4007 		}
4008 	}
4009 
4010 	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4011 		return KERN_INVALID_RIGHT;
4012 	}
4013 
4014 
4015 	/*
4016 	 * Check the validity of the thread_state_flavor by calling the
4017 	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4018 	 * osfmk/mach/ARCHITECTURE/thread_status.h
4019 	 */
4020 	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4021 		return KERN_INVALID_ARGUMENT;
4022 	}
4023 
4024 	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED
4025 	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
4026 		return KERN_INVALID_ARGUMENT;
4027 	}
4028 
4029 #if CONFIG_MACF
4030 	new_label = mac_exc_create_label_for_current_proc();
4031 #endif
4032 
4033 	itk_lock(task);
4034 
4035 	if (!task->ipc_active) {
4036 		itk_unlock(task);
4037 #if CONFIG_MACF
4038 		mac_exc_free_label(new_label);
4039 #endif
4040 		return KERN_FAILURE;
4041 	}
4042 
4043 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4044 		if ((exception_mask & (1 << i))
4045 #if CONFIG_MACF
4046 		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4047 #endif
4048 		    ) {
4049 			old_port[i] = task->exc_actions[i].port;
4050 			task->exc_actions[i].port =
4051 			    ipc_port_copy_send(new_port);
4052 			task->exc_actions[i].behavior = new_behavior;
4053 			task->exc_actions[i].flavor = new_flavor;
4054 			task->exc_actions[i].privileged = privileged;
4055 		} else {
4056 			old_port[i] = IP_NULL;
4057 		}
4058 	}
4059 
4060 	itk_unlock(task);
4061 
4062 #if CONFIG_MACF
4063 	mac_exc_free_label(new_label);
4064 #endif
4065 
4066 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4067 		if (IP_VALID(old_port[i])) {
4068 			ipc_port_release_send(old_port[i]);
4069 		}
4070 	}
4071 
4072 	if (IP_VALID(new_port)) {         /* consume send right */
4073 		ipc_port_release_send(new_port);
4074 	}
4075 
4076 	return KERN_SUCCESS;
4077 }
4078 
4079 /*
4080  *	Routine:	thread/task_swap_exception_ports [kernel call]
4081  *	Purpose:
4082  *			Sets the thread/task exception port, flavor and
4083  *			behavior for the exception types specified by the
4084  *			mask.
4085  *
4086  *			The old ports, behavior and flavors are returned
4087  *			Count specifies the array sizes on input and
4088  *			the number of returned ports etc. on output.  The
4089  *			arrays must be large enough to hold all the returned
 4090  *		data, MIG returns an error otherwise.  The masks
4091  *			array specifies the corresponding exception type(s).
4092  *
4093  *	Conditions:
4094  *		Nothing locked.  If successful, consumes
4095  *		the supplied send right.
4096  *
 4097  *		Returns up to [in] CountCnt elements.
4098  *	Returns:
4099  *		KERN_SUCCESS		Changed the special port.
4100  *		KERN_INVALID_ARGUMENT	The thread is null,
4101  *					Illegal mask bit set.
4102  *					Illegal exception behavior
4103  *		KERN_FAILURE		The thread is dead.
4104  */
4105 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	/* Send rights displaced per exception type; released after unlock. */
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the set of valid exception types. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable rights may not be registered as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* EXCEPTION_IDENTITY_PROTECTED is only valid with MACH_EXCEPTION_CODES. */
	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	/* Lazily allocate the per-thread exception action table. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/* Stop early once the caller's output arrays (*CountCnt slots) are full. */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: open a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Install the new action; stash the old port for release below. */
			old_port[i] = action->port;
			action->port = ipc_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced send rights outside the thread mutex. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4243 
kern_return_t
task_swap_exception_ports(
	task_t                                          task,
	exception_mask_t                        exception_mask,
	ipc_port_t                                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	/* Send rights displaced per exception type; released after unlock. */
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the set of valid exception types. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable rights may not be registered as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* EXCEPTION_IDENTITY_PROTECTED is only valid with MACH_EXCEPTION_CODES. */
	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/* Stop early once the caller's output arrays (*CountCnt slots) are full. */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: open a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Install the new action; stash the old port for release below. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port =     ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced send rights outside the task's ipc lock. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4373 
4374 /*
4375  *	Routine:	thread/task_get_exception_ports [kernel call]
4376  *	Purpose:
4377  *		Clones a send right for each of the thread/task's exception
4378  *		ports specified in the mask and returns the behaviour
4379  *		and flavor of said port.
4380  *
 4381  *		Returns up to [in] CountCnt elements.
4382  *
4383  *	Conditions:
4384  *		Nothing locked.
4385  *	Returns:
4386  *		KERN_SUCCESS		Extracted a send right.
4387  *		KERN_INVALID_ARGUMENT	The thread is null,
4388  *					Invalid special port,
4389  *					Illegal mask bit set.
4390  *		KERN_FAILURE		The thread is dead.
4391  */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* Two modes: ports_info != NULL returns obfuscated pointer info only;
	 * otherwise real send rights are copied into `ports`. */
	boolean_t info_only = (ports_info != NULL);
	boolean_t dbg_ok = TRUE;
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the set of valid exception types. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* In rights mode the caller must supply an output port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
	/* On RELEASE kernels, gate exposure of port info behind a MACF check. */
	if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
		dbg_ok = TRUE;
	} else {
		dbg_ok = FALSE;
	}
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No action table allocated yet: nothing to report. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New entry, and the caller's arrays still have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!dbg_ok || !IP_VALID(exc_port)) {
						/* avoid taking port lock if !dbg_ok */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Pointers are obfuscated via VM_KERNEL_ADDRPERM before export. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = ipc_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4494 
4495 static kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4496 thread_get_exception_ports(
4497 	thread_t                        thread,
4498 	exception_mask_t                exception_mask,
4499 	exception_mask_array_t          masks,
4500 	mach_msg_type_number_t          *CountCnt,
4501 	exception_port_array_t          ports,
4502 	exception_behavior_array_t      behaviors,
4503 	thread_state_flavor_array_t     flavors)
4504 {
4505 	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4506 	           NULL, ports, behaviors, flavors);
4507 }
4508 
4509 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4510 thread_get_exception_ports_info(
4511 	mach_port_t                     port,
4512 	exception_mask_t                exception_mask,
4513 	exception_mask_array_t          masks,
4514 	mach_msg_type_number_t          *CountCnt,
4515 	exception_port_info_array_t     ports_info,
4516 	exception_behavior_array_t      behaviors,
4517 	thread_state_flavor_array_t     flavors)
4518 {
4519 	kern_return_t kr;
4520 
4521 	thread_t thread = convert_port_to_thread_read_no_eval(port);
4522 
4523 	if (thread == THREAD_NULL) {
4524 		return KERN_INVALID_ARGUMENT;
4525 	}
4526 
4527 	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4528 	    ports_info, NULL, behaviors, flavors);
4529 
4530 	thread_deallocate(thread);
4531 	return kr;
4532 }
4533 
4534 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4535 thread_get_exception_ports_from_user(
4536 	mach_port_t                     port,
4537 	exception_mask_t                exception_mask,
4538 	exception_mask_array_t          masks,
4539 	mach_msg_type_number_t         *CountCnt,
4540 	exception_port_array_t          ports,
4541 	exception_behavior_array_t      behaviors,
4542 	thread_state_flavor_array_t     flavors)
4543 {
4544 	kern_return_t kr;
4545 
4546 	thread_t thread = convert_port_to_thread(port);
4547 
4548 	if (thread == THREAD_NULL) {
4549 		return KERN_INVALID_ARGUMENT;
4550 	}
4551 
4552 	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4553 
4554 	thread_deallocate(thread);
4555 	return kr;
4556 }
4557 
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* Two modes: ports_info != NULL returns obfuscated pointer info only;
	 * otherwise real send rights are copied into `ports`. */
	boolean_t info_only = (ports_info != NULL);
	boolean_t dbg_ok = TRUE;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject mask bits outside the set of valid exception types. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* In rights mode the caller must supply an output port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
	/* On RELEASE kernels, gate exposure of port info behind a MACF check. */
	if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
		dbg_ok = TRUE;
	} else {
		dbg_ok = FALSE;
	}
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New entry, and the caller's arrays still have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!dbg_ok || !IP_VALID(exc_port)) {
						/* avoid taking port lock if !dbg_ok */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Pointers are obfuscated via VM_KERNEL_ADDRPERM before export. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = ipc_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4652 
4653 static kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4654 task_get_exception_ports(
4655 	task_t                          task,
4656 	exception_mask_t                exception_mask,
4657 	exception_mask_array_t          masks,
4658 	mach_msg_type_number_t          *CountCnt,
4659 	exception_port_array_t          ports,
4660 	exception_behavior_array_t      behaviors,
4661 	thread_state_flavor_array_t     flavors)
4662 {
4663 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4664 	           NULL, ports, behaviors, flavors);
4665 }
4666 
4667 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4668 task_get_exception_ports_info(
4669 	mach_port_t                     port,
4670 	exception_mask_t                exception_mask,
4671 	exception_mask_array_t          masks,
4672 	mach_msg_type_number_t          *CountCnt,
4673 	exception_port_info_array_t     ports_info,
4674 	exception_behavior_array_t      behaviors,
4675 	thread_state_flavor_array_t     flavors)
4676 {
4677 	kern_return_t kr;
4678 
4679 	task_t task = convert_port_to_task_read_no_eval(port);
4680 
4681 	if (task == TASK_NULL) {
4682 		return KERN_INVALID_ARGUMENT;
4683 	}
4684 
4685 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4686 	    ports_info, NULL, behaviors, flavors);
4687 
4688 	task_deallocate(task);
4689 	return kr;
4690 }
4691 
4692 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4693 task_get_exception_ports_from_user(
4694 	mach_port_t                     port,
4695 	exception_mask_t                exception_mask,
4696 	exception_mask_array_t          masks,
4697 	mach_msg_type_number_t         *CountCnt,
4698 	exception_port_array_t          ports,
4699 	exception_behavior_array_t      behaviors,
4700 	thread_state_flavor_array_t     flavors)
4701 {
4702 	kern_return_t kr;
4703 
4704 	task_t task = convert_port_to_task(port);
4705 
4706 	if (task == TASK_NULL) {
4707 		return KERN_INVALID_ARGUMENT;
4708 	}
4709 
4710 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4711 
4712 	task_deallocate(task);
4713 	return kr;
4714 }
4715 
4716 /*
4717  *	Routine:	ipc_thread_port_unpin
4718  *	Purpose:
4719  *
4720  *		Called on the thread when it's terminating so that the last ref
4721  *		can be deallocated without a guard exception.
4722  *	Conditions:
4723  *		Thread mutex lock is held.
4724  */
4725 void
ipc_thread_port_unpin(ipc_port_t port)4726 ipc_thread_port_unpin(
4727 	ipc_port_t port)
4728 {
4729 	if (port == IP_NULL) {
4730 		return;
4731 	}
4732 	ip_mq_lock(port);
4733 	port->ip_pinned = 0;
4734 	ip_mq_unlock(port);
4735 }
4736