xref: /xnu-8019.80.24/osfmk/kern/ipc_tt.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86 
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94 #include <kdp/kdp_dyld.h>
95 
96 #include <vm/vm_map.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_protos.h>
99 
100 #include <security/mac_mach_internal.h>
101 
102 #if CONFIG_CSR
103 #include <sys/csr.h>
104 #endif
105 
#if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
/* boot-arg/sysctl knob: relax task-port conversion policy for platform binaries */
extern int cs_relax_platform_task_ports;
#endif

/* IOKit entitlement check for the current task (defined in IOKit) */
extern boolean_t IOCurrentTaskHasEntitlement(const char *);

/*
 * Which kind of special reply port a bind/unbind operation targets:
 * a thread can have both a user-visible special reply port and a
 * kernel-internal one.
 */
__options_decl(ipc_reply_port_type_t, uint32_t, {
	IRPT_NONE        = 0x00,
	IRPT_USER        = 0x01,
	IRPT_KERNEL      = 0x02,
});

/* forward declarations */
static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
extern kern_return_t task_conversion_eval(task_t caller, task_t victim);
static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
129 
130 /*
131  *	Routine:	ipc_task_init
132  *	Purpose:
133  *		Initialize a task's IPC state.
134  *
135  *		If non-null, some state will be inherited from the parent.
136  *		The parent must be appropriately initialized.
137  *	Conditions:
138  *		Nothing locked.
139  */
140 
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;       /* control (kernel object) port */
	ipc_port_t nport;       /* name port */
	ipc_port_t pport;       /* "self" port; same as kport until made immovable */
	kern_return_t kr;
	int i;


	/* Create the task's IPC space; failure here is unrecoverable. */
	kr = ipc_space_create(&ipc_table_entries[0], IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/*
	 * Allocate the control and name kobject ports.  They are created
	 * with a NULL kobject and only attached to the task later by
	 * ipc_task_enable(), so they cannot be used before the task is ready.
	 */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * The no-senders notification for a corpse would not
		 * work with a naked send right held in the kernel,
		 * so don't keep one.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		task->itk_settable_self = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* No parent: start with empty exception/bootstrap state. */
		ipc_port_t port;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* Copy send rights from the parent under the parent's itk lock. */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
		    ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
262 
263 /*
264  *	Routine:	ipc_task_set_immovable_pinned
265  *	Purpose:
266  *		Make a task's control port immovable and/or pinned
267  *      according to its control port options. If control port
268  *      is immovable, allocate an immovable control port for the
269  *      task and optionally pin it. Deallocate the old control port.
270  *	Conditions:
271  *		Task's control port is movable and not pinned.
272  */
void
ipc_task_set_immovable_pinned(
	task_t            task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	ipc_port_t new_pport;

	/* pport is the same as kport at ipc_task_init() time */
	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
	assert(task->itk_self == task->itk_settable_self);
	assert(!task_is_a_corpse(task));

	/* only tasks opt in immovable control port can have pinned control port */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* Fresh immovable (and possibly pinned) port replaces itk_self. */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);

		/*
		 * Label the movable kport so the IPC layer substitutes the
		 * immovable port when the kport is used; link them together.
		 */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		itk_lock(task);
		task->itk_self = new_pport;
		itk_unlock(task);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
	}
}
307 
308 /*
309  *	Routine:	ipc_task_enable
310  *	Purpose:
311  *		Enable a task for IPC access.
312  *	Conditions:
313  *		Nothing locked.
314  */
void
ipc_task_enable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t pport;

	itk_lock(task);

	/* Only corpses may be (re-)enabled after having been active. */
	assert(!task->ipc_active || task_is_a_corpse(task));
	task->ipc_active = true;

	/* Attach the task kobject to every port flavor already allocated. */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
	}
	/* A distinct pport exists only for immovable-control-port tasks. */
	pport = task->itk_self;
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(task));
		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
	}

	itk_unlock(task);
}
354 
355 /*
356  *	Routine:	ipc_task_disable
357  *	Purpose:
358  *		Disable IPC access to a task.
359  *	Conditions:
360  *		Nothing locked.
361  */
362 
void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* Detach the task kobject from every port flavor that exists. */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
432 
433 /*
434  *	Routine:	ipc_task_terminate
435  *	Purpose:
436  *		Clean up and destroy a task's IPC state.
437  *	Conditions:
438  *		Nothing locked.  The task must be suspended.
439  *		(Or the current thread must be in the task.)
440  */
441 
void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;
	ipc_port_t sself;
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}

	/* Detach every port flavor from the task while holding the itk lock. */
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* Take ownership of the dyld notifier array; released below. */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears ikol_alt_port, must be done first */
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);

	/* destroy the kernel ports */
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
570 
571 /*
572  *	Routine:	ipc_task_reset
573  *	Purpose:
574  *		Reset a task's IPC state to protect it when
575  *		it enters an elevated security context. The
576  *		task name port can remain the same - since it
577  *              represents no specific privilege.
578  *	Conditions:
579  *		Nothing locked.  The task must be suspended.
580  *		(Or the current thread must be in the task.)
581  */
582 
void
ipc_task_reset(
	task_t          task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	/* Allocate the replacement control port before taking the itk lock. */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		/* undo the eager allocation above */
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for corpse, needed to arm no-sender notification */
		task->itk_settable_self = IP_NULL;
	} else {
		task->itk_settable_self = ipc_port_make_send(new_kport);
	}

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_mq_lock(old_kport);
	/* clears ikol_alt_port */
	ipc_kobject_disable_locked(old_kport, IKOT_TASK_CONTROL);
	task->exec_token += 1;
	ip_mq_unlock(old_kport);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ip_mq_lock(old_pport);
		ipc_kobject_disable_locked(old_pport, IKOT_TASK_CONTROL);
		task->exec_token += 1;
		ip_mq_unlock(old_pport);
	}

	/* Strip unprivileged exception ports; keep pending corpse notification. */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
	}
	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
	}
}
731 
732 /*
733  *	Routine:	ipc_thread_init
734  *	Purpose:
735  *		Initialize a thread's IPC state.
736  *	Conditions:
737  *		Nothing locked.
738  */
739 
void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t      kport;
	ipc_port_t      pport;
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	/*
	 * Having task_is_immovable() true does not guarantee thread control port
	 * should be made immovable/pinned, also check options.
	 *
	 * Raw mach threads created via thread_create() have neither of INIT_PINNED
	 * or INIT_IMMOVABLE option set.
	 */
	if (task_is_immovable(task) && (options & IPC_THREAD_INIT_IMMOVABLE)) {
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task) && (options & IPC_THREAD_INIT_PINNED)) {
			alloc_options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* immovable pport plus a labeled movable kport linked to it */
		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/* movable control port; pport aliases kport */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* naked send right for the settable self port */
	tro->tro_settable_self_port = ipc_port_make_send(kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
793 
/*
 * Make the main thread's control port immovable (and pinned if the owning
 * task's control port is pinned), mirroring ipc_task_set_immovable_pinned().
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		/* label kport so it substitutes to the immovable port */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		thread_mtx_lock(thread);
		/* tro is read-only memory; update via the zalloc_ro mutation API */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
833 
/* Allocation wrapper so kalloc_type/kfree_type get a single named type. */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
837 
/*
 * Lazily allocate a thread's per-thread exception action array
 * (zeroed, with MACF labels attached) and publish it into the
 * read-only thread structure.
 */
static void
ipc_thread_init_exc_actions(thread_ro_t tro)
{
	struct exception_action *actions;

	actions = kalloc_type(struct thread_init_exc_actions,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;

#if CONFIG_MACF
	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
		mac_exc_associate_action_label(&actions[i],
		    mac_exc_create_label(&actions[i]));
	}
#endif

	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
}
855 
/*
 * Free a thread's exception action array (if any): release MACF labels,
 * clear the read-only pointer, then free the allocation.
 */
static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)
{
	struct exception_action *actions = tro->tro_exc_actions;

	if (actions) {
#if CONFIG_MACF
		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
			mac_exc_free_action_label(actions + i);
		}
#endif

		/* clear the RO pointer before freeing to avoid a dangling reference */
		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
		kfree_type(struct thread_init_exc_actions, actions);
	}
}
872 
873 static void
ipc_thread_ro_update_ports(thread_ro_t tro,const struct thread_ro * tro_tpl)874 ipc_thread_ro_update_ports(
875 	thread_ro_t             tro,
876 	const struct thread_ro *tro_tpl)
877 {
878 	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
879 	vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);
880 
881 	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
882 	    offsetof(struct thread_ro, tro_self_port) +
883 	    sizeof(struct ipc_port_t *));
884 	static_assert(offsetof(struct thread_ro, tro_ports) ==
885 	    offsetof(struct thread_ro, tro_self_port) +
886 	    2 * sizeof(struct ipc_port_t *));
887 	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
888 	    offs, &tro_tpl->tro_self_port, size);
889 }
890 
891 /*
892  *	Routine:	ipc_thread_disable
893  *	Purpose:
 *		Disable IPC access to a thread (without destroying its
 *		IPC state -- see ipc_thread_terminate for that).
895  *	Conditions:
896  *		Thread locked.
897  */
void
ipc_thread_disable(
	thread_t        thread)
{
	thread_ro_t     tro = get_thread_ro(thread);
	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t      pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	/* A distinct pport exists only for immovable-control-port threads. */
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
942 
943 /*
944  *	Routine:	ipc_thread_terminate
945  *	Purpose:
946  *		Clean up and destroy a thread's IPC state.
947  *	Conditions:
948  *		Nothing locked.
949  */
950 
void
ipc_thread_terminate(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
	ipc_port_t pport = IP_NULL;
	ipc_port_t sport = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
	sport = tro->tro_settable_self_port;

	if (kport != IP_NULL) {
		/* release the settable-self naked send right */
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}

		/* clear all port fields in the read-only thread struct at once */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* destroy the kernel ports, outside the thread mutex */
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
	}

	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
	}
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
	}
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1021 
1022 /*
1023  *	Routine:	ipc_thread_reset
1024  *	Purpose:
1025  *		Reset the IPC state for a given Mach thread when
1026  *		its task enters an elevated security context.
1027  *		All flavors of thread port and its exception ports have
1028  *		to be reset.  Its RPC reply port cannot have any
1029  *		rights outstanding, so it should be fine. The thread
1030  *		inspect and read port are set to NULL.
1031  *	Conditions:
1032  *		Nothing locked.
1033  */
1034 
1035 void
ipc_thread_reset(thread_t thread)1036 ipc_thread_reset(
1037 	thread_t        thread)
1038 {
1039 	thread_ro_t tro = get_thread_ro(thread);
1040 	ipc_port_t old_kport, new_kport, old_pport, new_pport;
1041 	ipc_port_t old_sself;
1042 	ipc_port_t old_rdport;
1043 	ipc_port_t old_iport;
1044 	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
1045 	boolean_t  has_old_exc_actions = FALSE;
1046 	boolean_t thread_is_immovable;
1047 	int i;
1048 
1049 #if CONFIG_MACF
1050 	struct label *new_label = mac_exc_create_label(NULL);
1051 #endif
1052 
1053 	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);
1054 
1055 	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
1056 	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
1057 	/*
1058 	 * ipc_thread_reset() only happens during sugid or corpsify.
1059 	 *
1060 	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
1061 	 * port is still movable/not pinned.
1062 	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
1063 	 * thread port is bogus, and should appear as if it doesn't have one.
1064 	 *
1065 	 * So simply leave pport the same as kport.
1066 	 */
1067 	new_pport = new_kport;
1068 
1069 	thread_mtx_lock(thread);
1070 
1071 	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
1072 	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
1073 	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
1074 
1075 	old_sself = tro->tro_settable_self_port;
1076 	old_pport = tro->tro_self_port;
1077 
1078 	if (old_kport == IP_NULL && thread->inspection == FALSE) {
1079 		/* thread is already terminated (can this happen?) */
1080 		thread_mtx_unlock(thread);
1081 		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
1082 		if (thread_is_immovable) {
1083 			ipc_kobject_dealloc_port(new_pport, 0,
1084 			    IKOT_THREAD_CONTROL);
1085 		}
1086 #if CONFIG_MACF
1087 		mac_exc_free_label(new_label);
1088 #endif
1089 		return;
1090 	}
1091 
1092 	thread->ipc_active = true;
1093 
1094 	struct thread_ro tpl = {
1095 		.tro_self_port = new_pport,
1096 		.tro_settable_self_port = ipc_port_make_send(new_kport),
1097 		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
1098 	};
1099 
1100 	ipc_thread_ro_update_ports(tro, &tpl);
1101 
1102 	if (old_kport != IP_NULL) {
1103 		/* clears ikol_alt_port */
1104 		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
1105 	}
1106 	if (old_rdport != IP_NULL) {
1107 		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
1108 	}
1109 	if (old_iport != IP_NULL) {
1110 		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
1111 	}
1112 	if (thread_is_immovable && old_pport != IP_NULL) {
1113 		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
1114 	}
1115 
1116 	/*
1117 	 * Only ports that were set by root-owned processes
1118 	 * (privileged ports) should survive
1119 	 */
1120 	if (tro->tro_exc_actions != NULL) {
1121 		has_old_exc_actions = TRUE;
1122 		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1123 			if (tro->tro_exc_actions[i].privileged) {
1124 				old_exc_actions[i] = IP_NULL;
1125 			} else {
1126 #if CONFIG_MACF
1127 				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
1128 #endif
1129 				old_exc_actions[i] = tro->tro_exc_actions[i].port;
1130 				tro->tro_exc_actions[i].port = IP_NULL;
1131 			}
1132 		}
1133 	}
1134 
1135 	thread_mtx_unlock(thread);
1136 
1137 #if CONFIG_MACF
1138 	mac_exc_free_label(new_label);
1139 #endif
1140 
1141 	/* release the naked send rights */
1142 
1143 	if (IP_VALID(old_sself)) {
1144 		ipc_port_release_send(old_sself);
1145 	}
1146 
1147 	if (has_old_exc_actions) {
1148 		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1149 			ipc_port_release_send(old_exc_actions[i]);
1150 		}
1151 	}
1152 
1153 	/* destroy the kernel port */
1154 	if (old_kport != IP_NULL) {
1155 		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
1156 	}
1157 	if (old_rdport != IP_NULL) {
1158 		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
1159 	}
1160 	if (old_iport != IP_NULL) {
1161 		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
1162 	}
1163 	if (old_pport != old_kport && old_pport != IP_NULL) {
1164 		assert(thread_is_immovable);
1165 		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
1166 	}
1167 
1168 	/* unbind the thread special reply port */
1169 	if (IP_VALID(thread->ith_special_reply_port)) {
1170 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1171 	}
1172 }
1173 
1174 /*
1175  *	Routine:	retrieve_task_self_fast
1176  *	Purpose:
1177  *		Optimized version of retrieve_task_self,
1178  *		that only works for the current task.
1179  *
1180  *		Return a send right (possibly null/dead)
1181  *		for the task's user-visible self port.
1182  *	Conditions:
1183  *		Nothing locked.
1184  */
1185 
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	/* only valid for the calling task */
	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if (task->itk_settable_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
		/* no interposing, return the IMMOVABLE port */
		port = ipc_port_make_send(task->itk_self);
#if (DEBUG || DEVELOPMENT)
		if (task_is_immovable(task)) {
			assert(ip_is_immovable_send(port));
			if (task_is_pinned(task)) {
				/* pinned port is also immovable */
				assert(ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	} else {
		/* a different (movable) port was interposed as the self port */
		port = ipc_port_copy_send(task->itk_settable_self);
	}
	itk_unlock(task);

	return port;
}
1219 
1220 /*
1221  *	Routine:	mach_task_is_self
1222  *	Purpose:
1223  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1224  *      port is pointing to current_task.
1225  */
1226 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1227 mach_task_is_self(
1228 	task_t         task,
1229 	boolean_t     *is_self)
1230 {
1231 	if (task == TASK_NULL) {
1232 		return KERN_INVALID_ARGUMENT;
1233 	}
1234 
1235 	*is_self = (task == current_task());
1236 
1237 	return KERN_SUCCESS;
1238 }
1239 
1240 /*
1241  *	Routine:	retrieve_thread_self_fast
1242  *	Purpose:
1243  *		Return a send right (possibly null/dead)
1244  *		for the thread's user-visible self port.
1245  *
1246  *		Only works for the current thread.
1247  *
1248  *	Conditions:
1249  *		Nothing locked.
1250  */
1251 
ipc_port_t
retrieve_thread_self_fast(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	/* only valid for the calling thread */
	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(tro->tro_self_port != IP_NULL);

	if (tro->tro_settable_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		/* no interposing, return IMMOVABLE_PORT */
		port = ipc_port_make_send(tro->tro_self_port);
	} else {
		/* a different port was interposed via thread_set_special_port() */
		port = ipc_port_copy_send(tro->tro_settable_self_port);
	}

	thread_mtx_unlock(thread);

	return port;
}
1276 
1277 /*
1278  *	Routine:	task_self_trap [mach trap]
1279  *	Purpose:
1280  *		Give the caller send rights for his own task port.
1281  *	Conditions:
1282  *		Nothing locked.
1283  *	Returns:
1284  *		MACH_PORT_NULL if there are any resource failures
1285  *		or other errors.
1286  */
1287 
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_task_self_fast(task);
	/* copy the send right out into the caller's IPC space */
	name = ipc_port_copyout_send(sright, task->itk_space);

	/*
	 * When the right is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
	 * when it is pinned.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	if (sright == task->itk_self && sright->ip_pinned &&
	    MACH_PORT_VALID(name)) {
		itk_lock(task);
		if (task->ipc_active) {
			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
				sright->ip_receiver_name = name;
			} else if (ip_get_receiver_name(sright) != name) {
				/* a pinned self port must always map to one name */
				panic("mach_task_self() name changed");
			}
		}
		itk_unlock(task);
	}
	return name;
}
1324 
1325 /*
1326  *	Routine:	thread_self_trap [mach trap]
1327  *	Purpose:
1328  *		Give the caller send rights for his own thread port.
1329  *	Conditions:
1330  *		Nothing locked.
1331  *	Returns:
1332  *		MACH_PORT_NULL if there are any resource failures
1333  *		or other errors.
1334  */
1335 
1336 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1337 thread_self_trap(
1338 	__unused struct thread_self_trap_args *args)
1339 {
1340 	thread_t thread = current_thread();
1341 	ipc_space_t space = current_space();
1342 	ipc_port_t sright;
1343 	mach_port_name_t name;
1344 
1345 	sright = retrieve_thread_self_fast(thread);
1346 	name = ipc_port_copyout_send(sright, space);
1347 	return name;
1348 }
1349 
1350 /*
1351  *	Routine:	mach_reply_port [mach trap]
1352  *	Purpose:
1353  *		Allocate a port for the caller.
1354  *	Conditions:
1355  *		Nothing locked.
1356  *	Returns:
1357  *		MACH_PORT_NULL if there are any resource failures
1358  *		or other errors.
1359  */
1360 
1361 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1362 mach_reply_port(
1363 	__unused struct mach_reply_port_args *args)
1364 {
1365 	ipc_port_t port;
1366 	mach_port_name_t name;
1367 	kern_return_t kr;
1368 
1369 	kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1370 	    &name, &port);
1371 	if (kr == KERN_SUCCESS) {
1372 		ip_mq_unlock(port);
1373 	} else {
1374 		name = MACH_PORT_NULL;
1375 	}
1376 	return name;
1377 }
1378 
1379 /*
1380  *	Routine:	thread_get_special_reply_port [mach trap]
1381  *	Purpose:
1382  *		Allocate a special reply port for the calling thread.
1383  *	Conditions:
1384  *		Nothing locked.
1385  *	Returns:
1386  *		mach_port_name_t: send right & receive right for special reply port.
1387  *		MACH_PORT_NULL if there are any resource failures
1388  *		or other errors.
1389  */
1390 
mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	thread_t thread = current_thread();
	/* the new port is born with a send right and special-reply semantics */
	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}

	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
	if (kr == KERN_SUCCESS) {
		/* ipc_port_alloc() returns the port locked, as bind requires */
		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
		ip_mq_unlock(port);
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
1416 
1417 /*
1418  *	Routine:	thread_get_kernel_special_reply_port
1419  *	Purpose:
1420  *		Allocate a kernel special reply port for the calling thread.
1421  *	Conditions:
1422  *		Nothing locked.
1423  *	Returns:
1424  *		Creates and sets kernel special reply port.
1425  *		KERN_SUCCESS on Success.
1426  *		KERN_FAILURE on Failure.
1427  */
1428 
kern_return_t
thread_get_kernel_special_reply_port(void)
{
	ipc_port_t port = IPC_PORT_NULL;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_kernel_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
	}

	port = ipc_port_alloc_reply(); /*returns a reference on the port */
	if (port != IPC_PORT_NULL) {
		ip_mq_lock(port);
		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
		ip_mq_unlock(port);
		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
	}
	/* NOTE(review): allocation failure still returns KERN_SUCCESS,
	 * although the routine header documents KERN_FAILURE — confirm intent. */
	return KERN_SUCCESS;
}
1449 
1450 /*
1451  *	Routine:	ipc_port_bind_special_reply_port_locked
1452  *	Purpose:
1453  *		Bind the given port to current thread as a special reply port.
1454  *	Conditions:
1455  *		Port locked.
1456  *	Returns:
1457  *		None.
1458  */
1459 
static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t            port,
	ipc_reply_port_type_t reply_type)
{
	thread_t thread = current_thread();
	ipc_port_t *reply_portp;

	/* select the user or kernel special-reply slot on the thread */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	/* slot must be empty and the port a fresh, unlinked special reply port */
	assert(*reply_portp == NULL);
	assert(port->ip_specialreply);
	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

	/* the thread's slot holds its own port reference */
	ip_reference(port);
	*reply_portp = port;
	port->ip_messages.imq_srp_owner_thread = thread;

	ipc_special_reply_port_bits_reset(port);
}
1484 
1485 /*
1486  *	Routine:	ipc_port_unbind_special_reply_port
1487  *	Purpose:
1488  *		Unbind the thread's special reply port.
1489  *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
1491  *	Condition:
1492  *		Nothing locked.
1493  *	Returns:
1494  *		None.
1495  */
static void
ipc_port_unbind_special_reply_port(
	thread_t              thread,
	ipc_reply_port_type_t reply_type)
{
	ipc_port_t *reply_portp;

	/* select the user or kernel special-reply slot on the thread */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	ipc_port_t special_reply_port = *reply_portp;

	ip_mq_lock(special_reply_port);

	/* clear the slot and detach the port from its owner thread */
	*reply_portp = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/* Kernel special reply ports are destroyed; user ones just lose our reference */
	if (reply_type == IRPT_USER) {
		ip_release(special_reply_port);
	} else {
		ipc_port_dealloc_reply(special_reply_port);
	}
	return;
}
1526 
1527 /*
1528  *	Routine:	thread_dealloc_kernel_special_reply_port
1529  *	Purpose:
1530  *		Unbind the thread's kernel special reply port.
1531  *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
1533  *	Condition:
1534  *		Called on current thread or a terminated thread.
1535  *	Returns:
1536  *		None.
1537  */
1538 
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* unbind and destroy the kernel special reply port (IRPT_KERNEL) */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1544 
1545 /*
1546  *	Routine:	thread_get_special_port [kernel call]
1547  *	Purpose:
1548  *		Clones a send right for one of the thread's
1549  *		special ports.
1550  *	Conditions:
1551  *		Nothing locked.
1552  *	Returns:
1553  *		KERN_SUCCESS		Extracted a send right.
1554  *		KERN_INVALID_ARGUMENT	The thread is null.
1555  *		KERN_FAILURE		The thread is dead.
1556  *		KERN_INVALID_ARGUMENT	Invalid special port.
1557  */
1558 
kern_return_t
thread_get_special_port(
	thread_inspect_t         thread,
	int                      which,
	ipc_port_t              *portp);

static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	/* reject requests not permitted for the caller's port flavor */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		/* clone a send right to the (possibly interposed) self port */
		port = ipc_port_copy_send(tro->tro_settable_self_port);
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1610 
1611 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1612 thread_get_special_port(
1613 	thread_inspect_t         thread,
1614 	int                      which,
1615 	ipc_port_t              *portp)
1616 {
1617 	if (thread == THREAD_NULL) {
1618 		return KERN_INVALID_ARGUMENT;
1619 	}
1620 
1621 	return thread_get_special_port_internal(thread, get_thread_ro(thread),
1622 	           which, portp, THREAD_FLAVOR_CONTROL);
1623 }
1624 
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	ipc_port_t port = IP_NULL;

	/* make a naked send right for the settable self port */
	thread_mtx_lock(thread);
	port = ipc_port_make_send(tro->tro_settable_self_port);
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1637 
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			/* hand out a substitute-once wrapper rather than the raw self port */
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the supplied port's kobject type to a thread flavor */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	/* drop the reference taken by convert_port_to_thread_inspect_no_eval */
	thread_deallocate(thread);
	return kr;
}
1694 
1695 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1696 special_port_allowed_with_thread_flavor(
1697 	int                  which,
1698 	mach_thread_flavor_t flavor)
1699 {
1700 	switch (flavor) {
1701 	case THREAD_FLAVOR_CONTROL:
1702 		return KERN_SUCCESS;
1703 
1704 	case THREAD_FLAVOR_READ:
1705 
1706 		switch (which) {
1707 		case THREAD_READ_PORT:
1708 		case THREAD_INSPECT_PORT:
1709 			return KERN_SUCCESS;
1710 		default:
1711 			return KERN_INVALID_CAPABILITY;
1712 		}
1713 
1714 	case THREAD_FLAVOR_INSPECT:
1715 
1716 		switch (which) {
1717 		case THREAD_INSPECT_PORT:
1718 			return KERN_SUCCESS;
1719 		default:
1720 			return KERN_INVALID_CAPABILITY;
1721 		}
1722 
1723 	default:
1724 		return KERN_INVALID_CAPABILITY;
1725 	}
1726 }
1727 
1728 /*
1729  *	Routine:	thread_set_special_port [kernel call]
1730  *	Purpose:
1731  *		Changes one of the thread's special ports,
1732  *		setting it to the supplied send right.
1733  *	Conditions:
1734  *		Nothing locked.  If successful, consumes
1735  *		the supplied send right.
1736  *	Returns:
1737  *		KERN_SUCCESS            Changed the special port.
1738  *		KERN_INVALID_ARGUMENT   The thread is null.
1739  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1740  *		KERN_FAILURE            The thread is dead.
1741  *		KERN_INVALID_ARGUMENT   Invalid special port.
1742  *		KERN_NO_ACCESS          Restricted access to set port.
1743  */
1744 
kern_return_t
thread_set_special_port(
	thread_t                thread,
	int                     which,
	ipc_port_t              port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* immovable rights may not be interposed as special ports */
	if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}

	tro = get_thread_ro(thread);

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) != 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			return KERN_NO_ACCESS;
		}
#endif
		thread_mtx_lock(thread);
		if (thread->active) {
			/* swap in the new settable-self port under the mutex */
			old = tro->tro_settable_self_port;
			zalloc_ro_update_field(ZONE_ID_THREAD_RO,
			    tro, tro_settable_self_port, &port);
		} else {
			result = KERN_FAILURE;
		}
		thread_mtx_unlock(thread);

		/* release the displaced send right outside the mutex */
		if (IP_VALID(old)) {
			ipc_port_release_send(old);
		}

		return result;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1797 
1798 /*
1799  *	Routine:	task_get_special_port [kernel call]
1800  *	Purpose:
1801  *		Clones a send right for one of the task's
1802  *		special ports.
1803  *	Conditions:
1804  *		Nothing locked.
1805  *	Returns:
1806  *		KERN_SUCCESS		    Extracted a send right.
1807  *		KERN_INVALID_ARGUMENT	The task is null.
1808  *		KERN_FAILURE		    The task/space is dead.
1809  *		KERN_INVALID_ARGUMENT	Invalid special port.
1810  */
1811 
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t        flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* reject requests not permitted for the caller's port flavor */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		/* clone a send right to the (possibly interposed) self port */
		port = ipc_port_copy_send(task->itk_settable_self);
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		itk_unlock(task);
		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_port_make_send(task->itk_task_ports[TASK_FLAVOR_NAME]);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = ipc_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1892 
1893 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t      *portp)
{
	/* kernel callers always operate at control-port privilege */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
1902 
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	/* make a naked send right for the settable self port */
	itk_lock(task);
	port = ipc_port_make_send(task->itk_settable_self);
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1915 
1916 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	/* let MAC policies veto the request up front */
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			/* hand out a substitute-once wrapper rather than the raw self port */
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* map the supplied port's kobject type to a task flavor */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	/* drop the reference taken by convert_port_to_task_inspect_no_eval */
	task_deallocate(task);
	return kr;
}
1978 
1979 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)1980 special_port_allowed_with_task_flavor(
1981 	int                which,
1982 	mach_task_flavor_t flavor)
1983 {
1984 	switch (flavor) {
1985 	case TASK_FLAVOR_CONTROL:
1986 		return KERN_SUCCESS;
1987 
1988 	case TASK_FLAVOR_READ:
1989 
1990 		switch (which) {
1991 		case TASK_READ_PORT:
1992 		case TASK_INSPECT_PORT:
1993 		case TASK_NAME_PORT:
1994 			return KERN_SUCCESS;
1995 		default:
1996 			return KERN_INVALID_CAPABILITY;
1997 		}
1998 
1999 	case TASK_FLAVOR_INSPECT:
2000 
2001 		switch (which) {
2002 		case TASK_INSPECT_PORT:
2003 		case TASK_NAME_PORT:
2004 			return KERN_SUCCESS;
2005 		default:
2006 			return KERN_INVALID_CAPABILITY;
2007 		}
2008 
2009 	default:
2010 		return KERN_INVALID_CAPABILITY;
2011 	}
2012 }
2013 
2014 /*
2015  *	Routine:	task_set_special_port [MIG call]
2016  *	Purpose:
2017  *		Changes one of the task's special ports,
2018  *		setting it to the supplied send right.
2019  *	Conditions:
2020  *		Nothing locked.  If successful, consumes
2021  *		the supplied send right.
2022  *	Returns:
2023  *		KERN_SUCCESS		    Changed the special port.
2024  *		KERN_INVALID_ARGUMENT	The task is null.
2025  *      KERN_INVALID_RIGHT      Port is marked as immovable.
2026  *		KERN_FAILURE		    The task/space is dead.
2027  *		KERN_INVALID_ARGUMENT	Invalid special port.
2028  *      KERN_NO_ACCESS		    Restricted access to set port.
2029  */
2030 
kern_return_t
task_set_special_port_from_user(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
#if CONFIG_MACF
	/* let MAC policies veto the update before any state changes */
	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
		return KERN_DENIED;
	}
#endif

	return task_set_special_port(task, which, port);
}
2045 
2046 /* Kernel call only. MIG uses task_set_special_port_from_user() */
2047 kern_return_t
task_set_special_port(task_t task,int which,ipc_port_t port)2048 task_set_special_port(
2049 	task_t          task,
2050 	int             which,
2051 	ipc_port_t      port)
2052 {
2053 	if (task == TASK_NULL) {
2054 		return KERN_INVALID_ARGUMENT;
2055 	}
2056 
2057 	if (task_is_driver(current_task())) {
2058 		return KERN_NO_ACCESS;
2059 	}
2060 
2061 	if (IP_VALID(port) && (port->ip_immovable_receive || port->ip_immovable_send)) {
2062 		return KERN_INVALID_RIGHT;
2063 	}
2064 
2065 	switch (which) {
2066 	case TASK_KERNEL_PORT:
2067 	case TASK_HOST_PORT:
2068 #if CONFIG_CSR
2069 		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
2070 			/*
2071 			 * Only allow setting of task-self / task-host
2072 			 * special ports from user-space when SIP is
2073 			 * disabled (for Mach-on-Mach emulation).
2074 			 */
2075 			break;
2076 		}
2077 #endif
2078 		return KERN_NO_ACCESS;
2079 	default:
2080 		break;
2081 	}
2082 
2083 	return task_set_special_port_internal(task, which, port);
2084 }
2085 
2086 /*
2087  *	Routine:	task_set_special_port_internal
2088  *	Purpose:
2089  *		Changes one of the task's special ports,
2090  *		setting it to the supplied send right.
2091  *	Conditions:
2092  *		Nothing locked.  If successful, consumes
2093  *		the supplied send right.
2094  *	Returns:
2095  *		KERN_SUCCESS		Changed the special port.
2096  *		KERN_INVALID_ARGUMENT	The task is null.
2097  *		KERN_FAILURE		The task/space is dead.
2098  *		KERN_INVALID_ARGUMENT	Invalid special port.
2099  *      KERN_NO_ACCESS		Restricted access to overwrite port.
2100  */
2101 
kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	/* Displaced send right, if any; released only after dropping the lock. */
	ipc_port_t old = IP_NULL;
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/* Refuse once the task's IPC state has been torn down. */
	if (!task->ipc_active) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	/*
	 * Swap in the supplied right under the itk lock, remembering the
	 * previous right so it can be released after the lock is dropped.
	 */
	switch (which) {
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Drop the displaced right outside the lock to avoid lock-order issues. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2174 /*
2175  *	Routine:	mach_ports_register [kernel call]
2176  *	Purpose:
2177  *		Stash a handful of port send rights in the task.
2178  *		Child tasks will inherit these rights, but they
2179  *		must use mach_ports_lookup to acquire them.
2180  *
2181  *		The rights are supplied in a (wired) kalloc'd segment.
2182  *		Rights which aren't supplied are assumed to be null.
2183  *	Conditions:
2184  *		Nothing locked.  If successful, consumes
2185  *		the supplied rights and memory.
2186  *	Returns:
2187  *		KERN_SUCCESS		    Stashed the port rights.
2188  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2189  *		KERN_INVALID_ARGUMENT	The task is null.
2190  *		KERN_INVALID_ARGUMENT	The task is dead.
2191  *		KERN_INVALID_ARGUMENT	The memory param is null.
2192  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2193  */
2194 
2195 kern_return_t
mach_ports_register(task_t task,mach_port_array_t memory,mach_msg_type_number_t portsCnt)2196 mach_ports_register(
2197 	task_t                  task,
2198 	mach_port_array_t       memory,
2199 	mach_msg_type_number_t  portsCnt)
2200 {
2201 	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
2202 	unsigned int i;
2203 
2204 	if ((task == TASK_NULL) ||
2205 	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
2206 	    (portsCnt && memory == NULL)) {
2207 		return KERN_INVALID_ARGUMENT;
2208 	}
2209 
2210 	/*
2211 	 *	Pad the port rights with nulls.
2212 	 */
2213 
2214 	for (i = 0; i < portsCnt; i++) {
2215 		ports[i] = memory[i];
2216 		if (IP_VALID(ports[i]) && (ports[i]->ip_immovable_receive || ports[i]->ip_immovable_send)) {
2217 			return KERN_INVALID_RIGHT;
2218 		}
2219 	}
2220 	for (; i < TASK_PORT_REGISTER_MAX; i++) {
2221 		ports[i] = IP_NULL;
2222 	}
2223 
2224 	itk_lock(task);
2225 	if (!task->ipc_active) {
2226 		itk_unlock(task);
2227 		return KERN_INVALID_ARGUMENT;
2228 	}
2229 
2230 	/*
2231 	 *	Replace the old send rights with the new.
2232 	 *	Release the old rights after unlocking.
2233 	 */
2234 
2235 	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2236 		ipc_port_t old;
2237 
2238 		old = task->itk_registered[i];
2239 		task->itk_registered[i] = ports[i];
2240 		ports[i] = old;
2241 	}
2242 
2243 	itk_unlock(task);
2244 
2245 	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2246 		if (IP_VALID(ports[i])) {
2247 			ipc_port_release_send(ports[i]);
2248 		}
2249 	}
2250 
2251 	/*
2252 	 *	Now that the operation is known to be successful,
2253 	 *	we can free the memory.
2254 	 */
2255 
2256 	if (portsCnt != 0) {
2257 		kfree_type(mach_port_t, portsCnt, memory);
2258 	}
2259 
2260 	return KERN_SUCCESS;
2261 }
2262 
2263 /*
2264  *	Routine:	mach_ports_lookup [kernel call]
2265  *	Purpose:
2266  *		Retrieves (clones) the stashed port send rights.
2267  *	Conditions:
2268  *		Nothing locked.  If successful, the caller gets
2269  *		rights and memory.
2270  *	Returns:
2271  *		KERN_SUCCESS		Retrieved the send rights.
2272  *		KERN_INVALID_ARGUMENT	The task is null.
2273  *		KERN_INVALID_ARGUMENT	The task is dead.
2274  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2275  */
2276 
2277 kern_return_t
mach_ports_lookup(task_t task,mach_port_array_t * portsp,mach_msg_type_number_t * portsCnt)2278 mach_ports_lookup(
2279 	task_t                  task,
2280 	mach_port_array_t       *portsp,
2281 	mach_msg_type_number_t  *portsCnt)
2282 {
2283 	ipc_port_t *ports;
2284 
2285 	if (task == TASK_NULL) {
2286 		return KERN_INVALID_ARGUMENT;
2287 	}
2288 
2289 	ports = kalloc_type(ipc_port_t, TASK_PORT_REGISTER_MAX,
2290 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
2291 
2292 	itk_lock(task);
2293 	if (!task->ipc_active) {
2294 		itk_unlock(task);
2295 		kfree_type(ipc_port_t, TASK_PORT_REGISTER_MAX, ports);
2296 
2297 		return KERN_INVALID_ARGUMENT;
2298 	}
2299 
2300 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2301 		ports[i] = ipc_port_copy_send(task->itk_registered[i]);
2302 	}
2303 
2304 	itk_unlock(task);
2305 
2306 	*portsp = ports;
2307 	*portsCnt = TASK_PORT_REGISTER_MAX;
2308 	return KERN_SUCCESS;
2309 }
2310 
/*
 * Security policy for translating a task port into a task reference.
 * Returns KERN_SUCCESS when `caller` may resolve `victim`'s port,
 * KERN_INVALID_SECURITY otherwise. `out_trans` is true when the check is
 * for an outbound translation (task -> port sent to user space).
 */
static kern_return_t
task_conversion_eval_internal(task_t caller, task_t victim, boolean_t out_trans)
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port.
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == TASK_NULL || (victim == kernel_task && !allow_kern_task)) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* boot-arg escape hatch for development workflows */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2371 
/* Conversion policy check for inbound (port -> task) translations. */
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
	return task_conversion_eval_internal(caller, victim, FALSE);
}
2377 
/* Conversion policy check for outbound (task -> port) translations. */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim)
{
	return task_conversion_eval_internal(caller, victim, TRUE);
}
2383 
2384 /*
2385  *	Routine:	task_port_kotype_valid_for_flavor
2386  *	Purpose:
2387  *		Check whether the kobject type of a mach port
2388  *      is valid for conversion to a task of given flavor.
2389  */
2390 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2391 task_port_kotype_valid_for_flavor(
2392 	natural_t          kotype,
2393 	mach_task_flavor_t flavor)
2394 {
2395 	switch (flavor) {
2396 	/* Ascending capability */
2397 	case TASK_FLAVOR_NAME:
2398 		if (kotype == IKOT_TASK_NAME) {
2399 			return TRUE;
2400 		}
2401 		OS_FALLTHROUGH;
2402 	case TASK_FLAVOR_INSPECT:
2403 		if (kotype == IKOT_TASK_INSPECT) {
2404 			return TRUE;
2405 		}
2406 		OS_FALLTHROUGH;
2407 	case TASK_FLAVOR_READ:
2408 		if (kotype == IKOT_TASK_READ) {
2409 			return TRUE;
2410 		}
2411 		OS_FALLTHROUGH;
2412 	case TASK_FLAVOR_CONTROL:
2413 		if (kotype == IKOT_TASK_CONTROL) {
2414 			return TRUE;
2415 		}
2416 		break;
2417 	default:
2418 		panic("strange task flavor");
2419 	}
2420 
2421 	return FALSE;
2422 }
2423 
2424 /*
2425  *	Routine: convert_port_to_locked_task_with_flavor
2426  *	Purpose:
2427  *		Internal helper routine to convert from a port to a locked
2428  *		task. Used by several routines that try to convert from a
2429  *		task port to a reference on some task related object (space and map).
2430  *  Args:
2431  *      port    - target port
2432  *      flavor  - requested task port flavor
2433  *      options - port translation options
2434  *	Conditions:
2435  *		Nothing locked, blocking OK.
2436  */
static task_t
convert_port_to_locked_task_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	int try_failed_count = 0;

	/*
	 * Loop so the out-of-order task_lock_try() below can be retried;
	 * all validation is redone on each pass since the port state may
	 * have changed while we slept in mutex_pause().
	 */
	while (IP_VALID(port)) {
		ipc_kobject_type_t type = ip_kotype(port);
		task_t task;

		if (!task_port_kotype_valid_for_flavor(type, flavor)) {
			return TASK_NULL;
		}

		ip_mq_lock(port);
		task = ipc_kobject_get_locked(port, type);
		if (task == TASK_NULL) {
			ip_mq_unlock(port);
			return TASK_NULL;
		}

		/* Corpses are only reachable when the caller opted in. */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == TASK_FLAVOR_CONTROL);
			ip_mq_unlock(port);
			return TASK_NULL;
		}

		/* Name/inspect flavors are expected to skip the eval below. */
		if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL)
		    && task_conversion_eval(current_task(), task)) {
			ip_mq_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_mq_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_mq_unlock(port);
			return task;
		}
		try_failed_count++;

		/* Back off (with increasing pause) and retry from scratch. */
		ip_mq_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
2491 
2492 /*
2493  *	Routine: convert_port_to_task_with_flavor_locked
2494  *	Purpose:
2495  *		Internal helper routine to convert from a locked port to a task.
2496  *      Used by convert_port_to_task_with_flavor() and port name -> task conversions.
2497  *  Args:
2498  *      port   - target port
2499  *      flavor - requested task port flavor
2500  *      options - port translation options
2501  *      grp    - task reference group
2502  *	Conditions:
2503  *		Port is locked and active. Produces task ref or TASK_NULL.
2504  */
2505 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2506 convert_port_to_task_with_flavor_locked(
2507 	ipc_port_t              port,
2508 	mach_task_flavor_t      flavor,
2509 	port_intrans_options_t  options,
2510 	task_grp_t              grp)
2511 {
2512 	task_t          task = TASK_NULL;
2513 	ipc_kobject_type_t type = ip_kotype(port);
2514 
2515 	ip_mq_lock_held(port);
2516 	require_ip_active(port);
2517 
2518 	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
2519 		return TASK_NULL;
2520 	}
2521 
2522 	task = ipc_kobject_get_locked(port, type);
2523 	if (task != TASK_NULL) {
2524 		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
2525 			assert(flavor == TASK_FLAVOR_CONTROL);
2526 			return TASK_NULL;
2527 		}
2528 
2529 		/* TODO: rdar://42389187 */
2530 		if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
2531 			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
2532 		}
2533 
2534 		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL)
2535 		    && task_conversion_eval(current_task(), task)) {
2536 			return TASK_NULL;
2537 		}
2538 
2539 		task_reference_grp(task, grp);
2540 	}
2541 
2542 	return task;
2543 }
2544 
2545 /*
2546  *	Routine:	convert_port_to_task_with_exec_token
2547  *	Purpose:
2548  *		Convert from a port to a task and return
2549  *		the exec token stored in the task.
2550  *		Doesn't consume the port ref; produces a task ref,
2551  *		which may be null.
2552  *	Conditions:
2553  *		Nothing locked.
2554  */
2555 task_t
convert_port_to_task_with_exec_token(ipc_port_t port,uint32_t * exec_token)2556 convert_port_to_task_with_exec_token(
2557 	ipc_port_t              port,
2558 	uint32_t                *exec_token)
2559 {
2560 	task_t task = TASK_NULL;
2561 	task_t self = current_task();
2562 
2563 	if (IP_VALID(port)) {
2564 		if (port == self->itk_self) {
2565 			if (exec_token) {
2566 				/*
2567 				 * This is ok to do without a lock,
2568 				 * from the perspective of `current_task()`
2569 				 * this token never changes, except
2570 				 * for the thread doing the exec.
2571 				 */
2572 				*exec_token = self->exec_token;
2573 			}
2574 			task_reference_grp(self, TASK_GRP_KERNEL);
2575 			return self;
2576 		}
2577 
2578 		ip_mq_lock(port);
2579 		if (ip_active(port)) {
2580 			task = convert_port_to_task_with_flavor_locked(port, TASK_FLAVOR_CONTROL,
2581 			    PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
2582 		}
2583 		ip_mq_unlock(port);
2584 	}
2585 
2586 	if (task) {
2587 		*exec_token = task->exec_token;
2588 	}
2589 
2590 	return task;
2591 }
2592 
2593 /*
2594  *	Routine:	convert_port_to_task_with_flavor
2595  *	Purpose:
2596  *		Internal helper for converting from a port to a task.
2597  *		Doesn't consume the port ref; produces a task ref,
2598  *		which may be null.
2599  *  Args:
2600  *      port   - target port
2601  *      flavor - requested task port flavor
2602  *      options - port translation options
2603  *      grp    - task reference group
2604  *	Conditions:
2605  *		Nothing locked.
2606  */
2607 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2608 convert_port_to_task_with_flavor(
2609 	ipc_port_t         port,
2610 	mach_task_flavor_t flavor,
2611 	port_intrans_options_t options,
2612 	task_grp_t         grp)
2613 {
2614 	task_t task = TASK_NULL;
2615 	task_t self = current_task();
2616 
2617 	if (IP_VALID(port)) {
2618 		if (port == self->itk_self) {
2619 			task_reference_grp(self, grp);
2620 			return self;
2621 		}
2622 
2623 		ip_mq_lock(port);
2624 		if (ip_active(port)) {
2625 			task = convert_port_to_task_with_flavor_locked(port, flavor, options, grp);
2626 		}
2627 		ip_mq_unlock(port);
2628 	}
2629 
2630 	return task;
2631 }
2632 
/* Control-flavor task conversion, kernel reference group. */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}
2640 
/* Control-flavor task conversion for MIG-generated callers. */
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}
2648 
/* Read-flavor task conversion; corpse tasks are allowed. */
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2656 
/* Read-flavor conversion that also skips task_conversion_eval(). */
static task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2664 
/* Read-flavor task conversion for MIG-generated callers. */
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2672 
/* Inspect-flavor conversion; eval is skipped by design for this flavor. */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2680 
/* Same as convert_port_to_task_inspect(); kept for naming symmetry. */
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2688 
/* Inspect-flavor task conversion for MIG-generated callers. */
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2696 
/* Name-flavor conversion; weakest capability, no eval performed. */
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2704 
/* Name-flavor task conversion for MIG-generated callers. */
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2712 
2713 /*
2714  *	Routine:	convert_port_to_task_policy
2715  *	Purpose:
2716  *		Convert from a port to a task.
2717  *		Doesn't consume the port ref; produces a task ref,
2718  *		which may be null.
2719  *		If the port is being used with task_port_set(), any task port
2720  *		type other than TASK_CONTROL requires an entitlement. If the
2721  *		port is being used with task_port_get(), TASK_NAME requires an
2722  *		entitlement.
2723  *	Conditions:
2724  *		Nothing locked.
2725  */
static task_t
convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
{
	task_t task = TASK_NULL;
	task_t ctask = current_task();

	if (!IP_VALID(port)) {
		return TASK_NULL;
	}

	/* Setters need a control port; getters accept an inspect port. */
	task = set ?
	    convert_port_to_task_mig(port) :
	    convert_port_to_task_inspect_mig(port);

	/* Entitled callers may fall back to the name port. */
	if (task == TASK_NULL &&
	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
		task = convert_port_to_task_name_mig(port);
	}

	/* Re-apply the conversion policy (also rejects TASK_NULL here). */
	if (task_conversion_eval(ctask, task) != KERN_SUCCESS) {
		task_deallocate_grp(task, TASK_GRP_MIG);
		return TASK_NULL;
	}

	return task;
}
2752 
/* task_policy set path: requires a control port. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}
2758 
/* task_policy get path: an inspect (or entitled name) port suffices. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2764 
2765 /*
2766  *	Routine:	convert_port_to_task_suspension_token
2767  *	Purpose:
2768  *		Convert from a port to a task suspension token.
2769  *		Doesn't consume the port ref; produces a suspension token ref,
2770  *		which may be null.
2771  *	Conditions:
2772  *		Nothing locked.
2773  */
2774 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2775 convert_port_to_task_suspension_token_grp(
2776 	ipc_port_t              port,
2777 	task_grp_t              grp)
2778 {
2779 	task_suspension_token_t task = TASK_NULL;
2780 
2781 	if (IP_VALID(port)) {
2782 		ip_mq_lock(port);
2783 		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2784 		if (task != TASK_NULL) {
2785 			task_reference_grp(task, grp);
2786 		}
2787 		ip_mq_unlock(port);
2788 	}
2789 
2790 	return task;
2791 }
2792 
/* Suspension-token conversion, external reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}
2799 
/* Suspension-token conversion for MIG-generated callers. */
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}
2806 
/* Suspension-token conversion, kernel reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2813 
2814 /*
2815  *	Routine:	convert_port_to_space_with_flavor
2816  *	Purpose:
2817  *		Internal helper for converting from a port to a space.
2818  *		Doesn't consume the port ref; produces a space ref,
2819  *		which may be null.
2820  *  Args:
2821  *      port   - target port
2822  *      flavor - requested ipc space flavor
2823  *      options - port translation options
2824  *	Conditions:
2825  *		Nothing locked.
2826  */
2827 static ipc_space_t
convert_port_to_space_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2828 convert_port_to_space_with_flavor(
2829 	ipc_port_t         port,
2830 	mach_task_flavor_t flavor,
2831 	port_intrans_options_t options)
2832 {
2833 	ipc_space_t space;
2834 	task_t task;
2835 
2836 	assert(flavor != TASK_FLAVOR_NAME);
2837 	task = convert_port_to_locked_task_with_flavor(port, flavor, options);
2838 
2839 	if (task == TASK_NULL) {
2840 		return IPC_SPACE_NULL;
2841 	}
2842 
2843 	if (!task->active) {
2844 		task_unlock(task);
2845 		return IPC_SPACE_NULL;
2846 	}
2847 
2848 	space = task->itk_space;
2849 	is_reference(space);
2850 	task_unlock(task);
2851 	return space;
2852 }
2853 
/* Control-flavor space conversion. */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2861 
/* Read-flavor space conversion; corpse tasks are allowed. */
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2869 
/* Read-flavor space conversion that also skips task_conversion_eval(). */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2877 
/* Inspect-flavor space conversion; eval is skipped for this flavor. */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2885 
2886 /*
2887  *	Routine:	convert_port_to_map_with_flavor
2888  *	Purpose:
2889  *		Internal helper for converting from a port to a map.
2890  *		Doesn't consume the port ref; produces a map ref,
2891  *		which may be null.
2892  *  Args:
2893  *      port   - target port
2894  *      flavor - requested vm map flavor
2895  *      options - port translation options
2896  *	Conditions:
2897  *		Nothing locked.
2898  */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	task_t task;
	vm_map_t map;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	task = convert_port_to_locked_task_with_flavor(port, flavor, options);

	if (task == TASK_NULL) {
		return VM_MAP_NULL;
	}

	/* A dying task no longer hands out its map. */
	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	map = task->map;
	if (map->pmap == kernel_pmap) {
		/*
		 * Sanity checks: user space must never gain control access
		 * to a map backed by the kernel pmap, and only the kernel
		 * task may be backed by one at all.
		 */
		if (flavor == TASK_FLAVOR_CONTROL) {
			panic("userspace has control access to a "
			    "kernel map %p through task %p", map, task);
		}
		if (task != kernel_task) {
			panic("userspace has access to a "
			    "kernel map %p through task %p", map, task);
		}
	} else {
		pmap_require(map->pmap);
	}

	/* Take the map reference before dropping the task lock. */
	vm_map_reference(map);
	task_unlock(task);
	return map;
}
2939 
/* Control-flavor map conversion. */
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2947 
/* Read-flavor map conversion; corpse tasks are allowed. */
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2955 
/* Inspect-flavor map conversion: unsupported, always returns NULL. */
vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* there is no vm_map_inspect_t routines at the moment. */
	return VM_MAP_INSPECT_NULL;
}
2963 
2964 /*
2965  *	Routine:	thread_port_kotype_valid_for_flavor
2966  *	Purpose:
2967  *		Check whether the kobject type of a mach port
2968  *      is valid for conversion to a thread of given flavor.
2969  */
2970 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)2971 thread_port_kotype_valid_for_flavor(
2972 	natural_t            kotype,
2973 	mach_thread_flavor_t flavor)
2974 {
2975 	switch (flavor) {
2976 	/* Ascending capability */
2977 	case THREAD_FLAVOR_INSPECT:
2978 		if (kotype == IKOT_THREAD_INSPECT) {
2979 			return TRUE;
2980 		}
2981 		OS_FALLTHROUGH;
2982 	case THREAD_FLAVOR_READ:
2983 		if (kotype == IKOT_THREAD_READ) {
2984 			return TRUE;
2985 		}
2986 		OS_FALLTHROUGH;
2987 	case THREAD_FLAVOR_CONTROL:
2988 		if (kotype == IKOT_THREAD_CONTROL) {
2989 			return TRUE;
2990 		}
2991 		break;
2992 	default:
2993 		panic("strange thread flavor");
2994 	}
2995 
2996 	return FALSE;
2997 }
2998 
2999 /*
3000  *	Routine: convert_port_to_thread_with_flavor_locked
3001  *	Purpose:
3002  *		Internal helper routine to convert from a locked port to a thread.
3003  *      Used by convert_port_to_thread_with_flavor() and port name -> thread conversions.
3004  *  Args:
3005  *      port    - target port
3006  *      flavor  - requested thread port flavor
3007  *      options - port translation options
3008  *	Conditions:
3009  *		Port is locked and active. Produces thread ref or THREAD_NULL.
3010  */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t               port,
	mach_thread_flavor_t     flavor,
	port_intrans_options_t   options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* Caller asked that the calling thread itself be excluded. */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* Restrict the translation to threads of the caller's task. */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* Corpse threads are only reachable when the caller opted in. */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* Apply the task conversion policy to the owning task. */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	thread_reference(thread);
	return thread;
}
3065 
3066 /*
3067  *	Routine:	convert_port_to_thread_with_flavor
3068  *	Purpose:
3069  *		Internal helper for converting from a port to a thread.
3070  *		Doesn't consume the port ref; produces a thread ref,
3071  *		which may be null.
3072  *  Args:
3073  *      port   - target port
3074  *      flavor - requested thread port flavor
3075  *      options - port translation options
3076  *	Conditions:
3077  *		Nothing locked.
3078  */
3079 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3080 convert_port_to_thread_with_flavor(
3081 	ipc_port_t           port,
3082 	mach_thread_flavor_t flavor,
3083 	port_intrans_options_t options)
3084 {
3085 	thread_t thread = THREAD_NULL;
3086 
3087 	if (IP_VALID(port)) {
3088 		ip_mq_lock(port);
3089 		if (ip_active(port)) {
3090 			thread = convert_port_to_thread_with_flavor_locked(port, flavor, options);
3091 		}
3092 		ip_mq_unlock(port);
3093 	}
3094 
3095 	return thread;
3096 }
3097 
3098 thread_t
convert_port_to_thread(ipc_port_t port)3099 convert_port_to_thread(
3100 	ipc_port_t              port)
3101 {
3102 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3103 	           PORT_INTRANS_OPTIONS_NONE);
3104 }
3105 
3106 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3107 convert_port_to_thread_read(
3108 	ipc_port_t              port)
3109 {
3110 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3111 	           PORT_INTRANS_ALLOW_CORPSE_TASK);
3112 }
3113 
3114 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3115 convert_port_to_thread_read_no_eval(
3116 	ipc_port_t              port)
3117 {
3118 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3119 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3120 }
3121 
3122 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3123 convert_port_to_thread_inspect(
3124 	ipc_port_t              port)
3125 {
3126 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3127 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3128 }
3129 
3130 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3131 convert_port_to_thread_inspect_no_eval(
3132 	ipc_port_t              port)
3133 {
3134 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3135 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3136 }
3137 
3138 /*
3139  *	Routine:	convert_thread_to_port_with_flavor
3140  *	Purpose:
3141  *		Convert from a thread to a port of given flavor.
3142  *		Consumes a thread ref; produces a naked send right
3143  *		which may be invalid.
3144  *	Conditions:
3145  *		Nothing locked.
3146  */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t              thread,
	thread_ro_t           tro,
	mach_thread_flavor_t  flavor)
{
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), tro->tro_task)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	/* Thread's IPC state already torn down: return IP_NULL. */
	if (!thread->ipc_active) {
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		/* Control port always exists while ipc_active; take a send right. */
		port = ipc_port_make_send(port);
	} else if (IP_VALID(port)) {
		/* Read/inspect port already allocated: send right + no-senders request. */
		(void)ipc_kobject_make_send_nsrequest(port);
	} else {
		ipc_kobject_type_t kotype = (flavor == THREAD_FLAVOR_READ) ? IKOT_THREAD_READ : IKOT_THREAD_INSPECT;

		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(thread, kotype,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
		/* Publish the lazily-created port in the read-only thread data. */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	thread_deallocate(thread); /* consume the caller's thread reference */
	return port;
}
3201 
3202 ipc_port_t
convert_thread_to_port(thread_t thread)3203 convert_thread_to_port(
3204 	thread_t                thread)
3205 {
3206 	thread_ro_t tro = get_thread_ro(thread);
3207 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3208 }
3209 
3210 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3211 convert_thread_read_to_port(thread_read_t thread)
3212 {
3213 	thread_ro_t tro = get_thread_ro(thread);
3214 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3215 }
3216 
3217 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3218 convert_thread_inspect_to_port(thread_inspect_t thread)
3219 {
3220 	thread_ro_t tro = get_thread_ro(thread);
3221 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3222 }
3223 
3224 
3225 /*
3226  *	Routine:	port_name_to_thread
3227  *	Purpose:
3228  *		Convert from a port name to a thread reference
3229  *		A name of MACH_PORT_NULL is valid for the null thread.
3230  *	Conditions:
3231  *		Nothing locked.
3232  */
thread_t
port_name_to_thread(
	mach_port_name_t         name,
	port_intrans_options_t options)
{
	thread_t        thread = THREAD_NULL;
	ipc_port_t      kport;
	kern_return_t kr;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/* This entry point only serves control-flavor translation,
			 * so the corpse/skip-eval options must not be passed. */
			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
			thread = convert_port_to_thread_with_flavor_locked(kport,
			    THREAD_FLAVOR_CONTROL, options);
			ip_mq_unlock(kport);
		}
	}

	return thread;
}
3256 
3257 /*
3258  *	Routine:	port_name_is_pinned_itk_self
3259  *	Purpose:
3260  *		Returns whether this port name is for the pinned
3261  *		mach_task_self (if it exists).
3262  *
3263  *		task_self_trap() when the task port is pinned,
3264  *		will memorize the name the port has in the space
3265  *		in ip_receiver_name, which we can use to fast-track
3266  *		this answer without taking any lock.
3267  *
3268  *		ipc_task_disable() will set `ip_receiver_name` back to
3269  *		MACH_PORT_SPECIAL_DEFAULT.
3270  *
3271  *	Conditions:
3272  *		self must be current_task()
3273  *		Nothing locked.
3274  */
3275 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3276 port_name_is_pinned_itk_self(
3277 	task_t             self,
3278 	mach_port_name_t   name)
3279 {
3280 	ipc_port_t kport = self->itk_self;
3281 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3282 	       kport->ip_pinned && ip_get_receiver_name(kport) == name;
3283 }
3284 
3285 /*
3286  *	Routine:	port_name_to_current_task*_noref
3287  *	Purpose:
3288  *		Convert from a port name to current_task()
3289  *		A name of MACH_PORT_NULL is valid for the null task.
3290  *
3291  *		If current_task() is in the process of being terminated,
3292  *		this might return a non NULL task even when port_name_to_task()
3293  *		would.
3294  *
3295  *		However, this is an acceptable race that can't be controlled by
3296  *		userspace, and that downstream code using the returned task
3297  *		has to handle anyway.
3298  *
3299  *		ipc_space_disable() does try to narrow this race,
3300  *		by causing port_name_is_pinned_itk_self() to fail.
3301  *
3302  *	Returns:
3303  *		current_task() if the port name was for current_task()
3304  *		at the appropriate flavor.
3305  *
3306  *		TASK_NULL otherwise.
3307  *
3308  *	Conditions:
3309  *		Nothing locked.
3310  */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t   name,
	mach_task_flavor_t flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* Fast path: pinned mach_task_self() name, no locks taken. */
	if (port_name_is_pinned_itk_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			ipc_kobject_type_t type = ip_kotype(kport);
			if (task_port_kotype_valid_for_flavor(type, flavor)) {
				task = ipc_kobject_get_locked(kport, type);
			}
			ip_mq_unlock(kport);
			/* Only current_task() may be returned without a reference. */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3341 
3342 task_t
port_name_to_current_task_noref(mach_port_name_t name)3343 port_name_to_current_task_noref(
3344 	mach_port_name_t name)
3345 {
3346 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3347 }
3348 
3349 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3350 port_name_to_current_task_read_noref(
3351 	mach_port_name_t name)
3352 {
3353 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3354 }
3355 
3356 /*
3357  *	Routine:	port_name_to_task
3358  *	Purpose:
3359  *		Convert from a port name to a task reference
3360  *		A name of MACH_PORT_NULL is valid for the null task.
3361  *	Conditions:
3362  *		Nothing locked.
3363  */
static task_t
port_name_to_task_grp(
	mach_port_name_t name,
	task_grp_t       grp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* Fast path: pinned mach_task_self() name translates to self. */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(kport, TASK_FLAVOR_CONTROL,
			    PORT_INTRANS_OPTIONS_NONE, grp);
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3390 
3391 task_t
port_name_to_task_external(mach_port_name_t name)3392 port_name_to_task_external(
3393 	mach_port_name_t name)
3394 {
3395 	return port_name_to_task_grp(name, TASK_GRP_EXTERNAL);
3396 }
3397 
3398 task_t
port_name_to_task_kernel(mach_port_name_t name)3399 port_name_to_task_kernel(
3400 	mach_port_name_t name)
3401 {
3402 	return port_name_to_task_grp(name, TASK_GRP_KERNEL);
3403 }
3404 
3405 /*
3406  *	Routine:	port_name_to_task_read
3407  *	Purpose:
3408  *		Convert from a port name to a task reference
3409  *		A name of MACH_PORT_NULL is valid for the null task.
3410  *	Conditions:
3411  *		Nothing locked.
3412  */
task_read_t
port_name_to_task_read(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_read_t tr = TASK_READ_NULL;
	task_t self = current_task();

	/* Fast path: pinned mach_task_self() name translates to self. */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/* Read flavor: corpses allowed, conversion eval still applies. */
			tr = convert_port_to_task_with_flavor_locked(kport, TASK_FLAVOR_READ,
			    PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tr;
}
3438 
3439 /*
3440  *	Routine:	port_name_to_task_read_no_eval
3441  *	Purpose:
3442  *		Convert from a port name to a task reference
3443  *		A name of MACH_PORT_NULL is valid for the null task.
3444  *		Skips task_conversion_eval() during conversion.
3445  *	Conditions:
3446  *		Nothing locked.
3447  */
task_read_t
port_name_to_task_read_no_eval(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_read_t tr = TASK_READ_NULL;
	task_t self = current_task();

	/* Fast path: pinned mach_task_self() name translates to self. */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/* Read flavor: corpses allowed, task_conversion_eval() skipped. */
			tr = convert_port_to_task_with_flavor_locked(kport, TASK_FLAVOR_READ,
			    PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tr;
}
3473 
3474 /*
3475  *	Routine:	port_name_to_task_name
3476  *	Purpose:
3477  *		Convert from a port name to a task reference
3478  *		A name of MACH_PORT_NULL is valid for the null task.
3479  *	Conditions:
3480  *		Nothing locked.
3481  */
3482 task_name_t
port_name_to_task_name(mach_port_name_t name)3483 port_name_to_task_name(
3484 	mach_port_name_t name)
3485 {
3486 	ipc_port_t kport;
3487 	kern_return_t kr;
3488 	task_name_t tn = TASK_NAME_NULL;
3489 	task_t self = current_task();
3490 
3491 	if (port_name_is_pinned_itk_self(self, name)) {
3492 		task_reference_grp(self, TASK_GRP_KERNEL);
3493 		return self;
3494 	}
3495 
3496 	if (MACH_PORT_VALID(name)) {
3497 		kr = ipc_port_translate_send(current_space(), name, &kport);
3498 		if (kr == KERN_SUCCESS) {
3499 			/* port is locked and active */
3500 			tn = convert_port_to_task_with_flavor_locked(kport, TASK_FLAVOR_NAME,
3501 			    PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
3502 			ip_mq_unlock(kport);
3503 		}
3504 	}
3505 	return tn;
3506 }
3507 
3508 /*
3509  *	Routine:	port_name_to_task_id_token
3510  *	Purpose:
3511  *		Convert from a port name to a task identity token reference
3512  *	Conditions:
3513  *		Nothing locked.
3514  */
3515 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3516 port_name_to_task_id_token(
3517 	mach_port_name_t name)
3518 {
3519 	ipc_port_t port;
3520 	kern_return_t kr;
3521 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3522 
3523 	if (MACH_PORT_VALID(name)) {
3524 		kr = ipc_port_translate_send(current_space(), name, &port);
3525 		if (kr == KERN_SUCCESS) {
3526 			token = convert_port_to_task_id_token(port);
3527 			ip_mq_unlock(port);
3528 		}
3529 	}
3530 	return token;
3531 }
3532 
3533 /*
3534  *	Routine:	port_name_to_host
3535  *	Purpose:
3536  *		Convert from a port name to a host pointer.
3537  *		NOTE: This does _not_ return a +1 reference to the host_t
3538  *	Conditions:
3539  *		Nothing locked.
3540  */
3541 host_t
port_name_to_host(mach_port_name_t name)3542 port_name_to_host(
3543 	mach_port_name_t name)
3544 {
3545 	host_t host = HOST_NULL;
3546 	kern_return_t kr;
3547 	ipc_port_t port;
3548 
3549 	if (MACH_PORT_VALID(name)) {
3550 		kr = ipc_port_translate_send(current_space(), name, &port);
3551 		if (kr == KERN_SUCCESS) {
3552 			host = convert_port_to_host(port);
3553 			ip_mq_unlock(port);
3554 		}
3555 	}
3556 	return host;
3557 }
3558 
3559 /*
3560  *	Routine:	convert_task_to_port_with_flavor
3561  *	Purpose:
3562  *		Convert from a task to a port of given flavor.
3563  *		Consumes a task ref; produces a naked send right
3564  *		which may be invalid.
3565  *	Conditions:
3566  *		Nothing locked.
3567  */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t              task,
	mach_task_flavor_t  flavor,
	task_grp_t          grp)
{
	ipc_port_t port = IP_NULL;
	ipc_kobject_type_t kotype = IKOT_NONE;

	itk_lock(task);

	/* Task's IPC state already torn down: return IP_NULL. */
	if (!task->ipc_active) {
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), task)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		/* Control/name ports exist while ipc_active; take a send right. */
		port = ipc_port_make_send(task->itk_task_ports[flavor]);
		break;
	/*
	 * Claim a send right on the task read/inspect port, and request a no-senders
	 * notification on that port (if none outstanding). A task reference is
	 * deliberately not donated here because ipc_kobject_make_send_lazy_alloc_port
	 * is used only for convenience and these ports don't control the lifecycle of
	 * the task kobject. Instead, the task's itk_lock is used to synchronize the
	 * handling of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		kotype = (flavor == TASK_FLAVOR_READ) ? IKOT_TASK_READ : IKOT_TASK_INSPECT;
		(void)ipc_kobject_make_send_lazy_alloc_port((ipc_port_t *)&task->itk_task_ports[flavor],
		    (ipc_kobject_t)task, kotype,
		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND | IPC_KOBJECT_PTRAUTH_STORE,
		    OS_PTRAUTH_DISCRIMINATOR("task.itk_task_ports"));
		port = task->itk_task_ports[flavor];

		break;
	}

exit:
	itk_unlock(task);
	task_deallocate_grp(task, grp); /* consume the caller's task reference */
	return port;
}
3624 
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	/* Must be the first send right made on the corpse control port. */
	assert(port->ip_srights == 0);
	kr = ipc_kobject_make_send_nsrequest(port);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	task_deallocate(corpse); /* consume the caller's corpse reference */
	return port;
}
3643 
3644 ipc_port_t
convert_task_to_port(task_t task)3645 convert_task_to_port(
3646 	task_t          task)
3647 {
3648 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3649 }
3650 
3651 ipc_port_t
convert_task_read_to_port(task_read_t task)3652 convert_task_read_to_port(
3653 	task_read_t          task)
3654 {
3655 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3656 }
3657 
3658 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3659 convert_task_inspect_to_port(
3660 	task_inspect_t          task)
3661 {
3662 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3663 }
3664 
3665 ipc_port_t
convert_task_name_to_port(task_name_t task)3666 convert_task_name_to_port(
3667 	task_name_t             task)
3668 {
3669 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3670 }
3671 
3672 extern ipc_port_t convert_task_to_port_external(task_t task);
3673 ipc_port_t
convert_task_to_port_external(task_t task)3674 convert_task_to_port_external(task_t task)
3675 {
3676 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3677 }
3678 
3679 ipc_port_t
convert_task_to_port_pinned(task_t task)3680 convert_task_to_port_pinned(
3681 	task_t          task)
3682 {
3683 	ipc_port_t port = IP_NULL;
3684 
3685 	assert(task == current_task());
3686 
3687 	itk_lock(task);
3688 
3689 	if (task->ipc_active && task->itk_self != IP_NULL) {
3690 		port = ipc_port_make_send(task->itk_self);
3691 	}
3692 
3693 	itk_unlock(task);
3694 	task_deallocate(task);
3695 	return port;
3696 }
3697 /*
3698  *	Routine:	convert_task_suspend_token_to_port
3699  *	Purpose:
3700  *		Convert from a task suspension token to a port.
3701  *		Consumes a task suspension token ref; produces a naked send-once right
3702  *		which may be invalid.
3703  *	Conditions:
3704  *		Nothing locked.
3705  */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t         task,
	task_grp_t                      grp)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* Lazily create the resume port on first use. */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* Inactive task: no resume right can be produced. */
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate_grp(task, grp); /* consume token ref */

	return port;
}
3738 
3739 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3740 convert_task_suspension_token_to_port_external(
3741 	task_suspension_token_t         task)
3742 {
3743 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3744 }
3745 
3746 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3747 convert_task_suspension_token_to_port_mig(
3748 	task_suspension_token_t         task)
3749 {
3750 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3751 }
3752 
3753 ipc_port_t
convert_thread_to_port_pinned(thread_t thread)3754 convert_thread_to_port_pinned(
3755 	thread_t                thread)
3756 {
3757 	thread_ro_t tro = get_thread_ro(thread);
3758 	ipc_port_t  port = IP_NULL;
3759 
3760 	thread_mtx_lock(thread);
3761 
3762 	if (thread->ipc_active && tro->tro_self_port != IP_NULL) {
3763 		port = ipc_port_make_send(tro->tro_self_port);
3764 	}
3765 
3766 	thread_mtx_unlock(thread);
3767 	thread_deallocate(thread);
3768 	return port;
3769 }
3770 /*
3771  *	Routine:	space_deallocate
3772  *	Purpose:
3773  *		Deallocate a space ref produced by convert_port_to_space.
3774  *	Conditions:
3775  *		Nothing locked.
3776  */
3777 
3778 void
space_deallocate(ipc_space_t space)3779 space_deallocate(
3780 	ipc_space_t     space)
3781 {
3782 	if (space != IS_NULL) {
3783 		is_release(space);
3784 	}
3785 }
3786 
3787 /*
3788  *	Routine:	space_read_deallocate
3789  *	Purpose:
3790  *		Deallocate a space read ref produced by convert_port_to_space_read.
3791  *	Conditions:
3792  *		Nothing locked.
3793  */
3794 
3795 void
space_read_deallocate(ipc_space_read_t space)3796 space_read_deallocate(
3797 	ipc_space_read_t     space)
3798 {
3799 	if (space != IS_INSPECT_NULL) {
3800 		is_release((ipc_space_t)space);
3801 	}
3802 }
3803 
3804 /*
3805  *	Routine:	space_inspect_deallocate
3806  *	Purpose:
3807  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3808  *	Conditions:
3809  *		Nothing locked.
3810  */
3811 
3812 void
space_inspect_deallocate(ipc_space_inspect_t space)3813 space_inspect_deallocate(
3814 	ipc_space_inspect_t     space)
3815 {
3816 	if (space != IS_INSPECT_NULL) {
3817 		is_release((ipc_space_t)space);
3818 	}
3819 }
3820 
3821 
3822 /*
3823  *	Routine:	thread/task_set_exception_ports [kernel call]
3824  *	Purpose:
3825  *			Sets the thread/task exception port, flavor and
3826  *			behavior for the exception types specified by the mask.
3827  *			There will be one send right per exception per valid
3828  *			port.
3829  *	Conditions:
3830  *		Nothing locked.  If successful, consumes
3831  *		the supplied send right.
3832  *	Returns:
3833  *		KERN_SUCCESS		Changed the special port.
3834  *		KERN_INVALID_ARGUMENT	The thread is null,
3835  *					Illegal mask bit set.
3836  *					Illegal exception behavior
3837  *		KERN_FAILURE		The thread is dead.
3838  */
3839 
3840 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)3841 thread_set_exception_ports(
3842 	thread_t                thread,
3843 	exception_mask_t        exception_mask,
3844 	ipc_port_t              new_port,
3845 	exception_behavior_t    new_behavior,
3846 	thread_state_flavor_t   new_flavor)
3847 {
3848 	ipc_port_t  old_port[EXC_TYPES_COUNT];
3849 	thread_ro_t tro;
3850 	boolean_t   privileged = task_is_privileged(current_task());
3851 
3852 #if CONFIG_MACF
3853 	struct label *new_label;
3854 #endif
3855 
3856 	if (thread == THREAD_NULL) {
3857 		return KERN_INVALID_ARGUMENT;
3858 	}
3859 
3860 	if (exception_mask & ~EXC_MASK_VALID) {
3861 		return KERN_INVALID_ARGUMENT;
3862 	}
3863 
3864 	if (IP_VALID(new_port)) {
3865 		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3866 		case EXCEPTION_DEFAULT:
3867 		case EXCEPTION_STATE:
3868 		case EXCEPTION_STATE_IDENTITY:
3869 		case EXCEPTION_IDENTITY_PROTECTED:
3870 			break;
3871 
3872 		default:
3873 			return KERN_INVALID_ARGUMENT;
3874 		}
3875 	}
3876 
3877 	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
3878 		return KERN_INVALID_RIGHT;
3879 	}
3880 
3881 
3882 	/*
3883 	 * Check the validity of the thread_state_flavor by calling the
3884 	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
3885 	 * osfmk/mach/ARCHITECTURE/thread_status.h
3886 	 */
3887 	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
3888 		return KERN_INVALID_ARGUMENT;
3889 	}
3890 
3891 	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED &&
3892 	    !(new_behavior & MACH_EXCEPTION_CODES)) {
3893 		return KERN_INVALID_ARGUMENT;
3894 	}
3895 
3896 #if CONFIG_MACF
3897 	new_label = mac_exc_create_label_for_current_proc();
3898 #endif
3899 
3900 	tro = get_thread_ro(thread);
3901 	thread_mtx_lock(thread);
3902 
3903 	if (!thread->active) {
3904 		thread_mtx_unlock(thread);
3905 
3906 		return KERN_FAILURE;
3907 	}
3908 
3909 	if (tro->tro_exc_actions == NULL) {
3910 		ipc_thread_init_exc_actions(tro);
3911 	}
3912 	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3913 		struct exception_action *action = &tro->tro_exc_actions[i];
3914 
3915 		if ((exception_mask & (1 << i))
3916 #if CONFIG_MACF
3917 		    && mac_exc_update_action_label(action, new_label) == 0
3918 #endif
3919 		    ) {
3920 			old_port[i] = action->port;
3921 			action->port = ipc_port_copy_send(new_port);
3922 			action->behavior = new_behavior;
3923 			action->flavor = new_flavor;
3924 			action->privileged = privileged;
3925 		} else {
3926 			old_port[i] = IP_NULL;
3927 		}
3928 	}
3929 
3930 	thread_mtx_unlock(thread);
3931 
3932 #if CONFIG_MACF
3933 	mac_exc_free_label(new_label);
3934 #endif
3935 
3936 	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
3937 		if (IP_VALID(old_port[i])) {
3938 			ipc_port_release_send(old_port[i]);
3939 		}
3940 	}
3941 
3942 	if (IP_VALID(new_port)) {         /* consume send right */
3943 		ipc_port_release_send(new_port);
3944 	}
3945 
3946 	return KERN_SUCCESS;
3947 }
3948 
3949 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)3950 task_set_exception_ports(
3951 	task_t                                  task,
3952 	exception_mask_t                exception_mask,
3953 	ipc_port_t                              new_port,
3954 	exception_behavior_t    new_behavior,
3955 	thread_state_flavor_t   new_flavor)
3956 {
3957 	ipc_port_t              old_port[EXC_TYPES_COUNT];
3958 	boolean_t privileged = task_is_privileged(current_task());
3959 	register int    i;
3960 
3961 #if CONFIG_MACF
3962 	struct label *new_label;
3963 #endif
3964 
3965 	if (task == TASK_NULL) {
3966 		return KERN_INVALID_ARGUMENT;
3967 	}
3968 
3969 	if (exception_mask & ~EXC_MASK_VALID) {
3970 		return KERN_INVALID_ARGUMENT;
3971 	}
3972 
3973 	if (IP_VALID(new_port)) {
3974 		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
3975 		case EXCEPTION_DEFAULT:
3976 		case EXCEPTION_STATE:
3977 		case EXCEPTION_STATE_IDENTITY:
3978 		case EXCEPTION_IDENTITY_PROTECTED:
3979 			break;
3980 
3981 		default:
3982 			return KERN_INVALID_ARGUMENT;
3983 		}
3984 	}
3985 
3986 	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
3987 		return KERN_INVALID_RIGHT;
3988 	}
3989 
3990 
3991 	/*
3992 	 * Check the validity of the thread_state_flavor by calling the
3993 	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
3994 	 * osfmk/mach/ARCHITECTURE/thread_status.h
3995 	 */
3996 	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
3997 		return KERN_INVALID_ARGUMENT;
3998 	}
3999 
4000 	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED
4001 	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
4002 		return KERN_INVALID_ARGUMENT;
4003 	}
4004 
4005 #if CONFIG_MACF
4006 	new_label = mac_exc_create_label_for_current_proc();
4007 #endif
4008 
4009 	itk_lock(task);
4010 
4011 	if (!task->ipc_active) {
4012 		itk_unlock(task);
4013 		return KERN_FAILURE;
4014 	}
4015 
4016 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4017 		if ((exception_mask & (1 << i))
4018 #if CONFIG_MACF
4019 		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4020 #endif
4021 		    ) {
4022 			old_port[i] = task->exc_actions[i].port;
4023 			task->exc_actions[i].port =
4024 			    ipc_port_copy_send(new_port);
4025 			task->exc_actions[i].behavior = new_behavior;
4026 			task->exc_actions[i].flavor = new_flavor;
4027 			task->exc_actions[i].privileged = privileged;
4028 		} else {
4029 			old_port[i] = IP_NULL;
4030 		}
4031 	}
4032 
4033 	itk_unlock(task);
4034 
4035 #if CONFIG_MACF
4036 	mac_exc_free_label(new_label);
4037 #endif
4038 
4039 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4040 		if (IP_VALID(old_port[i])) {
4041 			ipc_port_release_send(old_port[i]);
4042 		}
4043 	}
4044 
4045 	if (IP_VALID(new_port)) {         /* consume send right */
4046 		ipc_port_release_send(new_port);
4047 	}
4048 
4049 	return KERN_SUCCESS;
4050 }
4051 
4052 /*
4053  *	Routine:	thread/task_swap_exception_ports [kernel call]
4054  *	Purpose:
4055  *			Sets the thread/task exception port, flavor and
4056  *			behavior for the exception types specified by the
4057  *			mask.
4058  *
4059  *			The old ports, behavior and flavors are returned
4060  *			Count specifies the array sizes on input and
4061  *			the number of returned ports etc. on output.  The
4062  *			arrays must be large enough to hold all the returned
4063  *			data, MIG returnes an error otherwise.  The masks
4064  *			array specifies the corresponding exception type(s).
4065  *
4066  *	Conditions:
4067  *		Nothing locked.  If successful, consumes
4068  *		the supplied send right.
4069  *
4070  *		Returns upto [in} CountCnt elements.
4071  *	Returns:
4072  *		KERN_SUCCESS		Changed the special port.
4073  *		KERN_INVALID_ARGUMENT	The thread is null,
4074  *					Illegal mask bit set.
4075  *					Illegal exception behavior
4076  *		KERN_FAILURE		The thread is dead.
4077  */
4078 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	/*
	 * Displaced send rights are stashed here while the thread mutex is
	 * held and released only after the lock is dropped.
	 */
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* A valid port may only be paired with a recognized behavior. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable ports are refused as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* EXCEPTION_IDENTITY_PROTECTED requires MACH_EXCEPTION_CODES. */
	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* Allocate the label before taking the thread mutex. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	if (tro->tro_exc_actions == NULL) {
		/* Lazily allocate the per-thread exception action table. */
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* No match: emit a new (port, behavior, flavor) tuple. */
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Stash the displaced right; install a fresh copy of new_port. */
			old_port[i] = action->port;
			action->port = ipc_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights outside the lock.  Only slots below the
	 * final value of i were written, so walk down from there.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4216 
kern_return_t
task_swap_exception_ports(
	task_t                                          task,
	exception_mask_t                        exception_mask,
	ipc_port_t                                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	/*
	 * Displaced send rights are stashed here while the task's IPC lock is
	 * held and released only after the lock is dropped.
	 */
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* A valid port may only be paired with a recognized behavior. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable ports are refused as exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* EXCEPTION_IDENTITY_PROTECTED requires MACH_EXCEPTION_CODES. */
	if ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* Allocate the label before taking the task's IPC lock. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* No match: emit a new (port, behavior, flavor) tuple. */
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right; install a fresh copy of new_port. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port =     ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights outside the lock.  Only slots below the
	 * final value of i were written, so walk down from there.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4346 
4347 /*
4348  *	Routine:	thread/task_get_exception_ports [kernel call]
4349  *	Purpose:
4350  *		Clones a send right for each of the thread/task's exception
4351  *		ports specified in the mask and returns the behaviour
4352  *		and flavor of said port.
4353  *
 *		Returns up to [in] CountCnt elements.
4355  *
4356  *	Conditions:
4357  *		Nothing locked.
4358  *	Returns:
4359  *		KERN_SUCCESS		Extracted a send right.
4360  *		KERN_INVALID_ARGUMENT	The thread is null,
4361  *					Invalid special port,
4362  *					Illegal mask bit set.
4363  *		KERN_FAILURE		The thread is dead.
4364  */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* ports_info != NULL selects the info query; otherwise send rights are copied out. */
	boolean_t info_only = (ports_info != NULL);
	boolean_t dbg_ok = TRUE;
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The send-right variant needs an output array for the rights. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
	/* On RELEASE kernels, gate exposure of port info behind the MACF check. */
	if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
		dbg_ok = TRUE;
	} else {
		dbg_ok = FALSE;
	}
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No action table yet means no exception ports to report. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New tuple, and the caller's arrays still have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!dbg_ok || !IP_VALID(exc_port)) {
						/* avoid taking port lock if !dbg_ok */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Obfuscated pointers only; no right is held. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = ipc_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4467 
4468 static kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4469 thread_get_exception_ports(
4470 	thread_t                        thread,
4471 	exception_mask_t                exception_mask,
4472 	exception_mask_array_t          masks,
4473 	mach_msg_type_number_t          *CountCnt,
4474 	exception_port_array_t          ports,
4475 	exception_behavior_array_t      behaviors,
4476 	thread_state_flavor_array_t     flavors)
4477 {
4478 	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4479 	           NULL, ports, behaviors, flavors);
4480 }
4481 
4482 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4483 thread_get_exception_ports_info(
4484 	mach_port_t                     port,
4485 	exception_mask_t                exception_mask,
4486 	exception_mask_array_t          masks,
4487 	mach_msg_type_number_t          *CountCnt,
4488 	exception_port_info_array_t     ports_info,
4489 	exception_behavior_array_t      behaviors,
4490 	thread_state_flavor_array_t     flavors)
4491 {
4492 	kern_return_t kr;
4493 
4494 	thread_t thread = convert_port_to_thread_read_no_eval(port);
4495 
4496 	if (thread == THREAD_NULL) {
4497 		return KERN_INVALID_ARGUMENT;
4498 	}
4499 
4500 	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4501 	    ports_info, NULL, behaviors, flavors);
4502 
4503 	thread_deallocate(thread);
4504 	return kr;
4505 }
4506 
4507 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4508 thread_get_exception_ports_from_user(
4509 	mach_port_t                     port,
4510 	exception_mask_t                exception_mask,
4511 	exception_mask_array_t          masks,
4512 	mach_msg_type_number_t         *CountCnt,
4513 	exception_port_array_t          ports,
4514 	exception_behavior_array_t      behaviors,
4515 	thread_state_flavor_array_t     flavors)
4516 {
4517 	kern_return_t kr;
4518 
4519 	thread_t thread = convert_port_to_thread(port);
4520 
4521 	if (thread == THREAD_NULL) {
4522 		return KERN_INVALID_ARGUMENT;
4523 	}
4524 
4525 	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4526 
4527 	thread_deallocate(thread);
4528 	return kr;
4529 }
4530 
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* ports_info != NULL selects the info query; otherwise send rights are copied out. */
	boolean_t info_only = (ports_info != NULL);
	boolean_t dbg_ok = TRUE;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* The send-right variant needs an output array for the rights. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

#if !(DEVELOPMENT || DEBUG) && CONFIG_MACF
	/* On RELEASE kernels, gate exposure of port info behind the MACF check. */
	if (info_only && mac_task_check_expose_task(kernel_task, TASK_FLAVOR_CONTROL) == 0) {
		dbg_ok = TRUE;
	} else {
		dbg_ok = FALSE;
	}
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New tuple, and the caller's arrays still have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!dbg_ok || !IP_VALID(exc_port)) {
						/* avoid taking port lock if !dbg_ok */
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Obfuscated pointers only; no right is held. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = ipc_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4625 
4626 static kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4627 task_get_exception_ports(
4628 	task_t                          task,
4629 	exception_mask_t                exception_mask,
4630 	exception_mask_array_t          masks,
4631 	mach_msg_type_number_t          *CountCnt,
4632 	exception_port_array_t          ports,
4633 	exception_behavior_array_t      behaviors,
4634 	thread_state_flavor_array_t     flavors)
4635 {
4636 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4637 	           NULL, ports, behaviors, flavors);
4638 }
4639 
4640 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4641 task_get_exception_ports_info(
4642 	mach_port_t                     port,
4643 	exception_mask_t                exception_mask,
4644 	exception_mask_array_t          masks,
4645 	mach_msg_type_number_t          *CountCnt,
4646 	exception_port_info_array_t     ports_info,
4647 	exception_behavior_array_t      behaviors,
4648 	thread_state_flavor_array_t     flavors)
4649 {
4650 	kern_return_t kr;
4651 
4652 	task_t task = convert_port_to_task_read_no_eval(port);
4653 
4654 	if (task == TASK_NULL) {
4655 		return KERN_INVALID_ARGUMENT;
4656 	}
4657 
4658 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4659 	    ports_info, NULL, behaviors, flavors);
4660 
4661 	task_deallocate(task);
4662 	return kr;
4663 }
4664 
4665 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4666 task_get_exception_ports_from_user(
4667 	mach_port_t                     port,
4668 	exception_mask_t                exception_mask,
4669 	exception_mask_array_t          masks,
4670 	mach_msg_type_number_t         *CountCnt,
4671 	exception_port_array_t          ports,
4672 	exception_behavior_array_t      behaviors,
4673 	thread_state_flavor_array_t     flavors)
4674 {
4675 	kern_return_t kr;
4676 
4677 	task_t task = convert_port_to_task(port);
4678 
4679 	if (task == TASK_NULL) {
4680 		return KERN_INVALID_ARGUMENT;
4681 	}
4682 
4683 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4684 
4685 	task_deallocate(task);
4686 	return kr;
4687 }
4688 
4689 /*
4690  *	Routine:	ipc_thread_port_unpin
4691  *	Purpose:
4692  *
4693  *		Called on the thread when it's terminating so that the last ref can be
4694  *		deallocated without a guard exception.
4695  *	Conditions:
4696  *		Thread mutex lock is held.
4697  */
4698 void
ipc_thread_port_unpin(ipc_port_t port)4699 ipc_thread_port_unpin(
4700 	ipc_port_t port)
4701 {
4702 	if (port == IP_NULL) {
4703 		return;
4704 	}
4705 	ip_mq_lock(port);
4706 	port->ip_pinned = 0;
4707 	ip_mq_unlock(port);
4708 }
4709