xref: /xnu-10002.81.5/osfmk/kern/ipc_tt.c (revision 5e3eaea39dcf651e66cb99ba7d70e32cc4a99587)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <ipc/port.h>
72 #include <mach/mach_types.h>
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/mach_param.h>
76 #include <mach/task_special_ports.h>
77 #include <mach/thread_special_ports.h>
78 #include <mach/thread_status.h>
79 #include <mach/exception_types.h>
80 #include <mach/memory_object_types.h>
81 #include <mach/mach_traps.h>
82 #include <mach/task_server.h>
83 #include <mach/thread_act_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/vm_map_server.h>
87 
88 #include <kern/exc_guard.h>
89 #include <kern/kern_types.h>
90 #include <kern/host.h>
91 #include <kern/ipc_kobject.h>
92 #include <kern/ipc_tt.h>
93 #include <kern/kalloc.h>
94 #include <kern/thread.h>
95 #include <kern/ux_handler.h>
96 #include <kern/misc_protos.h>
97 #include <kdp/kdp_dyld.h>
98 
99 #include <vm/vm_map.h>
100 #include <vm/vm_pageout.h>
101 #include <vm/vm_protos.h>
102 #include <libkern/coreanalytics/coreanalytics.h>
103 
104 #include <security/mac_mach_internal.h>
105 
106 #if CONFIG_CSR
107 #include <sys/csr.h>
108 #endif
109 
110 #include <sys/code_signing.h> /* for developer mode state */
111 
112 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
113 extern int cs_relax_platform_task_ports;
114 #endif
115 
116 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
117 extern boolean_t proc_is_simulated(const proc_t);
118 extern struct proc* current_proc(void);
119 
120 /* bootarg to create lightweight corpse for thread identity lockdown */
121 TUNABLE(bool, thid_should_crash, "thid_should_crash", true);
122 
123 #define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"
124 
125 CA_EVENT(set_exception,
126     CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
127     CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
128     CA_INT, mask,
129     CA_STATIC_STRING(6), level);
130 
131 __options_decl(ipc_reply_port_type_t, uint32_t, {
132 	IRPT_NONE        = 0x00,
133 	IRPT_USER        = 0x01,
134 	IRPT_KERNEL      = 0x02,
135 });
136 
137 /* forward declarations */
138 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
139 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
140 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
141 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
142 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
143 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
144 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
145 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
146 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
147 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
148 
149 /*
150  *	Routine:	ipc_task_init
151  *	Purpose:
152  *		Initialize a task's IPC state.
153  *
154  *		If non-null, some state will be inherited from the parent.
155  *		The parent must be appropriately initialized.
156  *	Conditions:
157  *		Nothing locked.
158  */
159 
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t pport;
	kern_return_t kr;
	int i;


	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/*
	 * The control (kport) and name (nport) ports are allocated eagerly;
	 * inspect/read flavors are created lazily on first conversion.
	 * The ports are allocated with no kobject attached (IKO_NULL) and
	 * only wired to the task later in ipc_task_enable().
	 */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* slot 0 is unused and never gets a real label */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* no parent: start with empty exception/special ports */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* copy the parent's ports under the parent's itk lock */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
283 
284 /*
285  *	Routine:	ipc_task_set_immovable_pinned
286  *	Purpose:
287  *		Make a task's control port immovable and/or pinned
288  *      according to its control port options. If control port
289  *      is immovable, allocate an immovable control port for the
290  *      task and optionally pin it.
291  *	Conditions:
292  *		Task's control port is movable and not pinned.
293  */
void
ipc_task_set_immovable_pinned(
	task_t            task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	ipc_port_t new_pport;

	/* pport is the same as kport at ipc_task_init() time */
	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
	assert(task->itk_self == task->itk_settable_self);
	assert(!task_is_a_corpse(task));

	/* only tasks opt in immovable control port can have pinned control port */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated detached (IKO_NULL); enabled only after being published */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);

		/*
		 * Relabel the movable kport so lookups substitute through it,
		 * and point its alt port at the new immovable pport.
		 */
		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		/* publish the new pport as itk_self under the itk lock */
		itk_lock(task);
		task->itk_self = new_pport;
		itk_unlock(task);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
	}
}
328 
329 /*
330  *	Routine:	ipc_task_enable
331  *	Purpose:
332  *		Enable a task for IPC access.
333  *	Conditions:
334  *		Nothing locked.
335  */
336 void
ipc_task_enable(task_t task)337 ipc_task_enable(
338 	task_t          task)
339 {
340 	ipc_port_t kport;
341 	ipc_port_t nport;
342 	ipc_port_t iport;
343 	ipc_port_t rdport;
344 	ipc_port_t pport;
345 
346 	itk_lock(task);
347 	if (!task->active) {
348 		/*
349 		 * task has been terminated before we can enable IPC access.
350 		 * The check is to make sure we don't accidentally re-enable
351 		 * the task ports _after_ they've been disabled during
352 		 * task_terminate_internal(), in which case we will hit the
353 		 * !task->ipc_active assertion in ipc_task_terminate().
354 		 *
355 		 * Technically we should grab task lock when checking task
356 		 * active bit, but since task termination unsets task->active
357 		 * _before_ calling ipc_task_disable(), we can always see the
358 		 * truth with just itk_lock() and bail if disable has been called.
359 		 */
360 		itk_unlock(task);
361 		return;
362 	}
363 
364 	assert(!task->ipc_active || task_is_a_corpse(task));
365 	task->ipc_active = true;
366 
367 	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
368 	if (kport != IP_NULL) {
369 		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
370 	}
371 	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
372 	if (nport != IP_NULL) {
373 		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
374 	}
375 	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
376 	if (iport != IP_NULL) {
377 		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
378 	}
379 	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
380 	if (rdport != IP_NULL) {
381 		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
382 	}
383 	pport = task->itk_self;
384 	if (pport != kport && pport != IP_NULL) {
385 		assert(task_is_immovable(task));
386 		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
387 	}
388 
389 	itk_unlock(task);
390 }
391 
392 /*
393  *	Routine:	ipc_task_disable
394  *	Purpose:
395  *		Disable IPC access to a task.
396  *	Conditions:
397  *		Nothing locked.
398  */
399 
void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* detach the task kobject from every existing port flavor */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			/* distinct pport only exists for immovable control ports */
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
470 
471 /*
472  *	Routine:	ipc_task_terminate
473  *	Purpose:
474  *		Clean up and destroy a task's IPC state.
475  *	Conditions:
476  *		Nothing locked.  The task must be suspended.
477  *		(Or the current thread must be in the task.)
478  */
479 
void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;
	ipc_port_t sself;
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	/*
	 * Snapshot and clear every port slot under the itk lock; the
	 * actual send-right releases and port deallocations happen after
	 * the lock is dropped.
	 */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}

	/* release and free the dyld notifier array, if any */
	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	/* release exception ports and their MAC labels */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
609 
610 /*
611  *	Routine:	ipc_task_reset
612  *	Purpose:
613  *		Reset a task's IPC state to protect it when
614  *		it enters an elevated security context. The
615  *		task name port can remain the same - since it
616  *              represents no specific privilege.
617  *	Conditions:
618  *		Nothing locked.  The task must be suspended.
619  *		(Or the current thread must be in the task.)
620  */
621 
void
ipc_task_reset(
	task_t          task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port has not been enabled, and is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		/* undo the eager allocation above */
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	/* swap in the freshly allocated control port */
	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for a corpse, needed to arm no-senders notification */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(new_kport);
	}

	/* clears ikol_alt_port */
	ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_disable(old_pport, IKOT_TASK_CONTROL);
	}

	/*
	 * Strip non-privileged exception ports (they may carry stale
	 * credentials across the security transition), but keep the
	 * corpse-notify port while a corpse report is pending.
	 */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	if (old_rdport != IP_NULL) {
		/* read port ikol_alt_port may point to kport, dealloc first */
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
	}
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
	}
}
767 
768 /*
769  *	Routine:	ipc_thread_init
770  *	Purpose:
771  *		Initialize a thread's IPC state.
772  *	Conditions:
773  *		Nothing locked.
774  */
775 
void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t      kport;
	ipc_port_t      pport;
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/* movable kport substitutes to the immovable pport via its label */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	/* NOTE(review): direct stores assume tro is still writable at init time — confirm */
	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
829 
/*
 * Make the main thread's control port immovable (and pinned, if the
 * owning task's control port is pinned), mirroring what
 * ipc_task_set_immovable_pinned() does for the task control port.
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated detached (IKO_NULL); enabled only after being published */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		assert(kport != IP_NULL);
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		/* tro_self_port lives in the RO zone; update through zalloc_ro */
		thread_mtx_lock(thread);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
869 
/*
 * Wrapper type so the per-thread exception action array can be
 * allocated and freed with typed kalloc_type()/kfree_type() as a
 * single fixed-size object.
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
873 
874 static void
ipc_thread_init_exc_actions(thread_ro_t tro)875 ipc_thread_init_exc_actions(thread_ro_t tro)
876 {
877 	struct exception_action *actions;
878 
879 	actions = kalloc_type(struct thread_init_exc_actions,
880 	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
881 
882 #if CONFIG_MACF
883 	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
884 		mac_exc_associate_action_label(&actions[i],
885 		    mac_exc_create_label(&actions[i]));
886 	}
887 #endif
888 
889 	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
890 }
891 
892 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)893 ipc_thread_destroy_exc_actions(thread_ro_t tro)
894 {
895 	struct exception_action *actions = tro->tro_exc_actions;
896 
897 	if (actions) {
898 #if CONFIG_MACF
899 		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
900 			mac_exc_free_action_label(actions + i);
901 		}
902 #endif
903 
904 		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
905 		struct thread_init_exc_actions *tr_actions =
906 		    (struct thread_init_exc_actions *)actions;
907 		kfree_type(struct thread_init_exc_actions, tr_actions);
908 	}
909 }
910 
911 static void
ipc_thread_ro_update_ports(thread_ro_t tro,const struct thread_ro * tro_tpl)912 ipc_thread_ro_update_ports(
913 	thread_ro_t             tro,
914 	const struct thread_ro *tro_tpl)
915 {
916 	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
917 	vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);
918 
919 	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
920 	    offsetof(struct thread_ro, tro_self_port) +
921 	    sizeof(struct ipc_port_t *));
922 	static_assert(offsetof(struct thread_ro, tro_ports) ==
923 	    offsetof(struct thread_ro, tro_self_port) +
924 	    2 * sizeof(struct ipc_port_t *));
925 	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
926 	    offs, &tro_tpl->tro_self_port, size);
927 }
928 
929 /*
930  *	Routine:	ipc_thread_disable
931  *	Purpose:
932  *		Clean up and destroy a thread's IPC state.
933  *	Conditions:
934  *		Thread locked.
935  */
void
ipc_thread_disable(
	thread_t        thread)
{
	thread_ro_t     tro = get_thread_ro(thread);
	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t      pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	/* detach the thread kobject from every existing port flavor */
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	if (pport != kport && pport != IP_NULL) {
		/* distinct pport only exists for immovable thread ports */
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
981 
982 /*
983  *	Routine:	ipc_thread_terminate
984  *	Purpose:
985  *		Clean up and destroy a thread's IPC state.
986  *	Conditions:
987  *		Nothing locked.
988  */
989 
990 void
ipc_thread_terminate(thread_t thread)991 ipc_thread_terminate(
992 	thread_t        thread)
993 {
994 	thread_ro_t tro = get_thread_ro(thread);
995 	ipc_port_t kport = IP_NULL;
996 	ipc_port_t iport = IP_NULL;
997 	ipc_port_t rdport = IP_NULL;
998 	ipc_port_t pport = IP_NULL;
999 	ipc_port_t sport = IP_NULL;
1000 
1001 	thread_mtx_lock(thread);
1002 
1003 	/*
1004 	 * If we ever failed to clear ipc_active before the last reference
1005 	 * was dropped, lazy ports might be made and used after the last
1006 	 * reference is dropped and cause use after free (see comment in
1007 	 * ipc_thread_disable()).
1008 	 */
1009 	assert(!thread->ipc_active);
1010 
1011 	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
1012 	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
1013 	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
1014 	pport = tro->tro_self_port;
1015 	sport = tro->tro_settable_self_port;
1016 
1017 	if (kport != IP_NULL) {
1018 		if (IP_VALID(sport)) {
1019 			ipc_port_release_send(sport);
1020 		}
1021 
1022 		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });
1023 
1024 		if (tro->tro_exc_actions != NULL) {
1025 			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1026 				if (IP_VALID(tro->tro_exc_actions[i].port)) {
1027 					ipc_port_release_send(tro->tro_exc_actions[i].port);
1028 				}
1029 			}
1030 			ipc_thread_destroy_exc_actions(tro);
1031 		}
1032 	}
1033 
1034 #if IMPORTANCE_INHERITANCE
1035 	assert(thread->ith_assertions == 0);
1036 #endif
1037 
1038 	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
1039 	thread_mtx_unlock(thread);
1040 
1041 	/* clears read port ikol_alt_port, must be done first */
1042 	if (rdport != IP_NULL) {
1043 		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
1044 	}
1045 	/* control port can also have ikol_alt_port */
1046 	if (kport != IP_NULL) {
1047 		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
1048 	}
1049 	/* ikol_alt_port cleared */
1050 
1051 	if (iport != IP_NULL) {
1052 		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
1053 	}
1054 	if (pport != kport && pport != IP_NULL) {
1055 		assert(task_is_immovable(tro->tro_task));
1056 		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
1057 	}
1058 	if (thread->ith_kernel_reply_port != IP_NULL) {
1059 		thread_dealloc_kernel_special_reply_port(thread);
1060 	}
1061 }
1062 
1063 /*
1064  *	Routine:	ipc_thread_reset
1065  *	Purpose:
1066  *		Reset the IPC state for a given Mach thread when
1067  *		its task enters an elevated security context.
1068  *		All flavors of thread port and its exception ports have
1069  *		to be reset.  Its RPC reply port cannot have any
1070  *		rights outstanding, so it should be fine. The thread
1071  *		inspect and read port are set to NULL.
1072  *	Conditions:
1073  *		Nothing locked.
1074  */
1075 
1076 void
ipc_thread_reset(thread_t thread)1077 ipc_thread_reset(
1078 	thread_t        thread)
1079 {
1080 	thread_ro_t tro = get_thread_ro(thread);
1081 	ipc_port_t old_kport, new_kport, old_pport, new_pport;
1082 	ipc_port_t old_sself;
1083 	ipc_port_t old_rdport;
1084 	ipc_port_t old_iport;
1085 	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
1086 	boolean_t  has_old_exc_actions = FALSE;
1087 	boolean_t thread_is_immovable;
1088 	int i;
1089 
1090 #if CONFIG_MACF
1091 	struct label *new_label = mac_exc_create_label(NULL);
1092 #endif
1093 
1094 	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);
1095 
1096 	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
1097 	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
1098 	/*
1099 	 * ipc_thread_reset() only happens during sugid or corpsify.
1100 	 *
1101 	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
1102 	 * port is still movable/not pinned.
1103 	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
1104 	 * thread port is bogus, and should appear as if it doesn't have one.
1105 	 *
1106 	 * So simply leave pport the same as kport.
1107 	 */
1108 	new_pport = new_kport;
1109 
1110 	thread_mtx_lock(thread);
1111 
1112 	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
1113 	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
1114 	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
1115 
1116 	old_sself = tro->tro_settable_self_port;
1117 	old_pport = tro->tro_self_port;
1118 
1119 	if (old_kport == IP_NULL && thread->inspection == FALSE) {
1120 		/* thread is already terminated (can this happen?) */
1121 		thread_mtx_unlock(thread);
1122 		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
1123 		if (thread_is_immovable) {
1124 			ipc_kobject_dealloc_port(new_pport, 0,
1125 			    IKOT_THREAD_CONTROL);
1126 		}
1127 #if CONFIG_MACF
1128 		mac_exc_free_label(new_label);
1129 #endif
1130 		return;
1131 	}
1132 
1133 	thread->ipc_active = true;
1134 
1135 	struct thread_ro tpl = {
1136 		.tro_self_port = new_pport,
1137 		/* we just made the port, no need to triple check */
1138 		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
1139 		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
1140 	};
1141 
1142 	ipc_thread_ro_update_ports(tro, &tpl);
1143 
1144 	if (old_kport != IP_NULL) {
1145 		/* clears ikol_alt_port */
1146 		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
1147 	}
1148 	if (old_rdport != IP_NULL) {
1149 		/* clears ikol_alt_port */
1150 		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
1151 	}
1152 	if (old_iport != IP_NULL) {
1153 		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
1154 	}
1155 	if (thread_is_immovable && old_pport != IP_NULL) {
1156 		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
1157 	}
1158 
1159 	/*
1160 	 * Only ports that were set by root-owned processes
1161 	 * (privileged ports) should survive
1162 	 */
1163 	if (tro->tro_exc_actions != NULL) {
1164 		has_old_exc_actions = TRUE;
1165 		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1166 			if (tro->tro_exc_actions[i].privileged) {
1167 				old_exc_actions[i] = IP_NULL;
1168 			} else {
1169 #if CONFIG_MACF
1170 				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
1171 #endif
1172 				old_exc_actions[i] = tro->tro_exc_actions[i].port;
1173 				tro->tro_exc_actions[i].port = IP_NULL;
1174 			}
1175 		}
1176 	}
1177 
1178 	thread_mtx_unlock(thread);
1179 
1180 #if CONFIG_MACF
1181 	mac_exc_free_label(new_label);
1182 #endif
1183 
1184 	/* release the naked send rights */
1185 
1186 	if (IP_VALID(old_sself)) {
1187 		ipc_port_release_send(old_sself);
1188 	}
1189 
1190 	if (has_old_exc_actions) {
1191 		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1192 			ipc_port_release_send(old_exc_actions[i]);
1193 		}
1194 	}
1195 
1196 	/* destroy the kernel ports */
1197 	if (old_rdport != IP_NULL) {
1198 		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
1199 	}
1200 	if (old_kport != IP_NULL) {
1201 		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
1202 	}
1203 	/* ikol_alt_port cleared */
1204 
1205 	if (old_iport != IP_NULL) {
1206 		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
1207 	}
1208 	if (old_pport != old_kport && old_pport != IP_NULL) {
1209 		assert(thread_is_immovable);
1210 		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
1211 	}
1212 
1213 	/* unbind the thread special reply port */
1214 	if (IP_VALID(thread->ith_special_reply_port)) {
1215 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1216 	}
1217 }
1218 
1219 /*
1220  *	Routine:	retrieve_task_self_fast
1221  *	Purpose:
1222  *		Optimized version of retrieve_task_self,
1223  *		that only works for the current task.
1224  *
1225  *		Return a send right (possibly null/dead)
1226  *		for the task's user-visible self port.
1227  *	Conditions:
1228  *		Nothing locked.
1229  */
1230 
1231 static ipc_port_t
retrieve_task_self_fast(task_t task)1232 retrieve_task_self_fast(
1233 	task_t          task)
1234 {
1235 	ipc_port_t port = IP_NULL;
1236 
1237 	assert(task == current_task());
1238 
1239 	itk_lock(task);
1240 	assert(task->itk_self != IP_NULL);
1241 
1242 #if CONFIG_CSR
1243 	if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
1244 		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
1245 	} else
1246 #endif
1247 	{
1248 		/* no interposing, return the IMMOVABLE port */
1249 		port = ipc_kobject_make_send(task->itk_self, task,
1250 		    IKOT_TASK_CONTROL);
1251 #if (DEBUG || DEVELOPMENT)
1252 		if (task_is_immovable(task)) {
1253 			assert(ip_is_immovable_send(port));
1254 			if (task_is_pinned(task)) {
1255 				/* pinned port is also immovable */
1256 				assert(ip_is_pinned(port));
1257 			}
1258 		} else {
1259 			assert(!ip_is_immovable_send(port));
1260 			assert(!ip_is_pinned(port));
1261 		}
1262 #endif
1263 	}
1264 
1265 	itk_unlock(task);
1266 
1267 	return port;
1268 }
1269 
1270 /*
1271  *	Routine:	mach_task_is_self
1272  *	Purpose:
1273  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1274  *      port is pointing to current_task.
1275  */
1276 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1277 mach_task_is_self(
1278 	task_t         task,
1279 	boolean_t     *is_self)
1280 {
1281 	if (task == TASK_NULL) {
1282 		return KERN_INVALID_ARGUMENT;
1283 	}
1284 
1285 	*is_self = (task == current_task());
1286 
1287 	return KERN_SUCCESS;
1288 }
1289 
1290 /*
1291  *	Routine:	retrieve_thread_self_fast
1292  *	Purpose:
1293  *		Return a send right (possibly null/dead)
1294  *		for the thread's user-visible self port.
1295  *
1296  *		Only works for the current thread.
1297  *
1298  *	Conditions:
1299  *		Nothing locked.
1300  */
1301 
1302 ipc_port_t
retrieve_thread_self_fast(thread_t thread)1303 retrieve_thread_self_fast(
1304 	thread_t                thread)
1305 {
1306 	thread_ro_t tro = get_thread_ro(thread);
1307 	ipc_port_t port = IP_NULL;
1308 
1309 	assert(thread == current_thread());
1310 
1311 	thread_mtx_lock(thread);
1312 
1313 	assert(tro->tro_self_port != IP_NULL);
1314 
1315 #if CONFIG_CSR
1316 	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
1317 		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
1318 	} else
1319 #endif
1320 	{
1321 		/* no interposing, return IMMOVABLE_PORT */
1322 		port = ipc_kobject_make_send(tro->tro_self_port, thread,
1323 		    IKOT_THREAD_CONTROL);
1324 #if (DEBUG || DEVELOPMENT)
1325 		if (task_is_immovable(tro->tro_task)) {
1326 			assert(ip_is_immovable_send(port));
1327 			uint16_t tag = thread_get_tag(thread);
1328 			/* terminated threads are unpinned */
1329 			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
1330 				assert(ip_is_pinned(port));
1331 			} else {
1332 				assert(!ip_is_pinned(port));
1333 			}
1334 		} else {
1335 			assert(!ip_is_immovable_send(port));
1336 			assert(!ip_is_pinned(port));
1337 		}
1338 #endif
1339 	}
1340 
1341 	thread_mtx_unlock(thread);
1342 
1343 	return port;
1344 }
1345 
1346 /*
1347  *	Routine:	task_self_trap [mach trap]
1348  *	Purpose:
1349  *		Give the caller send rights for his own task port.
1350  *	Conditions:
1351  *		Nothing locked.
1352  *	Returns:
1353  *		MACH_PORT_NULL if there are any resource failures
1354  *		or other errors.
1355  */
1356 
1357 mach_port_name_t
task_self_trap(__unused struct task_self_trap_args * args)1358 task_self_trap(
1359 	__unused struct task_self_trap_args *args)
1360 {
1361 	task_t task = current_task();
1362 	ipc_port_t sright;
1363 	mach_port_name_t name;
1364 
1365 	sright = retrieve_task_self_fast(task);
1366 	name = ipc_port_copyout_send(sright, task->itk_space);
1367 
1368 	/*
1369 	 * When the right is pinned, memorize the name we gave it
1370 	 * in ip_receiver_name (it's an abuse as this port really
1371 	 * isn't a message queue, but the field is up for grabs
1372 	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
1373 	 *
1374 	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
1375 	 * when it is pinned.
1376 	 *
1377 	 * ipc_task_disable() will revert this when the task dies.
1378 	 */
1379 	if (sright == task->itk_self && sright->ip_pinned &&
1380 	    MACH_PORT_VALID(name)) {
1381 		itk_lock(task);
1382 		if (task->ipc_active) {
1383 			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
1384 				sright->ip_receiver_name = name;
1385 			} else if (ip_get_receiver_name(sright) != name) {
1386 				panic("mach_task_self() name changed");
1387 			}
1388 		}
1389 		itk_unlock(task);
1390 	}
1391 	return name;
1392 }
1393 
1394 /*
1395  *	Routine:	thread_self_trap [mach trap]
1396  *	Purpose:
1397  *		Give the caller send rights for his own thread port.
1398  *	Conditions:
1399  *		Nothing locked.
1400  *	Returns:
1401  *		MACH_PORT_NULL if there are any resource failures
1402  *		or other errors.
1403  */
1404 
1405 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1406 thread_self_trap(
1407 	__unused struct thread_self_trap_args *args)
1408 {
1409 	thread_t thread = current_thread();
1410 	ipc_space_t space = current_space();
1411 	ipc_port_t sright;
1412 	mach_port_name_t name;
1413 
1414 	sright = retrieve_thread_self_fast(thread);
1415 	name = ipc_port_copyout_send(sright, space);
1416 	return name;
1417 }
1418 
1419 /*
1420  *	Routine:	mach_reply_port [mach trap]
1421  *	Purpose:
1422  *		Allocate a port for the caller.
1423  *	Conditions:
1424  *		Nothing locked.
1425  *	Returns:
1426  *		MACH_PORT_NULL if there are any resource failures
1427  *		or other errors.
1428  */
1429 
1430 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1431 mach_reply_port(
1432 	__unused struct mach_reply_port_args *args)
1433 {
1434 	ipc_port_t port;
1435 	mach_port_name_t name;
1436 	kern_return_t kr;
1437 
1438 	kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1439 	    &name, &port);
1440 	if (kr == KERN_SUCCESS) {
1441 		ip_mq_unlock(port);
1442 	} else {
1443 		name = MACH_PORT_NULL;
1444 	}
1445 	return name;
1446 }
1447 
1448 /*
1449  *	Routine:	thread_get_special_reply_port [mach trap]
1450  *	Purpose:
1451  *		Allocate a special reply port for the calling thread.
1452  *	Conditions:
1453  *		Nothing locked.
1454  *	Returns:
1455  *		mach_port_name_t: send right & receive right for special reply port.
1456  *		MACH_PORT_NULL if there are any resource failures
1457  *		or other errors.
1458  */
1459 
1460 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1461 thread_get_special_reply_port(
1462 	__unused struct thread_get_special_reply_port_args *args)
1463 {
1464 	ipc_port_t port;
1465 	mach_port_name_t name;
1466 	kern_return_t kr;
1467 	thread_t thread = current_thread();
1468 	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
1469 	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
1470 
1471 	/* unbind the thread special reply port */
1472 	if (IP_VALID(thread->ith_special_reply_port)) {
1473 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1474 	}
1475 
1476 	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
1477 	if (kr == KERN_SUCCESS) {
1478 		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1479 		ip_mq_unlock(port);
1480 	} else {
1481 		name = MACH_PORT_NULL;
1482 	}
1483 	return name;
1484 }
1485 
1486 /*
1487  *	Routine:	thread_get_kernel_special_reply_port
1488  *	Purpose:
1489  *		Allocate a kernel special reply port for the calling thread.
1490  *	Conditions:
1491  *		Nothing locked.
1492  *	Returns:
1493  *		Creates and sets kernel special reply port.
1494  *		KERN_SUCCESS on Success.
1495  *		KERN_FAILURE on Failure.
1496  */
1497 
1498 kern_return_t
thread_get_kernel_special_reply_port(void)1499 thread_get_kernel_special_reply_port(void)
1500 {
1501 	ipc_port_t port = IP_NULL;
1502 	thread_t thread = current_thread();
1503 
1504 	/* unbind the thread special reply port */
1505 	if (IP_VALID(thread->ith_kernel_reply_port)) {
1506 		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1507 	}
1508 
1509 	port = ipc_port_alloc_reply(); /*returns a reference on the port */
1510 	if (port != IPC_PORT_NULL) {
1511 		ip_mq_lock(port);
1512 		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
1513 		ip_mq_unlock(port);
1514 		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
1515 	}
1516 	return KERN_SUCCESS;
1517 }
1518 
1519 /*
1520  *	Routine:	ipc_port_bind_special_reply_port_locked
1521  *	Purpose:
1522  *		Bind the given port to current thread as a special reply port.
1523  *	Conditions:
1524  *		Port locked.
1525  *	Returns:
1526  *		None.
1527  */
1528 
1529 static void
ipc_port_bind_special_reply_port_locked(ipc_port_t port,ipc_reply_port_type_t reply_type)1530 ipc_port_bind_special_reply_port_locked(
1531 	ipc_port_t            port,
1532 	ipc_reply_port_type_t reply_type)
1533 {
1534 	thread_t thread = current_thread();
1535 	ipc_port_t *reply_portp;
1536 
1537 	if (reply_type == IRPT_USER) {
1538 		reply_portp = &thread->ith_special_reply_port;
1539 	} else {
1540 		reply_portp = &thread->ith_kernel_reply_port;
1541 	}
1542 
1543 	assert(*reply_portp == NULL);
1544 	assert(port->ip_specialreply);
1545 	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1546 
1547 	ip_reference(port);
1548 	*reply_portp = port;
1549 	port->ip_messages.imq_srp_owner_thread = thread;
1550 
1551 	ipc_special_reply_port_bits_reset(port);
1552 }
1553 
1554 /*
1555  *	Routine:	ipc_port_unbind_special_reply_port
1556  *	Purpose:
1557  *		Unbind the thread's special reply port.
1558  *		If the special port has threads waiting on turnstile,
1559  *		update it's inheritor.
1560  *	Condition:
1561  *		Nothing locked.
1562  *	Returns:
1563  *		None.
1564  */
1565 static void
ipc_port_unbind_special_reply_port(thread_t thread,ipc_reply_port_type_t reply_type)1566 ipc_port_unbind_special_reply_port(
1567 	thread_t              thread,
1568 	ipc_reply_port_type_t reply_type)
1569 {
1570 	ipc_port_t *reply_portp;
1571 
1572 	if (reply_type == IRPT_USER) {
1573 		reply_portp = &thread->ith_special_reply_port;
1574 	} else {
1575 		reply_portp = &thread->ith_kernel_reply_port;
1576 	}
1577 
1578 	ipc_port_t special_reply_port = *reply_portp;
1579 
1580 	ip_mq_lock(special_reply_port);
1581 
1582 	*reply_portp = NULL;
1583 	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
1584 	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
1585 	/* port unlocked */
1586 
1587 	/* Destroy the port if its kernel special reply, else just release a ref */
1588 	if (reply_type == IRPT_USER) {
1589 		ip_release(special_reply_port);
1590 	} else {
1591 		ipc_port_dealloc_reply(special_reply_port);
1592 	}
1593 	return;
1594 }
1595 
1596 /*
1597  *	Routine:	thread_dealloc_kernel_special_reply_port
1598  *	Purpose:
1599  *		Unbind the thread's kernel special reply port.
1600  *		If the special port has threads waiting on turnstile,
1601  *		update it's inheritor.
1602  *	Condition:
1603  *		Called on current thread or a terminated thread.
1604  *	Returns:
1605  *		None.
1606  */
1607 
1608 void
thread_dealloc_kernel_special_reply_port(thread_t thread)1609 thread_dealloc_kernel_special_reply_port(thread_t thread)
1610 {
1611 	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1612 }
1613 
1614 /*
1615  *	Routine:	thread_get_special_port [kernel call]
1616  *	Purpose:
1617  *		Clones a send right for one of the thread's
1618  *		special ports.
1619  *	Conditions:
1620  *		Nothing locked.
1621  *	Returns:
1622  *		KERN_SUCCESS		Extracted a send right.
1623  *		KERN_INVALID_ARGUMENT	The thread is null.
1624  *		KERN_FAILURE		The thread is dead.
1625  *		KERN_INVALID_ARGUMENT	Invalid special port.
1626  */
1627 
1628 kern_return_t
1629 thread_get_special_port(
1630 	thread_inspect_t         thread,
1631 	int                      which,
1632 	ipc_port_t              *portp);
1633 
/*
 * Common implementation for thread_get_special_port() and
 * thread_get_special_port_from_user(): produce a send right for one of
 * the thread's special ports, after checking that the caller's port
 * flavor is privileged enough for the requested port.
 */
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	/* reject requests that exceed the privilege of the caller's flavor */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* a settable-self differing from the control port means interposing */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1687 
1688 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1689 thread_get_special_port(
1690 	thread_inspect_t         thread,
1691 	int                      which,
1692 	ipc_port_t              *portp)
1693 {
1694 	if (thread == THREAD_NULL) {
1695 		return KERN_INVALID_ARGUMENT;
1696 	}
1697 
1698 	return thread_get_special_port_internal(thread, get_thread_ro(thread),
1699 	           which, portp, THREAD_FLAVOR_CONTROL);
1700 }
1701 
/*
 * Produce a send right for the thread's own control port without port
 * substitution, wrapped in a one-time substitution kobject so the right
 * can be moved exactly once (used by thread_get_special_port_from_user
 * for THREAD_KERNEL_PORT on the caller's own task).
 */
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);
	port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* a settable-self differing from the control port means interposing */
	if (tro->tro_settable_self_port != port) {
		port = ipc_port_make_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	}
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1722 
/*
 * MIG entry point for thread_get_special_port(): derives the privilege
 * flavor from the kobject type of the port the caller actually holds,
 * and applies MACF policy for movable control ports on the caller's
 * own task.
 */
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the kobject type of the supplied port to a privilege flavor */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	/* drop the reference taken by convert_port_to_thread_inspect_no_eval */
	thread_deallocate(thread);
	return kr;
}
1779 
1780 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1781 special_port_allowed_with_thread_flavor(
1782 	int                  which,
1783 	mach_thread_flavor_t flavor)
1784 {
1785 	switch (flavor) {
1786 	case THREAD_FLAVOR_CONTROL:
1787 		return KERN_SUCCESS;
1788 
1789 	case THREAD_FLAVOR_READ:
1790 
1791 		switch (which) {
1792 		case THREAD_READ_PORT:
1793 		case THREAD_INSPECT_PORT:
1794 			return KERN_SUCCESS;
1795 		default:
1796 			return KERN_INVALID_CAPABILITY;
1797 		}
1798 
1799 	case THREAD_FLAVOR_INSPECT:
1800 
1801 		switch (which) {
1802 		case THREAD_INSPECT_PORT:
1803 			return KERN_SUCCESS;
1804 		default:
1805 			return KERN_INVALID_CAPABILITY;
1806 		}
1807 
1808 	default:
1809 		return KERN_INVALID_CAPABILITY;
1810 	}
1811 }
1812 
1813 /*
1814  *	Routine:	thread_set_special_port [kernel call]
1815  *	Purpose:
1816  *		Changes one of the thread's special ports,
1817  *		setting it to the supplied send right.
1818  *	Conditions:
1819  *		Nothing locked.  If successful, consumes
1820  *		the supplied send right.
1821  *	Returns:
1822  *		KERN_SUCCESS            Changed the special port.
1823  *		KERN_INVALID_ARGUMENT   The thread is null.
1824  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1825  *		KERN_FAILURE            The thread is dead.
1826  *		KERN_INVALID_ARGUMENT   Invalid special port.
1827  *		KERN_NO_ACCESS          Restricted access to set port.
1828  */
1829 
1830 kern_return_t
thread_set_special_port(thread_t thread,int which,ipc_port_t port)1831 thread_set_special_port(
1832 	thread_t                thread,
1833 	int                     which,
1834 	ipc_port_t              port)
1835 {
1836 	kern_return_t   result = KERN_SUCCESS;
1837 	thread_ro_t     tro = NULL;
1838 	ipc_port_t      old = IP_NULL;
1839 
1840 	if (thread == THREAD_NULL) {
1841 		return KERN_INVALID_ARGUMENT;
1842 	}
1843 
1844 	if (IP_VALID(port) && port->ip_immovable_send) {
1845 		return KERN_INVALID_RIGHT;
1846 	}
1847 
1848 	switch (which) {
1849 	case THREAD_KERNEL_PORT:
1850 #if CONFIG_CSR
1851 		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
1852 			/*
1853 			 * Only allow setting of thread-self
1854 			 * special port from user-space when SIP is
1855 			 * disabled (for Mach-on-Mach emulation).
1856 			 */
1857 			tro = get_thread_ro(thread);
1858 
1859 			thread_mtx_lock(thread);
1860 			if (thread->active) {
1861 				old = tro->tro_settable_self_port;
1862 				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
1863 				    tro, tro_settable_self_port, &port);
1864 			} else {
1865 				result = KERN_FAILURE;
1866 			}
1867 			thread_mtx_unlock(thread);
1868 
1869 			if (IP_VALID(old)) {
1870 				ipc_port_release_send(old);
1871 			}
1872 
1873 			return result;
1874 		}
1875 #else
1876 		(void)old;
1877 		(void)result;
1878 		(void)tro;
1879 #endif
1880 		return KERN_NO_ACCESS;
1881 
1882 	default:
1883 		return KERN_INVALID_ARGUMENT;
1884 	}
1885 }
1886 
1887 /*
1888  *	Routine:	task_get_special_port [kernel call]
1889  *	Purpose:
1890  *		Clones a send right for one of the task's
1891  *		special ports.
1892  *	Conditions:
1893  *		Nothing locked.
1894  *	Returns:
1895  *		KERN_SUCCESS		    Extracted a send right.
1896  *		KERN_INVALID_ARGUMENT	The task is null.
1897  *		KERN_FAILURE		    The task/space is dead.
1898  *		KERN_INVALID_ARGUMENT	Invalid special port.
1899  */
1900 
1901 static kern_return_t
task_get_special_port_internal(task_t task,int which,ipc_port_t * portp,mach_task_flavor_t flavor)1902 task_get_special_port_internal(
1903 	task_t          task,
1904 	int             which,
1905 	ipc_port_t      *portp,
1906 	mach_task_flavor_t        flavor)
1907 {
1908 	kern_return_t kr;
1909 	ipc_port_t port;
1910 
1911 	if (task == TASK_NULL) {
1912 		return KERN_INVALID_ARGUMENT;
1913 	}
1914 
1915 	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
1916 		return kr;
1917 	}
1918 
1919 	itk_lock(task);
1920 	if (!task->ipc_active) {
1921 		itk_unlock(task);
1922 		return KERN_FAILURE;
1923 	}
1924 
1925 	switch (which) {
1926 	case TASK_KERNEL_PORT:
1927 		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
1928 #if CONFIG_CSR
1929 		if (task->itk_settable_self != port) {
1930 			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
1931 		} else
1932 #endif
1933 		{
1934 			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
1935 		}
1936 		itk_unlock(task);
1937 		break;
1938 
1939 	case TASK_READ_PORT:
1940 	case TASK_INSPECT_PORT:
1941 		itk_unlock(task);
1942 		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
1943 		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
1944 		/* convert_task_to_port_with_flavor consumes a task reference */
1945 		task_reference(task);
1946 		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
1947 		break;
1948 
1949 	case TASK_NAME_PORT:
1950 		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
1951 		    task, IKOT_TASK_NAME);
1952 		itk_unlock(task);
1953 		break;
1954 
1955 	case TASK_HOST_PORT:
1956 		port = host_port_copy_send(task->itk_host);
1957 		itk_unlock(task);
1958 		break;
1959 
1960 	case TASK_BOOTSTRAP_PORT:
1961 		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
1962 		itk_unlock(task);
1963 		break;
1964 
1965 	case TASK_ACCESS_PORT:
1966 		port = ipc_port_copy_send_mqueue(task->itk_task_access);
1967 		itk_unlock(task);
1968 		break;
1969 
1970 	case TASK_DEBUG_CONTROL_PORT:
1971 		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
1972 		itk_unlock(task);
1973 		break;
1974 
1975 #if CONFIG_PROC_RESOURCE_LIMITS
1976 	case TASK_RESOURCE_NOTIFY_PORT:
1977 		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
1978 		itk_unlock(task);
1979 		break;
1980 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1981 
1982 	default:
1983 		itk_unlock(task);
1984 		return KERN_INVALID_ARGUMENT;
1985 	}
1986 
1987 	*portp = port;
1988 	return KERN_SUCCESS;
1989 }
1990 
1991 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
1992 kern_return_t
task_get_special_port(task_t task,int which,ipc_port_t * portp)1993 task_get_special_port(
1994 	task_t          task,
1995 	int             which,
1996 	ipc_port_t      *portp)
1997 {
1998 	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
1999 }
2000 
/*
 * Produce a send right for the task's own control port without port
 * substitution, wrapped in a one-time substitution kobject so the right
 * can be moved exactly once (used by task_get_special_port_from_user
 * for TASK_KERNEL_PORT on the caller's own task).
 */
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	itk_lock(task);
	port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* a settable-self differing from the control port means interposing */
	if (task->itk_settable_self != port) {
		port = ipc_port_make_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, task, IKOT_TASK_CONTROL);
	}
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
2021 
2022 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* Inspect-level translation, deliberately skipping conversion eval. */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* Clamp the request to the flavor carried by the supplied port. */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	/* Drop the reference taken by the port translation above. */
	task_deallocate(task);
	return kr;
}
2084 
2085 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2086 special_port_allowed_with_task_flavor(
2087 	int                which,
2088 	mach_task_flavor_t flavor)
2089 {
2090 	switch (flavor) {
2091 	case TASK_FLAVOR_CONTROL:
2092 		return KERN_SUCCESS;
2093 
2094 	case TASK_FLAVOR_READ:
2095 
2096 		switch (which) {
2097 		case TASK_READ_PORT:
2098 		case TASK_INSPECT_PORT:
2099 		case TASK_NAME_PORT:
2100 			return KERN_SUCCESS;
2101 		default:
2102 			return KERN_INVALID_CAPABILITY;
2103 		}
2104 
2105 	case TASK_FLAVOR_INSPECT:
2106 
2107 		switch (which) {
2108 		case TASK_INSPECT_PORT:
2109 		case TASK_NAME_PORT:
2110 			return KERN_SUCCESS;
2111 		default:
2112 			return KERN_INVALID_CAPABILITY;
2113 		}
2114 
2115 	default:
2116 		return KERN_INVALID_CAPABILITY;
2117 	}
2118 }
2119 
/*
 *	Routine:	task_set_special_port_from_user [MIG call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_RIGHT	Port is marked as immovable.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to set port.
 *		KERN_DENIED		Denied by MAC policy.
 */
2136 
kern_return_t
task_set_special_port_from_user(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MAC policy check applies only to the user-space (MIG) path. */
	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
		return KERN_DENIED;
	}
#endif

	return task_set_special_port(task, which, port);
}
2155 
2156 /* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Driver tasks are never allowed to change special ports. */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/* An immovable send right cannot be stashed as a special port. */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2195 
2196 /*
2197  *	Routine:	task_set_special_port_internal
2198  *	Purpose:
2199  *		Changes one of the task's special ports,
2200  *		setting it to the supplied send right.
2201  *	Conditions:
2202  *		Nothing locked.  If successful, consumes
2203  *		the supplied send right.
2204  *	Returns:
2205  *		KERN_SUCCESS		Changed the special port.
2206  *		KERN_INVALID_ARGUMENT	The task is null.
2207  *		KERN_FAILURE		The task/space is dead.
2208  *		KERN_INVALID_ARGUMENT	Invalid special port.
2209  *      KERN_NO_ACCESS		Restricted access to overwrite port.
2210  */
2211 
kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;       /* displaced right, released after unlock */
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Release the displaced send right only after dropping the lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2289 /*
2290  *	Routine:	mach_ports_register [kernel call]
2291  *	Purpose:
2292  *		Stash a handful of port send rights in the task.
2293  *		Child tasks will inherit these rights, but they
2294  *		must use mach_ports_lookup to acquire them.
2295  *
2296  *		The rights are supplied in a (wired) kalloc'd segment.
2297  *		Rights which aren't supplied are assumed to be null.
2298  *	Conditions:
2299  *		Nothing locked.  If successful, consumes
2300  *		the supplied rights and memory.
2301  *	Returns:
2302  *		KERN_SUCCESS		    Stashed the port rights.
2303  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2304  *		KERN_INVALID_ARGUMENT	The task is null.
2305  *		KERN_INVALID_ARGUMENT	The task is dead.
2306  *		KERN_INVALID_ARGUMENT	The memory param is null.
2307  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2308  */
2309 
kern_return_t
mach_ports_register(
	task_t                  task,
	mach_port_array_t       memory,
	mach_msg_type_number_t  portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++) {
		ports[i] = memory[i];
		/* Immovable send rights may not be stashed. */
		if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
			return KERN_INVALID_RIGHT;
		}
	}
	for (; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = IP_NULL;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(ports[i])) {
			ipc_port_release_send(ports[i]);
		}
	}

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0) {
		kfree_type(mach_port_t, portsCnt, memory);
	}

	return KERN_SUCCESS;
}
2377 
2378 /*
2379  *	Routine:	mach_ports_lookup [kernel call]
2380  *	Purpose:
2381  *		Retrieves (clones) the stashed port send rights.
2382  *	Conditions:
2383  *		Nothing locked.  If successful, the caller gets
2384  *		rights and memory.
2385  *	Returns:
2386  *		KERN_SUCCESS		Retrieved the send rights.
2387  *		KERN_INVALID_ARGUMENT	The task is null.
2388  *		KERN_INVALID_ARGUMENT	The task is dead.
2389  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2390  */
2391 
kern_return_t
mach_ports_lookup(
	task_t                  task,
	mach_port_array_t       *portsp,
	mach_msg_type_number_t  *portsCnt)
{
	ipc_port_t *ports;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Allocate before taking the lock; Z_NOFAIL means no NULL check needed. */
	ports = kalloc_type(ipc_port_t, TASK_PORT_REGISTER_MAX,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		kfree_type(ipc_port_t, TASK_PORT_REGISTER_MAX, ports);

		return KERN_INVALID_ARGUMENT;
	}

	/* Clone a send right for each registered slot (entries may be IP_NULL). */
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = ipc_port_copy_send_any(task->itk_registered[i]);
	}

	itk_unlock(task);

	/* Caller takes ownership of the array and the cloned rights. */
	*portsp = ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
2425 
/*
 * Security policy for translating a task/thread port to its underlying
 * task (in-trans, out_trans == FALSE) or exposing one to user space
 * (out-trans, out_trans == TRUE).  Returns KERN_SUCCESS when the
 * translation is permitted, KERN_INVALID_SECURITY otherwise (or panics
 * on-device for an in-trans of a foreign control port without
 * Developer Mode).
 */
static kern_return_t
task_conversion_eval_internal(
	task_t             caller,
	task_t             victim,
	boolean_t          out_trans,
	int                flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/* Both asserts hold because TASK_* and THREAD_* flavor values coincide. */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2520 
/* In-translation policy check (port -> task). */
kern_return_t
task_conversion_eval(task_t caller, task_t victim, int flavor)
{
	/* flavor is mach_task_flavor_t or mach_thread_flavor_t */
	static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
	static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
	return task_conversion_eval_internal(caller, victim, FALSE, flavor);
}
2529 
/* Out-translation policy check (task -> port); control flavor only. */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
{
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
	return task_conversion_eval_internal(caller, victim, TRUE, flavor);
}
2536 
2537 /*
2538  *	Routine:	task_port_kotype_valid_for_flavor
2539  *	Purpose:
2540  *		Check whether the kobject type of a mach port
2541  *      is valid for conversion to a task of given flavor.
2542  */
2543 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2544 task_port_kotype_valid_for_flavor(
2545 	natural_t          kotype,
2546 	mach_task_flavor_t flavor)
2547 {
2548 	switch (flavor) {
2549 	/* Ascending capability */
2550 	case TASK_FLAVOR_NAME:
2551 		if (kotype == IKOT_TASK_NAME) {
2552 			return TRUE;
2553 		}
2554 		OS_FALLTHROUGH;
2555 	case TASK_FLAVOR_INSPECT:
2556 		if (kotype == IKOT_TASK_INSPECT) {
2557 			return TRUE;
2558 		}
2559 		OS_FALLTHROUGH;
2560 	case TASK_FLAVOR_READ:
2561 		if (kotype == IKOT_TASK_READ) {
2562 			return TRUE;
2563 		}
2564 		OS_FALLTHROUGH;
2565 	case TASK_FLAVOR_CONTROL:
2566 		if (kotype == IKOT_TASK_CONTROL) {
2567 			return TRUE;
2568 		}
2569 		break;
2570 	default:
2571 		panic("strange task flavor");
2572 	}
2573 
2574 	return FALSE;
2575 }
2576 
2577 /*
2578  *	Routine: convert_port_to_task_with_flavor_locked_noref
2579  *	Purpose:
2580  *		Internal helper routine to convert from a locked port to a task.
2581  *	Args:
2582  *		port   - target port
2583  *		flavor - requested task port flavor
2584  *		options - port translation options
2585  *	Conditions:
2586  *		Port is locked and active.
2587  */
static task_t
convert_port_to_task_with_flavor_locked_noref(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_kobject_type_t type = ip_kotype(port);
	task_t task;

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* Reject ports whose kobject type is weaker than the requested flavor. */
	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
		return TASK_NULL;
	}

	task = ipc_kobject_get_locked(port, type);
	if (task == TASK_NULL) {
		return TASK_NULL;
	}

	/* A corpse's control port translates only when explicitly allowed. */
	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
		assert(flavor == TASK_FLAVOR_CONTROL);
		return TASK_NULL;
	}

	/* TODO: rdar://42389187 */
	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
	}

	/* Conversion policy check (Developer Mode, platform binary, etc.). */
	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
	    task_conversion_eval(current_task(), task, flavor)) {
		return TASK_NULL;
	}

	return task;
}
2626 
2627 /*
2628  *	Routine: convert_port_to_task_with_flavor_locked
2629  *	Purpose:
2630  *		Internal helper routine to convert from a locked port to a task.
2631  *	Args:
2632  *		port   - target port
2633  *		flavor - requested task port flavor
2634  *		options - port translation options
2635  *		grp    - task reference group
2636  *	Conditions:
2637  *		Port is locked and active.
2638  *		Produces task ref or TASK_NULL.
2639  */
2640 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2641 convert_port_to_task_with_flavor_locked(
2642 	ipc_port_t              port,
2643 	mach_task_flavor_t      flavor,
2644 	port_intrans_options_t  options,
2645 	task_grp_t              grp)
2646 {
2647 	task_t task;
2648 
2649 	task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2650 	    options);
2651 
2652 	if (task != TASK_NULL) {
2653 		task_reference_grp(task, grp);
2654 	}
2655 
2656 	return task;
2657 }
2658 
2659 /*
2660  *	Routine:	convert_port_to_task_with_flavor
2661  *	Purpose:
2662  *		Internal helper for converting from a port to a task.
2663  *		Doesn't consume the port ref; produces a task ref,
2664  *		which may be null.
2665  *	Args:
2666  *		port   - target port
2667  *		flavor - requested task port flavor
2668  *		options - port translation options
2669  *		grp    - task reference group
2670  *	Conditions:
2671  *		Nothing locked.
2672  */
static task_t
convert_port_to_task_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options,
	task_grp_t         grp)
{
	task_t task = TASK_NULL;
	task_t self = current_task();

	if (IP_VALID(port)) {
		/* Fast path: the caller's own control port needs no validation. */
		if (port == self->itk_self) {
			task_reference_grp(self, grp);
			return self;
		}

		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked(port,
			    flavor, options, grp);
		}
		ip_mq_unlock(port);
	}

	return task;
}
2699 
/* Control flavor; corpses refused; kernel reference group. */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}
2707 
/* Control flavor; corpses refused; MIG reference group. */
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}
2715 
/* Read flavor; corpses allowed; conversion eval applies. */
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2723 
/* Read flavor; corpses allowed; skips the conversion policy eval. */
static task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2731 
/* Read flavor; corpses allowed; MIG reference group. */
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2739 
/* Inspect flavor; always skips conversion eval and allows corpses. */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2747 
/* Identical to convert_port_to_task_inspect; inspect flavor never evals. */
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2755 
/* Inspect flavor; MIG reference group. */
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2763 
/* Name flavor; weakest capability, never evals, corpses allowed. */
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2771 
/* Name flavor; MIG reference group. */
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2779 
2780 /*
2781  *	Routine:	convert_port_to_task_policy
2782  *	Purpose:
2783  *		Convert from a port to a task.
2784  *		Doesn't consume the port ref; produces a task ref,
2785  *		which may be null.
2786  *		If the port is being used with task_port_set(), any task port
2787  *		type other than TASK_CONTROL requires an entitlement. If the
2788  *		port is being used with task_port_get(), TASK_NAME requires an
2789  *		entitlement.
2790  *	Conditions:
2791  *		Nothing locked.
2792  */
2793 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2794 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2795 {
2796 	task_t task = TASK_NULL;
2797 
2798 	if (!IP_VALID(port)) {
2799 		return TASK_NULL;
2800 	}
2801 
2802 	task = set ?
2803 	    convert_port_to_task_mig(port) :
2804 	    convert_port_to_task_inspect_mig(port);
2805 
2806 	if (task == TASK_NULL &&
2807 	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2808 		task = convert_port_to_task_name_mig(port);
2809 	}
2810 
2811 	return task;
2812 }
2813 
/* Translation for task_policy_set(): control flavor required. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}
2819 
/* Translation for task_policy_get(): inspect flavor suffices. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2825 
2826 /*
2827  *	Routine:	convert_port_to_task_suspension_token
2828  *	Purpose:
2829  *		Convert from a port to a task suspension token.
2830  *		Doesn't consume the port ref; produces a suspension token ref,
2831  *		which may be null.
2832  *	Conditions:
2833  *		Nothing locked.
2834  */
static task_suspension_token_t
convert_port_to_task_suspension_token_grp(
	ipc_port_t              port,
	task_grp_t              grp)
{
	task_suspension_token_t task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* Only IKOT_TASK_RESUME kobject ports carry a suspension token. */
		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
		if (task != TASK_NULL) {
			task_reference_grp(task, grp);
		}
		ip_mq_unlock(port);
	}

	return task;
}
2853 
/* Suspension-token translation; external reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}
2860 
/* Suspension-token translation; MIG reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}
2867 
/* Suspension-token translation; kernel reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2874 
2875 /*
2876  *	Routine:	convert_port_to_space_with_flavor
2877  *	Purpose:
2878  *		Internal helper for converting from a port to a space.
2879  *		Doesn't consume the port ref; produces a space ref,
2880  *		which may be null.
2881  *	Args:
2882  *		port   - target port
2883  *		flavor - requested ipc space flavor
2884  *		options - port translation options
2885  *	Conditions:
2886  *		Nothing locked.
2887  */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* Name flavor grants no access to the IPC space. */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2926 
/* Control-flavor space translation; corpses refused. */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2934 
/* Read-flavor space translation; corpses allowed; eval applies. */
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2942 
/* Read-flavor space translation; skips the conversion policy eval. */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2950 
/* Inspect-flavor space translation; never evals, corpses allowed. */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2958 
2959 /*
2960  *	Routine:	convert_port_to_map_with_flavor
2961  *	Purpose:
2962  *		Internal helper for converting from a port to a map.
2963  *		Doesn't consume the port ref; produces a map ref,
2964  *		which may be null.
2965  *	Args:
2966  *		port   - target port
2967  *		flavor - requested vm map flavor
2968  *		options - port translation options
2969  *	Conditions:
2970  *		Nothing locked.
2971  */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* Map access must never bypass the conversion policy eval. */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
3021 
/* Control-flavor map translation; corpses refused. */
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
3029 
/* Read-flavor map translation; corpses allowed. */
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3037 
vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* There are no vm_map_inspect_t routines at the moment; always fail. */
	return VM_MAP_INSPECT_NULL;
}
3045 
3046 /*
3047  *	Routine:	thread_port_kotype_valid_for_flavor
3048  *	Purpose:
3049  *		Check whether the kobject type of a mach port
3050  *      is valid for conversion to a thread of given flavor.
3051  */
3052 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)3053 thread_port_kotype_valid_for_flavor(
3054 	natural_t            kotype,
3055 	mach_thread_flavor_t flavor)
3056 {
3057 	switch (flavor) {
3058 	/* Ascending capability */
3059 	case THREAD_FLAVOR_INSPECT:
3060 		if (kotype == IKOT_THREAD_INSPECT) {
3061 			return TRUE;
3062 		}
3063 		OS_FALLTHROUGH;
3064 	case THREAD_FLAVOR_READ:
3065 		if (kotype == IKOT_THREAD_READ) {
3066 			return TRUE;
3067 		}
3068 		OS_FALLTHROUGH;
3069 	case THREAD_FLAVOR_CONTROL:
3070 		if (kotype == IKOT_THREAD_CONTROL) {
3071 			return TRUE;
3072 		}
3073 		break;
3074 	default:
3075 		panic("strange thread flavor");
3076 	}
3077 
3078 	return FALSE;
3079 }
3080 
3081 /*
3082  *	Routine: convert_port_to_thread_with_flavor_locked
3083  *	Purpose:
3084  *		Internal helper routine to convert from a locked port to a thread.
3085  *	Args:
3086  *		port   - target port
3087  *		flavor - requested thread port flavor
3088  *		options - port translation options
3089  *	Conditions:
3090  *		Port is locked and active.
3091  *		Produces a thread ref or THREAD_NULL.
3092  */
3093 static thread_t
convert_port_to_thread_with_flavor_locked(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3094 convert_port_to_thread_with_flavor_locked(
3095 	ipc_port_t               port,
3096 	mach_thread_flavor_t     flavor,
3097 	port_intrans_options_t   options)
3098 {
3099 	thread_t thread = THREAD_NULL;
3100 	task_t task;
3101 	ipc_kobject_type_t type = ip_kotype(port);
3102 
3103 	ip_mq_lock_held(port);
3104 	require_ip_active(port);
3105 
3106 	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
3107 		return THREAD_NULL;
3108 	}
3109 
3110 	thread = ipc_kobject_get_locked(port, type);
3111 
3112 	if (thread == THREAD_NULL) {
3113 		return THREAD_NULL;
3114 	}
3115 
3116 	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
3117 		if (thread == current_thread()) {
3118 			return THREAD_NULL;
3119 		}
3120 	}
3121 
3122 	task = get_threadtask(thread);
3123 
3124 	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
3125 		if (task != current_task()) {
3126 			return THREAD_NULL;
3127 		}
3128 	} else {
3129 		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
3130 			assert(flavor == THREAD_FLAVOR_CONTROL);
3131 			return THREAD_NULL;
3132 		}
3133 		/* TODO: rdar://42389187 */
3134 		if (flavor == THREAD_FLAVOR_INSPECT) {
3135 			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
3136 		}
3137 
3138 		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
3139 		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
3140 			return THREAD_NULL;
3141 		}
3142 	}
3143 
3144 	thread_reference(thread);
3145 	return thread;
3146 }
3147 
3148 /*
3149  *	Routine:	convert_port_to_thread_with_flavor
3150  *	Purpose:
3151  *		Internal helper for converting from a port to a thread.
3152  *		Doesn't consume the port ref; produces a thread ref,
3153  *		which may be null.
3154  *	Args:
3155  *		port   - target port
3156  *		flavor - requested thread port flavor
3157  *		options - port translation options
3158  *	Conditions:
3159  *		Nothing locked.
3160  */
3161 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3162 convert_port_to_thread_with_flavor(
3163 	ipc_port_t           port,
3164 	mach_thread_flavor_t flavor,
3165 	port_intrans_options_t options)
3166 {
3167 	thread_t thread = THREAD_NULL;
3168 
3169 	if (IP_VALID(port)) {
3170 		ip_mq_lock(port);
3171 		if (ip_active(port)) {
3172 			thread = convert_port_to_thread_with_flavor_locked(port,
3173 			    flavor, options);
3174 		}
3175 		ip_mq_unlock(port);
3176 	}
3177 
3178 	return thread;
3179 }
3180 
3181 thread_t
convert_port_to_thread(ipc_port_t port)3182 convert_port_to_thread(
3183 	ipc_port_t              port)
3184 {
3185 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3186 	           PORT_INTRANS_OPTIONS_NONE);
3187 }
3188 
3189 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3190 convert_port_to_thread_read(
3191 	ipc_port_t              port)
3192 {
3193 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3194 	           PORT_INTRANS_ALLOW_CORPSE_TASK);
3195 }
3196 
3197 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3198 convert_port_to_thread_read_no_eval(
3199 	ipc_port_t              port)
3200 {
3201 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3202 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3203 }
3204 
3205 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3206 convert_port_to_thread_inspect(
3207 	ipc_port_t              port)
3208 {
3209 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3210 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3211 }
3212 
3213 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3214 convert_port_to_thread_inspect_no_eval(
3215 	ipc_port_t              port)
3216 {
3217 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3218 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3219 }
3220 
3221 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3222 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3223 {
3224 	switch (flavor) {
3225 	case THREAD_FLAVOR_CONTROL:
3226 		return IKOT_THREAD_CONTROL;
3227 	case THREAD_FLAVOR_READ:
3228 		return IKOT_THREAD_READ;
3229 	default:
3230 		return IKOT_THREAD_INSPECT;
3231 	}
3232 }
3233 
3234 /*
3235  *	Routine:	convert_thread_to_port_with_flavor
3236  *	Purpose:
3237  *		Convert from a thread to a port of given flavor.
3238  *		Consumes a thread ref; produces a naked send right
3239  *		which may be invalid.
3240  *	Conditions:
3241  *		Nothing locked.
3242  */
3243 static ipc_port_t
convert_thread_to_port_with_flavor(thread_t thread,thread_ro_t tro,mach_thread_flavor_t flavor)3244 convert_thread_to_port_with_flavor(
3245 	thread_t              thread,
3246 	thread_ro_t           tro,
3247 	mach_thread_flavor_t  flavor)
3248 {
3249 	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
3250 	ipc_port_t port = IP_NULL;
3251 
3252 	thread_mtx_lock(thread);
3253 
3254 	/*
3255 	 * out-trans of weaker flavors are still permitted, but in-trans
3256 	 * is separately enforced.
3257 	 */
3258 	if (flavor == THREAD_FLAVOR_CONTROL &&
3259 	    task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
3260 		/* denied by security policy, make the port appear dead */
3261 		port = IP_DEAD;
3262 		goto exit;
3263 	}
3264 
3265 	if (!thread->ipc_active) {
3266 		goto exit;
3267 	}
3268 
3269 	port = tro->tro_ports[flavor];
3270 	if (flavor == THREAD_FLAVOR_CONTROL) {
3271 		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
3272 	} else if (IP_VALID(port)) {
3273 		(void)ipc_kobject_make_send_nsrequest(port, thread, kotype);
3274 	} else {
3275 		/*
3276 		 * Claim a send right on the thread read/inspect port, and request a no-senders
3277 		 * notification on that port (if none outstanding). A thread reference is not
3278 		 * donated here even though the ports are created lazily because it doesn't own the
3279 		 * kobject that it points to. Threads manage their lifetime explicitly and
3280 		 * have to synchronize with each other, between the task/thread terminating and the
3281 		 * send-once notification firing, and this is done under the thread mutex
3282 		 * rather than with atomics.
3283 		 */
3284 		port = ipc_kobject_alloc_port(thread, kotype,
3285 		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
3286 		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3287 		/*
3288 		 * If Developer Mode is off, substitute read port for control
3289 		 * port if copying out to owning task's space, for the sake of
3290 		 * in-process exception handler.
3291 		 *
3292 		 * Also see: exception_deliver().
3293 		 */
3294 		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
3295 			ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
3296 			port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
3297 		}
3298 		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
3299 		    tro, tro_ports[flavor], &port);
3300 	}
3301 
3302 exit:
3303 	thread_mtx_unlock(thread);
3304 	thread_deallocate(thread);
3305 	return port;
3306 }
3307 
3308 ipc_port_t
convert_thread_to_port(thread_t thread)3309 convert_thread_to_port(
3310 	thread_t                thread)
3311 {
3312 	thread_ro_t tro = get_thread_ro(thread);
3313 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3314 }
3315 
3316 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3317 convert_thread_read_to_port(thread_read_t thread)
3318 {
3319 	thread_ro_t tro = get_thread_ro(thread);
3320 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3321 }
3322 
3323 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3324 convert_thread_inspect_to_port(thread_inspect_t thread)
3325 {
3326 	thread_ro_t tro = get_thread_ro(thread);
3327 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3328 }
3329 
3330 
3331 /*
3332  *	Routine:	port_name_to_thread
3333  *	Purpose:
3334  *		Convert from a port name to a thread reference
3335  *		A name of MACH_PORT_NULL is valid for the null thread.
3336  *	Conditions:
3337  *		Nothing locked.
3338  */
3339 thread_t
port_name_to_thread(mach_port_name_t name,port_intrans_options_t options)3340 port_name_to_thread(
3341 	mach_port_name_t         name,
3342 	port_intrans_options_t options)
3343 {
3344 	thread_t        thread = THREAD_NULL;
3345 	ipc_port_t      kport;
3346 	kern_return_t kr;
3347 
3348 	if (MACH_PORT_VALID(name)) {
3349 		kr = ipc_port_translate_send(current_space(), name, &kport);
3350 		if (kr == KERN_SUCCESS) {
3351 			/* port is locked and active */
3352 			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
3353 			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
3354 			thread = convert_port_to_thread_with_flavor_locked(kport,
3355 			    THREAD_FLAVOR_CONTROL, options);
3356 			ip_mq_unlock(kport);
3357 		}
3358 	}
3359 
3360 	return thread;
3361 }
3362 
3363 /*
3364  *	Routine:	port_name_is_pinned_itk_self
3365  *	Purpose:
3366  *		Returns whether this port name is for the pinned
3367  *		mach_task_self (if it exists).
3368  *
3369  *		task_self_trap() when the task port is pinned,
3370  *		will memorize the name the port has in the space
3371  *		in ip_receiver_name, which we can use to fast-track
3372  *		this answer without taking any lock.
3373  *
3374  *		ipc_task_disable() will set `ip_receiver_name` back to
3375  *		MACH_PORT_SPECIAL_DEFAULT.
3376  *
3377  *	Conditions:
3378  *		self must be current_task()
3379  *		Nothing locked.
3380  */
3381 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3382 port_name_is_pinned_itk_self(
3383 	task_t             self,
3384 	mach_port_name_t   name)
3385 {
3386 	ipc_port_t kport = self->itk_self;
3387 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3388 	       kport->ip_pinned && ip_get_receiver_name(kport) == name;
3389 }
3390 
3391 /*
3392  *	Routine:	port_name_to_current_task*_noref
3393  *	Purpose:
3394  *		Convert from a port name to current_task()
3395  *		A name of MACH_PORT_NULL is valid for the null task.
3396  *
3397  *		If current_task() is in the process of being terminated,
3398  *		this might return a non NULL task even when port_name_to_task()
3399  *		would.
3400  *
3401  *		However, this is an acceptable race that can't be controlled by
3402  *		userspace, and that downstream code using the returned task
3403  *		has to handle anyway.
3404  *
3405  *		ipc_space_disable() does try to narrow this race,
3406  *		by causing port_name_is_pinned_itk_self() to fail.
3407  *
3408  *	Returns:
3409  *		current_task() if the port name was for current_task()
3410  *		at the appropriate flavor.
3411  *
3412  *		TASK_NULL otherwise.
3413  *
3414  *	Conditions:
3415  *		Nothing locked.
3416  */
3417 static task_t
port_name_to_current_task_internal_noref(mach_port_name_t name,mach_task_flavor_t flavor)3418 port_name_to_current_task_internal_noref(
3419 	mach_port_name_t   name,
3420 	mach_task_flavor_t flavor)
3421 {
3422 	ipc_port_t kport;
3423 	kern_return_t kr;
3424 	task_t task = TASK_NULL;
3425 	task_t self = current_task();
3426 
3427 	if (port_name_is_pinned_itk_self(self, name)) {
3428 		return self;
3429 	}
3430 
3431 	if (MACH_PORT_VALID(name)) {
3432 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3433 		if (kr == KERN_SUCCESS) {
3434 			ipc_kobject_type_t type = ip_kotype(kport);
3435 			if (task_port_kotype_valid_for_flavor(type, flavor)) {
3436 				task = ipc_kobject_get_locked(kport, type);
3437 			}
3438 			ip_mq_unlock(kport);
3439 			if (task != self) {
3440 				task = TASK_NULL;
3441 			}
3442 		}
3443 	}
3444 
3445 	return task;
3446 }
3447 
3448 task_t
port_name_to_current_task_noref(mach_port_name_t name)3449 port_name_to_current_task_noref(
3450 	mach_port_name_t name)
3451 {
3452 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3453 }
3454 
3455 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3456 port_name_to_current_task_read_noref(
3457 	mach_port_name_t name)
3458 {
3459 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3460 }
3461 
3462 /*
3463  *	Routine:	port_name_to_task
3464  *	Purpose:
3465  *		Convert from a port name to a task reference
3466  *		A name of MACH_PORT_NULL is valid for the null task.
3467  *	Conditions:
3468  *		Nothing locked.
3469  */
3470 static task_t
port_name_to_task_grp(mach_port_name_t name,task_grp_t grp)3471 port_name_to_task_grp(
3472 	mach_port_name_t name,
3473 	task_grp_t       grp)
3474 {
3475 	ipc_port_t kport;
3476 	kern_return_t kr;
3477 	task_t task = TASK_NULL;
3478 	task_t self = current_task();
3479 
3480 	if (port_name_is_pinned_itk_self(self, name)) {
3481 		task_reference_grp(self, grp);
3482 		return self;
3483 	}
3484 
3485 	if (MACH_PORT_VALID(name)) {
3486 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3487 		if (kr == KERN_SUCCESS) {
3488 			/* port is locked and active */
3489 			task = convert_port_to_task_with_flavor_locked(kport,
3490 			    TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, grp);
3491 			ip_mq_unlock(kport);
3492 		}
3493 	}
3494 	return task;
3495 }
3496 
3497 task_t
port_name_to_task_external(mach_port_name_t name)3498 port_name_to_task_external(
3499 	mach_port_name_t name)
3500 {
3501 	return port_name_to_task_grp(name, TASK_GRP_EXTERNAL);
3502 }
3503 
3504 task_t
port_name_to_task_kernel(mach_port_name_t name)3505 port_name_to_task_kernel(
3506 	mach_port_name_t name)
3507 {
3508 	return port_name_to_task_grp(name, TASK_GRP_KERNEL);
3509 }
3510 
3511 /*
3512  *	Routine:	port_name_to_task_read
3513  *	Purpose:
3514  *		Convert from a port name to a task reference
3515  *		A name of MACH_PORT_NULL is valid for the null task.
3516  *	Conditions:
3517  *		Nothing locked.
3518  */
3519 task_read_t
port_name_to_task_read(mach_port_name_t name)3520 port_name_to_task_read(
3521 	mach_port_name_t name)
3522 {
3523 	ipc_port_t kport;
3524 	kern_return_t kr;
3525 	task_read_t tr = TASK_READ_NULL;
3526 	task_t self = current_task();
3527 
3528 	if (port_name_is_pinned_itk_self(self, name)) {
3529 		task_reference_grp(self, TASK_GRP_KERNEL);
3530 		return self;
3531 	}
3532 
3533 	if (MACH_PORT_VALID(name)) {
3534 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3535 		if (kr == KERN_SUCCESS) {
3536 			/* port is locked and active */
3537 			tr = convert_port_to_task_with_flavor_locked(kport,
3538 			    TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK,
3539 			    TASK_GRP_KERNEL);
3540 			ip_mq_unlock(kport);
3541 		}
3542 	}
3543 	return tr;
3544 }
3545 
3546 /*
3547  *	Routine:	port_name_to_task_read_no_eval
3548  *	Purpose:
3549  *		Convert from a port name to a task reference
3550  *		A name of MACH_PORT_NULL is valid for the null task.
3551  *		Skips task_conversion_eval() during conversion.
3552  *	Conditions:
3553  *		Nothing locked.
3554  */
3555 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3556 port_name_to_task_read_no_eval(
3557 	mach_port_name_t name)
3558 {
3559 	ipc_port_t kport;
3560 	kern_return_t kr;
3561 	task_read_t tr = TASK_READ_NULL;
3562 	task_t self = current_task();
3563 
3564 	if (port_name_is_pinned_itk_self(self, name)) {
3565 		task_reference_grp(self, TASK_GRP_KERNEL);
3566 		return self;
3567 	}
3568 
3569 	if (MACH_PORT_VALID(name)) {
3570 		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3571 		    PORT_INTRANS_ALLOW_CORPSE_TASK;
3572 
3573 		kr = ipc_port_translate_send(self->itk_space, name, &kport);
3574 		if (kr == KERN_SUCCESS) {
3575 			/* port is locked and active */
3576 			tr = convert_port_to_task_with_flavor_locked(kport,
3577 			    TASK_FLAVOR_READ, options, TASK_GRP_KERNEL);
3578 			ip_mq_unlock(kport);
3579 		}
3580 	}
3581 	return tr;
3582 }
3583 
3584 /*
3585  *	Routine:	port_name_to_task_name
3586  *	Purpose:
3587  *		Convert from a port name to a task reference
3588  *		A name of MACH_PORT_NULL is valid for the null task.
3589  *	Conditions:
3590  *		Nothing locked.
3591  */
3592 task_name_t
port_name_to_task_name(mach_port_name_t name)3593 port_name_to_task_name(
3594 	mach_port_name_t name)
3595 {
3596 	ipc_port_t kport;
3597 	kern_return_t kr;
3598 	task_name_t tn = TASK_NAME_NULL;
3599 	task_t self = current_task();
3600 
3601 	if (port_name_is_pinned_itk_self(self, name)) {
3602 		task_reference_grp(self, TASK_GRP_KERNEL);
3603 		return self;
3604 	}
3605 
3606 	if (MACH_PORT_VALID(name)) {
3607 		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3608 		    PORT_INTRANS_ALLOW_CORPSE_TASK;
3609 
3610 		kr = ipc_port_translate_send(current_space(), name, &kport);
3611 		if (kr == KERN_SUCCESS) {
3612 			/* port is locked and active */
3613 			tn = convert_port_to_task_with_flavor_locked(kport,
3614 			    TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL);
3615 			ip_mq_unlock(kport);
3616 		}
3617 	}
3618 	return tn;
3619 }
3620 
3621 /*
3622  *	Routine:	port_name_to_task_id_token
3623  *	Purpose:
3624  *		Convert from a port name to a task identity token reference
3625  *	Conditions:
3626  *		Nothing locked.
3627  */
3628 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3629 port_name_to_task_id_token(
3630 	mach_port_name_t name)
3631 {
3632 	ipc_port_t port;
3633 	kern_return_t kr;
3634 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3635 
3636 	if (MACH_PORT_VALID(name)) {
3637 		kr = ipc_port_translate_send(current_space(), name, &port);
3638 		if (kr == KERN_SUCCESS) {
3639 			token = convert_port_to_task_id_token(port);
3640 			ip_mq_unlock(port);
3641 		}
3642 	}
3643 	return token;
3644 }
3645 
3646 /*
3647  *	Routine:	port_name_to_host
3648  *	Purpose:
3649  *		Convert from a port name to a host pointer.
3650  *		NOTE: This does _not_ return a +1 reference to the host_t
3651  *	Conditions:
3652  *		Nothing locked.
3653  */
3654 host_t
port_name_to_host(mach_port_name_t name)3655 port_name_to_host(
3656 	mach_port_name_t name)
3657 {
3658 	host_t host = HOST_NULL;
3659 	kern_return_t kr;
3660 	ipc_port_t port;
3661 
3662 	if (MACH_PORT_VALID(name)) {
3663 		kr = ipc_port_translate_send(current_space(), name, &port);
3664 		if (kr == KERN_SUCCESS) {
3665 			host = convert_port_to_host(port);
3666 			ip_mq_unlock(port);
3667 		}
3668 	}
3669 	return host;
3670 }
3671 
3672 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3673 task_flavor_to_kotype(mach_task_flavor_t flavor)
3674 {
3675 	switch (flavor) {
3676 	case TASK_FLAVOR_CONTROL:
3677 		return IKOT_TASK_CONTROL;
3678 	case TASK_FLAVOR_READ:
3679 		return IKOT_TASK_READ;
3680 	case TASK_FLAVOR_INSPECT:
3681 		return IKOT_TASK_INSPECT;
3682 	default:
3683 		return IKOT_TASK_NAME;
3684 	}
3685 }
3686 
3687 /*
3688  *	Routine:	convert_task_to_port_with_flavor
3689  *	Purpose:
3690  *		Convert from a task to a port of given flavor.
3691  *		Consumes a task ref; produces a naked send right
3692  *		which may be invalid.
3693  *	Conditions:
3694  *		Nothing locked.
3695  */
3696 ipc_port_t
convert_task_to_port_with_flavor(task_t task,mach_task_flavor_t flavor,task_grp_t grp)3697 convert_task_to_port_with_flavor(
3698 	task_t              task,
3699 	mach_task_flavor_t  flavor,
3700 	task_grp_t          grp)
3701 {
3702 	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
3703 	ipc_port_t port = IP_NULL;
3704 
3705 	itk_lock(task);
3706 
3707 	if (!task->ipc_active) {
3708 		goto exit;
3709 	}
3710 
3711 	/*
3712 	 * out-trans of weaker flavors are still permitted, but in-trans
3713 	 * is separately enforced.
3714 	 */
3715 	if (flavor == TASK_FLAVOR_CONTROL &&
3716 	    task_conversion_eval_out_trans(current_task(), task, flavor)) {
3717 		/* denied by security policy, make the port appear dead */
3718 		port = IP_DEAD;
3719 		goto exit;
3720 	}
3721 
3722 	switch (flavor) {
3723 	case TASK_FLAVOR_CONTROL:
3724 	case TASK_FLAVOR_NAME:
3725 		port = ipc_kobject_make_send(task->itk_task_ports[flavor],
3726 		    task, kotype);
3727 		break;
3728 	/*
3729 	 * Claim a send right on the task read/inspect port,
3730 	 * and request a no-senders notification on that port
3731 	 * (if none outstanding).
3732 	 *
3733 	 * The task's itk_lock is used to synchronize the handling
3734 	 * of the no-senders notification with the task termination.
3735 	 */
3736 	case TASK_FLAVOR_READ:
3737 	case TASK_FLAVOR_INSPECT:
3738 		port = task->itk_task_ports[flavor];
3739 		if (IP_VALID(port)) {
3740 			(void)ipc_kobject_make_send_nsrequest(port,
3741 			    task, kotype);
3742 		} else {
3743 			port = ipc_kobject_alloc_port(task, kotype,
3744 			    IPC_KOBJECT_ALLOC_MAKE_SEND |
3745 			    IPC_KOBJECT_ALLOC_NSREQUEST |
3746 			    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
3747 			/*
3748 			 * If Developer Mode is off, substitute read port for control port if
3749 			 * copying out to owning task's space, for the sake of in-process
3750 			 * exception handler.
3751 			 *
3752 			 * Also see: exception_deliver().
3753 			 */
3754 			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
3755 				ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
3756 				port->ip_kolabel->ikol_alt_port = task->itk_self;
3757 			}
3758 
3759 			task->itk_task_ports[flavor] = port;
3760 		}
3761 		break;
3762 	}
3763 
3764 exit:
3765 	itk_unlock(task);
3766 	task_deallocate_grp(task, grp);
3767 	return port;
3768 }
3769 
/*
 * Convert a corpse task to a send right on its control port, arming a
 * no-senders notification. Consumes a task ref; produces a naked send
 * right. `corpse` must be a corpse task; nothing locked on entry.
 */
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	/* the corpse control port must not have outstanding send rights yet */
	assert(port->ip_srights == 0);
	kr = ipc_kobject_make_send_nsrequest(port, corpse, IKOT_TASK_CONTROL);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	task_deallocate(corpse);
	return port;
}
3788 
3789 ipc_port_t
convert_task_to_port(task_t task)3790 convert_task_to_port(
3791 	task_t          task)
3792 {
3793 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3794 }
3795 
3796 ipc_port_t
convert_task_read_to_port(task_read_t task)3797 convert_task_read_to_port(
3798 	task_read_t          task)
3799 {
3800 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3801 }
3802 
3803 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3804 convert_task_inspect_to_port(
3805 	task_inspect_t          task)
3806 {
3807 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3808 }
3809 
3810 ipc_port_t
convert_task_name_to_port(task_name_t task)3811 convert_task_name_to_port(
3812 	task_name_t             task)
3813 {
3814 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3815 }
3816 
3817 ipc_port_t
convert_task_to_port_external(task_t task)3818 convert_task_to_port_external(task_t task)
3819 {
3820 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3821 }
3822 
3823 ipc_port_t
convert_task_read_to_port_external(task_t task)3824 convert_task_read_to_port_external(task_t task)
3825 {
3826 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3827 }
3828 
/*
 * Make a naked send right for current_task()'s own control port.
 * Consumes a task ref; returns IP_NULL if the task's IPC is no
 * longer active. `task` must be current_task(); nothing locked.
 */
ipc_port_t
convert_task_to_port_pinned(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);

	if (task->ipc_active) {
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
	}

	/* immovable tasks must hand out pinned, immovable send rights */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	task_deallocate(task);
	return port;
}
3853 /*
3854  *	Routine:	convert_task_suspend_token_to_port
3855  *	Purpose:
3856  *		Convert from a task suspension token to a port.
3857  *		Consumes a task suspension token ref; produces a naked send-once right
3858  *		which may be invalid.
3859  *	Conditions:
3860  *		Nothing locked.
3861  */
3862 static ipc_port_t
convert_task_suspension_token_to_port_grp(task_suspension_token_t task,task_grp_t grp)3863 convert_task_suspension_token_to_port_grp(
3864 	task_suspension_token_t         task,
3865 	task_grp_t                      grp)
3866 {
3867 	ipc_port_t port;
3868 
3869 	task_lock(task);
3870 	if (task->active) {
3871 		itk_lock(task);
3872 		if (task->itk_resume == IP_NULL) {
3873 			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
3874 			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
3875 		}
3876 
3877 		/*
3878 		 * Create a send-once right for each instance of a direct user-called
3879 		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
3880 		 * the notification handler will resume the target task.
3881 		 */
3882 		port = task->itk_resume;
3883 		ipc_kobject_require(port, task, IKOT_TASK_RESUME);
3884 		port = ipc_port_make_sonce(port);
3885 		itk_unlock(task);
3886 		assert(IP_VALID(port));
3887 	} else {
3888 		port = IP_NULL;
3889 	}
3890 
3891 	task_unlock(task);
3892 	task_suspension_token_deallocate_grp(task, grp);
3893 
3894 	return port;
3895 }
3896 
3897 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3898 convert_task_suspension_token_to_port_external(
3899 	task_suspension_token_t         task)
3900 {
3901 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3902 }
3903 
3904 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3905 convert_task_suspension_token_to_port_mig(
3906 	task_suspension_token_t         task)
3907 {
3908 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3909 }
3910 
3911 ipc_port_t
convert_thread_to_port_pinned(thread_t thread)3912 convert_thread_to_port_pinned(
3913 	thread_t                thread)
3914 {
3915 	thread_ro_t tro = get_thread_ro(thread);
3916 	ipc_port_t  port = IP_NULL;
3917 
3918 	thread_mtx_lock(thread);
3919 
3920 	if (thread->ipc_active) {
3921 		port = ipc_kobject_make_send(tro->tro_self_port,
3922 		    thread, IKOT_THREAD_CONTROL);
3923 	}
3924 
3925 	if (port && task_is_immovable(tro->tro_task)) {
3926 		assert(ip_is_immovable_send(port));
3927 	}
3928 
3929 	thread_mtx_unlock(thread);
3930 	thread_deallocate(thread);
3931 	return port;
3932 }
3933 /*
3934  *	Routine:	space_deallocate
3935  *	Purpose:
3936  *		Deallocate a space ref produced by convert_port_to_space.
3937  *	Conditions:
3938  *		Nothing locked.
3939  */
3940 
3941 void
space_deallocate(ipc_space_t space)3942 space_deallocate(
3943 	ipc_space_t     space)
3944 {
3945 	if (space != IS_NULL) {
3946 		is_release(space);
3947 	}
3948 }
3949 
3950 /*
3951  *	Routine:	space_read_deallocate
3952  *	Purpose:
3953  *		Deallocate a space read ref produced by convert_port_to_space_read.
3954  *	Conditions:
3955  *		Nothing locked.
3956  */
3957 
3958 void
space_read_deallocate(ipc_space_read_t space)3959 space_read_deallocate(
3960 	ipc_space_read_t     space)
3961 {
3962 	if (space != IS_INSPECT_NULL) {
3963 		is_release((ipc_space_t)space);
3964 	}
3965 }
3966 
3967 /*
3968  *	Routine:	space_inspect_deallocate
3969  *	Purpose:
3970  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3971  *	Conditions:
3972  *		Nothing locked.
3973  */
3974 
3975 void
space_inspect_deallocate(ipc_space_inspect_t space)3976 space_inspect_deallocate(
3977 	ipc_space_inspect_t     space)
3978 {
3979 	if (space != IS_INSPECT_NULL) {
3980 		is_release((ipc_space_t)space);
3981 	}
3982 }
3983 
3984 
3985 #if !defined(XNU_TARGET_OS_OSX)
3986 static boolean_t
behavior_is_identity_protected(int new_behavior)3987 behavior_is_identity_protected(int new_behavior)
3988 {
3989 	return (new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED;
3990 }
3991 
3992 static boolean_t
identity_protection_opted_out(const ipc_port_t new_port)3993 identity_protection_opted_out(const ipc_port_t new_port)
3994 {
3995 	if (IP_VALID(new_port)) {
3996 		return ip_is_id_prot_opted_out(new_port);
3997 	}
3998 	return false;
3999 }
4000 
/*
 * Emit a CoreAnalytics event recording a set-exception-port policy
 * violation: the current proc name, the proc name of the excepting
 * task, the exception mask, and a caller-supplied level label.
 */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask, const char* level)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	/* record both sides of the violation for triage */
	task_procname(current_task(), (char *) &event->current_proc, sizeof(event->current_proc));
	task_procname(excepting_task, (char *) &event->thread_proc, sizeof(event->thread_proc));
	event->mask = mask;
	strlcpy(event->level, level, sizeof(event->level));

	CA_EVENT_SEND(ca_event);
}
4014 
/* Returns whether the violation should be ignored */
static boolean_t
set_exception_behavior_violation(const ipc_port_t new_port, const task_t excepting_task,
    const exception_mask_t mask, const char *level)
{
	mach_port_name_t new_name = CAST_MACH_PORT_TO_NAME(new_port);
	boolean_t rate_limited;

	/*
	 * Rate limit guard exceptions to one per task: the telemetry
	 * flag is test-and-set under the task lock.
	 */
	task_lock(current_task());
	rate_limited = task_has_exception_telemetry(current_task());
	if (!rate_limited) {
		task_set_exception_telemetry(current_task());
	}
	task_unlock(current_task());

	if (thid_should_crash && !rate_limited) {
		/* create lightweight corpse */
		mach_port_guard_exception(new_name, 0, 0, kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
	}

	/* always report the proc name to CA */
	send_set_exception_telemetry(excepting_task, mask, level);

	/* if the bootarg has been manually set to false, ignore the violation */
	return !thid_should_crash;
}
4041 
4042 /*
4043  * Protect platform binary task/thread ports.
4044  * excepting_task is NULL if we are setting a host exception port.
4045  */
4046 static boolean_t
exception_exposes_protected_ports(const ipc_port_t new_port,const task_t excepting_task)4047 exception_exposes_protected_ports(const ipc_port_t new_port, const task_t excepting_task)
4048 {
4049 	if (!IP_VALID(new_port) || is_ux_handler_port(new_port)) {
4050 		/*
4051 		 * sending exceptions to invalid port does not pose risk
4052 		 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
4053 		 */
4054 		return FALSE;
4055 	} else if (excepting_task) {
4056 		/*  setting task/thread exception port - protect platform binaries */
4057 		return task_ro_flags_get(excepting_task) & TFRO_PLATFORM;
4058 	}
4059 
4060 	/* setting host port exposes all processes - always protect. */
4061 	return TRUE;
4062 }
4063 #endif /* !defined(XNU_TARGET_OS_OSX) */
4064 
4065 #if CONFIG_CSR
4066 #if !defined(XNU_TARGET_OS_OSX)
4067 static bool
SIP_is_enabled()4068 SIP_is_enabled()
4069 {
4070 	return csr_check(CSR_ALLOW_UNRESTRICTED_FS) == 0;
4071 }
4072 #endif /* !defined(XNU_TARGET_OS_OSX) */
4073 #endif /* CONFIG_CSR */
4074 
/*
 * Policy gate for installing an exception port with a given behavior.
 * Returns TRUE when the set operation may proceed, FALSE (caller returns
 * KERN_NO_ACCESS) when it must be denied.  On macOS this is always TRUE.
 * Parameters are __unused because the macOS build ignores them all.
 */
boolean_t
set_exception_behavior_allowed(__unused const ipc_port_t new_port, __unused int new_behavior,
    __unused const task_t excepting_task, __unused const exception_mask_t mask, __unused const char *level)
{
#if defined(XNU_TARGET_OS_OSX)
	/* Third party plugins run in multiple platform binaries on macos, which we can't break */
	return TRUE;
#else /* defined(XNU_TARGET_OS_OSX) */
	/*
	 * Deny only when every condition holds: the port would expose a
	 * protected (platform/host) target, the behavior is not
	 * identity-protected, the port has not opted out, enforcement is
	 * possible (SIP on), and the caller has no exemption
	 * (Rosetta translation, simulator, or either entitlement).
	 * Even then, set_exception_behavior_violation() may report-only.
	 */
	if (exception_exposes_protected_ports(new_port, excepting_task)
	    && !behavior_is_identity_protected(new_behavior)
	    && !identity_protection_opted_out(new_port) /* Ignore opted out */
#if CONFIG_CSR
	    && SIP_is_enabled() /* cannot enforce if SIP is disabled */
#endif
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state") /* rdar://109119238 */
	    && !IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		return set_exception_behavior_violation(new_port, excepting_task, mask, level);
	}

	return TRUE;
#endif /* defined(XNU_TARGET_OS_OSX) */
}
4101 
4102 /*
4103  *	Routine:	thread/task_set_exception_ports [kernel call]
4104  *	Purpose:
4105  *			Sets the thread/task exception port, flavor and
4106  *			behavior for the exception types specified by the mask.
4107  *			There will be one send right per exception per valid
4108  *			port.
4109  *	Conditions:
4110  *		Nothing locked.  If successful, consumes
4111  *		the supplied send right.
4112  *	Returns:
4113  *		KERN_SUCCESS		Changed the special port.
4114  *		KERN_INVALID_ARGUMENT	The thread is null,
4115  *					Illegal mask bit set.
4116  *					Illegal exception behavior
4117  *		KERN_FAILURE		The thread is dead.
4118  *		KERN_NO_ACCESS		Restricted access to set port
4119  */
4120 
kern_return_t
thread_set_exception_ports(
	thread_t                thread,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];  /* displaced rights, released after unlock */
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());  /* recorded per action */

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but allow opted out ports to still be set on thread only.
	 */
	if (IP_VALID(new_port) &&
	    ((!ip_is_id_prot_opted_out(new_port) && new_port->ip_immovable_receive) ||
	    new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and MACH_EXCEPTION_BACKTRACE_PREFERRED
	 * both require MACH_EXCEPTION_CODES.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Platform policy gate; may emit telemetry or deny outright. */
	if (!set_exception_behavior_allowed(new_port, new_behavior, get_threadtask(thread), exception_mask, "thread")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* Per-thread exception actions are allocated lazily on first set. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/* Stash the displaced right; released after dropping the mutex. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced send rights outside the thread mutex. */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4243 
4244 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4245 task_set_exception_ports(
4246 	task_t                                  task,
4247 	exception_mask_t                exception_mask,
4248 	ipc_port_t                              new_port,
4249 	exception_behavior_t    new_behavior,
4250 	thread_state_flavor_t   new_flavor)
4251 {
4252 	ipc_port_t              old_port[EXC_TYPES_COUNT];
4253 	boolean_t privileged = task_is_privileged(current_task());
4254 	register int    i;
4255 
4256 #if CONFIG_MACF
4257 	struct label *new_label;
4258 #endif
4259 
4260 	if (task == TASK_NULL) {
4261 		return KERN_INVALID_ARGUMENT;
4262 	}
4263 
4264 	if (exception_mask & ~EXC_MASK_VALID) {
4265 		return KERN_INVALID_ARGUMENT;
4266 	}
4267 
4268 	if (IP_VALID(new_port)) {
4269 		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4270 		case EXCEPTION_DEFAULT:
4271 		case EXCEPTION_STATE:
4272 		case EXCEPTION_STATE_IDENTITY:
4273 		case EXCEPTION_IDENTITY_PROTECTED:
4274 			break;
4275 
4276 		default:
4277 			return KERN_INVALID_ARGUMENT;
4278 		}
4279 	}
4280 
4281 	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4282 		return KERN_INVALID_RIGHT;
4283 	}
4284 
4285 
4286 	/*
4287 	 * Check the validity of the thread_state_flavor by calling the
4288 	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4289 	 * osfmk/mach/ARCHITECTURE/thread_status.h
4290 	 */
4291 	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4292 		return KERN_INVALID_ARGUMENT;
4293 	}
4294 
4295 	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4296 	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4297 	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
4298 		return KERN_INVALID_ARGUMENT;
4299 	}
4300 
4301 	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, "task")) {
4302 		return KERN_NO_ACCESS;
4303 	}
4304 
4305 #if CONFIG_MACF
4306 	new_label = mac_exc_create_label_for_current_proc();
4307 #endif
4308 
4309 	itk_lock(task);
4310 
4311 	/*
4312 	 * Allow setting exception port during the span of ipc_task_init() to
4313 	 * ipc_task_terminate(). posix_spawn() port actions can set exception
4314 	 * ports on target task _before_ task IPC access is enabled.
4315 	 */
4316 	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4317 		itk_unlock(task);
4318 #if CONFIG_MACF
4319 		mac_exc_free_label(new_label);
4320 #endif
4321 		return KERN_FAILURE;
4322 	}
4323 
4324 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4325 		if ((exception_mask & (1 << i))
4326 #if CONFIG_MACF
4327 		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4328 #endif
4329 		    ) {
4330 			old_port[i] = task->exc_actions[i].port;
4331 			task->exc_actions[i].port =
4332 			    exception_port_copy_send(new_port);
4333 			task->exc_actions[i].behavior = new_behavior;
4334 			task->exc_actions[i].flavor = new_flavor;
4335 			task->exc_actions[i].privileged = privileged;
4336 		} else {
4337 			old_port[i] = IP_NULL;
4338 		}
4339 	}
4340 
4341 	itk_unlock(task);
4342 
4343 #if CONFIG_MACF
4344 	mac_exc_free_label(new_label);
4345 #endif
4346 
4347 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4348 		if (IP_VALID(old_port[i])) {
4349 			ipc_port_release_send(old_port[i]);
4350 		}
4351 	}
4352 
4353 	if (IP_VALID(new_port)) {         /* consume send right */
4354 		ipc_port_release_send(new_port);
4355 	}
4356 
4357 	return KERN_SUCCESS;
4358 }
4359 
4360 /*
4361  *	Routine:	thread/task_swap_exception_ports [kernel call]
4362  *	Purpose:
4363  *			Sets the thread/task exception port, flavor and
4364  *			behavior for the exception types specified by the
4365  *			mask.
4366  *
4367  *			The old ports, behavior and flavors are returned
4368  *			Count specifies the array sizes on input and
4369  *			the number of returned ports etc. on output.  The
4370  *			arrays must be large enough to hold all the returned
 *			data, MIG returns an error otherwise.  The masks
4372  *			array specifies the corresponding exception type(s).
4373  *
4374  *	Conditions:
4375  *		Nothing locked.  If successful, consumes
4376  *		the supplied send right.
4377  *
 *		Returns up to [in] CountCnt elements.
4379  *	Returns:
4380  *		KERN_SUCCESS		Changed the special port.
4381  *		KERN_INVALID_ARGUMENT	The thread is null,
4382  *					Illegal mask bit set.
4383  *					Illegal exception behavior
4384  *		KERN_FAILURE		The thread is dead.
4385  *		KERN_NO_ACCESS		Restricted access to set port
4386  */
4387 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];  /* displaced rights, released after unlock */
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());  /* recorded per action */
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable (kobject) ports must never become exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/* Flavor validity is architecture dependent; see thread_status.h. */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and MACH_EXCEPTION_BACKTRACE_PREFERRED
	 * both require MACH_EXCEPTION_CODES.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Platform policy gate; may emit telemetry or deny outright. */
	if (!set_exception_behavior_allowed(new_port, new_behavior, get_threadtask(thread), exception_mask, "thread")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	/* Per-thread exception actions are allocated lazily on first set. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	/*
	 * Collect the old settings (deduplicated: identical port/behavior/
	 * flavor triples share one output slot, with their masks OR-ed)
	 * while installing the new ones.  Stops early once the caller's
	 * output arrays (*CountCnt entries) are full.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* no match: start a new output entry */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Stash the displaced right; released after unlock. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced rights for exactly the entries visited above. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4530 
kern_return_t
task_swap_exception_ports(
	task_t                                          task,
	exception_mask_t                        exception_mask,
	ipc_port_t                                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t              old_port[EXC_TYPES_COUNT];  /* displaced rights, released after unlock */
	boolean_t privileged = task_is_privileged(current_task());  /* recorded per action */
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Immovable (kobject) ports must never become exception ports. */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/* Flavor validity is architecture dependent; see thread_status.h. */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and MACH_EXCEPTION_BACKTRACE_PREFERRED
	 * both require MACH_EXCEPTION_CODES.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Platform policy gate; may emit telemetry or deny outright. */
	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, "task")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/*
	 * Collect the old settings (deduplicated: identical port/behavior/
	 * flavor triples share one output slot, with their masks OR-ed)
	 * while installing the new ones.  Stops early once the caller's
	 * output arrays (*CountCnt entries) are full.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* no match: start a new output entry */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right; released after unlock. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced rights for exactly the entries visited above. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4665 
4666 /*
4667  *	Routine:	thread/task_get_exception_ports [kernel call]
4668  *	Purpose:
4669  *		Clones a send right for each of the thread/task's exception
4670  *		ports specified in the mask and returns the behaviour
4671  *		and flavor of said port.
4672  *
 *		Returns up to [in] CountCnt elements.
4674  *
4675  *	Conditions:
4676  *		Nothing locked.
4677  *	Returns:
4678  *		KERN_SUCCESS		Extracted a send right.
4679  *		KERN_INVALID_ARGUMENT	The thread is null,
4680  *					Invalid special port,
4681  *					Illegal mask bit set.
4682  *		KERN_FAILURE		The thread is dead.
4683  */
/*
 * Worker shared by thread_get_exception_ports (ports != NULL: clones a
 * send right per returned entry) and thread_get_exception_ports_info
 * (ports_info != NULL: returns permuted port/receiver identifiers only,
 * no rights).  Entries with identical port/behavior/flavor are merged
 * into one output slot with their exception masks OR-ed together.
 */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Exactly one output mode must be selected. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No actions allocated yet: report zero entries. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New distinct entry, if the caller's arrays have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						/* Addresses are permuted; no capability is exposed. */
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4776 
4777 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4778 thread_get_exception_ports(
4779 	thread_t                        thread,
4780 	exception_mask_t                exception_mask,
4781 	exception_mask_array_t          masks,
4782 	mach_msg_type_number_t          *CountCnt,
4783 	exception_port_array_t          ports,
4784 	exception_behavior_array_t      behaviors,
4785 	thread_state_flavor_array_t     flavors)
4786 {
4787 	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4788 	           NULL, ports, behaviors, flavors);
4789 }
4790 
4791 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4792 thread_get_exception_ports_info(
4793 	mach_port_t                     port,
4794 	exception_mask_t                exception_mask,
4795 	exception_mask_array_t          masks,
4796 	mach_msg_type_number_t          *CountCnt,
4797 	exception_port_info_array_t     ports_info,
4798 	exception_behavior_array_t      behaviors,
4799 	thread_state_flavor_array_t     flavors)
4800 {
4801 	kern_return_t kr;
4802 
4803 	thread_t thread = convert_port_to_thread_read_no_eval(port);
4804 
4805 	if (thread == THREAD_NULL) {
4806 		return KERN_INVALID_ARGUMENT;
4807 	}
4808 
4809 	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4810 	    ports_info, NULL, behaviors, flavors);
4811 
4812 	thread_deallocate(thread);
4813 	return kr;
4814 }
4815 
4816 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4817 thread_get_exception_ports_from_user(
4818 	mach_port_t                     port,
4819 	exception_mask_t                exception_mask,
4820 	exception_mask_array_t          masks,
4821 	mach_msg_type_number_t         *CountCnt,
4822 	exception_port_array_t          ports,
4823 	exception_behavior_array_t      behaviors,
4824 	thread_state_flavor_array_t     flavors)
4825 {
4826 	kern_return_t kr;
4827 
4828 	thread_t thread = convert_port_to_thread(port);
4829 
4830 	if (thread == THREAD_NULL) {
4831 		return KERN_INVALID_ARGUMENT;
4832 	}
4833 
4834 	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4835 
4836 	thread_deallocate(thread);
4837 	return kr;
4838 }
4839 
/*
 * Worker shared by task_get_exception_ports (ports != NULL: clones a
 * send right per returned entry) and task_get_exception_ports_info
 * (ports_info != NULL: returns permuted port/receiver identifiers only,
 * no rights).  Entries with identical port/behavior/flavor are merged
 * into one output slot with their exception masks OR-ed together.
 */
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Exactly one output mode must be selected. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New distinct entry, if the caller's arrays have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						/* Addresses are permuted; no capability is exposed. */
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4924 
4925 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4926 task_get_exception_ports(
4927 	task_t                          task,
4928 	exception_mask_t                exception_mask,
4929 	exception_mask_array_t          masks,
4930 	mach_msg_type_number_t          *CountCnt,
4931 	exception_port_array_t          ports,
4932 	exception_behavior_array_t      behaviors,
4933 	thread_state_flavor_array_t     flavors)
4934 {
4935 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4936 	           NULL, ports, behaviors, flavors);
4937 }
4938 
4939 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4940 task_get_exception_ports_info(
4941 	mach_port_t                     port,
4942 	exception_mask_t                exception_mask,
4943 	exception_mask_array_t          masks,
4944 	mach_msg_type_number_t          *CountCnt,
4945 	exception_port_info_array_t     ports_info,
4946 	exception_behavior_array_t      behaviors,
4947 	thread_state_flavor_array_t     flavors)
4948 {
4949 	kern_return_t kr;
4950 
4951 	task_t task = convert_port_to_task_read_no_eval(port);
4952 
4953 	if (task == TASK_NULL) {
4954 		return KERN_INVALID_ARGUMENT;
4955 	}
4956 
4957 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4958 	    ports_info, NULL, behaviors, flavors);
4959 
4960 	task_deallocate(task);
4961 	return kr;
4962 }
4963 
4964 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4965 task_get_exception_ports_from_user(
4966 	mach_port_t                     port,
4967 	exception_mask_t                exception_mask,
4968 	exception_mask_array_t          masks,
4969 	mach_msg_type_number_t         *CountCnt,
4970 	exception_port_array_t          ports,
4971 	exception_behavior_array_t      behaviors,
4972 	thread_state_flavor_array_t     flavors)
4973 {
4974 	kern_return_t kr;
4975 
4976 	task_t task = convert_port_to_task(port);
4977 
4978 	if (task == TASK_NULL) {
4979 		return KERN_INVALID_ARGUMENT;
4980 	}
4981 
4982 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4983 
4984 	task_deallocate(task);
4985 	return kr;
4986 }
4987 
4988 /*
4989  *	Routine:	ipc_thread_port_unpin
4990  *	Purpose:
4991  *
4992  *		Called on the thread when it's terminating so that the last ref
4993  *		can be deallocated without a guard exception.
4994  *	Conditions:
4995  *		Thread mutex lock is held.
4996  */
4997 void
ipc_thread_port_unpin(ipc_port_t port)4998 ipc_thread_port_unpin(
4999 	ipc_port_t port)
5000 {
5001 	if (port == IP_NULL) {
5002 		return;
5003 	}
5004 	ip_mq_lock(port);
5005 	port->ip_pinned = 0;
5006 	ip_mq_unlock(port);
5007 }
5008