xref: /xnu-10063.141.1/osfmk/kern/ipc_tt.c (revision d8b80295118ef25ac3a784134bcf95cd8e88109f)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <ipc/port.h>
72 #include <mach/mach_types.h>
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/mach_param.h>
76 #include <mach/task_special_ports.h>
77 #include <mach/thread_special_ports.h>
78 #include <mach/thread_status.h>
79 #include <mach/exception_types.h>
80 #include <mach/memory_object_types.h>
81 #include <mach/mach_traps.h>
82 #include <mach/task_server.h>
83 #include <mach/thread_act_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/vm_map_server.h>
87 
88 #include <kern/exc_guard.h>
89 #include <kern/kern_types.h>
90 #include <kern/host.h>
91 #include <kern/ipc_kobject.h>
92 #include <kern/ipc_tt.h>
93 #include <kern/kalloc.h>
94 #include <kern/thread.h>
95 #include <kern/ux_handler.h>
96 #include <kern/misc_protos.h>
97 #include <kdp/kdp_dyld.h>
98 
99 #include <vm/vm_map.h>
100 #include <vm/vm_pageout.h>
101 #include <vm/vm_protos.h>
102 #include <libkern/coreanalytics/coreanalytics.h>
103 
104 #include <security/mac_mach_internal.h>
105 
106 #if CONFIG_CSR
107 #include <sys/csr.h>
108 #endif
109 
110 #include <sys/code_signing.h> /* for developer mode state */
111 
112 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
113 extern int cs_relax_platform_task_ports;
114 #endif
115 
116 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
117 extern boolean_t proc_is_simulated(const proc_t);
118 extern struct proc* current_proc(void);
119 
120 /* bootarg to create lightweight corpse for thread identity lockdown */
121 TUNABLE(bool, thid_should_crash, "thid_should_crash", true);
122 
123 #define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"
124 
125 CA_EVENT(set_exception,
126     CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
127     CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
128     CA_INT, mask,
129     CA_STATIC_STRING(6), level);
130 
131 __options_decl(ipc_reply_port_type_t, uint32_t, {
132 	IRPT_NONE        = 0x00,
133 	IRPT_USER        = 0x01,
134 	IRPT_KERNEL      = 0x02,
135 });
136 
137 /* forward declarations */
138 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
139 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
140 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
141 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
142 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
143 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
144 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
145 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
146 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
147 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
148 
149 /*
150  *	Routine:	ipc_task_init
151  *	Purpose:
152  *		Initialize a task's IPC state.
153  *
154  *		If non-null, some state will be inherited from the parent.
155  *		The parent must be appropriately initialized.
156  *	Conditions:
157  *		Nothing locked.
158  */
159 
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;       /* movable control port (TASK_FLAVOR_CONTROL) */
	ipc_port_t nport;       /* name port (TASK_FLAVOR_NAME) */
	ipc_port_t pport;       /* itk_self; equals kport until made immovable */
	kern_return_t kr;
	int i;


	/* a task cannot operate without an IPC space; failure here is fatal */
	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	/* link the space back to its owning task */
	space->is_task = task;

	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	/*
	 * The protected self port starts out identical to the control port;
	 * ipc_task_set_immovable_pinned() may later replace it with a
	 * separate immovable port.
	 */
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* slot 0 is unused; label it NULL before labeling the real entries */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* no parent to inherit from: start with empty special ports */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* hold the parent's itk lock while copying its send rights */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
283 
284 /*
285  *	Routine:	ipc_task_set_immovable_pinned
286  *	Purpose:
287  *		Make a task's control port immovable and/or pinned
288  *      according to its control port options. If control port
289  *      is immovable, allocate an immovable control port for the
290  *      task and optionally pin it.
291  *	Conditions:
292  *		Task's control port is movable and not pinned.
293  */
void
ipc_task_set_immovable_pinned(
	task_t            task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	ipc_port_t new_pport;

	/* pport is the same as kport at ipc_task_init() time */
	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
	assert(task->itk_self == task->itk_settable_self);
	assert(!task_is_a_corpse(task));

	/* only tasks opt in immovable control port can have pinned control port */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated with IKO_NULL: stays inert until enabled below */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);

		assert(kport != IP_NULL);
		/* record the immovable port as the movable kport's alternate */
		ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		itk_lock(task);
		task->itk_self = new_pport;
		itk_unlock(task);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
	}
}
328 
329 /*
330  *	Routine:	ipc_task_enable
331  *	Purpose:
332  *		Enable a task for IPC access.
333  *	Conditions:
334  *		Nothing locked.
335  */
336 void
ipc_task_enable(task_t task)337 ipc_task_enable(
338 	task_t          task)
339 {
340 	ipc_port_t kport;
341 	ipc_port_t nport;
342 	ipc_port_t iport;
343 	ipc_port_t rdport;
344 	ipc_port_t pport;
345 
346 	itk_lock(task);
347 	if (!task->active) {
348 		/*
349 		 * task has been terminated before we can enable IPC access.
350 		 * The check is to make sure we don't accidentally re-enable
351 		 * the task ports _after_ they've been disabled during
352 		 * task_terminate_internal(), in which case we will hit the
353 		 * !task->ipc_active assertion in ipc_task_terminate().
354 		 *
355 		 * Technically we should grab task lock when checking task
356 		 * active bit, but since task termination unsets task->active
357 		 * _before_ calling ipc_task_disable(), we can always see the
358 		 * truth with just itk_lock() and bail if disable has been called.
359 		 */
360 		itk_unlock(task);
361 		return;
362 	}
363 
364 	assert(!task->ipc_active || task_is_a_corpse(task));
365 	task->ipc_active = true;
366 
367 	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
368 	if (kport != IP_NULL) {
369 		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
370 	}
371 	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
372 	if (nport != IP_NULL) {
373 		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
374 	}
375 	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
376 	if (iport != IP_NULL) {
377 		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
378 	}
379 	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
380 	if (rdport != IP_NULL) {
381 		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
382 	}
383 	pport = task->itk_self;
384 	if (pport != kport && pport != IP_NULL) {
385 		assert(task_is_immovable(task));
386 		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
387 	}
388 
389 	itk_unlock(task);
390 }
391 
392 /*
393  *	Routine:	ipc_task_disable
394  *	Purpose:
395  *		Disable IPC access to a task.
396  *	Conditions:
397  *		Nothing locked.
398  */
399 
void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* detach the task from every port flavor that was materialized */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			/* the immovable port is a separate object; disable it too */
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
470 
471 /*
472  *	Routine:	ipc_task_terminate
473  *	Purpose:
474  *		Clean up and destroy a task's IPC state.
475  *	Conditions:
476  *		Nothing locked.  The task must be suspended.
477  *		(Or the current thread must be in the task.)
478  */
479 
void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
	ipc_port_t pport;
	ipc_port_t sself;
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	sself = task->itk_settable_self;
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/*
	 * Snapshot every port into locals and clear the task fields under
	 * the itk lock; the actual right releases / port deallocations
	 * happen below, after the lock is dropped.
	 */
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* steal the dyld notifier array; its rights are released below */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		/* separate immovable control port (immovable task) */
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
609 
610 /*
611  *	Routine:	ipc_task_reset
612  *	Purpose:
613  *		Reset a task's IPC state to protect it when
614  *		it enters an elevated security context. The
615  *		task name port can remain the same - since it
616  *              represents no specific privilege.
617  *	Conditions:
618  *		Nothing locked.  The task must be suspended.
619  *		(Or the current thread must be in the task.)
620  */
621 
void
ipc_task_reset(
	task_t          task)
{
	ipc_port_t old_kport, old_pport, new_kport, new_pport;
	ipc_port_t old_sself;
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	ipc_port_t *notifiers_ptr = NULL;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label(NULL);
#endif

	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_task_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
	 * port has not been enabled, and is left movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * task port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	itk_lock(task);

	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];

	old_pport = task->itk_self;

	if (old_pport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		/* undo the allocations made above before bailing out */
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
		if (new_pport != new_kport) {
			assert(task_is_immovable(task));
			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	old_sself = task->itk_settable_self;
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
	task->itk_self = new_pport;

	if (task_is_a_corpse(task)) {
		/* No extra send right for corpse, needed to arm no-senders notification */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(new_kport);
	}

	/* clears ikol_alt_port */
	ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);

	/* Reset the read and inspect flavors of task port */
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_disable(old_pport, IKOT_TASK_CONTROL);
	}

	/*
	 * Strip non-privileged exception ports: they must not survive into
	 * the elevated security context. Privileged registrations — and a
	 * pending corpse-notify port — are preserved.
	 */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	/* steal the dyld notifier array; its rights are released below */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy all task port flavors */
	if (old_rdport != IP_NULL) {
		/* read port ikol_alt_port may point to kport, dealloc first */
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
	}
	if (old_pport != old_kport) {
		assert(task_is_immovable(task));
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
	}
}
767 
768 /*
769  *	Routine:	ipc_thread_init
770  *	Purpose:
771  *		Initialize a thread's IPC state.
772  *	Conditions:
773  *		Nothing locked.
774  */
775 
void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t      kport;  /* movable control port (THREAD_FLAVOR_CONTROL) */
	ipc_port_t      pport;  /* tro_self_port; equals kport when movable */
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/*
		 * The movable control port is labeled so requests can be
		 * substituted with the immovable port (ikol_alt_port).
		 */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
829 
/*
 *	Routine:	ipc_main_thread_set_immovable_pinned
 *	Purpose:
 *		Make the main thread's control port immovable and/or pinned
 *		to match the owning task's control port options.
 *	Conditions:
 *		Nothing locked. The thread's self port is still the movable
 *		control port created by ipc_thread_init().
 */
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
	assert(tro->tro_self_port == tro->tro_settable_self_port);

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		/* allocated with IKO_NULL: stays inert until enabled below */
		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		assert(kport != IP_NULL);
		/* record the immovable port as the movable kport's alternate */
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		thread_mtx_lock(thread);
		/* tro lives in read-only memory; update via the zalloc_ro mutator */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
869 
870 struct thread_init_exc_actions {
871 	struct exception_action array[EXC_TYPES_COUNT];
872 };
873 
874 static void
ipc_thread_init_exc_actions(thread_ro_t tro)875 ipc_thread_init_exc_actions(thread_ro_t tro)
876 {
877 	struct exception_action *actions;
878 
879 	actions = kalloc_type(struct thread_init_exc_actions,
880 	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
881 
882 #if CONFIG_MACF
883 	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
884 		mac_exc_associate_action_label(&actions[i],
885 		    mac_exc_create_label(&actions[i]));
886 	}
887 #endif
888 
889 	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
890 }
891 
892 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)893 ipc_thread_destroy_exc_actions(thread_ro_t tro)
894 {
895 	struct exception_action *actions = tro->tro_exc_actions;
896 
897 	if (actions) {
898 #if CONFIG_MACF
899 		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
900 			mac_exc_free_action_label(actions + i);
901 		}
902 #endif
903 
904 		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
905 		struct thread_init_exc_actions *tr_actions =
906 		    (struct thread_init_exc_actions *)actions;
907 		kfree_type(struct thread_init_exc_actions, tr_actions);
908 	}
909 }
910 
911 static void
ipc_thread_ro_update_ports(thread_ro_t tro,const struct thread_ro * tro_tpl)912 ipc_thread_ro_update_ports(
913 	thread_ro_t             tro,
914 	const struct thread_ro *tro_tpl)
915 {
916 	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
917 	vm_size_t size = sizeof(struct ipc_port *) * 2 + sizeof(tro_tpl->tro_ports);
918 
919 	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
920 	    offsetof(struct thread_ro, tro_self_port) +
921 	    sizeof(struct ipc_port_t *));
922 	static_assert(offsetof(struct thread_ro, tro_ports) ==
923 	    offsetof(struct thread_ro, tro_self_port) +
924 	    2 * sizeof(struct ipc_port_t *));
925 	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
926 	    offs, &tro_tpl->tro_self_port, size);
927 }
928 
929 /*
930  *	Routine:	ipc_thread_disable
931  *	Purpose:
932  *		Clean up and destroy a thread's IPC state.
933  *	Conditions:
934  *		Thread locked.
935  */
936 void
ipc_thread_disable(thread_t thread)937 ipc_thread_disable(
938 	thread_t        thread)
939 {
940 	thread_ro_t     tro = get_thread_ro(thread);
941 	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
942 	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
943 	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];
944 	ipc_port_t      pport = tro->tro_self_port;
945 
946 	/*
947 	 * This innocuous looking line is load bearing.
948 	 *
949 	 * It is used to disable the creation of lazy made ports.
950 	 * We must do so before we drop the last reference on the thread,
951 	 * as thread ports do not own a reference on the thread, and
952 	 * convert_port_to_thread* will crash trying to resurect a thread.
953 	 */
954 	thread->ipc_active = false;
955 
956 	if (kport != IP_NULL) {
957 		/* clears ikol_alt_port */
958 		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
959 	}
960 
961 	if (iport != IP_NULL) {
962 		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
963 	}
964 
965 	if (rdport != IP_NULL) {
966 		/* clears ikol_alt_port */
967 		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
968 	}
969 
970 	if (pport != kport && pport != IP_NULL) {
971 		assert(task_is_immovable(tro->tro_task));
972 		assert(pport->ip_immovable_send);
973 		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
974 	}
975 
976 	/* unbind the thread special reply port */
977 	if (IP_VALID(thread->ith_special_reply_port)) {
978 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
979 	}
980 }
981 
982 /*
983  *	Routine:	ipc_thread_terminate
984  *	Purpose:
985  *		Clean up and destroy a thread's IPC state.
986  *	Conditions:
987  *		Nothing locked.
988  */
989 
990 void
ipc_thread_terminate(thread_t thread)991 ipc_thread_terminate(
992 	thread_t        thread)
993 {
994 	thread_ro_t tro = get_thread_ro(thread);
995 	ipc_port_t kport = IP_NULL;
996 	ipc_port_t iport = IP_NULL;
997 	ipc_port_t rdport = IP_NULL;
998 	ipc_port_t pport = IP_NULL;
999 	ipc_port_t sport = IP_NULL;
1000 
1001 	thread_mtx_lock(thread);
1002 
1003 	/*
1004 	 * If we ever failed to clear ipc_active before the last reference
1005 	 * was dropped, lazy ports might be made and used after the last
1006 	 * reference is dropped and cause use after free (see comment in
1007 	 * ipc_thread_disable()).
1008 	 */
1009 	assert(!thread->ipc_active);
1010 
1011 	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
1012 	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
1013 	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
1014 	pport = tro->tro_self_port;
1015 	sport = tro->tro_settable_self_port;
1016 
1017 	if (kport != IP_NULL) {
1018 		if (IP_VALID(sport)) {
1019 			ipc_port_release_send(sport);
1020 		}
1021 
1022 		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });
1023 
1024 		if (tro->tro_exc_actions != NULL) {
1025 			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1026 				if (IP_VALID(tro->tro_exc_actions[i].port)) {
1027 					ipc_port_release_send(tro->tro_exc_actions[i].port);
1028 				}
1029 			}
1030 			ipc_thread_destroy_exc_actions(tro);
1031 		}
1032 	}
1033 
1034 #if IMPORTANCE_INHERITANCE
1035 	assert(thread->ith_assertions == 0);
1036 #endif
1037 
1038 	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
1039 	thread_mtx_unlock(thread);
1040 
1041 	/* clears read port ikol_alt_port, must be done first */
1042 	if (rdport != IP_NULL) {
1043 		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
1044 	}
1045 	/* control port can also have ikol_alt_port */
1046 	if (kport != IP_NULL) {
1047 		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
1048 	}
1049 	/* ikol_alt_port cleared */
1050 
1051 	if (iport != IP_NULL) {
1052 		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
1053 	}
1054 	if (pport != kport && pport != IP_NULL) {
1055 		assert(task_is_immovable(tro->tro_task));
1056 		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
1057 	}
1058 	if (thread->ith_kernel_reply_port != IP_NULL) {
1059 		thread_dealloc_kernel_special_reply_port(thread);
1060 	}
1061 }
1062 
1063 /*
1064  *	Routine:	ipc_thread_reset
1065  *	Purpose:
1066  *		Reset the IPC state for a given Mach thread when
1067  *		its task enters an elevated security context.
1068  *		All flavors of thread port and its exception ports have
1069  *		to be reset.  Its RPC reply port cannot have any
1070  *		rights outstanding, so it should be fine. The thread
1071  *		inspect and read port are set to NULL.
1072  *	Conditions:
1073  *		Nothing locked.
1074  */
1075 
1076 void
ipc_thread_reset(thread_t thread)1077 ipc_thread_reset(
1078 	thread_t        thread)
1079 {
1080 	thread_ro_t tro = get_thread_ro(thread);
1081 	ipc_port_t old_kport, new_kport, old_pport, new_pport;
1082 	ipc_port_t old_sself;
1083 	ipc_port_t old_rdport;
1084 	ipc_port_t old_iport;
1085 	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
1086 	boolean_t  has_old_exc_actions = FALSE;
1087 	boolean_t thread_is_immovable;
1088 	int i;
1089 
1090 #if CONFIG_MACF
1091 	struct label *new_label = mac_exc_create_label(NULL);
1092 #endif
1093 
1094 	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);
1095 
1096 	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
1097 	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
1098 	/*
1099 	 * ipc_thread_reset() only happens during sugid or corpsify.
1100 	 *
1101 	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
1102 	 * port is still movable/not pinned.
1103 	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
1104 	 * thread port is bogus, and should appear as if it doesn't have one.
1105 	 *
1106 	 * So simply leave pport the same as kport.
1107 	 */
1108 	new_pport = new_kport;
1109 
1110 	thread_mtx_lock(thread);
1111 
1112 	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
1113 	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
1114 	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
1115 
1116 	old_sself = tro->tro_settable_self_port;
1117 	old_pport = tro->tro_self_port;
1118 
1119 	if (old_kport == IP_NULL && thread->inspection == FALSE) {
1120 		/* thread is already terminated (can this happen?) */
1121 		thread_mtx_unlock(thread);
1122 		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
1123 		if (thread_is_immovable) {
1124 			ipc_kobject_dealloc_port(new_pport, 0,
1125 			    IKOT_THREAD_CONTROL);
1126 		}
1127 #if CONFIG_MACF
1128 		mac_exc_free_label(new_label);
1129 #endif
1130 		return;
1131 	}
1132 
1133 	thread->ipc_active = true;
1134 
1135 	struct thread_ro tpl = {
1136 		.tro_self_port = new_pport,
1137 		/* we just made the port, no need to triple check */
1138 		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
1139 		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
1140 	};
1141 
1142 	ipc_thread_ro_update_ports(tro, &tpl);
1143 
1144 	if (old_kport != IP_NULL) {
1145 		/* clears ikol_alt_port */
1146 		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
1147 	}
1148 	if (old_rdport != IP_NULL) {
1149 		/* clears ikol_alt_port */
1150 		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
1151 	}
1152 	if (old_iport != IP_NULL) {
1153 		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
1154 	}
1155 	if (thread_is_immovable && old_pport != IP_NULL) {
1156 		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
1157 	}
1158 
1159 	/*
1160 	 * Only ports that were set by root-owned processes
1161 	 * (privileged ports) should survive
1162 	 */
1163 	if (tro->tro_exc_actions != NULL) {
1164 		has_old_exc_actions = TRUE;
1165 		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1166 			if (tro->tro_exc_actions[i].privileged) {
1167 				old_exc_actions[i] = IP_NULL;
1168 			} else {
1169 #if CONFIG_MACF
1170 				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
1171 #endif
1172 				old_exc_actions[i] = tro->tro_exc_actions[i].port;
1173 				tro->tro_exc_actions[i].port = IP_NULL;
1174 			}
1175 		}
1176 	}
1177 
1178 	thread_mtx_unlock(thread);
1179 
1180 #if CONFIG_MACF
1181 	mac_exc_free_label(new_label);
1182 #endif
1183 
1184 	/* release the naked send rights */
1185 
1186 	if (IP_VALID(old_sself)) {
1187 		ipc_port_release_send(old_sself);
1188 	}
1189 
1190 	if (has_old_exc_actions) {
1191 		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1192 			ipc_port_release_send(old_exc_actions[i]);
1193 		}
1194 	}
1195 
1196 	/* destroy the kernel ports */
1197 	if (old_rdport != IP_NULL) {
1198 		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
1199 	}
1200 	if (old_kport != IP_NULL) {
1201 		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
1202 	}
1203 	/* ikol_alt_port cleared */
1204 
1205 	if (old_iport != IP_NULL) {
1206 		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
1207 	}
1208 	if (old_pport != old_kport && old_pport != IP_NULL) {
1209 		assert(thread_is_immovable);
1210 		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
1211 	}
1212 
1213 	/* unbind the thread special reply port */
1214 	if (IP_VALID(thread->ith_special_reply_port)) {
1215 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1216 	}
1217 }
1218 
1219 /*
1220  *	Routine:	retrieve_task_self_fast
1221  *	Purpose:
1222  *		Optimized version of retrieve_task_self,
1223  *		that only works for the current task.
1224  *
1225  *		Return a send right (possibly null/dead)
1226  *		for the task's user-visible self port.
1227  *	Conditions:
1228  *		Nothing locked.
1229  */
1230 
1231 static ipc_port_t
retrieve_task_self_fast(task_t task)1232 retrieve_task_self_fast(
1233 	task_t          task)
1234 {
1235 	ipc_port_t port = IP_NULL;
1236 
1237 	assert(task == current_task());
1238 
1239 	itk_lock(task);
1240 	assert(task->itk_self != IP_NULL);
1241 
1242 #if CONFIG_CSR
1243 	if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
1244 		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
1245 	} else
1246 #endif
1247 	{
1248 		/* no interposing, return the IMMOVABLE port */
1249 		port = ipc_kobject_make_send(task->itk_self, task,
1250 		    IKOT_TASK_CONTROL);
1251 #if (DEBUG || DEVELOPMENT)
1252 		if (task_is_immovable(task)) {
1253 			assert(ip_is_immovable_send(port));
1254 			if (task_is_pinned(task)) {
1255 				/* pinned port is also immovable */
1256 				assert(ip_is_pinned(port));
1257 			}
1258 		} else {
1259 			assert(!ip_is_immovable_send(port));
1260 			assert(!ip_is_pinned(port));
1261 		}
1262 #endif
1263 	}
1264 
1265 	itk_unlock(task);
1266 
1267 	return port;
1268 }
1269 
1270 /*
1271  *	Routine:	mach_task_is_self
1272  *	Purpose:
1273  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1274  *      port is pointing to current_task.
1275  */
1276 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1277 mach_task_is_self(
1278 	task_t         task,
1279 	boolean_t     *is_self)
1280 {
1281 	if (task == TASK_NULL) {
1282 		return KERN_INVALID_ARGUMENT;
1283 	}
1284 
1285 	*is_self = (task == current_task());
1286 
1287 	return KERN_SUCCESS;
1288 }
1289 
1290 /*
1291  *	Routine:	retrieve_thread_self_fast
1292  *	Purpose:
1293  *		Return a send right (possibly null/dead)
1294  *		for the thread's user-visible self port.
1295  *
1296  *		Only works for the current thread.
1297  *
1298  *	Conditions:
1299  *		Nothing locked.
1300  */
1301 
1302 ipc_port_t
retrieve_thread_self_fast(thread_t thread)1303 retrieve_thread_self_fast(
1304 	thread_t                thread)
1305 {
1306 	thread_ro_t tro = get_thread_ro(thread);
1307 	ipc_port_t port = IP_NULL;
1308 
1309 	assert(thread == current_thread());
1310 
1311 	thread_mtx_lock(thread);
1312 
1313 	assert(tro->tro_self_port != IP_NULL);
1314 
1315 #if CONFIG_CSR
1316 	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
1317 		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
1318 	} else
1319 #endif
1320 	{
1321 		/* no interposing, return IMMOVABLE_PORT */
1322 		port = ipc_kobject_make_send(tro->tro_self_port, thread,
1323 		    IKOT_THREAD_CONTROL);
1324 #if (DEBUG || DEVELOPMENT)
1325 		if (task_is_immovable(tro->tro_task)) {
1326 			assert(ip_is_immovable_send(port));
1327 			uint16_t tag = thread_get_tag(thread);
1328 			/* terminated threads are unpinned */
1329 			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
1330 				assert(ip_is_pinned(port));
1331 			} else {
1332 				assert(!ip_is_pinned(port));
1333 			}
1334 		} else {
1335 			assert(!ip_is_immovable_send(port));
1336 			assert(!ip_is_pinned(port));
1337 		}
1338 #endif
1339 	}
1340 
1341 	thread_mtx_unlock(thread);
1342 
1343 	return port;
1344 }
1345 
1346 /*
1347  *	Routine:	task_self_trap [mach trap]
1348  *	Purpose:
1349  *		Give the caller send rights for his own task port.
1350  *	Conditions:
1351  *		Nothing locked.
1352  *	Returns:
1353  *		MACH_PORT_NULL if there are any resource failures
1354  *		or other errors.
1355  */
1356 
1357 mach_port_name_t
task_self_trap(__unused struct task_self_trap_args * args)1358 task_self_trap(
1359 	__unused struct task_self_trap_args *args)
1360 {
1361 	task_t task = current_task();
1362 	ipc_port_t sright;
1363 	mach_port_name_t name;
1364 
1365 	sright = retrieve_task_self_fast(task);
1366 	name = ipc_port_copyout_send(sright, task->itk_space);
1367 
1368 	/*
1369 	 * When the right is pinned, memorize the name we gave it
1370 	 * in ip_receiver_name (it's an abuse as this port really
1371 	 * isn't a message queue, but the field is up for grabs
1372 	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
1373 	 *
1374 	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
1375 	 * when it is pinned.
1376 	 *
1377 	 * ipc_task_disable() will revert this when the task dies.
1378 	 */
1379 	if (sright == task->itk_self && sright->ip_pinned &&
1380 	    MACH_PORT_VALID(name)) {
1381 		itk_lock(task);
1382 		if (task->ipc_active) {
1383 			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
1384 				sright->ip_receiver_name = name;
1385 			} else if (ip_get_receiver_name(sright) != name) {
1386 				panic("mach_task_self() name changed");
1387 			}
1388 		}
1389 		itk_unlock(task);
1390 	}
1391 	return name;
1392 }
1393 
1394 /*
1395  *	Routine:	thread_self_trap [mach trap]
1396  *	Purpose:
1397  *		Give the caller send rights for his own thread port.
1398  *	Conditions:
1399  *		Nothing locked.
1400  *	Returns:
1401  *		MACH_PORT_NULL if there are any resource failures
1402  *		or other errors.
1403  */
1404 
1405 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1406 thread_self_trap(
1407 	__unused struct thread_self_trap_args *args)
1408 {
1409 	thread_t thread = current_thread();
1410 	ipc_space_t space = current_space();
1411 	ipc_port_t sright;
1412 	mach_port_name_t name;
1413 
1414 	sright = retrieve_thread_self_fast(thread);
1415 	name = ipc_port_copyout_send(sright, space);
1416 	return name;
1417 }
1418 
1419 /*
1420  *	Routine:	mach_reply_port [mach trap]
1421  *	Purpose:
1422  *		Allocate a port for the caller.
1423  *	Conditions:
1424  *		Nothing locked.
1425  *	Returns:
1426  *		MACH_PORT_NULL if there are any resource failures
1427  *		or other errors.
1428  */
1429 
1430 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1431 mach_reply_port(
1432 	__unused struct mach_reply_port_args *args)
1433 {
1434 	ipc_port_t port;
1435 	mach_port_name_t name;
1436 	kern_return_t kr;
1437 
1438 	kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1439 	    &name, &port);
1440 	if (kr == KERN_SUCCESS) {
1441 		ip_mq_unlock(port);
1442 	} else {
1443 		name = MACH_PORT_NULL;
1444 	}
1445 	return name;
1446 }
1447 
1448 /*
1449  *	Routine:	thread_get_special_reply_port [mach trap]
1450  *	Purpose:
1451  *		Allocate a special reply port for the calling thread.
1452  *	Conditions:
1453  *		Nothing locked.
1454  *	Returns:
1455  *		mach_port_name_t: send right & receive right for special reply port.
1456  *		MACH_PORT_NULL if there are any resource failures
1457  *		or other errors.
1458  */
1459 
1460 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1461 thread_get_special_reply_port(
1462 	__unused struct thread_get_special_reply_port_args *args)
1463 {
1464 	ipc_port_t port;
1465 	mach_port_name_t name;
1466 	kern_return_t kr;
1467 	thread_t thread = current_thread();
1468 	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
1469 	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
1470 
1471 	/* unbind the thread special reply port */
1472 	if (IP_VALID(thread->ith_special_reply_port)) {
1473 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1474 	}
1475 
1476 	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
1477 	if (kr == KERN_SUCCESS) {
1478 		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1479 		ip_mq_unlock(port);
1480 	} else {
1481 		name = MACH_PORT_NULL;
1482 	}
1483 	return name;
1484 }
1485 
1486 /*
1487  *	Routine:	thread_get_kernel_special_reply_port
1488  *	Purpose:
1489  *		Allocate a kernel special reply port for the calling thread.
1490  *	Conditions:
1491  *		Nothing locked.
1492  *	Returns:
1493  *		Creates and sets kernel special reply port.
1494  *		KERN_SUCCESS on Success.
1495  *		KERN_FAILURE on Failure.
1496  */
1497 
1498 kern_return_t
thread_get_kernel_special_reply_port(void)1499 thread_get_kernel_special_reply_port(void)
1500 {
1501 	ipc_port_t port = IP_NULL;
1502 	thread_t thread = current_thread();
1503 
1504 	/* unbind the thread special reply port */
1505 	if (IP_VALID(thread->ith_kernel_reply_port)) {
1506 		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1507 	}
1508 
1509 	port = ipc_port_alloc_reply(); /*returns a reference on the port */
1510 	if (port != IPC_PORT_NULL) {
1511 		ip_mq_lock(port);
1512 		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
1513 		ip_mq_unlock(port);
1514 		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
1515 	}
1516 	return KERN_SUCCESS;
1517 }
1518 
1519 /*
1520  *	Routine:	ipc_port_bind_special_reply_port_locked
1521  *	Purpose:
1522  *		Bind the given port to current thread as a special reply port.
1523  *	Conditions:
1524  *		Port locked.
1525  *	Returns:
1526  *		None.
1527  */
1528 
1529 static void
ipc_port_bind_special_reply_port_locked(ipc_port_t port,ipc_reply_port_type_t reply_type)1530 ipc_port_bind_special_reply_port_locked(
1531 	ipc_port_t            port,
1532 	ipc_reply_port_type_t reply_type)
1533 {
1534 	thread_t thread = current_thread();
1535 	ipc_port_t *reply_portp;
1536 
1537 	if (reply_type == IRPT_USER) {
1538 		reply_portp = &thread->ith_special_reply_port;
1539 	} else {
1540 		reply_portp = &thread->ith_kernel_reply_port;
1541 	}
1542 
1543 	assert(*reply_portp == NULL);
1544 	assert(port->ip_specialreply);
1545 	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1546 
1547 	ip_reference(port);
1548 	*reply_portp = port;
1549 	port->ip_messages.imq_srp_owner_thread = thread;
1550 
1551 	ipc_special_reply_port_bits_reset(port);
1552 }
1553 
1554 /*
1555  *	Routine:	ipc_port_unbind_special_reply_port
1556  *	Purpose:
1557  *		Unbind the thread's special reply port.
1558  *		If the special port has threads waiting on turnstile,
1559  *		update it's inheritor.
1560  *	Condition:
1561  *		Nothing locked.
1562  *	Returns:
1563  *		None.
1564  */
1565 static void
ipc_port_unbind_special_reply_port(thread_t thread,ipc_reply_port_type_t reply_type)1566 ipc_port_unbind_special_reply_port(
1567 	thread_t              thread,
1568 	ipc_reply_port_type_t reply_type)
1569 {
1570 	ipc_port_t *reply_portp;
1571 
1572 	if (reply_type == IRPT_USER) {
1573 		reply_portp = &thread->ith_special_reply_port;
1574 	} else {
1575 		reply_portp = &thread->ith_kernel_reply_port;
1576 	}
1577 
1578 	ipc_port_t special_reply_port = *reply_portp;
1579 
1580 	ip_mq_lock(special_reply_port);
1581 
1582 	*reply_portp = NULL;
1583 	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
1584 	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
1585 	/* port unlocked */
1586 
1587 	/* Destroy the port if its kernel special reply, else just release a ref */
1588 	if (reply_type == IRPT_USER) {
1589 		ip_release(special_reply_port);
1590 	} else {
1591 		ipc_port_dealloc_reply(special_reply_port);
1592 	}
1593 	return;
1594 }
1595 
1596 /*
1597  *	Routine:	thread_dealloc_kernel_special_reply_port
1598  *	Purpose:
1599  *		Unbind the thread's kernel special reply port.
1600  *		If the special port has threads waiting on turnstile,
1601  *		update it's inheritor.
1602  *	Condition:
1603  *		Called on current thread or a terminated thread.
1604  *	Returns:
1605  *		None.
1606  */
1607 
1608 void
thread_dealloc_kernel_special_reply_port(thread_t thread)1609 thread_dealloc_kernel_special_reply_port(thread_t thread)
1610 {
1611 	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1612 }
1613 
1614 /*
1615  *	Routine:	thread_get_special_port [kernel call]
1616  *	Purpose:
1617  *		Clones a send right for one of the thread's
1618  *		special ports.
1619  *	Conditions:
1620  *		Nothing locked.
1621  *	Returns:
1622  *		KERN_SUCCESS		Extracted a send right.
1623  *		KERN_INVALID_ARGUMENT	The thread is null.
1624  *		KERN_FAILURE		The thread is dead.
1625  *		KERN_INVALID_ARGUMENT	Invalid special port.
1626  */
1627 
1628 kern_return_t
1629 thread_get_special_port(
1630 	thread_inspect_t         thread,
1631 	int                      which,
1632 	ipc_port_t              *portp);
1633 
/*
 *	Routine:	thread_get_special_port_internal
 *	Purpose:
 *		Common implementation for thread_get_special_port() and
 *		thread_get_special_port_from_user(): return a send right
 *		for the requested special port, after checking that the
 *		caller's port flavor is allowed to ask for it.
 *	Conditions:
 *		Nothing locked.  `tro` is the thread's read-only struct.
 *	Returns:
 *		KERN_SUCCESS		Extracted a send right into *portp.
 *		KERN_FAILURE		The thread is not active.
 *		KERN_INVALID_ARGUMENT	Invalid `which`.
 *		KERN_INVALID_CAPABILITY	Flavor may not request `which`.
 */
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* interposed (settable) self takes precedence when SIP is off */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		/* drop the mutex first: the conversion takes its own locks */
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1687 
1688 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1689 thread_get_special_port(
1690 	thread_inspect_t         thread,
1691 	int                      which,
1692 	ipc_port_t              *portp)
1693 {
1694 	if (thread == THREAD_NULL) {
1695 		return KERN_INVALID_ARGUMENT;
1696 	}
1697 
1698 	return thread_get_special_port_internal(thread, get_thread_ro(thread),
1699 	           which, portp, THREAD_FLAVOR_CONTROL);
1700 }
1701 
/*
 *	Routine:	thread_get_non_substituted_self
 *	Purpose:
 *		Produce a one-time substitution port wrapping a send right
 *		to the thread's (possibly interposed) self port, for callers
 *		that are denied a movable control port.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);
	port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* interposed (settable) self takes precedence when present */
	if (tro->tro_settable_self_port != port) {
		port = ipc_port_make_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	}
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1722 
/*
 *	Routine:	thread_get_special_port_from_user
 *	Purpose:
 *		[MIG call] Fetch a thread special port on behalf of user
 *		space, deriving the permitted flavor from the kobject type
 *		of the port the caller presented.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS / KERN_DENIED (MACF) / errors from
 *		thread_get_special_port_internal().
 */
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the presented port's kobject type to the request flavor */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_thread_inspect_no_eval() only accepts the above */
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	/* drop the reference taken by the port-to-thread conversion */
	thread_deallocate(thread);
	return kr;
}
1779 
1780 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1781 special_port_allowed_with_thread_flavor(
1782 	int                  which,
1783 	mach_thread_flavor_t flavor)
1784 {
1785 	switch (flavor) {
1786 	case THREAD_FLAVOR_CONTROL:
1787 		return KERN_SUCCESS;
1788 
1789 	case THREAD_FLAVOR_READ:
1790 
1791 		switch (which) {
1792 		case THREAD_READ_PORT:
1793 		case THREAD_INSPECT_PORT:
1794 			return KERN_SUCCESS;
1795 		default:
1796 			return KERN_INVALID_CAPABILITY;
1797 		}
1798 
1799 	case THREAD_FLAVOR_INSPECT:
1800 
1801 		switch (which) {
1802 		case THREAD_INSPECT_PORT:
1803 			return KERN_SUCCESS;
1804 		default:
1805 			return KERN_INVALID_CAPABILITY;
1806 		}
1807 
1808 	default:
1809 		return KERN_INVALID_CAPABILITY;
1810 	}
1811 }
1812 
1813 /*
1814  *	Routine:	thread_set_special_port [kernel call]
1815  *	Purpose:
1816  *		Changes one of the thread's special ports,
1817  *		setting it to the supplied send right.
1818  *	Conditions:
1819  *		Nothing locked.  If successful, consumes
1820  *		the supplied send right.
1821  *	Returns:
1822  *		KERN_SUCCESS            Changed the special port.
1823  *		KERN_INVALID_ARGUMENT   The thread is null.
1824  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1825  *		KERN_FAILURE            The thread is dead.
1826  *		KERN_INVALID_ARGUMENT   Invalid special port.
1827  *		KERN_NO_ACCESS          Restricted access to set port.
1828  */
1829 
1830 kern_return_t
thread_set_special_port(thread_t thread,int which,ipc_port_t port)1831 thread_set_special_port(
1832 	thread_t                thread,
1833 	int                     which,
1834 	ipc_port_t              port)
1835 {
1836 	kern_return_t   result = KERN_SUCCESS;
1837 	thread_ro_t     tro = NULL;
1838 	ipc_port_t      old = IP_NULL;
1839 
1840 	if (thread == THREAD_NULL) {
1841 		return KERN_INVALID_ARGUMENT;
1842 	}
1843 
1844 	if (IP_VALID(port) && port->ip_immovable_send) {
1845 		return KERN_INVALID_RIGHT;
1846 	}
1847 
1848 	switch (which) {
1849 	case THREAD_KERNEL_PORT:
1850 #if CONFIG_CSR
1851 		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
1852 			/*
1853 			 * Only allow setting of thread-self
1854 			 * special port from user-space when SIP is
1855 			 * disabled (for Mach-on-Mach emulation).
1856 			 */
1857 			tro = get_thread_ro(thread);
1858 
1859 			thread_mtx_lock(thread);
1860 			if (thread->active) {
1861 				old = tro->tro_settable_self_port;
1862 				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
1863 				    tro, tro_settable_self_port, &port);
1864 			} else {
1865 				result = KERN_FAILURE;
1866 			}
1867 			thread_mtx_unlock(thread);
1868 
1869 			if (IP_VALID(old)) {
1870 				ipc_port_release_send(old);
1871 			}
1872 
1873 			return result;
1874 		}
1875 #else
1876 		(void)old;
1877 		(void)result;
1878 		(void)tro;
1879 #endif
1880 		return KERN_NO_ACCESS;
1881 
1882 	default:
1883 		return KERN_INVALID_ARGUMENT;
1884 	}
1885 }
1886 
1887 /*
1888  *	Routine:	task_get_special_port [kernel call]
1889  *	Purpose:
1890  *		Clones a send right for one of the task's
1891  *		special ports.
1892  *	Conditions:
1893  *		Nothing locked.
1894  *	Returns:
1895  *		KERN_SUCCESS		    Extracted a send right.
1896  *		KERN_INVALID_ARGUMENT	The task is null.
1897  *		KERN_FAILURE		    The task/space is dead.
1898  *		KERN_INVALID_ARGUMENT	Invalid special port.
1899  */
1900 
1901 static kern_return_t
task_get_special_port_internal(task_t task,int which,ipc_port_t * portp,mach_task_flavor_t flavor)1902 task_get_special_port_internal(
1903 	task_t          task,
1904 	int             which,
1905 	ipc_port_t      *portp,
1906 	mach_task_flavor_t        flavor)
1907 {
1908 	kern_return_t kr;
1909 	ipc_port_t port;
1910 
1911 	if (task == TASK_NULL) {
1912 		return KERN_INVALID_ARGUMENT;
1913 	}
1914 
1915 	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
1916 		return kr;
1917 	}
1918 
1919 	itk_lock(task);
1920 	if (!task->ipc_active) {
1921 		itk_unlock(task);
1922 		return KERN_FAILURE;
1923 	}
1924 
1925 	switch (which) {
1926 	case TASK_KERNEL_PORT:
1927 		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
1928 #if CONFIG_CSR
1929 		if (task->itk_settable_self != port) {
1930 			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
1931 		} else
1932 #endif
1933 		{
1934 			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
1935 		}
1936 		itk_unlock(task);
1937 		break;
1938 
1939 	case TASK_READ_PORT:
1940 	case TASK_INSPECT_PORT:
1941 		itk_unlock(task);
1942 		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
1943 		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
1944 		/* convert_task_to_port_with_flavor consumes a task reference */
1945 		task_reference(task);
1946 		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
1947 		break;
1948 
1949 	case TASK_NAME_PORT:
1950 		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
1951 		    task, IKOT_TASK_NAME);
1952 		itk_unlock(task);
1953 		break;
1954 
1955 	case TASK_HOST_PORT:
1956 		port = host_port_copy_send(task->itk_host);
1957 		itk_unlock(task);
1958 		break;
1959 
1960 	case TASK_BOOTSTRAP_PORT:
1961 		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
1962 		itk_unlock(task);
1963 		break;
1964 
1965 	case TASK_ACCESS_PORT:
1966 		port = ipc_port_copy_send_mqueue(task->itk_task_access);
1967 		itk_unlock(task);
1968 		break;
1969 
1970 	case TASK_DEBUG_CONTROL_PORT:
1971 		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
1972 		itk_unlock(task);
1973 		break;
1974 
1975 #if CONFIG_PROC_RESOURCE_LIMITS
1976 	case TASK_RESOURCE_NOTIFY_PORT:
1977 		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
1978 		itk_unlock(task);
1979 		break;
1980 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
1981 
1982 	default:
1983 		itk_unlock(task);
1984 		return KERN_INVALID_ARGUMENT;
1985 	}
1986 
1987 	*portp = port;
1988 	return KERN_SUCCESS;
1989 }
1990 
/* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t      *portp)
{
	/* thin wrapper: in-kernel callers always get the control flavor */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
2000 
/*
 *	Routine:	task_get_non_substituted_self
 *	Purpose:
 *		Produce a one-time substitution port wrapping a send right
 *		to the task's (possibly interposed) self port, for callers
 *		that are denied a movable control port.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	itk_lock(task);
	port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* interposed (settable) self takes precedence when present */
	if (task->itk_settable_self != port) {
		port = ipc_port_make_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		port = ipc_kobject_make_send(port, task, IKOT_TASK_CONTROL);
	}
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
2021 
/* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* Weakest translation (inspect, no eval); flavor is enforced below. */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * NOTE(review): kotype is sampled without the port lock; presumably
	 * safe because the task reference above pins the kobject binding —
	 * confirm against ipc_kobject lifecycle rules.
	 */
	kotype = ip_kotype(port);

#if CONFIG_MACF
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		/* Self control port: return the non-substituted self right. */
		if (kotype == IKOT_TASK_CONTROL) {
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* Map the caller's port capability onto the flavor used for the lookup. */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	task_deallocate(task);
	return kr;
}
2084 
2085 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2086 special_port_allowed_with_task_flavor(
2087 	int                which,
2088 	mach_task_flavor_t flavor)
2089 {
2090 	switch (flavor) {
2091 	case TASK_FLAVOR_CONTROL:
2092 		return KERN_SUCCESS;
2093 
2094 	case TASK_FLAVOR_READ:
2095 
2096 		switch (which) {
2097 		case TASK_READ_PORT:
2098 		case TASK_INSPECT_PORT:
2099 		case TASK_NAME_PORT:
2100 			return KERN_SUCCESS;
2101 		default:
2102 			return KERN_INVALID_CAPABILITY;
2103 		}
2104 
2105 	case TASK_FLAVOR_INSPECT:
2106 
2107 		switch (which) {
2108 		case TASK_INSPECT_PORT:
2109 		case TASK_NAME_PORT:
2110 			return KERN_SUCCESS;
2111 		default:
2112 			return KERN_INVALID_CAPABILITY;
2113 		}
2114 
2115 	default:
2116 		return KERN_INVALID_CAPABILITY;
2117 	}
2118 }
2119 
/*
 *	Routine:	task_set_special_port [MIG call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_RIGHT	Port is marked as immovable.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to set port.
 *		KERN_DENIED		Denied by MAC policy.
 */

kern_return_t
task_set_special_port_from_user(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MAC policy gets first refusal before any kernel-side checks run. */
	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
		return KERN_DENIED;
	}
#endif

	return task_set_special_port(task, which, port);
}
2155 
/* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Driver tasks are never allowed to rewire special ports. */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/* Immovable send rights may not be stashed as special ports. */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2195 
/*
 *	Routine:	task_set_special_port_internal
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to overwrite port.
 */

kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	/* Swap in the new right; the displaced one is released after unlock. */
	switch (which) {
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Drop the old send right outside the itk lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2289 /*
2290  *	Routine:	mach_ports_register [kernel call]
2291  *	Purpose:
2292  *		Stash a handful of port send rights in the task.
2293  *		Child tasks will inherit these rights, but they
2294  *		must use mach_ports_lookup to acquire them.
2295  *
2296  *		The rights are supplied in a (wired) kalloc'd segment.
2297  *		Rights which aren't supplied are assumed to be null.
2298  *	Conditions:
2299  *		Nothing locked.  If successful, consumes
2300  *		the supplied rights and memory.
2301  *	Returns:
2302  *		KERN_SUCCESS		    Stashed the port rights.
2303  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2304  *		KERN_INVALID_ARGUMENT	The task is null.
2305  *		KERN_INVALID_ARGUMENT	The task is dead.
2306  *		KERN_INVALID_ARGUMENT	The memory param is null.
2307  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2308  */
2309 
2310 kern_return_t
_kernelrpc_mach_ports_register3(task_t task,mach_port_t port1,mach_port_t port2,mach_port_t port3)2311 _kernelrpc_mach_ports_register3(
2312 	task_t                  task,
2313 	mach_port_t             port1,
2314 	mach_port_t             port2,
2315 	mach_port_t             port3)
2316 {
2317 	ipc_port_t ports[TASK_PORT_REGISTER_MAX] = {
2318 		port1, port2, port3,
2319 	};
2320 
2321 	if (task == TASK_NULL) {
2322 		return KERN_INVALID_ARGUMENT;
2323 	}
2324 
2325 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2326 		if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
2327 			return KERN_INVALID_RIGHT;
2328 		}
2329 	}
2330 
2331 	itk_lock(task);
2332 	if (!task->ipc_active) {
2333 		itk_unlock(task);
2334 		return KERN_INVALID_ARGUMENT;
2335 	}
2336 
2337 	/*
2338 	 *	Replace the old send rights with the new.
2339 	 *	Release the old rights after unlocking.
2340 	 */
2341 
2342 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2343 		ipc_port_t old;
2344 
2345 		old = task->itk_registered[i];
2346 		task->itk_registered[i] = ports[i];
2347 		ports[i] = old;
2348 	}
2349 
2350 	itk_unlock(task);
2351 
2352 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2353 		ipc_port_release_send(ports[i]);
2354 	}
2355 
2356 	return KERN_SUCCESS;
2357 }
2358 
2359 /*
2360  *	Routine:	mach_ports_lookup [kernel call]
2361  *	Purpose:
2362  *		Retrieves (clones) the stashed port send rights.
2363  *	Conditions:
2364  *		Nothing locked.  If successful, the caller gets
2365  *		rights and memory.
2366  *	Returns:
2367  *		KERN_SUCCESS		Retrieved the send rights.
2368  *		KERN_INVALID_ARGUMENT	The task is null.
2369  *		KERN_INVALID_ARGUMENT	The task is dead.
2370  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2371  */
2372 
2373 kern_return_t
_kernelrpc_mach_ports_lookup3(task_t task,ipc_port_t * port1,ipc_port_t * port2,ipc_port_t * port3)2374 _kernelrpc_mach_ports_lookup3(
2375 	task_t                  task,
2376 	ipc_port_t             *port1,
2377 	ipc_port_t             *port2,
2378 	ipc_port_t             *port3)
2379 {
2380 	if (task == TASK_NULL) {
2381 		return KERN_INVALID_ARGUMENT;
2382 	}
2383 
2384 	itk_lock(task);
2385 	if (!task->ipc_active) {
2386 		itk_unlock(task);
2387 		return KERN_INVALID_ARGUMENT;
2388 	}
2389 
2390 	*port1 = ipc_port_copy_send_any(task->itk_registered[0]);
2391 	*port2 = ipc_port_copy_send_any(task->itk_registered[1]);
2392 	*port3 = ipc_port_copy_send_any(task->itk_registered[2]);
2393 
2394 	itk_unlock(task);
2395 
2396 	return KERN_SUCCESS;
2397 }
2398 
/*
 *	Routine:	task_conversion_eval_internal
 *	Purpose:
 *		Security policy check gating translation of a task (or thread)
 *		port into an object reference of the given flavor.
 *	Args:
 *		caller    - task requesting the translation
 *		victim    - task whose port is being translated
 *		out_trans - TRUE for out-translation (kernel -> user),
 *		            FALSE for in-translation (user -> kernel)
 *		flavor    - control or read
 *	Returns:
 *		KERN_SUCCESS or KERN_INVALID_SECURITY; may panic on
 *		disallowed control-port in-translation (non-macOS).
 */
static kern_return_t
task_conversion_eval_internal(
	task_t             caller,
	task_t             victim,
	boolean_t          out_trans,
	int                flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/*
	 * Both asserts accept the same values: task and thread flavor
	 * constants alias (see the static_asserts in task_conversion_eval()).
	 */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* cs_relax_platform_task_ports is a development escape hatch. */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2493 
/* In-translation (user -> kernel) flavor of the conversion policy check. */
kern_return_t
task_conversion_eval(task_t caller, task_t victim, int flavor)
{
	/* flavor is mach_task_flavor_t or mach_thread_flavor_t */
	static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
	static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
	return task_conversion_eval_internal(caller, victim, FALSE, flavor);
}
2502 
/* Out-translation (kernel -> user) flavor; control ports only. */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
{
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
	return task_conversion_eval_internal(caller, victim, TRUE, flavor);
}
2509 
2510 /*
2511  *	Routine:	task_port_kotype_valid_for_flavor
2512  *	Purpose:
2513  *		Check whether the kobject type of a mach port
2514  *      is valid for conversion to a task of given flavor.
2515  */
2516 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2517 task_port_kotype_valid_for_flavor(
2518 	natural_t          kotype,
2519 	mach_task_flavor_t flavor)
2520 {
2521 	switch (flavor) {
2522 	/* Ascending capability */
2523 	case TASK_FLAVOR_NAME:
2524 		if (kotype == IKOT_TASK_NAME) {
2525 			return TRUE;
2526 		}
2527 		OS_FALLTHROUGH;
2528 	case TASK_FLAVOR_INSPECT:
2529 		if (kotype == IKOT_TASK_INSPECT) {
2530 			return TRUE;
2531 		}
2532 		OS_FALLTHROUGH;
2533 	case TASK_FLAVOR_READ:
2534 		if (kotype == IKOT_TASK_READ) {
2535 			return TRUE;
2536 		}
2537 		OS_FALLTHROUGH;
2538 	case TASK_FLAVOR_CONTROL:
2539 		if (kotype == IKOT_TASK_CONTROL) {
2540 			return TRUE;
2541 		}
2542 		break;
2543 	default:
2544 		panic("strange task flavor");
2545 	}
2546 
2547 	return FALSE;
2548 }
2549 
/*
 *	Routine: convert_port_to_task_with_flavor_locked_noref
 *	Purpose:
 *		Internal helper routine to convert from a locked port to a task.
 *	Args:
 *		port   - target port
 *		flavor - requested task port flavor
 *		options - port translation options
 *	Conditions:
 *		Port is locked and active.
 *		Returns the task WITHOUT a reference (caller must take its own
 *		reference while still holding the port lock), or TASK_NULL.
 */
static task_t
convert_port_to_task_with_flavor_locked_noref(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_kobject_type_t type = ip_kotype(port);
	task_t task;

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* A port only translates to a flavor at or below its own capability. */
	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
		return TASK_NULL;
	}

	task = ipc_kobject_get_locked(port, type);
	if (task == TASK_NULL) {
		return TASK_NULL;
	}

	/* Corpse tasks translate only when the caller explicitly allows them. */
	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
		assert(flavor == TASK_FLAVOR_CONTROL);
		return TASK_NULL;
	}

	/* TODO: rdar://42389187 */
	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
	}

	/* Security policy gate (Developer Mode, platform-binary checks, ...). */
	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
	    task_conversion_eval(current_task(), task, flavor)) {
		return TASK_NULL;
	}

	return task;
}
2599 
/*
 *	Routine: convert_port_to_task_with_flavor_locked
 *	Purpose:
 *		Internal helper routine to convert from a locked port to a task.
 *	Args:
 *		port   - target port
 *		flavor - requested task port flavor
 *		options - port translation options
 *		grp    - task reference group
 *	Conditions:
 *		Port is locked and active.
 *		Produces task ref or TASK_NULL.
 */
static task_t
convert_port_to_task_with_flavor_locked(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options,
	task_grp_t              grp)
{
	task_t task;

	task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
	    options);

	/* The port lock is still held, so taking a reference here is safe. */
	if (task != TASK_NULL) {
		task_reference_grp(task, grp);
	}

	return task;
}
2631 
/*
 *	Routine:	convert_port_to_task_with_flavor
 *	Purpose:
 *		Internal helper for converting from a port to a task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Args:
 *		port   - target port
 *		flavor - requested task port flavor
 *		options - port translation options
 *		grp    - task reference group
 *	Conditions:
 *		Nothing locked.
 */
static task_t
convert_port_to_task_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options,
	task_grp_t         grp)
{
	task_t task = TASK_NULL;
	task_t self = current_task();

	if (IP_VALID(port)) {
		/*
		 * Fast path: the caller is translating its own self port,
		 * which needs no lock, flavor check, or conversion eval.
		 */
		if (port == self->itk_self) {
			task_reference_grp(self, grp);
			return self;
		}

		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked(port,
			    flavor, options, grp);
		}
		ip_mq_unlock(port);
	}

	return task;
}
2672 
/* Control-flavor conversion for kernel-internal callers; no corpses. */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}

/* Control-flavor conversion on behalf of MIG-generated stubs; no corpses. */
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}

/* Read-flavor conversion; corpse tasks are acceptable. */
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

/* Read-flavor conversion that bypasses the conversion security eval. */
static task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

/* Read-flavor conversion for MIG; corpse tasks are acceptable. */
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}

/* Inspect-flavor conversion; eval is always skipped for inspect (rdar://42389187). */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

/* Same as convert_port_to_task_inspect(); kept as a distinct entry point. */
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

/* Inspect-flavor conversion for MIG. */
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}

/* Name-flavor (weakest) conversion; eval is always skipped for name ports. */
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

/* Name-flavor conversion for MIG. */
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2752 
2753 /*
2754  *	Routine:	convert_port_to_task_policy
2755  *	Purpose:
2756  *		Convert from a port to a task.
2757  *		Doesn't consume the port ref; produces a task ref,
2758  *		which may be null.
2759  *		If the port is being used with task_port_set(), any task port
2760  *		type other than TASK_CONTROL requires an entitlement. If the
2761  *		port is being used with task_port_get(), TASK_NAME requires an
2762  *		entitlement.
2763  *	Conditions:
2764  *		Nothing locked.
2765  */
2766 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2767 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2768 {
2769 	task_t task = TASK_NULL;
2770 
2771 	if (!IP_VALID(port)) {
2772 		return TASK_NULL;
2773 	}
2774 
2775 	task = set ?
2776 	    convert_port_to_task_mig(port) :
2777 	    convert_port_to_task_inspect_mig(port);
2778 
2779 	if (task == TASK_NULL &&
2780 	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2781 		task = convert_port_to_task_name_mig(port);
2782 	}
2783 
2784 	return task;
2785 }
2786 
/* Task-policy setter translation: requires a control port. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}

/* Task-policy getter translation: an inspect port suffices. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2798 
/*
 *	Routine:	convert_port_to_task_suspension_token
 *	Purpose:
 *		Convert from a port to a task suspension token.
 *		Doesn't consume the port ref; produces a suspension token ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
static task_suspension_token_t
convert_port_to_task_suspension_token_grp(
	ipc_port_t              port,
	task_grp_t              grp)
{
	task_suspension_token_t task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* Only IKOT_TASK_RESUME ports translate to a suspension token. */
		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
		if (task != TASK_NULL) {
			/* Reference taken under the port lock to pin the kobject. */
			task_reference_grp(task, grp);
		}
		ip_mq_unlock(port);
	}

	return task;
}
2826 
/* Suspension-token translation charged to the external reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}

/* Suspension-token translation on behalf of MIG-generated stubs. */
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}

/* Suspension-token translation for kernel-internal callers. */
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2847 
/*
 *	Routine:	convert_port_to_space_with_flavor
 *	Purpose:
 *		Internal helper for converting from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 *	Args:
 *		port   - target port
 *		flavor - requested ipc space flavor
 *		options - port translation options
 *	Conditions:
 *		Nothing locked.
 */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* Name ports carry no space access. */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2899 
/* Control-level space conversion; full eval, no corpses. */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}

/* Read-level space conversion; corpse tasks are acceptable. */
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}

/* Read-level space conversion that bypasses the conversion security eval. */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}

/* Inspect-level space conversion; eval is always skipped for inspect. */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2931 
/*
 *	Routine:	convert_port_to_map_with_flavor
 *	Purpose:
 *		Internal helper for converting from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 *	Args:
 *		port   - target port
 *		flavor - requested vm map flavor
 *		options - port translation options
 *	Conditions:
 *		Nothing locked.
 */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* Map access always goes through the full conversion eval. */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* Hard stop: never hand userspace a kernel-pmap map. */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
2994 
/* Control-level map conversion; full eval, no corpses. */
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}

/* Read-level map conversion; corpse tasks are acceptable. */
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}

vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* there is no vm_map_inspect_t routines at the moment. */
	return VM_MAP_INSPECT_NULL;
}
3018 
3019 /*
3020  *	Routine:	thread_port_kotype_valid_for_flavor
3021  *	Purpose:
3022  *		Check whether the kobject type of a mach port
3023  *      is valid for conversion to a thread of given flavor.
3024  */
3025 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)3026 thread_port_kotype_valid_for_flavor(
3027 	natural_t            kotype,
3028 	mach_thread_flavor_t flavor)
3029 {
3030 	switch (flavor) {
3031 	/* Ascending capability */
3032 	case THREAD_FLAVOR_INSPECT:
3033 		if (kotype == IKOT_THREAD_INSPECT) {
3034 			return TRUE;
3035 		}
3036 		OS_FALLTHROUGH;
3037 	case THREAD_FLAVOR_READ:
3038 		if (kotype == IKOT_THREAD_READ) {
3039 			return TRUE;
3040 		}
3041 		OS_FALLTHROUGH;
3042 	case THREAD_FLAVOR_CONTROL:
3043 		if (kotype == IKOT_THREAD_CONTROL) {
3044 			return TRUE;
3045 		}
3046 		break;
3047 	default:
3048 		panic("strange thread flavor");
3049 	}
3050 
3051 	return FALSE;
3052 }
3053 
/*
 *	Routine: convert_port_to_thread_with_flavor_locked
 *	Purpose:
 *		Internal helper routine to convert from a locked port to a thread.
 *	Args:
 *		port   - target port
 *		flavor - requested thread port flavor
 *		options - port translation options
 *	Conditions:
 *		Port is locked and active.
 *		Produces a thread ref or THREAD_NULL.
 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t               port,
	mach_thread_flavor_t     flavor,
	port_intrans_options_t   options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* port's kobject type must be at least as strong as the flavor asked for */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* optionally refuse to translate the calling thread's own port */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* caller only accepts threads that belong to the current task */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* corpse threads only translate when explicitly allowed */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* security policy evaluation against the owning task, unless skipped */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	thread_reference(thread);
	return thread;
}
3120 
3121 /*
3122  *	Routine:	convert_port_to_thread_with_flavor
3123  *	Purpose:
3124  *		Internal helper for converting from a port to a thread.
3125  *		Doesn't consume the port ref; produces a thread ref,
3126  *		which may be null.
3127  *	Args:
3128  *		port   - target port
3129  *		flavor - requested thread port flavor
3130  *		options - port translation options
3131  *	Conditions:
3132  *		Nothing locked.
3133  */
3134 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3135 convert_port_to_thread_with_flavor(
3136 	ipc_port_t           port,
3137 	mach_thread_flavor_t flavor,
3138 	port_intrans_options_t options)
3139 {
3140 	thread_t thread = THREAD_NULL;
3141 
3142 	if (IP_VALID(port)) {
3143 		ip_mq_lock(port);
3144 		if (ip_active(port)) {
3145 			thread = convert_port_to_thread_with_flavor_locked(port,
3146 			    flavor, options);
3147 		}
3148 		ip_mq_unlock(port);
3149 	}
3150 
3151 	return thread;
3152 }
3153 
3154 thread_t
convert_port_to_thread(ipc_port_t port)3155 convert_port_to_thread(
3156 	ipc_port_t              port)
3157 {
3158 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3159 	           PORT_INTRANS_OPTIONS_NONE);
3160 }
3161 
3162 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3163 convert_port_to_thread_read(
3164 	ipc_port_t              port)
3165 {
3166 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3167 	           PORT_INTRANS_ALLOW_CORPSE_TASK);
3168 }
3169 
3170 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3171 convert_port_to_thread_read_no_eval(
3172 	ipc_port_t              port)
3173 {
3174 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3175 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3176 }
3177 
3178 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3179 convert_port_to_thread_inspect(
3180 	ipc_port_t              port)
3181 {
3182 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3183 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3184 }
3185 
3186 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3187 convert_port_to_thread_inspect_no_eval(
3188 	ipc_port_t              port)
3189 {
3190 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3191 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3192 }
3193 
3194 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3195 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3196 {
3197 	switch (flavor) {
3198 	case THREAD_FLAVOR_CONTROL:
3199 		return IKOT_THREAD_CONTROL;
3200 	case THREAD_FLAVOR_READ:
3201 		return IKOT_THREAD_READ;
3202 	default:
3203 		return IKOT_THREAD_INSPECT;
3204 	}
3205 }
3206 
/*
 *	Routine:	convert_thread_to_port_with_flavor
 *	Purpose:
 *		Convert from a thread to a port of given flavor.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid (IP_NULL if the thread's IPC is
 *		inactive, IP_DEAD if denied by security policy).
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t              thread,
	thread_ro_t           tro,
	mach_thread_flavor_t  flavor)
{
	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	if (!thread->ipc_active) {
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		/* control port always exists while ipc_active; just make a send */
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	} else if (IP_VALID(port)) {
		(void)ipc_kobject_make_send_nsrequest(port, thread, kotype);
	} else {
		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(thread, kotype,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
		/*
		 * If Developer Mode is off, substitute read port for control
		 * port if copying out to owning task's space, for the sake of
		 * in-process exception handler.
		 *
		 * Also see: exception_deliver().
		 */
		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
			ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
			port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
		}
		/* publish the lazily created port into the read-only thread data */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3280 
3281 ipc_port_t
convert_thread_to_port(thread_t thread)3282 convert_thread_to_port(
3283 	thread_t                thread)
3284 {
3285 	thread_ro_t tro = get_thread_ro(thread);
3286 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3287 }
3288 
3289 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3290 convert_thread_read_to_port(thread_read_t thread)
3291 {
3292 	thread_ro_t tro = get_thread_ro(thread);
3293 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3294 }
3295 
3296 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3297 convert_thread_inspect_to_port(thread_inspect_t thread)
3298 {
3299 	thread_ro_t tro = get_thread_ro(thread);
3300 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3301 }
3302 
3303 void
convert_thread_array_to_ports(thread_act_array_t array,size_t count,mach_thread_flavor_t flavor)3304 convert_thread_array_to_ports(
3305 	thread_act_array_t      array,
3306 	size_t                  count,
3307 	mach_thread_flavor_t    flavor)
3308 {
3309 	thread_t *thread_list = (thread_t *)array;
3310 	task_t task_self = current_task();
3311 
3312 	for (size_t i = 0; i < count; i++) {
3313 		thread_t   thread = thread_list[i];
3314 		ipc_port_t port;
3315 
3316 		switch (flavor) {
3317 		case THREAD_FLAVOR_CONTROL:
3318 			if (get_threadtask(thread) == task_self) {
3319 				port = convert_thread_to_port_pinned(thread);
3320 			} else {
3321 				port = convert_thread_to_port(thread);
3322 			}
3323 			break;
3324 		case THREAD_FLAVOR_READ:
3325 			port = convert_thread_read_to_port(thread);
3326 			break;
3327 		case THREAD_FLAVOR_INSPECT:
3328 			port = convert_thread_inspect_to_port(thread);
3329 			break;
3330 		}
3331 
3332 		array[i].port = port;
3333 	}
3334 }
3335 
3336 
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference
 *		A name of MACH_PORT_NULL is valid for the null thread.
 *		Always performs a control-flavor translation; the caller
 *		must not pass PORT_INTRANS_ALLOW_CORPSE_TASK or
 *		PORT_INTRANS_SKIP_TASK_EVAL (asserted below).
 *	Conditions:
 *		Nothing locked.
 */
thread_t
port_name_to_thread(
	mach_port_name_t         name,
	port_intrans_options_t options)
{
	thread_t        thread = THREAD_NULL;
	ipc_port_t      kport;
	kern_return_t kr;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
			thread = convert_port_to_thread_with_flavor_locked(kport,
			    THREAD_FLAVOR_CONTROL, options);
			ip_mq_unlock(kport);
		}
	}

	return thread;
}
3368 
3369 /*
3370  *	Routine:	port_name_is_pinned_itk_self
3371  *	Purpose:
3372  *		Returns whether this port name is for the pinned
3373  *		mach_task_self (if it exists).
3374  *
3375  *		task_self_trap() when the task port is pinned,
3376  *		will memorize the name the port has in the space
3377  *		in ip_receiver_name, which we can use to fast-track
3378  *		this answer without taking any lock.
3379  *
3380  *		ipc_task_disable() will set `ip_receiver_name` back to
3381  *		MACH_PORT_SPECIAL_DEFAULT.
3382  *
3383  *	Conditions:
3384  *		self must be current_task()
3385  *		Nothing locked.
3386  */
3387 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3388 port_name_is_pinned_itk_self(
3389 	task_t             self,
3390 	mach_port_name_t   name)
3391 {
3392 	ipc_port_t kport = self->itk_self;
3393 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3394 	       kport->ip_pinned && ip_get_receiver_name(kport) == name;
3395 }
3396 
/*
 *	Routine:	port_name_to_current_task*_noref
 *	Purpose:
 *		Convert from a port name to current_task()
 *		A name of MACH_PORT_NULL is valid for the null task.
 *
 *		If current_task() is in the process of being terminated,
 *		this might return a non NULL task even when port_name_to_task()
 *		would return TASK_NULL.
 *
 *		However, this is an acceptable race that can't be controlled by
 *		userspace, and that downstream code using the returned task
 *		has to handle anyway.
 *
 *		ipc_space_disable() does try to narrow this race,
 *		by causing port_name_is_pinned_itk_self() to fail.
 *
 *	Returns:
 *		current_task() if the port name was for current_task()
 *		at the appropriate flavor.
 *
 *		TASK_NULL otherwise.
 *
 *	Conditions:
 *		Nothing locked.
 *		No task reference is produced (hence "noref").
 */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t   name,
	mach_task_flavor_t flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* lockless fast path for the pinned mach_task_self name */
	if (port_name_is_pinned_itk_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			ipc_kobject_type_t type = ip_kotype(kport);
			if (task_port_kotype_valid_for_flavor(type, flavor)) {
				task = ipc_kobject_get_locked(kport, type);
			}
			ip_mq_unlock(kport);
			/* only the caller's own task qualifies; anything else is rejected */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3453 
3454 task_t
port_name_to_current_task_noref(mach_port_name_t name)3455 port_name_to_current_task_noref(
3456 	mach_port_name_t name)
3457 {
3458 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3459 }
3460 
3461 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3462 port_name_to_current_task_read_noref(
3463 	mach_port_name_t name)
3464 {
3465 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3466 }
3467 
/*
 *	Routine:	port_name_to_task
 *	Purpose:
 *		Convert from a port name to a task reference
 *		A name of MACH_PORT_NULL is valid for the null task.
 *		The reference is counted against the given task_grp_t.
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_task_grp(
	mach_port_name_t name,
	task_grp_t       grp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* fast path: the pinned mach_task_self name needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(kport,
			    TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, grp);
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3502 
3503 task_t
port_name_to_task_external(mach_port_name_t name)3504 port_name_to_task_external(
3505 	mach_port_name_t name)
3506 {
3507 	return port_name_to_task_grp(name, TASK_GRP_EXTERNAL);
3508 }
3509 
3510 task_t
port_name_to_task_kernel(mach_port_name_t name)3511 port_name_to_task_kernel(
3512 	mach_port_name_t name)
3513 {
3514 	return port_name_to_task_grp(name, TASK_GRP_KERNEL);
3515 }
3516 
/*
 *	Routine:	port_name_to_task_read
 *	Purpose:
 *		Convert from a port name to a task reference
 *		A name of MACH_PORT_NULL is valid for the null task.
 *		Read-flavor translation; corpse tasks are allowed.
 *	Conditions:
 *		Nothing locked.
 */
task_read_t
port_name_to_task_read(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_read_t tr = TASK_READ_NULL;
	task_t self = current_task();

	/* fast path: the pinned mach_task_self name needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			tr = convert_port_to_task_with_flavor_locked(kport,
			    TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK,
			    TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tr;
}
3551 
/*
 *	Routine:	port_name_to_task_read_no_eval
 *	Purpose:
 *		Convert from a port name to a task reference
 *		A name of MACH_PORT_NULL is valid for the null task.
 *		Skips task_conversion_eval() during conversion.
 *	Conditions:
 *		Nothing locked.
 */
task_read_t
port_name_to_task_read_no_eval(
	mach_port_name_t name)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_read_t tr = TASK_READ_NULL;
	task_t self = current_task();

	/* fast path: the pinned mach_task_self name needs no space lookup */
	if (port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, TASK_GRP_KERNEL);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
		    PORT_INTRANS_ALLOW_CORPSE_TASK;

		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			tr = convert_port_to_task_with_flavor_locked(kport,
			    TASK_FLAVOR_READ, options, TASK_GRP_KERNEL);
			ip_mq_unlock(kport);
		}
	}
	return tr;
}
3589 
3590 /*
3591  *	Routine:	port_name_to_task_name
3592  *	Purpose:
3593  *		Convert from a port name to a task reference
3594  *		A name of MACH_PORT_NULL is valid for the null task.
3595  *	Conditions:
3596  *		Nothing locked.
3597  */
3598 task_name_t
port_name_to_task_name(mach_port_name_t name)3599 port_name_to_task_name(
3600 	mach_port_name_t name)
3601 {
3602 	ipc_port_t kport;
3603 	kern_return_t kr;
3604 	task_name_t tn = TASK_NAME_NULL;
3605 	task_t self = current_task();
3606 
3607 	if (port_name_is_pinned_itk_self(self, name)) {
3608 		task_reference_grp(self, TASK_GRP_KERNEL);
3609 		return self;
3610 	}
3611 
3612 	if (MACH_PORT_VALID(name)) {
3613 		port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3614 		    PORT_INTRANS_ALLOW_CORPSE_TASK;
3615 
3616 		kr = ipc_port_translate_send(current_space(), name, &kport);
3617 		if (kr == KERN_SUCCESS) {
3618 			/* port is locked and active */
3619 			tn = convert_port_to_task_with_flavor_locked(kport,
3620 			    TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL);
3621 			ip_mq_unlock(kport);
3622 		}
3623 	}
3624 	return tn;
3625 }
3626 
3627 /*
3628  *	Routine:	port_name_to_task_id_token
3629  *	Purpose:
3630  *		Convert from a port name to a task identity token reference
3631  *	Conditions:
3632  *		Nothing locked.
3633  */
3634 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3635 port_name_to_task_id_token(
3636 	mach_port_name_t name)
3637 {
3638 	ipc_port_t port;
3639 	kern_return_t kr;
3640 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3641 
3642 	if (MACH_PORT_VALID(name)) {
3643 		kr = ipc_port_translate_send(current_space(), name, &port);
3644 		if (kr == KERN_SUCCESS) {
3645 			token = convert_port_to_task_id_token(port);
3646 			ip_mq_unlock(port);
3647 		}
3648 	}
3649 	return token;
3650 }
3651 
3652 /*
3653  *	Routine:	port_name_to_host
3654  *	Purpose:
3655  *		Convert from a port name to a host pointer.
3656  *		NOTE: This does _not_ return a +1 reference to the host_t
3657  *	Conditions:
3658  *		Nothing locked.
3659  */
3660 host_t
port_name_to_host(mach_port_name_t name)3661 port_name_to_host(
3662 	mach_port_name_t name)
3663 {
3664 	host_t host = HOST_NULL;
3665 	kern_return_t kr;
3666 	ipc_port_t port;
3667 
3668 	if (MACH_PORT_VALID(name)) {
3669 		kr = ipc_port_translate_send(current_space(), name, &port);
3670 		if (kr == KERN_SUCCESS) {
3671 			host = convert_port_to_host(port);
3672 			ip_mq_unlock(port);
3673 		}
3674 	}
3675 	return host;
3676 }
3677 
3678 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3679 task_flavor_to_kotype(mach_task_flavor_t flavor)
3680 {
3681 	switch (flavor) {
3682 	case TASK_FLAVOR_CONTROL:
3683 		return IKOT_TASK_CONTROL;
3684 	case TASK_FLAVOR_READ:
3685 		return IKOT_TASK_READ;
3686 	case TASK_FLAVOR_INSPECT:
3687 		return IKOT_TASK_INSPECT;
3688 	default:
3689 		return IKOT_TASK_NAME;
3690 	}
3691 }
3692 
/*
 *	Routine:	convert_task_to_port_with_flavor
 *	Purpose:
 *		Convert from a task to a port of given flavor.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid (IP_NULL if the task's IPC is
 *		inactive, IP_DEAD if denied by security policy).
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t              task,
	mach_task_flavor_t  flavor,
	task_grp_t          grp)
{
	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	itk_lock(task);

	if (!task->ipc_active) {
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	/* control and name ports always exist while ipc_active */
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		port = ipc_kobject_make_send(task->itk_task_ports[flavor],
		    task, kotype);
		break;
	/*
	 * Claim a send right on the task read/inspect port,
	 * and request a no-senders notification on that port
	 * (if none outstanding).
	 *
	 * The task's itk_lock is used to synchronize the handling
	 * of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		port = task->itk_task_ports[flavor];
		if (IP_VALID(port)) {
			(void)ipc_kobject_make_send_nsrequest(port,
			    task, kotype);
		} else {
			/* lazily allocate the read/inspect port on first use */
			port = ipc_kobject_alloc_port(task, kotype,
			    IPC_KOBJECT_ALLOC_MAKE_SEND |
			    IPC_KOBJECT_ALLOC_NSREQUEST |
			    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
			/*
			 * If Developer Mode is off, substitute read port for control port if
			 * copying out to owning task's space, for the sake of in-process
			 * exception handler.
			 *
			 * Also see: exception_deliver().
			 */
			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
				ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
				port->ip_kolabel->ikol_alt_port = task->itk_self;
			}

			task->itk_task_ports[flavor] = port;
		}
		break;
	}

exit:
	itk_unlock(task);
	task_deallocate_grp(task, grp);
	return port;
}
3775 
/*
 *	Routine:	convert_corpse_to_port_and_nsrequest
 *	Purpose:
 *		Produce a send right for a corpse task's control port and
 *		arm a no-senders notification on it. The corpse must not
 *		already have outstanding send rights (asserted).
 *		Consumes the corpse task ref.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	/* first send right for this corpse: srights must still be zero */
	assert(port->ip_srights == 0);
	kr = ipc_kobject_make_send_nsrequest(port, corpse, IKOT_TASK_CONTROL);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	task_deallocate(corpse);
	return port;
}
3794 
3795 ipc_port_t
convert_task_to_port(task_t task)3796 convert_task_to_port(
3797 	task_t          task)
3798 {
3799 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3800 }
3801 
3802 ipc_port_t
convert_task_read_to_port(task_read_t task)3803 convert_task_read_to_port(
3804 	task_read_t          task)
3805 {
3806 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3807 }
3808 
3809 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3810 convert_task_inspect_to_port(
3811 	task_inspect_t          task)
3812 {
3813 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3814 }
3815 
3816 ipc_port_t
convert_task_name_to_port(task_name_t task)3817 convert_task_name_to_port(
3818 	task_name_t             task)
3819 {
3820 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3821 }
3822 
3823 ipc_port_t
convert_task_to_port_external(task_t task)3824 convert_task_to_port_external(task_t task)
3825 {
3826 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3827 }
3828 
3829 ipc_port_t
convert_task_read_to_port_external(task_t task)3830 convert_task_read_to_port_external(task_t task)
3831 {
3832 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3833 }
3834 
/*
 *	Routine:	convert_task_to_port_pinned
 *	Purpose:
 *		Produce a send right for the current task's own (pinned)
 *		control port. Consumes a task ref; returns IP_NULL if the
 *		task's IPC is no longer active.
 *	Conditions:
 *		task must be current_task(); nothing locked.
 */
ipc_port_t
convert_task_to_port_pinned(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);

	if (task->ipc_active) {
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
	}

	/* immovable tasks must hand out pinned, immovable send rights */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	task_deallocate(task);
	return port;
}
3859 
3860 void
convert_task_array_to_ports(task_array_t array,size_t count,mach_task_flavor_t flavor)3861 convert_task_array_to_ports(
3862 	task_array_t            array,
3863 	size_t                  count,
3864 	mach_task_flavor_t      flavor)
3865 {
3866 	task_t *task_list = (task_t *)array;
3867 	task_t task_self = current_task();
3868 
3869 	for (size_t i = 0; i < count; i++) {
3870 		task_t     task = task_list[i];
3871 		ipc_port_t port;
3872 
3873 		switch (flavor) {
3874 		case TASK_FLAVOR_CONTROL:
3875 			if (task == task_self) {
3876 				/* if current_task(), return pinned port */
3877 				port = convert_task_to_port_pinned(task);
3878 			} else {
3879 				port = convert_task_to_port(task);
3880 			}
3881 			break;
3882 		case TASK_FLAVOR_READ:
3883 			port = convert_task_read_to_port(task);
3884 			break;
3885 		case TASK_FLAVOR_INSPECT:
3886 			port = convert_task_inspect_to_port(task);
3887 			break;
3888 		case TASK_FLAVOR_NAME:
3889 			port = convert_task_name_to_port(task);
3890 			break;
3891 		}
3892 
3893 		array[i].port = port;
3894 	}
3895 }
3896 
/*
 *	Routine:	convert_task_suspension_token_to_port_grp
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t         task,
	task_grp_t                      grp)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* lazily allocate the resume port on first use */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = task->itk_resume;
		ipc_kobject_require(port, task, IKOT_TASK_RESUME);
		port = ipc_port_make_sonce(port);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* terminated task: no resume right to hand out */
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate_grp(task, grp);

	return port;
}
3940 
3941 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3942 convert_task_suspension_token_to_port_external(
3943 	task_suspension_token_t         task)
3944 {
3945 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3946 }
3947 
3948 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3949 convert_task_suspension_token_to_port_mig(
3950 	task_suspension_token_t         task)
3951 {
3952 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3953 }
3954 
/*
 *	Routine:	convert_thread_to_port_pinned
 *	Purpose:
 *		Produce a send right for a thread's own (pinned) control
 *		port. Consumes a thread ref; returns IP_NULL if the
 *		thread's IPC is no longer active.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_thread_to_port_pinned(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t  port = IP_NULL;

	thread_mtx_lock(thread);

	if (thread->ipc_active) {
		port = ipc_kobject_make_send(tro->tro_self_port,
		    thread, IKOT_THREAD_CONTROL);
	}

	/* immovable tasks must hand out immovable thread send rights */
	if (port && task_is_immovable(tro->tro_task)) {
		assert(ip_is_immovable_send(port));
	}

	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3977 /*
3978  *	Routine:	space_deallocate
3979  *	Purpose:
3980  *		Deallocate a space ref produced by convert_port_to_space.
3981  *	Conditions:
3982  *		Nothing locked.
3983  */
3984 
3985 void
space_deallocate(ipc_space_t space)3986 space_deallocate(
3987 	ipc_space_t     space)
3988 {
3989 	if (space != IS_NULL) {
3990 		is_release(space);
3991 	}
3992 }
3993 
3994 /*
3995  *	Routine:	space_read_deallocate
3996  *	Purpose:
3997  *		Deallocate a space read ref produced by convert_port_to_space_read.
3998  *	Conditions:
3999  *		Nothing locked.
4000  */
4001 
4002 void
space_read_deallocate(ipc_space_read_t space)4003 space_read_deallocate(
4004 	ipc_space_read_t     space)
4005 {
4006 	if (space != IS_INSPECT_NULL) {
4007 		is_release((ipc_space_t)space);
4008 	}
4009 }
4010 
4011 /*
4012  *	Routine:	space_inspect_deallocate
4013  *	Purpose:
4014  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
4015  *	Conditions:
4016  *		Nothing locked.
4017  */
4018 
4019 void
space_inspect_deallocate(ipc_space_inspect_t space)4020 space_inspect_deallocate(
4021 	ipc_space_inspect_t     space)
4022 {
4023 	if (space != IS_INSPECT_NULL) {
4024 		is_release((ipc_space_t)space);
4025 	}
4026 }
4027 
4028 
4029 static boolean_t
behavior_is_identity_protected(int new_behavior)4030 behavior_is_identity_protected(int new_behavior)
4031 {
4032 	return (new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED;
4033 }
4034 
4035 static boolean_t
identity_protection_opted_out(const ipc_port_t new_port)4036 identity_protection_opted_out(const ipc_port_t new_port)
4037 {
4038 	if (IP_VALID(new_port)) {
4039 		return ip_is_id_prot_opted_out(new_port);
4040 	}
4041 	return false;
4042 }
4043 
/*
 *	Routine:	send_set_exception_telemetry
 *	Purpose:
 *		Report a set-exception-port policy violation to CoreAnalytics:
 *		records the current proc's and the excepting proc's names, the
 *		exception mask, and the level string supplied by the caller.
 *		NOTE(review): excepting_task may be NULL for host-level ports
 *		per callers — presumably task_procname() tolerates that; confirm.
 */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask, const char* level)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	/* capture both the caller's and the target's process names */
	task_procname(current_task(), (char *) &event->current_proc, sizeof(event->current_proc));
	task_procname(excepting_task, (char *) &event->thread_proc, sizeof(event->thread_proc));
	event->mask = mask;
	strlcpy(event->level, level, sizeof(event->level));

	CA_EVENT_SEND(ca_event);
}
4057 
4058 /* Returns whether the violation should be ignored */
4059 static boolean_t
set_exception_behavior_violation(const task_t excepting_task,const exception_mask_t mask,const char * level)4060 set_exception_behavior_violation(const task_t excepting_task,
4061     const exception_mask_t mask, const char * level)
4062 {
4063 	if (thid_should_crash) {
4064 		/* create lightweight corpse */
4065 		mach_port_guard_exception(0, 0, 0, kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
4066 	}
4067 
4068 	/* always report the proc name to CA */
4069 	send_set_exception_telemetry(excepting_task, mask, level);
4070 
4071 	/* if the bootarg has been manually set to false, ignore the violation */
4072 	return !thid_should_crash;
4073 }
4074 
4075 /*
4076  * Protect platform binary task/thread ports.
4077  * excepting_task is NULL if we are setting a host exception port.
4078  */
4079 static boolean_t
exception_exposes_protected_ports(const ipc_port_t new_port,const task_t excepting_task)4080 exception_exposes_protected_ports(const ipc_port_t new_port, const task_t excepting_task)
4081 {
4082 	if (!IP_VALID(new_port) || is_ux_handler_port(new_port)) {
4083 		/*
4084 		 * sending exceptions to invalid port does not pose risk
4085 		 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
4086 		 */
4087 		return FALSE;
4088 	} else if (excepting_task) {
4089 		/*  setting task/thread exception port - protect hardened binaries */
4090 		return task_is_hardened_binary(excepting_task);
4091 	}
4092 
4093 	/* setting host port exposes all processes - always protect. */
4094 	return TRUE;
4095 }
4096 
#if XNU_TARGET_OS_OSX && CONFIG_CSR
/*
 * Returns true when System Integrity Protection is enabled.
 *
 * csr_check() returns 0 when the queried relaxation is allowed, so a
 * non-zero result for CSR_ALLOW_UNRESTRICTED_FS means SIP is active.
 *
 * Note: declared with (void) — an empty parameter list in a C function
 * declarator does not form a prototype before C23.
 */
static bool
SIP_is_enabled(void)
{
	return csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0;
}
#endif /* XNU_TARGET_OS_OSX && CONFIG_CSR*/
4104 
/*
 *	Routine:	set_exception_behavior_allowed
 *	Purpose:
 *		Policy gate for installing (new_port, new_behavior) as an
 *		exception port.  Protected targets must use the
 *		EXCEPTION_IDENTITY_PROTECTED behavior unless one of the
 *		exemptions below applies.
 *	Returns:
 *		true  - the change may proceed (including tolerated violations).
 *		false - the change must be denied.
 */
boolean_t
set_exception_behavior_allowed(__unused const ipc_port_t new_port, __unused int new_behavior,
    __unused const task_t excepting_task, __unused const exception_mask_t mask, __unused const char *level)
{
	if (exception_exposes_protected_ports(new_port, excepting_task)
	    && !behavior_is_identity_protected(new_behavior)
	    && !identity_protection_opted_out(new_port) /* Ignore opted out */
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(excepting_task)
#if CONFIG_CSR
	    && SIP_is_enabled() /* cannot enforce if SIP is disabled */
#endif /* CONFIG_CSR */
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    && !IOCurrentTaskHasEntitlement("com.apple.private.thread-set-state") /* rdar://109119238 */
	    && !IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		/* violation: may still be tolerated when enforcement is off */
		return set_exception_behavior_violation(excepting_task, mask, level);
	}

	return true;
}
4129 
4130 /*
4131  *	Routine:	thread/task_set_exception_ports [kernel call]
4132  *	Purpose:
4133  *			Sets the thread/task exception port, flavor and
4134  *			behavior for the exception types specified by the mask.
4135  *			There will be one send right per exception per valid
4136  *			port.
4137  *	Conditions:
4138  *		Nothing locked.  If successful, consumes
4139  *		the supplied send right.
4140  *	Returns:
4141  *		KERN_SUCCESS		Changed the special port.
4142  *		KERN_INVALID_ARGUMENT	The thread is null,
4143  *					Illegal mask bit set.
4144  *					Illegal exception behavior
4145  *		KERN_FAILURE		The thread is dead.
4146  *		KERN_NO_ACCESS		Restricted access to set port
4147  */
4148 
kern_return_t
thread_set_exception_ports(
	thread_t                thread,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	/* the *caller's* privilege is recorded in each updated action */
	boolean_t   privileged = task_is_privileged(current_task());

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* a valid port must come with a known behavior value */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but allow opted out ports to still be set on thread only.
	 */
	if (IP_VALID(new_port) &&
	    ((!ip_is_id_prot_opted_out(new_port) && new_port->ip_immovable_receive) ||
	    new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and MACH_EXCEPTION_BACKTRACE_PREFERRED
	 * require MACH_EXCEPTION_CODES to be set as well.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* hardened-binary / identity-protection policy gate */
	if (!set_exception_behavior_allowed(new_port, new_behavior, get_threadtask(thread), exception_mask, "thread")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* lazily allocate the per-thread exception action array */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/* stash the displaced right; released below, outside the lock */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* drop the displaced send rights now that the lock is released */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4271 
4272 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4273 task_set_exception_ports(
4274 	task_t                                  task,
4275 	exception_mask_t                exception_mask,
4276 	ipc_port_t                              new_port,
4277 	exception_behavior_t    new_behavior,
4278 	thread_state_flavor_t   new_flavor)
4279 {
4280 	ipc_port_t              old_port[EXC_TYPES_COUNT];
4281 	boolean_t privileged = task_is_privileged(current_task());
4282 	register int    i;
4283 
4284 #if CONFIG_MACF
4285 	struct label *new_label;
4286 #endif
4287 
4288 	if (task == TASK_NULL) {
4289 		return KERN_INVALID_ARGUMENT;
4290 	}
4291 
4292 	if (exception_mask & ~EXC_MASK_VALID) {
4293 		return KERN_INVALID_ARGUMENT;
4294 	}
4295 
4296 	if (IP_VALID(new_port)) {
4297 		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4298 		case EXCEPTION_DEFAULT:
4299 		case EXCEPTION_STATE:
4300 		case EXCEPTION_STATE_IDENTITY:
4301 		case EXCEPTION_IDENTITY_PROTECTED:
4302 			break;
4303 
4304 		default:
4305 			return KERN_INVALID_ARGUMENT;
4306 		}
4307 	}
4308 
4309 	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
4310 		return KERN_INVALID_RIGHT;
4311 	}
4312 
4313 
4314 	/*
4315 	 * Check the validity of the thread_state_flavor by calling the
4316 	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4317 	 * osfmk/mach/ARCHITECTURE/thread_status.h
4318 	 */
4319 	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4320 		return KERN_INVALID_ARGUMENT;
4321 	}
4322 
4323 	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4324 	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4325 	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
4326 		return KERN_INVALID_ARGUMENT;
4327 	}
4328 
4329 	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, "task")) {
4330 		return KERN_NO_ACCESS;
4331 	}
4332 
4333 #if CONFIG_MACF
4334 	new_label = mac_exc_create_label_for_current_proc();
4335 #endif
4336 
4337 	itk_lock(task);
4338 
4339 	/*
4340 	 * Allow setting exception port during the span of ipc_task_init() to
4341 	 * ipc_task_terminate(). posix_spawn() port actions can set exception
4342 	 * ports on target task _before_ task IPC access is enabled.
4343 	 */
4344 	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4345 		itk_unlock(task);
4346 #if CONFIG_MACF
4347 		mac_exc_free_label(new_label);
4348 #endif
4349 		return KERN_FAILURE;
4350 	}
4351 
4352 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4353 		if ((exception_mask & (1 << i))
4354 #if CONFIG_MACF
4355 		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4356 #endif
4357 		    ) {
4358 			old_port[i] = task->exc_actions[i].port;
4359 			task->exc_actions[i].port =
4360 			    exception_port_copy_send(new_port);
4361 			task->exc_actions[i].behavior = new_behavior;
4362 			task->exc_actions[i].flavor = new_flavor;
4363 			task->exc_actions[i].privileged = privileged;
4364 		} else {
4365 			old_port[i] = IP_NULL;
4366 		}
4367 	}
4368 
4369 	itk_unlock(task);
4370 
4371 #if CONFIG_MACF
4372 	mac_exc_free_label(new_label);
4373 #endif
4374 
4375 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4376 		if (IP_VALID(old_port[i])) {
4377 			ipc_port_release_send(old_port[i]);
4378 		}
4379 	}
4380 
4381 	if (IP_VALID(new_port)) {         /* consume send right */
4382 		ipc_port_release_send(new_port);
4383 	}
4384 
4385 	return KERN_SUCCESS;
4386 }
4387 
4388 /*
4389  *	Routine:	thread/task_swap_exception_ports [kernel call]
4390  *	Purpose:
4391  *			Sets the thread/task exception port, flavor and
4392  *			behavior for the exception types specified by the
4393  *			mask.
4394  *
4395  *			The old ports, behavior and flavors are returned
4396  *			Count specifies the array sizes on input and
4397  *			the number of returned ports etc. on output.  The
4398  *			arrays must be large enough to hold all the returned
 *			data, MIG returns an error otherwise.  The masks
4400  *			array specifies the corresponding exception type(s).
4401  *
4402  *	Conditions:
4403  *		Nothing locked.  If successful, consumes
4404  *		the supplied send right.
4405  *
 *		Returns up to [in] CountCnt elements.
4407  *	Returns:
4408  *		KERN_SUCCESS		Changed the special port.
4409  *		KERN_INVALID_ARGUMENT	The thread is null,
4410  *					Illegal mask bit set.
4411  *					Illegal exception behavior
4412  *		KERN_FAILURE		The thread is dead.
4413  *		KERN_NO_ACCESS		Restricted access to set port
4414  */
4415 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	/* the *caller's* privilege is recorded in each updated action */
	boolean_t   privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* a valid port must come with a known behavior value */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* never accept immovable (kobject-like) rights as exception ports */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/* architecture-dependent thread state flavor validation */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and MACH_EXCEPTION_BACKTRACE_PREFERRED
	 * require MACH_EXCEPTION_CODES to be set as well.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* hardened-binary / identity-protection policy gate */
	if (!set_exception_behavior_allowed(new_port, new_behavior, get_threadtask(thread), exception_mask, "thread")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* lazily allocate the per-thread exception action array */
	tro = get_thread_ro(thread);
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* no identical entry found: append a new returned triple */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* stash the displaced right; released below, outside the lock */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Walk back down from where the loop stopped, dropping the
	 * displaced send rights now that the lock is released.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4558 
kern_return_t
task_swap_exception_ports(
	task_t                          task,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	/* the *caller's* privilege is recorded in each updated action */
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* a valid port must come with a known behavior value */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* never accept immovable (kobject-like) rights as exception ports */
	if (IP_VALID(new_port) && (new_port->ip_immovable_receive || new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/* architecture-dependent thread state flavor validation */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and MACH_EXCEPTION_BACKTRACE_PREFERRED
	 * require MACH_EXCEPTION_CODES to be set as well.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* hardened-binary / identity-protection policy gate */
	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, "task")) {
		return KERN_NO_ACCESS;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* no identical entry found: append a new returned triple */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* stash the displaced right; released below, outside the lock */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Walk back down from where the loop stopped, dropping the
	 * displaced send rights now that the lock is released.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4693 
4694 /*
4695  *	Routine:	thread/task_get_exception_ports [kernel call]
4696  *	Purpose:
4697  *		Clones a send right for each of the thread/task's exception
4698  *		ports specified in the mask and returns the behaviour
4699  *		and flavor of said port.
4700  *
 *		Returns up to [in] CountCnt elements.
4702  *
4703  *	Conditions:
4704  *		Nothing locked.
4705  *	Returns:
4706  *		KERN_SUCCESS		Extracted a send right.
4707  *		KERN_INVALID_ARGUMENT	The thread is null,
4708  *					Invalid special port,
4709  *					Illegal mask bit set.
4710  *		KERN_FAILURE		The thread is dead.
4711  */
/*
 *	Routine:	thread_get_exception_ports_internal
 *	Purpose:
 *		Common implementation for thread_get_exception_ports and
 *		thread_get_exception_ports_info.  When ports_info is non-NULL
 *		(info-only mode), permuted port/receiver identifiers are
 *		returned instead of send rights.
 *	Conditions:
 *		Nothing locked.
 */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* at least one of ports_info / ports must be supplied */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* no actions ever installed on this thread: report an empty set */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* new triple, and the caller's arrays still have room */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* kernel addresses are permuted before being exported */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4804 
4805 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4806 thread_get_exception_ports(
4807 	thread_t                        thread,
4808 	exception_mask_t                exception_mask,
4809 	exception_mask_array_t          masks,
4810 	mach_msg_type_number_t          *CountCnt,
4811 	exception_port_array_t          ports,
4812 	exception_behavior_array_t      behaviors,
4813 	thread_state_flavor_array_t     flavors)
4814 {
4815 	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4816 	           NULL, ports, behaviors, flavors);
4817 }
4818 
4819 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4820 thread_get_exception_ports_info(
4821 	mach_port_t                     port,
4822 	exception_mask_t                exception_mask,
4823 	exception_mask_array_t          masks,
4824 	mach_msg_type_number_t          *CountCnt,
4825 	exception_port_info_array_t     ports_info,
4826 	exception_behavior_array_t      behaviors,
4827 	thread_state_flavor_array_t     flavors)
4828 {
4829 	kern_return_t kr;
4830 
4831 	thread_t thread = convert_port_to_thread_read_no_eval(port);
4832 
4833 	if (thread == THREAD_NULL) {
4834 		return KERN_INVALID_ARGUMENT;
4835 	}
4836 
4837 	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4838 	    ports_info, NULL, behaviors, flavors);
4839 
4840 	thread_deallocate(thread);
4841 	return kr;
4842 }
4843 
4844 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4845 thread_get_exception_ports_from_user(
4846 	mach_port_t                     port,
4847 	exception_mask_t                exception_mask,
4848 	exception_mask_array_t          masks,
4849 	mach_msg_type_number_t         *CountCnt,
4850 	exception_port_array_t          ports,
4851 	exception_behavior_array_t      behaviors,
4852 	thread_state_flavor_array_t     flavors)
4853 {
4854 	kern_return_t kr;
4855 
4856 	thread_t thread = convert_port_to_thread(port);
4857 
4858 	if (thread == THREAD_NULL) {
4859 		return KERN_INVALID_ARGUMENT;
4860 	}
4861 
4862 	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4863 
4864 	thread_deallocate(thread);
4865 	return kr;
4866 }
4867 
/*
 *	Routine:	task_get_exception_ports_internal
 *	Purpose:
 *		Common implementation for task_get_exception_ports and
 *		task_get_exception_ports_info.  When ports_info is non-NULL
 *		(info-only mode), permuted port/receiver identifiers are
 *		returned instead of send rights.
 *	Conditions:
 *		Nothing locked.
 */
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* at least one of ports_info / ports must be supplied */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* new triple, and the caller's arrays still have room */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* kernel addresses are permuted before being exported */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4952 
4953 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4954 task_get_exception_ports(
4955 	task_t                          task,
4956 	exception_mask_t                exception_mask,
4957 	exception_mask_array_t          masks,
4958 	mach_msg_type_number_t          *CountCnt,
4959 	exception_port_array_t          ports,
4960 	exception_behavior_array_t      behaviors,
4961 	thread_state_flavor_array_t     flavors)
4962 {
4963 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4964 	           NULL, ports, behaviors, flavors);
4965 }
4966 
4967 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4968 task_get_exception_ports_info(
4969 	mach_port_t                     port,
4970 	exception_mask_t                exception_mask,
4971 	exception_mask_array_t          masks,
4972 	mach_msg_type_number_t          *CountCnt,
4973 	exception_port_info_array_t     ports_info,
4974 	exception_behavior_array_t      behaviors,
4975 	thread_state_flavor_array_t     flavors)
4976 {
4977 	kern_return_t kr;
4978 
4979 	task_t task = convert_port_to_task_read_no_eval(port);
4980 
4981 	if (task == TASK_NULL) {
4982 		return KERN_INVALID_ARGUMENT;
4983 	}
4984 
4985 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4986 	    ports_info, NULL, behaviors, flavors);
4987 
4988 	task_deallocate(task);
4989 	return kr;
4990 }
4991 
4992 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4993 task_get_exception_ports_from_user(
4994 	mach_port_t                     port,
4995 	exception_mask_t                exception_mask,
4996 	exception_mask_array_t          masks,
4997 	mach_msg_type_number_t         *CountCnt,
4998 	exception_port_array_t          ports,
4999 	exception_behavior_array_t      behaviors,
5000 	thread_state_flavor_array_t     flavors)
5001 {
5002 	kern_return_t kr;
5003 
5004 	task_t task = convert_port_to_task(port);
5005 
5006 	if (task == TASK_NULL) {
5007 		return KERN_INVALID_ARGUMENT;
5008 	}
5009 
5010 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
5011 
5012 	task_deallocate(task);
5013 	return kr;
5014 }
5015 
5016 /*
5017  *	Routine:	ipc_thread_port_unpin
5018  *	Purpose:
5019  *
5020  *		Called on the thread when it's terminating so that the last ref
5021  *		can be deallocated without a guard exception.
5022  *	Conditions:
5023  *		Thread mutex lock is held.
5024  */
5025 void
ipc_thread_port_unpin(ipc_port_t port)5026 ipc_thread_port_unpin(
5027 	ipc_port_t port)
5028 {
5029 	if (port == IP_NULL) {
5030 		return;
5031 	}
5032 	ip_mq_lock(port);
5033 	port->ip_pinned = 0;
5034 	ip_mq_unlock(port);
5035 }
5036