xref: /xnu-12377.1.9/osfmk/kern/ipc_tt.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
72 
73 #include <ipc/ipc_policy.h>
74 #include <mach/mach_types.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/mach_param.h>
78 #include <mach/task_special_ports.h>
79 #include <mach/thread_special_ports.h>
80 #include <mach/thread_status.h>
81 #include <mach/exception_types.h>
82 #include <mach/memory_object_types.h>
83 #include <mach/mach_traps.h>
84 #include <mach/task_server.h>
85 #include <mach/thread_act_server.h>
86 #include <mach/mach_host_server.h>
87 #include <mach/host_priv_server.h>
88 #include <mach/vm_map_server.h>
89 
90 #include <kern/exc_guard.h>
91 #include <kern/kern_types.h>
92 #include <kern/host.h>
93 #include <kern/ipc_kobject.h>
94 #include <kern/ipc_tt.h>
95 #include <kern/kalloc.h>
96 #include <kern/thread.h>
97 #include <kern/ux_handler.h>
98 #include <kern/misc_protos.h>
99 #include <kdp/kdp_dyld.h>
100 
101 #include <sys/proc_ro.h>
102 
103 #include <vm/vm_map_xnu.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_protos.h>
106 #include <mach/vm_types.h>
107 #include <libkern/coreanalytics/coreanalytics.h>
108 
109 #include <security/mac_mach_internal.h>
110 
111 #if CONFIG_CSR
112 #include <sys/csr.h>
113 #endif
114 
115 #include <sys/code_signing.h> /* for developer mode state */
116 
117 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
118 extern int cs_relax_platform_task_ports;
119 #endif
120 
121 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
122 extern boolean_t proc_is_simulated(const proc_t);
123 extern struct proc* current_proc(void);
124 
125 /* bootarg to create lightweight corpse for thread identity lockdown */
126 TUNABLE(bool, thid_should_crash, "thid_should_crash", true);
127 
128 /* Allows the process to call `[thread,task]_set_exception_ports */
129 #define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"
130 
131 /*
132  * Entitlement to disallow setting the exception port of task/thread unless you
133  * are being debugged or are setting up the hardened task exception handler
134  */
135 #define IPC_ONLY_ONE_EXCEPTION_PORT "com.apple.security.only-one-exception-port"
136 
137 CA_EVENT(set_exception,
138     CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
139     CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
140     CA_INT, mask,
141     CA_STATIC_STRING(6), level);
142 
143 __options_decl(ipc_reply_port_type_t, uint32_t, {
144 	IRPT_NONE        = 0x00,
145 	IRPT_USER        = 0x01,
146 	IRPT_KERNEL      = 0x02,
147 });
148 
149 /* forward declarations */
150 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
151 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
152 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
153 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
154 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
155 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
156 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
157 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
158 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
159 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
160 
161 /*
162  *	Routine:	ipc_task_init
163  *	Purpose:
164  *		Initialize a task's IPC state.
165  *
166  *		If non-null, some state will be inherited from the parent.
167  *		The parent must be appropriately initialized.
168  *	Conditions:
169  *		Nothing locked.
170  */
171 
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	struct label *temp_label;
	int i;


	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/* control and name ports are allocated eagerly for every task */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
#if CONFIG_CSR
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right held in the kernel.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
#endif /* CONFIG_CSR */
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
	/* We don't need to inherit this */
	bzero(&task->hardened_exception_action, sizeof(task->hardened_exception_action));

	if (parent == TASK_NULL) {
		/* no parent: start with empty special/exception ports */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* copy inherited rights while holding the parent's itk lock */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			/* preserve this task's own MACF label across the struct copy */
			temp_label = task->exc_actions[i].label;
			task->exc_actions[i] = parent->exc_actions[i];
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].label = temp_label;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
294 
295 /*
296  *	Routine:	ipc_task_copyout_control_port
297  *	Purpose:
298  *		Copyout the task control port as pinned
299  *      and stash the send right name in the port
300  */
void
ipc_task_copyout_control_port(
	task_t            task)
{
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	mach_port_name_t name;
	ipc_port_t pport;

#if CONFIG_CSR
	assert(kport == task->itk_settable_self);
#endif /* CONFIG_CSR */
	assert(!task_is_a_corpse(task));

	/* naked send right for the control port; consumed by the copyout below */
	pport = ipc_port_make_send_any(kport);
	/*
	 * mach_task_self() is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	name = ipc_port_copyout_send_pinned(pport, task->itk_space);
	if (MACH_PORT_VALID(name)) {
		pport->ip_receiver_name = name;
	}
}
330 
331 /*
332  *	Routine:	ipc_thread_set_immovable_pinned
333  *	Purpose:
334  *		Copyout the task control port as pinned and immovable
335  *      and stash the send right name in the port
336  *  Notes:
337  *		Consumes a thread ref; produces a naked send right
338  *		which may be invalid.
339  */
void
ipc_thread_set_immovable_pinned(
	thread_t            thread)
{
	/* consumes the thread ref; yields a naked send right to the kport */
	ipc_port_t kport = convert_thread_to_port_immovable(thread);

	task_t task = get_threadtask(thread);
	mach_port_name_t name;

#if CONFIG_CSR
	assert(kport == thread->t_tro->tro_settable_self_port);
#endif /* CONFIG_CSR */
	assert(!task_is_a_corpse(task));

	/* copy the right out as a pinned entry; the returned name is not needed here */
	name = ipc_port_copyout_send_pinned(kport, task->itk_space);
}
356 
357 /*
358  *	Routine:	ipc_task_enable
359  *	Purpose:
360  *		Enable a task for IPC access.
361  *	Conditions:
362  *		Nothing locked.
363  */
void
ipc_task_enable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;

	ipc_space_set_policy(task->itk_space, ipc_policy_for_task(task));

	itk_lock(task);
	if (!task->active) {
		/*
		 * task has been terminated before we can enable IPC access.
		 * The check is to make sure we don't accidentally re-enable
		 * the task ports _after_ they've been disabled during
		 * task_terminate_internal(), in which case we will hit the
		 * !task->ipc_active assertion in ipc_task_terminate().
		 *
		 * Technically we should grab task lock when checking task
		 * active bit, but since task termination unsets task->active
		 * _before_ calling ipc_task_disable(), we can always see the
		 * truth with just itk_lock() and bail if disable has been called.
		 */
		itk_unlock(task);
		return;
	}

	assert(task_is_a_corpse(task) || task->map->owning_task == task); /* verify vm_map_setup called */
	assert(!task->ipc_active || task_is_a_corpse(task));
	task->ipc_active = true;

	/* re-attach the task kobject to each port flavor that exists */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
	}

	itk_unlock(task);
}
416 
417 /*
418  *	Routine:	ipc_task_disable
419  *	Purpose:
420  *		Disable IPC access to a task.
421  *	Conditions:
422  *		Nothing locked.
423  */
424 
void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* detach the task kobject from every port flavor that exists */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
483 
484 /*
485  *	Routine:	ipc_task_terminate
486  *	Purpose:
487  *		Clean up and destroy a task's IPC state.
488  *	Conditions:
489  *		Nothing locked.  The task must be suspended.
490  *		(Or the current thread must be in the task.)
491  */
492 
void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
#if CONFIG_CSR
	ipc_port_t sself;
#endif /* CONFIG_CSR */
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	/*
	 * Snapshot and clear all the ports while holding the itk lock;
	 * the actual right releases / port deallocations happen after
	 * the lock is dropped.
	 */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	sself = task->itk_settable_self;
#endif /* CONFIG_CSR */

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}
#endif /* CONFIG_CSR */

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	if (IP_VALID(task->hardened_exception_action.ea.port)) {
		ipc_port_release_send(task->hardened_exception_action.ea.port);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, IPC_KOBJECT_NO_MSCOUNT,
	    IKOT_TASK_CONTROL);

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, IPC_KOBJECT_NO_MSCOUNT,
	    IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_TASK_INSPECT);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
628 
629 /*
630  *	Routine:	ipc_task_reset
631  *	Purpose:
632  *		Reset a task's IPC state to protect it when
633  *		it enters an elevated security context. The
634  *		task name port can remain the same - since it
635  *              represents no specific privilege.
636  *	Conditions:
637  *		Nothing locked.  The task must be suspended.
638  *		(Or the current thread must be in the task.)
639  */
640 
641 void
ipc_task_reset(task_t task)642 ipc_task_reset(
643 	task_t          task)
644 {
645 	ipc_port_t old_kport, new_kport;
646 #if CONFIG_CSR
647 	ipc_port_t old_sself;
648 #endif /* CONFIG_CSR */
649 	ipc_port_t old_rdport;
650 	ipc_port_t old_iport;
651 	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
652 	ipc_port_t old_hardened_exception;
653 	ipc_port_t *notifiers_ptr = NULL;
654 
655 #if CONFIG_MACF
656 	/* Fresh label to unset credentials in existing labels. */
657 	struct label *unset_label = mac_exc_create_label(NULL);
658 #endif
659 
660 	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
661 	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
662 	/*
663 	 * ipc_task_reset() only happens during sugid or corpsify.
664 	 *
665 	 * (1) sugid happens early in exec_mach_imgact(),
666 	 *     at which point the old task port has not been enabled,
667 	 *     and is left movable.
668 	 * (2) corpse cannot execute more code so the notion of the immovable
669 	 *     task port is bogus, and should appear as if it doesn't have one.
670 	 *
671 	 */
672 	itk_lock(task);
673 
674 	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
675 	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
676 	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
677 
678 	if (old_kport == IP_NULL) {
679 		/* the task is already terminated (can this happen?) */
680 		itk_unlock(task);
681 		ipc_kobject_dealloc_port(new_kport, IPC_KOBJECT_NO_MSCOUNT, IKOT_TASK_CONTROL);
682 #if CONFIG_MACF
683 		mac_exc_free_label(unset_label);
684 #endif
685 		return;
686 	}
687 
688 	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
689 
690 #if CONFIG_CSR
691 	old_sself = task->itk_settable_self;
692 	if (task_is_a_corpse(task)) {
693 		/* No extra send right for coprse, needed to arm no-sender notification */
694 		task->itk_settable_self = IP_NULL;
695 	} else {
696 		/* we just made the port, no need to triple check */
697 		task->itk_settable_self = ipc_port_make_send_any(new_kport);
698 	}
699 #endif /* CONFIG_CSR */
700 
701 	ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);
702 
703 	/* Reset the read and inspect flavors of task port */
704 	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
705 	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
706 
707 	if (IP_VALID(task->hardened_exception_action.ea.port)
708 	    && !task->hardened_exception_action.ea.privileged) {
709 		old_hardened_exception = task->hardened_exception_action.ea.port;
710 		task->hardened_exception_action.ea.port = IP_NULL;
711 	}
712 
713 	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
714 		old_exc_actions[i] = IP_NULL;
715 
716 		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
717 			continue;
718 		}
719 
720 		if (!task->exc_actions[i].privileged) {
721 #if CONFIG_MACF
722 			mac_exc_update_action_label(task->exc_actions + i, unset_label);
723 #endif
724 			old_exc_actions[i] = task->exc_actions[i].port;
725 			task->exc_actions[i].port = IP_NULL;
726 		}
727 	}/* for */
728 
729 	if (IP_VALID(task->itk_debug_control)) {
730 		ipc_port_release_send(task->itk_debug_control);
731 	}
732 	task->itk_debug_control = IP_NULL;
733 
734 	if (task->itk_dyld_notify) {
735 		notifiers_ptr = task->itk_dyld_notify;
736 		task->itk_dyld_notify = NULL;
737 	}
738 
739 	itk_unlock(task);
740 
741 #if CONFIG_MACF
742 	mac_exc_free_label(unset_label);
743 #endif
744 
745 	/* release the naked send rights */
746 #if CONFIG_CSR
747 	if (IP_VALID(old_sself)) {
748 		ipc_port_release_send(old_sself);
749 	}
750 #endif /* CONFIG_CSR */
751 
752 	if (notifiers_ptr) {
753 		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
754 			if (IP_VALID(notifiers_ptr[i])) {
755 				ipc_port_release_send(notifiers_ptr[i]);
756 			}
757 		}
758 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
759 	}
760 
761 	ipc_port_release_send(old_hardened_exception);
762 
763 	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
764 		if (IP_VALID(old_exc_actions[i])) {
765 			ipc_port_release_send(old_exc_actions[i]);
766 		}
767 	}
768 
769 	/* destroy all task port flavors */
770 	if (old_rdport != IP_NULL) {
771 		/* read port ikol_alt_port may point to kport, dealloc first */
772 		ipc_kobject_dealloc_port(old_rdport, IPC_KOBJECT_NO_MSCOUNT,
773 		    IKOT_TASK_READ);
774 	}
775 	ipc_kobject_dealloc_port(old_kport, IPC_KOBJECT_NO_MSCOUNT,
776 	    IKOT_TASK_CONTROL);
777 
778 	if (old_iport != IP_NULL) {
779 		ipc_kobject_dealloc_port(old_iport, IPC_KOBJECT_NO_MSCOUNT,
780 		    IKOT_TASK_INSPECT);
781 	}
782 }
783 
784 /*
785  *	Routine:	ipc_thread_init
786  *	Purpose:
787  *		Initialize a thread's IPC state.
788  *	Conditions:
789  *		Nothing locked.
790  */
791 
void
ipc_thread_init(
	__unused task_t task,
	thread_t        thread,
	thread_ro_t     tro)
{
	ipc_port_t         kport;

	/*
	 * pthreads are subsequently pinned via
	 * ipc_port_copyout_send_pinned() whereas raw threads are left
	 * unpinned.
	 */
	kport = ipc_kobject_alloc_port(thread, IKOT_THREAD_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);

	/* we just made the port, no need to triple check */
#if CONFIG_CSR
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
#endif /* CONFIG_CSR */
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	/* inspect/read flavors and exception actions are lazily allocated */
	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
825 
/*
 * Wrapper so the per-thread exception action array can be allocated
 * and freed with kalloc_type/kfree_type as a single typed unit.
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
829 
/*
 * Allocate a zero-filled exception action array for the thread and
 * publish it through the read-only thread structure.
 */
static void
ipc_thread_init_exc_actions(thread_ro_t tro)
{
	struct exception_action *actions;

	actions = kalloc_type(struct thread_init_exc_actions,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;

#if CONFIG_MACF
	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
		mac_exc_associate_action_label(&actions[i],
		    mac_exc_create_label(&actions[i]));
	}
#endif

	/* publish the new array pointer into the RO thread data */
	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
}
847 
/*
 * Tear down the thread's exception action array: free the MACF labels,
 * clear the RO pointer, then free the array itself. Port send rights
 * held by the actions must have been released by the caller.
 */
static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)
{
	struct exception_action *actions = tro->tro_exc_actions;

	if (actions) {
#if CONFIG_MACF
		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
			mac_exc_free_action_label(actions + i);
		}
#endif

		/* clear the RO pointer before freeing the backing storage */
		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
		struct thread_init_exc_actions *tr_actions =
		    (struct thread_init_exc_actions *)actions;
		kfree_type(struct thread_init_exc_actions, tr_actions);
	}
}
866 
/*
 * Overwrite the thread's RO port array (and, with CONFIG_CSR, the
 * adjacent tro_settable_self_port) from the supplied template in a
 * single zalloc_ro_mut() call.
 */
static void
ipc_thread_ro_update_ports(
	thread_ro_t             tro,
	const struct thread_ro *tro_tpl)
{
	vm_size_t offs = offsetof(struct thread_ro, tro_ports);
	/* with CONFIG_CSR the copy extends one pointer past tro_ports */
	vm_size_t size = sizeof(tro_tpl->tro_ports)
#if CONFIG_CSR
	    + sizeof(struct ipc_port *);
#else
	;
#endif /* CONFIG_CSR */

#if CONFIG_CSR
	/* the extended copy relies on this exact field layout */
	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
	    offsetof(struct thread_ro, tro_ports) +
	    sizeof(tro_tpl->tro_ports));
#endif /* CONFIG_CSR */

	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
	    offs, &tro_tpl->tro_ports, size);
}
889 
890 /*
891  *	Routine:	ipc_thread_disable
892  *	Purpose:
893  *		Clean up and destroy a thread's IPC state.
894  *	Conditions:
895  *		Thread locked.
896  */
void
ipc_thread_disable(
	thread_t        thread)
{
	thread_ro_t     tro = get_thread_ro(thread);
	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
934 
935 /*
936  *	Routine:	ipc_thread_terminate
937  *	Purpose:
938  *		Clean up and destroy a thread's IPC state.
939  *	Conditions:
940  *		Nothing locked.
941  */
942 
void
ipc_thread_terminate(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
#if CONFIG_CSR
	ipc_port_t sport = IP_NULL;
#endif /* CONFIG_CSR */

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	/* snapshot the ports under the mutex; dealloc after unlock */
	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
#if CONFIG_CSR
	sport = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */

	if (kport != IP_NULL) {
#if CONFIG_CSR
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}
#endif /* CONFIG_CSR */

		/* zero out the whole RO port array in one shot */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_READ);
	}

	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(kport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_CONTROL);
	}
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_INSPECT);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1016 
1017 /*
1018  *	Routine:	ipc_thread_reset
1019  *	Purpose:
1020  *		Reset the IPC state for a given Mach thread when
1021  *		its task enters an elevated security context.
1022  *		All flavors of thread port and its exception ports have
1023  *		to be reset.  Its RPC reply port cannot have any
1024  *		rights outstanding, so it should be fine. The thread
1025  *		inspect and read port are set to NULL.
1026  *	Conditions:
1027  *		Nothing locked.
1028  */
1029 
void
ipc_thread_reset(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport;
#if CONFIG_CSR
	ipc_port_t old_sself;
#endif /* CONFIG_CSR */
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	int i;

#if CONFIG_MACF
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	/* allocate the replacement control port before taking the mutex */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point
	 *     the old thread port is still movable.
	 * (2) corpse cannot execute more code so the notion of the immovable
	 *     thread port is bogus, and should appear as if it doesn't have one.
	 */

	thread_mtx_lock(thread);

	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

#if CONFIG_CSR
	old_sself = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		/* undo the speculative allocation above */
		ipc_kobject_dealloc_port(new_kport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_CONTROL);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ipc_active = true;

	/* template installs the new control port; read/inspect become NULL */
	struct thread_ro tpl = {
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
		/* we just made the port, no need to triple check */
#if CONFIG_CSR
		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
#endif /* CONFIG_CSR */
	};

	ipc_thread_ro_update_ports(tro, &tpl);

	if (old_kport != IP_NULL) {
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* privileged slot survives; mark nothing to release */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}
#endif /* CONFIG_CSR */

	if (has_old_exc_actions) {
		/* privileged slots were stored as IP_NULL above */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel ports */
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_READ);
		/* ikol_alt_port cleared */
	}
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_CONTROL);
	}

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_INSPECT);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1163 
1164 /*
1165  *	Routine:	retrieve_task_self_fast
1166  *	Purpose:
1167  *		Optimized version of retrieve_task_self,
1168  *		that only works for the current task.
1169  *
1170  *		Return a send right (possibly null/dead)
1171  *		for the task's user-visible self port.
1172  *	Conditions:
1173  *		Nothing locked.
1174  */
1175 
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];

	/* fast path is only valid for the caller's own task */
	assert(task == current_task());

	itk_lock(task);
	assert(kport != IP_NULL);

#if CONFIG_CSR
	/*
	 * If a settable self port distinct from the control port was
	 * installed (SIP disabled; see task_set_special_port), hand out
	 * a copy of that send right instead of the kernel control port.
	 */
	if (task->itk_settable_self != kport) {
		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		port = ipc_kobject_make_send(kport, task, IKOT_TASK_CONTROL);
	}

	itk_unlock(task);

	return port;
}
1201 
1202 /*
1203  *	Routine:	mach_task_is_self
1204  *	Purpose:
1205  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1206  *      port is pointing to current_task.
1207  */
1208 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1209 mach_task_is_self(
1210 	task_t         task,
1211 	boolean_t     *is_self)
1212 {
1213 	if (task == TASK_NULL) {
1214 		return KERN_INVALID_ARGUMENT;
1215 	}
1216 
1217 	*is_self = (task == current_task());
1218 
1219 	return KERN_SUCCESS;
1220 }
1221 
1222 /*
1223  *	Routine:	retrieve_thread_self_fast
1224  *	Purpose:
1225  *		Return a send right (possibly null/dead)
1226  *		for the thread's user-visible self port.
1227  *
1228  *		Only works for the current thread.
1229  *
1230  *	Conditions:
1231  *		Nothing locked.
1232  */
1233 
ipc_port_t
retrieve_thread_self_fast(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	/* fast path is only valid for the calling thread itself */
	assert(thread == current_thread());

	thread_mtx_lock(thread);

#if CONFIG_CSR
	/*
	 * If a settable self port distinct from the control port was
	 * installed (SIP disabled; see thread_set_special_port), hand out
	 * a copy of that send right instead of the kernel control port.
	 */
	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		port = ipc_kobject_make_send(tro->tro_ports[THREAD_FLAVOR_CONTROL],
		    thread, IKOT_THREAD_CONTROL);
	}

	thread_mtx_unlock(thread);

	return port;
}
1259 
1260 /*
1261  *	Routine:	task_self_trap [mach trap]
1262  *	Purpose:
1263  *		Give the caller send rights for their own task port.
1264  *	Conditions:
1265  *		Nothing locked.
1266  *	Returns:
1267  *		MACH_PORT_NULL if there are any resource failures
1268  *		or other errors.
1269  */
1270 
1271 mach_port_name_t
task_self_trap(__unused struct task_self_trap_args * args)1272 task_self_trap(
1273 	__unused struct task_self_trap_args *args)
1274 {
1275 	task_t task = current_task();
1276 	ipc_port_t sright;
1277 
1278 	sright = retrieve_task_self_fast(task);
1279 	return ipc_port_copyout_send(sright, task->itk_space);
1280 }
1281 
1282 /*
1283  *	Routine:	thread_self_trap [mach trap]
1284  *	Purpose:
1285  *		Give the caller send rights for his own thread port.
1286  *	Conditions:
1287  *		Nothing locked.
1288  *	Returns:
1289  *		MACH_PORT_NULL if there are any resource failures
1290  *		or other errors.
1291  */
1292 
1293 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1294 thread_self_trap(
1295 	__unused struct thread_self_trap_args *args)
1296 {
1297 	thread_t thread = current_thread();
1298 	ipc_space_t space = current_space();
1299 	ipc_port_t sright;
1300 	mach_port_name_t name;
1301 
1302 	sright = retrieve_thread_self_fast(thread);
1303 	name = ipc_port_copyout_send(sright, space);
1304 	return name;
1305 }
1306 
1307 /*
1308  *	Routine:	mach_reply_port [mach trap]
1309  *	Purpose:
1310  *		Allocate a port for the caller.
1311  *	Conditions:
1312  *		Nothing locked.
1313  *	Returns:
1314  *		MACH_PORT_NULL if there are any resource failures
1315  *		or other errors.
1316  */
1317 
1318 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1319 mach_reply_port(
1320 	__unused struct mach_reply_port_args *args)
1321 {
1322 	ipc_port_t port;
1323 	mach_port_name_t name;
1324 	kern_return_t kr;
1325 
1326 	kr = ipc_port_alloc(current_space(), IPC_OBJECT_LABEL(IOT_PORT),
1327 	    IP_INIT_NONE, &name, &port);
1328 	if (kr == KERN_SUCCESS) {
1329 		ip_mq_unlock(port);
1330 	} else {
1331 		name = MACH_PORT_NULL;
1332 	}
1333 	return name;
1334 }
1335 
1336 /*
1337  *	Routine:	thread_get_special_reply_port [mach trap]
1338  *	Purpose:
1339  *		Allocate a special reply port for the calling thread.
1340  *	Conditions:
1341  *		Nothing locked.
1342  *	Returns:
1343  *		mach_port_name_t: send right & receive right for special reply port.
1344  *		MACH_PORT_NULL if there are any resource failures
1345  *		or other errors.
1346  */
1347 
1348 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1349 thread_get_special_reply_port(
1350 	__unused struct thread_get_special_reply_port_args *args)
1351 {
1352 	ipc_port_t port;
1353 	mach_port_name_t name;
1354 	kern_return_t kr;
1355 	thread_t thread = current_thread();
1356 
1357 	/* unbind the thread special reply port */
1358 	if (IP_VALID(thread->ith_special_reply_port)) {
1359 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1360 	}
1361 
1362 	kr = ipc_port_alloc(current_space(), IPC_OBJECT_LABEL(IOT_SPECIAL_REPLY_PORT),
1363 	    IP_INIT_MAKE_SEND_RIGHT, &name, &port);
1364 	if (kr == KERN_SUCCESS) {
1365 		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1366 		ip_mq_unlock(port);
1367 	} else {
1368 		name = MACH_PORT_NULL;
1369 	}
1370 	return name;
1371 }
1372 
1373 /*
1374  *	Routine:	thread_get_kernel_special_reply_port
1375  *	Purpose:
1376  *		Allocate a kernel special reply port for the calling thread.
1377  *	Conditions:
1378  *		Nothing locked.
1379  *	Returns:
1380  *		Creates and sets kernel special reply port.
1381  *		KERN_SUCCESS on Success.
1382  *		KERN_FAILURE on Failure.
1383  */
1384 
kern_return_t
thread_get_kernel_special_reply_port(void)
{
	ipc_port_t port = IP_NULL;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_kernel_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
	}

	/* allocate in the kernel reply space; the port comes back locked */
	port = ipc_port_alloc_special(ipc_space_reply,
	    IPC_OBJECT_LABEL(IOT_SPECIAL_REPLY_PORT), IP_INIT_NONE);
	ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
	ip_mq_unlock(port);

	/* release the reference returned by ipc_port_alloc_special */
	ip_release(port);

	return KERN_SUCCESS;
}
1406 
1407 /*
1408  *	Routine:	ipc_port_bind_special_reply_port_locked
1409  *	Purpose:
1410  *		Bind the given port to current thread as a special reply port.
1411  *	Conditions:
1412  *		Port locked.
1413  *	Returns:
1414  *		None.
1415  */
1416 
static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t            port,
	ipc_reply_port_type_t reply_type)
{
	thread_t thread = current_thread();
	ipc_port_t *reply_portp;

	/* pick the per-thread slot matching this reply port type */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	/* slot must be empty; port must be an unlinked special reply port */
	assert(*reply_portp == NULL);
	assert(ip_is_special_reply_port(port));
	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

	/* the thread's slot holds its own port reference */
	ip_reference(port);
	*reply_portp = port;
	port->ip_messages.imq_srp_owner_thread = thread;

	ipc_special_reply_port_bits_reset(port);
}
1441 
1442 /*
1443  *	Routine:	ipc_port_unbind_special_reply_port
1444  *	Purpose:
1445  *		Unbind the thread's special reply port.
1446  *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
1448  *	Condition:
1449  *		Nothing locked.
1450  *	Returns:
1451  *		None.
1452  */
static void
ipc_port_unbind_special_reply_port(
	thread_t              thread,
	ipc_reply_port_type_t reply_type)
{
	ipc_port_t *reply_portp;

	/* pick the per-thread slot matching this reply port type */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	ipc_port_t special_reply_port = *reply_portp;

	ip_mq_lock(special_reply_port);

	/* detach from the thread before adjusting the port */
	*reply_portp = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/* Destroy the port if it's a kernel special reply, else just release a ref */
	if (reply_type == IRPT_USER) {
		ip_release(special_reply_port);
	} else {
		/* ipc_port_destroy expects the port locked */
		ip_mq_lock(special_reply_port);
		ipc_port_destroy(special_reply_port);
	}
}
1483 
1484 /*
1485  *	Routine:	thread_dealloc_kernel_special_reply_port
1486  *	Purpose:
1487  *		Unbind the thread's kernel special reply port.
1488  *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
1490  *	Condition:
1491  *		Called on current thread or a terminated thread.
1492  *	Returns:
1493  *		None.
1494  */
1495 
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* unbinding an IRPT_KERNEL reply port also destroys the port */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1501 
1502 /*
1503  *	Routine:	thread_get_special_port [kernel call]
1504  *	Purpose:
1505  *		Clones a send right for one of the thread's
1506  *		special ports.
1507  *	Conditions:
1508  *		Nothing locked.
1509  *	Returns:
1510  *		KERN_SUCCESS		Extracted a send right.
1511  *		KERN_INVALID_ARGUMENT	The thread is null.
1512  *		KERN_FAILURE		The thread is dead.
1513  *		KERN_INVALID_ARGUMENT	Invalid special port.
1514  */
1515 
1516 kern_return_t
1517 thread_get_special_port(
1518 	thread_inspect_t         thread,
1519 	int                      which,
1520 	ipc_port_t              *portp);
1521 
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	/* refuse requests that exceed what the caller's port flavor allows */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		/* thread is dead; nothing to hand out */
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* a settable self port distinct from the control port wins */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1575 
1576 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1577 thread_get_special_port(
1578 	thread_inspect_t         thread,
1579 	int                      which,
1580 	ipc_port_t              *portp)
1581 {
1582 	if (thread == THREAD_NULL) {
1583 		return KERN_INVALID_ARGUMENT;
1584 	}
1585 
1586 	return thread_get_special_port_internal(thread, get_thread_ro(thread),
1587 	           which, portp, THREAD_FLAVOR_CONTROL);
1588 }
1589 
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* translate without triggering eval-time policy hooks */
	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_type(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belonging to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		/*
		 * if `mac_task_check_get_movable_control_port` returned 0,
		 * then we must also have a movable task.
		 * see `task_set_exc_guard_default`
		 */
		assert(!task_is_immovable(current_task()));
	}

	/* derive the request's flavor from the kobject type of the port */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	/* drop the reference taken by the port-to-thread conversion */
	thread_deallocate(thread);
	return kr;
}
1648 
1649 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1650 special_port_allowed_with_thread_flavor(
1651 	int                  which,
1652 	mach_thread_flavor_t flavor)
1653 {
1654 	switch (flavor) {
1655 	case THREAD_FLAVOR_CONTROL:
1656 		return KERN_SUCCESS;
1657 
1658 	case THREAD_FLAVOR_READ:
1659 
1660 		switch (which) {
1661 		case THREAD_READ_PORT:
1662 		case THREAD_INSPECT_PORT:
1663 			return KERN_SUCCESS;
1664 		default:
1665 			return KERN_INVALID_CAPABILITY;
1666 		}
1667 
1668 	case THREAD_FLAVOR_INSPECT:
1669 
1670 		switch (which) {
1671 		case THREAD_INSPECT_PORT:
1672 			return KERN_SUCCESS;
1673 		default:
1674 			return KERN_INVALID_CAPABILITY;
1675 		}
1676 
1677 	default:
1678 		return KERN_INVALID_CAPABILITY;
1679 	}
1680 }
1681 
1682 /*
1683  *	Routine:	thread_set_special_port [kernel call]
1684  *	Purpose:
1685  *		Changes one of the thread's special ports,
1686  *		setting it to the supplied send right.
1687  *	Conditions:
1688  *		Nothing locked.  If successful, consumes
1689  *		the supplied send right.
1690  *	Returns:
1691  *		KERN_SUCCESS            Changed the special port.
1692  *		KERN_INVALID_ARGUMENT   The thread is null.
1693  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1694  *		KERN_FAILURE            The thread is dead.
1695  *		KERN_INVALID_ARGUMENT   Invalid special port.
1696  *		KERN_NO_ACCESS          Restricted access to set port.
1697  */
1698 
kern_return_t
thread_set_special_port(
	thread_t                thread,
	int                     which,
	ipc_port_t              port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro = NULL;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through thread_get_special_port()
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* stash the new right; remember the old one to drop */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			/* release the displaced send right outside the mutex */
			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* silence unused-variable warnings when CSR is compiled out */
		(void)old;
		(void)result;
		(void)tro;
#endif /* CONFIG_CSR */
		return KERN_NO_ACCESS;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1759 
1760 static inline mach_task_flavor_t
task_special_type_to_flavor(task_special_port_t which)1761 task_special_type_to_flavor(task_special_port_t which)
1762 {
1763 	switch (which) {
1764 	case TASK_KERNEL_PORT:
1765 		return TASK_FLAVOR_CONTROL;
1766 	case TASK_NAME_PORT:
1767 		return TASK_FLAVOR_NAME;
1768 	case TASK_INSPECT_PORT:
1769 		return TASK_FLAVOR_INSPECT;
1770 	case TASK_READ_PORT:
1771 		return TASK_FLAVOR_READ;
1772 	default:
1773 		break;
1774 	}
1775 	panic("invalid special port: %d", which);
1776 }
1777 
1778 /*
1779  *	Routine:	task_get_special_port [kernel call]
1780  *	Purpose:
1781  *		Clones a send right for one of the task's
1782  *		special ports.
1783  *	Conditions:
1784  *		Nothing locked.
1785  *	Returns:
1786  *		KERN_SUCCESS		    Extracted a send right.
1787  *		KERN_INVALID_ARGUMENT	The task is null.
1788  *		KERN_FAILURE		    The task/space is dead.
1789  *		KERN_INVALID_ARGUMENT	Invalid special port.
1790  */
1791 
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t        flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* refuse requests that exceed what the caller's port flavor allows */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		/* task IPC has been torn down */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* a settable self port distinct from the control port wins */
		if (task->itk_settable_self != port) {
			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
		}
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		itk_unlock(task);
		mach_task_flavor_t current_flavor = task_special_type_to_flavor(which);
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
		    task, IKOT_TASK_NAME);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = host_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1880 
1881 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t      *portp)
{
	/* in-kernel callers always operate at control flavor */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
1890 
1891 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* translate without triggering eval-time policy hooks */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_type(port);

#if CONFIG_MACF
	/* MACF policy gates user-space getters; kernel callers skip this */
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		/*
		 * if `mac_task_check_get_movable_control_port` returned 0,
		 * then we must also have a movable task.
		 * see `task_set_exc_guard_default`
		 */
		assert(!task_is_immovable(current_task()));
	}

	/* derive the request's flavor from the kobject type of the port */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	/* drop the reference taken by the port-to-task conversion */
	task_deallocate(task);
	return kr;
}
1955 
1956 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)1957 special_port_allowed_with_task_flavor(
1958 	int                which,
1959 	mach_task_flavor_t flavor)
1960 {
1961 	switch (flavor) {
1962 	case TASK_FLAVOR_CONTROL:
1963 		return KERN_SUCCESS;
1964 
1965 	case TASK_FLAVOR_READ:
1966 
1967 		switch (which) {
1968 		case TASK_READ_PORT:
1969 		case TASK_INSPECT_PORT:
1970 		case TASK_NAME_PORT:
1971 			return KERN_SUCCESS;
1972 		default:
1973 			return KERN_INVALID_CAPABILITY;
1974 		}
1975 
1976 	case TASK_FLAVOR_INSPECT:
1977 
1978 		switch (which) {
1979 		case TASK_INSPECT_PORT:
1980 		case TASK_NAME_PORT:
1981 			return KERN_SUCCESS;
1982 		default:
1983 			return KERN_INVALID_CAPABILITY;
1984 		}
1985 
1986 	default:
1987 		return KERN_INVALID_CAPABILITY;
1988 	}
1989 }
1990 
1991 /*
1992  *	Routine:	task_set_special_port [MIG call]
1993  *	Purpose:
1994  *		Changes one of the task's special ports,
1995  *		setting it to the supplied send right.
1996  *	Conditions:
1997  *		Nothing locked.  If successful, consumes
1998  *		the supplied send right.
1999  *	Returns:
2000  *		KERN_SUCCESS		    Changed the special port.
2001  *		KERN_INVALID_ARGUMENT	The task is null.
2002  *      KERN_INVALID_RIGHT      Port is marked as immovable.
2003  *		KERN_FAILURE		    The task/space is dead.
2004  *		KERN_INVALID_ARGUMENT	Invalid special port.
2005  *      KERN_NO_ACCESS		    Restricted access to set port.
2006  */
2007 
kern_return_t
task_set_special_port_from_user(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MACF policy gates user-space setters; kernel callers skip this */
	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
		return KERN_DENIED;
	}
#endif

	return task_set_special_port(task, which, port);
}
2026 
2027 /* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* driver tasks may not rewire special ports */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through task_get_special_port()
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return KERN_INVALID_RIGHT;
	}


	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2071 
2072 /*
2073  *	Routine:	task_set_special_port_internal
2074  *	Purpose:
2075  *		Changes one of the task's special ports,
2076  *		setting it to the supplied send right.
2077  *	Conditions:
2078  *		Nothing locked.  If successful, consumes
2079  *		the supplied send right.
2080  *	Returns:
2081  *		KERN_SUCCESS		Changed the special port.
2082  *		KERN_INVALID_ARGUMENT	The task is null.
2083  *		KERN_FAILURE		The task/space is dead.
2084  *		KERN_INVALID_ARGUMENT	Invalid special port.
2085  *      KERN_NO_ACCESS		Restricted access to overwrite port.
2086  */
2087 
kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;       /* displaced right, released after unlock */
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	switch (which) {
#if CONFIG_CSR
	/*
	 * Without CONFIG_CSR, TASK_KERNEL_PORT falls through to the default
	 * case below and is rejected with KERN_INVALID_ARGUMENT.
	 */
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;
#endif /* CONFIG_CSR */

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Drop the displaced send right outside the itk lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2167 /*
2168  *	Routine:	mach_ports_register [kernel call]
2169  *	Purpose:
2170  *		Stash a handful of port send rights in the task.
2171  *		Child tasks will inherit these rights, but they
2172  *		must use mach_ports_lookup to acquire them.
2173  *
2174  *		The rights are supplied in a (wired) kalloc'd segment.
2175  *		Rights which aren't supplied are assumed to be null.
2176  *	Conditions:
2177  *		Nothing locked.  If successful, consumes
2178  *		the supplied rights and memory.
2179  *	Returns:
2180  *		KERN_SUCCESS		    Stashed the port rights.
2181  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2182  *		KERN_INVALID_ARGUMENT	The task is null.
2183  *		KERN_INVALID_ARGUMENT	The task is dead.
2184  *		KERN_INVALID_ARGUMENT	The memory param is null.
2185  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2186  */
2187 
2188 kern_return_t
_kernelrpc_mach_ports_register3(task_t task,mach_port_t port1,mach_port_t port2,mach_port_t port3)2189 _kernelrpc_mach_ports_register3(
2190 	task_t                  task,
2191 	mach_port_t             port1,
2192 	mach_port_t             port2,
2193 	mach_port_t             port3)
2194 {
2195 	ipc_port_t ports[TASK_PORT_REGISTER_MAX] = {
2196 		port1, port2, port3,
2197 	};
2198 
2199 	if (task == TASK_NULL) {
2200 		return KERN_INVALID_ARGUMENT;
2201 	}
2202 
2203 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2204 		/*
2205 		 * rdar://70585367
2206 		 * disallow immovable send so other process can't retrieve it through mach_ports_lookup()
2207 		 */
2208 		if (!ipc_can_stash_naked_send(ports[i])) {
2209 			return KERN_INVALID_RIGHT;
2210 		}
2211 	}
2212 
2213 	itk_lock(task);
2214 	if (!task->ipc_active) {
2215 		itk_unlock(task);
2216 		return KERN_INVALID_ARGUMENT;
2217 	}
2218 
2219 	/*
2220 	 *	Replace the old send rights with the new.
2221 	 *	Release the old rights after unlocking.
2222 	 */
2223 
2224 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2225 		ipc_port_t old;
2226 
2227 		old = task->itk_registered[i];
2228 		task->itk_registered[i] = ports[i];
2229 		ports[i] = old;
2230 	}
2231 
2232 	itk_unlock(task);
2233 
2234 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2235 		ipc_port_release_send(ports[i]);
2236 	}
2237 
2238 	return KERN_SUCCESS;
2239 }
2240 
2241 /*
2242  *	Routine:	mach_ports_lookup [kernel call]
2243  *	Purpose:
2244  *		Retrieves (clones) the stashed port send rights.
2245  *	Conditions:
2246  *		Nothing locked.  If successful, the caller gets
2247  *		rights and memory.
2248  *	Returns:
2249  *		KERN_SUCCESS		Retrieved the send rights.
2250  *		KERN_INVALID_ARGUMENT	The task is null.
2251  *		KERN_INVALID_ARGUMENT	The task is dead.
2252  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2253  */
2254 
2255 kern_return_t
_kernelrpc_mach_ports_lookup3(task_t task,ipc_port_t * port1,ipc_port_t * port2,ipc_port_t * port3)2256 _kernelrpc_mach_ports_lookup3(
2257 	task_t                  task,
2258 	ipc_port_t             *port1,
2259 	ipc_port_t             *port2,
2260 	ipc_port_t             *port3)
2261 {
2262 	if (task == TASK_NULL) {
2263 		return KERN_INVALID_ARGUMENT;
2264 	}
2265 
2266 	itk_lock(task);
2267 	if (!task->ipc_active) {
2268 		itk_unlock(task);
2269 		return KERN_INVALID_ARGUMENT;
2270 	}
2271 
2272 	*port1 = ipc_port_copy_send_any(task->itk_registered[0]);
2273 	*port2 = ipc_port_copy_send_any(task->itk_registered[1]);
2274 	*port3 = ipc_port_copy_send_any(task->itk_registered[2]);
2275 
2276 	itk_unlock(task);
2277 
2278 	return KERN_SUCCESS;
2279 }
2280 
/*
 *	Routine:	task_conversion_eval_internal
 *	Purpose:
 *		Security policy for translating between a task/thread port and
 *		the underlying task (both in-trans and out-trans directions).
 *	Args:
 *		caller    - task requesting the translation
 *		victim    - task being translated
 *		out_trans - TRUE for task-to-port direction, FALSE for port-to-task
 *		flavor    - control or read flavor
 *	Returns:
 *		KERN_SUCCESS if the translation is permitted,
 *		KERN_INVALID_SECURITY otherwise (may panic on illegal in-trans
 *		of a foreign control port on non-macOS platforms).
 */
static kern_return_t
task_conversion_eval_internal(
	task_t             caller,
	task_t             victim,
	boolean_t          out_trans,
	int                flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/* task and thread flavor constants coincide, so both asserts hold */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2375 
/* In-trans (port-to-task) policy evaluation; out_trans == FALSE. */
kern_return_t
task_conversion_eval(task_t caller, task_t victim, int flavor)
{
	/* flavor is mach_task_flavor_t or mach_thread_flavor_t */
	static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
	static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
	return task_conversion_eval_internal(caller, victim, FALSE, flavor);
}
2384 
/* Out-trans (task/thread-to-port) policy evaluation; control flavor only. */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
{
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
	return task_conversion_eval_internal(caller, victim, TRUE, flavor);
}
2391 
2392 /*
2393  *	Routine:	task_port_kotype_valid_for_flavor
2394  *	Purpose:
2395  *		Check whether the kobject type of a mach port
2396  *      is valid for conversion to a task of given flavor.
2397  */
static boolean_t
task_port_kotype_valid_for_flavor(
	natural_t          kotype,
	mach_task_flavor_t flavor)
{
	switch (flavor) {
	/*
	 * Ascending capability: each weaker flavor accepts its own kobject
	 * type plus every stronger one via deliberate fallthrough, e.g. a
	 * TASK_FLAVOR_NAME request is satisfiable by NAME, INSPECT, READ,
	 * or CONTROL ports, while TASK_FLAVOR_CONTROL accepts only CONTROL.
	 */
	case TASK_FLAVOR_NAME:
		if (kotype == IKOT_TASK_NAME) {
			return TRUE;
		}
		OS_FALLTHROUGH;
	case TASK_FLAVOR_INSPECT:
		if (kotype == IKOT_TASK_INSPECT) {
			return TRUE;
		}
		OS_FALLTHROUGH;
	case TASK_FLAVOR_READ:
		if (kotype == IKOT_TASK_READ) {
			return TRUE;
		}
		OS_FALLTHROUGH;
	case TASK_FLAVOR_CONTROL:
		if (kotype == IKOT_TASK_CONTROL) {
			return TRUE;
		}
		break;
	default:
		panic("strange task flavor");
	}

	return FALSE;
}
2431 
2432 /*
2433  *	Routine: convert_port_to_task_with_flavor_locked_noref
2434  *	Purpose:
2435  *		Internal helper routine to convert from a locked port to a task.
2436  *	Args:
2437  *		port   - target port
2438  *		flavor - requested task port flavor
2439  *		options - port translation options
2440  *	Conditions:
2441  *		Port is locked and active.
2442  */
static task_t
convert_port_to_task_with_flavor_locked_noref(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_kobject_type_t type = ip_type(port);
	task_t task;

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* The port's kobject type must grant at least `flavor` capability. */
	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
		return TASK_NULL;
	}

	task = ipc_kobject_get_locked(port, type);
	if (task == TASK_NULL) {
		return TASK_NULL;
	}

	/* Corpse tasks only translate when the caller explicitly allows them. */
	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
		assert(flavor == TASK_FLAVOR_CONTROL);
		return TASK_NULL;
	}

	/* TODO: rdar://42389187 */
	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
	}

	/* Control/read flavors are subject to the conversion security policy. */
	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
	    task_conversion_eval(current_task(), task, flavor)) {
		return TASK_NULL;
	}

	return task;
}
2481 
2482 /*
2483  *	Routine: convert_port_to_task_with_flavor_locked
2484  *	Purpose:
2485  *		Internal helper routine to convert from a locked port to a task.
2486  *	Args:
2487  *		port   - target port
2488  *		flavor - requested task port flavor
2489  *		options - port translation options
2490  *		grp    - task reference group
2491  *	Conditions:
2492  *		Port is locked and active.
2493  *		Produces task ref or TASK_NULL.
2494  */
2495 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2496 convert_port_to_task_with_flavor_locked(
2497 	ipc_port_t              port,
2498 	mach_task_flavor_t      flavor,
2499 	port_intrans_options_t  options,
2500 	task_grp_t              grp)
2501 {
2502 	task_t task;
2503 
2504 	task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2505 	    options);
2506 
2507 	if (task != TASK_NULL) {
2508 		task_reference_grp(task, grp);
2509 	}
2510 
2511 	return task;
2512 }
2513 
2514 /*
2515  *	Routine:	convert_port_to_task_with_flavor
2516  *	Purpose:
2517  *		Internal helper for converting from a port to a task.
2518  *		Doesn't consume the port ref; produces a task ref,
2519  *		which may be null.
2520  *	Args:
2521  *		port   - target port
2522  *		flavor - requested task port flavor
2523  *		options - port translation options
2524  *		grp    - task reference group
2525  *	Conditions:
2526  *		Nothing locked.
2527  */
2528 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2529 convert_port_to_task_with_flavor(
2530 	ipc_port_t         port,
2531 	mach_task_flavor_t flavor,
2532 	port_intrans_options_t options,
2533 	task_grp_t         grp)
2534 {
2535 	task_t task = TASK_NULL;
2536 	task_t self = current_task();
2537 
2538 	if (IP_VALID(port)) {
2539 		if (port == self->itk_task_ports[TASK_FLAVOR_CONTROL]) {
2540 			task_reference_grp(self, grp);
2541 			return self;
2542 		}
2543 
2544 		ip_mq_lock(port);
2545 		if (ip_active(port)) {
2546 			task = convert_port_to_task_with_flavor_locked(port,
2547 			    flavor, options, grp);
2548 		}
2549 		ip_mq_unlock(port);
2550 	}
2551 
2552 	return task;
2553 }
2554 
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	/* Control flavor, kernel ref group; corpse tasks refused (no ALLOW_CORPSE). */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}
2562 
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	/* Same as convert_port_to_task() but accounted to the MIG ref group. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}
2570 
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	/* Read flavor, kernel ref group; corpse tasks allowed. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2578 
task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	/* Read flavor that also skips the task_conversion_eval() policy check. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2586 
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	/* Same as convert_port_to_task_read() but accounted to the MIG ref group. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2594 
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	/* Inspect flavor always skips eval (see rdar://42389187 assert upstream). */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2602 
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	/* Identical to convert_port_to_task_inspect(); inspect always skips eval. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2610 
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	/* Inspect flavor accounted to the MIG ref group. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2618 
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	/* Name flavor (weakest); skips eval and accepts corpse tasks. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2626 
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	/* Name flavor accounted to the MIG ref group. */
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2634 
2635 /*
2636  *	Routine:	convert_port_to_task_policy
2637  *	Purpose:
2638  *		Convert from a port to a task.
2639  *		Doesn't consume the port ref; produces a task ref,
2640  *		which may be null.
2641  *		If the port is being used with task_port_set(), any task port
2642  *		type other than TASK_CONTROL requires an entitlement. If the
2643  *		port is being used with task_port_get(), TASK_NAME requires an
2644  *		entitlement.
2645  *	Conditions:
2646  *		Nothing locked.
2647  */
2648 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2649 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2650 {
2651 	task_t task = TASK_NULL;
2652 
2653 	if (!IP_VALID(port)) {
2654 		return TASK_NULL;
2655 	}
2656 
2657 	task = set ?
2658 	    convert_port_to_task_mig(port) :
2659 	    convert_port_to_task_inspect_mig(port);
2660 
2661 	if (task == TASK_NULL &&
2662 	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2663 		task = convert_port_to_task_name_mig(port);
2664 	}
2665 
2666 	return task;
2667 }
2668 
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	/* set == true: requires a control port (see convert_port_to_task_policy_mig). */
	return convert_port_to_task_policy_mig(port, true);
}
2674 
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	/* set == false: an inspect port suffices (see convert_port_to_task_policy_mig). */
	return convert_port_to_task_policy_mig(port, false);
}
2680 
2681 /*
2682  *	Routine:	convert_port_to_task_suspension_token
2683  *	Purpose:
2684  *		Convert from a port to a task suspension token.
2685  *		Doesn't consume the port ref; produces a suspension token ref,
2686  *		which may be null.
2687  *	Conditions:
2688  *		Nothing locked.
2689  */
2690 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2691 convert_port_to_task_suspension_token_grp(
2692 	ipc_port_t              port,
2693 	task_grp_t              grp)
2694 {
2695 	task_suspension_token_t task = TASK_NULL;
2696 
2697 	if (IP_VALID(port)) {
2698 		ip_mq_lock(port);
2699 		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2700 		if (task != TASK_NULL) {
2701 			task_reference_grp(task, grp);
2702 		}
2703 		ip_mq_unlock(port);
2704 	}
2705 
2706 	return task;
2707 }
2708 
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	/* Suspension token ref accounted to the external group. */
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}
2715 
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	/* Suspension token ref accounted to the MIG group. */
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}
2722 
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	/* Suspension token ref accounted to the kernel group. */
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2729 
2730 /*
2731  *	Routine:	convert_port_to_space_with_flavor
2732  *	Purpose:
2733  *		Internal helper for converting from a port to a space.
2734  *		Doesn't consume the port ref; produces a space ref,
2735  *		which may be null.
2736  *	Args:
2737  *		port   - target port
2738  *		flavor - requested ipc space flavor
2739  *		options - port translation options
2740  *	Conditions:
2741  *		Nothing locked.
2742  */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* Name ports never grant access to a task's IPC space. */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2781 
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	/* Control flavor; corpse tasks refused (no ALLOW_CORPSE option). */
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2789 
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	/* Read flavor; corpse tasks allowed. */
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2797 
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	/* Read flavor that also skips the task_conversion_eval() policy check. */
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2805 
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	/* Inspect flavor always skips eval and accepts corpse tasks. */
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2813 
2814 /*
2815  *	Routine:	convert_port_to_map_with_flavor
2816  *	Purpose:
2817  *		Internal helper for converting from a port to a map.
2818  *		Doesn't consume the port ref; produces a map ref,
2819  *		which may be null.
2820  *	Args:
2821  *		port   - target port
2822  *		flavor - requested vm map flavor
2823  *		options - port translation options
2824  *	Conditions:
2825  *		Nothing locked.
2826  */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* map access always goes through the full conversion policy check */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* A user-reachable task must never be backed by the kernel map. */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
2876 
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	/* Control flavor; corpse tasks refused (no ALLOW_CORPSE option). */
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2884 
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	/* Read flavor; corpse tasks allowed. */
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2892 
vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* Deliberate stub: inspect-level map access is not implemented. */
	/* there is no vm_map_inspect_t routines at the moment. */
	return VM_MAP_INSPECT_NULL;
}
2900 
2901 /*
2902  *	Routine:	thread_port_kotype_valid_for_flavor
2903  *	Purpose:
2904  *		Check whether the kobject type of a mach port
2905  *      is valid for conversion to a thread of given flavor.
2906  */
static boolean_t
thread_port_kotype_valid_for_flavor(
	natural_t            kotype,
	mach_thread_flavor_t flavor)
{
	switch (flavor) {
	/*
	 * Ascending capability: each weaker flavor accepts its own kobject
	 * type plus every stronger one via deliberate fallthrough, e.g. a
	 * THREAD_FLAVOR_INSPECT request is satisfiable by INSPECT, READ,
	 * or CONTROL ports, while THREAD_FLAVOR_CONTROL accepts only CONTROL.
	 */
	case THREAD_FLAVOR_INSPECT:
		if (kotype == IKOT_THREAD_INSPECT) {
			return TRUE;
		}
		OS_FALLTHROUGH;
	case THREAD_FLAVOR_READ:
		if (kotype == IKOT_THREAD_READ) {
			return TRUE;
		}
		OS_FALLTHROUGH;
	case THREAD_FLAVOR_CONTROL:
		if (kotype == IKOT_THREAD_CONTROL) {
			return TRUE;
		}
		break;
	default:
		panic("strange thread flavor");
	}

	return FALSE;
}
2935 
2936 /*
2937  *	Routine: convert_port_to_thread_with_flavor_locked
2938  *	Purpose:
2939  *		Internal helper routine to convert from a locked port to a thread.
2940  *	Args:
2941  *		port   - target port
2942  *		flavor - requested thread port flavor
2943  *		options - port translation options
2944  *	Conditions:
2945  *		Port is locked and active.
2946  *		Produces a thread ref or THREAD_NULL.
2947  */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t               port,
	mach_thread_flavor_t     flavor,
	port_intrans_options_t   options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_type(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* The port's kobject type must grant at least `flavor` capability. */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* Optionally refuse to resolve the calling thread itself. */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* Caller restricts resolution to threads of its own task. */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* Corpse-task threads only translate when explicitly allowed. */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* The owning task must pass the conversion security policy. */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	thread_reference(thread);
	return thread;
}
3002 
3003 /*
3004  *	Routine:	convert_port_to_thread_with_flavor
3005  *	Purpose:
3006  *		Internal helper for converting from a port to a thread.
3007  *		Doesn't consume the port ref; produces a thread ref,
3008  *		which may be null.
3009  *	Args:
3010  *		port   - target port
3011  *		flavor - requested thread port flavor
3012  *		options - port translation options
3013  *	Conditions:
3014  *		Nothing locked.
3015  */
3016 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3017 convert_port_to_thread_with_flavor(
3018 	ipc_port_t           port,
3019 	mach_thread_flavor_t flavor,
3020 	port_intrans_options_t options)
3021 {
3022 	thread_t thread = THREAD_NULL;
3023 
3024 	if (IP_VALID(port)) {
3025 		ip_mq_lock(port);
3026 		if (ip_active(port)) {
3027 			thread = convert_port_to_thread_with_flavor_locked(port,
3028 			    flavor, options);
3029 		}
3030 		ip_mq_unlock(port);
3031 	}
3032 
3033 	return thread;
3034 }
3035 
thread_t
convert_port_to_thread(
	ipc_port_t              port)
{
	/* Control flavor; corpse-task threads refused (no ALLOW_CORPSE). */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
3043 
thread_read_t
convert_port_to_thread_read(
	ipc_port_t              port)
{
	/* Read flavor; corpse-task threads allowed. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3051 
static thread_read_t
convert_port_to_thread_read_no_eval(
	ipc_port_t              port)
{
	/* Read flavor that also skips the task_conversion_eval() policy check. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3059 
thread_inspect_t
convert_port_to_thread_inspect(
	ipc_port_t              port)
{
	/* Inspect flavor always skips eval and accepts corpse-task threads. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3067 
static thread_inspect_t
convert_port_to_thread_inspect_no_eval(
	ipc_port_t              port)
{
	/* Identical to convert_port_to_thread_inspect(); inspect always skips eval. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3075 
3076 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3077 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3078 {
3079 	switch (flavor) {
3080 	case THREAD_FLAVOR_CONTROL:
3081 		return IKOT_THREAD_CONTROL;
3082 	case THREAD_FLAVOR_READ:
3083 		return IKOT_THREAD_READ;
3084 	default:
3085 		return IKOT_THREAD_INSPECT;
3086 	}
3087 }
3088 
3089 
3090 ipc_port_t
convert_thread_to_port_immovable(thread_t thread)3091 convert_thread_to_port_immovable(
3092 	thread_t                thread)
3093 {
3094 	thread_ro_t tro = get_thread_ro(thread);
3095 	ipc_port_t  port = IP_NULL;
3096 
3097 	thread_mtx_lock(thread);
3098 
3099 	if (thread->ipc_active) {
3100 		port = ipc_kobject_make_send(tro->tro_ports[THREAD_FLAVOR_CONTROL],
3101 		    thread, IKOT_THREAD_CONTROL);
3102 	}
3103 
3104 	thread_mtx_unlock(thread);
3105 	thread_deallocate(thread);
3106 	return port;
3107 }
3108 
/*
 *	Routine:	convert_thread_to_port_with_flavor
 *	Purpose:
 *		Convert from a thread to a port of given flavor.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid (IP_NULL or IP_DEAD).
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t              thread,
	thread_ro_t           tro,
	mach_thread_flavor_t  flavor)
{
	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	/* the thread mutex serializes against ipc_active teardown and
	 * against concurrent lazy creation of the read/inspect ports */
	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	if (!thread->ipc_active) {
		/* thread IPC already torn down: caller gets IP_NULL */
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		/* control flavor: mint a send right on the existing control port */
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	} else if (IP_VALID(port)) {
		/* read/inspect port already exists: just add a send right */
		(void)ipc_kobject_make_send(port, thread, kotype);
	} else {
		ipc_object_label_t label = IPC_OBJECT_LABEL(kotype);

		/*
		 * If Developer Mode is off, substitute read port for control
		 * port if copying out to owning task's space, for the sake of
		 * in-process exception handler.
		 *
		 * Also see: exception_deliver().
		 */
		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
			label = ipc_kobject_label_alloc(kotype,
			    IPC_LABEL_SUBST_THREAD_READ, tro->tro_ports[THREAD_FLAVOR_CONTROL]);
		}

		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(thread, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND);

		/* publish the lazily-created port into the read-only thread struct */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3185 
3186 ipc_port_t
convert_thread_to_port(thread_t thread)3187 convert_thread_to_port(
3188 	thread_t                thread)
3189 {
3190 	thread_ro_t tro = get_thread_ro(thread);
3191 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3192 }
3193 
3194 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3195 convert_thread_read_to_port(thread_read_t thread)
3196 {
3197 	thread_ro_t tro = get_thread_ro(thread);
3198 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3199 }
3200 
3201 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3202 convert_thread_inspect_to_port(thread_inspect_t thread)
3203 {
3204 	thread_ro_t tro = get_thread_ro(thread);
3205 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3206 }
3207 
3208 void
convert_thread_array_to_ports(thread_act_array_t array,size_t count,mach_thread_flavor_t flavor)3209 convert_thread_array_to_ports(
3210 	thread_act_array_t      array,
3211 	size_t                  count,
3212 	mach_thread_flavor_t    flavor)
3213 {
3214 	thread_t *thread_list = (thread_t *)array;
3215 
3216 	for (size_t i = 0; i < count; i++) {
3217 		thread_t   thread = thread_list[i];
3218 		ipc_port_t port;
3219 
3220 		switch (flavor) {
3221 		case THREAD_FLAVOR_CONTROL:
3222 			port = convert_thread_to_port(thread);
3223 			break;
3224 		case THREAD_FLAVOR_READ:
3225 			port = convert_thread_read_to_port(thread);
3226 			break;
3227 		case THREAD_FLAVOR_INSPECT:
3228 			port = convert_thread_inspect_to_port(thread);
3229 			break;
3230 		}
3231 
3232 		array[i].port = port;
3233 	}
3234 }
3235 
3236 
3237 /*
3238  *	Routine:	port_name_to_thread
3239  *	Purpose:
3240  *		Convert from a port name to a thread reference
3241  *		A name of MACH_PORT_NULL is valid for the null thread.
3242  *	Conditions:
3243  *		Nothing locked.
3244  */
3245 thread_t
port_name_to_thread(mach_port_name_t name,port_intrans_options_t options)3246 port_name_to_thread(
3247 	mach_port_name_t         name,
3248 	port_intrans_options_t options)
3249 {
3250 	thread_t        thread = THREAD_NULL;
3251 	ipc_port_t      kport;
3252 	kern_return_t kr;
3253 
3254 	if (MACH_PORT_VALID(name)) {
3255 		kr = ipc_port_translate_send(current_space(), name, &kport);
3256 		if (kr == KERN_SUCCESS) {
3257 			/* port is locked and active */
3258 			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
3259 			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
3260 			thread = convert_port_to_thread_with_flavor_locked(kport,
3261 			    THREAD_FLAVOR_CONTROL, options);
3262 			ip_mq_unlock(kport);
3263 		}
3264 	}
3265 
3266 	return thread;
3267 }
3268 
3269 /*
3270  *	Routine:	port_name_is_pinned_self
3271  *	Purpose:
3272  *		Returns whether this port name is for the pinned
3273  *		mach_task_self (if it exists).
3274  *
3275  *		task_self_trap() will memorize the name the port has
3276  *		in the space in ip_receiver_name when it gets pinned,
3277  *		which we can use to fast-track this answer without
3278  *		taking any lock.
3279  *
3280  *		ipc_task_disable() will set `ip_receiver_name` back to
3281  *		MACH_PORT_SPECIAL_DEFAULT.
3282  *
3283  *	Conditions:
3284  *		self must be current_task()
3285  *		Nothing locked.
3286  */
3287 static bool
port_name_is_pinned_self(task_t self,mach_port_name_t name)3288 port_name_is_pinned_self(
3289 	task_t             self,
3290 	mach_port_name_t   name)
3291 {
3292 	ipc_port_t kport = self->itk_task_ports[TASK_FLAVOR_CONTROL];
3293 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3294 	       ip_get_receiver_name(kport) == name;
3295 }
3296 
/*
 *	Routine:	port_name_to_current_task*_noref
 *	Purpose:
 *		Convert from a port name to current_task()
 *		A name of MACH_PORT_NULL is valid for the null task.
 *
 *		If current_task() is in the process of being terminated,
 *		this might return a non NULL task even when port_name_to_task()
 *		would return TASK_NULL.
 *
 *		However, this is an acceptable race that can't be controlled by
 *		userspace, and that downstream code using the returned task
 *		has to handle anyway.
 *
 *		ipc_space_disable() does try to narrow this race,
 *		by causing port_name_is_pinned_self() to fail.
 *
 *	Returns:
 *		current_task() if the port name was for current_task()
 *		at the appropriate flavor.
 *
 *		TASK_NULL otherwise.
 *
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t   name,
	mach_task_flavor_t flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() name, answered lock-free */
	if (port_name_is_pinned_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked; verify kobject type matches the flavor */
			ipc_kobject_type_t type = ip_type(kport);
			if (task_port_kotype_valid_for_flavor(type, flavor)) {
				task = ipc_kobject_get_locked(kport, type);
			}
			ip_mq_unlock(kport);
			/* only current_task() may be returned without a reference */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3353 
3354 task_t
port_name_to_current_task_noref(mach_port_name_t name)3355 port_name_to_current_task_noref(
3356 	mach_port_name_t name)
3357 {
3358 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3359 }
3360 
3361 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3362 port_name_to_current_task_read_noref(
3363 	mach_port_name_t name)
3364 {
3365 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3366 }
3367 
/*
 *	Routine:	port_name_to_task_grp
 *	Purpose:
 *		Convert from a port name to a task reference
 *		A name of MACH_PORT_NULL is valid for the null task.
 *		Acquire a send right if [inout] @kportp is non-null.
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_task_grp(
	mach_port_name_t name,
	mach_task_flavor_t flavor,
	port_intrans_options_t options,
	task_grp_t       grp,
	ipc_port_t       *kportp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/*
	 * Fast path: the pinned mach_task_self() name is answered without
	 * locks — but only when no send right was requested, since that
	 * would require the locked slow path below.
	 */
	if (!kportp && port_name_is_pinned_self(self, name)) {
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(kport,
			    flavor, options, grp);
			if (kportp) {
				/* send right requested */
				ipc_port_copy_send_any_locked(kport);
				*kportp = kport;
			}
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3411 
3412 task_t
port_name_to_task_external(mach_port_name_t name)3413 port_name_to_task_external(
3414 	mach_port_name_t name)
3415 {
3416 	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_EXTERNAL, NULL);
3417 }
3418 
3419 task_t
port_name_to_task_kernel(mach_port_name_t name)3420 port_name_to_task_kernel(
3421 	mach_port_name_t name)
3422 {
3423 	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL, NULL);
3424 }
3425 
3426 /*
3427  *	Routine:	port_name_to_task_read
3428  *	Purpose:
3429  *		Convert from a port name to a task reference
3430  *		A name of MACH_PORT_NULL is valid for the null task.
3431  *	Conditions:
3432  *		Nothing locked.
3433  */
3434 task_read_t
port_name_to_task_read(mach_port_name_t name)3435 port_name_to_task_read(
3436 	mach_port_name_t name)
3437 {
3438 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, NULL);
3439 }
3440 
3441 /*
3442  *	Routine:	port_name_to_task_read_and_send_right
3443  *	Purpose:
3444  *		Convert from a port name to a task reference
3445  *		A name of MACH_PORT_NULL is valid for the null task.
3446  *	Conditions:
3447  *		On success, ipc port returned with a +1 send right.
3448  */
3449 task_read_t
port_name_to_task_read_and_send_right(mach_port_name_t name,ipc_port_t * kportp)3450 port_name_to_task_read_and_send_right(
3451 	mach_port_name_t name,
3452 	ipc_port_t *kportp)
3453 {
3454 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, kportp);
3455 }
3456 
3457 /*
3458  *	Routine:	port_name_to_task_read_no_eval
3459  *	Purpose:
3460  *		Convert from a port name to a task reference
3461  *		A name of MACH_PORT_NULL is valid for the null task.
3462  *		Skips task_conversion_eval() during conversion.
3463  *	Conditions:
3464  *		Nothing locked.
3465  */
3466 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3467 port_name_to_task_read_no_eval(
3468 	mach_port_name_t name)
3469 {
3470 	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3471 	    PORT_INTRANS_ALLOW_CORPSE_TASK;
3472 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, options, TASK_GRP_KERNEL, NULL);
3473 }
3474 
3475 /*
3476  *	Routine:	port_name_to_task_name
3477  *	Purpose:
3478  *		Convert from a port name to a task reference
3479  *		A name of MACH_PORT_NULL is valid for the null task.
3480  *	Conditions:
3481  *		Nothing locked.
3482  */
3483 task_name_t
port_name_to_task_name(mach_port_name_t name)3484 port_name_to_task_name(
3485 	mach_port_name_t name)
3486 {
3487 	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3488 	    PORT_INTRANS_ALLOW_CORPSE_TASK;
3489 	return port_name_to_task_grp(name, TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL, NULL);
3490 }
3491 
3492 /*
3493  *	Routine:	port_name_to_task_id_token
3494  *	Purpose:
3495  *		Convert from a port name to a task identity token reference
3496  *	Conditions:
3497  *		Nothing locked.
3498  */
3499 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3500 port_name_to_task_id_token(
3501 	mach_port_name_t name)
3502 {
3503 	ipc_port_t port;
3504 	kern_return_t kr;
3505 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3506 
3507 	if (MACH_PORT_VALID(name)) {
3508 		kr = ipc_port_translate_send(current_space(), name, &port);
3509 		if (kr == KERN_SUCCESS) {
3510 			token = convert_port_to_task_id_token(port);
3511 			ip_mq_unlock(port);
3512 		}
3513 	}
3514 	return token;
3515 }
3516 
3517 /*
3518  *	Routine:	port_name_to_host
3519  *	Purpose:
3520  *		Convert from a port name to a host pointer.
3521  *		NOTE: This does _not_ return a +1 reference to the host_t
3522  *	Conditions:
3523  *		Nothing locked.
3524  */
3525 host_t
port_name_to_host(mach_port_name_t name)3526 port_name_to_host(
3527 	mach_port_name_t name)
3528 {
3529 	host_t host = HOST_NULL;
3530 	kern_return_t kr;
3531 	ipc_port_t port;
3532 
3533 	if (MACH_PORT_VALID(name)) {
3534 		kr = ipc_port_translate_send(current_space(), name, &port);
3535 		if (kr == KERN_SUCCESS) {
3536 			host = convert_port_to_host(port);
3537 			ip_mq_unlock(port);
3538 		}
3539 	}
3540 	return host;
3541 }
3542 
3543 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3544 task_flavor_to_kotype(mach_task_flavor_t flavor)
3545 {
3546 	switch (flavor) {
3547 	case TASK_FLAVOR_CONTROL:
3548 		return IKOT_TASK_CONTROL;
3549 	case TASK_FLAVOR_READ:
3550 		return IKOT_TASK_READ;
3551 	case TASK_FLAVOR_INSPECT:
3552 		return IKOT_TASK_INSPECT;
3553 	default:
3554 		return IKOT_TASK_NAME;
3555 	}
3556 }
3557 
/*
 *	Routine:	convert_task_to_port_with_flavor
 *	Purpose:
 *		Convert from a task to a port of given flavor.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid (IP_NULL or IP_DEAD).
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t              task,
	mach_task_flavor_t  flavor,
	task_grp_t          grp)
{
	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	/* itk_lock serializes against ipc_active teardown and lazy port creation */
	itk_lock(task);

	if (!task->ipc_active) {
		/* task IPC already torn down: caller gets IP_NULL */
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		/* control/name ports exist while ipc_active: add a send right */
		port = ipc_kobject_make_send(task->itk_task_ports[flavor],
		    task, kotype);
		break;
	/*
	 * Claim a send right on the task read/inspect port,
	 * and request a no-senders notification on that port
	 * (if none outstanding).
	 *
	 * The task's itk_lock is used to synchronize the handling
	 * of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		port = task->itk_task_ports[flavor];
		if (IP_VALID(port)) {
			(void)ipc_kobject_make_send(port, task, kotype);
		} else {
			ipc_object_label_t label = IPC_OBJECT_LABEL(kotype);

			/*
			 * If Developer Mode is off, substitute read port for control port if
			 * copying out to owning task's space, for the sake of in-process
			 * exception handler.
			 *
			 * Also see: exception_deliver().
			 */
			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
				label = ipc_kobject_label_alloc(kotype,
				    IPC_LABEL_SUBST_TASK_READ, task->itk_task_ports[TASK_FLAVOR_CONTROL]);
			}

			/* lazily create the port and cache it on the task */
			port = ipc_kobject_alloc_port(task, label,
			    IPC_KOBJECT_ALLOC_MAKE_SEND);
			task->itk_task_ports[flavor] = port;
		}
		break;
	}

exit:
	itk_unlock(task);
	task_deallocate_grp(task, grp);
	return port;
}
3639 
3640 ipc_port_t
convert_corpse_to_port_and_nsrequest(task_t corpse)3641 convert_corpse_to_port_and_nsrequest(
3642 	task_t          corpse)
3643 {
3644 	ipc_port_t port = IP_NULL;
3645 
3646 	assert(task_is_a_corpse(corpse));
3647 	itk_lock(corpse);
3648 	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
3649 	port = ipc_kobject_make_send(port, corpse, IKOT_TASK_CONTROL);
3650 	itk_unlock(corpse);
3651 
3652 	task_deallocate(corpse);
3653 	return port;
3654 }
3655 
3656 ipc_port_t
convert_task_to_port(task_t task)3657 convert_task_to_port(
3658 	task_t          task)
3659 {
3660 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3661 }
3662 
3663 ipc_port_t
convert_task_read_to_port(task_read_t task)3664 convert_task_read_to_port(
3665 	task_read_t          task)
3666 {
3667 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3668 }
3669 
3670 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3671 convert_task_inspect_to_port(
3672 	task_inspect_t          task)
3673 {
3674 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3675 }
3676 
3677 ipc_port_t
convert_task_name_to_port(task_name_t task)3678 convert_task_name_to_port(
3679 	task_name_t             task)
3680 {
3681 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3682 }
3683 
3684 ipc_port_t
convert_task_to_port_external(task_t task)3685 convert_task_to_port_external(task_t task)
3686 {
3687 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3688 }
3689 
3690 ipc_port_t
convert_task_read_to_port_external(task_t task)3691 convert_task_read_to_port_external(task_t task)
3692 {
3693 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3694 }
3695 
3696 void
convert_task_array_to_ports(task_array_t array,size_t count,mach_task_flavor_t flavor)3697 convert_task_array_to_ports(
3698 	task_array_t            array,
3699 	size_t                  count,
3700 	mach_task_flavor_t      flavor)
3701 {
3702 	task_t *task_list = (task_t *)array;
3703 
3704 	for (size_t i = 0; i < count; i++) {
3705 		task_t     task = task_list[i];
3706 		ipc_port_t port;
3707 
3708 		switch (flavor) {
3709 		case TASK_FLAVOR_CONTROL:
3710 			/* copyout determines immovability, see `should_mark_immovable_send` */
3711 			port = convert_task_to_port(task);
3712 			break;
3713 		case TASK_FLAVOR_READ:
3714 			port = convert_task_read_to_port(task);
3715 			break;
3716 		case TASK_FLAVOR_INSPECT:
3717 			port = convert_task_inspect_to_port(task);
3718 			break;
3719 		case TASK_FLAVOR_NAME:
3720 			port = convert_task_name_to_port(task);
3721 			break;
3722 		}
3723 
3724 		array[i].port = port;
3725 	}
3726 }
3727 
/*
 *	Routine:	convert_task_suspension_token_to_port_grp
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t         task,
	task_grp_t                      grp)
{
	ipc_port_t port;

	/* task_lock guards task->active; itk_lock guards itk_resume */
	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* lazily allocate the resume port on first use */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = task->itk_resume;
		ipc_kobject_require(port, task, IKOT_TASK_RESUME);
		port = ipc_port_make_sonce(port);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* task already terminated: no resume right to hand out */
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate_grp(task, grp);

	return port;
}
3771 
3772 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3773 convert_task_suspension_token_to_port_external(
3774 	task_suspension_token_t         task)
3775 {
3776 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3777 }
3778 
3779 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3780 convert_task_suspension_token_to_port_mig(
3781 	task_suspension_token_t         task)
3782 {
3783 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3784 }
3785 
3786 /*
3787  *	Routine:	space_deallocate
3788  *	Purpose:
3789  *		Deallocate a space ref produced by convert_port_to_space.
3790  *	Conditions:
3791  *		Nothing locked.
3792  */
3793 
3794 void
space_deallocate(ipc_space_t space)3795 space_deallocate(
3796 	ipc_space_t     space)
3797 {
3798 	if (space != IS_NULL) {
3799 		is_release(space);
3800 	}
3801 }
3802 
3803 /*
3804  *	Routine:	space_read_deallocate
3805  *	Purpose:
3806  *		Deallocate a space read ref produced by convert_port_to_space_read.
3807  *	Conditions:
3808  *		Nothing locked.
3809  */
3810 
3811 void
space_read_deallocate(ipc_space_read_t space)3812 space_read_deallocate(
3813 	ipc_space_read_t     space)
3814 {
3815 	if (space != IS_INSPECT_NULL) {
3816 		is_release((ipc_space_t)space);
3817 	}
3818 }
3819 
3820 /*
3821  *	Routine:	space_inspect_deallocate
3822  *	Purpose:
3823  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3824  *	Conditions:
3825  *		Nothing locked.
3826  */
3827 
3828 void
space_inspect_deallocate(ipc_space_inspect_t space)3829 space_inspect_deallocate(
3830 	ipc_space_inspect_t     space)
3831 {
3832 	if (space != IS_INSPECT_NULL) {
3833 		is_release((ipc_space_t)space);
3834 	}
3835 }
3836 
3837 
3838 static boolean_t
behavior_is_identity_protected(int new_behavior)3839 behavior_is_identity_protected(int new_behavior)
3840 {
3841 	return ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED) ||
3842 	       ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE) ||
3843 	       ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE_IDENTITY_PROTECTED);
3844 }
3845 
/*
 * Emit a CoreAnalytics "set_exception" event recording the names of the
 * calling process and the target process, plus the exception mask used.
 */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	/* NOTE(review): assumes CA_EVENT_ALLOCATE cannot return NULL — confirm */
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	/* record both the caller's and the target's process names */
	task_procname(current_task(), (char *) &event->current_proc, sizeof(event->current_proc));
	task_procname(excepting_task, (char *) &event->thread_proc, sizeof(event->thread_proc));
	event->mask = mask;

	CA_EVENT_SEND(ca_event);
}
3858 
/*
 * Handle a detected exception-behavior policy violation.
 * Raises a guard exception (lightweight corpse) when `thid_should_crash`
 * is set, and always reports telemetry.
 *
 * Returns whether the violation should be ignored
 */
static boolean_t
set_exception_behavior_violation(const task_t excepting_task, const exception_mask_t mask, int new_behavior)
{
	if (thid_should_crash) {
		/* create lightweight corpse */
		mach_port_guard_exception(new_behavior, mask, kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
	}

	/* always report the proc name to CA */
	send_set_exception_telemetry(excepting_task, mask);

	/* if the bootarg has been manually set to false, ignore the violation */
	return !thid_should_crash;
}
3874 
3875 static bool
exception_exposes_protected_ports(const ipc_port_t new_port)3876 exception_exposes_protected_ports(const ipc_port_t new_port)
3877 {
3878 	/*
3879 	 * sending exceptions to invalid port does not pose risk
3880 	 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
3881 	 */
3882 	return IP_VALID(new_port) && !is_ux_handler_port(new_port);
3883 }
3884 
3885 static bool
exception_ports_frozen(task_t excepting_task)3886 exception_ports_frozen(task_t excepting_task)
3887 {
3888 	return excepting_task &&
3889 	       (task_ro_flags_get(excepting_task) & TFRO_FREEZE_EXCEPTION_PORTS);
3890 }
3891 
#if XNU_TARGET_OS_OSX && CONFIG_CSR
/*
 * Returns whether System Integrity Protection is enabled.
 * csr_check() returns 0 when the override is allowed (i.e. SIP relaxed),
 * so a non-zero result means SIP is enforcing.
 */
static bool
SIP_is_enabled(void)
{
	/* `(void)` gives a proper prototype; bare `()` declares an
	 * unprototyped function in pre-C23 C. */
	return csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0;
}
#endif /* XNU_TARGET_OS_OSX && CONFIG_CSR*/
3899 
/*
 * Enforce the identity-protected exception behavior policy for a new
 * exception port. Returns true when the request is acceptable, or the
 * result of set_exception_behavior_violation() when the policy is
 * violated (which decides whether the violation is ignored).
 */
static bool
exception_is_identity_protected(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask)
{
	ipc_space_policy_t policy = {};

	/* excepting_task is NULL if we are setting a host exception port. */
	if (excepting_task) {
		policy = ipc_policy_for_task(excepting_task);
	}

	/*
	 * Only flag a violation when every enforcement condition holds.
	 * NOTE(review): SIP_is_enabled() is defined under
	 * XNU_TARGET_OS_OSX && CONFIG_CSR but only guarded by CONFIG_CSR
	 * here — confirm CONFIG_CSR is never set without XNU_TARGET_OS_OSX.
	 */
	if (exception_exposes_protected_ports(new_port)
	    && (!excepting_task || ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V1))
	    && !behavior_is_identity_protected(new_behavior)
#if CONFIG_CSR
	    && SIP_is_enabled()     /* cannot enforce if SIP is disabled */
#endif /* CONFIG_CSR */
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    ) {
		return set_exception_behavior_violation(excepting_task, mask, new_behavior);
	}

	return true;
}
3927 
/*
 * Security-policy gate for setting exception ports: entitled debuggers are
 * always allowed; hardened tasks opting into "only one exception port" are
 * restricted; everyone else must satisfy the identity-protected policy.
 */
static boolean_t
set_exception_behavior_allowed(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask, const bool hardened_exception)
{
	const char *excepting_task_name = "";
	const char *cur_task_name = "";

	if (excepting_task) {
		excepting_task_name = task_best_name(excepting_task);
	}
	if (current_task()) {
		cur_task_name = task_best_name(current_task());
	}

	/* Allow debuggers, tests, and tooling to set exception ports however they wish */
	if (IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		kprintf("Allowing set_exception_ports from [%s] on [%s] for "
		    "entitled process/debugger\n", cur_task_name, excepting_task_name);
		return true;
	}

	/* excepting_task can be NULL if setting the host port */
	if (excepting_task) {
		/*
		 * Only allow hardened set_exception_port calls on hardened tasks
		 * that opt in via entitlement
		 */
		ipc_space_policy_t pol = ipc_policy_for_task(excepting_task);
		/*
		 * NOTE(review): exception_is_identity_protected() checks
		 * IPC_POLICY_ENHANCED_V1 while this path checks
		 * IPC_SPACE_POLICY_ENHANCED_V1 — confirm the two constants
		 * are intentionally distinct.
		 */
		bool only_one_exception_port =
		    IOTaskHasEntitlement(excepting_task, IPC_ONLY_ONE_EXCEPTION_PORT)
		    && ipc_should_apply_policy(pol, IPC_SPACE_POLICY_ENHANCED_V1);

		if (!hardened_exception && only_one_exception_port) {
			kprintf("Disallowing set_exception_ports from [%s] on [%s] due "
			    "to only_one_exception_port policy\n", cur_task_name, excepting_task_name);
			return set_exception_behavior_violation(excepting_task, mask, new_behavior);
		}
	}

	/* Everyone else follows the standard policy and must use identity protected exceptions */
	return exception_is_identity_protected(new_port, new_behavior, excepting_task, mask);
}
3970 
/*
 *	Routine: set_exception_ports_validation
 *	Purpose:
 *		Common argument validation shared between all exception port
 *		setting/swapping routines
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS            Setting the exception port is allowed
 *		                        with these arguments
 *		KERN_INVALID_ARGUMENT   Invalid arguments
 *		KERN_INVALID_RIGHT      Incorrect port configuration
 *		KERN_DENIED             Denied by security policy
 */
kern_return_t
set_exception_ports_validation(
	task_t                  task,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor,
	bool                    hardened_exception)
{
	/* reject masks carrying bits outside the valid exception set */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	if (IP_VALID(new_port)) {
		/* strip MACH_EXCEPTION_* modifier bits before checking behavior */
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
		case EXCEPTION_STATE_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* the port itself must be acceptable as an exception port for `task` */
	if (IP_VALID(new_port) && !ipc_is_valid_exception_port(task, new_port)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Identity-protected and backtrace-preferred behaviors require the
	 * MACH_EXCEPTION_CODES flag to be set as well.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* finally apply the security policy (may raise a guard exception) */
	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, hardened_exception)) {
		return KERN_DENIED;
	}

	return KERN_SUCCESS;
}
4038 
4039 /*
4040  *	Routine:	thread_set_exception_ports_internal
4041  *	Purpose:
4042  *		Set a new exception action on the thread
4043  *	Conditions:
4044  *		Arguments have been validated via `set_exception_ports_validation`
4045  *		Nothing locked.
4046  *  Returns:
4047  *      KERN_SUCCESS	Setting the exception port is allowed with these arguments
4048  *		KERN_FAILURE	Thread is inactive
4049  */
kern_return_t
thread_set_exception_ports_internal(
	thread_t                thread,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor,
	boolean_t               hardened)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	/* Record whether the *setter* is privileged; stamped on each action. */
	boolean_t   privileged = task_is_privileged(current_task());

#if CONFIG_MACF
	/* Ask the MAC policy whether the caller may retarget this thread's ports. */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	/* Label tagging each updated action with the current proc's identity. */
	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		/* Thread is terminating; nothing to update. */
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* Lazily allocate the per-thread exception action table. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/*
			 * Stash the displaced port; its send right is released
			 * below, after the thread mutex is dropped.
			 */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
			action->hardened = hardened;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Drop the displaced send rights outside the thread mutex. */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4122 
4123 /*
4124  *	Routine:	thread/task_set_exception_ports [kernel call]
4125  *	Purpose:
4126  *			Sets the thread/task exception port, flavor and
4127  *			behavior for the exception types specified by the mask.
4128  *			There will be one send right per exception per valid
4129  *			port.
4130  *	Conditions:
4131  *		Nothing locked.  If successful, consumes
4132  *		the supplied send right.
4133  *	Returns:
4134  *		KERN_SUCCESS		Changed the special port.
4135  *		KERN_INVALID_ARGUMENT	The thread is null,
4136  *					Illegal mask bit set.
4137  *					Illegal exception behavior
4138  *		KERN_FAILURE		The thread is dead.
4139  *		KERN_NO_ACCESS		Restricted access to set port
4140  */
4141 
4142 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4143 thread_set_exception_ports(
4144 	thread_t                thread,
4145 	exception_mask_t        exception_mask,
4146 	ipc_port_t              new_port,
4147 	exception_behavior_t    new_behavior,
4148 	thread_state_flavor_t   new_flavor)
4149 {
4150 	if (thread == THREAD_NULL) {
4151 		return KERN_INVALID_ARGUMENT;
4152 	}
4153 	bool hardened_exception_flow = false;
4154 	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
4155 	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
4156 	if (kr != KERN_SUCCESS) {
4157 		return kr;
4158 	}
4159 
4160 	return thread_set_exception_ports_internal(thread, exception_mask, new_port, new_behavior, new_flavor, false);
4161 }
4162 
kern_return_t
task_set_exception_ports(
	task_t                                  task,
	exception_mask_t                exception_mask,
	ipc_port_t                              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor)
{
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	/* Record whether the *setter* is privileged; stamped on each action. */
	boolean_t privileged = task_is_privileged(current_task());
	register int    i;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* This is the regular (non-hardened) exception port flow. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
	    new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}


#if CONFIG_MACF
	/* Ask the MAC policy whether the caller may retarget this task's ports. */
	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	/* Label tagging each updated action with the current proc's identity. */
	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	/*
	 * Allow setting exception port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set exception
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			/*
			 * Stash the displaced port; its send right is released
			 * below, after the task's IPC lock is dropped.
			 */
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
			    exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Drop the displaced send rights outside the IPC lock. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4244 
4245 /*
4246  *	Routine:	thread/task_swap_exception_ports [kernel call]
4247  *	Purpose:
4248  *			Sets the thread/task exception port, flavor and
4249  *			behavior for the exception types specified by the
4250  *			mask.
4251  *
4252  *			The old ports, behavior and flavors are returned
4253  *			Count specifies the array sizes on input and
4254  *			the number of returned ports etc. on output.  The
4255  *			arrays must be large enough to hold all the returned
 *			data, MIG returns an error otherwise.  The masks
4257  *			array specifies the corresponding exception type(s).
4258  *
4259  *	Conditions:
4260  *		Nothing locked.  If successful, consumes
4261  *		the supplied send right.
4262  *
 *		Returns up to [in] CountCnt elements.
4264  *	Returns:
4265  *		KERN_SUCCESS		Changed the special port.
4266  *		KERN_INVALID_ARGUMENT	The thread is null,
4267  *					Illegal mask bit set.
4268  *					Illegal exception behavior
4269  *		KERN_FAILURE		The thread is dead.
4270  *		KERN_NO_ACCESS		Restricted access to set port
4271  */
4272 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	/* Record whether the *setter* is privileged; stamped on each action. */
	boolean_t   privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* Swapping always goes through the regular (non-hardened) policy. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* Ask the MAC policy whether the caller may retarget this thread's ports. */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		/* Thread is terminating; nothing to swap. */
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	/* Lazily allocate the per-thread exception action table. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No match above: emit a new output entry for this action. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/*
			 * Install the replacement; the displaced right is
			 * released below, after the thread mutex is dropped.
			 */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Drop the displaced send rights outside the thread mutex. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4385 
kern_return_t
task_swap_exception_ports(
	task_t                                          task,
	exception_mask_t                        exception_mask,
	ipc_port_t                                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	/* Record whether the *setter* is privileged; stamped on each action. */
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* Swapping always goes through the regular (non-hardened) policy. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
	    new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* Ask the MAC policy whether the caller may retarget this task's ports. */
	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		/* Task IPC has been torn down; nothing to swap. */
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No match above: emit a new output entry for this action. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/*
			 * Install the replacement; the displaced right is
			 * released below, after the IPC lock is dropped.
			 */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Drop the displaced send rights outside the IPC lock. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4494 
4495 /*
4496  *	Routine:	thread/task_get_exception_ports [kernel call]
4497  *	Purpose:
4498  *		Clones a send right for each of the thread/task's exception
4499  *		ports specified in the mask and returns the behaviour
4500  *		and flavor of said port.
4501  *
 *		Returns up to [in] CountCnt elements.
4503  *
4504  *	Conditions:
4505  *		Nothing locked.
4506  *	Returns:
4507  *		KERN_SUCCESS		Extracted a send right.
4508  *		KERN_INVALID_ARGUMENT	The thread is null,
4509  *					Invalid special port,
4510  *					Illegal mask bit set.
4511  *		KERN_FAILURE		The thread is dead.
4512  */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* Non-NULL ports_info selects the hashed-info flavor (no rights returned). */
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must supply either an info array or a port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a save area for FP state before taking thread lock,
	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
	 * an FP state allocation while holding thread locks.
	 */
	ml_fp_save_area_prealloc();

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No action table means no ports were ever set; report zero entries. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New unique entry, if the caller's arrays still have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						task_t receiver = TASK_NULL;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Export hashed addresses only; never raw kernel pointers. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRHASH(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRHASH(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4612 
4613 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4614 thread_get_exception_ports(
4615 	thread_t                        thread,
4616 	exception_mask_t                exception_mask,
4617 	exception_mask_array_t          masks,
4618 	mach_msg_type_number_t          *CountCnt,
4619 	exception_port_array_t          ports,
4620 	exception_behavior_array_t      behaviors,
4621 	thread_state_flavor_array_t     flavors)
4622 {
4623 	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4624 	           NULL, ports, behaviors, flavors);
4625 }
4626 
4627 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4628 thread_get_exception_ports_info(
4629 	mach_port_t                     port,
4630 	exception_mask_t                exception_mask,
4631 	exception_mask_array_t          masks,
4632 	mach_msg_type_number_t          *CountCnt,
4633 	exception_port_info_array_t     ports_info,
4634 	exception_behavior_array_t      behaviors,
4635 	thread_state_flavor_array_t     flavors)
4636 {
4637 	kern_return_t kr;
4638 
4639 	thread_t thread = convert_port_to_thread_read_no_eval(port);
4640 
4641 	if (thread == THREAD_NULL) {
4642 		return KERN_INVALID_ARGUMENT;
4643 	}
4644 
4645 	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4646 	    ports_info, NULL, behaviors, flavors);
4647 
4648 	thread_deallocate(thread);
4649 	return kr;
4650 }
4651 
4652 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4653 thread_get_exception_ports_from_user(
4654 	mach_port_t                     port,
4655 	exception_mask_t                exception_mask,
4656 	exception_mask_array_t          masks,
4657 	mach_msg_type_number_t         *CountCnt,
4658 	exception_port_array_t          ports,
4659 	exception_behavior_array_t      behaviors,
4660 	thread_state_flavor_array_t     flavors)
4661 {
4662 	kern_return_t kr;
4663 
4664 	thread_t thread = convert_port_to_thread(port);
4665 
4666 	if (thread == THREAD_NULL) {
4667 		return KERN_INVALID_ARGUMENT;
4668 	}
4669 
4670 	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4671 
4672 	thread_deallocate(thread);
4673 	return kr;
4674 }
4675 
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* Non-NULL ports_info selects the hashed-info flavor (no rights returned). */
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must supply either an info array or a port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a save area for FP state before taking task lock,
	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
	 * an FP state allocation while holding task locks.
	 */
	ml_fp_save_area_prealloc();

	itk_lock(task);

	if (!task->ipc_active) {
		/* Task IPC has been torn down; nothing to report. */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New unique entry, if the caller's arrays still have room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						task_t receiver = TASK_NULL;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Export hashed addresses only; never raw kernel pointers. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRHASH(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRHASH(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4767 
4768 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4769 task_get_exception_ports(
4770 	task_t                          task,
4771 	exception_mask_t                exception_mask,
4772 	exception_mask_array_t          masks,
4773 	mach_msg_type_number_t          *CountCnt,
4774 	exception_port_array_t          ports,
4775 	exception_behavior_array_t      behaviors,
4776 	thread_state_flavor_array_t     flavors)
4777 {
4778 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4779 	           NULL, ports, behaviors, flavors);
4780 }
4781 
4782 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4783 task_get_exception_ports_info(
4784 	mach_port_t                     port,
4785 	exception_mask_t                exception_mask,
4786 	exception_mask_array_t          masks,
4787 	mach_msg_type_number_t          *CountCnt,
4788 	exception_port_info_array_t     ports_info,
4789 	exception_behavior_array_t      behaviors,
4790 	thread_state_flavor_array_t     flavors)
4791 {
4792 	kern_return_t kr;
4793 
4794 	task_t task = convert_port_to_task_read_no_eval(port);
4795 
4796 	if (task == TASK_NULL) {
4797 		return KERN_INVALID_ARGUMENT;
4798 	}
4799 
4800 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4801 	    ports_info, NULL, behaviors, flavors);
4802 
4803 	task_deallocate(task);
4804 	return kr;
4805 }
4806 
4807 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4808 task_get_exception_ports_from_user(
4809 	mach_port_t                     port,
4810 	exception_mask_t                exception_mask,
4811 	exception_mask_array_t          masks,
4812 	mach_msg_type_number_t         *CountCnt,
4813 	exception_port_array_t          ports,
4814 	exception_behavior_array_t      behaviors,
4815 	thread_state_flavor_array_t     flavors)
4816 {
4817 	kern_return_t kr;
4818 
4819 	task_t task = convert_port_to_task(port);
4820 
4821 	if (task == TASK_NULL) {
4822 		return KERN_INVALID_ARGUMENT;
4823 	}
4824 
4825 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4826 
4827 	task_deallocate(task);
4828 	return kr;
4829 }
4830 
4831 /*
4832  *	Routine:	ipc_thread_port_unpin
4833  *	Purpose:
4834  *
4835  *		Called on the thread when it's terminating so that the last ref
4836  *		can be deallocated without a guard exception.
4837  *	Conditions:
4838  *		Thread mutex lock is held.
4839  */
void
ipc_thread_port_unpin(
	ipc_port_t port)
{
	/*
	 * Unpin the port in the current space so the final deallocation
	 * does not trigger a guard exception.
	 */
	ipc_object_unpin(current_space(), port);
}
4846 
4847 /*
4848  *	Routine:	task_register_hardened_exception_handler
4849  *	Purpose:
4850  *		Register a port as a hardened exception handler.
4851  *		See task.defs for additional info
4852  *	Conditions:
4853  *		Nothing locked.
4854  *		Limit of one hardened exception handler per task
4855  *	Returns:
4856  *		KERN_INVALID_ARGUMENT	invalid thread
4857  *		KERN_DENIED             violating the security policy
4858  *		KERN_NAME_EXISTS        Already set a hardened exception handler
4859  *		                        on this task
4860  *		KERN_SUCCESS
4861  */
kern_return_t
task_register_hardened_exception_handler(
	task_t                  task,
	uint32_t                signed_pc_key,
	exception_mask_t        exceptions_allowed,
	exception_behavior_t    behaviors_allowed,
	thread_state_flavor_t   flavors_allowed,
	mach_port_t             new_port)
{
	ipc_port_t old_port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* A hardened handler must be backed by a dedicated exception port. */
	if (IP_VALID(new_port) && !ip_is_exception_port(new_port)) {
		return KERN_INVALID_ARGUMENT;
	}


	/* Validate against the hardened-exception security policy. */
	bool hardened_exception_flow = true;
	kern_return_t kr = set_exception_ports_validation(task, exceptions_allowed,
	    new_port, behaviors_allowed, flavors_allowed, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* You can only register one hardened exception handler */
	if (exception_ports_frozen(task)) {
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * NOTE(review): the freeze flag is set before itk_lock is taken, so
	 * two racing registrations can both pass the frozen check above; the
	 * old_port check below still ensures only one handler is installed.
	 */
	task_ro_flags_set(task, TFRO_FREEZE_EXCEPTION_PORTS);
	itk_lock(task);

	/* No reason to allow setting this multiple times per task */
	old_port = task->hardened_exception_action.ea.port;
	if (IP_VALID(old_port)) {
		itk_unlock(task);
		return KERN_NAME_EXISTS;
	}

	/* Stash the semantics for this port on the task */
	struct hardened_exception_action hea;
	hea.ea.port = new_port; /* Donate our send right to the task */
	hea.ea.flavor = flavors_allowed;
	hea.ea.behavior = behaviors_allowed;
	hea.ea.privileged = false;
	hea.ea.label = NULL;
	hea.signed_pc_key = signed_pc_key;
	hea.exception = exceptions_allowed;

	task->hardened_exception_action = hea;
	itk_unlock(task);

	return KERN_SUCCESS;
}
4917 
4918 /*
4919  *	Routine:	thread_adopt_exception_handler
4920  *	Purpose:
4921  *		Adopt the hardened exception handler from the current task,
4922  *		for this thread.
4923  *
 *		Allows setting exception ports on a thread after exception ports
 *		have been frozen for the task.
4926  *	Conditions:
4927  *		Nothing locked
4928  *	Returns:
4929  *		KERN_INVALID_ARGUMENT   invalid thread
4930  *		KERN_DENIED             violating the security policy
4931  *		KERN_SUCCESS
4932  */
4933 kern_return_t
thread_adopt_exception_handler(thread_t thread,mach_port_t exc_port,exception_mask_t exc_mask,exception_behavior_t behavior_mask,thread_state_flavor_t flavor_mask)4934 thread_adopt_exception_handler(
4935 	thread_t                thread,
4936 	mach_port_t             exc_port,
4937 	exception_mask_t        exc_mask,
4938 	exception_behavior_t    behavior_mask,
4939 	thread_state_flavor_t   flavor_mask)
4940 {
4941 	if (thread == THREAD_NULL) {
4942 		return KERN_INVALID_ARGUMENT;
4943 	}
4944 
4945 	task_t task = get_threadtask(thread);
4946 
4947 	if (task != current_task()) {
4948 		return KERN_DENIED;
4949 	}
4950 
4951 	/* We must have exactly one hardened exception port per task */
4952 	if (!exception_ports_frozen(task)) {
4953 		return KERN_DENIED;
4954 	}
4955 
4956 	/* Ensure we see a consistent state of the hardened exception action */
4957 	itk_lock(task);
4958 	struct hardened_exception_action hea = task->hardened_exception_action;
4959 	itk_unlock(task);
4960 
4961 	if (exc_port != IP_NULL && exc_port != hea.ea.port) {
4962 		return KERN_DENIED;
4963 	}
4964 	/* Ensure that the new masks for this thread are a subset of the
4965 	 * allowable masks for this exception handler
4966 	 */
4967 	if (exc_mask & ~hea.exception ||
4968 	    behavior_mask & ~hea.ea.behavior ||
4969 	    flavor_mask & ~hea.ea.flavor) {
4970 		return KERN_DENIED;
4971 	}
4972 
4973 	assert(!IP_VALID(exc_port) || ip_is_exception_port(exc_port));
4974 
4975 	/*
4976 	 * We can safely assume this will be valid because we called
4977 	 * set_exception_ports_validation on it when it was originally
4978 	 * set on the task
4979 	 */
4980 	return thread_set_exception_ports_internal(thread, exc_mask, exc_port,
4981 	           behavior_mask, flavor_mask, true);
4982 }
4983