xref: /xnu-12377.81.4/osfmk/kern/ipc_tt.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
72 
73 #include <ipc/ipc_policy.h>
74 #include <mach/mach_types.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/mach_param.h>
78 #include <mach/task_special_ports.h>
79 #include <mach/thread_special_ports.h>
80 #include <mach/thread_status.h>
81 #include <mach/exception_types.h>
82 #include <mach/memory_object_types.h>
83 #include <mach/mach_traps.h>
84 #include <mach/task_server.h>
85 #include <mach/thread_act_server.h>
86 #include <mach/mach_host_server.h>
87 #include <mach/host_priv_server.h>
88 #include <mach/vm_map_server.h>
89 
90 #include <kern/exc_guard.h>
91 #include <kern/kern_types.h>
92 #include <kern/host.h>
93 #include <kern/ipc_kobject.h>
94 #include <kern/ipc_tt.h>
95 #include <kern/kalloc.h>
96 #include <kern/thread.h>
97 #include <kern/ux_handler.h>
98 #include <kern/misc_protos.h>
99 #include <kdp/kdp_dyld.h>
100 
101 #include <sys/proc_ro.h>
102 
103 #include <vm/vm_map_xnu.h>
104 #include <vm/vm_pageout.h>
105 #include <vm/vm_protos.h>
106 #include <mach/vm_types.h>
107 #include <libkern/coreanalytics/coreanalytics.h>
108 
109 #include <security/mac_mach_internal.h>
110 
111 #if CONFIG_CSR
112 #include <sys/csr.h>
113 #endif
114 
115 #include <sys/code_signing.h> /* for developer mode state */
116 
117 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
118 extern int cs_relax_platform_task_ports;
119 #endif
120 
121 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
122 extern boolean_t proc_is_simulated(const proc_t);
123 extern struct proc* current_proc(void);
124 
125 /* bootarg to create lightweight corpse for thread identity lockdown */
126 TUNABLE(bool, thid_should_crash, "thid_should_crash", true);
127 
/* Allows the process to call [thread,task]_set_exception_ports() */
129 #define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"
130 
131 /*
132  * Entitlement to disallow setting the exception port of task/thread unless you
133  * are being debugged or are setting up the hardened task exception handler
134  */
135 #define IPC_ONLY_ONE_EXCEPTION_PORT "com.apple.security.only-one-exception-port"
136 
137 CA_EVENT(set_exception,
138     CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
139     CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
140     CA_INT, mask,
141     CA_STATIC_STRING(6), level);
142 
143 __options_decl(ipc_reply_port_type_t, uint32_t, {
144 	IRPT_NONE        = 0x00,
145 	IRPT_USER        = 0x01,
146 	IRPT_KERNEL      = 0x02,
147 });
148 
149 /* forward declarations */
150 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
151 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
152 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
153 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
154 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
155 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
156 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
157 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
158 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
159 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
160 
161 /*
162  *	Routine:	ipc_task_init
163  *	Purpose:
164  *		Initialize a task's IPC state.
165  *
166  *		If non-null, some state will be inherited from the parent.
167  *		The parent must be appropriately initialized.
168  *	Conditions:
169  *		Nothing locked.
170  */
171 
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	struct label *temp_label;
	int i;


	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/* control and name ports are created eagerly, unattached for now */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
#if CONFIG_CSR
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right held in the kernel,
		 * so a corpse fork gets no settable-self right.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
#endif /* CONFIG_CSR */
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* element 0 is unused; give every real slot a fresh MACF label */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
	/* We don't need to inherit this */
	bzero(&task->hardened_exception_action, sizeof(task->hardened_exception_action));

	if (parent == TASK_NULL) {
		/* no parent: start with empty exception actions and host port only */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* copy inherited state under the parent's itk lock */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			/* preserve the child's own label across the struct copy */
			temp_label = task->exc_actions[i].label;
			task->exc_actions[i] = parent->exc_actions[i];
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].label = temp_label;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
294 
295 /*
296  *	Routine:	ipc_task_copyout_control_port
297  *	Purpose:
298  *		Copyout the task control port as pinned
299  *      and stash the send right name in the port
300  *  Condition:
301  *      Nothing locked.
302  */
303 void
ipc_task_copyout_control_port(task_t task)304 ipc_task_copyout_control_port(
305 	task_t            task)
306 {
307 	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
308 	mach_port_name_t name;
309 	ipc_port_t pport;
310 
311 #if CONFIG_CSR
312 	assert(kport == task->itk_settable_self);
313 #endif /* CONFIG_CSR */
314 	assert(!task_is_a_corpse(task));
315 
316 	pport = ipc_port_make_send_any(kport);
317 	/*
318 	 * mach_task_self() is pinned, memorize the name we gave it
319 	 * in ip_receiver_name (it's an abuse as this port really
320 	 * isn't a message queue, but the field is up for grabs
321 	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
322 	 *
323 	 * port_name_to_task* use this to fastpath IPCs.
324 	 *
325 	 * ipc_task_disable() will revert this when the task dies.
326 	 */
327 	name = ipc_port_copyout_send_pinned(pport, task->itk_space);
328 	if (MACH_PORT_VALID(name)) {
329 		pport->ip_receiver_name = name;
330 	}
331 }
332 
333 /*
334  *	Routine:	ipc_thread_set_immovable_pinned
335  *	Purpose:
 *		Copyout the thread control port as pinned and immovable
337  *      and stash the send right name in the port
338  *  Notes:
339  *		Consumes a thread ref; produces a naked send right
340  *		which may be invalid.
341  */
342 void
ipc_thread_set_immovable_pinned(thread_t thread)343 ipc_thread_set_immovable_pinned(
344 	thread_t            thread)
345 {
346 	ipc_port_t kport = convert_thread_to_port_immovable(thread);
347 
348 	task_t task = get_threadtask(thread);
349 	mach_port_name_t name;
350 
351 #if CONFIG_CSR
352 	assert(kport == thread->t_tro->tro_settable_self_port);
353 #endif /* CONFIG_CSR */
354 	assert(!task_is_a_corpse(task));
355 
356 	name = ipc_port_copyout_send_pinned(kport, task->itk_space);
357 }
358 
359 /*
360  *	Routine:	ipc_task_enable
361  *	Purpose:
362  *		Enable a task for IPC access.
363  *	Conditions:
364  *		Nothing locked.
365  */
366 void
ipc_task_enable(task_t task)367 ipc_task_enable(
368 	task_t          task)
369 {
370 	ipc_port_t kport;
371 	ipc_port_t nport;
372 	ipc_port_t iport;
373 	ipc_port_t rdport;
374 
375 	ipc_space_set_policy(task->itk_space, ipc_policy_for_task(task));
376 
377 	itk_lock(task);
378 	if (!task->active) {
379 		/*
380 		 * task has been terminated before we can enable IPC access.
381 		 * The check is to make sure we don't accidentally re-enable
382 		 * the task ports _after_ they've been disabled during
383 		 * task_terminate_internal(), in which case we will hit the
384 		 * !task->ipc_active assertion in ipc_task_terminate().
385 		 *
386 		 * Technically we should grab task lock when checking task
387 		 * active bit, but since task termination unsets task->active
388 		 * _before_ calling ipc_task_disable(), we can always see the
389 		 * truth with just itk_lock() and bail if disable has been called.
390 		 */
391 		itk_unlock(task);
392 		return;
393 	}
394 
395 	/* verify vm_map_setup called */
396 	assert(task_is_a_corpse(task) || task->map->owning_task == task);
397 
398 	/* verify task_set_control_port_options called */
399 	assert(task_is_a_corpse_fork(task) || task == kernel_task ||
400 	    task_get_control_port_options(task) != TASK_CONTROL_PORT_OPTIONS_INVALID);
401 
402 	assert(!task->ipc_active || task_is_a_corpse(task));
403 	task->ipc_active = true;
404 
405 	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
406 	if (kport != IP_NULL) {
407 		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
408 	}
409 	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
410 	if (nport != IP_NULL) {
411 		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
412 	}
413 	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
414 	if (iport != IP_NULL) {
415 		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
416 	}
417 	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
418 	if (rdport != IP_NULL) {
419 		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
420 	}
421 
422 	itk_unlock(task);
423 }
424 
425 /*
426  *	Routine:	ipc_task_disable
427  *	Purpose:
428  *		Disable IPC access to a task.
429  *	Conditions:
430  *		Nothing locked.
431  */
432 
void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	/* detach the task from each flavor port that was allocated */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
491 
492 /*
493  *	Routine:	ipc_task_terminate
494  *	Purpose:
495  *		Clean up and destroy a task's IPC state.
496  *	Conditions:
497  *		Nothing locked.  The task must be suspended.
498  *		(Or the current thread must be in the task.)
499  */
500 
void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t iport;
	ipc_port_t rdport;
	ipc_port_t rport;
#if CONFIG_CSR
	ipc_port_t sself;
#endif /* CONFIG_CSR */
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	/* snapshot and clear all port state under the lock ... */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	sself = task->itk_settable_self;
#endif /* CONFIG_CSR */

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* ... then release the naked send rights outside the lock */
#if CONFIG_CSR
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}
#endif /* CONFIG_CSR */

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	if (IP_VALID(task->hardened_exception_action.ea.port)) {
		ipc_port_release_send(task->hardened_exception_action.ea.port);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, IPC_KOBJECT_NO_MSCOUNT,
	    IKOT_TASK_CONTROL);

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, IPC_KOBJECT_NO_MSCOUNT,
	    IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_TASK_INSPECT);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
636 
637 /*
638  *	Routine:	ipc_task_reset
639  *	Purpose:
640  *		Reset a task's IPC state to protect it when
641  *		it enters an elevated security context. The
642  *		task name port can remain the same - since it
643  *              represents no specific privilege.
644  *	Conditions:
645  *		Nothing locked.  The task must be suspended.
646  *		(Or the current thread must be in the task.)
647  */
648 
649 void
ipc_task_reset(task_t task)650 ipc_task_reset(
651 	task_t          task)
652 {
653 	ipc_port_t old_kport, new_kport;
654 #if CONFIG_CSR
655 	ipc_port_t old_sself;
656 #endif /* CONFIG_CSR */
657 	ipc_port_t old_rdport;
658 	ipc_port_t old_iport;
659 	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
660 	ipc_port_t old_hardened_exception;
661 	ipc_port_t *notifiers_ptr = NULL;
662 
663 #if CONFIG_MACF
664 	/* Fresh label to unset credentials in existing labels. */
665 	struct label *unset_label = mac_exc_create_label(NULL);
666 #endif
667 
668 	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
669 	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
670 	/*
671 	 * ipc_task_reset() only happens during sugid or corpsify.
672 	 *
673 	 * (1) sugid happens early in exec_mach_imgact(),
674 	 *     at which point the old task port has not been enabled,
675 	 *     and is left movable.
676 	 * (2) corpse cannot execute more code so the notion of the immovable
677 	 *     task port is bogus, and should appear as if it doesn't have one.
678 	 *
679 	 */
680 	itk_lock(task);
681 
682 	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
683 	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
684 	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
685 
686 	if (old_kport == IP_NULL) {
687 		/* the task is already terminated (can this happen?) */
688 		itk_unlock(task);
689 		ipc_kobject_dealloc_port(new_kport, IPC_KOBJECT_NO_MSCOUNT, IKOT_TASK_CONTROL);
690 #if CONFIG_MACF
691 		mac_exc_free_label(unset_label);
692 #endif
693 		return;
694 	}
695 
696 	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
697 
698 #if CONFIG_CSR
699 	old_sself = task->itk_settable_self;
700 	if (task_is_a_corpse(task)) {
701 		/* No extra send right for coprse, needed to arm no-sender notification */
702 		task->itk_settable_self = IP_NULL;
703 	} else {
704 		/* we just made the port, no need to triple check */
705 		task->itk_settable_self = ipc_port_make_send_any(new_kport);
706 	}
707 #endif /* CONFIG_CSR */
708 
709 	ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);
710 
711 	/* Reset the read and inspect flavors of task port */
712 	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
713 	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
714 
715 	if (IP_VALID(task->hardened_exception_action.ea.port)
716 	    && !task->hardened_exception_action.ea.privileged) {
717 		old_hardened_exception = task->hardened_exception_action.ea.port;
718 		task->hardened_exception_action.ea.port = IP_NULL;
719 	}
720 
721 	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
722 		old_exc_actions[i] = IP_NULL;
723 
724 		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
725 			continue;
726 		}
727 
728 		if (!task->exc_actions[i].privileged) {
729 #if CONFIG_MACF
730 			mac_exc_update_action_label(task->exc_actions + i, unset_label);
731 #endif
732 			old_exc_actions[i] = task->exc_actions[i].port;
733 			task->exc_actions[i].port = IP_NULL;
734 		}
735 	}/* for */
736 
737 	if (IP_VALID(task->itk_debug_control)) {
738 		ipc_port_release_send(task->itk_debug_control);
739 	}
740 	task->itk_debug_control = IP_NULL;
741 
742 	if (task->itk_dyld_notify) {
743 		notifiers_ptr = task->itk_dyld_notify;
744 		task->itk_dyld_notify = NULL;
745 	}
746 
747 	itk_unlock(task);
748 
749 #if CONFIG_MACF
750 	mac_exc_free_label(unset_label);
751 #endif
752 
753 	/* release the naked send rights */
754 #if CONFIG_CSR
755 	if (IP_VALID(old_sself)) {
756 		ipc_port_release_send(old_sself);
757 	}
758 #endif /* CONFIG_CSR */
759 
760 	if (notifiers_ptr) {
761 		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
762 			if (IP_VALID(notifiers_ptr[i])) {
763 				ipc_port_release_send(notifiers_ptr[i]);
764 			}
765 		}
766 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
767 	}
768 
769 	ipc_port_release_send(old_hardened_exception);
770 
771 	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
772 		if (IP_VALID(old_exc_actions[i])) {
773 			ipc_port_release_send(old_exc_actions[i]);
774 		}
775 	}
776 
777 	/* destroy all task port flavors */
778 	if (old_rdport != IP_NULL) {
779 		/* read port ikol_alt_port may point to kport, dealloc first */
780 		ipc_kobject_dealloc_port(old_rdport, IPC_KOBJECT_NO_MSCOUNT,
781 		    IKOT_TASK_READ);
782 	}
783 	ipc_kobject_dealloc_port(old_kport, IPC_KOBJECT_NO_MSCOUNT,
784 	    IKOT_TASK_CONTROL);
785 
786 	if (old_iport != IP_NULL) {
787 		ipc_kobject_dealloc_port(old_iport, IPC_KOBJECT_NO_MSCOUNT,
788 		    IKOT_TASK_INSPECT);
789 	}
790 }
791 
792 /*
793  *	Routine:	ipc_thread_init
794  *	Purpose:
795  *		Initialize a thread's IPC state.
796  *	Conditions:
797  *		Nothing locked.
798  */
799 
800 void
ipc_thread_init(__unused task_t task,thread_t thread,thread_ro_t tro)801 ipc_thread_init(
802 	__unused task_t task,
803 	thread_t        thread,
804 	thread_ro_t     tro)
805 {
806 	ipc_port_t         kport;
807 
808 	/*
809 	 * pthreads are subsequently pinned via
810 	 * ipc_port_copyout_send_pinned() whereas raw threads are left
811 	 * unpinned.
812 	 */
813 	kport = ipc_kobject_alloc_port(thread, IKOT_THREAD_CONTROL,
814 	    IPC_KOBJECT_ALLOC_NONE);
815 
816 	/* we just made the port, no need to triple check */
817 #if CONFIG_CSR
818 	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
819 #endif /* CONFIG_CSR */
820 	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;
821 
822 	thread->ith_special_reply_port = NULL;
823 
824 #if IMPORTANCE_INHERITANCE
825 	thread->ith_assertions = 0;
826 #endif
827 
828 	thread->ipc_active = true;
829 	ipc_kmsg_queue_init(&thread->ith_messages);
830 
831 	thread->ith_kernel_reply_port = IP_NULL;
832 }
833 
/* Allocation wrapper sizing one full exception-action table for a thread */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
837 
838 static void
ipc_thread_init_exc_actions(thread_ro_t tro)839 ipc_thread_init_exc_actions(thread_ro_t tro)
840 {
841 	struct exception_action *actions;
842 
843 	actions = kalloc_type(struct thread_init_exc_actions,
844 	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
845 
846 #if CONFIG_MACF
847 	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
848 		mac_exc_associate_action_label(&actions[i],
849 		    mac_exc_create_label(&actions[i]));
850 	}
851 #endif
852 
853 	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
854 }
855 
856 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)857 ipc_thread_destroy_exc_actions(thread_ro_t tro)
858 {
859 	struct exception_action *actions = tro->tro_exc_actions;
860 
861 	if (actions) {
862 #if CONFIG_MACF
863 		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
864 			mac_exc_free_action_label(actions + i);
865 		}
866 #endif
867 
868 		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
869 		struct thread_init_exc_actions *tr_actions =
870 		    (struct thread_init_exc_actions *)actions;
871 		kfree_type(struct thread_init_exc_actions, tr_actions);
872 	}
873 }
874 
/*
 * Copy the tro_ports array (and, under CONFIG_CSR, the adjacent
 * tro_settable_self_port) from the template into the read-only
 * thread structure with a single zalloc_ro_mut() call.
 */
static void
ipc_thread_ro_update_ports(
	thread_ro_t             tro,
	const struct thread_ro *tro_tpl)
{
	vm_size_t offs = offsetof(struct thread_ro, tro_ports);
	/* under CONFIG_CSR the copy extends over one extra port pointer */
	vm_size_t size = sizeof(tro_tpl->tro_ports)
#if CONFIG_CSR
	    + sizeof(struct ipc_port *);
#else
	;
#endif /* CONFIG_CSR */

#if CONFIG_CSR
	/* the single-copy trick requires the two fields to be contiguous */
	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
	    offsetof(struct thread_ro, tro_ports) +
	    sizeof(tro_tpl->tro_ports));
#endif /* CONFIG_CSR */

	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
	    offs, &tro_tpl->tro_ports, size);
}
897 
898 /*
899  *	Routine:	ipc_thread_disable
900  *	Purpose:
 *		Disable IPC access to a thread.
902  *	Conditions:
903  *		Thread locked.
904  */
905 void
ipc_thread_disable(thread_t thread)906 ipc_thread_disable(
907 	thread_t        thread)
908 {
909 	thread_ro_t     tro = get_thread_ro(thread);
910 	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
911 	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
912 	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];
913 
914 	/*
915 	 * This innocuous looking line is load bearing.
916 	 *
917 	 * It is used to disable the creation of lazy made ports.
918 	 * We must do so before we drop the last reference on the thread,
919 	 * as thread ports do not own a reference on the thread, and
920 	 * convert_port_to_thread* will crash trying to resurect a thread.
921 	 */
922 	thread->ipc_active = false;
923 
924 	if (kport != IP_NULL) {
925 		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
926 	}
927 
928 	if (iport != IP_NULL) {
929 		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
930 	}
931 
932 	if (rdport != IP_NULL) {
933 		/* clears ikol_alt_port */
934 		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
935 	}
936 
937 	/* unbind the thread special reply port */
938 	if (IP_VALID(thread->ith_special_reply_port)) {
939 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
940 	}
941 }
942 
943 /*
944  *	Routine:	ipc_thread_terminate
945  *	Purpose:
946  *		Clean up and destroy a thread's IPC state.
947  *	Conditions:
948  *		Nothing locked.
949  */
950 
void
ipc_thread_terminate(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
#if CONFIG_CSR
	ipc_port_t sport = IP_NULL;
#endif /* CONFIG_CSR */

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	/* snapshot port state under the thread mutex */
	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
#if CONFIG_CSR
	sport = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */

	if (kport != IP_NULL) {
#if CONFIG_CSR
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}
#endif /* CONFIG_CSR */

		/* clear all port fields in the read-only thread structure */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_READ);
	}

	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(kport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_CONTROL);
	}
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_INSPECT);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1024 
1025 /*
1026  *	Routine:	ipc_thread_reset
1027  *	Purpose:
1028  *		Reset the IPC state for a given Mach thread when
1029  *		its task enters an elevated security context.
1030  *		All flavors of thread port and its exception ports have
1031  *		to be reset.  Its RPC reply port cannot have any
1032  *		rights outstanding, so it should be fine. The thread
1033  *		inspect and read port are set to NULL.
1034  *	Conditions:
1035  *		Nothing locked.
1036  */
1037 
void
ipc_thread_reset(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport;
#if CONFIG_CSR
	ipc_port_t old_sself;
#endif /* CONFIG_CSR */
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	int i;

#if CONFIG_MACF
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	/* allocate the replacement control port before taking the thread mutex */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point
	 *     the old thread port is still movable.
	 * (2) corpse cannot execute more code so the notion of the immovable
	 *     thread port is bogus, and should appear as if it doesn't have one.
	 */

	thread_mtx_lock(thread);

	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

#if CONFIG_CSR
	old_sself = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		/* undo the speculative allocation and label above */
		ipc_kobject_dealloc_port(new_kport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_CONTROL);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ipc_active = true;

	/* install the fresh control port (and settable-self send right) */
	struct thread_ro tpl = {
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
		/* we just made the port, no need to triple check */
#if CONFIG_CSR
		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
#endif /* CONFIG_CSR */
	};

	ipc_thread_ro_update_ports(tro, &tpl);

	/* detach the old kobject ports so outstanding rights go dead */
	if (old_kport != IP_NULL) {
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* privileged action survives; nothing to release */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}
#endif /* CONFIG_CSR */

	if (has_old_exc_actions) {
		/*
		 * NOTE(review): privileged slots were stored as IP_NULL above,
		 * so this assumes ipc_port_release_send tolerates IP_NULL —
		 * confirm against its definition.
		 */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel ports */
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_READ);
		/* ikol_alt_port cleared */
	}
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_CONTROL);
	}

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, IPC_KOBJECT_NO_MSCOUNT,
		    IKOT_THREAD_INSPECT);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1171 
1172 /*
1173  *	Routine:	retrieve_task_self_fast
1174  *	Purpose:
1175  *		Optimized version of retrieve_task_self,
1176  *		that only works for the current task.
1177  *
1178  *		Return a send right (possibly null/dead)
1179  *		for the task's user-visible self port.
1180  *	Conditions:
1181  *		Nothing locked.
1182  */
1183 
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;
	/*
	 * Read before itk_lock: the caller is the task itself (asserted
	 * below), so its control port cannot be torn down concurrently.
	 */
	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];

	assert(task == current_task());

	itk_lock(task);
	assert(kport != IP_NULL);

#if CONFIG_CSR
	/*
	 * If a settable self port was installed (SIP disabled, see
	 * task_set_special_port), return a copy of that instead.
	 */
	if (task->itk_settable_self != kport) {
		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		/* normal case: make a send right for the kobject control port */
		port = ipc_kobject_make_send(kport, task, IKOT_TASK_CONTROL);
	}

	itk_unlock(task);

	return port;
}
1209 
1210 /*
1211  *	Routine:	mach_task_is_self
1212  *	Purpose:
1213  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1214  *      port is pointing to current_task.
1215  */
1216 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1217 mach_task_is_self(
1218 	task_t         task,
1219 	boolean_t     *is_self)
1220 {
1221 	if (task == TASK_NULL) {
1222 		return KERN_INVALID_ARGUMENT;
1223 	}
1224 
1225 	*is_self = (task == current_task());
1226 
1227 	return KERN_SUCCESS;
1228 }
1229 
1230 /*
1231  *	Routine:	retrieve_thread_self_fast
1232  *	Purpose:
1233  *		Return a send right (possibly null/dead)
1234  *		for the thread's user-visible self port.
1235  *
1236  *		Only works for the current thread.
1237  *
1238  *	Conditions:
1239  *		Nothing locked.
1240  */
1241 
ipc_port_t
retrieve_thread_self_fast(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	/* only valid for the calling thread; its ports cannot vanish under us */
	assert(thread == current_thread());

	thread_mtx_lock(thread);

#if CONFIG_CSR
	/*
	 * If a settable self port was installed (SIP disabled, see
	 * thread_set_special_port), return a copy of that instead.
	 */
	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		/* normal case: make a send right for the kobject control port */
		port = ipc_kobject_make_send(tro->tro_ports[THREAD_FLAVOR_CONTROL],
		    thread, IKOT_THREAD_CONTROL);
	}

	thread_mtx_unlock(thread);

	return port;
}
1267 
1268 /*
1269  *	Routine:	task_self_trap [mach trap]
1270  *	Purpose:
1271  *		Give the caller send rights for their own task port.
1272  *	Conditions:
1273  *		Nothing locked.
1274  *	Returns:
1275  *		MACH_PORT_NULL if there are any resource failures
1276  *		or other errors.
1277  */
1278 
1279 mach_port_name_t
task_self_trap(__unused struct task_self_trap_args * args)1280 task_self_trap(
1281 	__unused struct task_self_trap_args *args)
1282 {
1283 	task_t task = current_task();
1284 	ipc_port_t sright;
1285 
1286 	sright = retrieve_task_self_fast(task);
1287 	return ipc_port_copyout_send(sright, task->itk_space);
1288 }
1289 
1290 /*
1291  *	Routine:	thread_self_trap [mach trap]
1292  *	Purpose:
1293  *		Give the caller send rights for his own thread port.
1294  *	Conditions:
1295  *		Nothing locked.
1296  *	Returns:
1297  *		MACH_PORT_NULL if there are any resource failures
1298  *		or other errors.
1299  */
1300 
1301 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1302 thread_self_trap(
1303 	__unused struct thread_self_trap_args *args)
1304 {
1305 	thread_t thread = current_thread();
1306 	ipc_space_t space = current_space();
1307 	ipc_port_t sright;
1308 	mach_port_name_t name;
1309 
1310 	sright = retrieve_thread_self_fast(thread);
1311 	name = ipc_port_copyout_send(sright, space);
1312 	return name;
1313 }
1314 
1315 /*
1316  *	Routine:	mach_reply_port [mach trap]
1317  *	Purpose:
1318  *		Allocate a port for the caller.
1319  *	Conditions:
1320  *		Nothing locked.
1321  *	Returns:
1322  *		MACH_PORT_NULL if there are any resource failures
1323  *		or other errors.
1324  */
1325 
1326 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1327 mach_reply_port(
1328 	__unused struct mach_reply_port_args *args)
1329 {
1330 	ipc_port_t port;
1331 	mach_port_name_t name;
1332 	kern_return_t kr;
1333 
1334 	kr = ipc_port_alloc(current_space(), IPC_OBJECT_LABEL(IOT_PORT),
1335 	    IP_INIT_NONE, &name, &port);
1336 	if (kr == KERN_SUCCESS) {
1337 		ip_mq_unlock(port);
1338 	} else {
1339 		name = MACH_PORT_NULL;
1340 	}
1341 	return name;
1342 }
1343 
1344 /*
1345  *	Routine:	thread_get_special_reply_port [mach trap]
1346  *	Purpose:
1347  *		Allocate a special reply port for the calling thread.
1348  *	Conditions:
1349  *		Nothing locked.
1350  *	Returns:
1351  *		mach_port_name_t: send right & receive right for special reply port.
1352  *		MACH_PORT_NULL if there are any resource failures
1353  *		or other errors.
1354  */
1355 
1356 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1357 thread_get_special_reply_port(
1358 	__unused struct thread_get_special_reply_port_args *args)
1359 {
1360 	ipc_port_t port;
1361 	mach_port_name_t name;
1362 	kern_return_t kr;
1363 	thread_t thread = current_thread();
1364 
1365 	/* unbind the thread special reply port */
1366 	if (IP_VALID(thread->ith_special_reply_port)) {
1367 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1368 	}
1369 
1370 	kr = ipc_port_alloc(current_space(), IPC_OBJECT_LABEL(IOT_SPECIAL_REPLY_PORT),
1371 	    IP_INIT_MAKE_SEND_RIGHT, &name, &port);
1372 	if (kr == KERN_SUCCESS) {
1373 		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1374 		ip_mq_unlock(port);
1375 	} else {
1376 		name = MACH_PORT_NULL;
1377 	}
1378 	return name;
1379 }
1380 
1381 /*
1382  *	Routine:	thread_get_kernel_special_reply_port
1383  *	Purpose:
1384  *		Allocate a kernel special reply port for the calling thread.
1385  *	Conditions:
1386  *		Nothing locked.
1387  *	Returns:
1388  *		Creates and sets kernel special reply port.
1389  *		KERN_SUCCESS on Success.
1390  *		KERN_FAILURE on Failure.
1391  */
1392 
1393 kern_return_t
thread_get_kernel_special_reply_port(void)1394 thread_get_kernel_special_reply_port(void)
1395 {
1396 	ipc_port_t port = IP_NULL;
1397 	thread_t thread = current_thread();
1398 
1399 	/* unbind the thread special reply port */
1400 	if (IP_VALID(thread->ith_kernel_reply_port)) {
1401 		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1402 	}
1403 
1404 	port = ipc_port_alloc_special(ipc_space_reply,
1405 	    IPC_OBJECT_LABEL(IOT_SPECIAL_REPLY_PORT), IP_INIT_NONE);
1406 	ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
1407 	ip_mq_unlock(port);
1408 
1409 	/* release the reference returned by ipc_port_alloc_special */
1410 	ip_release(port);
1411 
1412 	return KERN_SUCCESS;
1413 }
1414 
1415 /*
1416  *	Routine:	ipc_port_bind_special_reply_port_locked
1417  *	Purpose:
1418  *		Bind the given port to current thread as a special reply port.
1419  *	Conditions:
1420  *		Port locked.
1421  *	Returns:
1422  *		None.
1423  */
1424 
1425 static void
ipc_port_bind_special_reply_port_locked(ipc_port_t port,ipc_reply_port_type_t reply_type)1426 ipc_port_bind_special_reply_port_locked(
1427 	ipc_port_t            port,
1428 	ipc_reply_port_type_t reply_type)
1429 {
1430 	thread_t thread = current_thread();
1431 	ipc_port_t *reply_portp;
1432 
1433 	if (reply_type == IRPT_USER) {
1434 		reply_portp = &thread->ith_special_reply_port;
1435 	} else {
1436 		reply_portp = &thread->ith_kernel_reply_port;
1437 	}
1438 
1439 	assert(*reply_portp == NULL);
1440 	assert(ip_is_special_reply_port(port));
1441 	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1442 
1443 	ip_reference(port);
1444 	*reply_portp = port;
1445 	port->ip_messages.imq_srp_owner_thread = thread;
1446 
1447 	ipc_special_reply_port_bits_reset(port);
1448 }
1449 
1450 /*
1451  *	Routine:	ipc_port_unbind_special_reply_port
1452  *	Purpose:
1453  *		Unbind the thread's special reply port.
1454  *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
1456  *	Condition:
1457  *		Nothing locked.
1458  *	Returns:
1459  *		None.
1460  */
static void
ipc_port_unbind_special_reply_port(
	thread_t              thread,
	ipc_reply_port_type_t reply_type)
{
	ipc_port_t *reply_portp;

	/* select the user or kernel reply-port slot on the thread */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	ipc_port_t special_reply_port = *reply_portp;

	ip_mq_lock(special_reply_port);

	/* clear the slot before detaching the port from the thread */
	*reply_portp = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/* Destroy the port if it's the kernel special reply, else just release the ref */
	if (reply_type == IRPT_USER) {
		ip_release(special_reply_port);
	} else {
		/* ipc_port_destroy consumes the lock taken here */
		ip_mq_lock(special_reply_port);
		ipc_port_destroy(special_reply_port);
	}
}
1491 
1492 /*
1493  *	Routine:	thread_dealloc_kernel_special_reply_port
1494  *	Purpose:
1495  *		Unbind the thread's kernel special reply port.
1496  *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
1498  *	Condition:
1499  *		Called on current thread or a terminated thread.
1500  *	Returns:
1501  *		None.
1502  */
1503 
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/*
	 * Unbinding the kernel special reply port also destroys it
	 * (see ipc_port_unbind_special_reply_port).
	 */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1509 
1510 /*
1511  *	Routine:	thread_get_special_port [kernel call]
1512  *	Purpose:
1513  *		Clones a send right for one of the thread's
1514  *		special ports.
1515  *	Conditions:
1516  *		Nothing locked.
1517  *	Returns:
1518  *		KERN_SUCCESS		Extracted a send right.
1519  *		KERN_INVALID_ARGUMENT	The thread is null.
1520  *		KERN_FAILURE		The thread is dead.
1521  *		KERN_INVALID_ARGUMENT	Invalid special port.
1522  */
1523 
/* forward declaration; the definition follows the _internal helper below */
kern_return_t
thread_get_special_port(
	thread_inspect_t         thread,
	int                      which,
	ipc_port_t              *portp);
1529 
/*
 *	Routine:	thread_get_special_port_internal
 *	Purpose:
 *		Common implementation for thread_get_special_port() and
 *		thread_get_special_port_from_user(): checks that the caller's
 *		port flavor is allowed to request `which`, then clones the
 *		corresponding send right.
 *	Conditions:
 *		Nothing locked.
 */
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	/* a read/inspect caller may not request a more powerful port */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		/* the thread is already terminated */
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/*
		 * If a settable self port was installed (SIP disabled, see
		 * thread_set_special_port), hand out a copy of that instead.
		 */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		/* drop the mutex: the conversion takes its own locks */
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1583 
1584 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1585 thread_get_special_port(
1586 	thread_inspect_t         thread,
1587 	int                      which,
1588 	ipc_port_t              *portp)
1589 {
1590 	if (thread == THREAD_NULL) {
1591 		return KERN_INVALID_ARGUMENT;
1592 	}
1593 
1594 	return thread_get_special_port_internal(thread, get_thread_ro(thread),
1595 	           which, portp, THREAD_FLAVOR_CONTROL);
1596 }
1597 
/*
 *	Routine:	thread_get_special_port_from_user [MIG call]
 *	Purpose:
 *		MIG entry point: derives the caller's privilege flavor from the
 *		kobject type of the presented thread port, applies the movable
 *		control-port policy check, then delegates to the internal helper.
 *	Conditions:
 *		Nothing locked.  Consumes the reference produced by the
 *		port-to-thread conversion before returning.
 */
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	task_t curr_task = current_task();
	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_type(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == curr_task) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		/*
		 * if `mac_task_check_get_movable_control_port` returned 0,
		 * then we must also have a movable task.
		 * see `task_set_exc_guard_default`
		 */
		assert(!task_is_immovable(curr_task));
	}

	/* the kobject type of the inbound port bounds what the caller may ask for */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
	/* NOTE(review): `out` is only targeted under CONFIG_MACF; builds without
	 * MACF may warn about an unused label — confirm build configuration. */
out:
	thread_deallocate(thread);
	return kr;
}
1657 
1658 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1659 special_port_allowed_with_thread_flavor(
1660 	int                  which,
1661 	mach_thread_flavor_t flavor)
1662 {
1663 	switch (flavor) {
1664 	case THREAD_FLAVOR_CONTROL:
1665 		return KERN_SUCCESS;
1666 
1667 	case THREAD_FLAVOR_READ:
1668 
1669 		switch (which) {
1670 		case THREAD_READ_PORT:
1671 		case THREAD_INSPECT_PORT:
1672 			return KERN_SUCCESS;
1673 		default:
1674 			return KERN_INVALID_CAPABILITY;
1675 		}
1676 
1677 	case THREAD_FLAVOR_INSPECT:
1678 
1679 		switch (which) {
1680 		case THREAD_INSPECT_PORT:
1681 			return KERN_SUCCESS;
1682 		default:
1683 			return KERN_INVALID_CAPABILITY;
1684 		}
1685 
1686 	default:
1687 		return KERN_INVALID_CAPABILITY;
1688 	}
1689 }
1690 
1691 /*
1692  *	Routine:	thread_set_special_port [kernel call]
1693  *	Purpose:
1694  *		Changes one of the thread's special ports,
1695  *		setting it to the supplied send right.
1696  *	Conditions:
1697  *		Nothing locked.  If successful, consumes
1698  *		the supplied send right.
1699  *	Returns:
1700  *		KERN_SUCCESS            Changed the special port.
1701  *		KERN_INVALID_ARGUMENT   The thread is null.
1702  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1703  *		KERN_FAILURE            The thread is dead.
1704  *		KERN_INVALID_ARGUMENT   Invalid special port.
1705  *		KERN_NO_ACCESS          Restricted access to set port.
1706  */
1707 
kern_return_t
thread_set_special_port(
	thread_t                thread,
	int                     which,
	ipc_port_t              port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro = NULL;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through thread_get_special_port()
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* swap the settable-self slot in the RO zone
				 * under the thread mutex */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			/* drop the displaced send right outside the mutex */
			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* silence unused-variable warnings when CSR is compiled out */
		(void)old;
		(void)result;
		(void)tro;
#endif /* CONFIG_CSR */
		return KERN_NO_ACCESS;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1768 
1769 static inline mach_task_flavor_t
task_special_type_to_flavor(task_special_port_t which)1770 task_special_type_to_flavor(task_special_port_t which)
1771 {
1772 	switch (which) {
1773 	case TASK_KERNEL_PORT:
1774 		return TASK_FLAVOR_CONTROL;
1775 	case TASK_NAME_PORT:
1776 		return TASK_FLAVOR_NAME;
1777 	case TASK_INSPECT_PORT:
1778 		return TASK_FLAVOR_INSPECT;
1779 	case TASK_READ_PORT:
1780 		return TASK_FLAVOR_READ;
1781 	default:
1782 		break;
1783 	}
1784 	panic("invalid special port: %d", which);
1785 }
1786 
1787 /*
1788  *	Routine:	task_get_special_port [kernel call]
1789  *	Purpose:
1790  *		Clones a send right for one of the task's
1791  *		special ports.
1792  *	Conditions:
1793  *		Nothing locked.
1794  *	Returns:
1795  *		KERN_SUCCESS		    Extracted a send right.
1796  *		KERN_INVALID_ARGUMENT	The task is null.
1797  *		KERN_FAILURE		    The task/space is dead.
1798  *		KERN_INVALID_ARGUMENT	Invalid special port.
1799  */
1800 
/*
 *	Routine:	task_get_special_port_internal
 *	Purpose:
 *		Common implementation for task_get_special_port() and
 *		task_get_special_port_from_user(): checks that the caller's
 *		port flavor is allowed to request `which`, then clones the
 *		corresponding send right.
 *	Conditions:
 *		Nothing locked.
 */
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t        flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* a read/inspect caller may not request a more powerful port */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		/* the task's IPC state is already torn down */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
		/*
		 * If a settable self port was installed (SIP disabled, see
		 * task_set_special_port), hand out a copy of that instead.
		 */
		if (task->itk_settable_self != port) {
			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
		}
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		/* drop the lock: the conversion takes its own locks */
		itk_unlock(task);
		mach_task_flavor_t current_flavor = task_special_type_to_flavor(which);
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
		    task, IKOT_TASK_NAME);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = host_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1889 
1890 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t      *portp)
{
	/* in-kernel callers always operate at control-port privilege */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
1899 
1900 /* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	task_t curr_task = current_task();
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_type(port);

#if CONFIG_MACF
	/* MACF policy gets first say on any special-port retrieval */
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == curr_task) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		/*
		 * if `mac_task_check_get_movable_control_port` returned 0,
		 * then we must also have a movable task.
		 * see `task_set_exc_guard_default`
		 */
		assert(!task_is_immovable(curr_task));
	}

	/* the kobject type of the inbound port bounds what the caller may ask for */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	/* drop the reference from the port-to-task conversion */
	task_deallocate(task);
	return kr;
}
1965 
1966 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)1967 special_port_allowed_with_task_flavor(
1968 	int                which,
1969 	mach_task_flavor_t flavor)
1970 {
1971 	switch (flavor) {
1972 	case TASK_FLAVOR_CONTROL:
1973 		return KERN_SUCCESS;
1974 
1975 	case TASK_FLAVOR_READ:
1976 
1977 		switch (which) {
1978 		case TASK_READ_PORT:
1979 		case TASK_INSPECT_PORT:
1980 		case TASK_NAME_PORT:
1981 			return KERN_SUCCESS;
1982 		default:
1983 			return KERN_INVALID_CAPABILITY;
1984 		}
1985 
1986 	case TASK_FLAVOR_INSPECT:
1987 
1988 		switch (which) {
1989 		case TASK_INSPECT_PORT:
1990 		case TASK_NAME_PORT:
1991 			return KERN_SUCCESS;
1992 		default:
1993 			return KERN_INVALID_CAPABILITY;
1994 		}
1995 
1996 	default:
1997 		return KERN_INVALID_CAPABILITY;
1998 	}
1999 }
2000 
2001 /*
2002  *	Routine:	task_set_special_port [MIG call]
2003  *	Purpose:
2004  *		Changes one of the task's special ports,
2005  *		setting it to the supplied send right.
2006  *	Conditions:
2007  *		Nothing locked.  If successful, consumes
2008  *		the supplied send right.
2009  *	Returns:
2010  *		KERN_SUCCESS		    Changed the special port.
2011  *		KERN_INVALID_ARGUMENT	The task is null.
2012  *      KERN_INVALID_RIGHT      Port is marked as immovable.
2013  *		KERN_FAILURE		    The task/space is dead.
2014  *		KERN_INVALID_ARGUMENT	Invalid special port.
2015  *      KERN_NO_ACCESS		    Restricted access to set port.
2016  */
2017 
2018 kern_return_t
task_set_special_port_from_user(task_t task,int which,ipc_port_t port)2019 task_set_special_port_from_user(
2020 	task_t          task,
2021 	int             which,
2022 	ipc_port_t      port)
2023 {
2024 	if (task == TASK_NULL) {
2025 		return KERN_INVALID_ARGUMENT;
2026 	}
2027 
2028 #if CONFIG_MACF
2029 	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
2030 		return KERN_DENIED;
2031 	}
2032 #endif
2033 
2034 	return task_set_special_port(task, which, port);
2035 }
2036 
2037 /* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* driver tasks may never set special ports */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through task_get_special_port()
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return KERN_INVALID_RIGHT;
	}


	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 *
			 * break falls through to the internal setter below.
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2081 
/*
 *	Routine:	task_set_special_port_internal
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to overwrite port.
 */

kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;       /* displaced right; released after unlock */
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	switch (which) {
#if CONFIG_CSR
	/* Settable task-self only exists on CSR-capable configurations. */
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;
#endif /* CONFIG_CSR */

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* Drop the displaced send right outside the itk lock. */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2177 /*
2178  *	Routine:	mach_ports_register [kernel call]
2179  *	Purpose:
2180  *		Stash a handful of port send rights in the task.
2181  *		Child tasks will inherit these rights, but they
2182  *		must use mach_ports_lookup to acquire them.
2183  *
2184  *		The rights are supplied in a (wired) kalloc'd segment.
2185  *		Rights which aren't supplied are assumed to be null.
2186  *	Conditions:
2187  *		Nothing locked.  If successful, consumes
2188  *		the supplied rights and memory.
2189  *	Returns:
2190  *		KERN_SUCCESS		    Stashed the port rights.
2191  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2192  *		KERN_INVALID_ARGUMENT	The task is null.
2193  *		KERN_INVALID_ARGUMENT	The task is dead.
2194  *		KERN_INVALID_ARGUMENT	The memory param is null.
2195  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2196  */
2197 
2198 kern_return_t
_kernelrpc_mach_ports_register3(task_t task,mach_port_t port1,mach_port_t port2,mach_port_t port3)2199 _kernelrpc_mach_ports_register3(
2200 	task_t                  task,
2201 	mach_port_t             port1,
2202 	mach_port_t             port2,
2203 	mach_port_t             port3)
2204 {
2205 	ipc_port_t ports[TASK_PORT_REGISTER_MAX] = {
2206 		port1, port2, port3,
2207 	};
2208 
2209 	if (task == TASK_NULL) {
2210 		return KERN_INVALID_ARGUMENT;
2211 	}
2212 
2213 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2214 		/*
2215 		 * rdar://70585367
2216 		 * disallow immovable send so other process can't retrieve it through mach_ports_lookup()
2217 		 */
2218 		if (!ipc_can_stash_naked_send(ports[i])) {
2219 			return KERN_INVALID_RIGHT;
2220 		}
2221 	}
2222 
2223 	itk_lock(task);
2224 	if (!task->ipc_active) {
2225 		itk_unlock(task);
2226 		return KERN_INVALID_ARGUMENT;
2227 	}
2228 
2229 	/*
2230 	 *	Replace the old send rights with the new.
2231 	 *	Release the old rights after unlocking.
2232 	 */
2233 
2234 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2235 		ipc_port_t old;
2236 
2237 		old = task->itk_registered[i];
2238 		task->itk_registered[i] = ports[i];
2239 		ports[i] = old;
2240 	}
2241 
2242 	itk_unlock(task);
2243 
2244 	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
2245 		ipc_port_release_send(ports[i]);
2246 	}
2247 
2248 	return KERN_SUCCESS;
2249 }
2250 
2251 /*
2252  *	Routine:	mach_ports_lookup [kernel call]
2253  *	Purpose:
2254  *		Retrieves (clones) the stashed port send rights.
2255  *	Conditions:
2256  *		Nothing locked.  If successful, the caller gets
2257  *		rights and memory.
2258  *	Returns:
2259  *		KERN_SUCCESS		Retrieved the send rights.
2260  *		KERN_INVALID_ARGUMENT	The task is null.
2261  *		KERN_INVALID_ARGUMENT	The task is dead.
2262  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2263  */
2264 
2265 kern_return_t
_kernelrpc_mach_ports_lookup3(task_t task,ipc_port_t * port1,ipc_port_t * port2,ipc_port_t * port3)2266 _kernelrpc_mach_ports_lookup3(
2267 	task_t                  task,
2268 	ipc_port_t             *port1,
2269 	ipc_port_t             *port2,
2270 	ipc_port_t             *port3)
2271 {
2272 	if (task == TASK_NULL) {
2273 		return KERN_INVALID_ARGUMENT;
2274 	}
2275 
2276 	itk_lock(task);
2277 	if (!task->ipc_active) {
2278 		itk_unlock(task);
2279 		return KERN_INVALID_ARGUMENT;
2280 	}
2281 
2282 	*port1 = ipc_port_copy_send_any(task->itk_registered[0]);
2283 	*port2 = ipc_port_copy_send_any(task->itk_registered[1]);
2284 	*port3 = ipc_port_copy_send_any(task->itk_registered[2]);
2285 
2286 	itk_unlock(task);
2287 
2288 	return KERN_SUCCESS;
2289 }
2290 
/*
 *	Routine:	task_conversion_eval_internal
 *	Purpose:
 *		Policy check deciding whether `caller` may translate a port
 *		to `victim` at the given flavor (control or read).
 *		`out_trans` distinguishes out-translation (task -> port)
 *		from in-translation (port -> task).
 *	Returns:
 *		KERN_SUCCESS		conversion permitted
 *		KERN_INVALID_SECURITY	conversion denied
 */
static kern_return_t
task_conversion_eval_internal(
	task_t             caller,
	task_t             victim,
	boolean_t          out_trans,
	int                flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/* task and thread flavor constants are interchangeable here */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* boot-arg escape hatch for development; absent on secure kernels */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2385 
/* In-translation (port -> task/thread) policy check. */
kern_return_t
task_conversion_eval(task_t caller, task_t victim, int flavor)
{
	/* flavor is mach_task_flavor_t or mach_thread_flavor_t */
	static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
	static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
	return task_conversion_eval_internal(caller, victim, FALSE, flavor);
}
2394 
/* Out-translation (task/thread -> port) policy check; control flavor only. */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
{
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
	return task_conversion_eval_internal(caller, victim, TRUE, flavor);
}
2401 
2402 /*
2403  *	Routine:	task_port_kotype_valid_for_flavor
2404  *	Purpose:
2405  *		Check whether the kobject type of a mach port
2406  *      is valid for conversion to a task of given flavor.
2407  */
2408 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2409 task_port_kotype_valid_for_flavor(
2410 	natural_t          kotype,
2411 	mach_task_flavor_t flavor)
2412 {
2413 	switch (flavor) {
2414 	/* Ascending capability */
2415 	case TASK_FLAVOR_NAME:
2416 		if (kotype == IKOT_TASK_NAME) {
2417 			return TRUE;
2418 		}
2419 		OS_FALLTHROUGH;
2420 	case TASK_FLAVOR_INSPECT:
2421 		if (kotype == IKOT_TASK_INSPECT) {
2422 			return TRUE;
2423 		}
2424 		OS_FALLTHROUGH;
2425 	case TASK_FLAVOR_READ:
2426 		if (kotype == IKOT_TASK_READ) {
2427 			return TRUE;
2428 		}
2429 		OS_FALLTHROUGH;
2430 	case TASK_FLAVOR_CONTROL:
2431 		if (kotype == IKOT_TASK_CONTROL) {
2432 			return TRUE;
2433 		}
2434 		break;
2435 	default:
2436 		panic("strange task flavor");
2437 	}
2438 
2439 	return FALSE;
2440 }
2441 
2442 /*
2443  *	Routine: convert_port_to_task_with_flavor_locked_noref
2444  *	Purpose:
2445  *		Internal helper routine to convert from a locked port to a task.
2446  *	Args:
2447  *		port   - target port
2448  *		flavor - requested task port flavor
2449  *		options - port translation options
2450  *	Conditions:
2451  *		Port is locked and active.
2452  */
2453 static task_t
convert_port_to_task_with_flavor_locked_noref(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2454 convert_port_to_task_with_flavor_locked_noref(
2455 	ipc_port_t              port,
2456 	mach_task_flavor_t      flavor,
2457 	port_intrans_options_t  options)
2458 {
2459 	ipc_kobject_type_t type = ip_type(port);
2460 	task_t task;
2461 
2462 	ip_mq_lock_held(port);
2463 	require_ip_active(port);
2464 
2465 	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
2466 		return TASK_NULL;
2467 	}
2468 
2469 	task = ipc_kobject_get_locked(port, type);
2470 	if (task == TASK_NULL) {
2471 		return TASK_NULL;
2472 	}
2473 
2474 	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
2475 		assert(flavor == TASK_FLAVOR_CONTROL);
2476 		return TASK_NULL;
2477 	}
2478 
2479 	/* TODO: rdar://42389187 */
2480 	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
2481 		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
2482 	}
2483 
2484 	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
2485 	    task_conversion_eval(current_task(), task, flavor)) {
2486 		return TASK_NULL;
2487 	}
2488 
2489 	return task;
2490 }
2491 
2492 /*
2493  *	Routine: convert_port_to_task_with_flavor_locked
2494  *	Purpose:
2495  *		Internal helper routine to convert from a locked port to a task.
2496  *	Args:
2497  *		port   - target port
2498  *		flavor - requested task port flavor
2499  *		options - port translation options
2500  *		grp    - task reference group
2501  *	Conditions:
2502  *		Port is locked and active.
2503  *		Produces task ref or TASK_NULL.
2504  */
2505 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2506 convert_port_to_task_with_flavor_locked(
2507 	ipc_port_t              port,
2508 	mach_task_flavor_t      flavor,
2509 	port_intrans_options_t  options,
2510 	task_grp_t              grp)
2511 {
2512 	task_t task;
2513 
2514 	task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2515 	    options);
2516 
2517 	if (task != TASK_NULL) {
2518 		task_reference_grp(task, grp);
2519 	}
2520 
2521 	return task;
2522 }
2523 
2524 /*
2525  *	Routine:	convert_port_to_task_with_flavor
2526  *	Purpose:
2527  *		Internal helper for converting from a port to a task.
2528  *		Doesn't consume the port ref; produces a task ref,
2529  *		which may be null.
2530  *	Args:
2531  *		port   - target port
2532  *		flavor - requested task port flavor
2533  *		options - port translation options
2534  *		grp    - task reference group
2535  *	Conditions:
2536  *		Nothing locked.
2537  */
2538 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2539 convert_port_to_task_with_flavor(
2540 	ipc_port_t         port,
2541 	mach_task_flavor_t flavor,
2542 	port_intrans_options_t options,
2543 	task_grp_t         grp)
2544 {
2545 	task_t task = TASK_NULL;
2546 	task_t self = current_task();
2547 
2548 	if (IP_VALID(port)) {
2549 		if (port == self->itk_task_ports[TASK_FLAVOR_CONTROL]) {
2550 			task_reference_grp(self, grp);
2551 			return self;
2552 		}
2553 
2554 		ip_mq_lock(port);
2555 		if (ip_active(port)) {
2556 			task = convert_port_to_task_with_flavor_locked(port,
2557 			    flavor, options, grp);
2558 		}
2559 		ip_mq_unlock(port);
2560 	}
2561 
2562 	return task;
2563 }
2564 
/* Control-port intrans for in-kernel callers (kernel ref group). */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}
2572 
/* Control-port intrans for MIG-generated callers (MIG ref group). */
task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}
2580 
/* Read-port intrans for in-kernel callers; corpse tasks allowed. */
task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2588 
/* Read-port intrans that skips task_conversion_eval(); corpse tasks allowed. */
task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2596 
/* Read-port intrans for MIG-generated callers; corpse tasks allowed. */
task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2604 
/* Inspect-port intrans for in-kernel callers; eval skipped (rdar://42389187). */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2612 
/* Same as convert_port_to_task_inspect(); kept as a distinct entry point. */
task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2620 
/* Inspect-port intrans for MIG-generated callers. */
task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2628 
/* Name-port intrans for in-kernel callers. */
task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}
2636 
/* Name-port intrans for MIG-generated callers. */
task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2644 
2645 /*
2646  *	Routine:	convert_port_to_task_policy
2647  *	Purpose:
2648  *		Convert from a port to a task.
2649  *		Doesn't consume the port ref; produces a task ref,
2650  *		which may be null.
2651  *		If the port is being used with task_port_set(), any task port
2652  *		type other than TASK_CONTROL requires an entitlement. If the
2653  *		port is being used with task_port_get(), TASK_NAME requires an
2654  *		entitlement.
2655  *	Conditions:
2656  *		Nothing locked.
2657  */
2658 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2659 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2660 {
2661 	task_t task = TASK_NULL;
2662 
2663 	if (!IP_VALID(port)) {
2664 		return TASK_NULL;
2665 	}
2666 
2667 	task = set ?
2668 	    convert_port_to_task_mig(port) :
2669 	    convert_port_to_task_inspect_mig(port);
2670 
2671 	if (task == TASK_NULL &&
2672 	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2673 		task = convert_port_to_task_name_mig(port);
2674 	}
2675 
2676 	return task;
2677 }
2678 
/* task_policy_set() intrans: control port required unless entitled. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}
2684 
/* task_policy_get() intrans: inspect port suffices unless entitled. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2690 
2691 /*
2692  *	Routine:	convert_port_to_task_suspension_token
2693  *	Purpose:
2694  *		Convert from a port to a task suspension token.
2695  *		Doesn't consume the port ref; produces a suspension token ref,
2696  *		which may be null.
2697  *	Conditions:
2698  *		Nothing locked.
2699  */
2700 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2701 convert_port_to_task_suspension_token_grp(
2702 	ipc_port_t              port,
2703 	task_grp_t              grp)
2704 {
2705 	task_suspension_token_t task = TASK_NULL;
2706 
2707 	if (IP_VALID(port)) {
2708 		ip_mq_lock(port);
2709 		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2710 		if (task != TASK_NULL) {
2711 			task_reference_grp(task, grp);
2712 		}
2713 		ip_mq_unlock(port);
2714 	}
2715 
2716 	return task;
2717 }
2718 
/* Suspension-token intrans, external reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}
2725 
/* Suspension-token intrans, MIG reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}
2732 
/* Suspension-token intrans, kernel reference group. */
task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2739 
/*
 *	Routine:	convert_port_to_space_with_flavor
 *	Purpose:
 *		Internal helper for converting from a port to a space.
 *		Doesn't consume the port ref; produces a space ref,
 *		which may be null.
 *	Args:
 *		port   - target port
 *		flavor - requested ipc space flavor
 *		options - port translation options
 *	Conditions:
 *		Nothing locked.
 */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* there is no "space name" concept; name flavor is meaningless here */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2791 
/* Control-port -> ipc space (full capability). */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2799 
/* Read-port -> ipc space; corpse tasks allowed. */
ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2807 
/* Read-port -> ipc space, skipping task_conversion_eval(). */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2815 
/* Inspect-port -> ipc space. */
ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2823 
/*
 *	Routine:	convert_port_to_map_with_flavor
 *	Purpose:
 *		Internal helper for converting from a port to a map.
 *		Doesn't consume the port ref; produces a map ref,
 *		which may be null.
 *	Args:
 *		port   - target port
 *		flavor - requested vm map flavor
 *		options - port translation options
 *	Conditions:
 *		Nothing locked.
 */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* map access always goes through the full conversion policy check */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* Hard stop: never hand the kernel map to user-reachable paths. */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
2886 
/* Control-port -> vm map (full capability). */
vm_map_t
convert_port_to_map(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
2894 
/* Read-port -> vm map; corpse tasks allowed. */
vm_map_read_t
convert_port_to_map_read(
	ipc_port_t              port)
{
	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2902 
/* No vm_map inspect interface exists yet; this always returns NULL. */
vm_map_inspect_t
convert_port_to_map_inspect(
	__unused ipc_port_t     port)
{
	/* there is no vm_map_inspect_t routines at the moment. */
	return VM_MAP_INSPECT_NULL;
}
2910 
2911 /*
2912  *	Routine:	thread_port_kotype_valid_for_flavor
2913  *	Purpose:
2914  *		Check whether the kobject type of a mach port
2915  *      is valid for conversion to a thread of given flavor.
2916  */
2917 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)2918 thread_port_kotype_valid_for_flavor(
2919 	natural_t            kotype,
2920 	mach_thread_flavor_t flavor)
2921 {
2922 	switch (flavor) {
2923 	/* Ascending capability */
2924 	case THREAD_FLAVOR_INSPECT:
2925 		if (kotype == IKOT_THREAD_INSPECT) {
2926 			return TRUE;
2927 		}
2928 		OS_FALLTHROUGH;
2929 	case THREAD_FLAVOR_READ:
2930 		if (kotype == IKOT_THREAD_READ) {
2931 			return TRUE;
2932 		}
2933 		OS_FALLTHROUGH;
2934 	case THREAD_FLAVOR_CONTROL:
2935 		if (kotype == IKOT_THREAD_CONTROL) {
2936 			return TRUE;
2937 		}
2938 		break;
2939 	default:
2940 		panic("strange thread flavor");
2941 	}
2942 
2943 	return FALSE;
2944 }
2945 
/*
 *	Routine: convert_port_to_thread_with_flavor_locked
 *	Purpose:
 *		Internal helper routine to convert from a locked port to a thread.
 *	Args:
 *		port   - target port
 *		flavor - requested thread port flavor
 *		options - port translation options
 *	Conditions:
 *		Port is locked and active.
 *		Produces a thread ref or THREAD_NULL.
 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t               port,
	mach_thread_flavor_t     flavor,
	port_intrans_options_t   options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_type(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* callers may explicitly refuse to resolve the calling thread */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* restrict resolution to threads of the calling task */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	thread_reference(thread);
	return thread;
}
3012 
3013 /*
3014  *	Routine:	convert_port_to_thread_with_flavor
3015  *	Purpose:
3016  *		Internal helper for converting from a port to a thread.
3017  *		Doesn't consume the port ref; produces a thread ref,
3018  *		which may be null.
3019  *	Args:
3020  *		port   - target port
3021  *		flavor - requested thread port flavor
3022  *		options - port translation options
3023  *	Conditions:
3024  *		Nothing locked.
3025  */
3026 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3027 convert_port_to_thread_with_flavor(
3028 	ipc_port_t           port,
3029 	mach_thread_flavor_t flavor,
3030 	port_intrans_options_t options)
3031 {
3032 	thread_t thread = THREAD_NULL;
3033 
3034 	if (IP_VALID(port)) {
3035 		ip_mq_lock(port);
3036 		if (ip_active(port)) {
3037 			thread = convert_port_to_thread_with_flavor_locked(port,
3038 			    flavor, options);
3039 		}
3040 		ip_mq_unlock(port);
3041 	}
3042 
3043 	return thread;
3044 }
3045 
/* Control-port intrans for threads. */
thread_t
convert_port_to_thread(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
3053 
/* Read-port intrans for threads; corpse tasks allowed. */
thread_read_t
convert_port_to_thread_read(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3061 
/* Read-port intrans for threads, skipping task_conversion_eval(). */
static thread_read_t
convert_port_to_thread_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3069 
/* Inspect-port intrans for threads; eval skipped (rdar://42389187). */
thread_inspect_t
convert_port_to_thread_inspect(
	ipc_port_t              port)
{
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3077 
3078 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3079 convert_port_to_thread_inspect_no_eval(
3080 	ipc_port_t              port)
3081 {
3082 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3083 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3084 }
3085 
3086 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3087 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3088 {
3089 	switch (flavor) {
3090 	case THREAD_FLAVOR_CONTROL:
3091 		return IKOT_THREAD_CONTROL;
3092 	case THREAD_FLAVOR_READ:
3093 		return IKOT_THREAD_READ;
3094 	default:
3095 		return IKOT_THREAD_INSPECT;
3096 	}
3097 }
3098 
3099 
/*
 *	Routine:	convert_thread_to_port_immovable
 *	Purpose:
 *		Produce a naked send right for the thread's control port.
 *		Consumes a thread ref; the result may be IP_NULL if the
 *		thread's IPC state is no longer active.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_thread_to_port_immovable(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t  port = IP_NULL;

	thread_mtx_lock(thread);

	/* only mint a send right while the thread's IPC state is active */
	if (thread->ipc_active) {
		port = ipc_kobject_make_send(tro->tro_ports[THREAD_FLAVOR_CONTROL],
		    thread, IKOT_THREAD_CONTROL);
	}

	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3118 
/*
 *	Routine:	convert_thread_to_port_with_flavor
 *	Purpose:
 *		Convert from a thread to a port of given flavor.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 *	Args:
 *		thread - thread to convert (one reference consumed)
 *		tro    - the thread's read-only struct (holds the port table)
 *		flavor - requested thread port flavor
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t              thread,
	thread_ro_t           tro,
	mach_thread_flavor_t  flavor)
{
	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	/* thread IPC already torn down: return IP_NULL */
	if (!thread->ipc_active) {
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	} else if (IP_VALID(port)) {
		/* read/inspect port already exists: just mint a send right */
		(void)ipc_kobject_make_send(port, thread, kotype);
	} else {
		ipc_object_label_t label = IPC_OBJECT_LABEL(kotype);

		/*
		 * If Developer Mode is off, substitute read port for control
		 * port if copying out to owning task's space, for the sake of
		 * in-process exception handler.
		 *
		 * Also see: exception_deliver().
		 */
		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
			label = ipc_kobject_label_alloc(kotype,
			    IPC_LABEL_SUBST_THREAD_READ, tro->tro_ports[THREAD_FLAVOR_CONTROL]);
		}

		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(thread, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND);

		/* publish the lazily-created port in the read-only struct */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3195 
3196 ipc_port_t
convert_thread_to_port(thread_t thread)3197 convert_thread_to_port(
3198 	thread_t                thread)
3199 {
3200 	thread_ro_t tro = get_thread_ro(thread);
3201 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3202 }
3203 
3204 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3205 convert_thread_read_to_port(thread_read_t thread)
3206 {
3207 	thread_ro_t tro = get_thread_ro(thread);
3208 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3209 }
3210 
3211 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3212 convert_thread_inspect_to_port(thread_inspect_t thread)
3213 {
3214 	thread_ro_t tro = get_thread_ro(thread);
3215 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3216 }
3217 
3218 void
convert_thread_array_to_ports(thread_act_array_t array,size_t count,mach_thread_flavor_t flavor)3219 convert_thread_array_to_ports(
3220 	thread_act_array_t      array,
3221 	size_t                  count,
3222 	mach_thread_flavor_t    flavor)
3223 {
3224 	thread_t *thread_list = (thread_t *)array;
3225 
3226 	for (size_t i = 0; i < count; i++) {
3227 		thread_t   thread = thread_list[i];
3228 		ipc_port_t port;
3229 
3230 		switch (flavor) {
3231 		case THREAD_FLAVOR_CONTROL:
3232 			port = convert_thread_to_port(thread);
3233 			break;
3234 		case THREAD_FLAVOR_READ:
3235 			port = convert_thread_read_to_port(thread);
3236 			break;
3237 		case THREAD_FLAVOR_INSPECT:
3238 			port = convert_thread_inspect_to_port(thread);
3239 			break;
3240 		}
3241 
3242 		array[i].port = port;
3243 	}
3244 }
3245 
3246 
/*
 *	Routine:	port_name_to_thread
 *	Purpose:
 *		Convert from a port name to a thread reference
 *		A name of MACH_PORT_NULL is valid for the null thread.
 *	Args:
 *		name    - port name in the current space
 *		options - translation options; corpse and skip-eval options
 *		          are not supported here (asserted below)
 *	Conditions:
 *		Nothing locked.
 */
thread_t
port_name_to_thread(
	mach_port_name_t         name,
	port_intrans_options_t options)
{
	thread_t        thread = THREAD_NULL;
	ipc_port_t      kport;
	kern_return_t kr;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/* this entry point only translates control-flavor ports */
			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
			thread = convert_port_to_thread_with_flavor_locked(kport,
			    THREAD_FLAVOR_CONTROL, options);
			ip_mq_unlock(kport);
		}
	}

	return thread;
}
3278 
3279 /*
3280  *	Routine:	port_name_is_pinned_self
3281  *	Purpose:
3282  *		Returns whether this port name is for the pinned
3283  *		mach_task_self (if it exists).
3284  *
3285  *		task_self_trap() will memorize the name the port has
3286  *		in the space in ip_receiver_name when it gets pinned,
3287  *		which we can use to fast-track this answer without
3288  *		taking any lock.
3289  *
3290  *		ipc_task_disable() will set `ip_receiver_name` back to
3291  *		MACH_PORT_SPECIAL_DEFAULT.
3292  *
3293  *	Conditions:
3294  *		self must be current_task()
3295  *		Nothing locked.
3296  */
3297 static bool
port_name_is_pinned_self(task_t self,mach_port_name_t name)3298 port_name_is_pinned_self(
3299 	task_t             self,
3300 	mach_port_name_t   name)
3301 {
3302 	ipc_port_t kport = self->itk_task_ports[TASK_FLAVOR_CONTROL];
3303 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3304 	       ip_get_receiver_name(kport) == name;
3305 }
3306 
/*
 *	Routine:	port_name_to_current_task*_noref
 *	Purpose:
 *		Convert from a port name to current_task()
 *		A name of MACH_PORT_NULL is valid for the null task.
 *
 *		If current_task() is in the process of being terminated,
 *		this might return a non NULL task even when port_name_to_task()
 *		would.
 *
 *		However, this is an acceptable race that can't be controlled by
 *		userspace, and that downstream code using the returned task
 *		has to handle anyway.
 *
 *		ipc_space_disable() does try to narrow this race,
 *		by causing port_name_is_pinned_self() to fail.
 *
 *	Returns:
 *		current_task() if the port name was for current_task()
 *		at the appropriate flavor.
 *
 *		TASK_NULL otherwise.
 *
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t   name,
	mach_task_flavor_t flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() name, no locks needed */
	if (port_name_is_pinned_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			ipc_kobject_type_t type = ip_type(kport);
			/* only accept kobject types that satisfy the requested flavor */
			if (task_port_kotype_valid_for_flavor(type, flavor)) {
				task = ipc_kobject_get_locked(kport, type);
			}
			ip_mq_unlock(kport);
			/* no ref is produced: only current_task() may be returned */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3363 
3364 task_t
port_name_to_current_task_noref(mach_port_name_t name)3365 port_name_to_current_task_noref(
3366 	mach_port_name_t name)
3367 {
3368 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3369 }
3370 
3371 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3372 port_name_to_current_task_read_noref(
3373 	mach_port_name_t name)
3374 {
3375 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3376 }
3377 
/*
 *	Routine:	port_name_to_task_grp
 *	Purpose:
 *		Convert from a port name to a task reference
 *		A name of MACH_PORT_NULL is valid for the null task.
 *		Acquire a send right if [inout] @kportp is non-null.
 *	Args:
 *		name    - port name in the current space
 *		flavor  - minimum task port flavor required
 *		options - port translation options
 *		grp     - reference-counting group for the produced task ref
 *		kportp  - if non-null, also returns the port with +1 send right
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_task_grp(
	mach_port_name_t name,
	mach_task_flavor_t flavor,
	port_intrans_options_t options,
	task_grp_t       grp,
	ipc_port_t       *kportp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/*
	 * Fast path for the pinned mach_task_self() name; skipped when a
	 * send right is requested, since no port is translated here.
	 */
	if (!kportp && port_name_is_pinned_self(self, name)) {
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(kport,
			    flavor, options, grp);
			if (kportp) {
				/* send right requested */
				ipc_port_copy_send_any_locked(kport);
				*kportp = kport;
			}
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3421 
3422 task_t
port_name_to_task_external(mach_port_name_t name)3423 port_name_to_task_external(
3424 	mach_port_name_t name)
3425 {
3426 	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_EXTERNAL, NULL);
3427 }
3428 
3429 task_t
port_name_to_task_kernel(mach_port_name_t name)3430 port_name_to_task_kernel(
3431 	mach_port_name_t name)
3432 {
3433 	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL, NULL);
3434 }
3435 
3436 /*
3437  *	Routine:	port_name_to_task_read
3438  *	Purpose:
3439  *		Convert from a port name to a task reference
3440  *		A name of MACH_PORT_NULL is valid for the null task.
3441  *	Conditions:
3442  *		Nothing locked.
3443  */
3444 task_read_t
port_name_to_task_read(mach_port_name_t name)3445 port_name_to_task_read(
3446 	mach_port_name_t name)
3447 {
3448 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, NULL);
3449 }
3450 
3451 /*
3452  *	Routine:	port_name_to_task_read_and_send_right
3453  *	Purpose:
3454  *		Convert from a port name to a task reference
3455  *		A name of MACH_PORT_NULL is valid for the null task.
3456  *	Conditions:
3457  *		On success, ipc port returned with a +1 send right.
3458  */
3459 task_read_t
port_name_to_task_read_and_send_right(mach_port_name_t name,ipc_port_t * kportp)3460 port_name_to_task_read_and_send_right(
3461 	mach_port_name_t name,
3462 	ipc_port_t *kportp)
3463 {
3464 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, kportp);
3465 }
3466 
3467 /*
3468  *	Routine:	port_name_to_task_read_no_eval
3469  *	Purpose:
3470  *		Convert from a port name to a task reference
3471  *		A name of MACH_PORT_NULL is valid for the null task.
3472  *		Skips task_conversion_eval() during conversion.
3473  *	Conditions:
3474  *		Nothing locked.
3475  */
3476 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3477 port_name_to_task_read_no_eval(
3478 	mach_port_name_t name)
3479 {
3480 	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3481 	    PORT_INTRANS_ALLOW_CORPSE_TASK;
3482 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, options, TASK_GRP_KERNEL, NULL);
3483 }
3484 
3485 /*
3486  *	Routine:	port_name_to_task_name
3487  *	Purpose:
3488  *		Convert from a port name to a task reference
3489  *		A name of MACH_PORT_NULL is valid for the null task.
3490  *	Conditions:
3491  *		Nothing locked.
3492  */
3493 task_name_t
port_name_to_task_name(mach_port_name_t name)3494 port_name_to_task_name(
3495 	mach_port_name_t name)
3496 {
3497 	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3498 	    PORT_INTRANS_ALLOW_CORPSE_TASK;
3499 	return port_name_to_task_grp(name, TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL, NULL);
3500 }
3501 
3502 /*
3503  *	Routine:	port_name_to_task_id_token
3504  *	Purpose:
3505  *		Convert from a port name to a task identity token reference
3506  *	Conditions:
3507  *		Nothing locked.
3508  */
3509 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3510 port_name_to_task_id_token(
3511 	mach_port_name_t name)
3512 {
3513 	ipc_port_t port;
3514 	kern_return_t kr;
3515 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3516 
3517 	if (MACH_PORT_VALID(name)) {
3518 		kr = ipc_port_translate_send(current_space(), name, &port);
3519 		if (kr == KERN_SUCCESS) {
3520 			token = convert_port_to_task_id_token(port);
3521 			ip_mq_unlock(port);
3522 		}
3523 	}
3524 	return token;
3525 }
3526 
3527 /*
3528  *	Routine:	port_name_to_host
3529  *	Purpose:
3530  *		Convert from a port name to a host pointer.
3531  *		NOTE: This does _not_ return a +1 reference to the host_t
3532  *	Conditions:
3533  *		Nothing locked.
3534  */
3535 host_t
port_name_to_host(mach_port_name_t name)3536 port_name_to_host(
3537 	mach_port_name_t name)
3538 {
3539 	host_t host = HOST_NULL;
3540 	kern_return_t kr;
3541 	ipc_port_t port;
3542 
3543 	if (MACH_PORT_VALID(name)) {
3544 		kr = ipc_port_translate_send(current_space(), name, &port);
3545 		if (kr == KERN_SUCCESS) {
3546 			host = convert_port_to_host(port);
3547 			ip_mq_unlock(port);
3548 		}
3549 	}
3550 	return host;
3551 }
3552 
3553 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3554 task_flavor_to_kotype(mach_task_flavor_t flavor)
3555 {
3556 	switch (flavor) {
3557 	case TASK_FLAVOR_CONTROL:
3558 		return IKOT_TASK_CONTROL;
3559 	case TASK_FLAVOR_READ:
3560 		return IKOT_TASK_READ;
3561 	case TASK_FLAVOR_INSPECT:
3562 		return IKOT_TASK_INSPECT;
3563 	default:
3564 		return IKOT_TASK_NAME;
3565 	}
3566 }
3567 
/*
 *	Routine:	convert_task_to_port_with_flavor
 *	Purpose:
 *		Convert from a task to a port of given flavor.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 *	Args:
 *		task   - task to convert (one reference consumed)
 *		flavor - requested task port flavor
 *		grp    - reference-counting group for the dropped task ref
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t              task,
	mach_task_flavor_t  flavor,
	task_grp_t          grp)
{
	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	itk_lock(task);

	/* task IPC already torn down: return IP_NULL */
	if (!task->ipc_active) {
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		/* control/name ports always exist while ipc_active */
		port = ipc_kobject_make_send(task->itk_task_ports[flavor],
		    task, kotype);
		break;
	/*
	 * Claim a send right on the task read/inspect port,
	 * and request a no-senders notification on that port
	 * (if none outstanding).
	 *
	 * The task's itk_lock is used to synchronize the handling
	 * of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		port = task->itk_task_ports[flavor];
		if (IP_VALID(port)) {
			/* port already exists: just mint a send right */
			(void)ipc_kobject_make_send(port, task, kotype);
		} else {
			ipc_object_label_t label = IPC_OBJECT_LABEL(kotype);

			/*
			 * If Developer Mode is off, substitute read port for control port if
			 * copying out to owning task's space, for the sake of in-process
			 * exception handler.
			 *
			 * Also see: exception_deliver().
			 */
			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
				label = ipc_kobject_label_alloc(kotype,
				    IPC_LABEL_SUBST_TASK_READ, task->itk_task_ports[TASK_FLAVOR_CONTROL]);
			}

			/* lazily create the read/inspect port with a send right */
			port = ipc_kobject_alloc_port(task, label,
			    IPC_KOBJECT_ALLOC_MAKE_SEND);
			task->itk_task_ports[flavor] = port;
		}
		break;
	}

exit:
	itk_unlock(task);
	task_deallocate_grp(task, grp);
	return port;
}
3649 
/*
 *	Routine:	convert_corpse_to_port_and_nsrequest
 *	Purpose:
 *		Produce a naked send right for a corpse task's control port.
 *		Consumes a task ref.
 *	Conditions:
 *		Nothing locked. @corpse must be a corpse task.
 */
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	port = ipc_kobject_make_send(port, corpse, IKOT_TASK_CONTROL);
	itk_unlock(corpse);

	task_deallocate(corpse);
	return port;
}
3665 
3666 ipc_port_t
convert_task_to_port(task_t task)3667 convert_task_to_port(
3668 	task_t          task)
3669 {
3670 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3671 }
3672 
3673 ipc_port_t
convert_task_read_to_port(task_read_t task)3674 convert_task_read_to_port(
3675 	task_read_t          task)
3676 {
3677 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3678 }
3679 
3680 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3681 convert_task_inspect_to_port(
3682 	task_inspect_t          task)
3683 {
3684 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3685 }
3686 
3687 ipc_port_t
convert_task_name_to_port(task_name_t task)3688 convert_task_name_to_port(
3689 	task_name_t             task)
3690 {
3691 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3692 }
3693 
3694 ipc_port_t
convert_task_to_port_external(task_t task)3695 convert_task_to_port_external(task_t task)
3696 {
3697 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3698 }
3699 
3700 ipc_port_t
convert_task_read_to_port_external(task_t task)3701 convert_task_read_to_port_external(task_t task)
3702 {
3703 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3704 }
3705 
3706 void
convert_task_array_to_ports(task_array_t array,size_t count,mach_task_flavor_t flavor)3707 convert_task_array_to_ports(
3708 	task_array_t            array,
3709 	size_t                  count,
3710 	mach_task_flavor_t      flavor)
3711 {
3712 	task_t *task_list = (task_t *)array;
3713 
3714 	for (size_t i = 0; i < count; i++) {
3715 		task_t     task = task_list[i];
3716 		ipc_port_t port;
3717 
3718 		switch (flavor) {
3719 		case TASK_FLAVOR_CONTROL:
3720 			/* copyout determines immovability, see `should_mark_immovable_send` */
3721 			port = convert_task_to_port(task);
3722 			break;
3723 		case TASK_FLAVOR_READ:
3724 			port = convert_task_read_to_port(task);
3725 			break;
3726 		case TASK_FLAVOR_INSPECT:
3727 			port = convert_task_inspect_to_port(task);
3728 			break;
3729 		case TASK_FLAVOR_NAME:
3730 			port = convert_task_name_to_port(task);
3731 			break;
3732 		}
3733 
3734 		array[i].port = port;
3735 	}
3736 }
3737 
/*
 *	Routine:	convert_task_suspend_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t         task,
	task_grp_t                      grp)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* lazily create the resume port on first use */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = task->itk_resume;
		ipc_kobject_require(port, task, IKOT_TASK_RESUME);
		port = ipc_port_make_sonce(port);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* task already terminated: no resume right to hand out */
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate_grp(task, grp);

	return port;
}
3781 
3782 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3783 convert_task_suspension_token_to_port_external(
3784 	task_suspension_token_t         task)
3785 {
3786 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3787 }
3788 
3789 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3790 convert_task_suspension_token_to_port_mig(
3791 	task_suspension_token_t         task)
3792 {
3793 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3794 }
3795 
3796 /*
3797  *	Routine:	space_deallocate
3798  *	Purpose:
3799  *		Deallocate a space ref produced by convert_port_to_space.
3800  *	Conditions:
3801  *		Nothing locked.
3802  */
3803 
3804 void
space_deallocate(ipc_space_t space)3805 space_deallocate(
3806 	ipc_space_t     space)
3807 {
3808 	if (space != IS_NULL) {
3809 		is_release(space);
3810 	}
3811 }
3812 
3813 /*
3814  *	Routine:	space_read_deallocate
3815  *	Purpose:
3816  *		Deallocate a space read ref produced by convert_port_to_space_read.
3817  *	Conditions:
3818  *		Nothing locked.
3819  */
3820 
3821 void
space_read_deallocate(ipc_space_read_t space)3822 space_read_deallocate(
3823 	ipc_space_read_t     space)
3824 {
3825 	if (space != IS_INSPECT_NULL) {
3826 		is_release((ipc_space_t)space);
3827 	}
3828 }
3829 
3830 /*
3831  *	Routine:	space_inspect_deallocate
3832  *	Purpose:
3833  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
3834  *	Conditions:
3835  *		Nothing locked.
3836  */
3837 
3838 void
space_inspect_deallocate(ipc_space_inspect_t space)3839 space_inspect_deallocate(
3840 	ipc_space_inspect_t     space)
3841 {
3842 	if (space != IS_INSPECT_NULL) {
3843 		is_release((ipc_space_t)space);
3844 	}
3845 }
3846 
3847 
3848 static boolean_t
behavior_is_identity_protected(int new_behavior)3849 behavior_is_identity_protected(int new_behavior)
3850 {
3851 	return ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED) ||
3852 	       ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE) ||
3853 	       ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE_IDENTITY_PROTECTED);
3854 }
3855 
/*
 *	Routine:	send_set_exception_telemetry
 *	Purpose:
 *		Report a set-exception-port policy violation to CoreAnalytics,
 *		recording the current and excepting task names and the mask.
 *	Conditions:
 *		Nothing locked.
 */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	task_procname(current_task(), (char *) &event->current_proc, sizeof(event->current_proc));
	task_procname(excepting_task, (char *) &event->thread_proc, sizeof(event->thread_proc));
	event->mask = mask;

	CA_EVENT_SEND(ca_event);
}
3868 
3869 /* Returns whether the violation should be ignored */
3870 static boolean_t
set_exception_behavior_violation(const task_t excepting_task,const exception_mask_t mask,int new_behavior)3871 set_exception_behavior_violation(const task_t excepting_task, const exception_mask_t mask, int new_behavior)
3872 {
3873 	if (thid_should_crash) {
3874 		/* create lightweight corpse */
3875 		mach_port_guard_exception(new_behavior, mask, kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
3876 	}
3877 
3878 	/* always report the proc name to CA */
3879 	send_set_exception_telemetry(excepting_task, mask);
3880 
3881 	/* if the bootarg has been manually set to false, ignore the violation */
3882 	return !thid_should_crash;
3883 }
3884 
3885 static bool
exception_exposes_protected_ports(const ipc_port_t new_port)3886 exception_exposes_protected_ports(const ipc_port_t new_port)
3887 {
3888 	/*
3889 	 * sending exceptions to invalid port does not pose risk
3890 	 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
3891 	 */
3892 	return IP_VALID(new_port) && !is_ux_handler_port(new_port);
3893 }
3894 
3895 static bool
exception_ports_frozen(task_t excepting_task)3896 exception_ports_frozen(task_t excepting_task)
3897 {
3898 	return excepting_task &&
3899 	       (task_ro_flags_get(excepting_task) & TFRO_FREEZE_EXCEPTION_PORTS);
3900 }
3901 
#if XNU_TARGET_OS_OSX && CONFIG_CSR
/*
 * Returns whether System Integrity Protection restricts the filesystem,
 * i.e. csr_check() denies CSR_ALLOW_UNRESTRICTED_FS.
 */
static bool
SIP_is_enabled(void)
{
	/*
	 * `(void)` gives this a proper prototype; an empty parameter list
	 * declares a function with unspecified parameters (deprecated,
	 * removed in C23).
	 */
	return csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0;
}
#endif /* XNU_TARGET_OS_OSX && CONFIG_CSR*/
3909 
/*
 *	Routine:	exception_is_identity_protected
 *	Purpose:
 *		Enforce that exception ports exposing protected ports are
 *		registered with an identity-protected (or state-only) behavior.
 *	Returns:
 *		true when the behavior is acceptable or enforcement does not
 *		apply; otherwise the result of set_exception_behavior_violation()
 *		(i.e. whether the violation should be ignored).
 *	Conditions:
 *		Nothing locked.
 */
static bool
exception_is_identity_protected(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask)
{
	ipc_space_policy_t policy = {};

	/* excepting_task is NULL if we are setting a host exception port. */
	if (excepting_task) {
		policy = ipc_policy_for_task(excepting_task);
	}

	/* every conjunct below must hold for the violation path to be taken */
	if (exception_exposes_protected_ports(new_port)
	    && (!excepting_task || ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V1))
	    && !behavior_is_identity_protected(new_behavior)
#if CONFIG_CSR
	    && SIP_is_enabled()     /* cannot enforce if SIP is disabled */
#endif /* CONFIG_CSR */
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    ) {
		return set_exception_behavior_violation(excepting_task, mask, new_behavior);
	}

	return true;
}
3937 
/*
 *	Routine:	set_exception_behavior_allowed
 *	Purpose:
 *		Security-policy gate for setting exception ports: entitled
 *		debuggers/tools are always allowed; tasks under the
 *		only-one-exception-port policy reject non-hardened setters;
 *		everyone else must use identity-protected behaviors.
 *	Conditions:
 *		Nothing locked.
 */
static boolean_t
set_exception_behavior_allowed(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask, const bool hardened_exception)
{
	const char *excepting_task_name = "";
	const char *cur_task_name = "";

	/* resolve task names up front for the log messages below */
	if (excepting_task) {
		excepting_task_name = task_best_name(excepting_task);
	}
	if (current_task()) {
		cur_task_name = task_best_name(current_task());
	}

	/* Allow debuggers, tests, and tooling to set exception ports however they wish */
	if (IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		kprintf("Allowing set_exception_ports from [%s] on [%s] for "
		    "entitled process/debugger\n", cur_task_name, excepting_task_name);
		return true;
	}

	/* excepting_task can be NULL if setting the host port */
	if (excepting_task) {
		/*
		 * Only allow hardened set_exception_port calls on hardened tasks
		 * that opt in via entitlement
		 */
		ipc_space_policy_t pol = ipc_policy_for_task(excepting_task);
		bool only_one_exception_port =
		    IOTaskHasEntitlement(excepting_task, IPC_ONLY_ONE_EXCEPTION_PORT)
		    && ipc_should_apply_policy(pol, IPC_SPACE_POLICY_ENHANCED_V1);

		if (!hardened_exception && only_one_exception_port) {
			kprintf("Disallowing set_exception_ports from [%s] on [%s] due "
			    "to only_one_exception_port policy\n", cur_task_name, excepting_task_name);
			return set_exception_behavior_violation(excepting_task, mask, new_behavior);
		}
	}

	/* Everyone else follows the standard policy and must use identity protected exceptions */
	return exception_is_identity_protected(new_port, new_behavior, excepting_task, mask);
}
3980 
/*
 *	Routine: set_exception_ports_validation
 *	Purpose:
 *		Common argument validation shared between all exception port
 *		setting/swapping routines
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS            Setting the exception port is allowed
 *		                        with these arguments
 *		KERN_INVALID_ARGUMENT   Invalid arguments
 *		KERN_INVALID_RIGHT      Incorrect port configuration
 *		KERN_DENIED             Denied by security policy
 */
kern_return_t
set_exception_ports_validation(
	task_t                  task,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor,
	bool                    hardened_exception)
{
	/* reject masks with bits outside the valid exception set */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* behavior is only validated when a real port is being registered */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
		case EXCEPTION_STATE_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	if (IP_VALID(new_port) && !ipc_is_valid_exception_port(task, new_port)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* identity-protected and backtrace-preferred behaviors require MACH_EXCEPTION_CODES */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* final gate: security policy (entitlements, hardening, SIP) */
	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, hardened_exception)) {
		return KERN_DENIED;
	}

	return KERN_SUCCESS;
}
4048 
4049 /*
4050  *	Routine:	thread_set_exception_ports_internal
4051  *	Purpose:
4052  *		Set a new exception action on the thread
4053  *	Conditions:
4054  *		Arguments have been validated via `set_exception_ports_validation`
4055  *		Nothing locked.
4056  *  Returns:
4057  *      KERN_SUCCESS	Setting the exception port is allowed with these arguments
4058  *		KERN_FAILURE	Thread is inactive
4059  */
kern_return_t
thread_set_exception_ports_internal(
	thread_t                thread,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor,
	boolean_t               hardened)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());

#if CONFIG_MACF
	/* MAC hook: policy may veto changing this thread's exception ports. */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	/* Label is created before taking the thread mutex. */
	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* Lazily allocate the per-thread exception action array. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		/* Update only the exception types selected by the mask (and,
		 * under MACF, only when the label update is permitted). */
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/* Swap in a fresh send right; stash the old one for
			 * release after the mutex is dropped. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
			action->hardened = hardened;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced send rights outside the thread mutex. */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4132 
4133 /*
4134  *	Routine:	thread/task_set_exception_ports [kernel call]
4135  *	Purpose:
4136  *			Sets the thread/task exception port, flavor and
4137  *			behavior for the exception types specified by the mask.
4138  *			There will be one send right per exception per valid
4139  *			port.
4140  *	Conditions:
4141  *		Nothing locked.  If successful, consumes
4142  *		the supplied send right.
4143  *	Returns:
4144  *		KERN_SUCCESS		Changed the special port.
4145  *		KERN_INVALID_ARGUMENT	The thread is null,
4146  *					Illegal mask bit set.
4147  *					Illegal exception behavior
4148  *		KERN_FAILURE		The thread is dead.
4149  *		KERN_NO_ACCESS		Restricted access to set port
4150  */
4151 
4152 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4153 thread_set_exception_ports(
4154 	thread_t                thread,
4155 	exception_mask_t        exception_mask,
4156 	ipc_port_t              new_port,
4157 	exception_behavior_t    new_behavior,
4158 	thread_state_flavor_t   new_flavor)
4159 {
4160 	if (thread == THREAD_NULL) {
4161 		return KERN_INVALID_ARGUMENT;
4162 	}
4163 	bool hardened_exception_flow = false;
4164 	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
4165 	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
4166 	if (kr != KERN_SUCCESS) {
4167 		return kr;
4168 	}
4169 
4170 	return thread_set_exception_ports_internal(thread, exception_mask, new_port, new_behavior, new_flavor, false);
4171 }
4172 
4173 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4174 task_set_exception_ports(
4175 	task_t                                  task,
4176 	exception_mask_t                exception_mask,
4177 	ipc_port_t                              new_port,
4178 	exception_behavior_t    new_behavior,
4179 	thread_state_flavor_t   new_flavor)
4180 {
4181 	ipc_port_t              old_port[EXC_TYPES_COUNT];
4182 	boolean_t privileged = task_is_privileged(current_task());
4183 	register int    i;
4184 
4185 	if (task == TASK_NULL) {
4186 		return KERN_INVALID_ARGUMENT;
4187 	}
4188 	bool hardened_exception_flow = false;
4189 	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
4190 	    new_port, new_behavior, new_flavor, hardened_exception_flow);
4191 	if (kr != KERN_SUCCESS) {
4192 		return kr;
4193 	}
4194 
4195 
4196 #if CONFIG_MACF
4197 	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
4198 		return KERN_NO_ACCESS;
4199 	}
4200 
4201 	struct label *new_label = mac_exc_create_label_for_current_proc();
4202 #endif
4203 
4204 	itk_lock(task);
4205 
4206 	/*
4207 	 * Allow setting exception port during the span of ipc_task_init() to
4208 	 * ipc_task_terminate(). posix_spawn() port actions can set exception
4209 	 * ports on target task _before_ task IPC access is enabled.
4210 	 */
4211 	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4212 		itk_unlock(task);
4213 #if CONFIG_MACF
4214 		mac_exc_free_label(new_label);
4215 #endif
4216 		return KERN_FAILURE;
4217 	}
4218 
4219 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4220 		if ((exception_mask & (1 << i))
4221 #if CONFIG_MACF
4222 		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4223 #endif
4224 		    ) {
4225 			old_port[i] = task->exc_actions[i].port;
4226 			task->exc_actions[i].port =
4227 			    exception_port_copy_send(new_port);
4228 			task->exc_actions[i].behavior = new_behavior;
4229 			task->exc_actions[i].flavor = new_flavor;
4230 			task->exc_actions[i].privileged = privileged;
4231 		} else {
4232 			old_port[i] = IP_NULL;
4233 		}
4234 	}
4235 
4236 	itk_unlock(task);
4237 
4238 #if CONFIG_MACF
4239 	mac_exc_free_label(new_label);
4240 #endif
4241 
4242 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4243 		if (IP_VALID(old_port[i])) {
4244 			ipc_port_release_send(old_port[i]);
4245 		}
4246 	}
4247 
4248 	if (IP_VALID(new_port)) {         /* consume send right */
4249 		ipc_port_release_send(new_port);
4250 	}
4251 
4252 	return KERN_SUCCESS;
4253 }
4254 
4255 /*
4256  *	Routine:	thread/task_swap_exception_ports [kernel call]
4257  *	Purpose:
4258  *			Sets the thread/task exception port, flavor and
4259  *			behavior for the exception types specified by the
4260  *			mask.
4261  *
4262  *			The old ports, behavior and flavors are returned
4263  *			Count specifies the array sizes on input and
4264  *			the number of returned ports etc. on output.  The
4265  *			arrays must be large enough to hold all the returned
4266  *			data, MIG returnes an error otherwise.  The masks
4267  *			array specifies the corresponding exception type(s).
4268  *
4269  *	Conditions:
4270  *		Nothing locked.  If successful, consumes
4271  *		the supplied send right.
4272  *
4273  *		Returns upto [in} CountCnt elements.
4274  *	Returns:
4275  *		KERN_SUCCESS		Changed the special port.
4276  *		KERN_INVALID_ARGUMENT	The thread is null,
4277  *					Illegal mask bit set.
4278  *					Illegal exception behavior
4279  *		KERN_FAILURE		The thread is dead.
4280  *		KERN_NO_ACCESS		Restricted access to set port
4281  */
4282 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* This legacy entry point never registers a hardened handler. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* MAC hook: policy may veto changing this thread's exception ports. */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* Lazily allocate the per-thread exception action array. */
	tro = get_thread_ro(thread);
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/* Record the old (port, behavior, flavor) triples, deduplicated, into
	 * the caller's output arrays while installing the new action. Stops
	 * visiting new exception types once the output arrays are full. */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: append a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Stash the displaced right; released after unlock. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Walk back down over exactly the slots the loop above initialized.
	 * NOTE(review): with unsigned i this relies on FIRST_EXCEPTION >= 1
	 * so the predecrement reaches a terminating value — confirm. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	/* Report how many distinct old actions were returned. */
	*CountCnt = count;

	return KERN_SUCCESS;
}
4395 
kern_return_t
task_swap_exception_ports(
	task_t                                          task,
	exception_mask_t                        exception_mask,
	ipc_port_t                                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* This legacy entry point never registers a hardened handler. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
	    new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* MAC hook: policy may veto changing this task's exception ports. */
	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/* Record the old (port, behavior, flavor) triples, deduplicated, into
	 * the caller's output arrays while installing the new action. Stops
	 * visiting new exception types once the output arrays are full. */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: append a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right; released after unlock. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Walk back down over exactly the slots the loop above initialized.
	 * NOTE(review): with unsigned i this relies on FIRST_EXCEPTION >= 1
	 * so the predecrement reaches a terminating value — confirm. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	/* Report how many distinct old actions were returned. */
	*CountCnt = count;

	return KERN_SUCCESS;
}
4504 
4505 /*
4506  *	Routine:	thread/task_get_exception_ports [kernel call]
4507  *	Purpose:
4508  *		Clones a send right for each of the thread/task's exception
4509  *		ports specified in the mask and returns the behaviour
4510  *		and flavor of said port.
4511  *
4512  *		Returns upto [in} CountCnt elements.
4513  *
4514  *	Conditions:
4515  *		Nothing locked.
4516  *	Returns:
4517  *		KERN_SUCCESS		Extracted a send right.
4518  *		KERN_INVALID_ARGUMENT	The thread is null,
4519  *					Invalid special port,
4520  *					Illegal mask bit set.
4521  *		KERN_FAILURE		The thread is dead.
4522  */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* Two mutually exclusive output modes: ports_info != NULL returns
	 * hashed identifiers; otherwise send rights are copied into ports. */
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* When not info-only, the caller must supply the ports array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a save area for FP state before taking thread lock,
	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
	 * an FP state allocation while holding thread locks.
	 */
	ml_fp_save_area_prealloc();

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No actions ever installed: report zero entries. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	/* Collect the (port, behavior, flavor) triples for the requested
	 * exception types, deduplicating identical actions into one slot. */
	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New distinct action; append if there is room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						/* Return hashed kernel addresses, not raw pointers. */
						task_t receiver = TASK_NULL;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRHASH(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRHASH(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	/* Report how many distinct actions were returned. */
	*CountCnt = count;

	return KERN_SUCCESS;
}
4622 
4623 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4624 thread_get_exception_ports(
4625 	thread_t                        thread,
4626 	exception_mask_t                exception_mask,
4627 	exception_mask_array_t          masks,
4628 	mach_msg_type_number_t          *CountCnt,
4629 	exception_port_array_t          ports,
4630 	exception_behavior_array_t      behaviors,
4631 	thread_state_flavor_array_t     flavors)
4632 {
4633 	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4634 	           NULL, ports, behaviors, flavors);
4635 }
4636 
4637 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4638 thread_get_exception_ports_info(
4639 	mach_port_t                     port,
4640 	exception_mask_t                exception_mask,
4641 	exception_mask_array_t          masks,
4642 	mach_msg_type_number_t          *CountCnt,
4643 	exception_port_info_array_t     ports_info,
4644 	exception_behavior_array_t      behaviors,
4645 	thread_state_flavor_array_t     flavors)
4646 {
4647 	kern_return_t kr;
4648 
4649 	thread_t thread = convert_port_to_thread_read_no_eval(port);
4650 
4651 	if (thread == THREAD_NULL) {
4652 		return KERN_INVALID_ARGUMENT;
4653 	}
4654 
4655 	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4656 	    ports_info, NULL, behaviors, flavors);
4657 
4658 	thread_deallocate(thread);
4659 	return kr;
4660 }
4661 
4662 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4663 thread_get_exception_ports_from_user(
4664 	mach_port_t                     port,
4665 	exception_mask_t                exception_mask,
4666 	exception_mask_array_t          masks,
4667 	mach_msg_type_number_t         *CountCnt,
4668 	exception_port_array_t          ports,
4669 	exception_behavior_array_t      behaviors,
4670 	thread_state_flavor_array_t     flavors)
4671 {
4672 	kern_return_t kr;
4673 
4674 	thread_t thread = convert_port_to_thread(port);
4675 
4676 	if (thread == THREAD_NULL) {
4677 		return KERN_INVALID_ARGUMENT;
4678 	}
4679 
4680 	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4681 
4682 	thread_deallocate(thread);
4683 	return kr;
4684 }
4685 
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	/* Two mutually exclusive output modes: ports_info != NULL returns
	 * hashed identifiers; otherwise send rights are copied into ports. */
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* When not info-only, the caller must supply the ports array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a save area for FP state before taking task lock,
	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
	 * an FP state allocation while holding task locks.
	 */
	ml_fp_save_area_prealloc();

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	/* Collect the (port, behavior, flavor) triples for the requested
	 * exception types, deduplicating identical actions into one slot. */
	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* New distinct action; append if there is room. */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						/* Return hashed kernel addresses, not raw pointers. */
						task_t receiver = TASK_NULL;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRHASH(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRHASH(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	/* Report how many distinct actions were returned. */
	*CountCnt = count;

	return KERN_SUCCESS;
}
4777 
4778 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4779 task_get_exception_ports(
4780 	task_t                          task,
4781 	exception_mask_t                exception_mask,
4782 	exception_mask_array_t          masks,
4783 	mach_msg_type_number_t          *CountCnt,
4784 	exception_port_array_t          ports,
4785 	exception_behavior_array_t      behaviors,
4786 	thread_state_flavor_array_t     flavors)
4787 {
4788 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4789 	           NULL, ports, behaviors, flavors);
4790 }
4791 
4792 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4793 task_get_exception_ports_info(
4794 	mach_port_t                     port,
4795 	exception_mask_t                exception_mask,
4796 	exception_mask_array_t          masks,
4797 	mach_msg_type_number_t          *CountCnt,
4798 	exception_port_info_array_t     ports_info,
4799 	exception_behavior_array_t      behaviors,
4800 	thread_state_flavor_array_t     flavors)
4801 {
4802 	kern_return_t kr;
4803 
4804 	task_t task = convert_port_to_task_read_no_eval(port);
4805 
4806 	if (task == TASK_NULL) {
4807 		return KERN_INVALID_ARGUMENT;
4808 	}
4809 
4810 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
4811 	    ports_info, NULL, behaviors, flavors);
4812 
4813 	task_deallocate(task);
4814 	return kr;
4815 }
4816 
4817 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4818 task_get_exception_ports_from_user(
4819 	mach_port_t                     port,
4820 	exception_mask_t                exception_mask,
4821 	exception_mask_array_t          masks,
4822 	mach_msg_type_number_t         *CountCnt,
4823 	exception_port_array_t          ports,
4824 	exception_behavior_array_t      behaviors,
4825 	thread_state_flavor_array_t     flavors)
4826 {
4827 	kern_return_t kr;
4828 
4829 	task_t task = convert_port_to_task(port);
4830 
4831 	if (task == TASK_NULL) {
4832 		return KERN_INVALID_ARGUMENT;
4833 	}
4834 
4835 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4836 
4837 	task_deallocate(task);
4838 	return kr;
4839 }
4840 
4841 /*
4842  *	Routine:	ipc_thread_port_unpin
4843  *	Purpose:
4844  *
4845  *		Called on the thread when it's terminating so that the last ref
4846  *		can be deallocated without a guard exception.
4847  *	Conditions:
4848  *		Thread mutex lock is held.
4849  */
4850 void
ipc_thread_port_unpin(ipc_port_t port)4851 ipc_thread_port_unpin(
4852 	ipc_port_t port)
4853 {
4854 	ipc_object_unpin(current_space(), port);
4855 }
4856 
4857 /*
4858  *	Routine:	task_register_hardened_exception_handler
4859  *	Purpose:
4860  *		Register a port as a hardened exception handler.
4861  *		See task.defs for additional info
4862  *	Conditions:
4863  *		Nothing locked.
4864  *		Limit of one hardened exception handler per task
4865  *	Returns:
4866  *		KERN_INVALID_ARGUMENT	invalid thread
4867  *		KERN_DENIED             violating the security policy
4868  *		KERN_NAME_EXISTS        Already set a hardened exception handler
4869  *		                        on this task
4870  *		KERN_SUCCESS
4871  */
kern_return_t
task_register_hardened_exception_handler(
	task_t                  task,
	uint32_t                signed_pc_key,
	exception_mask_t        exceptions_allowed,
	exception_behavior_t    behaviors_allowed,
	thread_state_flavor_t   flavors_allowed,
	mach_port_t             new_port)
{
	ipc_port_t old_port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* A hardened handler must be backed by a dedicated exception port. */
	if (IP_VALID(new_port) && !ip_is_exception_port(new_port)) {
		return KERN_INVALID_ARGUMENT;
	}


	/* Run the shared validation with the hardened-exception policy. */
	bool hardened_exception_flow = true;
	kern_return_t kr = set_exception_ports_validation(task, exceptions_allowed,
	    new_port, behaviors_allowed, flavors_allowed, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* You can only register one hardened exception handler */
	if (exception_ports_frozen(task)) {
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * NOTE(review): the freeze flag is set before itk_lock and before the
	 * KERN_NAME_EXISTS check below, so on that error path the task stays
	 * frozen; also the frozen-check/flag-set pair is not atomic — confirm
	 * both are intended.
	 */
	task_ro_flags_set(task, TFRO_FREEZE_EXCEPTION_PORTS);
	itk_lock(task);

	/* No reason to allow setting this multiple times per task */
	old_port = task->hardened_exception_action.ea.port;
	if (IP_VALID(old_port)) {
		itk_unlock(task);
		return KERN_NAME_EXISTS;
	}

	/* Stash the semantics for this port on the task */
	struct hardened_exception_action hea;
	hea.ea.port = new_port; /* Donate our send right to the task */
	hea.ea.flavor = flavors_allowed;
	hea.ea.behavior = behaviors_allowed;
	hea.ea.privileged = false;
	hea.ea.label = NULL;
	hea.signed_pc_key = signed_pc_key;
	hea.exception = exceptions_allowed;

	task->hardened_exception_action = hea;
	itk_unlock(task);

	return KERN_SUCCESS;
}
4927 
/*
 *	Routine:	thread_adopt_exception_handler
 *	Purpose:
 *		Adopt the hardened exception handler from the current task,
 *		for this thread.
 *
 *		Allows setting exception ports on a thread after exception ports
 *		have been frozen for the task.
 *	Conditions:
 *		Nothing locked
 *	Returns:
 *		KERN_INVALID_ARGUMENT   invalid thread
 *		KERN_DENIED             violating the security policy
 *		KERN_SUCCESS
 */
4943 kern_return_t
thread_adopt_exception_handler(thread_t thread,mach_port_t exc_port,exception_mask_t exc_mask,exception_behavior_t behavior_mask,thread_state_flavor_t flavor_mask)4944 thread_adopt_exception_handler(
4945 	thread_t                thread,
4946 	mach_port_t             exc_port,
4947 	exception_mask_t        exc_mask,
4948 	exception_behavior_t    behavior_mask,
4949 	thread_state_flavor_t   flavor_mask)
4950 {
4951 	if (thread == THREAD_NULL) {
4952 		return KERN_INVALID_ARGUMENT;
4953 	}
4954 
4955 	task_t task = get_threadtask(thread);
4956 
4957 	if (task != current_task()) {
4958 		return KERN_DENIED;
4959 	}
4960 
4961 	/* We must have exactly one hardened exception port per task */
4962 	if (!exception_ports_frozen(task)) {
4963 		return KERN_DENIED;
4964 	}
4965 
4966 	/* Ensure we see a consistent state of the hardened exception action */
4967 	itk_lock(task);
4968 	struct hardened_exception_action hea = task->hardened_exception_action;
4969 	itk_unlock(task);
4970 
4971 	if (exc_port != IP_NULL && exc_port != hea.ea.port) {
4972 		return KERN_DENIED;
4973 	}
4974 	/* Ensure that the new masks for this thread are a subset of the
4975 	 * allowable masks for this exception handler
4976 	 */
4977 	if (exc_mask & ~hea.exception ||
4978 	    behavior_mask & ~hea.ea.behavior ||
4979 	    flavor_mask & ~hea.ea.flavor) {
4980 		return KERN_DENIED;
4981 	}
4982 
4983 	assert(!IP_VALID(exc_port) || ip_is_exception_port(exc_port));
4984 
4985 	/*
4986 	 * We can safely assume this will be valid because we called
4987 	 * set_exception_ports_validation on it when it was originally
4988 	 * set on the task
4989 	 */
4990 	return thread_set_exception_ports_internal(thread, exc_mask, exc_port,
4991 	           behavior_mask, flavor_mask, true);
4992 }
4993