xref: /xnu-11215.41.3/osfmk/kern/ipc_tt.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 
65 /*
66  * File:	ipc_tt.c
67  * Purpose:
68  *	Task and thread related IPC functions.
69  */
70 
71 #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
72 
73 #include <ipc/port.h>
74 #include <mach/mach_types.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/mach_param.h>
78 #include <mach/task_special_ports.h>
79 #include <mach/thread_special_ports.h>
80 #include <mach/thread_status.h>
81 #include <mach/exception_types.h>
82 #include <mach/memory_object_types.h>
83 #include <mach/mach_traps.h>
84 #include <mach/task_server.h>
85 #include <mach/thread_act_server.h>
86 #include <mach/mach_host_server.h>
87 #include <mach/host_priv_server.h>
88 #include <mach/vm_map_server.h>
89 
90 #include <kern/exc_guard.h>
91 #include <kern/kern_types.h>
92 #include <kern/host.h>
93 #include <kern/ipc_kobject.h>
94 #include <kern/ipc_tt.h>
95 #include <kern/kalloc.h>
96 #include <kern/thread.h>
97 #include <kern/ux_handler.h>
98 #include <kern/misc_protos.h>
99 #include <kdp/kdp_dyld.h>
100 
101 #include <vm/vm_map_xnu.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_protos.h>
104 #include <mach/vm_types.h>
105 #include <libkern/coreanalytics/coreanalytics.h>
106 
107 #include <security/mac_mach_internal.h>
108 
109 #if CONFIG_CSR
110 #include <sys/csr.h>
111 #endif
112 
113 #include <sys/code_signing.h> /* for developer mode state */
114 
115 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
116 extern int cs_relax_platform_task_ports;
117 #endif
118 
119 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
120 extern boolean_t proc_is_simulated(const proc_t);
121 extern struct proc* current_proc(void);
122 
123 /* bootarg to create lightweight corpse for thread identity lockdown */
124 TUNABLE(bool, thid_should_crash, "thid_should_crash", true);
125 
126 /* Allows the process to call `[thread,task]_set_exception_ports */
127 #define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"
128 
129 /*
130  * Entitlement to disallow setting the exception port of task/thread unless you
131  * are being debugged or are setting up the hardened task exception handler
132  */
133 #define IPC_ONLY_ONE_EXCEPTION_PORT "com.apple.security.only-one-exception-port"
134 
135 CA_EVENT(set_exception,
136     CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
137     CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
138     CA_INT, mask,
139     CA_STATIC_STRING(6), level);
140 
141 __options_decl(ipc_reply_port_type_t, uint32_t, {
142 	IRPT_NONE        = 0x00,
143 	IRPT_USER        = 0x01,
144 	IRPT_KERNEL      = 0x02,
145 });
146 
147 /* forward declarations */
148 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
149 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
150 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
151 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
152 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
153 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
154 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
155 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
156 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
157 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
158 
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;       /* task control port */
	ipc_port_t nport;       /* task name port */
	ipc_port_t pport;       /* "self" port; same as kport at init time */
	kern_return_t kr;
	struct label *temp_label;
	int i;


	/* a task cannot function without an IPC space; failure is fatal */
	kr = ipc_space_create(IPC_LABEL_NONE, &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/* ports are allocated with no kobject bound yet; enabled later in ipc_task_enable() */
	kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
	    IPC_KOBJECT_ALLOC_NONE);
	pport = kport;

	nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
	    IPC_KOBJECT_ALLOC_NONE);

	itk_lock_init(task);
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
	task->itk_task_ports[TASK_FLAVOR_NAME] = nport;

	/* Lazily allocated on-demand */
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
	task->itk_dyld_notify = NULL;
#if CONFIG_PROC_RESOURCE_LIMITS
	task->itk_resource_notify = NULL;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	task->itk_self = pport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
#if CONFIG_CSR
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right held in the kernel,
		 * so do not take a settable-self send right here.
		 */
		task->itk_settable_self = IP_NULL;
	} else {
		/* we just made the port, no need to triple check */
		task->itk_settable_self = ipc_port_make_send_any(kport);
	}
#endif /* CONFIG_CSR */
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* slot 0 is never used; only the real exception slots get labels */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i],
		    mac_exc_create_label(&task->exc_actions[i]));
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
	/* We don't need to inherit this */
	bzero(&task->hardened_exception_action, sizeof(task->hardened_exception_action));

	if (parent == TASK_NULL) {
		/* no parent: start with empty exception/bootstrap state */
		ipc_port_t port = IP_NULL;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* copy inherited state under the parent's itk lock */
		itk_lock(parent);
		assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send_any(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			/* the struct copy clobbers the label; preserve this task's own */
			temp_label = task->exc_actions[i].label;
			task->exc_actions[i] = parent->exc_actions[i];
			task->exc_actions[i].port =
			    exception_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].label = temp_label;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i,
			    task->exc_actions + i);
#endif
		}

		task->itk_host = host_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send_mqueue(parent->itk_bootstrap);

		task->itk_task_access =
		    ipc_port_copy_send_mqueue(parent->itk_task_access);

		itk_unlock(parent);
	}
}
295 
296 /*
297  *	Routine:	ipc_task_set_immovable_pinned
298  *	Purpose:
299  *		Make a task's control port immovable and/or pinned
300  *      according to its control port options. If control port
301  *      is immovable, allocate an immovable control port for the
302  *      task and optionally pin it.
303  *	Conditions:
304  *		Task's control port is movable and not pinned.
305  */
306 void
ipc_task_set_immovable_pinned(task_t task)307 ipc_task_set_immovable_pinned(
308 	task_t            task)
309 {
310 	ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
311 	ipc_port_t new_pport;
312 
313 	/* pport is the same as kport at ipc_task_init() time */
314 	assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
315 #if CONFIG_CSR
316 	assert(task->itk_self == task->itk_settable_self);
317 #endif /* CONFIG_CSR */
318 	assert(!task_is_a_corpse(task));
319 
320 	/* only tasks opt in immovable control port can have pinned control port */
321 	if (task_is_immovable(task)) {
322 		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
323 
324 		if (task_is_pinned(task)) {
325 			options |= IPC_KOBJECT_ALLOC_PINNED;
326 		}
327 
328 		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);
329 
330 		assert(kport != IP_NULL);
331 		ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
332 		kport->ip_kolabel->ikol_alt_port = new_pport;
333 
334 		itk_lock(task);
335 		task->itk_self = new_pport;
336 		itk_unlock(task);
337 
338 		/* enable the pinned port */
339 		ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
340 	}
341 }
342 
343 /*
344  *	Routine:	ipc_task_enable
345  *	Purpose:
346  *		Enable a task for IPC access.
347  *	Conditions:
348  *		Nothing locked.
349  */
350 void
ipc_task_enable(task_t task)351 ipc_task_enable(
352 	task_t          task)
353 {
354 	ipc_port_t kport;
355 	ipc_port_t nport;
356 	ipc_port_t iport;
357 	ipc_port_t rdport;
358 	ipc_port_t pport;
359 
360 	itk_lock(task);
361 	if (!task->active) {
362 		/*
363 		 * task has been terminated before we can enable IPC access.
364 		 * The check is to make sure we don't accidentally re-enable
365 		 * the task ports _after_ they've been disabled during
366 		 * task_terminate_internal(), in which case we will hit the
367 		 * !task->ipc_active assertion in ipc_task_terminate().
368 		 *
369 		 * Technically we should grab task lock when checking task
370 		 * active bit, but since task termination unsets task->active
371 		 * _before_ calling ipc_task_disable(), we can always see the
372 		 * truth with just itk_lock() and bail if disable has been called.
373 		 */
374 		itk_unlock(task);
375 		return;
376 	}
377 
378 	assert(!task->ipc_active || task_is_a_corpse(task));
379 	task->ipc_active = true;
380 
381 	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
382 	if (kport != IP_NULL) {
383 		ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
384 	}
385 	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
386 	if (nport != IP_NULL) {
387 		ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
388 	}
389 	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
390 	if (iport != IP_NULL) {
391 		ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
392 	}
393 	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
394 	if (rdport != IP_NULL) {
395 		ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
396 	}
397 	pport = task->itk_self;
398 	if (pport != kport && pport != IP_NULL) {
399 		assert(task_is_immovable(task));
400 		ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
401 	}
402 
403 	itk_unlock(task);
404 }
405 
/*
 *	Routine:	ipc_task_disable
 *	Purpose:
 *		Disable IPC access to a task.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_disable(
	task_t          task)
{
	ipc_port_t kport;       /* control port */
	ipc_port_t nport;       /* name port */
	ipc_port_t iport;       /* inspect port */
	ipc_port_t rdport;      /* read port */
	ipc_port_t rport;       /* resume port */
	ipc_port_t pport;       /* immovable "self" port (may equal kport) */

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			/* a distinct pport only exists for immovable tasks */
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
484 
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;       /* control port */
	ipc_port_t nport;       /* name port */
	ipc_port_t iport;       /* inspect port */
	ipc_port_t rdport;      /* read port */
	ipc_port_t rport;       /* resume port */
	ipc_port_t pport;       /* immovable "self" port (may equal kport) */
#if CONFIG_CSR
	ipc_port_t sself;       /* settable-self naked send right */
#endif /* CONFIG_CSR */
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	/* detach every port from the task while holding the itk lock */
	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	sself = task->itk_settable_self;
#endif /* CONFIG_CSR */
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* steal the dyld notifier array; its rights are released after unlock */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}
#endif /* CONFIG_CSR */

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	if (IP_VALID(task->hardened_exception_action.ea.port)) {
		ipc_port_release_send(task->hardened_exception_action.ea.port);
	}

	/* drop exception ports and their MACF labels */
	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
633 
634 /*
635  *	Routine:	ipc_task_reset
636  *	Purpose:
637  *		Reset a task's IPC state to protect it when
638  *		it enters an elevated security context. The
639  *		task name port can remain the same - since it
640  *              represents no specific privilege.
641  *	Conditions:
642  *		Nothing locked.  The task must be suspended.
643  *		(Or the current thread must be in the task.)
644  */
645 
646 void
ipc_task_reset(task_t task)647 ipc_task_reset(
648 	task_t          task)
649 {
650 	ipc_port_t old_kport, old_pport, new_kport, new_pport;
651 #if CONFIG_CSR
652 	ipc_port_t old_sself;
653 #endif /* CONFIG_CSR */
654 	ipc_port_t old_rdport;
655 	ipc_port_t old_iport;
656 	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
657 	ipc_port_t old_hardened_exception;
658 	ipc_port_t *notifiers_ptr = NULL;
659 
660 #if CONFIG_MACF
661 	/* Fresh label to unset credentials in existing labels. */
662 	struct label *unset_label = mac_exc_create_label(NULL);
663 #endif
664 
665 	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
666 	    IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
667 	/*
668 	 * ipc_task_reset() only happens during sugid or corpsify.
669 	 *
670 	 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
671 	 * port has not been enabled, and is left movable/not pinned.
672 	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
673 	 * task port is bogus, and should appear as if it doesn't have one.
674 	 *
675 	 * So simply leave pport the same as kport.
676 	 */
677 	new_pport = new_kport;
678 
679 	itk_lock(task);
680 
681 	old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
682 	old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
683 	old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
684 
685 	old_pport = task->itk_self;
686 
687 	if (old_pport == IP_NULL) {
688 		/* the task is already terminated (can this happen?) */
689 		itk_unlock(task);
690 		ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
691 		if (new_pport != new_kport) {
692 			assert(task_is_immovable(task));
693 			ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
694 		}
695 #if CONFIG_MACF
696 		mac_exc_free_label(unset_label);
697 #endif
698 		return;
699 	}
700 
701 	task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
702 	task->itk_self = new_pport;
703 
704 #if CONFIG_CSR
705 	old_sself = task->itk_settable_self;
706 	if (task_is_a_corpse(task)) {
707 		/* No extra send right for coprse, needed to arm no-sender notification */
708 		task->itk_settable_self = IP_NULL;
709 	} else {
710 		/* we just made the port, no need to triple check */
711 		task->itk_settable_self = ipc_port_make_send_any(new_kport);
712 	}
713 #endif /* CONFIG_CSR */
714 
715 	/* clears ikol_alt_port */
716 	ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);
717 
718 	/* Reset the read and inspect flavors of task port */
719 	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
720 	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
721 
722 	if (old_pport != old_kport) {
723 		assert(task_is_immovable(task));
724 		ipc_kobject_disable(old_pport, IKOT_TASK_CONTROL);
725 	}
726 
727 	if (IP_VALID(task->hardened_exception_action.ea.port)
728 	    && !task->hardened_exception_action.ea.privileged) {
729 		old_hardened_exception = task->hardened_exception_action.ea.port;
730 		task->hardened_exception_action.ea.port = IP_NULL;
731 	}
732 
733 	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
734 		old_exc_actions[i] = IP_NULL;
735 
736 		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
737 			continue;
738 		}
739 
740 		if (!task->exc_actions[i].privileged) {
741 #if CONFIG_MACF
742 			mac_exc_update_action_label(task->exc_actions + i, unset_label);
743 #endif
744 			old_exc_actions[i] = task->exc_actions[i].port;
745 			task->exc_actions[i].port = IP_NULL;
746 		}
747 	}/* for */
748 
749 	if (IP_VALID(task->itk_debug_control)) {
750 		ipc_port_release_send(task->itk_debug_control);
751 	}
752 	task->itk_debug_control = IP_NULL;
753 
754 	if (task->itk_dyld_notify) {
755 		notifiers_ptr = task->itk_dyld_notify;
756 		task->itk_dyld_notify = NULL;
757 	}
758 
759 	itk_unlock(task);
760 
761 #if CONFIG_MACF
762 	mac_exc_free_label(unset_label);
763 #endif
764 
765 	/* release the naked send rights */
766 #if CONFIG_CSR
767 	if (IP_VALID(old_sself)) {
768 		ipc_port_release_send(old_sself);
769 	}
770 #endif /* CONFIG_CSR */
771 
772 	if (notifiers_ptr) {
773 		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
774 			if (IP_VALID(notifiers_ptr[i])) {
775 				ipc_port_release_send(notifiers_ptr[i]);
776 			}
777 		}
778 		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
779 	}
780 
781 	ipc_port_release_send(old_hardened_exception);
782 
783 	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
784 		if (IP_VALID(old_exc_actions[i])) {
785 			ipc_port_release_send(old_exc_actions[i]);
786 		}
787 	}
788 
789 	/* destroy all task port flavors */
790 	if (old_rdport != IP_NULL) {
791 		/* read port ikol_alt_port may point to kport, dealloc first */
792 		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
793 	}
794 	ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
795 	/* ikol_alt_port cleared */
796 
797 	if (old_iport != IP_NULL) {
798 		ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
799 	}
800 	if (old_pport != old_kport) {
801 		assert(task_is_immovable(task));
802 		ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
803 	}
804 }
805 
/*
 *	Routine:	ipc_thread_init
 *	Purpose:
 *		Initialize a thread's IPC state.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t      kport;  /* control port (movable) */
	ipc_port_t      pport;  /* "self" port; immovable variant or == kport */
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/* movable kport redirects to the immovable pport via its label */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
#if CONFIG_CSR
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
#endif /* CONFIG_CSR */
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
869 
870 void
ipc_main_thread_set_immovable_pinned(thread_t thread)871 ipc_main_thread_set_immovable_pinned(thread_t thread)
872 {
873 	thread_ro_t tro = get_thread_ro(thread);
874 	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
875 	task_t task = tro->tro_task;
876 	ipc_port_t new_pport;
877 
878 	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);
879 
880 	/* pport is the same as kport at ipc_thread_init() time */
881 	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
882 #if CONFIG_CSR
883 	assert(tro->tro_self_port == tro->tro_settable_self_port);
884 #endif /* CONFIG_CSR */
885 
886 	/*
887 	 * Main thread port is immovable/pinned depending on whether owner task has
888 	 * immovable/pinned task control port. task_control_port_options is now set.
889 	 */
890 	if (task_is_immovable(task)) {
891 		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
892 
893 		if (task_is_pinned(task)) {
894 			options |= IPC_KOBJECT_ALLOC_PINNED;
895 		}
896 
897 		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);
898 
899 		assert(kport != IP_NULL);
900 		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
901 		kport->ip_kolabel->ikol_alt_port = new_pport;
902 
903 		thread_mtx_lock(thread);
904 		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
905 		thread_mtx_unlock(thread);
906 
907 		/* enable the pinned port */
908 		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
909 	}
910 }
911 
/* wrapper type so the per-thread exception-action array can be
 * allocated and freed via kalloc_type/kfree_type as a single unit */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};

/*
 * Allocate a thread's exception-action table (zero-filled), attach a
 * MACF label to every slot, and publish the pointer into the read-only
 * thread_ro structure.
 */
static void
ipc_thread_init_exc_actions(thread_ro_t tro)
{
	struct exception_action *actions;

	/* Z_NOFAIL: the allocation cannot fail, so the result is not checked */
	actions = kalloc_type(struct thread_init_exc_actions,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;

#if CONFIG_MACF
	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
		mac_exc_associate_action_label(&actions[i],
		    mac_exc_create_label(&actions[i]));
	}
#endif

	zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
}
933 
934 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)935 ipc_thread_destroy_exc_actions(thread_ro_t tro)
936 {
937 	struct exception_action *actions = tro->tro_exc_actions;
938 
939 	if (actions) {
940 #if CONFIG_MACF
941 		for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
942 			mac_exc_free_action_label(actions + i);
943 		}
944 #endif
945 
946 		zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
947 		struct thread_init_exc_actions *tr_actions =
948 		    (struct thread_init_exc_actions *)actions;
949 		kfree_type(struct thread_init_exc_actions, tr_actions);
950 	}
951 }
952 
/*
 * Overwrite the contiguous run of port fields in a read-only thread_ro
 * structure (tro_self_port, tro_settable_self_port under CONFIG_CSR, and
 * the tro_ports flavor array) with the values from the template, using a
 * single zalloc_ro_mut() call.
 *
 * NOTE(review): the static_asserts below spell the pointer type as
 * `struct ipc_port_t *` while the size computation uses
 * `struct ipc_port *`.  Both are pointers (to distinct struct tags) and
 * therefore the same size, so the asserts still hold — presumably a typo;
 * confirm against the upstream thread_ro layout.
 */
static void
ipc_thread_ro_update_ports(
	thread_ro_t             tro,
	const struct thread_ro *tro_tpl)
{
	/* byte offset of the first port field in struct thread_ro */
	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
	/* total size of the port-field run being overwritten */
	vm_size_t size = sizeof(struct ipc_port *) +
#if CONFIG_CSR
	    sizeof(struct ipc_port *) +
#endif /* CONFIG_CSR */
	    sizeof(tro_tpl->tro_ports);

	/* the fields must be laid out back-to-back for the single copy below */
#if CONFIG_CSR
	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
	    offsetof(struct thread_ro, tro_self_port) +
	    sizeof(struct ipc_port_t *));
#endif /* CONFIG_CSR */
	static_assert(offsetof(struct thread_ro, tro_ports) ==
	    offsetof(struct thread_ro, tro_self_port) +
#if CONFIG_CSR
	    sizeof(struct ipc_port_t *) +
#endif /* CONFIG_CSR */
	    sizeof(struct ipc_port_t *));

	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
	    offs, &tro_tpl->tro_self_port, size);
}
980 
981 /*
982  *	Routine:	ipc_thread_disable
983  *	Purpose:
984  *		Clean up and destroy a thread's IPC state.
985  *	Conditions:
986  *		Thread locked.
987  */
void
ipc_thread_disable(
	thread_t        thread)
{
	thread_ro_t     tro = get_thread_ro(thread);
	ipc_port_t      kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t      iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t      rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t      pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	/*
	 * Detach the thread from each of its ports so outstanding send
	 * rights can no longer be converted back to this thread. The
	 * ports themselves are destroyed later, in ipc_thread_terminate().
	 */
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	/* a self port distinct from kport only exists for immovable tasks */
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1033 
1034 /*
1035  *	Routine:	ipc_thread_terminate
1036  *	Purpose:
1037  *		Clean up and destroy a thread's IPC state.
1038  *	Conditions:
1039  *		Nothing locked.
1040  */
1041 
void
ipc_thread_terminate(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
	ipc_port_t pport = IP_NULL;
#if CONFIG_CSR
	ipc_port_t sport = IP_NULL;
#endif /* CONFIG_CSR */

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	/* snapshot the port pointers while holding the thread mutex */
	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
#if CONFIG_CSR
	sport = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */

	if (kport != IP_NULL) {
#if CONFIG_CSR
		/* the settable-self port is a naked send right this thread owns */
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}
#endif /* CONFIG_CSR */

		/* zero all of the RO port fields in a single RO-zone mutation */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			/* drop the send rights held by the exception actions */
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
	}
	/* control port can also have ikol_alt_port */
	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
	}
	/* the pinned self port is distinct from kport only for immovable tasks */
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1120 
1121 /*
1122  *	Routine:	ipc_thread_reset
1123  *	Purpose:
1124  *		Reset the IPC state for a given Mach thread when
1125  *		its task enters an elevated security context.
1126  *		All flavors of thread port and its exception ports have
1127  *		to be reset.  Its RPC reply port cannot have any
1128  *		rights outstanding, so it should be fine. The thread
1129  *		inspect and read port are set to NULL.
1130  *	Conditions:
1131  *		Nothing locked.
1132  */
1133 
void
ipc_thread_reset(
	thread_t        thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport, old_pport, new_pport;
#if CONFIG_CSR
	ipc_port_t old_sself;
#endif /* CONFIG_CSR */
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	boolean_t thread_is_immovable;
	int i;

#if CONFIG_MACF
	/* created before taking the thread mutex: label allocation may block */
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);

	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
	 * port is still movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * thread port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	thread_mtx_lock(thread);

	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

#if CONFIG_CSR
	old_sself = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */
	old_pport = tro->tro_self_port;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
		/*
		 * NOTE(review): new_pport == new_kport on every path above,
		 * so this second dealloc would operate on the port that was
		 * just deallocated if thread_is_immovable were true here.
		 * Likely vestigial from when new_pport was allocated
		 * separately — confirm this path is unreachable.
		 */
		if (thread_is_immovable) {
			ipc_kobject_dealloc_port(new_pport, 0,
			    IKOT_THREAD_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	/* re-arm lazy port creation for the reset thread */
	thread->ipc_active = true;

	struct thread_ro tpl = {
		.tro_self_port = new_pport,
		/* we just made the port, no need to triple check */
#if CONFIG_CSR
		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
#endif /* CONFIG_CSR */
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
	};

	/* READ/INSPECT flavors in tpl are IP_NULL: they are dropped on reset */
	ipc_thread_ro_update_ports(tro, &tpl);

	if (old_kport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}
	if (thread_is_immovable && old_pport != IP_NULL) {
		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* keep it; nothing to release for this slot */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}
#endif /* CONFIG_CSR */

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel ports */
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
	}
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
	}
	if (old_pport != old_kport && old_pport != IP_NULL) {
		assert(thread_is_immovable);
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1283 
1284 /*
1285  *	Routine:	retrieve_task_self_fast
1286  *	Purpose:
1287  *		Optimized version of retrieve_task_self,
1288  *		that only works for the current task.
1289  *
1290  *		Return a send right (possibly null/dead)
1291  *		for the task's user-visible self port.
1292  *	Conditions:
1293  *		Nothing locked.
1294  */
1295 
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

#if CONFIG_CSR
	/*
	 * itk_settable_self differs from the control port only when
	 * task_set_special_port() interposed another port (SIP disabled).
	 */
	if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		/* no interposing, return the IMMOVABLE port */
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
#if (DEBUG || DEVELOPMENT)
		/* sanity: port attributes must track the task's policy */
		if (task_is_immovable(task)) {
			assert(ip_is_immovable_send(port));
			if (task_is_pinned(task)) {
				/* pinned port is also immovable */
				assert(ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	itk_unlock(task);

	return port;
}
1334 
1335 /*
1336  *	Routine:	mach_task_is_self
1337  *	Purpose:
1338  *      [MIG call] Checks if the task (control/read/inspect/name/movable)
1339  *      port is pointing to current_task.
1340  */
1341 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1342 mach_task_is_self(
1343 	task_t         task,
1344 	boolean_t     *is_self)
1345 {
1346 	if (task == TASK_NULL) {
1347 		return KERN_INVALID_ARGUMENT;
1348 	}
1349 
1350 	*is_self = (task == current_task());
1351 
1352 	return KERN_SUCCESS;
1353 }
1354 
1355 /*
1356  *	Routine:	retrieve_thread_self_fast
1357  *	Purpose:
1358  *		Return a send right (possibly null/dead)
1359  *		for the thread's user-visible self port.
1360  *
1361  *		Only works for the current thread.
1362  *
1363  *	Conditions:
1364  *		Nothing locked.
1365  */
1366 
ipc_port_t
retrieve_thread_self_fast(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(tro->tro_self_port != IP_NULL);

#if CONFIG_CSR
	/*
	 * The settable-self port differs from the control port only when
	 * thread_set_special_port() interposed one (SIP disabled).
	 */
	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		/* no interposing, return IMMOVABLE_PORT */
		port = ipc_kobject_make_send(tro->tro_self_port, thread,
		    IKOT_THREAD_CONTROL);
#if (DEBUG || DEVELOPMENT)
		/* sanity: port attributes must track the task's policy */
		if (task_is_immovable(tro->tro_task)) {
			assert(ip_is_immovable_send(port));
			uint16_t tag = thread_get_tag(thread);
			/* terminated threads are unpinned */
			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
				assert(ip_is_pinned(port));
			} else {
				assert(!ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	thread_mtx_unlock(thread);

	return port;
}
1410 
1411 /*
1412  *	Routine:	task_self_trap [mach trap]
1413  *	Purpose:
1414  *		Give the caller send rights for his own task port.
1415  *	Conditions:
1416  *		Nothing locked.
1417  *	Returns:
1418  *		MACH_PORT_NULL if there are any resource failures
1419  *		or other errors.
1420  */
1421 
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	/* get a send right for the self port and copy it into our space */
	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);

	/*
	 * When the right is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
	 * when it is pinned.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	if (sright == task->itk_self && sright->ip_pinned &&
	    MACH_PORT_VALID(name)) {
		itk_lock(task);
		if (task->ipc_active) {
			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
				/* first copyout: record the name */
				sright->ip_receiver_name = name;
			} else if (ip_get_receiver_name(sright) != name) {
				/* pinned: the name must be stable for the task's lifetime */
				panic("mach_task_self() name changed");
			}
		}
		itk_unlock(task);
	}
	return name;
}
1458 
1459 /*
1460  *	Routine:	thread_self_trap [mach trap]
1461  *	Purpose:
1462  *		Give the caller send rights for his own thread port.
1463  *	Conditions:
1464  *		Nothing locked.
1465  *	Returns:
1466  *		MACH_PORT_NULL if there are any resource failures
1467  *		or other errors.
1468  */
1469 
1470 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1471 thread_self_trap(
1472 	__unused struct thread_self_trap_args *args)
1473 {
1474 	thread_t thread = current_thread();
1475 	ipc_space_t space = current_space();
1476 	ipc_port_t sright;
1477 	mach_port_name_t name;
1478 
1479 	sright = retrieve_thread_self_fast(thread);
1480 	name = ipc_port_copyout_send(sright, space);
1481 	return name;
1482 }
1483 
1484 /*
1485  *	Routine:	mach_reply_port [mach trap]
1486  *	Purpose:
1487  *		Allocate a port for the caller.
1488  *	Conditions:
1489  *		Nothing locked.
1490  *	Returns:
1491  *		MACH_PORT_NULL if there are any resource failures
1492  *		or other errors.
1493  */
1494 
1495 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1496 mach_reply_port(
1497 	__unused struct mach_reply_port_args *args)
1498 {
1499 	ipc_port_t port;
1500 	mach_port_name_t name;
1501 	kern_return_t kr;
1502 
1503 	kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1504 	    &name, &port);
1505 	if (kr == KERN_SUCCESS) {
1506 		ip_mq_unlock(port);
1507 	} else {
1508 		name = MACH_PORT_NULL;
1509 	}
1510 	return name;
1511 }
1512 
1513 /*
1514  *	Routine:	thread_get_special_reply_port [mach trap]
1515  *	Purpose:
1516  *		Allocate a special reply port for the calling thread.
1517  *	Conditions:
1518  *		Nothing locked.
1519  *	Returns:
1520  *		mach_port_name_t: send right & receive right for special reply port.
1521  *		MACH_PORT_NULL if there are any resource failures
1522  *		or other errors.
1523  */
1524 
1525 mach_port_name_t
thread_get_special_reply_port(__unused struct thread_get_special_reply_port_args * args)1526 thread_get_special_reply_port(
1527 	__unused struct thread_get_special_reply_port_args *args)
1528 {
1529 	ipc_port_t port;
1530 	mach_port_name_t name;
1531 	kern_return_t kr;
1532 	thread_t thread = current_thread();
1533 	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
1534 	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;
1535 
1536 	/* unbind the thread special reply port */
1537 	if (IP_VALID(thread->ith_special_reply_port)) {
1538 		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
1539 	}
1540 
1541 	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
1542 	if (kr == KERN_SUCCESS) {
1543 		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
1544 		ip_mq_unlock(port);
1545 	} else {
1546 		name = MACH_PORT_NULL;
1547 	}
1548 	return name;
1549 }
1550 
1551 /*
1552  *	Routine:	thread_get_kernel_special_reply_port
1553  *	Purpose:
1554  *		Allocate a kernel special reply port for the calling thread.
1555  *	Conditions:
1556  *		Nothing locked.
1557  *	Returns:
1558  *		Creates and sets kernel special reply port.
1559  *		KERN_SUCCESS on Success.
1560  *		KERN_FAILURE on Failure.
1561  */
1562 
1563 kern_return_t
thread_get_kernel_special_reply_port(void)1564 thread_get_kernel_special_reply_port(void)
1565 {
1566 	ipc_port_t port = IP_NULL;
1567 	thread_t thread = current_thread();
1568 
1569 	/* unbind the thread special reply port */
1570 	if (IP_VALID(thread->ith_kernel_reply_port)) {
1571 		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
1572 	}
1573 
1574 	port = ipc_port_alloc_reply(); /*returns a reference on the port */
1575 	if (port != IPC_PORT_NULL) {
1576 		ip_mq_lock(port);
1577 		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
1578 		ip_mq_unlock(port);
1579 		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
1580 	}
1581 	return KERN_SUCCESS;
1582 }
1583 
1584 /*
1585  *	Routine:	ipc_port_bind_special_reply_port_locked
1586  *	Purpose:
1587  *		Bind the given port to current thread as a special reply port.
1588  *	Conditions:
1589  *		Port locked.
1590  *	Returns:
1591  *		None.
1592  */
1593 
1594 static void
ipc_port_bind_special_reply_port_locked(ipc_port_t port,ipc_reply_port_type_t reply_type)1595 ipc_port_bind_special_reply_port_locked(
1596 	ipc_port_t            port,
1597 	ipc_reply_port_type_t reply_type)
1598 {
1599 	thread_t thread = current_thread();
1600 	ipc_port_t *reply_portp;
1601 
1602 	if (reply_type == IRPT_USER) {
1603 		reply_portp = &thread->ith_special_reply_port;
1604 	} else {
1605 		reply_portp = &thread->ith_kernel_reply_port;
1606 	}
1607 
1608 	assert(*reply_portp == NULL);
1609 	assert(port->ip_specialreply);
1610 	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1611 
1612 	ip_reference(port);
1613 	*reply_portp = port;
1614 	port->ip_messages.imq_srp_owner_thread = thread;
1615 
1616 	ipc_special_reply_port_bits_reset(port);
1617 }
1618 
1619 /*
1620  *	Routine:	ipc_port_unbind_special_reply_port
1621  *	Purpose:
1622  *		Unbind the thread's special reply port.
1623  *		If the special port has threads waiting on turnstile,
1624  *		update it's inheritor.
1625  *	Condition:
1626  *		Nothing locked.
1627  *	Returns:
1628  *		None.
1629  */
1630 static void
ipc_port_unbind_special_reply_port(thread_t thread,ipc_reply_port_type_t reply_type)1631 ipc_port_unbind_special_reply_port(
1632 	thread_t              thread,
1633 	ipc_reply_port_type_t reply_type)
1634 {
1635 	ipc_port_t *reply_portp;
1636 
1637 	if (reply_type == IRPT_USER) {
1638 		reply_portp = &thread->ith_special_reply_port;
1639 	} else {
1640 		reply_portp = &thread->ith_kernel_reply_port;
1641 	}
1642 
1643 	ipc_port_t special_reply_port = *reply_portp;
1644 
1645 	ip_mq_lock(special_reply_port);
1646 
1647 	*reply_portp = NULL;
1648 	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
1649 	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
1650 	/* port unlocked */
1651 
1652 	/* Destroy the port if its kernel special reply, else just release a ref */
1653 	if (reply_type == IRPT_USER) {
1654 		ip_release(special_reply_port);
1655 	} else {
1656 		ipc_port_dealloc_reply(special_reply_port);
1657 	}
1658 	return;
1659 }
1660 
1661 /*
1662  *	Routine:	thread_dealloc_kernel_special_reply_port
1663  *	Purpose:
1664  *		Unbind the thread's kernel special reply port.
1665  *		If the special port has threads waiting on turnstile,
1666  *		update it's inheritor.
1667  *	Condition:
1668  *		Called on current thread or a terminated thread.
1669  *	Returns:
1670  *		None.
1671  */
1672 
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* IRPT_KERNEL unbind also destroys the port's receive right */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1678 
1679 /*
1680  *	Routine:	thread_get_special_port [kernel call]
1681  *	Purpose:
1682  *		Clones a send right for one of the thread's
1683  *		special ports.
1684  *	Conditions:
1685  *		Nothing locked.
1686  *	Returns:
1687  *		KERN_SUCCESS		Extracted a send right.
1688  *		KERN_INVALID_ARGUMENT	The thread is null.
1689  *		KERN_FAILURE		The thread is dead.
1690  *		KERN_INVALID_ARGUMENT	Invalid special port.
1691  */
1692 
/* forward declaration; the definition follows the internal helper below */
kern_return_t
thread_get_special_port(
	thread_inspect_t         thread,
	int                      which,
	ipc_port_t              *portp);
1698 
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t      kr;
	ipc_port_t port;

	/* the caller's port flavor gates which special ports it may fetch */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* return the interposed settable-self port when one is set */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		/* drop the mutex: the conversion takes it again as needed */
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1752 
1753 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1754 thread_get_special_port(
1755 	thread_inspect_t         thread,
1756 	int                      which,
1757 	ipc_port_t              *portp)
1758 {
1759 	if (thread == THREAD_NULL) {
1760 		return KERN_INVALID_ARGUMENT;
1761 	}
1762 
1763 	return thread_get_special_port_internal(thread, get_thread_ro(thread),
1764 	           which, portp, THREAD_FLAVOR_CONTROL);
1765 }
1766 
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	/*
	 * Make a send right for the thread's (possibly settable) control
	 * port and wrap it in a substitute-once kobject port, so the
	 * caller gets at a movable control right through this one port.
	 */
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);
	port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* prefer an interposed settable-self port, if one was set */
	if (tro->tro_settable_self_port != port) {
		port = ipc_port_make_send_mqueue(tro->tro_settable_self_port);
	} else
#endif /* CONFIG_CSR */
	{
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	}
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1787 
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t tro;
	ipc_kobject_type_t kotype;
	mach_thread_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/* no exception-eval variant: this is a plain conversion */
	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the supplied port's kobject type to the access flavor it grants */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		/* conversion above succeeded, so any other type is a bug */
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	/* drop the reference taken by the port-to-thread conversion */
	thread_deallocate(thread);
	return kr;
}
1844 
1845 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1846 special_port_allowed_with_thread_flavor(
1847 	int                  which,
1848 	mach_thread_flavor_t flavor)
1849 {
1850 	switch (flavor) {
1851 	case THREAD_FLAVOR_CONTROL:
1852 		return KERN_SUCCESS;
1853 
1854 	case THREAD_FLAVOR_READ:
1855 
1856 		switch (which) {
1857 		case THREAD_READ_PORT:
1858 		case THREAD_INSPECT_PORT:
1859 			return KERN_SUCCESS;
1860 		default:
1861 			return KERN_INVALID_CAPABILITY;
1862 		}
1863 
1864 	case THREAD_FLAVOR_INSPECT:
1865 
1866 		switch (which) {
1867 		case THREAD_INSPECT_PORT:
1868 			return KERN_SUCCESS;
1869 		default:
1870 			return KERN_INVALID_CAPABILITY;
1871 		}
1872 
1873 	default:
1874 		return KERN_INVALID_CAPABILITY;
1875 	}
1876 }
1877 
1878 /*
1879  *	Routine:	thread_set_special_port [kernel call]
1880  *	Purpose:
1881  *		Changes one of the thread's special ports,
1882  *		setting it to the supplied send right.
1883  *	Conditions:
1884  *		Nothing locked.  If successful, consumes
1885  *		the supplied send right.
1886  *	Returns:
1887  *		KERN_SUCCESS            Changed the special port.
1888  *		KERN_INVALID_ARGUMENT   The thread is null.
1889  *      KERN_INVALID_RIGHT      Port is marked as immovable.
1890  *		KERN_FAILURE            The thread is dead.
1891  *		KERN_INVALID_ARGUMENT   Invalid special port.
1892  *		KERN_NO_ACCESS          Restricted access to set port.
1893  */
1894 
kern_return_t
thread_set_special_port(
	thread_t                thread,
	int                     which,
	ipc_port_t              port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro = NULL;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* immovable send rights may never be stashed as special ports */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* swap under the mutex; release the old right after */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* silence unused-variable warnings when CSR is compiled out */
		(void)old;
		(void)result;
		(void)tro;
#endif /* CONFIG_CSR */
		return KERN_NO_ACCESS;

	default:
		return KERN_INVALID_ARGUMENT;
	}
}
1951 
1952 /*
1953  *	Routine:	task_get_special_port [kernel call]
1954  *	Purpose:
1955  *		Clones a send right for one of the task's
1956  *		special ports.
1957  *	Conditions:
1958  *		Nothing locked.
1959  *	Returns:
1960  *		KERN_SUCCESS		    Extracted a send right.
1961  *		KERN_INVALID_ARGUMENT	The task is null.
1962  *		KERN_FAILURE		    The task/space is dead.
1963  *		KERN_INVALID_ARGUMENT	Invalid special port.
1964  */
1965 
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t        flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* the caller's port flavor gates which special ports it may fetch */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* return the interposed settable-self port when one is set */
		if (task->itk_settable_self != port) {
			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
		}
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		/* drop the lock: the conversion takes it again as needed */
		itk_unlock(task);
		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
		    task, IKOT_TASK_NAME);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = host_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
2055 
2056 /* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
2057 kern_return_t
task_get_special_port(task_t task,int which,ipc_port_t * portp)2058 task_get_special_port(
2059 	task_t          task,
2060 	int             which,
2061 	ipc_port_t      *portp)
2062 {
2063 	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
2064 }
2065 
/*
 *	Routine:	task_get_non_substituted_self
 *	Purpose:
 *		Produce a send right for the task's control port (or its
 *		replacement self port when one was installed under CSR),
 *		wrapped via ipc_kobject_alloc_subst_once() so the caller
 *		receives the non-substituted port.
 *	Conditions:
 *		Nothing locked on entry; takes and drops the itk lock.
 */
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	itk_lock(task);
	port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* honor a self port replaced via task_set_special_port() (SIP disabled) */
	if (task->itk_settable_self != port) {
		port = ipc_port_make_send_mqueue(task->itk_settable_self);
	} else
#endif /* CONFIG_CSR */
	{
		port = ipc_kobject_make_send(port, task, IKOT_TASK_CONTROL);
	}
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
2086 
/* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	/*
	 * Accept any task port flavor here; the per-flavor permission check
	 * happens in task_get_special_port_internal().
	 */
	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			/* hand out a substitute-once wrapper of the real self port */
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* Derive the caller's access level from the supplied port's kobject type. */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		/*
		 * convert_port_to_task_inspect_no_eval() succeeded above, so
		 * the kobject type must be one of the three handled cases.
		 */
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	task_deallocate(task);
	return kr;
}
2149 
2150 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2151 special_port_allowed_with_task_flavor(
2152 	int                which,
2153 	mach_task_flavor_t flavor)
2154 {
2155 	switch (flavor) {
2156 	case TASK_FLAVOR_CONTROL:
2157 		return KERN_SUCCESS;
2158 
2159 	case TASK_FLAVOR_READ:
2160 
2161 		switch (which) {
2162 		case TASK_READ_PORT:
2163 		case TASK_INSPECT_PORT:
2164 		case TASK_NAME_PORT:
2165 			return KERN_SUCCESS;
2166 		default:
2167 			return KERN_INVALID_CAPABILITY;
2168 		}
2169 
2170 	case TASK_FLAVOR_INSPECT:
2171 
2172 		switch (which) {
2173 		case TASK_INSPECT_PORT:
2174 		case TASK_NAME_PORT:
2175 			return KERN_SUCCESS;
2176 		default:
2177 			return KERN_INVALID_CAPABILITY;
2178 		}
2179 
2180 	default:
2181 		return KERN_INVALID_CAPABILITY;
2182 	}
2183 }
2184 
/*
 *	Routine:	task_set_special_port [MIG call]
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_RIGHT	Port is marked as immovable.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to set port.
 */
2201 
2202 kern_return_t
task_set_special_port_from_user(task_t task,int which,ipc_port_t port)2203 task_set_special_port_from_user(
2204 	task_t          task,
2205 	int             which,
2206 	ipc_port_t      port)
2207 {
2208 	if (task == TASK_NULL) {
2209 		return KERN_INVALID_ARGUMENT;
2210 	}
2211 
2212 #if CONFIG_MACF
2213 	if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
2214 		return KERN_DENIED;
2215 	}
2216 #endif
2217 
2218 	return task_set_special_port(task, which, port);
2219 }
2220 
/* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* driver tasks may not rewire special ports */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/* immovable send rights cannot be stashed as special ports */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2260 
/*
 *	Routine:	task_set_special_port_internal
 *	Purpose:
 *		Changes one of the task's special ports,
 *		setting it to the supplied send right.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_FAILURE		The task/space is dead.
 *		KERN_INVALID_ARGUMENT	Invalid special port.
 *		KERN_NO_ACCESS		Restricted access to overwrite port.
 */
2276 
kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	switch (which) {
#if CONFIG_CSR
	/* only reachable when the SIP gate in task_set_special_port() passed */
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;
#endif /* CONFIG_CSR */

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* drop the displaced right outside of the itk lock */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
2356 /*
2357  *	Routine:	mach_ports_register [kernel call]
2358  *	Purpose:
2359  *		Stash a handful of port send rights in the task.
2360  *		Child tasks will inherit these rights, but they
2361  *		must use mach_ports_lookup to acquire them.
2362  *
2363  *		The rights are supplied in a (wired) kalloc'd segment.
2364  *		Rights which aren't supplied are assumed to be null.
2365  *	Conditions:
2366  *		Nothing locked.  If successful, consumes
2367  *		the supplied rights and memory.
2368  *	Returns:
2369  *		KERN_SUCCESS		    Stashed the port rights.
2370  *      KERN_INVALID_RIGHT      Port in array is marked immovable.
2371  *		KERN_INVALID_ARGUMENT	The task is null.
2372  *		KERN_INVALID_ARGUMENT	The task is dead.
2373  *		KERN_INVALID_ARGUMENT	The memory param is null.
2374  *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
2375  */
2376 
kern_return_t
_kernelrpc_mach_ports_register3(
	task_t                  task,
	mach_port_t             port1,
	mach_port_t             port2,
	mach_port_t             port3)
{
	/* gather the arguments so the swap below can run as a loop */
	ipc_port_t ports[TASK_PORT_REGISTER_MAX] = {
		port1, port2, port3,
	};

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* reject immovable send rights up front, before taking the lock */
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
			return KERN_INVALID_RIGHT;
		}
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	/*
	 * ports[] now holds the displaced rights.  NOTE(review): entries may
	 * be IP_NULL/IP_DEAD — assumes ipc_port_release_send() tolerates
	 * invalid ports; confirm against its definition.
	 */
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_release_send(ports[i]);
	}

	return KERN_SUCCESS;
}
2425 
2426 /*
2427  *	Routine:	mach_ports_lookup [kernel call]
2428  *	Purpose:
2429  *		Retrieves (clones) the stashed port send rights.
2430  *	Conditions:
2431  *		Nothing locked.  If successful, the caller gets
2432  *		rights and memory.
2433  *	Returns:
2434  *		KERN_SUCCESS		Retrieved the send rights.
2435  *		KERN_INVALID_ARGUMENT	The task is null.
2436  *		KERN_INVALID_ARGUMENT	The task is dead.
2437  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
2438  */
2439 
kern_return_t
_kernelrpc_mach_ports_lookup3(
	task_t                  task,
	ipc_port_t             *port1,
	ipc_port_t             *port2,
	ipc_port_t             *port3)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/* clone one send right per registered slot while holding the lock */
	*port1 = ipc_port_copy_send_any(task->itk_registered[0]);
	*port2 = ipc_port_copy_send_any(task->itk_registered[1]);
	*port3 = ipc_port_copy_send_any(task->itk_registered[2]);

	itk_unlock(task);

	return KERN_SUCCESS;
}
2465 
/*
 *	Routine:	task_conversion_eval_internal
 *	Purpose:
 *		Centralized policy deciding whether `caller` may translate a
 *		port into a reference on `victim` (in-trans), or whether a
 *		port for `victim` may be produced (out_trans == TRUE).
 *	Returns:
 *		KERN_SUCCESS when the translation is allowed,
 *		KERN_INVALID_SECURITY otherwise; may panic on in-trans of a
 *		foreign control port on non-macOS platforms.
 */
static kern_return_t
task_conversion_eval_internal(
	task_t             caller,
	task_t             victim,
	boolean_t          out_trans,
	int                flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/* task and thread flavor constants share values, so both asserts hold */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* cs_relax_platform_task_ports relaxes the rule — presumably a
		 * development override; confirm where it is set. */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2560 
/* In-translation (port -> task) policy check; shared with thread ports. */
kern_return_t
task_conversion_eval(task_t caller, task_t victim, int flavor)
{
	/* flavor is mach_task_flavor_t or mach_thread_flavor_t */
	static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
	static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
	return task_conversion_eval_internal(caller, victim, FALSE, flavor);
}
2569 
/* Out-translation (task -> port) policy check; control flavor only. */
static kern_return_t
task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
{
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
	return task_conversion_eval_internal(caller, victim, TRUE, flavor);
}
2576 
2577 /*
2578  *	Routine:	task_port_kotype_valid_for_flavor
2579  *	Purpose:
2580  *		Check whether the kobject type of a mach port
2581  *      is valid for conversion to a task of given flavor.
2582  */
2583 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2584 task_port_kotype_valid_for_flavor(
2585 	natural_t          kotype,
2586 	mach_task_flavor_t flavor)
2587 {
2588 	switch (flavor) {
2589 	/* Ascending capability */
2590 	case TASK_FLAVOR_NAME:
2591 		if (kotype == IKOT_TASK_NAME) {
2592 			return TRUE;
2593 		}
2594 		OS_FALLTHROUGH;
2595 	case TASK_FLAVOR_INSPECT:
2596 		if (kotype == IKOT_TASK_INSPECT) {
2597 			return TRUE;
2598 		}
2599 		OS_FALLTHROUGH;
2600 	case TASK_FLAVOR_READ:
2601 		if (kotype == IKOT_TASK_READ) {
2602 			return TRUE;
2603 		}
2604 		OS_FALLTHROUGH;
2605 	case TASK_FLAVOR_CONTROL:
2606 		if (kotype == IKOT_TASK_CONTROL) {
2607 			return TRUE;
2608 		}
2609 		break;
2610 	default:
2611 		panic("strange task flavor");
2612 	}
2613 
2614 	return FALSE;
2615 }
2616 
2617 /*
2618  *	Routine: convert_port_to_task_with_flavor_locked_noref
2619  *	Purpose:
2620  *		Internal helper routine to convert from a locked port to a task.
2621  *	Args:
2622  *		port   - target port
2623  *		flavor - requested task port flavor
2624  *		options - port translation options
2625  *	Conditions:
2626  *		Port is locked and active.
2627  */
static task_t
convert_port_to_task_with_flavor_locked_noref(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_kobject_type_t type = ip_kotype(port);
	task_t task;

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* a stronger kobject type satisfies a weaker requested flavor */
	if (!task_port_kotype_valid_for_flavor(type, flavor)) {
		return TASK_NULL;
	}

	task = ipc_kobject_get_locked(port, type);
	if (task == TASK_NULL) {
		return TASK_NULL;
	}

	/* corpse tasks only translate when the caller explicitly allows it */
	if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
		assert(flavor == TASK_FLAVOR_CONTROL);
		return TASK_NULL;
	}

	/* TODO: rdar://42389187 */
	if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
		assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
	}

	/* apply the task conversion security policy unless told to skip it */
	if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
	    task_conversion_eval(current_task(), task, flavor)) {
		return TASK_NULL;
	}

	/* borrowed pointer: caller must hold the port lock or take a ref */
	return task;
}
2666 
2667 /*
2668  *	Routine: convert_port_to_task_with_flavor_locked
2669  *	Purpose:
2670  *		Internal helper routine to convert from a locked port to a task.
2671  *	Args:
2672  *		port   - target port
2673  *		flavor - requested task port flavor
2674  *		options - port translation options
2675  *		grp    - task reference group
2676  *	Conditions:
2677  *		Port is locked and active.
2678  *		Produces task ref or TASK_NULL.
2679  */
2680 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2681 convert_port_to_task_with_flavor_locked(
2682 	ipc_port_t              port,
2683 	mach_task_flavor_t      flavor,
2684 	port_intrans_options_t  options,
2685 	task_grp_t              grp)
2686 {
2687 	task_t task;
2688 
2689 	task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2690 	    options);
2691 
2692 	if (task != TASK_NULL) {
2693 		task_reference_grp(task, grp);
2694 	}
2695 
2696 	return task;
2697 }
2698 
2699 /*
2700  *	Routine:	convert_port_to_task_with_flavor
2701  *	Purpose:
2702  *		Internal helper for converting from a port to a task.
2703  *		Doesn't consume the port ref; produces a task ref,
2704  *		which may be null.
2705  *	Args:
2706  *		port   - target port
2707  *		flavor - requested task port flavor
2708  *		options - port translation options
2709  *		grp    - task reference group
2710  *	Conditions:
2711  *		Nothing locked.
2712  */
static task_t
convert_port_to_task_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options,
	task_grp_t         grp)
{
	task_t task = TASK_NULL;
	task_t self = current_task();

	if (IP_VALID(port)) {
		/* fast path: a task may always resolve its own self port */
		if (port == self->itk_self) {
			task_reference_grp(self, grp);
			return self;
		}

		ip_mq_lock(port);
		if (ip_active(port)) {
			task = convert_port_to_task_with_flavor_locked(port,
			    flavor, options, grp);
		}
		ip_mq_unlock(port);
	}

	return task;
}
2739 
/*
 * Port -> task conversion entry points.  Each wrapper fixes the requested
 * flavor, the translation options, and the reference group the resulting
 * task reference is accounted to (TASK_GRP_KERNEL vs TASK_GRP_MIG).
 */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
}

task_t
convert_port_to_task_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
}

task_read_t
convert_port_to_task_read(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

/* read flavor, skipping the conversion eval policy */
task_read_t
convert_port_to_task_read_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

task_read_t
convert_port_to_task_read_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}

/* inspect/name flavors always skip the conversion eval (see rdar://42389187) */
task_inspect_t
convert_port_to_task_inspect(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

task_inspect_t
convert_port_to_task_inspect_no_eval(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

task_inspect_t
convert_port_to_task_inspect_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}

task_name_t
convert_port_to_task_name(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
}

task_name_t
convert_port_to_task_name_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
}
2819 
2820 /*
2821  *	Routine:	convert_port_to_task_policy
2822  *	Purpose:
2823  *		Convert from a port to a task.
2824  *		Doesn't consume the port ref; produces a task ref,
2825  *		which may be null.
2826  *		If the port is being used with task_port_set(), any task port
2827  *		type other than TASK_CONTROL requires an entitlement. If the
2828  *		port is being used with task_port_get(), TASK_NAME requires an
2829  *		entitlement.
2830  *	Conditions:
2831  *		Nothing locked.
2832  */
2833 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2834 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2835 {
2836 	task_t task = TASK_NULL;
2837 
2838 	if (!IP_VALID(port)) {
2839 		return TASK_NULL;
2840 	}
2841 
2842 	task = set ?
2843 	    convert_port_to_task_mig(port) :
2844 	    convert_port_to_task_inspect_mig(port);
2845 
2846 	if (task == TASK_NULL &&
2847 	    IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2848 		task = convert_port_to_task_name_mig(port);
2849 	}
2850 
2851 	return task;
2852 }
2853 
/* MIG intran for task_policy set operations: control port required. */
task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, true);
}

/* MIG intran for task_policy get operations: inspect port suffices. */
task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)
{
	return convert_port_to_task_policy_mig(port, false);
}
2865 
2866 /*
2867  *	Routine:	convert_port_to_task_suspension_token
2868  *	Purpose:
2869  *		Convert from a port to a task suspension token.
2870  *		Doesn't consume the port ref; produces a suspension token ref,
2871  *		which may be null.
2872  *	Conditions:
2873  *		Nothing locked.
2874  */
static task_suspension_token_t
convert_port_to_task_suspension_token_grp(
	ipc_port_t              port,
	task_grp_t              grp)
{
	task_suspension_token_t task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* only IKOT_TASK_RESUME kobject ports translate to a token */
		task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
		if (task != TASK_NULL) {
			task_reference_grp(task, grp);
		}
		ip_mq_unlock(port);
	}

	return task;
}
2893 
/* The wrappers below differ only in reference-group accounting. */
task_suspension_token_t
convert_port_to_task_suspension_token_external(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
}

task_suspension_token_t
convert_port_to_task_suspension_token_mig(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
}

task_suspension_token_t
convert_port_to_task_suspension_token_kernel(
	ipc_port_t              port)
{
	return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
}
2914 
2915 /*
2916  *	Routine:	convert_port_to_space_with_flavor
2917  *	Purpose:
2918  *		Internal helper for converting from a port to a space.
2919  *		Doesn't consume the port ref; produces a space ref,
2920  *		which may be null.
2921  *	Args:
2922  *		port   - target port
2923  *		flavor - requested ipc space flavor
2924  *		options - port translation options
2925  *	Conditions:
2926  *		Nothing locked.
2927  */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* no name-flavor space accessors exist */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* borrowed task pointer, valid while the port lock is held */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2966 
/* control flavor: corpse tasks are rejected by the noref helper */
ipc_space_t
convert_port_to_space(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}

ipc_space_read_t
convert_port_to_space_read(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}

/* same as convert_port_to_space_read(), minus the conversion eval policy */
ipc_space_read_t
convert_port_to_space_read_no_eval(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}

ipc_space_inspect_t
convert_port_to_space_inspect(
	ipc_port_t      port)
{
	return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
2998 
2999 /*
3000  *	Routine:	convert_port_to_map_with_flavor
3001  *	Purpose:
3002  *		Internal helper for converting from a port to a map.
3003  *		Doesn't consume the port ref; produces a map ref,
3004  *		which may be null.
3005  *	Args:
3006  *		port   - target port
3007  *		flavor - requested vm map flavor
3008  *		options - port translation options
3009  *	Conditions:
3010  *		Nothing locked.
3011  */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t         port,
	mach_task_flavor_t flavor,
	port_intrans_options_t options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	/* map access must always pass the task conversion eval policy */
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			/* borrowed pointer; only valid while the port lock is held */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* never hand the kernel's own map out via a task port */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
3061 
3062 vm_map_t
convert_port_to_map(ipc_port_t port)3063 convert_port_to_map(
3064 	ipc_port_t              port)
3065 {
3066 	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
3067 	           PORT_INTRANS_OPTIONS_NONE);
3068 }
3069 
3070 vm_map_read_t
convert_port_to_map_read(ipc_port_t port)3071 convert_port_to_map_read(
3072 	ipc_port_t              port)
3073 {
3074 	return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
3075 	           PORT_INTRANS_ALLOW_CORPSE_TASK);
3076 }
3077 
3078 vm_map_inspect_t
convert_port_to_map_inspect(__unused ipc_port_t port)3079 convert_port_to_map_inspect(
3080 	__unused ipc_port_t     port)
3081 {
3082 	/* there is no vm_map_inspect_t routines at the moment. */
3083 	return VM_MAP_INSPECT_NULL;
3084 }
3085 
3086 /*
3087  *	Routine:	thread_port_kotype_valid_for_flavor
3088  *	Purpose:
3089  *		Check whether the kobject type of a mach port
3090  *      is valid for conversion to a thread of given flavor.
3091  */
3092 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)3093 thread_port_kotype_valid_for_flavor(
3094 	natural_t            kotype,
3095 	mach_thread_flavor_t flavor)
3096 {
3097 	switch (flavor) {
3098 	/* Ascending capability */
3099 	case THREAD_FLAVOR_INSPECT:
3100 		if (kotype == IKOT_THREAD_INSPECT) {
3101 			return TRUE;
3102 		}
3103 		OS_FALLTHROUGH;
3104 	case THREAD_FLAVOR_READ:
3105 		if (kotype == IKOT_THREAD_READ) {
3106 			return TRUE;
3107 		}
3108 		OS_FALLTHROUGH;
3109 	case THREAD_FLAVOR_CONTROL:
3110 		if (kotype == IKOT_THREAD_CONTROL) {
3111 			return TRUE;
3112 		}
3113 		break;
3114 	default:
3115 		panic("strange thread flavor");
3116 	}
3117 
3118 	return FALSE;
3119 }
3120 
/*
 *	Routine: convert_port_to_thread_with_flavor_locked
 *	Purpose:
 *		Internal helper routine to convert from a locked port to a thread.
 *	Args:
 *		port   - target port
 *		flavor - requested thread port flavor
 *		options - port translation options
 *	Conditions:
 *		Port is locked and active.
 *		Produces a thread ref or THREAD_NULL.
 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t               port,
	mach_thread_flavor_t     flavor,
	port_intrans_options_t   options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* the port's kobject type must be at least as strong as the flavor */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* caller may explicitly refuse resolving to the calling thread */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* restrict translation to threads of the calling task */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* corpse-owned threads only resolve when the caller opted in */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* security policy check against the thread's owning task */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	/* success: produce a +1 thread reference */
	thread_reference(thread);
	return thread;
}
3187 
3188 /*
3189  *	Routine:	convert_port_to_thread_with_flavor
3190  *	Purpose:
3191  *		Internal helper for converting from a port to a thread.
3192  *		Doesn't consume the port ref; produces a thread ref,
3193  *		which may be null.
3194  *	Args:
3195  *		port   - target port
3196  *		flavor - requested thread port flavor
3197  *		options - port translation options
3198  *	Conditions:
3199  *		Nothing locked.
3200  */
3201 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3202 convert_port_to_thread_with_flavor(
3203 	ipc_port_t           port,
3204 	mach_thread_flavor_t flavor,
3205 	port_intrans_options_t options)
3206 {
3207 	thread_t thread = THREAD_NULL;
3208 
3209 	if (IP_VALID(port)) {
3210 		ip_mq_lock(port);
3211 		if (ip_active(port)) {
3212 			thread = convert_port_to_thread_with_flavor_locked(port,
3213 			    flavor, options);
3214 		}
3215 		ip_mq_unlock(port);
3216 	}
3217 
3218 	return thread;
3219 }
3220 
3221 thread_t
convert_port_to_thread(ipc_port_t port)3222 convert_port_to_thread(
3223 	ipc_port_t              port)
3224 {
3225 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
3226 	           PORT_INTRANS_OPTIONS_NONE);
3227 }
3228 
3229 thread_read_t
convert_port_to_thread_read(ipc_port_t port)3230 convert_port_to_thread_read(
3231 	ipc_port_t              port)
3232 {
3233 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3234 	           PORT_INTRANS_ALLOW_CORPSE_TASK);
3235 }
3236 
3237 static thread_read_t
convert_port_to_thread_read_no_eval(ipc_port_t port)3238 convert_port_to_thread_read_no_eval(
3239 	ipc_port_t              port)
3240 {
3241 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
3242 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3243 }
3244 
3245 thread_inspect_t
convert_port_to_thread_inspect(ipc_port_t port)3246 convert_port_to_thread_inspect(
3247 	ipc_port_t              port)
3248 {
3249 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3250 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3251 }
3252 
3253 static thread_inspect_t
convert_port_to_thread_inspect_no_eval(ipc_port_t port)3254 convert_port_to_thread_inspect_no_eval(
3255 	ipc_port_t              port)
3256 {
3257 	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
3258 	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3259 }
3260 
3261 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3262 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3263 {
3264 	switch (flavor) {
3265 	case THREAD_FLAVOR_CONTROL:
3266 		return IKOT_THREAD_CONTROL;
3267 	case THREAD_FLAVOR_READ:
3268 		return IKOT_THREAD_READ;
3269 	default:
3270 		return IKOT_THREAD_INSPECT;
3271 	}
3272 }
3273 
/*
 *	Routine:	convert_thread_to_port_with_flavor
 *	Purpose:
 *		Convert from a thread to a port of given flavor.
 *		Consumes a thread ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t              thread,
	thread_ro_t           tro,
	mach_thread_flavor_t  flavor)
{
	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	/* thread IPC already torn down: return IP_NULL */
	if (!thread->ipc_active) {
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		/* control port always exists while ipc_active */
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	} else if (IP_VALID(port)) {
		/* read/inspect port already allocated: just add a send right */
		(void)ipc_kobject_make_send_nsrequest(port, thread, kotype);
	} else {
		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(thread, kotype,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
		/*
		 * If Developer Mode is off, substitute read port for control
		 * port if copying out to owning task's space, for the sake of
		 * in-process exception handler.
		 *
		 * Also see: exception_deliver().
		 */
		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
			ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
			port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
		}
		/* publish the lazily created port in the read-only thread data */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
3347 
3348 ipc_port_t
convert_thread_to_port(thread_t thread)3349 convert_thread_to_port(
3350 	thread_t                thread)
3351 {
3352 	thread_ro_t tro = get_thread_ro(thread);
3353 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
3354 }
3355 
3356 ipc_port_t
convert_thread_read_to_port(thread_read_t thread)3357 convert_thread_read_to_port(thread_read_t thread)
3358 {
3359 	thread_ro_t tro = get_thread_ro(thread);
3360 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
3361 }
3362 
3363 ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)3364 convert_thread_inspect_to_port(thread_inspect_t thread)
3365 {
3366 	thread_ro_t tro = get_thread_ro(thread);
3367 	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
3368 }
3369 
3370 void
convert_thread_array_to_ports(thread_act_array_t array,size_t count,mach_thread_flavor_t flavor)3371 convert_thread_array_to_ports(
3372 	thread_act_array_t      array,
3373 	size_t                  count,
3374 	mach_thread_flavor_t    flavor)
3375 {
3376 	thread_t *thread_list = (thread_t *)array;
3377 	task_t task_self = current_task();
3378 
3379 	for (size_t i = 0; i < count; i++) {
3380 		thread_t   thread = thread_list[i];
3381 		ipc_port_t port;
3382 
3383 		switch (flavor) {
3384 		case THREAD_FLAVOR_CONTROL:
3385 			if (get_threadtask(thread) == task_self) {
3386 				port = convert_thread_to_port_pinned(thread);
3387 			} else {
3388 				port = convert_thread_to_port(thread);
3389 			}
3390 			break;
3391 		case THREAD_FLAVOR_READ:
3392 			port = convert_thread_read_to_port(thread);
3393 			break;
3394 		case THREAD_FLAVOR_INSPECT:
3395 			port = convert_thread_inspect_to_port(thread);
3396 			break;
3397 		}
3398 
3399 		array[i].port = port;
3400 	}
3401 }
3402 
3403 
3404 /*
3405  *	Routine:	port_name_to_thread
3406  *	Purpose:
3407  *		Convert from a port name to a thread reference
3408  *		A name of MACH_PORT_NULL is valid for the null thread.
3409  *	Conditions:
3410  *		Nothing locked.
3411  */
3412 thread_t
port_name_to_thread(mach_port_name_t name,port_intrans_options_t options)3413 port_name_to_thread(
3414 	mach_port_name_t         name,
3415 	port_intrans_options_t options)
3416 {
3417 	thread_t        thread = THREAD_NULL;
3418 	ipc_port_t      kport;
3419 	kern_return_t kr;
3420 
3421 	if (MACH_PORT_VALID(name)) {
3422 		kr = ipc_port_translate_send(current_space(), name, &kport);
3423 		if (kr == KERN_SUCCESS) {
3424 			/* port is locked and active */
3425 			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
3426 			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
3427 			thread = convert_port_to_thread_with_flavor_locked(kport,
3428 			    THREAD_FLAVOR_CONTROL, options);
3429 			ip_mq_unlock(kport);
3430 		}
3431 	}
3432 
3433 	return thread;
3434 }
3435 
3436 /*
3437  *	Routine:	port_name_is_pinned_itk_self
3438  *	Purpose:
3439  *		Returns whether this port name is for the pinned
3440  *		mach_task_self (if it exists).
3441  *
3442  *		task_self_trap() when the task port is pinned,
3443  *		will memorize the name the port has in the space
3444  *		in ip_receiver_name, which we can use to fast-track
3445  *		this answer without taking any lock.
3446  *
3447  *		ipc_task_disable() will set `ip_receiver_name` back to
3448  *		MACH_PORT_SPECIAL_DEFAULT.
3449  *
3450  *	Conditions:
3451  *		self must be current_task()
3452  *		Nothing locked.
3453  */
3454 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3455 port_name_is_pinned_itk_self(
3456 	task_t             self,
3457 	mach_port_name_t   name)
3458 {
3459 	ipc_port_t kport = self->itk_self;
3460 	return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3461 	       kport->ip_pinned && ip_get_receiver_name(kport) == name;
3462 }
3463 
/*
 *	Routine:	port_name_to_current_task*_noref
 *	Purpose:
 *		Convert from a port name to current_task()
 *		A name of MACH_PORT_NULL is valid for the null task.
 *
 *		If current_task() is in the process of being terminated,
 *		this might return a non NULL task even when port_name_to_task()
 *		would.
 *
 *		However, this is an acceptable race that can't be controlled by
 *		userspace, and that downstream code using the returned task
 *		has to handle anyway.
 *
 *		ipc_space_disable() does try to narrow this race,
 *		by causing port_name_is_pinned_itk_self() to fail.
 *
 *	Returns:
 *		current_task() if the port name was for current_task()
 *		at the appropriate flavor.
 *
 *		TASK_NULL otherwise.
 *
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t   name,
	mach_task_flavor_t flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* fast path: pinned mach_task_self() resolves without any locks */
	if (port_name_is_pinned_itk_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			ipc_kobject_type_t type = ip_kotype(kport);
			if (task_port_kotype_valid_for_flavor(type, flavor)) {
				/* borrowed pointer: no task reference is taken */
				task = ipc_kobject_get_locked(kport, type);
			}
			ip_mq_unlock(kport);
			/* only current_task() may be returned without a ref */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3520 
3521 task_t
port_name_to_current_task_noref(mach_port_name_t name)3522 port_name_to_current_task_noref(
3523 	mach_port_name_t name)
3524 {
3525 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
3526 }
3527 
3528 task_read_t
port_name_to_current_task_read_noref(mach_port_name_t name)3529 port_name_to_current_task_read_noref(
3530 	mach_port_name_t name)
3531 {
3532 	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
3533 }
3534 
/*
 *	Routine:	port_name_to_task_grp
 *	Purpose:
 *		Convert from a port name to a task reference
 *		A name of MACH_PORT_NULL is valid for the null task.
 *		Acquire a send right if [inout] @kportp is non-null.
 *	Conditions:
 *		Nothing locked.
 */
static task_t
port_name_to_task_grp(
	mach_port_name_t name,
	mach_task_flavor_t flavor,
	port_intrans_options_t options,
	task_grp_t       grp,
	ipc_port_t       *kportp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/*
	 * Fast path for pinned mach_task_self(); only usable when no
	 * send right was requested, since it skips the port lookup.
	 */
	if (!kportp && port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(kport,
			    flavor, options, grp);
			if (kportp) {
				/* send right requested */
				ipc_port_copy_send_any_locked(kport);
				*kportp = kport;
			}
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3578 
3579 task_t
port_name_to_task_external(mach_port_name_t name)3580 port_name_to_task_external(
3581 	mach_port_name_t name)
3582 {
3583 	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_EXTERNAL, NULL);
3584 }
3585 
3586 task_t
port_name_to_task_kernel(mach_port_name_t name)3587 port_name_to_task_kernel(
3588 	mach_port_name_t name)
3589 {
3590 	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL, NULL);
3591 }
3592 
3593 /*
3594  *	Routine:	port_name_to_task_read
3595  *	Purpose:
3596  *		Convert from a port name to a task reference
3597  *		A name of MACH_PORT_NULL is valid for the null task.
3598  *	Conditions:
3599  *		Nothing locked.
3600  */
3601 task_read_t
port_name_to_task_read(mach_port_name_t name)3602 port_name_to_task_read(
3603 	mach_port_name_t name)
3604 {
3605 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, NULL);
3606 }
3607 
3608 /*
3609  *	Routine:	port_name_to_task_read_and_send_right
3610  *	Purpose:
3611  *		Convert from a port name to a task reference
3612  *		A name of MACH_PORT_NULL is valid for the null task.
3613  *	Conditions:
3614  *		On success, ipc port returned with a +1 send right.
3615  */
3616 task_read_t
port_name_to_task_read_and_send_right(mach_port_name_t name,ipc_port_t * kportp)3617 port_name_to_task_read_and_send_right(
3618 	mach_port_name_t name,
3619 	ipc_port_t *kportp)
3620 {
3621 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, kportp);
3622 }
3623 
3624 /*
3625  *	Routine:	port_name_to_task_read_no_eval
3626  *	Purpose:
3627  *		Convert from a port name to a task reference
3628  *		A name of MACH_PORT_NULL is valid for the null task.
3629  *		Skips task_conversion_eval() during conversion.
3630  *	Conditions:
3631  *		Nothing locked.
3632  */
3633 task_read_t
port_name_to_task_read_no_eval(mach_port_name_t name)3634 port_name_to_task_read_no_eval(
3635 	mach_port_name_t name)
3636 {
3637 	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3638 	    PORT_INTRANS_ALLOW_CORPSE_TASK;
3639 	return port_name_to_task_grp(name, TASK_FLAVOR_READ, options, TASK_GRP_KERNEL, NULL);
3640 }
3641 
3642 /*
3643  *	Routine:	port_name_to_task_name
3644  *	Purpose:
3645  *		Convert from a port name to a task reference
3646  *		A name of MACH_PORT_NULL is valid for the null task.
3647  *	Conditions:
3648  *		Nothing locked.
3649  */
3650 task_name_t
port_name_to_task_name(mach_port_name_t name)3651 port_name_to_task_name(
3652 	mach_port_name_t name)
3653 {
3654 	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
3655 	    PORT_INTRANS_ALLOW_CORPSE_TASK;
3656 	return port_name_to_task_grp(name, TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL, NULL);
3657 }
3658 
3659 /*
3660  *	Routine:	port_name_to_task_id_token
3661  *	Purpose:
3662  *		Convert from a port name to a task identity token reference
3663  *	Conditions:
3664  *		Nothing locked.
3665  */
3666 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3667 port_name_to_task_id_token(
3668 	mach_port_name_t name)
3669 {
3670 	ipc_port_t port;
3671 	kern_return_t kr;
3672 	task_id_token_t token = TASK_ID_TOKEN_NULL;
3673 
3674 	if (MACH_PORT_VALID(name)) {
3675 		kr = ipc_port_translate_send(current_space(), name, &port);
3676 		if (kr == KERN_SUCCESS) {
3677 			token = convert_port_to_task_id_token(port);
3678 			ip_mq_unlock(port);
3679 		}
3680 	}
3681 	return token;
3682 }
3683 
3684 /*
3685  *	Routine:	port_name_to_host
3686  *	Purpose:
3687  *		Convert from a port name to a host pointer.
3688  *		NOTE: This does _not_ return a +1 reference to the host_t
3689  *	Conditions:
3690  *		Nothing locked.
3691  */
3692 host_t
port_name_to_host(mach_port_name_t name)3693 port_name_to_host(
3694 	mach_port_name_t name)
3695 {
3696 	host_t host = HOST_NULL;
3697 	kern_return_t kr;
3698 	ipc_port_t port;
3699 
3700 	if (MACH_PORT_VALID(name)) {
3701 		kr = ipc_port_translate_send(current_space(), name, &port);
3702 		if (kr == KERN_SUCCESS) {
3703 			host = convert_port_to_host(port);
3704 			ip_mq_unlock(port);
3705 		}
3706 	}
3707 	return host;
3708 }
3709 
3710 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3711 task_flavor_to_kotype(mach_task_flavor_t flavor)
3712 {
3713 	switch (flavor) {
3714 	case TASK_FLAVOR_CONTROL:
3715 		return IKOT_TASK_CONTROL;
3716 	case TASK_FLAVOR_READ:
3717 		return IKOT_TASK_READ;
3718 	case TASK_FLAVOR_INSPECT:
3719 		return IKOT_TASK_INSPECT;
3720 	default:
3721 		return IKOT_TASK_NAME;
3722 	}
3723 }
3724 
/*
 *	Routine:	convert_task_to_port_with_flavor
 *	Purpose:
 *		Convert from a task to a port of given flavor.
 *		Consumes a task ref; produces a naked send right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t              task,
	mach_task_flavor_t  flavor,
	task_grp_t          grp)
{
	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	itk_lock(task);

	/* task IPC already torn down: return IP_NULL */
	if (!task->ipc_active) {
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		/* control/name ports always exist while ipc_active */
		port = ipc_kobject_make_send(task->itk_task_ports[flavor],
		    task, kotype);
		break;
	/*
	 * Claim a send right on the task read/inspect port,
	 * and request a no-senders notification on that port
	 * (if none outstanding).
	 *
	 * The task's itk_lock is used to synchronize the handling
	 * of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		port = task->itk_task_ports[flavor];
		if (IP_VALID(port)) {
			(void)ipc_kobject_make_send_nsrequest(port,
			    task, kotype);
		} else {
			/* lazily allocate the read/inspect port */
			port = ipc_kobject_alloc_port(task, kotype,
			    IPC_KOBJECT_ALLOC_MAKE_SEND |
			    IPC_KOBJECT_ALLOC_NSREQUEST |
			    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
			/*
			 * If Developer Mode is off, substitute read port for control port if
			 * copying out to owning task's space, for the sake of in-process
			 * exception handler.
			 *
			 * Also see: exception_deliver().
			 */
			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
				ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
				port->ip_kolabel->ikol_alt_port = task->itk_self;
			}

			task->itk_task_ports[flavor] = port;
		}
		break;
	}

exit:
	itk_unlock(task);
	task_deallocate_grp(task, grp);
	return port;
}
3807 
/*
 *	Routine:	convert_corpse_to_port_and_nsrequest
 *	Purpose:
 *		Produce a naked send right for a corpse task's control port,
 *		arming a no-senders notification on it.
 *		Consumes the corpse task ref.
 *	Conditions:
 *		Nothing locked. `corpse` must be a corpse task.
 */
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	kr = ipc_kobject_make_send_nsrequest(port, corpse, IKOT_TASK_CONTROL);
	/* KERN_ALREADY_WAITING means a no-senders request was already armed */
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	task_deallocate(corpse);
	return port;
}
3825 
3826 ipc_port_t
convert_task_to_port(task_t task)3827 convert_task_to_port(
3828 	task_t          task)
3829 {
3830 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
3831 }
3832 
3833 ipc_port_t
convert_task_read_to_port(task_read_t task)3834 convert_task_read_to_port(
3835 	task_read_t          task)
3836 {
3837 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
3838 }
3839 
3840 ipc_port_t
convert_task_inspect_to_port(task_inspect_t task)3841 convert_task_inspect_to_port(
3842 	task_inspect_t          task)
3843 {
3844 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
3845 }
3846 
3847 ipc_port_t
convert_task_name_to_port(task_name_t task)3848 convert_task_name_to_port(
3849 	task_name_t             task)
3850 {
3851 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
3852 }
3853 
3854 ipc_port_t
convert_task_to_port_external(task_t task)3855 convert_task_to_port_external(task_t task)
3856 {
3857 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
3858 }
3859 
3860 ipc_port_t
convert_task_read_to_port_external(task_t task)3861 convert_task_read_to_port_external(task_t task)
3862 {
3863 	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
3864 }
3865 
/*
 *	Routine:	convert_task_to_port_pinned
 *	Purpose:
 *		Convert current_task() to a send right on its (pinned)
 *		control port. Consumes a task ref; produces a naked send
 *		right which may be IP_NULL if IPC is no longer active.
 *	Conditions:
 *		Nothing locked. task must be current_task().
 */
ipc_port_t
convert_task_to_port_pinned(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);

	if (task->ipc_active) {
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
	}

	/* for immovable tasks, the self port must be pinned and immovable */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	task_deallocate(task);
	return port;
}
3890 
3891 void
convert_task_array_to_ports(task_array_t array,size_t count,mach_task_flavor_t flavor)3892 convert_task_array_to_ports(
3893 	task_array_t            array,
3894 	size_t                  count,
3895 	mach_task_flavor_t      flavor)
3896 {
3897 	task_t *task_list = (task_t *)array;
3898 	task_t task_self = current_task();
3899 
3900 	for (size_t i = 0; i < count; i++) {
3901 		task_t     task = task_list[i];
3902 		ipc_port_t port;
3903 
3904 		switch (flavor) {
3905 		case TASK_FLAVOR_CONTROL:
3906 			if (task == task_self) {
3907 				/* if current_task(), return pinned port */
3908 				port = convert_task_to_port_pinned(task);
3909 			} else {
3910 				port = convert_task_to_port(task);
3911 			}
3912 			break;
3913 		case TASK_FLAVOR_READ:
3914 			port = convert_task_read_to_port(task);
3915 			break;
3916 		case TASK_FLAVOR_INSPECT:
3917 			port = convert_task_inspect_to_port(task);
3918 			break;
3919 		case TASK_FLAVOR_NAME:
3920 			port = convert_task_name_to_port(task);
3921 			break;
3922 		}
3923 
3924 		array[i].port = port;
3925 	}
3926 }
3927 
/*
 *	Routine:	convert_task_suspension_token_to_port_grp
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t         task,
	task_grp_t                      grp)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* the resume port is allocated lazily, on first use */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = task->itk_resume;
		ipc_kobject_require(port, task, IKOT_TASK_RESUME);
		port = ipc_port_make_sonce(port);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* task already terminated: no send-once right to hand out */
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate_grp(task, grp);

	return port;
}
3971 
3972 ipc_port_t
convert_task_suspension_token_to_port_external(task_suspension_token_t task)3973 convert_task_suspension_token_to_port_external(
3974 	task_suspension_token_t         task)
3975 {
3976 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
3977 }
3978 
3979 ipc_port_t
convert_task_suspension_token_to_port_mig(task_suspension_token_t task)3980 convert_task_suspension_token_to_port_mig(
3981 	task_suspension_token_t         task)
3982 {
3983 	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
3984 }
3985 
/*
 *	Routine:	convert_thread_to_port_pinned
 *	Purpose:
 *		Convert a thread to a send right on its (pinned) control
 *		port. Consumes a thread ref; produces a naked send right
 *		which may be IP_NULL if IPC is no longer active.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_thread_to_port_pinned(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t  port = IP_NULL;

	thread_mtx_lock(thread);

	if (thread->ipc_active) {
		port = ipc_kobject_make_send(tro->tro_self_port,
		    thread, IKOT_THREAD_CONTROL);
	}

	/* for immovable tasks, the thread self port must be immovable */
	if (port && task_is_immovable(tro->tro_task)) {
		assert(ip_is_immovable_send(port));
	}

	thread_mtx_unlock(thread);
	thread_deallocate(thread);
	return port;
}
4008 /*
4009  *	Routine:	space_deallocate
4010  *	Purpose:
4011  *		Deallocate a space ref produced by convert_port_to_space.
4012  *	Conditions:
4013  *		Nothing locked.
4014  */
4015 
4016 void
space_deallocate(ipc_space_t space)4017 space_deallocate(
4018 	ipc_space_t     space)
4019 {
4020 	if (space != IS_NULL) {
4021 		is_release(space);
4022 	}
4023 }
4024 
4025 /*
4026  *	Routine:	space_read_deallocate
4027  *	Purpose:
4028  *		Deallocate a space read ref produced by convert_port_to_space_read.
4029  *	Conditions:
4030  *		Nothing locked.
4031  */
4032 
4033 void
space_read_deallocate(ipc_space_read_t space)4034 space_read_deallocate(
4035 	ipc_space_read_t     space)
4036 {
4037 	if (space != IS_INSPECT_NULL) {
4038 		is_release((ipc_space_t)space);
4039 	}
4040 }
4041 
4042 /*
4043  *	Routine:	space_inspect_deallocate
4044  *	Purpose:
4045  *		Deallocate a space inspect ref produced by convert_port_to_space_inspect.
4046  *	Conditions:
4047  *		Nothing locked.
4048  */
4049 
4050 void
space_inspect_deallocate(ipc_space_inspect_t space)4051 space_inspect_deallocate(
4052 	ipc_space_inspect_t     space)
4053 {
4054 	if (space != IS_INSPECT_NULL) {
4055 		is_release((ipc_space_t)space);
4056 	}
4057 }
4058 
4059 
4060 static boolean_t
behavior_is_identity_protected(int new_behavior)4061 behavior_is_identity_protected(int new_behavior)
4062 {
4063 	return ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED) ||
4064 	       ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE) ||
4065 	       ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE_IDENTITY_PROTECTED);
4066 }
4067 
/*
 * Report a set-exception-port policy violation to CoreAnalytics,
 * recording the name of the current (setting) process, the name of the
 * excepting process, and the exception mask involved.
 */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	/* both names are copied into fixed-size buffers inside the event payload */
	task_procname(current_task(), (char *) &event->current_proc, sizeof(event->current_proc));
	task_procname(excepting_task, (char *) &event->thread_proc, sizeof(event->thread_proc));
	event->mask = mask;

	CA_EVENT_SEND(ca_event);
}
4080 
/*
 * Record an exception-behavior policy violation.
 *
 * Returns whether the violation should be ignored (TRUE = let the
 * operation proceed anyway).  When the thid_should_crash boot-arg is
 * set, the violating task is also given a guard exception (lightweight
 * corpse); telemetry is sent in every case.
 */
static boolean_t
set_exception_behavior_violation(const task_t excepting_task,
    const exception_mask_t mask)
{
	if (thid_should_crash) {
		/* create lightweight corpse */
		mach_port_guard_exception(0, 0, 0, kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
	}

	/* always report the proc name to CA */
	send_set_exception_telemetry(excepting_task, mask);

	/* if the bootarg has been manually set to false, ignore the violation */
	return !thid_should_crash;
}
4097 
4098 /*
4099  * Protect platform binary task/thread ports.
4100  * excepting_task is NULL if we are setting a host exception port.
4101  */
4102 static boolean_t
exception_exposes_protected_ports(const ipc_port_t new_port,const task_t excepting_task)4103 exception_exposes_protected_ports(const ipc_port_t new_port, const task_t excepting_task)
4104 {
4105 	if (!IP_VALID(new_port) || is_ux_handler_port(new_port)) {
4106 		/*
4107 		 * sending exceptions to invalid port does not pose risk
4108 		 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
4109 		 */
4110 		return FALSE;
4111 	} else if (excepting_task) {
4112 		/*  setting task/thread exception port - protect hardened binaries */
4113 		return task_is_hardened_binary(excepting_task);
4114 	}
4115 
4116 	/* setting host port exposes all processes - always protect. */
4117 	return TRUE;
4118 }
4119 
4120 static boolean_t
exception_ports_frozen(task_t excepting_task)4121 exception_ports_frozen(task_t excepting_task)
4122 {
4123 	return excepting_task &&
4124 	       (task_ro_flags_get(excepting_task) & TFRO_FREEZE_EXCEPTION_PORTS);
4125 }
4126 
#if XNU_TARGET_OS_OSX && CONFIG_CSR
/*
 * Returns true when System Integrity Protection is enforcing.
 *
 * csr_check() returns 0 when the queried override (unrestricted
 * filesystem access) is allowed — i.e. when SIP is disabled — so a
 * non-zero result means SIP is enabled.
 *
 * Note: `(void)` rather than `()` — an empty parameter list in C
 * declares a function with unspecified parameters (CERT DCL20-C).
 */
static bool
SIP_is_enabled(void)
{
	return csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0;
}
#endif /* XNU_TARGET_OS_OSX && CONFIG_CSR*/
4134 
/*
 * Enforce that exception ports exposing protected targets use one of the
 * identity-protected behaviors.  Returns TRUE when the request is
 * acceptable (or the violation is being ignored per policy), FALSE when
 * the caller must deny it.  excepting_task may be NULL (host port case);
 * the helpers below are expected to tolerate that — TODO confirm
 * task_opted_out_mach_hardening() handles a NULL task.
 */
static boolean_t
exception_is_identity_protected(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask)
{
	if (exception_exposes_protected_ports(new_port, excepting_task)
	    && !behavior_is_identity_protected(new_behavior)
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(excepting_task)     /* Some tasks are opted out more generally */
#if CONFIG_CSR
	    && SIP_is_enabled()     /* cannot enforce if SIP is disabled */
#endif /* CONFIG_CSR */
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    ) {
		return set_exception_behavior_violation(excepting_task, mask);
	}

	return true;
}
4157 
/*
 * Top-level security policy gate for setting/swapping exception ports.
 * Returns TRUE when the caller may proceed.  Order of checks:
 *   1. the SET_EXCEPTION_ENTITLEMENT bypasses all policy (debuggers/tools);
 *   2. hardened tasks that opted into the "only one exception port" policy
 *      reject non-hardened registration flows;
 *   3. everyone else must satisfy the identity-protection policy.
 */
static boolean_t
set_exception_behavior_allowed(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask, const bool hardened_exception)
{
	const char *excepting_task_name = "";
	const char *cur_task_name = "";

	if (excepting_task) {
		excepting_task_name = task_best_name(excepting_task);
	}
	if (current_task()) {
		cur_task_name = task_best_name(current_task());
	}

	/* Allow debuggers, tests, and tooling to set exception ports however they wish */
	if (IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		kprintf("Allowing set_exception_ports from [%s] on [%s] for "
		    "entitled process/debugger\n", cur_task_name, excepting_task_name);
		return true;
	}

	/* excepting_task can be NULL if setting the host port */
	if (excepting_task) {
		/*
		 * Only allow hardened set_exception_port calls on hardened tasks
		 * that opt in via entitlement
		 */
		bool only_one_exception_port =
		    IOTaskHasEntitlement(excepting_task, IPC_ONLY_ONE_EXCEPTION_PORT)
		    && task_is_hardened_binary(excepting_task);

		if (!hardened_exception && only_one_exception_port) {
			kprintf("Disallowing set_exception_ports from [%s] on [%s] due "
			    "to only_one_exception_port policy\n", cur_task_name, excepting_task_name);
			return set_exception_behavior_violation(excepting_task, mask);
		}
	}

	/* Everyone else follows the standard policy and must use identity protected exceptions */
	return exception_is_identity_protected(new_port, new_behavior, excepting_task, mask);
}
4199 
4200 /*
4201  *	Routine:	set_exception_ports_validation
4202  *	Purpose:
4203  *		Common argument validation shared between all exception port setting/swapping routines
4204  *	Conditions:
4205  *		Nothing locked.
4206  *  Returns:
4207  *      KERN_SUCCESS			Setting the exception port is allowed with these arguments
4208  *		KERN_INVALID_ARGUMENT	Invalid arguments
4209  *		KERN_INVALID_RIGHT		Incorrect port configuration
4210  *		KERN_DENIED				Denied by security policy
4211  */
kern_return_t
set_exception_ports_validation(
	task_t                  task,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor,
	bool hardened_exception
	)
{
	/* reject any bit outside the set of defined exception types */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* the behavior is only validated when a real destination port is supplied */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
		case EXCEPTION_IDENTITY_PROTECTED:
		case EXCEPTION_STATE_IDENTITY_PROTECTED:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but exception ports to still be set.
	 */
	if (IP_VALID(new_port) &&
	    ((!ip_is_exception_port(new_port) && new_port->ip_immovable_receive) ||
	    new_port->ip_immovable_send)) {
		return KERN_INVALID_RIGHT;
	}


	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * EXCEPTION_IDENTITY_PROTECTED and backtrace-preferred delivery both
	 * require the MACH_EXCEPTION_CODES modifier to be set.
	 */
	if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
	    (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
	    && !(new_behavior & MACH_EXCEPTION_CODES)) {
		return KERN_INVALID_ARGUMENT;
	}

	/* last, apply the security policy (entitlements / hardening / SIP) */
	if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, hardened_exception)) {
		return KERN_DENIED;
	}

	return KERN_SUCCESS;
}
4273 
4274 /*
4275  *	Routine:	thread_set_exception_ports_internal
4276  *	Purpose:
4277  *		Set a new exception action on the thread
4278  *	Conditions:
4279  *		Arguments have been validated via `set_exception_ports_validation`
4280  *		Nothing locked.
4281  *  Returns:
4282  *      KERN_SUCCESS	Setting the exception port is allowed with these arguments
4283  *		KERN_FAILURE	Thread is inactive
4284  */
kern_return_t
thread_set_exception_ports_internal(
	thread_t                thread,
	exception_mask_t        exception_mask,
	ipc_port_t              new_port,
	exception_behavior_t    new_behavior,
	thread_state_flavor_t   new_flavor,
	boolean_t               hardened)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());

#if CONFIG_MACF
	/* MAC hook: the policy may veto setting thread exception ports */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* lazily allocate the per-thread exception action array */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/* stash the displaced right; released below, outside the mutex */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
			action->hardened = hardened;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* drop displaced send rights now that the thread mutex is released */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4357 
4358 /*
4359  *	Routine:	thread/task_set_exception_ports [kernel call]
4360  *	Purpose:
4361  *			Sets the thread/task exception port, flavor and
4362  *			behavior for the exception types specified by the mask.
4363  *			There will be one send right per exception per valid
4364  *			port.
4365  *	Conditions:
4366  *		Nothing locked.  If successful, consumes
4367  *		the supplied send right.
4368  *	Returns:
4369  *		KERN_SUCCESS		Changed the special port.
4370  *		KERN_INVALID_ARGUMENT	The thread is null,
4371  *					Illegal mask bit set.
4372  *					Illegal exception behavior
4373  *		KERN_FAILURE		The thread is dead.
4374  *		KERN_NO_ACCESS		Restricted access to set port
4375  */
4376 
4377 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4378 thread_set_exception_ports(
4379 	thread_t                thread,
4380 	exception_mask_t        exception_mask,
4381 	ipc_port_t              new_port,
4382 	exception_behavior_t    new_behavior,
4383 	thread_state_flavor_t   new_flavor)
4384 {
4385 	if (thread == THREAD_NULL) {
4386 		return KERN_INVALID_ARGUMENT;
4387 	}
4388 	bool hardened_exception_flow = false;
4389 	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
4390 	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
4391 	if (kr != KERN_SUCCESS) {
4392 		return kr;
4393 	}
4394 
4395 	return thread_set_exception_ports_internal(thread, exception_mask, new_port, new_behavior, new_flavor, false);
4396 }
4397 
4398 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4399 task_set_exception_ports(
4400 	task_t                                  task,
4401 	exception_mask_t                exception_mask,
4402 	ipc_port_t                              new_port,
4403 	exception_behavior_t    new_behavior,
4404 	thread_state_flavor_t   new_flavor)
4405 {
4406 	ipc_port_t              old_port[EXC_TYPES_COUNT];
4407 	boolean_t privileged = task_is_privileged(current_task());
4408 	register int    i;
4409 
4410 	if (task == TASK_NULL) {
4411 		return KERN_INVALID_ARGUMENT;
4412 	}
4413 	bool hardened_exception_flow = false;
4414 	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
4415 	    new_port, new_behavior, new_flavor, hardened_exception_flow);
4416 	if (kr != KERN_SUCCESS) {
4417 		return kr;
4418 	}
4419 
4420 
4421 #if CONFIG_MACF
4422 	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
4423 		return KERN_NO_ACCESS;
4424 	}
4425 
4426 	struct label *new_label = mac_exc_create_label_for_current_proc();
4427 #endif
4428 
4429 	itk_lock(task);
4430 
4431 	/*
4432 	 * Allow setting exception port during the span of ipc_task_init() to
4433 	 * ipc_task_terminate(). posix_spawn() port actions can set exception
4434 	 * ports on target task _before_ task IPC access is enabled.
4435 	 */
4436 	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4437 		itk_unlock(task);
4438 #if CONFIG_MACF
4439 		mac_exc_free_label(new_label);
4440 #endif
4441 		return KERN_FAILURE;
4442 	}
4443 
4444 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4445 		if ((exception_mask & (1 << i))
4446 #if CONFIG_MACF
4447 		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4448 #endif
4449 		    ) {
4450 			old_port[i] = task->exc_actions[i].port;
4451 			task->exc_actions[i].port =
4452 			    exception_port_copy_send(new_port);
4453 			task->exc_actions[i].behavior = new_behavior;
4454 			task->exc_actions[i].flavor = new_flavor;
4455 			task->exc_actions[i].privileged = privileged;
4456 		} else {
4457 			old_port[i] = IP_NULL;
4458 		}
4459 	}
4460 
4461 	itk_unlock(task);
4462 
4463 #if CONFIG_MACF
4464 	mac_exc_free_label(new_label);
4465 #endif
4466 
4467 	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4468 		if (IP_VALID(old_port[i])) {
4469 			ipc_port_release_send(old_port[i]);
4470 		}
4471 	}
4472 
4473 	if (IP_VALID(new_port)) {         /* consume send right */
4474 		ipc_port_release_send(new_port);
4475 	}
4476 
4477 	return KERN_SUCCESS;
4478 }
4479 
4480 /*
4481  *	Routine:	thread/task_swap_exception_ports [kernel call]
4482  *	Purpose:
4483  *			Sets the thread/task exception port, flavor and
4484  *			behavior for the exception types specified by the
4485  *			mask.
4486  *
4487  *			The old ports, behavior and flavors are returned
4488  *			Count specifies the array sizes on input and
4489  *			the number of returned ports etc. on output.  The
4490  *			arrays must be large enough to hold all the returned
 *			data, MIG returns an error otherwise.  The masks
4492  *			array specifies the corresponding exception type(s).
4493  *
4494  *	Conditions:
4495  *		Nothing locked.  If successful, consumes
4496  *		the supplied send right.
4497  *
 *		Returns up to [in] CountCnt elements.
4499  *	Returns:
4500  *		KERN_SUCCESS		Changed the special port.
4501  *		KERN_INVALID_ARGUMENT	The thread is null,
4502  *					Illegal mask bit set.
4503  *					Illegal exception behavior
4504  *		KERN_FAILURE		The thread is dead.
4505  *		KERN_NO_ACCESS		Restricted access to set port
4506  */
4507 
kern_return_t
thread_swap_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	ipc_port_t                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t  old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t   privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* MAC hook: the policy may veto setting thread exception ports */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* lazily allocate the per-thread exception action array */
	tro = get_thread_ro(thread);
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	/*
	 * Record the old actions (deduplicated into the caller's arrays)
	 * while installing the new one.  The loop may stop early once the
	 * caller's arrays are full; old_port[] is only initialized up to the
	 * final value of i, which the release loop below relies on.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights outside the mutex.  NOTE(review): i is
	 * unsigned, so termination relies on FIRST_EXCEPTION >= 1.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4620 
kern_return_t
task_swap_exception_ports(
	task_t                                          task,
	exception_mask_t                        exception_mask,
	ipc_port_t                                      new_port,
	exception_behavior_t            new_behavior,
	thread_state_flavor_t           new_flavor,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	ipc_port_t              old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int    i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
	    new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* MAC hook: the policy may veto setting task exception ports */
	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/*
	 * Record the old actions (deduplicated into the caller's arrays)
	 * while installing the new one.  The loop may stop early once the
	 * caller's arrays are full; old_port[] is only initialized up to the
	 * final value of i, which the release loop below relies on.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights outside the lock.  NOTE(review): i is
	 * unsigned, so termination relies on FIRST_EXCEPTION >= 1.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {         /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4729 
4730 /*
4731  *	Routine:	thread/task_get_exception_ports [kernel call]
4732  *	Purpose:
4733  *		Clones a send right for each of the thread/task's exception
4734  *		ports specified in the mask and returns the behaviour
4735  *		and flavor of said port.
4736  *
 *		Returns up to [in] CountCnt elements.
4738  *
4739  *	Conditions:
4740  *		Nothing locked.
4741  *	Returns:
4742  *		KERN_SUCCESS		Extracted a send right.
4743  *		KERN_INVALID_ARGUMENT	The thread is null,
4744  *					Invalid special port,
4745  *					Illegal mask bit set.
4746  *		KERN_FAILURE		The thread is dead.
4747  */
/*
 * Common worker for thread_get_exception_ports{,_info}: walks the
 * thread's exception actions for the masked types and returns either
 * send rights (ports != NULL) or obfuscated port info (ports_info !=
 * NULL), deduplicating identical (port, behavior, flavor) triples into
 * a single entry whose mask accumulates the exception bits.
 */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* caller must ask for at least one of the two output forms */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* no actions were ever installed: report zero entries */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					/* export only obfuscated pointers, never raw rights */
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4840 
/*
 * Return send rights for the thread's exception ports matching
 * exception_mask; thin wrapper selecting the "ports" (rights-returning)
 * mode of the common worker.
 */
kern_return_t
thread_get_exception_ports(
	thread_t                        thread,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
	           NULL, ports, behaviors, flavors);
}
4854 
/*
 * Return obfuscated info (no send rights) about the thread's exception
 * ports matching exception_mask.  Accepts a thread *read* port — the
 * no_eval conversion skips policy evaluation — since no rights are
 * exposed.  Consumes nothing from the caller; drops the thread
 * reference produced by the conversion.
 */
kern_return_t
thread_get_exception_ports_info(
	mach_port_t                     port,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	kern_return_t kr;

	thread_t thread = convert_port_to_thread_read_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
	    ports_info, NULL, behaviors, flavors);

	thread_deallocate(thread);
	return kr;
}
4879 
/*
 * MIG entry point: convert the (control) thread port from user space,
 * then return send rights for the matching exception ports.  Drops the
 * thread reference produced by the conversion.
 */
kern_return_t
thread_get_exception_ports_from_user(
	mach_port_t                     port,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t         *CountCnt,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	kern_return_t kr;

	thread_t thread = convert_port_to_thread(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);

	thread_deallocate(thread);
	return kr;
}
4903 
/*
 *	Routine:	task_get_exception_ports_internal
 *	Purpose:
 *		Common backend for task_get_exception_ports() and
 *		task_get_exception_ports_info().  Snapshots the task's
 *		exception actions selected by exception_mask, coalescing
 *		entries with identical (port, behavior, flavor) triples
 *		into a single output slot whose mask accumulates all
 *		matching exception bits.
 *	Conditions:
 *		Nothing locked on entry.  Callers pass exactly one of
 *		ports_info (opaque, permuted pointers for introspection)
 *		or ports (naked send rights the caller must consume).
 *		On input *CountCnt is the output array capacity; on
 *		success it is updated to the number of entries filled.
 *	Returns:
 *		KERN_SUCCESS		arrays and *CountCnt filled in.
 *		KERN_INVALID_ARGUMENT	bad task, invalid mask bits, or
 *					neither output array supplied.
 *		KERN_FAILURE		task's IPC space is inactive.
 */
static kern_return_t
task_get_exception_ports_internal(
	task_t                          task,
	exception_mask_t                exception_mask,
	exception_mask_array_t          masks,
	mach_msg_type_number_t          *CountCnt,
	exception_port_info_array_t     ports_info,
	exception_port_array_t          ports,
	exception_behavior_array_t      behaviors,
	thread_state_flavor_array_t     flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Reject any bits outside the architecturally valid exception set. */
	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Must have somewhere to put results: either info array or port array. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);

	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	/*
	 * itk_lock is held for the whole scan so the exc_actions array
	 * (and any send rights copied out of it) stays consistent.
	 */
	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/*
			 * j == count means no existing entry matched; start a
			 * new one if the caller's array still has room (extra
			 * entries beyond *CountCnt are silently dropped).
			 */
			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						/* Export only address-permuted values, never raw kernel pointers. */
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRPERM(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRPERM(receiver) : 0;
					}
				} else {
					/* Caller receives (and must consume) a send right. */
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4988 
4989 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4990 task_get_exception_ports(
4991 	task_t                          task,
4992 	exception_mask_t                exception_mask,
4993 	exception_mask_array_t          masks,
4994 	mach_msg_type_number_t          *CountCnt,
4995 	exception_port_array_t          ports,
4996 	exception_behavior_array_t      behaviors,
4997 	thread_state_flavor_array_t     flavors)
4998 {
4999 	return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
5000 	           NULL, ports, behaviors, flavors);
5001 }
5002 
5003 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)5004 task_get_exception_ports_info(
5005 	mach_port_t                     port,
5006 	exception_mask_t                exception_mask,
5007 	exception_mask_array_t          masks,
5008 	mach_msg_type_number_t          *CountCnt,
5009 	exception_port_info_array_t     ports_info,
5010 	exception_behavior_array_t      behaviors,
5011 	thread_state_flavor_array_t     flavors)
5012 {
5013 	kern_return_t kr;
5014 
5015 	task_t task = convert_port_to_task_read_no_eval(port);
5016 
5017 	if (task == TASK_NULL) {
5018 		return KERN_INVALID_ARGUMENT;
5019 	}
5020 
5021 	kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
5022 	    ports_info, NULL, behaviors, flavors);
5023 
5024 	task_deallocate(task);
5025 	return kr;
5026 }
5027 
5028 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)5029 task_get_exception_ports_from_user(
5030 	mach_port_t                     port,
5031 	exception_mask_t                exception_mask,
5032 	exception_mask_array_t          masks,
5033 	mach_msg_type_number_t         *CountCnt,
5034 	exception_port_array_t          ports,
5035 	exception_behavior_array_t      behaviors,
5036 	thread_state_flavor_array_t     flavors)
5037 {
5038 	kern_return_t kr;
5039 
5040 	task_t task = convert_port_to_task(port);
5041 
5042 	if (task == TASK_NULL) {
5043 		return KERN_INVALID_ARGUMENT;
5044 	}
5045 
5046 	kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
5047 
5048 	task_deallocate(task);
5049 	return kr;
5050 }
5051 
5052 /*
5053  *	Routine:	ipc_thread_port_unpin
5054  *	Purpose:
5055  *
5056  *		Called on the thread when it's terminating so that the last ref
5057  *		can be deallocated without a guard exception.
5058  *	Conditions:
5059  *		Thread mutex lock is held.
5060  */
5061 void
ipc_thread_port_unpin(ipc_port_t port)5062 ipc_thread_port_unpin(
5063 	ipc_port_t port)
5064 {
5065 	if (port == IP_NULL) {
5066 		return;
5067 	}
5068 	ip_mq_lock(port);
5069 	port->ip_pinned = 0;
5070 	ip_mq_unlock(port);
5071 }
5072 
5073 /*
5074  *	Routine:	task_register_hardened_exception_handler
5075  *	Purpose:
5076  *      Register a port as a hardened exception handler.
5077  *		See task.defs for additional info
5078  *	Conditions:
5079  *		Nothing locked.
5080  *		Limit of one hardened exception handler per task
5081  *	Returns:
5082  *      KERN_INVALID_ARGUMENT	invalid thread
5083  *      KERN_DENIED             breaking the security policy
5084  *      KERN_NAME_EXISTS        Already set a hardened exception handler on this task
5085  *      KERN_SUCCESS
5086  */
5087 kern_return_t
task_register_hardened_exception_handler(task_t task,uint32_t signed_pc_key,exception_mask_t exceptions_allowed,exception_behavior_t behaviors_allowed,thread_state_flavor_t flavors_allowed,mach_port_t new_port)5088 task_register_hardened_exception_handler(
5089 	task_t task,
5090 	uint32_t signed_pc_key,
5091 	exception_mask_t exceptions_allowed,
5092 	exception_behavior_t behaviors_allowed,
5093 	thread_state_flavor_t flavors_allowed,
5094 	mach_port_t new_port)
5095 {
5096 	ipc_port_t old_port;
5097 
5098 	if (task == TASK_NULL) {
5099 		return KERN_INVALID_ARGUMENT;
5100 	}
5101 	if (IP_VALID(new_port) && !ip_is_exception_port(new_port)) {
5102 		return KERN_INVALID_ARGUMENT;
5103 	}
5104 
5105 
5106 	bool hardened_exception_flow = true;
5107 	kern_return_t kr = set_exception_ports_validation(task, exceptions_allowed,
5108 	    new_port, behaviors_allowed, flavors_allowed, hardened_exception_flow);
5109 	if (kr != KERN_SUCCESS) {
5110 		return kr;
5111 	}
5112 
5113 	/* You can only register one hardened exception handler */
5114 	if (exception_ports_frozen(task)) {
5115 		return KERN_INVALID_ARGUMENT;
5116 	}
5117 	task_ro_flags_set(task, TFRO_FREEZE_EXCEPTION_PORTS);
5118 	itk_lock(task);
5119 
5120 	/* No reason to allow setting this multiple times per task */
5121 	old_port = task->hardened_exception_action.ea.port;
5122 	if (IP_VALID(old_port)) {
5123 		itk_unlock(task);
5124 		return KERN_NAME_EXISTS;
5125 	}
5126 
5127 	/* Stash the semantics for this port on the task */
5128 	struct hardened_exception_action hea;
5129 	hea.ea.port = new_port; /* Donate our send right to the task */
5130 	hea.ea.flavor = flavors_allowed;
5131 	hea.ea.behavior = behaviors_allowed;
5132 	hea.ea.privileged = false;
5133 	hea.ea.label = NULL;
5134 	hea.signed_pc_key = signed_pc_key;
5135 	hea.exception = exceptions_allowed;
5136 
5137 	task->hardened_exception_action = hea;
5138 	itk_unlock(task);
5139 
5140 	return KERN_SUCCESS;
5141 }
5142 
5143 /*
5144  *	Routine:	thread_adopt_exception_handler
5145  *	Purpose:
5146  *      Adopt the hardened exception handler from the current task, for this thread.
5147  *		Allows you to set exception ports on a thread after exception ports
5148  *		have been frozen for the task.
5149  *	Conditions:
5150  *		Nothing locked
5151  *	Returns:
5152  *      KERN_INVALID_ARGUMENT	invalid thread
5153  *      KERN_DENIED             breaking the security policy
5154  *		KERN_SUCCESS
5155  */
5156 kern_return_t
thread_adopt_exception_handler(thread_t thread,mach_port_t exc_port,exception_mask_t exc_mask,exception_behavior_t behavior_mask,thread_state_flavor_t flavor_mask)5157 thread_adopt_exception_handler(
5158 	thread_t thread,
5159 	mach_port_t exc_port,
5160 	exception_mask_t exc_mask,
5161 	exception_behavior_t behavior_mask,
5162 	thread_state_flavor_t flavor_mask
5163 	)
5164 {
5165 	if (thread == THREAD_NULL) {
5166 		return KERN_INVALID_ARGUMENT;
5167 	}
5168 
5169 	task_t task = get_threadtask(thread);
5170 
5171 	if (task != current_task()) {
5172 		return KERN_DENIED;
5173 	}
5174 
5175 	/* We must have exactly one hardened exception port per task */
5176 	if (!exception_ports_frozen(task)) {
5177 		return KERN_DENIED;
5178 	}
5179 
5180 	/* Ensure we see a consistent state of the hardened exception action */
5181 	itk_lock(task);
5182 	struct hardened_exception_action hea = task->hardened_exception_action;
5183 	itk_unlock(task);
5184 
5185 	if (exc_port != IP_NULL && exc_port != hea.ea.port) {
5186 		return KERN_DENIED;
5187 	}
5188 	/* Ensure that the new masks for this thread are a subset of the
5189 	 * allowable masks for this exception handler
5190 	 */
5191 	if (exc_mask & ~hea.exception ||
5192 	    behavior_mask & ~hea.ea.behavior ||
5193 	    flavor_mask & ~hea.ea.flavor) {
5194 		return KERN_DENIED;
5195 	}
5196 
5197 	assert(!IP_VALID(exc_port) || exc_port->ip_immovable_receive);
5198 	assert(!IP_VALID(exc_port) || ip_is_exception_port(exc_port));
5199 
5200 	/* We can safely assume this will be valid because we called set_exception_ports_validation on it when it was originally set on the task */
5201 	return thread_set_exception_ports_internal(thread, exc_mask, exc_port, behavior_mask, flavor_mask, true);
5202 }
5203