xref: /xnu-11417.140.69/osfmk/ipc/ipc_right.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <libkern/coreanalytics/coreanalytics.h>
82 #include <ipc/port.h>
83 #include <ipc/ipc_entry.h>
84 #include <ipc/ipc_space.h>
85 #include <ipc/ipc_object.h>
86 #include <ipc/ipc_hash.h>
87 #include <ipc/ipc_policy.h>
88 #include <ipc/ipc_port.h>
89 #include <ipc/ipc_pset.h>
90 #include <ipc/ipc_right.h>
91 #include <ipc/ipc_notify.h>
92 #include <ipc/ipc_importance.h>
93 #include <ipc/ipc_service_port.h>
94 #include <security/mac_mach_internal.h>
95 
96 extern struct proc *current_proc(void);
97 extern int csproc_hardened_runtime(struct proc* p);
98 
99 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
100 
101 TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
102 static TUNABLE(bool, reply_port_semantics, "reply_port_semantics", true);
103 
104 /*
105  *	Routine:	ipc_right_lookup_read
106  *	Purpose:
107  *		Finds an entry in a space, given the name.
108  *	Conditions:
109  *		Nothing locked.
110  *		If an object is found, it is locked and active.
111  *	Returns:
112  *		KERN_SUCCESS		Found an entry.
113  *		KERN_INVALID_TASK	The space is dead.
114  *		KERN_INVALID_NAME	Name doesn't exist in space.
115  */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t       *bitsp,
	ipc_object_t           *objectp)
{
	mach_port_index_t index;
	ipc_entry_table_t table;
	ipc_entry_t entry;
	ipc_object_t object;
	kern_return_t kr;

	index = MACH_PORT_INDEX(name);
	if (__improbable(index == 0)) {
		/* index 0 never denotes a usable entry */
		*bitsp = 0;
		*objectp = IPC_OBJECT_NULL;
		return KERN_INVALID_NAME;
	}

	smr_ipc_enter();

	/*
	 * Acquire a (possibly stale) pointer to the table,
	 * and guard it so that it can't be deallocated while we use it.
	 *
	 * smr_ipc_enter() has the property that it strongly serializes
	 * after any store-release. This is important because it means that if
	 * one considers this (broken) userspace usage:
	 *
	 * Thread 1:
	 *   - makes a semaphore, gets name 0x1003
	 *   - stores that name to a global `sema` in userspace
	 *
	 * Thread 2:
	 *   - spins to observe `sema` becoming non 0
	 *   - calls semaphore_wait() on 0x1003
	 *
	 * Then, because in order to return 0x1003 this thread issued
	 * a store-release (when calling is_write_unlock()),
	 * then this smr_entered_load() can't possibly observe a table
	 * pointer that is older than the one that was current when the
	 * semaphore was made.
	 *
	 * This fundamental property allows us to never loop.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		/* a NULL table pointer means the space was terminated */
		kr = KERN_INVALID_TASK;
		goto out_put;
	}
	entry = ipc_entry_table_get(table, index);
	if (__improbable(entry == NULL)) {
		/* index is out of bounds for this table */
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Note: this should be an atomic load, but PAC and atomics
	 *       don't interact well together.
	 */
	object = entry->ie_volatile_object;

	/*
	 * Attempt to lock an object that lives in this entry.
	 * It might fail or be a completely different object by now.
	 *
	 * Make sure that acquiring the lock is fully ordered after any
	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
	 * This allows us to always reliably observe space termination below.
	 */
	os_atomic_barrier_before_lock_acquire();
	if (__improbable(object == IPC_OBJECT_NULL ||
	    !io_lock_allow_invalid(object))) {
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Now that we hold the object lock, we are preventing any entry
	 * in this space for this object to be mutated.
	 *
	 * If the space didn't grow after we acquired our hazardous reference,
	 * and before a mutation of the entry, then holding the object lock
	 * guarantees we will observe the truth of ie_bits, ie_object and
	 * ie_request (those are always mutated with the object lock held).
	 *
	 * However this ordering is problematic:
	 * - [A]cquisition of the table pointer
	 * - [G]rowth of the space (making the table pointer in [A] stale)
	 * - [M]utation of the entry
	 * - [L]ocking of the object read through [A].
	 *
	 * The space lock is held for both [G] and [M], and the object lock
	 * is held for [M], which means that once we lock the object we can
	 * observe if [G] happened by reloading the table pointer.
	 *
	 * We might still fail to observe any growth operation that happened
	 * after the last mutation of this object's entry, because holding
	 * an object lock doesn't guarantee anything about the liveness
	 * of the space table pointer. This is not a problem at all: by
	 * definition, those didn't affect the state of the entry.
	 *
	 * TODO: a data-structure where the entries are grown by "slabs",
	 *       would allow for the address of an ipc_entry_t to never
	 *       change once it exists in a space and would avoid a reload
	 *       (as well as making space growth faster).
	 *       We however still need to check for termination.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		kr = KERN_INVALID_TASK;
		goto out_put_unlock;
	}

	/*
	 * Tables never shrink so we don't need to validate the length twice.
	 */
	entry = ipc_entry_table_get_nocheck(table, index);

	/*
	 * Now that we hold the lock and have a "fresh enough" table pointer,
	 * validate if this entry is what we think it is.
	 *
	 * To the risk of being repetitive, we still need to protect
	 * those accesses under SMR, because subsequent
	 * table growths might retire the memory. However we know
	 * those growths will have left our entry unchanged.
	 */
	if (__improbable(entry->ie_object != object)) {
		/* the entry was reused for a different object in the meantime */
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	ipc_entry_bits_t bits = entry->ie_bits;
	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(bits) ||
	    IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* Done with hazardous accesses to the table */
	smr_ipc_leave();

	*bitsp = bits;
	*objectp = object;
	return KERN_SUCCESS;

out_put_unlock:
	io_unlock(object);
out_put:
	smr_ipc_leave();
	return kr;
}
270 
271 /*
272  *	Routine:	ipc_right_lookup_write
273  *	Purpose:
274  *		Finds an entry in a space, given the name.
275  *	Conditions:
276  *		Nothing locked.  If successful, the space is write-locked.
277  *	Returns:
278  *		KERN_SUCCESS		Found an entry.
279  *		KERN_INVALID_TASK	The space is dead.
280  *		KERN_INVALID_NAME	Name doesn't exist in space.
281  */
282 
283 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)284 ipc_right_lookup_write(
285 	ipc_space_t             space,
286 	mach_port_name_t        name,
287 	ipc_entry_t             *entryp)
288 {
289 	ipc_entry_t entry;
290 
291 	assert(space != IS_NULL);
292 
293 	is_write_lock(space);
294 
295 	if (!is_active(space)) {
296 		is_write_unlock(space);
297 		return KERN_INVALID_TASK;
298 	}
299 
300 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
301 		is_write_unlock(space);
302 		return KERN_INVALID_NAME;
303 	}
304 
305 	*entryp = entry;
306 	return KERN_SUCCESS;
307 }
308 
309 /*
310  *	Routine:	ipc_right_lookup_two_write
311  *	Purpose:
312  *		Like ipc_right_lookup except that it returns two
313  *		entries for two different names that were looked
314  *		up under the same space lock.
315  *	Conditions:
316  *		Nothing locked.  If successful, the space is write-locked.
317  *	Returns:
318  *		KERN_INVALID_TASK	The space is dead.
319  *		KERN_INVALID_NAME	Name doesn't exist in space.
320  */
321 
322 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)323 ipc_right_lookup_two_write(
324 	ipc_space_t             space,
325 	mach_port_name_t        name1,
326 	ipc_entry_t             *entryp1,
327 	mach_port_name_t        name2,
328 	ipc_entry_t             *entryp2)
329 {
330 	ipc_entry_t entry1;
331 	ipc_entry_t entry2;
332 
333 	assert(space != IS_NULL);
334 
335 	is_write_lock(space);
336 
337 	if (!is_active(space)) {
338 		is_write_unlock(space);
339 		return KERN_INVALID_TASK;
340 	}
341 
342 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
343 		is_write_unlock(space);
344 		mach_port_guard_exception(name1, 0, kGUARD_EXC_INVALID_NAME);
345 		return KERN_INVALID_NAME;
346 	}
347 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
348 		is_write_unlock(space);
349 		mach_port_guard_exception(name2, 0, kGUARD_EXC_INVALID_NAME);
350 		return KERN_INVALID_NAME;
351 	}
352 	*entryp1 = entry1;
353 	*entryp2 = entry2;
354 	return KERN_SUCCESS;
355 }
356 
357 /*
358  *	Routine:	ipc_right_reverse
359  *	Purpose:
360  *		Translate (space, port) -> (name, entry).
361  *		Only finds send/receive rights.
362  *		Returns TRUE if an entry is found; if so,
363  *		the port active.
364  *	Conditions:
365  *		The space must be locked (read or write) and active.
366  *		The port is locked and active
367  */
368 
369 bool
ipc_right_reverse(ipc_space_t space,ipc_port_t port,mach_port_name_t * namep,ipc_entry_t * entryp)370 ipc_right_reverse(
371 	ipc_space_t             space,
372 	ipc_port_t              port,
373 	mach_port_name_t       *namep,
374 	ipc_entry_t            *entryp)
375 {
376 	mach_port_name_t name;
377 	ipc_entry_t entry;
378 
379 	assert(is_active(space));
380 
381 	require_ip_active(port);
382 
383 	ip_mq_lock_held(port);
384 
385 	if (ip_in_space(port, space)) {
386 		name = ip_get_receiver_name(port);
387 		assert(name != MACH_PORT_NULL);
388 
389 		entry = ipc_entry_lookup(space, name);
390 
391 		assert(entry != IE_NULL);
392 		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
393 		assert(port == entry->ie_port);
394 
395 		*namep = name;
396 		*entryp = entry;
397 		return true;
398 	}
399 
400 	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
401 		entry = *entryp;
402 		assert(entry != IE_NULL);
403 		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
404 		assert(port == entry->ie_port);
405 
406 		return true;
407 	}
408 
409 	return false;
410 }
411 
412 /*
413  *	Routine:	ipc_right_request_cancel
414  *	Purpose:
415  *		Cancel a notification request and return the send-once right.
416  *		Afterwards, entry->ie_request == 0.
417  *	Conditions:
418  *		The space must be write-locked; the port must be locked.
419  *		The port must be active.
420  */
421 
422 static inline ipc_port_t
ipc_right_request_cancel(ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)423 ipc_right_request_cancel(
424 	ipc_port_t              port,
425 	mach_port_name_t        name,
426 	ipc_entry_t             entry)
427 {
428 	ipc_port_request_index_t request = entry->ie_request;
429 
430 	if (request != IE_REQ_NONE) {
431 		entry->ie_request = IE_REQ_NONE;
432 		return ipc_port_request_cancel(port, name, request);
433 	}
434 	return IP_NULL;
435 }
436 
437 /*
438  *	Routine:	ipc_right_dnrequest
439  *	Purpose:
440  *		Make a dead-name request, returning the previously
441  *		registered send-once right.  If notify is IP_NULL,
442  *		just cancels the previously registered request.
443  *
444  *	Conditions:
445  *		Nothing locked.  May allocate memory.
446  *		Only consumes/returns refs if successful.
447  *	Returns:
448  *		KERN_SUCCESS		Made/canceled dead-name request.
449  *		KERN_INVALID_TASK	The space is dead.
450  *		KERN_INVALID_NAME	Name doesn't exist in space.
451  *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
452  *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
453  *			immediate is FALSE or notify is IP_NULL.
454  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
455  */
456 
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: we may have to drop all locks to grow the port's
	 * request table, after which the lookup must be redone from scratch.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && entry->ie_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = entry->ie_port;
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/*
				 * No matter what, we need to cancel any
				 * previous request.
				 */
				previous = ipc_right_request_cancel(port, name, entry);

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) ||
				    !ip_full(port))) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there was a previous request, freeing it
				 * above guarantees that the subsequent
				 * allocation will find a slot and succeed,
				 * thus assuring an atomic swap.
				 */
#if IMPORTANCE_INHERITANCE
				/* arming happens on the first send-possible request */
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					/* table is full: drop locks, grow, and retry */
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				/*
				 * Note: ipc_port_importance_delta() consumes
				 * the port lock when it returns TRUE; we only
				 * unlock here when it returns FALSE.
				 */
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			if (port != IP_NULL) {
				/* drop the reference left over from ipc_right_check() */
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	*previousp = previous;
	return KERN_SUCCESS;
}
617 
618 /*
619  *	Routine:	ipc_right_inuse
620  *	Purpose:
621  *		Check if an entry is being used.
622  *		Returns TRUE if it is.
623  *	Conditions:
624  *		The space is write-locked and active.
625  */
626 
627 bool
ipc_right_inuse(ipc_entry_t entry)628 ipc_right_inuse(
629 	ipc_entry_t entry)
630 {
631 	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
632 }
633 
634 /*
635  *	Routine:	ipc_right_check
636  *	Purpose:
637  *		Check if the port has died.  If it has,
638  *              and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
639  *              passed and it is not a send once right then
640  *		clean up the entry and return TRUE.
641  *	Conditions:
642  *		The space is write-locked; the port is not locked.
643  *		If returns FALSE, the port is also locked.
644  *		Otherwise, entry is converted to a dead name.
645  *
646  *		Caller is responsible for a reference to port if it
647  *		had died (returns TRUE).
648  */
649 
boolean_t
ipc_right_check(
	ipc_space_t              space,
	ipc_port_t               port,
	mach_port_name_t         name,
	ipc_entry_t              entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == entry->ie_port);

	ip_mq_lock(port);
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		/*
		 * Port is still alive (or the caller explicitly tolerates
		 * a dead send-once right): return FALSE, port left locked.
		 */
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		ip_srights_dec(port);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		ip_sorights_dec(port);
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	entry->ie_bits = bits;
	entry->ie_object = IPC_OBJECT_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* returns TRUE: caller now owns a reference on the (dead) port */
	return TRUE;
}
732 
733 /*
734  *	Routine:	ipc_right_terminate
735  *	Purpose:
736  *		Cleans up an entry in a terminated space.
737  *		The entry isn't deallocated or removed
738  *		from reverse hash tables.
739  *	Conditions:
740  *		The space is dead and unlocked.
741  */
742 
void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_port_t port = IP_NULL;
	ipc_pset_t pset = IPS_NULL;

	assert(!is_active(space));

	type   = IE_BITS_TYPE(entry->ie_bits);

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() does, as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type & MACH_PORT_TYPE_PORT_SET) {
		pset = entry->ie_pset;
		ips_mq_lock(pset);
	} else if (type != MACH_PORT_TYPE_DEAD_NAME) {
		port = entry->ie_port;
		ip_mq_lock(port);
	}
	/* keep only the generation/roll bits; the entry is now empty */
	entry->ie_object = IPC_OBJECT_NULL;
	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		break;

	case MACH_PORT_TYPE_PORT_SET:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		if (!ip_active(port)) {
			/* port already died elsewhere: just drop our ref */
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		request = ipc_right_request_cancel(port, name, entry);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
863 
864 /*
865  *	Routine:	ipc_right_destroy
866  *	Purpose:
867  *		Destroys an entry in a space.
868  *	Conditions:
869  *		The space is write-locked (returns unlocked).
870  *		The space must be active.
871  *	Returns:
872  *		KERN_SUCCESS		      The entry was destroyed.
873  *      KERN_INVALID_CAPABILITY   The port is pinned.
874  *      KERN_INVALID_RIGHT        Port guard violation.
875  */
876 
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IPC_OBJECT_NULL);

		ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = entry->ie_pset;

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = entry->ie_port;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		if (!ip_active(port)) {
			/* port died: just reclaim the entry and our ref */
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		ipc_unreachable("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1011 
1012 /*
1013  *	Routine:	ipc_right_dealloc
1014  *	Purpose:
1015  *		Releases a send/send-once/dead-name/port_set user ref.
1016  *		Like ipc_right_delta with a delta of -1,
1017  *		but looks at the entry to determine the right.
1018  *	Conditions:
1019  *		The space is write-locked, and is unlocked upon return.
1020  *		The space must be active.
1021  *	Returns:
1022  *		KERN_SUCCESS		A user ref was released.
1023  *		KERN_INVALID_RIGHT	Entry has wrong type.
1024  *      KERN_INVALID_CAPABILITY  Deallocating a pinned right.
1025  */
1026 
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port sets carry no user refs and take no dead-name requests */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = entry->ie_pset;
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Reached directly for a plain dead name, or via goto from
		 * the SEND / SEND_ONCE cases below when ipc_right_check()
		 * discovered the port died.  In the goto case, `port` is
		 * non-NULL and holds the reference that ipc_right_check()
		 * left for us; it is dropped after the space is unlocked.
		 */
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IPC_OBJECT_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: free the whole entry */
			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always carries exactly one uref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = entry->ie_port;
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; entry was turned into a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and drops the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = entry->ie_port;
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; entry was turned into a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* losing the last uref drops the whole send right */
			if (ip_is_pinned(port)) {
				/* pinned rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right: arm a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel(port, name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* notifications are sent only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = entry->ie_port;
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/*
			 * Last uref: the send right goes away but the
			 * receive right (and thus the entry) remains.
			 */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1224 
1225 /*
1226  *	Routine:	ipc_right_delta
1227  *	Purpose:
1228  *		Modifies the user-reference count for a right.
1229  *		May deallocate the right, if the count goes to zero.
1230  *	Conditions:
1231  *		The space is write-locked, and is unlocked upon return.
1232  *		The space must be active.
1233  *	Returns:
1234  *		KERN_SUCCESS		Count was modified.
1235  *		KERN_INVALID_RIGHT	Entry has wrong type.
1236  *		KERN_INVALID_VALUE	Bad delta for the right.
1237  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1238  */
1239 
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	ipc_port_t port = IP_NULL;
	ipc_port_t port_to_release = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

/*
 *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
 *	switch below. It is used to keep track of those cases (in DIPC)
 *	where we have postponed the dropping of a port reference. Since
 *	the dropping of the reference could cause the port to disappear
 *	we postpone doing so when we are holding the space lock.
 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		/* port sets carry no urefs and take no dead-name requests */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta == 0) {
			goto success;
		}

		/* the only legal non-zero delta for a port set is -1 */
		if (delta != -1) {
			goto invalid_value;
		}

		pset = entry->ie_pset;
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* no exception if we used to hold receive and kept the entry since */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		if (delta == 0) {
			goto success;
		}

		/* the only legal non-zero delta for a receive right is -1 */
		if (delta != -1) {
			goto invalid_value;
		}

		port = entry->ie_port;
		assert(port != IP_NULL);

		/*
		 *	The port lock is needed for ipc_right_dncancel;
		 *	otherwise, we wouldn't have to take the lock
		 *	until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, portguard, kGUARD_EXC_DESTROY);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				/* extra ref: the entry now holds the port as a send right */
				ip_reference(port);
			} else {
				/*
				 *	The remaining send right turns into a
				 *	dead name.  Notice we don't decrement
				 *	ip_srights, generate a no-senders notif,
				 *	or use ipc_right_dncancel, because the
				 *	port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IPC_OBJECT_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			/* pure receive right: the entry goes away entirely */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel(port, name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		/* a send-once right always carries exactly one uref */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = entry->ie_port;
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			/* port has died and removed from entry, release port */
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* only 0 and -1 are meaningful deltas for send-once */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and drops the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			/*
			 * The entry names a live right; it only counts as a
			 * dead name if the underlying port has in fact died.
			 */
			port = entry->ie_port;
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			bits = entry->ie_bits;
			/* port has died and removed from entry, release port */
			port_to_release = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IPC_OBJECT_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			/* this will release port */
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				/* this will release port */
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* refcount hits zero: destroy the dead name entry */
			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		/* deferred drop: could not release while holding the space lock */
		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
			port_to_release = IP_NULL;
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = entry->ie_port;
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			/* port has died and removed from entry, release port */
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* dropping the last uref removes the send right */
			if (ip_is_pinned(port)) {
				/* pinned rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right: arm a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* keep the entry: it still names the receive right */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* release deferred until the space lock is dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
			port_to_release = IP_NULL;
		}

		/* notifications are sent only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	/* drop any reference ipc_right_check() left with us */
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	/* drop any deferred dead-port reference */
	if (port_to_release) {
		ip_release(port_to_release);
	}
	mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* locks already dropped and exception already raised above */
	return KERN_INVALID_RIGHT;
}
1681 
1682 /*
1683  *	Routine:	ipc_right_destruct
1684  *	Purpose:
1685  *		Deallocates the receive right and modifies the
1686  *		user-reference count for the send rights as requested.
1687  *	Conditions:
1688  *		The space is write-locked, and is unlocked upon return.
1689  *		The space must be active.
1690  *	Returns:
1691  *		KERN_SUCCESS		Count was modified.
1692  *		KERN_INVALID_RIGHT	Entry has wrong type.
1693  *		KERN_INVALID_VALUE	Bad delta for the right.
1694  */
1695 
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-right delta is only meaningful if we actually hold send rights */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send-right deltas are allowed here */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = entry->ie_port;
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* all our send urefs gone: drop the send right itself */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			/* extra ref: the entry now holds the port as a send right */
			ip_reference(port);
		} else {
			/*
			 *	The remaining send right turns into a
			 *	dead name.  Notice we don't decrement
			 *	ip_srights, generate a no-senders notif,
			 *	or use ipc_right_dncancel, because the
			 *	port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IPC_OBJECT_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		/* pure receive right: the entry goes away entirely */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1868 
1869 
1870 /*
1871  *	Routine:	ipc_right_info
1872  *	Purpose:
1873  *		Retrieves information about the right.
1874  *	Conditions:
1875  *		The space is active and write-locked.
1876  *	        The space is unlocked upon return.
1877  *	Returns:
1878  *		KERN_SUCCESS		Retrieved info
1879  */
1880 
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = entry->ie_port;

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* fold in any registered-notification type bits */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked.  Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* port died: entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			/* drop the reference ipc_right_check() left with us */
			ip_release(port);
		}
	} else {
		/* dead name or port set: nothing extra to collect */
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1936 
1937 /*
1938  *	Routine:	ipc_right_copyin_check_reply
1939  *	Purpose:
1940  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1941  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1942  *		If the reply port is an immovable send right, it errors out.
1943  *	Conditions:
1944  *		The space is locked (read or write) and active.
1945  */
1946 
boolean_t
ipc_right_copyin_check_reply(
	__assert_only ipc_space_t       space,
	mach_port_name_t                reply_name,
	ipc_entry_t                     reply_entry,
	mach_msg_type_name_t            reply_type,
	ipc_entry_t                     dest_entry,
	uint8_t                         *reply_port_semantics_violation)
{
	ipc_entry_bits_t bits;
	ipc_port_t reply_port;
	ipc_port_t dest_port;
	bool violate_reply_port_semantics = false;

	bits = reply_entry->ie_bits;
	assert(is_active(space));

	switch (reply_type) {
	case MACH_MSG_TYPE_MAKE_SEND:
		/* making a send right requires holding the receive right */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MOVE_RECEIVE:
		/* ipc_kmsg_copyin_header already filters it out */
		return FALSE;

	case MACH_MSG_TYPE_COPY_SEND:
	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* dead names are always acceptable to copy in */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			break;
		}

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			return FALSE;
		}

		reply_port = reply_entry->ie_port;
		assert(reply_port != IP_NULL);

		/*
		 * active status peek to avoid checks that will be skipped
		 * on copyin for dead ports.  Lock not held, so will not be
		 * atomic (but once dead, there's no going back).
		 */
		if (!ip_active(reply_port)) {
			break;
		}

		/*
		 * Can't copyin a send right that is marked immovable. This bit
		 * is set only during port creation and never unset. So it can
		 * be read without a lock.
		 */
		if (ip_is_immovable_send(reply_port)) {
			mach_port_guard_exception_immovable(space, reply_name, reply_port);
			return FALSE;
		}

		/* the entry must actually carry the right the disposition names */
		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
				return FALSE;
			}
		} else {
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				return FALSE;
			}
		}

		break;
	}

	default:
		panic("ipc_right_copyin_check: strange rights");
	}

	/* port sets never participate in reply-port semantics checks */
	if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
	    (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
		return TRUE;
	}

	/* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
	reply_port = reply_entry->ie_port;
	assert(reply_port != IP_NULL);

	if (ip_active(reply_port)) {
		if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
			return FALSE;
		}

		/* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
		dest_port = dest_entry->ie_port;
		if (IP_VALID(dest_port)) {
			ip_mq_lock(dest_port);
			if (ip_active(dest_port)) {
				/* populates reply_port_semantics_violation if we need to send telemetry */
				violate_reply_port_semantics = ip_violates_rigid_reply_port_semantics(dest_port, reply_port, reply_port_semantics_violation) ||
				    ip_violates_reply_port_semantics(dest_port, reply_port, reply_port_semantics_violation);
			}
			ip_mq_unlock(dest_port);
			/*
			 * reply_port_semantics is declared elsewhere in the file —
			 * presumably a boot-arg tunable gating enforcement vs.
			 * telemetry-only; TODO(review) confirm against its definition.
			 */
			if (violate_reply_port_semantics && reply_port_semantics) {
				mach_port_guard_exception(reply_name, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
				return FALSE;
			}
		}
	}

	return TRUE;
}
2064 
2065 /*
2066  *	Routine:	ipc_right_copyin_check_guard_locked
2067  *	Purpose:
2068  *		Check if the port is guarded and the guard
2069  *		value matches the one passed in the arguments.
2070  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2071  *		check if the port is unguarded.
2072  *	Conditions:
2073  *		The port is locked.
2074  *	Returns:
2075  *		KERN_SUCCESS		Port is either unguarded
2076  *					or guarded with expected value
2077  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2078  *					This also raises a EXC_GUARD exception.
2079  */
static kern_return_t
ipc_right_copyin_check_guard_locked(
	ipc_port_t              port,
	mach_port_name_t        name,
	mach_msg_guarded_port_descriptor_t *gdesc)
{
	mach_port_context_t    context = gdesc->u_context;
	mach_msg_guard_flags_t flags   = gdesc->flags;

	/*
	 * Two ways to pass the check:
	 *  1. The sender claims the port is unguarded
	 *     (MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND with a zero context)
	 *     and the port is indeed unguarded.
	 *  2. The port is guarded and the supplied context matches the
	 *     port's guard value.
	 */
	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
		return KERN_SUCCESS;
	} else if (port->ip_guarded && (port->ip_context == context)) {
		return KERN_SUCCESS;
	}

	/* Incorrect guard; Raise exception */
	mach_port_guard_exception(name, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
	return KERN_INVALID_ARGUMENT;
}
2099 
2100 void
ipc_right_copyin_rcleanup_init(ipc_copyin_rcleanup_t * icrc,mach_msg_guarded_port_descriptor_t * gdesc)2101 ipc_right_copyin_rcleanup_init(
2102 	ipc_copyin_rcleanup_t  *icrc,
2103 	mach_msg_guarded_port_descriptor_t *gdesc)
2104 {
2105 	*icrc = (ipc_copyin_rcleanup_t){
2106 		.icrc_guarded_desc = gdesc,
2107 	};
2108 }
2109 
2110 void
ipc_right_copyin_cleanup_destroy(ipc_copyin_cleanup_t * icc,mach_port_name_t name)2111 ipc_right_copyin_cleanup_destroy(
2112 	ipc_copyin_cleanup_t   *icc,
2113 	mach_port_name_t        name)
2114 {
2115 	if (icc->icc_release_port) {
2116 		ip_release(icc->icc_release_port);
2117 	}
2118 	if (icc->icc_deleted_port) {
2119 		ipc_notify_port_deleted(icc->icc_deleted_port, name);
2120 	}
2121 }
2122 
2123 void
ipc_right_copyin_rcleanup_destroy(ipc_copyin_rcleanup_t * icrc)2124 ipc_right_copyin_rcleanup_destroy(ipc_copyin_rcleanup_t *icrc)
2125 {
2126 #if IMPORTANCE_INHERITANCE
2127 	if (icrc->icrc_assert_count) {
2128 		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base,
2129 		    icrc->icrc_assert_count);
2130 	}
2131 #endif /* IMPORTANCE_INHERITANCE */
2132 	if (icrc->icrc_free_list.next) {
2133 		waitq_link_free_list(WQT_PORT_SET, &icrc->icrc_free_list);
2134 	}
2135 }
2136 
2137 /*
2138  *	Routine:	ipc_right_copyin
2139  *	Purpose:
2140  *		Copyin a capability from a space.
2141  *		If successful, the caller gets a ref
2142  *		for the resulting port, unless it is IP_DEAD,
2143  *		and possibly a send-once right which should
2144  *		be used in a port-deleted notification.
2145  *
2146  *		If deadok is not TRUE, the copyin operation
 *		will fail instead of producing IP_DEAD.
2148  *
2149  *		The entry is deallocated if the entry type becomes
2150  *		MACH_PORT_TYPE_NONE.
2151  *	Conditions:
2152  *		The space is write-locked and active.
2153  *	Returns:
2154  *		KERN_SUCCESS		Acquired a port, possibly IP_DEAD.
2155  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2156  *		KERN_INVALID_CAPABILITY	Trying to move a kobject port,
2157  *					an immovable right or
2158  *					the last ref of a pinned right
2159  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2160  */
2161 
kern_return_t
ipc_right_copyin(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyin_flags_t  flags,
	ipc_entry_t             entry,
	ipc_port_t             *portp,
	ipc_copyin_cleanup_t   *icc,
	ipc_copyin_rcleanup_t  *icrc)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	kern_return_t kr;
	/*
	 * Number of send-right urefs this copyin consumes: 2 when the
	 * destination requires an extra MOVE of the same right, else 1.
	 */
	uint32_t moves = (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_MOVE) ? 2 : 1;
	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
	boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
	boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);

	/* EXTRA_MOVE and EXTRA_COPY are mutually exclusive refinements of MOVE_SEND */
	if (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_MOVE) {
		assert((flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY) == 0);
		assert(msgt_name == MACH_MSG_TYPE_MOVE_SEND);
	}
	if (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY) {
		assert(msgt_name == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_name == MACH_MSG_TYPE_COPY_SEND);
	}

	/* initialize out-parameters before any early return */
	*portp = IP_NULL;
	icc->icc_release_port = IP_NULL;
	icc->icc_deleted_port = IP_NULL;

	bits = entry->ie_bits;

	assert(is_active(space));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = entry->ie_port;
		assert(port != IP_NULL);

		/* reply ports only ever vend send-once rights */
		if (ip_is_reply_port(port)) {
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		ipc_port_make_send_any_locked(port);
		ip_mq_unlock(port);

		*portp = port;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = entry->ie_port;
		assert(port != IP_NULL);

		if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		*portp = port;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		bool allow_imm_recv = false;
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = entry->ie_port;
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
		 * The ipc_port structure uses the kdata union of kobject and
		 * imp_task exclusively. Thus, general use of a kobject port as
		 * a receive right can cause type confusion in the importance
		 * code.
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			/*
			 * Distinguish an invalid right, e.g., trying to move
			 * a send right as a receive right, from this
			 * situation which is, "This is a valid receive right,
			 * but it's also a kobject and you can't move it."
			 */
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * Certain port classes may override the immovable-receive
		 * restriction, controlled by caller-supplied flags.
		 */
		if (port->ip_service_port && port->ip_splabel &&
		    !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
		} else if (ip_is_libxpc_connection_port(port)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
		}

		if ((!allow_imm_recv && port->ip_immovable_receive) ||
		    ip_is_reply_port(port) ||     /* never move reply port rcv right */
		    port->ip_specialreply) {
			assert(!ip_in_space(port, ipc_space_kernel));
			ip_mq_unlock(port);
			assert(current_task() != kernel_task);
			mach_port_guard_exception(name, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* validate the sender-supplied guard before taking the right */
		if (icrc->icrc_guarded_desc) {
			kr = ipc_right_copyin_check_guard_locked(port, name,
			    icrc->icrc_guarded_desc);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock(port);
				return kr;
			}
			/* this flag will be cleared during copyout */
			icrc->icrc_guarded_desc->flags |=
			    MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			/* entry keeps the send right; demote RECEIVE to EX_RECEIVE */
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port), name, entry);
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			/* entry dies; cancel any dead-name request on it */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel(port, name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
		(void)ipc_port_clear_receiver(port, FALSE, &icrc->icrc_free_list); /* don't destroy the port/mqueue */

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in.  Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, We'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			icrc->icrc_assert_count = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		ip_mq_unlock(port);

		*portp = port;
		icc->icc_deleted_port = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = entry->ie_port;
		assert(port != IP_NULL);

		/* ipc_right_check converts the entry to a dead name if the port died */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			icc->icc_release_port = port;
			goto copy_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port);
			return KERN_INVALID_CAPABILITY;
		}

		ipc_port_copy_send_any_locked(port);
		if (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY) {
			ipc_port_copy_send_any_locked(port);
		}
		ip_mq_unlock(port);

		*portp = port;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = entry->ie_port;
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			icc->icc_release_port = port;
			goto move_dead;
		}
		/* port is locked and active */

		/* need at least "moves" urefs to satisfy the move(s) */
		if ((bits & MACH_PORT_TYPE_SEND) == 0 ||
		    IE_BITS_UREFS(bits) < moves) {
			ip_mq_unlock(port);
			goto invalid_right;
		}

		/* moving the last uref of a pinned right is forbidden */
		if (ip_is_pinned(port) && IE_BITS_UREFS(bits) == moves) {
			ip_mq_unlock(port);
			mach_port_guard_exception_pinned(space, name,
			    port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
			return KERN_INVALID_CAPABILITY;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == moves) {
			assert(port->ip_srights > 0);

			/*
			 * We have exactly "moves" send rights for this port
			 * in this space, which means that we will liberate the
			 * naked send right held by this entry.
			 *
			 * However refcounting rules around entries are that
			 * naked send rights on behalf of spaces do not have an
			 * associated port reference, so we need to donate one
			 * ...
			 */
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				/*
				 * ... that we inject manually when the entry
				 * stays alive
				 */
				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* ... that we steal from the entry when it dies */
				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			ipc_port_copy_send_any_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - moves; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		if (flags & (IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY |
		    IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_MOVE)) {
			ipc_port_copy_send_any_locked(port);
		}

		ip_mq_unlock(port);
		*portp = port;
		icc->icc_deleted_port = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		ipc_port_t request;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = entry->ie_port;
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, flags)) {
			bits = entry->ie_bits;
			icc->icc_release_port = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port) && !allow_reply_move_so) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port);
			return KERN_INVALID_CAPABILITY;
		}

		/* a send-once entry always holds exactly one uref */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*portp = port;
		icc->icc_deleted_port = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

	/* COPY of a dead name: entry keeps its urefs, caller gets IP_DEAD */
copy_dead:
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*portp = IP_DEAD;
	return KERN_SUCCESS;

	/* MOVE of a dead name: consume "moves" urefs, caller gets IP_DEAD */
move_dead:
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IPC_OBJECT_NULL);

	if (!deadok || IE_BITS_UREFS(bits) < moves) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == moves) {
		ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - moves; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*portp = IP_DEAD;
	return KERN_SUCCESS;
}
2631 
2632 /*
2633  *	Routine:	ipc_right_copyout
2634  *	Purpose:
2635  *		Copyout a capability to a space.
2636  *		If successful, consumes a ref for the port.
2637  *
2638  *		Always succeeds when given a newly-allocated entry,
2639  *		because user-reference overflow isn't a possibility.
2640  *
2641  *		If copying out the port would cause the user-reference
2642  *		count in the entry to overflow, then the user-reference
2643  *		count is left pegged to its maximum value and the copyout
2644  *		succeeds anyway.
2645  *	Conditions:
2646  *		The space is write-locked and active.
2647  *		The port is locked and active.
 *		On return, the port is unlocked; the space remains locked.
2649  *	Returns:
2650  *		KERN_SUCCESS		Copied out capability.
2651  */
2652 
kern_return_t
ipc_right_copyout(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_guarded_port_descriptor_t *gdesc)
{
	ipc_entry_bits_t bits;
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	bits = entry->ie_bits;

	assert(IP_VALID(port));
	assert(ip_active(port));
	assert(entry->ie_port == port);

	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		/* special reply ports get (re)linked to the receiving workloop */
		if (port->ip_specialreply) {
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				ip_srights_dec(port);
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			ip_srights_dec(port);
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		/* re-arm the receive-right guard requested by the sender */
		if (gdesc && gdesc->flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) {
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_recv_bufs.recv_msg_addr;
			gdesc->u_context = port->ip_context;
			gdesc->flags &= ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (ip_is_libxpc_connection_port(port)) {
			/*
			 * There are 3 ways to reach here.
			 * 1. A libxpc client successfully sent this receive right to a named service
			 *    and we are copying out in that service's ipc space.
			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
			 *
			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
			 */
			port->ip_immovable_receive = 1;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/*
			 * This flag gets reset during all 3 ways described above for libxpc connection port.
			 * The only difference is launchd acts as an initiator instead of a libxpc client.
			 */
			if (service_port_defense_enabled) {
				port->ip_immovable_receive = 1;
			}

			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesnt end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}

			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			if (get_bsdtask_info(current_task()) != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		/* the port is no longer in transit; settle up with its old destination */
		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port.  They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		ipc_unreachable("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
2893