xref: /xnu-8019.80.24/osfmk/ipc/ipc_right.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <ipc/port.h>
81 #include <ipc/ipc_entry.h>
82 #include <ipc/ipc_space.h>
83 #include <ipc/ipc_object.h>
84 #include <ipc/ipc_hash.h>
85 #include <ipc/ipc_port.h>
86 #include <ipc/ipc_pset.h>
87 #include <ipc/ipc_right.h>
88 #include <ipc/ipc_notify.h>
89 #include <ipc/ipc_table.h>
90 #include <ipc/ipc_importance.h>
91 #include <ipc/ipc_service_port.h>
92 #include <security/mac_mach_internal.h>
93 
94 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
95 
96 /*
97  *	Routine:	ipc_right_lookup_read
98  *	Purpose:
99  *		Finds an entry in a space, given the name.
100  *	Conditions:
101  *		Nothing locked.
102  *		If an object is found, it is locked and active.
103  *	Returns:
104  *		KERN_SUCCESS		Found an entry.
105  *		KERN_INVALID_TASK	The space is dead.
106  *		KERN_INVALID_NAME	Name doesn't exist in space.
107  */
108 #if MACH_LOCKFREE_SPACE
109 kern_return_t
ipc_right_lookup_read(ipc_space_t space,mach_port_name_t name,ipc_entry_bits_t * bitsp,ipc_object_t * objectp)110 ipc_right_lookup_read(
111 	ipc_space_t             space,
112 	mach_port_name_t        name,
113 	ipc_entry_bits_t       *bitsp,
114 	ipc_object_t           *objectp)
115 {
116 	mach_port_index_t index;
117 	ipc_entry_t table;
118 	hazard_guard_t guard;
119 	ipc_object_t object;
120 	kern_return_t kr;
121 
122 	index = MACH_PORT_INDEX(name);
123 	if (__improbable(index == 0)) {
124 		*bitsp = 0;
125 		*objectp = IO_NULL;
126 		return KERN_INVALID_NAME;
127 	}
128 
129 	guard = hazard_guard_get(0);
130 
131 	/*
132 	 * Acquire a (possibly stale) pointer to the table,
133 	 * and guard it so that it can't be deallocated while we use it.
134 	 *
135 	 * hazard_guard_acquire() has the property that it strongly serializes
136 	 * after any store-release. This is important because it means that if
137 	 * one considers this (broken) userspace usage:
138 	 *
139 	 * Thread 1:
140 	 *   - makes a semaphore, gets name 0x1003
141 	 *   - stores that name to a global `sema` in userspace
142 	 *
143 	 * Thread 2:
144 	 *   - spins to observe `sema` becoming non 0
145 	 *   - calls semaphore_wait() on 0x1003
146 	 *
147 	 * Then, because in order to return 0x1003 this thread issued
148 	 * a store-release (when calling is_write_unlock()),
149 	 * then this hazard_guard_acquire() can't possibly observe a table
150 	 * pointer that is older than the one that was current when the
151 	 * semaphore was made.
152 	 *
153 	 * This fundamental property allows us to never loop (though arguably
154 	 * that is because the loop is inside hazard_guard_acquire()).
155 	 */
156 	table = hazard_guard_acquire(guard, &space->is_table);
157 	if (__improbable(table == NULL)) {
158 		kr = KERN_INVALID_TASK;
159 		goto out_put;
160 	}
161 	if (__improbable(index >= table->ie_size)) {
162 		kr = KERN_INVALID_NAME;
163 		goto out_put;
164 	}
165 
166 	/*
167 	 * Note: this should be an atomic load, but PAC and atomics
168 	 *       don't work interact well together.
169 	 */
170 	object = table[index].ie_volatile_object;
171 
172 	/*
173 	 * Attempt to lock an object that lives in this entry.
174 	 * It might fail or be a completely different object by now.
175 	 *
176 	 * Make sure that acquiring the lock is fully ordered after any
177 	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
178 	 * This allows us to always reliably observe space termination below.
179 	 */
180 	os_atomic_barrier_before_lock_acquire();
181 	if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
182 		kr = KERN_INVALID_NAME;
183 		goto out_put;
184 	}
185 
186 	/*
187 	 * Now that we hold the object lock, we are preventing any entry
188 	 * in this space for this object to be mutated.
189 	 *
190 	 * If the space didn't grow after we acquired our hazardous reference,
191 	 * and before a mutation of the entry, then holding the object lock
192 	 * guarantees we will observe the truth of ie_bits, ie_object and
193 	 * ie_request (those are always mutated with the object lock held).
194 	 *
195 	 * However this ordering is problematic:
196 	 * - [A]cquisition of the table pointer
197 	 * - [G]rowth of the space (making the table pointer in [A] stale)
198 	 * - [M]utation of the entry
199 	 * - [L]ocking of the object read through [A].
200 	 *
201 	 * The space lock is held for both [G] and [M], and the object lock
202 	 * is held for [M], which means that once we lock the object we can
203 	 * observe if [G] happenend by reloading the table pointer.
204 	 *
205 	 * We might still fail to observe any growth operation that happened
206 	 * after the last mutation of this object's entry, because holding
207 	 * an object lock doesn't guarantee anything about the liveness
208 	 * of the space table pointer. This is not a problem at all: by
209 	 * definition, those didn't affect the state of the entry.
210 	 *
211 	 * TODO: a data-structure where the entries are grown by "slabs",
212 	 *       would allow for the address of an ipc_entry_t to never
213 	 *       change once it exists in a space and would avoid a full
214 	 *       hazard reacquire (as well as making space growth faster).
215 	 *       We however still need to check for termination.
216 	 */
217 	ipc_entry_t tmp = hazard_ptr_load(&space->is_table);
218 	if (__improbable(tmp != table)) {
219 		table = hazard_guard_reacquire_val(guard, &space->is_table, tmp);
220 		if (__improbable(table == NULL)) {
221 			kr = KERN_INVALID_TASK;
222 			goto out_put_unlock;
223 		}
224 	}
225 
226 	/*
227 	 * Now that we hold the lock and have a "fresh enough" table pointer,
228 	 * validate if this entry is what we think it is.
229 	 *
230 	 * To the risk of being repetitive, we still need to protect
231 	 * those accesses under the hazard guard, because subsequent
232 	 * table growths might retire the memory. However we know
233 	 * those growths will have left our entry unchanged.
234 	 */
235 	if (__improbable(table[index].ie_object != object)) {
236 		kr = KERN_INVALID_NAME;
237 		goto out_put_unlock;
238 	}
239 
240 	ipc_entry_bits_t bits = table[index].ie_bits;
241 	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
242 	    IE_BITS_TYPE(bits == MACH_PORT_TYPE_NONE))) {
243 		kr = KERN_INVALID_NAME;
244 		goto out_put_unlock;
245 	}
246 
247 	/* Done with hazardous accesses to the table */
248 	hazard_guard_put(guard);
249 
250 	*bitsp = bits;
251 	*objectp = object;
252 	return KERN_SUCCESS;
253 
254 out_put_unlock:
255 	ipc_object_unlock(object);
256 out_put:
257 	hazard_guard_put(guard);
258 	return kr;
259 }
260 #else /* !MACH_LOCKFREE_SPACE */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t       *bitsp,
	ipc_object_t           *objectp)
{
	kern_return_t kr;
	ipc_entry_t entry;
	ipc_object_t obj;
	ipc_entry_bits_t bits;

	/*
	 * Non-lockfree fallback: do the lookup under the space write lock,
	 * then hand off to the object lock before dropping the space lock.
	 */
	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	/* snapshot the entry while the space lock still protects it */
	obj = entry->ie_object;
	bits = entry->ie_bits;
	if (obj == IO_NULL) {
		is_write_unlock(space);
		return KERN_INVALID_NAME;
	}

	/* take the object lock before releasing the space lock (hand-off) */
	io_lock(obj);
	is_write_unlock(space);

	/* the object may have died while unlocked by another path */
	if (!io_active(obj)) {
		io_unlock(obj);
		return KERN_INVALID_NAME;
	}

	/* returns with the object locked and active */
	*objectp = obj;
	*bitsp = bits;
	return KERN_SUCCESS;
}
298 #endif
299 
300 /*
301  *	Routine:	ipc_right_lookup_write
302  *	Purpose:
303  *		Finds an entry in a space, given the name.
304  *	Conditions:
305  *		Nothing locked.  If successful, the space is write-locked.
306  *	Returns:
307  *		KERN_SUCCESS		Found an entry.
308  *		KERN_INVALID_TASK	The space is dead.
309  *		KERN_INVALID_NAME	Name doesn't exist in space.
310  */
311 
312 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)313 ipc_right_lookup_write(
314 	ipc_space_t             space,
315 	mach_port_name_t        name,
316 	ipc_entry_t             *entryp)
317 {
318 	ipc_entry_t entry;
319 
320 	assert(space != IS_NULL);
321 
322 	is_write_lock(space);
323 
324 	if (!is_active(space)) {
325 		is_write_unlock(space);
326 		return KERN_INVALID_TASK;
327 	}
328 
329 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
330 		is_write_unlock(space);
331 		return KERN_INVALID_NAME;
332 	}
333 
334 	*entryp = entry;
335 	return KERN_SUCCESS;
336 }
337 
338 /*
339  *	Routine:	ipc_right_lookup_two_write
340  *	Purpose:
341  *		Like ipc_right_lookup except that it returns two
342  *		entries for two different names that were looked
343  *		up under the same space lock.
344  *	Conditions:
345  *		Nothing locked.  If successful, the space is write-locked.
346  *	Returns:
347  *		KERN_INVALID_TASK	The space is dead.
348  *		KERN_INVALID_NAME	Name doesn't exist in space.
349  */
350 
351 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)352 ipc_right_lookup_two_write(
353 	ipc_space_t             space,
354 	mach_port_name_t        name1,
355 	ipc_entry_t             *entryp1,
356 	mach_port_name_t        name2,
357 	ipc_entry_t             *entryp2)
358 {
359 	ipc_entry_t entry1;
360 	ipc_entry_t entry2;
361 
362 	assert(space != IS_NULL);
363 
364 	is_write_lock(space);
365 
366 	if (!is_active(space)) {
367 		is_write_unlock(space);
368 		return KERN_INVALID_TASK;
369 	}
370 
371 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
372 		is_write_unlock(space);
373 		mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
374 		return KERN_INVALID_NAME;
375 	}
376 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
377 		is_write_unlock(space);
378 		mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
379 		return KERN_INVALID_NAME;
380 	}
381 	*entryp1 = entry1;
382 	*entryp2 = entry2;
383 	return KERN_SUCCESS;
384 }
385 
386 /*
387  *	Routine:	ipc_right_reverse
388  *	Purpose:
389  *		Translate (space, object) -> (name, entry).
390  *		Only finds send/receive rights.
 *		Returns TRUE if an entry is found; if so,
 *		the object is active.
393  *	Conditions:
394  *		The space must be locked (read or write) and active.
395  *		The port is locked and active
396  */
397 
bool
ipc_right_reverse(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_port_name_t        *namep,
	ipc_entry_t             *entryp)
{
	ipc_port_t port;
	mach_port_name_t name;
	ipc_entry_t entry;

	/* would switch on io_otype to handle multiple types of object */

	assert(is_active(space));
	assert(io_otype(object) == IOT_PORT);

	port = ip_object_to_port(object);
	require_ip_active(port);

	ip_mq_lock_held(port);

	/*
	 * Fast path: if the receive right lives in this space, the port
	 * itself records the receive right's name — translate directly
	 * instead of searching.
	 */
	if (ip_in_space(port, space)) {
		name = ip_get_receiver_name(port);
		assert(name != MACH_PORT_NULL);

		entry = ipc_entry_lookup(space, name);

		assert(entry != IE_NULL);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
		assert(port == ip_object_to_port(entry->ie_object));

		*namep = name;
		*entryp = entry;
		return true;
	}

	/*
	 * Otherwise consult the reverse hash, which only tracks pure
	 * send rights (hence the TYPE == SEND assertion below).
	 */
	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
		assert((entry = *entryp) != IE_NULL);
		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
		assert(port == ip_object_to_port(entry->ie_object));

		return true;
	}

	/* no send or receive right for this port in this space */
	return false;
}
444 
445 /*
 *	Routine:	ipc_right_request_alloc
447  *	Purpose:
448  *		Make a dead-name request, returning the previously
449  *		registered send-once right.  If notify is IP_NULL,
450  *		just cancels the previously registered request.
451  *
452  *	Conditions:
453  *		Nothing locked.  May allocate memory.
454  *		Only consumes/returns refs if successful.
455  *	Returns:
456  *		KERN_SUCCESS		Made/canceled dead-name request.
457  *		KERN_INVALID_TASK	The space is dead.
458  *		KERN_INVALID_NAME	Name doesn't exist in space.
459  *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
460  *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
461  *			immediate is FALSE or notify is IP_NULL.
462  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
463  */
464 
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	boolean_t               immediate,
	boolean_t               send_possible,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_request_index_t prev_request;
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	boolean_t needboost = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: the only way back to the top is when the port's
	 * request table must grow, which requires dropping all locks.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		prev_request = entry->ie_request;

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && prev_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (send_possible && immediate &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) || !ip_full(port))) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					/* deliver the notification now, with no locks held */
					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there is a previous request, free it.  Any subsequent
				 * allocation cannot fail, thus assuring an atomic swap.
				 */
				if (prev_request != IE_REQ_NONE) {
					previous = ipc_port_request_cancel(port, name, prev_request);
				}

#if IMPORTANCE_INHERITANCE
				kr = ipc_port_request_alloc(port, name, notify,
				    send_possible, immediate,
				    &new_request, &needboost);
#else
				kr = ipc_port_request_alloc(port, name, notify,
				    send_possible, immediate,
				    &new_request);
#endif /* IMPORTANCE_INHERITANCE */
				if (kr != KERN_SUCCESS) {
					/*
					 * The request table is full: drop the space
					 * lock, grow the table (unlocks the port),
					 * and retry the whole lookup from scratch.
					 */
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}


				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				/*
				 * ipc_port_importance_delta() consumes the port
				 * lock when it returns TRUE; only unlock here
				 * when it did not.
				 */
				if (needboost == TRUE) {
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if ((send_possible || immediate) && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the port ref left over from ipc_right_check() */
			if (port != IP_NULL) {
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		/*
		 * Wrong kind of right for a request: distinguish "right exists
		 * but arguments don't apply" from "not a port or dead name".
		 */
		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	/* return the previously registered send-once right (if any) */
	*previousp = previous;
	return KERN_SUCCESS;
}
633 
634 /*
635  *	Routine:	ipc_right_request_cancel
636  *	Purpose:
637  *		Cancel a notification request and return the send-once right.
638  *		Afterwards, entry->ie_request == 0.
639  *	Conditions:
640  *		The space must be write-locked; the port must be locked.
641  *		The port and space must be active.
642  */
643 
644 ipc_port_t
ipc_right_request_cancel(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)645 ipc_right_request_cancel(
646 	ipc_space_t                     space,
647 	ipc_port_t                      port,
648 	mach_port_name_t                name,
649 	ipc_entry_t                     entry)
650 {
651 	ipc_port_t previous;
652 
653 	require_ip_active(port);
654 	assert(is_active(space));
655 	assert(port == ip_object_to_port(entry->ie_object));
656 
657 	if (entry->ie_request == IE_REQ_NONE) {
658 		return IP_NULL;
659 	}
660 
661 	previous = ipc_port_request_cancel(port, name, entry->ie_request);
662 	entry->ie_request = IE_REQ_NONE;
663 	ipc_entry_modified(space, name, entry);
664 	return previous;
665 }
666 
667 /*
668  *	Routine:	ipc_right_inuse
669  *	Purpose:
670  *		Check if an entry is being used.
671  *		Returns TRUE if it is.
672  *	Conditions:
673  *		The space is write-locked and active.
674  */
675 
676 bool
ipc_right_inuse(ipc_entry_t entry)677 ipc_right_inuse(
678 	ipc_entry_t entry)
679 {
680 	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
681 }
682 
683 /*
684  *	Routine:	ipc_right_check
685  *	Purpose:
686  *		Check if the port has died.  If it has,
687  *              and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
688  *              passed and it is not a send once right then
689  *		clean up the entry and return TRUE.
690  *	Conditions:
691  *		The space is write-locked; the port is not locked.
692  *		If returns FALSE, the port is also locked.
693  *		Otherwise, entry is converted to a dead name.
694  *
695  *		Caller is responsible for a reference to port if it
696  *		had died (returns TRUE).
697  */
698 
boolean_t
ipc_right_check(
	ipc_space_t              space,
	ipc_port_t               port,
	mach_port_name_t         name,
	ipc_entry_t              entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	/*
	 * Port still alive — or it is a dead send-once right with no
	 * outstanding request and the caller asked to tolerate those.
	 * Either way: return FALSE with the port left locked.
	 */
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* give back the right's count on the (dead) port */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(port->ip_srights > 0);
		port->ip_srights--;
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);
		port->ip_sorights--;
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	/* publish the dead-name conversion; entry no longer names the port */
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* caller now owns the port reference the entry used to hold */
	return TRUE;
}
783 
784 /*
785  *	Routine:	ipc_right_terminate
786  *	Purpose:
787  *		Cleans up an entry in a terminated space.
788  *		The entry isn't deallocated or removed
789  *		from reverse hash tables.
790  *	Conditions:
791  *		The space is dead and unlocked.
792  */
793 
void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type   = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() does, as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	/* keep only the generation/roll bits; the right itself is gone */
	entry->ie_object = IO_NULL;
	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* nothing to release: dead names hold no object or request */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		/* a dead port: just drop the entry's reference */
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		/*
		 * same as ipc_right_request_cancel(),
		 * except for calling ipc_entry_modified()
		 * as the space is now table-less.
		 */
		if (entry->ie_request != IE_REQ_NONE) {
			request = ipc_port_request_cancel(port, name,
			    entry->ie_request);
			entry->ie_request = IE_REQ_NONE;
		}

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
925 
926 /*
927  *	Routine:	ipc_right_destroy
928  *	Purpose:
929  *		Destroys an entry in a space.
930  *	Conditions:
931  *		The space is write-locked (returns unlocked).
932  *		The space must be active.
933  *	Returns:
934  *		KERN_SUCCESS		      The entry was destroyed.
935  *      KERN_INVALID_CAPABILITY   The port is pinned.
936  *      KERN_INVALID_RIGHT        Port guard violation.
937  */
938 
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	/*
	 * Snapshot the entry bits, then strip the type bits up front;
	 * the saved copy drives the teardown below.
	 */
	bits = entry->ie_bits;
	entry->ie_bits &= ~IE_BITS_TYPE_MASK;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		/* nothing to notify: just free the entry */
		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* a pinned send right may not be destroyed */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		if (!ip_active(port)) {
			/* the port died: drop the entry and our reference */
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			/* the single outstanding send-once right dies here */
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1075 
1076 /*
1077  *	Routine:	ipc_right_dealloc
1078  *	Purpose:
1079  *		Releases a send/send-once/dead-name/port_set user ref.
1080  *		Like ipc_right_delta with a delta of -1,
1081  *		but looks at the entry to determine the right.
1082  *	Conditions:
1083  *		The space is write-locked, and is unlocked upon return.
1084  *		The space must be active.
1085  *	Returns:
1086  *		KERN_SUCCESS		A user ref was released.
1087  *		KERN_INVALID_RIGHT	Entry has wrong type.
 *		KERN_INVALID_CAPABILITY	Deallocating a pinned right.
1089  */
1090 
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);


	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Also reached via goto from the SEND and SEND_ONCE cases
		 * below when ipc_right_check() turned the entry into a dead
		 * name; in that case `port` is set and released below.
		 */

		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: free the entry itself */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* deallocation of a send-once right generates a send-once notification */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* dropping the last uref deletes the send right */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last send uref: the entry keeps only the receive right */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1289 
1290 /*
1291  *	Routine:	ipc_right_delta
1292  *	Purpose:
1293  *		Modifies the user-reference count for a right.
1294  *		May deallocate the right, if the count goes to zero.
1295  *	Conditions:
1296  *		The space is write-locked, and is unlocked upon return.
1297  *		The space must be active.
1298  *	Returns:
1299  *		KERN_SUCCESS		Count was modified.
1300  *		KERN_INVALID_RIGHT	Entry has wrong type.
1301  *		KERN_INVALID_VALUE	Bad delta for the right.
1302  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1303  */
1304 
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

/*
 *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
 *	switch below. It is used to keep track of those cases (in DIPC)
 *	where we have postponed the dropping of a port reference. Since
 *	the dropping of the reference could cause the port to disappear
 *	we postpone doing so when we are holding the space lock.
 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta == 0) {
			goto success;
		}

		/* port sets hold no urefs: only -1 (destroy) is meaningful */
		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* no exception if we used to have receive and held the entry since */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		if (delta == 0) {
			goto success;
		}

		/* the receive right carries no urefs: only -1 (destroy) is valid */
		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 *	The port lock is needed for ipc_right_dncancel;
		 *	otherwise, we wouldn't have to take the lock
		 *	until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (port->ip_pdrequest != NULL) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				ip_reference(port);
			} else {
				/*
				 *	The remaining send right turns into a
				 *	dead name.  Notice we don't decrement
				 *	ip_srights, generate a no-senders notif,
				 *	or use ipc_right_dncancel, because the
				 *	port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* entry turned into a dead name; invalid_right releases port */
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* a send-once right has exactly one uref: only 0 or -1 are valid */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* entry became a dead name; defer releasing the port ref */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		/* safe to drop the deferred port reference now that the space is unlocked */
		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* entry turned into a dead name; invalid_right releases port */
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* removing the last uref deletes the send right */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* defer the release until both locks are dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	/* drop the ref left by ipc_right_check() converting the entry to a dead name */
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* both locks were already dropped on the guard-violation path */
	return KERN_INVALID_RIGHT;
}
1738 
1739 /*
1740  *	Routine:	ipc_right_destruct
1741  *	Purpose:
1742  *		Deallocates the receive right and modifies the
1743  *		user-reference count for the send rights as requested.
1744  *	Conditions:
1745  *		The space is write-locked, and is unlocked upon return.
1746  *		The space must be active.
1747  *	Returns:
1748  *		KERN_SUCCESS		Count was modified.
1749  *		KERN_INVALID_RIGHT	Entry has wrong type.
1750  *		KERN_INVALID_VALUE	Bad delta for the right.
1751  */
1752 
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a nonzero send-right delta requires the entry to hold send rights too */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send deltas are accepted */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* all send urefs removed: the send right itself goes away */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (port->ip_pdrequest != NULL) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
		} else {
			/*
			 *	The remaining send right turns into a
			 *	dead name.  Notice we don't decrement
			 *	ip_srights, generate a no-senders notif,
			 *	or use ipc_right_dncancel, because the
			 *	port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1925 
1926 
1927 /*
1928  *	Routine:	ipc_right_info
1929  *	Purpose:
1930  *		Retrieves information about the right.
1931  *	Conditions:
1932  *		The space is active and write-locked.
 *		The space is unlocked upon return.
1934  *	Returns:
1935  *		KERN_SUCCESS		Retrieved info
1936  */
1937 
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* fold any registered notification request into the type bits */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked.  Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port is locked and active */
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* ipc_right_check() turned the entry into a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			ip_release(port);
		}
	} else {
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1993 
1994 /*
1995  *	Routine:	ipc_right_copyin_check_reply
1996  *	Purpose:
1997  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1998  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1999  *		If the reply port is an immovable send right, it errors out.
2000  *	Conditions:
2001  *		The space is locked (read or write) and active.
2002  */
2003 
2004 boolean_t
ipc_right_copyin_check_reply(__assert_only ipc_space_t space,mach_port_name_t reply_name,ipc_entry_t reply_entry,mach_msg_type_name_t reply_type)2005 ipc_right_copyin_check_reply(
2006 	__assert_only ipc_space_t       space,
2007 	mach_port_name_t                reply_name,
2008 	ipc_entry_t                     reply_entry,
2009 	mach_msg_type_name_t            reply_type)
2010 {
2011 	ipc_entry_bits_t bits;
2012 	ipc_port_t reply_port;
2013 
2014 	bits = reply_entry->ie_bits;
2015 	assert(is_active(space));
2016 
2017 	switch (reply_type) {
2018 	case MACH_MSG_TYPE_MAKE_SEND:
2019 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2020 			return FALSE;
2021 		}
2022 		break;
2023 
2024 	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
2025 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2026 			return FALSE;
2027 		}
2028 		break;
2029 
2030 	case MACH_MSG_TYPE_MOVE_RECEIVE:
2031 		/* ipc_kmsg_copyin_header already filters it out */
2032 		return FALSE;
2033 
2034 	case MACH_MSG_TYPE_COPY_SEND:
2035 	case MACH_MSG_TYPE_MOVE_SEND:
2036 	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
2037 		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2038 			break;
2039 		}
2040 
2041 		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2042 			return FALSE;
2043 		}
2044 
2045 		reply_port = ip_object_to_port(reply_entry->ie_object);
2046 		assert(reply_port != IP_NULL);
2047 
2048 		/*
2049 		 * active status peek to avoid checks that will be skipped
2050 		 * on copyin for dead ports.  Lock not held, so will not be
2051 		 * atomic (but once dead, there's no going back).
2052 		 */
2053 		if (!ip_active(reply_port)) {
2054 			break;
2055 		}
2056 
2057 		/*
2058 		 * Can't copyin a send right that is marked immovable. This bit
2059 		 * is set only during port creation and never unset. So it can
2060 		 * be read without a lock.
2061 		 */
2062 		if (ip_is_immovable_send(reply_port)) {
2063 			mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
2064 			return FALSE;
2065 		}
2066 
2067 		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2068 			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2069 				return FALSE;
2070 			}
2071 		} else {
2072 			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2073 				return FALSE;
2074 			}
2075 		}
2076 
2077 		break;
2078 	}
2079 
2080 	default:
2081 		panic("ipc_right_copyin_check: strange rights");
2082 	}
2083 
2084 	return TRUE;
2085 }
2086 
2087 /*
2088  *	Routine:	ipc_right_copyin_check_guard_locked
2089  *	Purpose:
2090  *		Check if the port is guarded and the guard
2091  *		value matches the one passed in the arguments.
2092  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2093  *		check if the port is unguarded.
2094  *	Conditions:
2095  *		The port is locked.
2096  *	Returns:
2097  *		KERN_SUCCESS		Port is either unguarded
2098  *					or guarded with expected value
2099  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2100  *					This also raises a EXC_GUARD exception.
2101  */
2102 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2103 ipc_right_copyin_check_guard_locked(
2104 	mach_port_name_t name,
2105 	ipc_port_t port,
2106 	mach_port_context_t context,
2107 	mach_msg_guard_flags_t *guard_flags)
2108 {
2109 	mach_msg_guard_flags_t flags = *guard_flags;
2110 	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2111 		return KERN_SUCCESS;
2112 	} else if (port->ip_guarded && (port->ip_context == context)) {
2113 		return KERN_SUCCESS;
2114 	}
2115 
2116 	/* Incorrect guard; Raise exception */
2117 	mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2118 	return KERN_INVALID_ARGUMENT;
2119 }
2120 
2121 /*
2122  *	Routine:	ipc_right_copyin
2123  *	Purpose:
2124  *		Copyin a capability from a space.
2125  *		If successful, the caller gets a ref
2126  *		for the resulting object, unless it is IO_DEAD,
2127  *		and possibly a send-once right which should
2128  *		be used in a port-deleted notification.
2129  *
2130  *		If deadok is not TRUE, the copyin operation
2131  *		will fail instead of producing IO_DEAD.
2132  *
2133  *		The entry is deallocated if the entry type becomes
2134  *		MACH_PORT_TYPE_NONE.
2135  *	Conditions:
2136  *		The space is write-locked and active.
2137  *	Returns:
2138  *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
2139  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2140  *		KERN_INVALID_CAPABILITY	Trying to move an kobject port or an immovable right,
2141  *								or moving the last ref of pinned right
2142  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2143  */
2144 
kern_return_t
ipc_right_copyin(
	ipc_space_t                space,
	mach_port_name_t           name,
	ipc_entry_t                entry,
	mach_msg_type_name_t       msgt_name,
	ipc_object_copyin_flags_t   flags,
	ipc_object_t               *objectp,
	ipc_port_t                 *sorightp,
	ipc_port_t                 *releasep,
	int                        *assertcntp,
	mach_port_context_t        context,
	mach_msg_guard_flags_t     *guard_flags)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	kern_return_t kr;
	/* IPC_OBJECT_COPYIN_FLAGS_DEADOK: a dead name yields IO_DEAD instead of failing */
	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	/* allow copyin of send rights marked immovable (trusted internal callers) */
	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);

	*releasep = IP_NULL;
	*assertcntp = 0;

	bits = entry->ie_bits;

	assert(is_active(space));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		/* minting a send right requires holding the receive right here */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* make a brand new send right; entry bits are unchanged */
		ipc_port_make_send_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		/* minting a send-once right likewise requires the receive right */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* make a brand new send-once right; entry bits are unchanged */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
		 * The ipc_port structure uses the kdata union of kobject and
		 * imp_task exclusively. Thus, general use of a kobject port as
		 * a receive right can cause type confusion in the importance
		 * code.
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			/*
			 * Distinguish an invalid right, e.g., trying to move
			 * a send right as a receive right, from this
			 * situation which is, "This is a valid receive right,
			 * but it's also a kobject and you can't move it."
			 */
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* immovable-receive and special-reply ports may never be moved */
		if (port->ip_immovable_receive || port->ip_specialreply) {
			assert(!ip_in_space(port, ipc_space_kernel));
			ip_mq_unlock(port);
			assert(current_task() != kernel_task);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* a guarded move must present the matching guard context */
		if (guard_flags != NULL) {
			kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock(port);
				return kr;
			}
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * Entry also holds a send right: keep the entry alive
			 * for the send right, mark that the receive right left
			 * (EX_RECEIVE), and re-hash the entry by object.
			 */
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			/* receive-only entry: cancel any dead-name request and free it */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
		(void)ipc_port_clear_receiver(port, FALSE); /* don't destroy the port/mqueue */
		if (guard_flags != NULL) {
			/* this flag will be cleared during copyout */
			*guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in.  Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, We'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			*assertcntp = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* port may have died since the entry bits were read */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto copy_dead;
		}
		/* port is locked and active */

		/* COPY_SEND cannot be applied to a send-once right */
		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* duplicate the send right; entry keeps its urefs untouched */
		ipc_port_copy_send_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* port may have died since the entry bits were read */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/* port is locked and active */

		/* MOVE_SEND cannot be applied to a send-once right */
		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);
			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == 1) {
			/* moving the last uref: the entry loses its send right */
			assert(port->ip_srights > 0);
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* entry survives for the receive right it still holds */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);
				assert(!ip_is_pinned(port));

				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* the last ref of a pinned right may not leave the space */
				if (ip_is_pinned(port)) {
					ip_mq_unlock(port);
					mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
					return KERN_INVALID_CAPABILITY;
				}

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			/* more urefs remain: decompose as a copy + uref decrement */
			ipc_port_copy_send_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);
		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		ipc_port_t request;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, flags)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		/* MOVE_SEND_ONCE cannot be applied to a plain send right */
		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* a send-once entry always holds exactly one uref */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

copy_dead:
	/* COPY_SEND of a dead name: nothing in the entry changes */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;

move_dead:
	/* MOVE of a dead name: consume one uref, freeing the entry on the last one */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IO_NULL);

	if (!deadok) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == 1) {
		ipc_entry_dealloc(space, IO_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 1; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;
}
2551 
2552 /*
2553  *	Routine:	ipc_right_copyin_two_move_sends
2554  *	Purpose:
2555  *		Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2556  *		and deadok == FALSE, except that this moves two
2557  *		send rights at once.
2558  *	Conditions:
2559  *		The space is write-locked and active.
2560  *		The object is returned with two refs/send rights.
2561  *	Returns:
2562  *		KERN_SUCCESS		Acquired an object.
2563  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2564  */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	/* moving two send rights requires at least two urefs */
	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	/* port may have died since the entry bits were read */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 * that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_locked(port);
		ipc_port_copy_send_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			/* entry survives: it still holds the receive right */
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	/* caller receives the object with two send rights/refs */
	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2670 
2671 
2672 /*
2673  *	Routine:	ipc_right_copyin_two
2674  *	Purpose:
2675  *		Like ipc_right_copyin with two dispositions,
2676  *		each of which results in a send or send-once right,
2677  *		and deadok = FALSE.
2678  *	Conditions:
2679  *		The space is write-locked and active.
2680  *		The object is returned with two refs/rights.
2681  *		Msgt_one refers to the dest_type
2682  *	Returns:
2683  *		KERN_SUCCESS		Acquired an object.
2684  *		KERN_INVALID_RIGHT	Name doesn't denote correct right(s).
2685  *		KERN_INVALID_CAPABILITY	Name doesn't denote correct right for msgt_two.
2686  */
kern_return_t
ipc_right_copyin_two(
	ipc_space_t               space,
	mach_port_name_t          name,
	ipc_entry_t               entry,
	mach_msg_type_name_t      msgt_one,
	mach_msg_type_name_t      msgt_two,
	ipc_object_t              *objectp,
	ipc_port_t                *sorightp,
	ipc_port_t                *releasep)
{
	kern_return_t kr;
	int assertcnt = 0;

	/* both dispositions must yield a send or send-once right */
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));

	/*
	 *	This is a little tedious to make atomic, because
	 *	there are 25 combinations of valid dispositions.
	 *	However, most are easy.
	 */

	/*
	 *	If either is move-sonce, then there must be an error.
	 */
	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		return KERN_INVALID_RIGHT;
	}

	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
		/*
		 *	One of the dispositions needs a receive right.
		 *
		 *	If the copyin below succeeds, we know the receive
		 *	right is there (because the pre-validation of
		 *	the second disposition already succeeded in our
		 *	caller).
		 *
		 *	Hence the port is not in danger of dying.
		 */
		ipc_object_t object_two;

		kr = ipc_right_copyin(space, name, entry,
		    msgt_one, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		assert(IO_VALID(*objectp));
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);

		/*
		 *	Now copyin the second (previously validated)
		 *	disposition.  The result can't be a dead port,
		 *	as no valid disposition can make us lose our
		 *	receive right.
		 */
		kr = ipc_right_copyin(space, name, entry,
		    msgt_two, IPC_OBJECT_COPYIN_FLAGS_NONE,
		    &object_two, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		assert(kr == KERN_SUCCESS);
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);
		assert(object_two == *objectp);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
		/*
		 *	This is an easy case.  Just use our
		 *	handy-dandy special-purpose copyin call
		 *	to get two send rights for the price of one.
		 */
		kr = ipc_right_copyin_two_move_sends(space, name, entry,
		    objectp, sorightp,
		    releasep);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	} else {
		mach_msg_type_name_t msgt_name;

		/*
		 *	Must be either a single move-send and a
		 *	copy-send, or two copy-send dispositions.
		 *	Use the disposition with the greatest side
		 *	effects for the actual copyin - then just
		 *	duplicate the send right you get back.
		 */
		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
		} else {
			msgt_name = MACH_MSG_TYPE_COPY_SEND;
		}

		kr = ipc_right_copyin(space, name, entry,
		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 *	Copy the right we got back.  If it is dead now,
		 *	that's OK.  Neither right will be usable to send
		 *	a message anyway.
		 *
		 *	Note that the port could be concurrently moved
		 *	outside of the space as a descriptor, and then
		 *	destroyed, which would not happen under the space lock.
		 *
		 *	It means we can't use ipc_port_copy_send() which
		 *	may fail if the port died.
		 */
		io_lock(*objectp);
		ipc_port_copy_send_locked(ip_object_to_port(*objectp));
		io_unlock(*objectp);
	}

	return KERN_SUCCESS;
}
2821 
2822 
2823 /*
2824  *	Routine:	ipc_right_copyout
2825  *	Purpose:
2826  *		Copyout a capability to a space.
2827  *		If successful, consumes a ref for the object.
2828  *
2829  *		Always succeeds when given a newly-allocated entry,
2830  *		because user-reference overflow isn't a possibility.
2831  *
2832  *		If copying out the object would cause the user-reference
2833  *		count in the entry to overflow, then the user-reference
2834  *		count is left pegged to its maximum value and the copyout
2835  *		succeeds anyway.
2836  *	Conditions:
2837  *		The space is write-locked and active.
2838  *		The object is locked and active.
2839  *		The object is unlocked; the space isn't.
2840  *	Returns:
2841  *		KERN_SUCCESS		Copied out capability.
2842  */
2843 
2844 kern_return_t
ipc_right_copyout(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_name,ipc_object_copyout_flags_t flags,mach_port_context_t * context,mach_msg_guard_flags_t * guard_flags,ipc_object_t object)2845 ipc_right_copyout(
2846 	ipc_space_t             space,
2847 	mach_port_name_t        name,
2848 	ipc_entry_t             entry,
2849 	mach_msg_type_name_t    msgt_name,
2850 	ipc_object_copyout_flags_t flags,
2851 	mach_port_context_t     *context,
2852 	mach_msg_guard_flags_t  *guard_flags,
2853 	ipc_object_t            object)
2854 {
2855 	ipc_entry_bits_t bits;
2856 	ipc_port_t port;
2857 	mach_port_name_t sp_name = MACH_PORT_NULL;
2858 	mach_port_context_t sp_context = 0;
2859 
2860 	bits = entry->ie_bits;
2861 
2862 	assert(IO_VALID(object));
2863 	assert(io_otype(object) == IOT_PORT);
2864 	assert(io_active(object));
2865 	assert(entry->ie_object == object);
2866 
2867 	port = ip_object_to_port(object);
2868 
2869 	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
2870 		assert(!ip_is_pinned(port));
2871 		assert(ip_is_immovable_send(port));
2872 		assert(task_is_immovable(space->is_task));
2873 		assert(task_is_pinned(space->is_task));
2874 		port->ip_pinned = 1;
2875 	}
2876 
2877 	switch (msgt_name) {
2878 	case MACH_MSG_TYPE_PORT_SEND_ONCE:
2879 
2880 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
2881 		assert(IE_BITS_UREFS(bits) == 0);
2882 		assert(port->ip_sorights > 0);
2883 
2884 		if (port->ip_specialreply) {
2885 			ipc_port_adjust_special_reply_port_locked(port,
2886 			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
2887 			/* port unlocked on return */
2888 		} else {
2889 			ip_mq_unlock(port);
2890 		}
2891 
2892 		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
2893 		ipc_entry_modified(space, name, entry);
2894 		break;
2895 
2896 	case MACH_MSG_TYPE_PORT_SEND:
2897 		assert(port->ip_srights > 0);
2898 
2899 		if (bits & MACH_PORT_TYPE_SEND) {
2900 			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
2901 
2902 			assert(port->ip_srights > 1);
2903 			assert(urefs > 0);
2904 			assert(urefs <= MACH_PORT_UREFS_MAX);
2905 
2906 			if (urefs == MACH_PORT_UREFS_MAX) {
2907 				/*
2908 				 * leave urefs pegged to maximum,
2909 				 * consume send right and ref
2910 				 */
2911 
2912 				port->ip_srights--;
2913 				ip_mq_unlock(port);
2914 				ip_release_live(port);
2915 				return KERN_SUCCESS;
2916 			}
2917 
2918 			/* consume send right and ref */
2919 			port->ip_srights--;
2920 			ip_mq_unlock(port);
2921 			ip_release_live(port);
2922 		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
2923 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
2924 			assert(IE_BITS_UREFS(bits) == 0);
2925 
2926 			/* transfer send right to entry, consume ref */
2927 			ip_mq_unlock(port);
2928 			ip_release_live(port);
2929 		} else {
2930 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
2931 			assert(IE_BITS_UREFS(bits) == 0);
2932 
2933 			/* transfer send right and ref to entry */
2934 			ip_mq_unlock(port);
2935 
2936 			/* entry is locked holding ref, so can use port */
2937 
2938 			ipc_hash_insert(space, ip_to_object(port), name, entry);
2939 		}
2940 
2941 		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
2942 		ipc_entry_modified(space, name, entry);
2943 		break;
2944 
2945 	case MACH_MSG_TYPE_PORT_RECEIVE: {
2946 		ipc_port_t dest;
2947 #if IMPORTANCE_INHERITANCE
2948 		natural_t assertcnt = port->ip_impcount;
2949 #endif /* IMPORTANCE_INHERITANCE */
2950 
2951 		assert(port->ip_mscount == 0);
2952 		assert(!ip_in_a_space(port));
2953 
2954 		/*
2955 		 * Don't copyout kobjects or kolabels as receive right
2956 		 */
2957 		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
2958 			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
2959 		}
2960 
2961 		dest = ip_get_destination(port);
2962 
2963 		/* port transitions to IN-SPACE state */
2964 		port->ip_receiver_name = name;
2965 		port->ip_receiver = space;
2966 
2967 		struct knote *kn = current_thread()->ith_knote;
2968 
2969 		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
2970 			assert(port->ip_immovable_receive == 0);
2971 			port->ip_guarded = 1;
2972 			port->ip_strict_guard = 0;
2973 			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
2974 			if (kn != ITH_KNOTE_PSEUDO) {
2975 				port->ip_immovable_receive = 1;
2976 			}
2977 			port->ip_context = current_thread()->ith_msg_addr;
2978 			*context = port->ip_context;
2979 			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
2980 		}
2981 
2982 		/* Check if this is a service port */
2983 		if (port->ip_service_port) {
2984 			assert(port->ip_splabel != NULL);
2985 			/* Check if this is a port-destroyed notification to ensure
2986 			 * that initproc doesnt end up with a guarded service port
2987 			 * sent in a regular message
2988 			 */
2989 			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
2990 				goto skip_sp_check;
2991 			}
2992 			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
2993 #if !(DEVELOPMENT || DEBUG)
2994 			if (current_task()->bsd_info != initproc) {
2995 				goto skip_sp_check;
2996 			}
2997 #endif /* !(DEVELOPMENT || DEBUG) */
2998 			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
2999 			assert(sp_name != MACH_PORT_NULL);
3000 			/* Verify the port name and restore the guard value, if any */
3001 			if (name != sp_name) {
3002 				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
3003 			}
3004 			if (sp_context) {
3005 				port->ip_guarded = 1;
3006 				port->ip_strict_guard = 1;
3007 				port->ip_context = sp_context;
3008 			}
3009 		}
3010 skip_sp_check:
3011 
3012 		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
3013 		if (bits & MACH_PORT_TYPE_SEND) {
3014 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
3015 			assert(IE_BITS_UREFS(bits) > 0);
3016 			assert(port->ip_srights > 0);
3017 		} else {
3018 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
3019 			assert(IE_BITS_UREFS(bits) == 0);
3020 		}
3021 		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
3022 		ipc_entry_modified(space, name, entry);
3023 
3024 		boolean_t sync_bootstrap_checkin = FALSE;
3025 		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
3026 			sync_bootstrap_checkin = TRUE;
3027 		}
3028 		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
3029 			kn = NULL;
3030 		}
3031 		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
3032 		/* port unlocked */
3033 
3034 		if (bits & MACH_PORT_TYPE_SEND) {
3035 			ip_release_live(port);
3036 
3037 			/* entry is locked holding ref, so can use port */
3038 			ipc_hash_delete(space, ip_to_object(port), name, entry);
3039 		}
3040 
3041 		if (dest != IP_NULL) {
3042 #if IMPORTANCE_INHERITANCE
3043 			/*
3044 			 * Deduct the assertion counts we contributed to
3045 			 * the old destination port.  They've already
3046 			 * been reflected into the task as a result of
3047 			 * getting enqueued.
3048 			 */
3049 			ip_mq_lock(dest);
3050 			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
3051 			ip_mq_unlock(dest);
3052 #endif /* IMPORTANCE_INHERITANCE */
3053 
3054 			/* Drop turnstile ref on dest */
3055 			ipc_port_send_turnstile_complete(dest);
3056 			/* space lock is held */
3057 			ip_release_safe(dest);
3058 		}
3059 		break;
3060 	}
3061 
3062 	default:
3063 		panic("ipc_right_copyout: strange rights");
3064 	}
3065 	return KERN_SUCCESS;
3066 }
3067