xref: /xnu-8792.41.9/osfmk/ipc/ipc_right.c (revision 5c2921b07a2480ab43ec66f5b9e41cb872bc554f)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <ipc/port.h>
82 #include <ipc/ipc_entry.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_object.h>
85 #include <ipc/ipc_hash.h>
86 #include <ipc/ipc_port.h>
87 #include <ipc/ipc_pset.h>
88 #include <ipc/ipc_right.h>
89 #include <ipc/ipc_notify.h>
90 #include <ipc/ipc_importance.h>
91 #include <ipc/ipc_service_port.h>
92 #include <security/mac_mach_internal.h>
93 
94 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
95 
96 TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
97 static TUNABLE(bool, reply_port_semantics_enabled, "-reply_port_semantics", false);
98 
99 /*
100  *	Routine:	ipc_right_lookup_read
101  *	Purpose:
102  *		Finds an entry in a space, given the name.
103  *	Conditions:
104  *		Nothing locked.
105  *		If an object is found, it is locked and active.
106  *	Returns:
107  *		KERN_SUCCESS		Found an entry.
108  *		KERN_INVALID_TASK	The space is dead.
109  *		KERN_INVALID_NAME	Name doesn't exist in space.
110  */
111 kern_return_t
ipc_right_lookup_read(ipc_space_t space,mach_port_name_t name,ipc_entry_bits_t * bitsp,ipc_object_t * objectp)112 ipc_right_lookup_read(
113 	ipc_space_t             space,
114 	mach_port_name_t        name,
115 	ipc_entry_bits_t       *bitsp,
116 	ipc_object_t           *objectp)
117 {
118 	mach_port_index_t index;
119 	ipc_entry_table_t table;
120 	ipc_entry_t entry;
121 	ipc_object_t object;
122 	kern_return_t kr;
123 
124 	index = MACH_PORT_INDEX(name);
125 	if (__improbable(index == 0)) {
126 		*bitsp = 0;
127 		*objectp = IO_NULL;
128 		return KERN_INVALID_NAME;
129 	}
130 
131 	smr_global_enter();
132 
133 	/*
134 	 * Acquire a (possibly stale) pointer to the table,
135 	 * and guard it so that it can't be deallocated while we use it.
136 	 *
137 	 * smr_global_enter() has the property that it strongly serializes
138 	 * after any store-release. This is important because it means that if
139 	 * one considers this (broken) userspace usage:
140 	 *
141 	 * Thread 1:
142 	 *   - makes a semaphore, gets name 0x1003
143 	 *   - stores that name to a global `sema` in userspace
144 	 *
145 	 * Thread 2:
146 	 *   - spins to observe `sema` becoming non 0
147 	 *   - calls semaphore_wait() on 0x1003
148 	 *
149 	 * Then, because in order to return 0x1003 this thread issued
150 	 * a store-release (when calling is_write_unlock()),
151 	 * then this smr_entered_load() can't possibly observe a table
152 	 * pointer that is older than the one that was current when the
153 	 * semaphore was made.
154 	 *
155 	 * This fundamental property allows us to never loop.
156 	 */
157 	table = smr_entered_load(&space->is_table);
158 	if (__improbable(table == NULL)) {
159 		kr = KERN_INVALID_TASK;
160 		goto out_put;
161 	}
162 	entry = ipc_entry_table_get(table, index);
163 	if (__improbable(entry == NULL)) {
164 		kr = KERN_INVALID_NAME;
165 		goto out_put;
166 	}
167 
168 	/*
169 	 * Note: this should be an atomic load, but PAC and atomics
170 	 *       don't work interact well together.
171 	 */
172 	object = entry->ie_volatile_object;
173 
174 	/*
175 	 * Attempt to lock an object that lives in this entry.
176 	 * It might fail or be a completely different object by now.
177 	 *
178 	 * Make sure that acquiring the lock is fully ordered after any
179 	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
180 	 * This allows us to always reliably observe space termination below.
181 	 */
182 	os_atomic_barrier_before_lock_acquire();
183 	if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
184 		kr = KERN_INVALID_NAME;
185 		goto out_put;
186 	}
187 
188 	/*
189 	 * Now that we hold the object lock, we are preventing any entry
190 	 * in this space for this object to be mutated.
191 	 *
192 	 * If the space didn't grow after we acquired our hazardous reference,
193 	 * and before a mutation of the entry, then holding the object lock
194 	 * guarantees we will observe the truth of ie_bits, ie_object and
195 	 * ie_request (those are always mutated with the object lock held).
196 	 *
197 	 * However this ordering is problematic:
198 	 * - [A]cquisition of the table pointer
199 	 * - [G]rowth of the space (making the table pointer in [A] stale)
200 	 * - [M]utation of the entry
201 	 * - [L]ocking of the object read through [A].
202 	 *
203 	 * The space lock is held for both [G] and [M], and the object lock
204 	 * is held for [M], which means that once we lock the object we can
205 	 * observe if [G] happenend by reloading the table pointer.
206 	 *
207 	 * We might still fail to observe any growth operation that happened
208 	 * after the last mutation of this object's entry, because holding
209 	 * an object lock doesn't guarantee anything about the liveness
210 	 * of the space table pointer. This is not a problem at all: by
211 	 * definition, those didn't affect the state of the entry.
212 	 *
213 	 * TODO: a data-structure where the entries are grown by "slabs",
214 	 *       would allow for the address of an ipc_entry_t to never
215 	 *       change once it exists in a space and would avoid a reload
216 	 *       (as well as making space growth faster).
217 	 *       We however still need to check for termination.
218 	 */
219 	table = smr_entered_load(&space->is_table);
220 	if (__improbable(table == NULL)) {
221 		kr = KERN_INVALID_TASK;
222 		goto out_put_unlock;
223 	}
224 
225 	/*
226 	 * Tables never shrink so we don't need to validate the length twice.
227 	 */
228 	entry = ipc_entry_table_get_nocheck(table, index);
229 
230 	/*
231 	 * Now that we hold the lock and have a "fresh enough" table pointer,
232 	 * validate if this entry is what we think it is.
233 	 *
234 	 * To the risk of being repetitive, we still need to protect
235 	 * those accesses under SMR, because subsequent
236 	 * table growths might retire the memory. However we know
237 	 * those growths will have left our entry unchanged.
238 	 */
239 	if (__improbable(entry->ie_object != object)) {
240 		kr = KERN_INVALID_NAME;
241 		goto out_put_unlock;
242 	}
243 
244 	ipc_entry_bits_t bits = entry->ie_bits;
245 	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
246 	    IE_BITS_TYPE(bits == MACH_PORT_TYPE_NONE))) {
247 		kr = KERN_INVALID_NAME;
248 		goto out_put_unlock;
249 	}
250 
251 	/* Done with hazardous accesses to the table */
252 	smr_global_leave();
253 
254 	*bitsp = bits;
255 	*objectp = object;
256 	return KERN_SUCCESS;
257 
258 out_put_unlock:
259 	ipc_object_unlock(object);
260 out_put:
261 	smr_global_leave();
262 	return kr;
263 }
264 
265 /*
266  *	Routine:	ipc_right_lookup_write
267  *	Purpose:
268  *		Finds an entry in a space, given the name.
269  *	Conditions:
270  *		Nothing locked.  If successful, the space is write-locked.
271  *	Returns:
272  *		KERN_SUCCESS		Found an entry.
273  *		KERN_INVALID_TASK	The space is dead.
274  *		KERN_INVALID_NAME	Name doesn't exist in space.
275  */
276 
277 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)278 ipc_right_lookup_write(
279 	ipc_space_t             space,
280 	mach_port_name_t        name,
281 	ipc_entry_t             *entryp)
282 {
283 	ipc_entry_t entry;
284 
285 	assert(space != IS_NULL);
286 
287 	is_write_lock(space);
288 
289 	if (!is_active(space)) {
290 		is_write_unlock(space);
291 		return KERN_INVALID_TASK;
292 	}
293 
294 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
295 		is_write_unlock(space);
296 		return KERN_INVALID_NAME;
297 	}
298 
299 	*entryp = entry;
300 	return KERN_SUCCESS;
301 }
302 
303 /*
304  *	Routine:	ipc_right_lookup_two_write
305  *	Purpose:
306  *		Like ipc_right_lookup except that it returns two
307  *		entries for two different names that were looked
308  *		up under the same space lock.
309  *	Conditions:
310  *		Nothing locked.  If successful, the space is write-locked.
311  *	Returns:
312  *		KERN_INVALID_TASK	The space is dead.
313  *		KERN_INVALID_NAME	Name doesn't exist in space.
314  */
315 
316 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)317 ipc_right_lookup_two_write(
318 	ipc_space_t             space,
319 	mach_port_name_t        name1,
320 	ipc_entry_t             *entryp1,
321 	mach_port_name_t        name2,
322 	ipc_entry_t             *entryp2)
323 {
324 	ipc_entry_t entry1;
325 	ipc_entry_t entry2;
326 
327 	assert(space != IS_NULL);
328 
329 	is_write_lock(space);
330 
331 	if (!is_active(space)) {
332 		is_write_unlock(space);
333 		return KERN_INVALID_TASK;
334 	}
335 
336 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
337 		is_write_unlock(space);
338 		mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
339 		return KERN_INVALID_NAME;
340 	}
341 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
342 		is_write_unlock(space);
343 		mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
344 		return KERN_INVALID_NAME;
345 	}
346 	*entryp1 = entry1;
347 	*entryp2 = entry2;
348 	return KERN_SUCCESS;
349 }
350 
351 /*
352  *	Routine:	ipc_right_reverse
353  *	Purpose:
354  *		Translate (space, object) -> (name, entry).
355  *		Only finds send/receive rights.
356  *		Returns TRUE if an entry is found; if so,
357  *		the object active.
358  *	Conditions:
359  *		The space must be locked (read or write) and active.
360  *		The port is locked and active
361  */
362 
363 bool
ipc_right_reverse(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)364 ipc_right_reverse(
365 	ipc_space_t             space,
366 	ipc_object_t            object,
367 	mach_port_name_t        *namep,
368 	ipc_entry_t             *entryp)
369 {
370 	ipc_port_t port;
371 	mach_port_name_t name;
372 	ipc_entry_t entry;
373 
374 	/* would switch on io_otype to handle multiple types of object */
375 
376 	assert(is_active(space));
377 	assert(io_otype(object) == IOT_PORT);
378 
379 	port = ip_object_to_port(object);
380 	require_ip_active(port);
381 
382 	ip_mq_lock_held(port);
383 
384 	if (ip_in_space(port, space)) {
385 		name = ip_get_receiver_name(port);
386 		assert(name != MACH_PORT_NULL);
387 
388 		entry = ipc_entry_lookup(space, name);
389 
390 		assert(entry != IE_NULL);
391 		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
392 		assert(port == ip_object_to_port(entry->ie_object));
393 
394 		*namep = name;
395 		*entryp = entry;
396 		return true;
397 	}
398 
399 	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
400 		entry = *entryp;
401 		assert(entry != IE_NULL);
402 		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
403 		assert(port == ip_object_to_port(entry->ie_object));
404 
405 		return true;
406 	}
407 
408 	return false;
409 }
410 
411 /*
412  *	Routine:	ipc_right_request_alloc (historically ipc_right_dnrequest)
413  *	Purpose:
414  *		Make a dead-name request, returning the previously
415  *		registered send-once right.  If notify is IP_NULL,
416  *		just cancels the previously registered request.
417  *
418  *	Conditions:
419  *		Nothing locked.  May allocate memory.
420  *		Only consumes/returns refs if successful.
421  *	Returns:
422  *		KERN_SUCCESS		Made/canceled dead-name request.
423  *		KERN_INVALID_TASK	The space is dead.
424  *		KERN_INVALID_NAME	Name doesn't exist in space.
425  *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
426  *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
427  *			immediate is FALSE or notify is IP_NULL.
428  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
429  */
430 
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_request_index_t prev_request;
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * The loop exists only to retry after growing the port's request
	 * table (the `continue` below); every other path leaves via
	 * `break` (success, *previousp set) or `return` (error).
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		prev_request = entry->ie_request;

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && prev_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) || !ip_full(port))) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					/* notification fires right away, no entry kept */
					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there is a previous request, free it.  Any subsequent
				 * allocation cannot fail, thus assuring an atomic swap.
				 */
				if (prev_request != IE_REQ_NONE) {
					previous = ipc_port_request_cancel(port, name, prev_request);
				}

#if IMPORTANCE_INHERITANCE
				/*
				 * Only the first send-possible request on the
				 * port arms the importance boost below.
				 */
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					/* retry from scratch: the entry may have changed */
					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					/*
					 * On TRUE, ipc_port_importance_delta() has
					 * already dropped the port lock; only unlock
					 * here when it returns FALSE.
					 */
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the ref ipc_right_check() left us responsible for */
			if (port != IP_NULL) {
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	*previousp = previous;
	return KERN_SUCCESS;
}
597 
598 /*
599  *	Routine:	ipc_right_request_cancel
600  *	Purpose:
601  *		Cancel a notification request and return the send-once right.
602  *		Afterwards, entry->ie_request == 0.
603  *	Conditions:
604  *		The space must be write-locked; the port must be locked.
605  *		The port and space must be active.
606  */
607 
608 ipc_port_t
ipc_right_request_cancel(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)609 ipc_right_request_cancel(
610 	ipc_space_t                     space,
611 	ipc_port_t                      port,
612 	mach_port_name_t                name,
613 	ipc_entry_t                     entry)
614 {
615 	ipc_port_t previous;
616 
617 	require_ip_active(port);
618 	assert(is_active(space));
619 	assert(port == ip_object_to_port(entry->ie_object));
620 
621 	if (entry->ie_request == IE_REQ_NONE) {
622 		return IP_NULL;
623 	}
624 
625 	previous = ipc_port_request_cancel(port, name, entry->ie_request);
626 	entry->ie_request = IE_REQ_NONE;
627 	ipc_entry_modified(space, name, entry);
628 	return previous;
629 }
630 
631 /*
632  *	Routine:	ipc_right_inuse
633  *	Purpose:
634  *		Check if an entry is being used.
635  *		Returns TRUE if it is.
636  *	Conditions:
637  *		The space is write-locked and active.
638  */
639 
640 bool
ipc_right_inuse(ipc_entry_t entry)641 ipc_right_inuse(
642 	ipc_entry_t entry)
643 {
644 	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
645 }
646 
647 /*
648  *	Routine:	ipc_right_check
649  *	Purpose:
650  *		Check if the port has died.  If it has,
651  *              and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
652  *              passed and it is not a send once right then
653  *		clean up the entry and return TRUE.
654  *	Conditions:
655  *		The space is write-locked; the port is not locked.
656  *		If returns FALSE, the port is also locked.
657  *		Otherwise, entry is converted to a dead name.
658  *
659  *		Caller is responsible for a reference to port if it
660  *		had died (returns TRUE).
661  */
662 
boolean_t
ipc_right_check(
	ipc_space_t              space,
	ipc_port_t               port,
	mach_port_name_t         name,
	ipc_entry_t              entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	/*
	 * Nothing to clean up if the port is still alive, or if the caller
	 * explicitly allows dead send-once rights (and no notification
	 * request is attached).  Either way we return FALSE with the port
	 * still locked, per the contract above.
	 */
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* give back the right's count on the (dead) port */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(port->ip_srights > 0);
		port->ip_srights--;
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);
		port->ip_sorights--;
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* TRUE: entry is now a dead name; caller owns a port reference */
	return TRUE;
}
747 
748 /*
749  *	Routine:	ipc_right_terminate
750  *	Purpose:
751  *		Cleans up an entry in a terminated space.
752  *		The entry isn't deallocated or removed
753  *		from reverse hash tables.
754  *	Conditions:
755  *		The space is dead and unlocked.
756  */
757 
void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type   = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() does, as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	/* keep only the generation/roll bits; the right itself is gone */
	entry->ie_object = IO_NULL;
	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		/* a dead port only needs its reference dropped */
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		/*
		 * same as ipc_right_request_cancel(),
		 * except for calling ipc_entry_modified()
		 * as the space is now table-less.
		 */
		if (entry->ie_request != IE_REQ_NONE) {
			request = ipc_port_request_cancel(port, name,
			    entry->ie_request);
			entry->ie_request = IE_REQ_NONE;
		}

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
889 
890 /*
891  *	Routine:	ipc_right_destroy
892  *	Purpose:
893  *		Destroys an entry in a space.
894  *	Conditions:
895  *		The space is write-locked (returns unlocked).
896  *		The space must be active.
897  *	Returns:
898  *		KERN_SUCCESS		      The entry was destroyed.
899  *      KERN_INVALID_CAPABILITY   The port is pinned.
900  *      KERN_INVALID_RIGHT        Port guard violation.
901  */
902 
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	/* strip the type from the entry up front; `bits` keeps the original */
	bits = entry->ie_bits;
	entry->ie_bits &= ~IE_BITS_TYPE_MASK;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed: raise a guard */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		/* dead port: just reclaim the entry and our reference */
		if (!ip_active(port)) {
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/* emitted after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1039 
1040 /*
1041  *	Routine:	ipc_right_dealloc
1042  *	Purpose:
1043  *		Releases a send/send-once/dead-name/port_set user ref.
1044  *		Like ipc_right_delta with a delta of -1,
1045  *		but looks at the entry to determine the right.
1046  *	Conditions:
1047  *		The space is write-locked, and is unlocked upon return.
1048  *		The space must be active.
1049  *	Returns:
1050  *		KERN_SUCCESS		A user ref was released.
1051  *		KERN_INVALID_RIGHT	Entry has wrong type.
 *		KERN_INVALID_CAPABILITY	Deallocating a pinned right.
1053  */
1054 
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);


	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port sets carry no user refs and no dead-name request */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		/* free the entry while both the space and the pset are locked */
		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Reached directly for a dead name, or via goto from the
		 * SEND / SEND_ONCE cases below when ipc_right_check()
		 * discovered the port died.  In the latter case "port"
		 * still holds a reference, released only after the space
		 * lock is dropped.
		 */
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* dropping the last uref frees the entry itself */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always has exactly one uref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died: entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died: entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* a pinned send right may not lose its last uref */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			/* dropping the last send right may arm no-senders */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* deliver notifications only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* drop the send right; the receive right stays in the entry */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		/* e.g. a bare receive right cannot be deallocated this way */
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1253 
1254 /*
1255  *	Routine:	ipc_right_delta
1256  *	Purpose:
1257  *		Modifies the user-reference count for a right.
1258  *		May deallocate the right, if the count goes to zero.
1259  *	Conditions:
1260  *		The space is write-locked, and is unlocked upon return.
1261  *		The space must be active.
1262  *	Returns:
1263  *		KERN_SUCCESS		Count was modified.
1264  *		KERN_INVALID_RIGHT	Entry has wrong type.
1265  *		KERN_INVALID_VALUE	Bad delta for the right.
1266  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1267  */
1268 
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

/*
 *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
 *	switch below. It is used to keep track of those cases (in DIPC)
 *	where we have postponed the dropping of a port reference. Since
 *	the dropping of the reference could cause the port to disappear
 *	we postpone doing so when we are holding the space lock.
 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		/* a port set only accepts delta 0 (no-op) or -1 (destroy) */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* no exception if the entry used to hold receive (EX_RECEIVE) */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* a receive right only accepts delta 0 (no-op) or -1 (destroy) */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 *	The port lock is needed for ipc_right_dncancel;
		 *	otherwise, we wouldn't have to take the lock
		 *	until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				ip_reference(port);
			} else {
				/*
				 *	The remaining send right turns into a
				 *	dead name.  Notice we don't decrement
				 *	ip_srights, generate a no-senders notif,
				 *	or use ipc_right_dncancel, because the
				 *	port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* a send-once right only accepts delta 0 or -1 */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* right died: fall through to operate on the dead name */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* all refs dropped: free the entry */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		/* drop the dead port's ref only after the space is unlocked */
		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* removing every send uref destroys the send right */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				/* keep the entry: it still holds the receive right */
				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* defer the release until all locks are dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* space already unlocked and exception raised at the goto site */
	return KERN_INVALID_RIGHT;
}
1702 
1703 /*
1704  *	Routine:	ipc_right_destruct
1705  *	Purpose:
1706  *		Deallocates the receive right and modifies the
1707  *		user-reference count for the send rights as requested.
1708  *	Conditions:
1709  *		The space is write-locked, and is unlocked upon return.
1710  *		The space must be active.
1711  *	Returns:
1712  *		KERN_SUCCESS		Count was modified.
1713  *		KERN_INVALID_RIGHT	Entry has wrong type.
1714  *		KERN_INVALID_VALUE	Bad delta for the right.
1715  */
1716 
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-uref delta only makes sense if send rights are held too */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send-uref deltas are accepted */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* dropping the last send uref may arm no-senders */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
		} else {
			/*
			 *	The remaining send right turns into a
			 *	dead name.  Notice we don't decrement
			 *	ip_srights, generate a no-senders notif,
			 *	or use ipc_right_dncancel, because the
			 *	port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1889 
1890 
1891 /*
1892  *	Routine:	ipc_right_info
1893  *	Purpose:
1894  *		Retrieves information about the right.
1895  *	Conditions:
1896  *		The space is active and write-locked.
1897  *	        The space is unlocked upon return.
1898  *	Returns:
1899  *		KERN_SUCCESS		Retrieved info
1900  */
1901 
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* lock the port only to read any registered request types */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked.  Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* right died: the entry is now a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			ip_release(port);
		}
	} else {
		/* dead name or port set: the entry bits alone suffice */
		is_write_unlock(space);
	}

	/* combine the entry's type bits with any request-derived bits */
	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1957 
1958 /*
1959  *	Routine:	ipc_right_copyin_check_reply
1960  *	Purpose:
1961  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1962  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1963  *		If the reply port is an immovable send right, it errors out.
1964  *	Conditions:
1965  *		The space is locked (read or write) and active.
1966  */
1967 
1968 boolean_t
ipc_right_copyin_check_reply(__assert_only ipc_space_t space,mach_port_name_t reply_name,ipc_entry_t reply_entry,mach_msg_type_name_t reply_type,ipc_entry_t dest_entry,boolean_t * reply_port_semantics_violation)1969 ipc_right_copyin_check_reply(
1970 	__assert_only ipc_space_t       space,
1971 	mach_port_name_t                reply_name,
1972 	ipc_entry_t                     reply_entry,
1973 	mach_msg_type_name_t            reply_type,
1974 	ipc_entry_t                     dest_entry,
1975 	boolean_t                       *reply_port_semantics_violation)
1976 {
1977 	ipc_entry_bits_t bits;
1978 	ipc_port_t reply_port;
1979 	ipc_port_t dest_port;
1980 
1981 	bits = reply_entry->ie_bits;
1982 	assert(is_active(space));
1983 
	/*
	 * Stage 1: validate the requested disposition against the reply
	 * entry's type bits, mirroring the checks a subsequent
	 * ipc_right_copyin would apply.  Returning FALSE here means the
	 * copyin would fail.
	 */
1984 	switch (reply_type) {
1985 	case MACH_MSG_TYPE_MAKE_SEND:
1986 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1987 			return FALSE;
1988 		}
1989 		break;
1990 
1991 	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
1992 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1993 			return FALSE;
1994 		}
1995 		break;
1996 
1997 	case MACH_MSG_TYPE_MOVE_RECEIVE:
1998 		/* ipc_kmsg_copyin_header already filters it out */
1999 		return FALSE;
2000 
2001 	case MACH_MSG_TYPE_COPY_SEND:
2002 	case MACH_MSG_TYPE_MOVE_SEND:
2003 	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* Dead names pass stage 1; copyin handles them as dead rights. */
2004 		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2005 			break;
2006 		}
2007 
2008 		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2009 			return FALSE;
2010 		}
2011 
2012 		reply_port = ip_object_to_port(reply_entry->ie_object);
2013 		assert(reply_port != IP_NULL);
2014 
2015 		/*
2016 		 * active status peek to avoid checks that will be skipped
2017 		 * on copyin for dead ports.  Lock not held, so will not be
2018 		 * atomic (but once dead, there's no going back).
2019 		 */
2020 		if (!ip_active(reply_port)) {
2021 			break;
2022 		}
2023 
2024 		/*
2025 		 * Can't copyin a send right that is marked immovable. This bit
2026 		 * is set only during port creation and never unset. So it can
2027 		 * be read without a lock.
2028 		 */
2029 		if (ip_is_immovable_send(reply_port)) {
2030 			mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
2031 			return FALSE;
2032 		}
2033 
2034 		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2035 			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2036 				return FALSE;
2037 			}
2038 		} else {
2039 			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2040 				return FALSE;
2041 			}
2042 		}
2043 
2044 		break;
2045 	}
2046 
2047 	default:
2048 		panic("ipc_right_copyin_check: strange rights");
2049 	}
2050 
	/*
	 * Stage 2: reply-port semantics checks.  Port-set entries return
	 * early here and skip them; the actual copyin reports the failure.
	 */
2051 	if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
2052 	    (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
2053 		return TRUE;
2054 	}
2055 
2056 	/* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
2057 	reply_port = ip_object_to_port(reply_entry->ie_object);
2058 	assert(reply_port != IP_NULL);
2059 
2060 	if (ip_active(reply_port)) {
2061 		if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
2062 			return FALSE;
2063 		}
2064 
2065 		/* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
2066 		dest_port = ip_object_to_port(dest_entry->ie_object);
2067 		if (IP_VALID(dest_port) && ip_active(dest_port) && ip_require_reply_port_semantics(dest_port)
2068 		    && !ip_is_reply_port(reply_port) && !ip_is_provisional_reply_port(reply_port)) {
			/* Report the violation to the caller; only raise the guard
			 * exception when enforcement is enabled. */
2069 			*reply_port_semantics_violation = TRUE;
2070 
2071 			if (reply_port_semantics_enabled) {
2072 				mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
2073 			}
2074 		}
2075 	}
2076 
2077 	return TRUE;
2078 }
2079 
2080 /*
2081  *	Routine:	ipc_right_copyin_check_guard_locked
2082  *	Purpose:
2083  *		Check if the port is guarded and the guard
2084  *		value matches the one passed in the arguments.
2085  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2086  *		check if the port is unguarded.
2087  *	Conditions:
2088  *		The port is locked.
2089  *	Returns:
2090  *		KERN_SUCCESS		Port is either unguarded
2091  *					or guarded with expected value
2092  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2093  *					This also raises a EXC_GUARD exception.
2094  */
2095 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2096 ipc_right_copyin_check_guard_locked(
2097 	mach_port_name_t name,
2098 	ipc_port_t port,
2099 	mach_port_context_t context,
2100 	mach_msg_guard_flags_t *guard_flags)
2101 {
2102 	mach_msg_guard_flags_t flags = *guard_flags;
2103 	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2104 		return KERN_SUCCESS;
2105 	} else if (port->ip_guarded && (port->ip_context == context)) {
2106 		return KERN_SUCCESS;
2107 	}
2108 
2109 	/* Incorrect guard; Raise exception */
2110 	mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2111 	return KERN_INVALID_ARGUMENT;
2112 }
2113 
2114 /*
2115  *	Routine:	ipc_right_copyin
2116  *	Purpose:
2117  *		Copyin a capability from a space.
2118  *		If successful, the caller gets a ref
2119  *		for the resulting object, unless it is IO_DEAD,
2120  *		and possibly a send-once right which should
2121  *		be used in a port-deleted notification.
2122  *
2123  *		If deadok is not TRUE, the copyin operation
2124  *		will fail instead of producing IO_DEAD.
2125  *
2126  *		The entry is deallocated if the entry type becomes
2127  *		MACH_PORT_TYPE_NONE.
2128  *	Conditions:
2129  *		The space is write-locked and active.
2130  *	Returns:
2131  *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
2132  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2133  *		KERN_INVALID_CAPABILITY	Trying to move a kobject port or an immovable right,
2134  *								or moving the last ref of a pinned right
2135  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2136  */
2137 
2138 kern_return_t
ipc_right_copyin(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_name,ipc_object_copyin_flags_t flags,ipc_object_t * objectp,ipc_port_t * sorightp,ipc_port_t * releasep,int * assertcntp,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2139 ipc_right_copyin(
2140 	ipc_space_t                space,
2141 	mach_port_name_t           name,
2142 	ipc_entry_t                entry,
2143 	mach_msg_type_name_t       msgt_name,
2144 	ipc_object_copyin_flags_t  flags,
2145 	ipc_object_t               *objectp,
2146 	ipc_port_t                 *sorightp,
2147 	ipc_port_t                 *releasep,
2148 	int                        *assertcntp,
2149 	mach_port_context_t        context,
2150 	mach_msg_guard_flags_t     *guard_flags)
2151 {
2152 	ipc_entry_bits_t bits;
2153 	ipc_port_t port;
2154 	kern_return_t kr;
2155 	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
2156 	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2157 	boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
2158 	boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);
2159 
2160 	*releasep = IP_NULL;
2161 	*assertcntp = 0;
2162 
2163 	bits = entry->ie_bits;
2164 
2165 	assert(is_active(space));
2166 
	/*
	 * Dispatch on the disposition.  Each case validates the entry's
	 * type bits, performs the right transfer under the port lock,
	 * and sets *objectp / *sorightp for the caller.
	 */
2167 	switch (msgt_name) {
2168 	case MACH_MSG_TYPE_MAKE_SEND: {
2169 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2170 			goto invalid_right;
2171 		}
2172 
2173 		port = ip_object_to_port(entry->ie_object);
2174 		assert(port != IP_NULL);
2175 
2176 		if (ip_is_reply_port(port)) {
2177 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2178 			return KERN_INVALID_CAPABILITY;
2179 		}
2180 
2181 		ip_mq_lock(port);
2182 		assert(ip_get_receiver_name(port) == name);
2183 		assert(ip_in_space(port, space));
2184 
2185 		ipc_port_make_send_any_locked(port);
2186 		ip_mq_unlock(port);
2187 
2188 		*objectp = ip_to_object(port);
2189 		*sorightp = IP_NULL;
2190 		break;
2191 	}
2192 
2193 	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
2194 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2195 			goto invalid_right;
2196 		}
2197 
2198 		port = ip_object_to_port(entry->ie_object);
2199 		assert(port != IP_NULL);
2200 
		/* Reply ports may only produce a send-once right when the
		 * caller explicitly allowed it via copyin flags. */
2201 		if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
2202 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2203 			return KERN_INVALID_CAPABILITY;
2204 		}
2205 
2206 		ip_mq_lock(port);
2207 		require_ip_active(port);
2208 		assert(ip_get_receiver_name(port) == name);
2209 		assert(ip_in_space(port, space));
2210 
2211 		ipc_port_make_sonce_locked(port);
2212 		ip_mq_unlock(port);
2213 
2214 		*objectp = ip_to_object(port);
2215 		*sorightp = IP_NULL;
2216 		break;
2217 	}
2218 
2219 	case MACH_MSG_TYPE_MOVE_RECEIVE: {
2220 		bool allow_imm_recv = false;
2221 		ipc_port_t request = IP_NULL;
2222 		waitq_link_list_t free_l = { };
2223 
2224 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2225 			goto invalid_right;
2226 		}
2227 
2228 		port = ip_object_to_port(entry->ie_object);
2229 		assert(port != IP_NULL);
2230 
2231 		ip_mq_lock(port);
2232 		require_ip_active(port);
2233 		assert(ip_get_receiver_name(port) == name);
2234 		assert(ip_in_space(port, space));
2235 
2236 		/*
2237 		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
2238 		 * The ipc_port structure uses the kdata union of kobject and
2239 		 * imp_task exclusively. Thus, general use of a kobject port as
2240 		 * a receive right can cause type confusion in the importance
2241 		 * code.
2242 		 */
2243 		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
2244 			/*
2245 			 * Distinguish an invalid right, e.g., trying to move
2246 			 * a send right as a receive right, from this
2247 			 * situation which is, "This is a valid receive right,
2248 			 * but it's also a kobject and you can't move it."
2249 			 */
2250 			ip_mq_unlock(port);
2251 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
2252 			return KERN_INVALID_CAPABILITY;
2253 		}
2254 
		/* Certain service / libxpc connection ports may be moved even
		 * though marked immovable-receive, if the caller passed the
		 * matching copyin flag. */
2255 		if (port->ip_service_port && port->ip_splabel &&
2256 		    !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
2257 			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
2258 		} else if (ip_is_libxpc_connection_port(port)) {
2259 			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
2260 		}
2261 
2262 		if ((!allow_imm_recv && port->ip_immovable_receive) || port->ip_specialreply) {
2263 			assert(!ip_in_space(port, ipc_space_kernel));
2264 			ip_mq_unlock(port);
2265 			assert(current_task() != kernel_task);
2266 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
2267 			return KERN_INVALID_CAPABILITY;
2268 		}
2269 
2270 		if (guard_flags != NULL) {
2271 			kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
2272 			if (kr != KERN_SUCCESS) {
2273 				ip_mq_unlock(port);
2274 				return kr;
2275 			}
2276 		}
2277 
		/* If a send right coexists with the receive right, the entry
		 * survives (as SEND + EX_RECEIVE); otherwise it is freed. */
2278 		if (bits & MACH_PORT_TYPE_SEND) {
2279 			assert(IE_BITS_TYPE(bits) ==
2280 			    MACH_PORT_TYPE_SEND_RECEIVE);
2281 			assert(IE_BITS_UREFS(bits) > 0);
2282 			assert(port->ip_srights > 0);
2283 
2284 			bits &= ~MACH_PORT_TYPE_RECEIVE;
2285 			bits |= MACH_PORT_TYPE_EX_RECEIVE;
2286 			entry->ie_bits = bits;
2287 			ipc_hash_insert(space, ip_to_object(port),
2288 			    name, entry);
2289 			ip_reference(port);
2290 			ipc_entry_modified(space, name, entry);
2291 		} else {
2292 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
2293 			assert(IE_BITS_UREFS(bits) == 0);
2294 
2295 			request = ipc_right_request_cancel_macro(space, port,
2296 			    name, entry);
2297 			assert(!ip_is_pinned(port));
2298 			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
2299 		}
2300 
2301 		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
2302 		(void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
2303 		if (guard_flags != NULL) {
2304 			/* this flag will be cleared during copyout */
2305 			*guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
2306 		}
2307 
2308 #if IMPORTANCE_INHERITANCE
2309 		/*
2310 		 * Account for boosts the current task is going to lose when
2311 		 * copying this right in.  Tempowner ports have either not
2312 		 * been accounting to any task (and therefore are already in
2313 		 * "limbo" state w.r.t. assertions) or to some other specific
2314 		 * task. As we have no way to drop the latter task's assertions
2315 		 * here, We'll deduct those when we enqueue it on its
2316 		 * destination port (see ipc_port_check_circularity()).
2317 		 */
2318 		if (port->ip_tempowner == 0) {
2319 			assert(IIT_NULL == ip_get_imp_task(port));
2320 
2321 			/* ports in limbo have to be tempowner */
2322 			port->ip_tempowner = 1;
2323 			*assertcntp = port->ip_impcount;
2324 		}
2325 #endif /* IMPORTANCE_INHERITANCE */
2326 
2327 		ip_mq_unlock(port);
2328 
2329 		/*
2330 		 * This is unfortunate to do this while the space is locked,
2331 		 * but plumbing it through all callers really hurts.
2332 		 */
2333 		waitq_link_free_list(WQT_PORT_SET, &free_l);
2334 
2335 		*objectp = ip_to_object(port);
2336 		*sorightp = request;
2337 		break;
2338 	}
2339 
2340 	case MACH_MSG_TYPE_COPY_SEND: {
2341 		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2342 			goto copy_dead;
2343 		}
2344 
2345 		/* allow for dead send-once rights */
2346 
2347 		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2348 			goto invalid_right;
2349 		}
2350 
2351 		assert(IE_BITS_UREFS(bits) > 0);
2352 
2353 		port = ip_object_to_port(entry->ie_object);
2354 		assert(port != IP_NULL);
2355 
2356 		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2357 			bits = entry->ie_bits;
2358 			*releasep = port;
2359 			goto copy_dead;
2360 		}
2361 		/* port is locked and active */
2362 
2363 		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2364 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2365 			assert(port->ip_sorights > 0);
2366 
2367 			ip_mq_unlock(port);
2368 			goto invalid_right;
2369 		}
2370 
2371 		if (ip_is_reply_port(port)) {
2372 			ip_mq_unlock(port);
2373 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2374 			return KERN_INVALID_CAPABILITY;
2375 		}
2376 
2377 		if (!allow_imm_send && ip_is_immovable_send(port)) {
2378 			ip_mq_unlock(port);
2379 			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2380 			return KERN_INVALID_CAPABILITY;
2381 		}
2382 
2383 		ipc_port_copy_send_any_locked(port);
2384 		ip_mq_unlock(port);
2385 
2386 		*objectp = ip_to_object(port);
2387 		*sorightp = IP_NULL;
2388 		break;
2389 	}
2390 
2391 	case MACH_MSG_TYPE_MOVE_SEND: {
2392 		ipc_port_t request = IP_NULL;
2393 
2394 		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2395 			goto move_dead;
2396 		}
2397 
2398 		/* allow for dead send-once rights */
2399 
2400 		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2401 			goto invalid_right;
2402 		}
2403 
2404 		assert(IE_BITS_UREFS(bits) > 0);
2405 
2406 		port = ip_object_to_port(entry->ie_object);
2407 		assert(port != IP_NULL);
2408 
2409 		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2410 			bits = entry->ie_bits;
2411 			*releasep = port;
2412 			goto move_dead;
2413 		}
2414 		/* port is locked and active */
2415 
2416 		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2417 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2418 			assert(port->ip_sorights > 0);
2419 			ip_mq_unlock(port);
2420 			goto invalid_right;
2421 		}
2422 
2423 		if (ip_is_reply_port(port)) {
2424 			ip_mq_unlock(port);
2425 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2426 			return KERN_INVALID_CAPABILITY;
2427 		}
2428 
2429 		if (!allow_imm_send && ip_is_immovable_send(port)) {
2430 			ip_mq_unlock(port);
2431 			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2432 			return KERN_INVALID_CAPABILITY;
2433 		}
2434 
		/* Last uref: the entry loses its send right entirely; otherwise
		 * this degenerates into a copy-send plus a uref decrement. */
2435 		if (IE_BITS_UREFS(bits) == 1) {
2436 			assert(port->ip_srights > 0);
2437 			if (bits & MACH_PORT_TYPE_RECEIVE) {
2438 				assert(ip_get_receiver_name(port) == name);
2439 				assert(ip_in_space(port, space));
2440 				assert(IE_BITS_TYPE(bits) ==
2441 				    MACH_PORT_TYPE_SEND_RECEIVE);
2442 				assert(!ip_is_pinned(port));
2443 
2444 				entry->ie_bits = bits & ~
2445 				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
2446 				ipc_entry_modified(space, name, entry);
2447 				ip_reference(port);
2448 			} else {
2449 				assert(IE_BITS_TYPE(bits) ==
2450 				    MACH_PORT_TYPE_SEND);
2451 
2452 				if (ip_is_pinned(port)) {
2453 					ip_mq_unlock(port);
2454 					mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
2455 					return KERN_INVALID_CAPABILITY;
2456 				}
2457 
2458 				request = ipc_right_request_cancel_macro(space, port,
2459 				    name, entry);
2460 				ipc_hash_delete(space, ip_to_object(port),
2461 				    name, entry);
2462 				ipc_entry_dealloc(space, ip_to_object(port),
2463 				    name, entry);
2464 				/* transfer entry's reference to caller */
2465 			}
2466 		} else {
2467 			ipc_port_copy_send_any_locked(port);
2468 			/* if urefs are pegged due to overflow, leave them pegged */
2469 			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2470 				entry->ie_bits = bits - 1; /* decrement urefs */
2471 			}
2472 			ipc_entry_modified(space, name, entry);
2473 		}
2474 
2475 		ip_mq_unlock(port);
2476 		*objectp = ip_to_object(port);
2477 		*sorightp = request;
2478 		break;
2479 	}
2480 
2481 	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
2482 		ipc_port_t request;
2483 
2484 		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2485 			goto move_dead;
2486 		}
2487 
2488 		/* allow for dead send rights */
2489 
2490 		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2491 			goto invalid_right;
2492 		}
2493 
2494 		assert(IE_BITS_UREFS(bits) > 0);
2495 
2496 		port = ip_object_to_port(entry->ie_object);
2497 		assert(port != IP_NULL);
2498 
2499 		if (ipc_right_check(space, port, name, entry, flags)) {
2500 			bits = entry->ie_bits;
2501 			*releasep = port;
2502 			goto move_dead;
2503 		}
2504 		/*
2505 		 * port is locked, but may not be active:
2506 		 * Allow copyin of inactive ports with no dead name request and treat it
2507 		 * as if the copyin of the port was successful and port became inactive
2508 		 * later.
2509 		 */
2510 
2511 		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2512 			assert(bits & MACH_PORT_TYPE_SEND);
2513 			assert(port->ip_srights > 0);
2514 
2515 			ip_mq_unlock(port);
2516 			goto invalid_right;
2517 		}
2518 
2519 		if (ip_is_reply_port(port) && !allow_reply_move_so) {
2520 			ip_mq_unlock(port);
2521 			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2522 			return KERN_INVALID_CAPABILITY;
2523 		}
2524 
2525 		if (!allow_imm_send && ip_is_immovable_send(port)) {
2526 			ip_mq_unlock(port);
2527 			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2528 			return KERN_INVALID_CAPABILITY;
2529 		}
2530 
2531 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2532 		assert(IE_BITS_UREFS(bits) == 1);
2533 		assert(port->ip_sorights > 0);
2534 
		/* A send-once right is always the entry's only right:
		 * cancel any dead-name request and free the entry. */
2535 		request = ipc_right_request_cancel_macro(space, port, name, entry);
2536 		assert(!ip_is_pinned(port));
2537 		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
2538 		ip_mq_unlock(port);
2539 
2540 		*objectp = ip_to_object(port);
2541 		*sorightp = request;
2542 		break;
2543 	}
2544 
2545 	default:
2546 invalid_right:
2547 		return KERN_INVALID_RIGHT;
2548 	}
2549 
2550 	return KERN_SUCCESS;
2551 
	/* COPY_SEND of a dead name: caller gets IO_DEAD, entry is untouched. */
2552 copy_dead:
2553 	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
2554 	assert(IE_BITS_UREFS(bits) > 0);
2555 	assert(entry->ie_request == IE_REQ_NONE);
2556 	assert(entry->ie_object == 0);
2557 
2558 	if (!deadok) {
2559 		goto invalid_right;
2560 	}
2561 
2562 	*objectp = IO_DEAD;
2563 	*sorightp = IP_NULL;
2564 	return KERN_SUCCESS;
2565 
	/* MOVE_SEND(_ONCE) of a dead name: consume one uref, freeing the
	 * entry when that was the last one. */
2566 move_dead:
2567 	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
2568 	assert(IE_BITS_UREFS(bits) > 0);
2569 	assert(entry->ie_request == IE_REQ_NONE);
2570 	assert(entry->ie_object == IO_NULL);
2571 
2572 	if (!deadok) {
2573 		goto invalid_right;
2574 	}
2575 
2576 	if (IE_BITS_UREFS(bits) == 1) {
2577 		ipc_entry_dealloc(space, IO_NULL, name, entry);
2578 	} else {
2579 		/* if urefs are pegged due to overflow, leave them pegged */
2580 		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2581 			entry->ie_bits = bits - 1; /* decrement urefs */
2582 		}
2583 		ipc_entry_modified(space, name, entry);
2584 	}
2585 	*objectp = IO_DEAD;
2586 	*sorightp = IP_NULL;
2587 	return KERN_SUCCESS;
2588 }
2589 
2590 /*
2591  *	Routine:	ipc_right_copyin_two_move_sends
2592  *	Purpose:
2593  *		Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2594  *		and deadok == FALSE, except that this moves two
2595  *		send rights at once.
2596  *	Conditions:
2597  *		The space is write-locked and active.
2598  *		The object is returned with two refs/send rights.
2599  *	Returns:
2600  *		KERN_SUCCESS					Acquired an object.
2601  *		KERN_INVALID_RIGHT				Name doesn't denote correct right.
2602  *		KERN_INVALID_CAPABILITY			Name does not allow copyin move send capability.
2603  */
2604 static
2605 kern_return_t
ipc_right_copyin_two_move_sends(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,ipc_object_t * objectp,ipc_port_t * sorightp,ipc_port_t * releasep)2606 ipc_right_copyin_two_move_sends(
2607 	ipc_space_t             space,
2608 	mach_port_name_t        name,
2609 	ipc_entry_t             entry,
2610 	ipc_object_t            *objectp,
2611 	ipc_port_t              *sorightp,
2612 	ipc_port_t              *releasep)
2613 {
2614 	ipc_entry_bits_t bits;
2615 	mach_port_urefs_t urefs;
2616 	ipc_port_t port;
2617 	ipc_port_t request = IP_NULL;
2618 
2619 	*releasep = IP_NULL;
2620 
2621 	assert(is_active(space));
2622 
2623 	bits = entry->ie_bits;
2624 
	/* Moving two send rights requires a send right with at least 2 urefs. */
2625 	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2626 		goto invalid_right;
2627 	}
2628 
2629 	urefs = IE_BITS_UREFS(bits);
2630 	if (urefs < 2) {
2631 		goto invalid_right;
2632 	}
2633 
2634 	port = ip_object_to_port(entry->ie_object);
2635 	assert(port != IP_NULL);
2636 
2637 	if (ip_is_reply_port(port)) {
2638 		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2639 		return KERN_INVALID_CAPABILITY;
2640 	}
2641 
	/* If the port died, hand it back via *releasep and fail. */
2642 	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2643 		*releasep = port;
2644 		goto invalid_right;
2645 	}
2646 	/* port is locked and active */
2647 
2648 	/*
2649 	 * To reach here we either have:
2650 	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
2651 	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
2652 	 * that we can't use MOVE_SEND on reply port marked as immovable send right.
2653 	 */
2654 	assert(!ip_is_immovable_send(port));
2655 	assert(!ip_is_pinned(port));
2656 
2657 	if (urefs > 2) {
2658 		/*
2659 		 * We are moving 2 urefs as naked send rights, which is decomposed as:
2660 		 * - two copy sends (which doesn't affect the make send count)
2661 		 * - decrementing the local urefs twice.
2662 		 */
2663 		ipc_port_copy_send_any_locked(port);
2664 		ipc_port_copy_send_any_locked(port);
2665 		/* if urefs are pegged due to overflow, leave them pegged */
2666 		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2667 			entry->ie_bits = bits - 2; /* decrement urefs */
2668 		}
2669 		ipc_entry_modified(space, name, entry);
2670 	} else {
2671 		/*
2672 		 * We have exactly 2 send rights for this port in this space,
2673 		 * which means that we will liberate the naked send right held
2674 		 * by this entry.
2675 		 *
2676 		 * However refcounting rules around entries are that naked send rights
2677 		 * on behalf of spaces do not have an associated port reference,
2678 		 * so we need to donate one ...
2679 		 */
2680 		ipc_port_copy_send_any_locked(port);
2681 
2682 		if (bits & MACH_PORT_TYPE_RECEIVE) {
2683 			assert(ip_get_receiver_name(port) == name);
2684 			assert(ip_in_space(port, space));
2685 			assert(IE_BITS_TYPE(bits) ==
2686 			    MACH_PORT_TYPE_SEND_RECEIVE);
2687 
2688 			/* ... that we inject manually when the entry stays alive */
2689 			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
2690 			ipc_entry_modified(space, name, entry);
2691 			ip_reference(port);
2692 		} else {
2693 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
2694 
2695 			/* ... that we steal from the entry when it dies */
2696 			request = ipc_right_request_cancel_macro(space, port,
2697 			    name, entry);
2698 			ipc_hash_delete(space, ip_to_object(port),
2699 			    name, entry);
2700 			ipc_entry_dealloc(space, ip_to_object(port),
2701 			    name, entry);
2702 		}
2703 	}
2704 
2705 	ip_mq_unlock(port);
2706 
2707 	*objectp = ip_to_object(port);
2708 	*sorightp = request;
2709 	return KERN_SUCCESS;
2710 
2711 invalid_right:
2712 	return KERN_INVALID_RIGHT;
2713 }
2714 
2715 
2716 /*
2717  *	Routine:	ipc_right_copyin_two
2718  *	Purpose:
2719  *		Like ipc_right_copyin with two dispositions,
2720  *		each of which results in a send or send-once right,
2721  *		and deadok = FALSE.
2722  *	Conditions:
2723  *		The space is write-locked and active.
2724  *		The object is returned with two refs/rights.
2725  *		Msgt_one refers to the dest_type.
2726  *      Copyin flags are currently only used in the context of send once rights.
2727  *	Returns:
2728  *		KERN_SUCCESS		Acquired an object.
2729  *		KERN_INVALID_RIGHT	Name doesn't denote correct right(s).
2730  *		KERN_INVALID_CAPABILITY	Name doesn't denote correct right for msgt_two.
2731  */
2732 kern_return_t
ipc_right_copyin_two(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_one,mach_msg_type_name_t msgt_two,ipc_object_copyin_flags_t flags_one,ipc_object_copyin_flags_t flags_two,ipc_object_t * objectp,ipc_port_t * sorightp,ipc_port_t * releasep)2733 ipc_right_copyin_two(
2734 	ipc_space_t               space,
2735 	mach_port_name_t          name,
2736 	ipc_entry_t               entry,
2737 	mach_msg_type_name_t      msgt_one,
2738 	mach_msg_type_name_t      msgt_two,
2739 	ipc_object_copyin_flags_t flags_one, /* Used only for send once rights. */
2740 	ipc_object_copyin_flags_t flags_two, /* Used only for send once rights. */
2741 	ipc_object_t              *objectp,
2742 	ipc_port_t                *sorightp,
2743 	ipc_port_t                *releasep)
2744 {
2745 	kern_return_t kr;
	/* Only send-type dispositions are allowed, so no importance
	 * assertions should transfer (checked via the asserts below). */
2746 	int assertcnt = 0;
2747 
2748 	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
2749 	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));
2750 
2751 	/*
2752 	 *	This is a little tedious to make atomic, because
2753 	 *	there are 25 combinations of valid dispositions.
2754 	 *	However, most are easy.
2755 	 */
2756 
2757 	/*
2758 	 *	If either is move-sonce, then there must be an error.
2759 	 */
2760 	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
2761 	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2762 		return KERN_INVALID_RIGHT;
2763 	}
2764 
2765 	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
2766 	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
2767 	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
2768 	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
2769 		/*
2770 		 *	One of the dispositions needs a receive right.
2771 		 *
2772 		 *	If the copyin below succeeds, we know the receive
2773 		 *	right is there (because the pre-validation of
2774 		 *	the second disposition already succeeded in our
2775 		 *	caller).
2776 		 *
2777 		 *	Hence the port is not in danger of dying.
2778 		 */
2779 		ipc_object_t object_two;
2780 
2781 		flags_one = flags_one | IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
2782 		kr = ipc_right_copyin(space, name, entry,
2783 		    msgt_one, flags_one,
2784 		    objectp, sorightp, releasep,
2785 		    &assertcnt, 0, NULL);
2786 		assert(assertcnt == 0);
2787 		if (kr != KERN_SUCCESS) {
2788 			return kr;
2789 		}
2790 
2791 		assert(IO_VALID(*objectp));
2792 		assert(*sorightp == IP_NULL);
2793 		assert(*releasep == IP_NULL);
2794 
2795 		/*
2796 		 *	Now copyin the second (previously validated)
2797 		 *	disposition.  The result can't be a dead port,
2798 		 *	as no valid disposition can make us lose our
2799 		 *	receive right.
2800 		 */
2801 		kr = ipc_right_copyin(space, name, entry,
2802 		    msgt_two, flags_two,
2803 		    &object_two, sorightp, releasep,
2804 		    &assertcnt, 0, NULL);
2805 		assert(assertcnt == 0);
2806 		assert(kr == KERN_SUCCESS);
2807 		assert(*sorightp == IP_NULL);
2808 		assert(*releasep == IP_NULL);
2809 		assert(object_two == *objectp);
2810 		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
2811 	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
2812 	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
2813 		/*
2814 		 *	This is an easy case.  Just use our
2815 		 *	handy-dandy special-purpose copyin call
2816 		 *	to get two send rights for the price of one.
2817 		 */
2818 		kr = ipc_right_copyin_two_move_sends(space, name, entry,
2819 		    objectp, sorightp,
2820 		    releasep);
2821 		if (kr != KERN_SUCCESS) {
2822 			return kr;
2823 		}
2824 	} else {
2825 		mach_msg_type_name_t msgt_name;
2826 
2827 		/*
2828 		 *	Must be either a single move-send and a
2829 		 *	copy-send, or two copy-send dispositions.
2830 		 *	Use the disposition with the greatest side
2831 		 *	effects for the actual copyin - then just
2832 		 *	duplicate the send right you get back.
2833 		 */
2834 		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
2835 		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
2836 			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
2837 		} else {
2838 			msgt_name = MACH_MSG_TYPE_COPY_SEND;
2839 		}
2840 
2841 		kr = ipc_right_copyin(space, name, entry,
2842 		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
2843 		    objectp, sorightp, releasep,
2844 		    &assertcnt, 0, NULL);
2845 		assert(assertcnt == 0);
2846 		if (kr != KERN_SUCCESS) {
2847 			return kr;
2848 		}
2849 
2850 		/*
2851 		 *	Copy the right we got back.  If it is dead now,
2852 		 *	that's OK.  Neither right will be usable to send
2853 		 *	a message anyway.
2854 		 *
2855 		 *	Note that the port could be concurrently moved
2856 		 *	outside of the space as a descriptor, and then
2857 		 *	destroyed, which would not happen under the space lock.
2858 		 *
2859 		 *	It means we can't use ipc_port_copy_send() which
2860 		 *	may fail if the port died.
2861 		 */
2862 		io_lock(*objectp);
2863 		ipc_port_copy_send_any_locked(ip_object_to_port(*objectp));
2864 		io_unlock(*objectp);
2865 	}
2866 
2867 	return KERN_SUCCESS;
2868 }
2869 
2870 
2871 /*
2872  *	Routine:	ipc_right_copyout
2873  *	Purpose:
2874  *		Copyout a capability to a space.
2875  *		If successful, consumes a ref for the object.
2876  *
2877  *		Always succeeds when given a newly-allocated entry,
2878  *		because user-reference overflow isn't a possibility.
2879  *
2880  *		If copying out the object would cause the user-reference
2881  *		count in the entry to overflow, then the user-reference
2882  *		count is left pegged to its maximum value and the copyout
2883  *		succeeds anyway.
2884  *	Conditions:
2885  *		The space is write-locked and active.
2886  *		The object is locked and active.
2887  *		The object is unlocked; the space isn't.
2888  *	Returns:
2889  *		KERN_SUCCESS		Copied out capability.
2890  */
2891 
kern_return_t
ipc_right_copyout(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	ipc_object_t            object)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	/* Snapshot the entry's type/uref bits; updated values are written back below. */
	bits = entry->ie_bits;

	/* Caller hands us a locked, active port already bound to this entry. */
	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = ip_object_to_port(object);

	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		/*
		 * Pin the right in this space.  Only legal for an immovable
		 * send right landing in an immovable, pinned task, and the
		 * port must not already be pinned.
		 */
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		/* A send-once right always lands in a freshly-allocated entry. */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (port->ip_specialreply) {
			/* Link the special reply port with the receiving thread's knote/workloop. */
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		/* The object ref donated by the caller is kept by the entry. */
		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		/*
		 * Three sub-cases: the entry already holds a send right,
		 * the entry holds the receive right, or the entry is new.
		 * In the first two the donated send right/ref are redundant
		 * and consumed; in the last they are transferred to the entry.
		 */
		if (bits & MACH_PORT_TYPE_SEND) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			/* the entry already accounts for one of the srights */
			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				port->ip_srights--;
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			port->ip_srights--;
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			/* make the name findable by reverse (object -> name) lookup */
			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		/* remember the importance assertions carried by the in-transit port */
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		/* an in-transit receive right has no made-send count and is in no space */
		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		/* the port it was enqueued on while in transit, if any */
		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
			/*
			 * Receiver asked for a guarded, immovable receive right:
			 * guard it (non-strict) with the receiving msg address as
			 * the context, and report the context/flags back out.
			 */
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_msg_addr;
			*context = port->ip_context;
			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (ip_is_libxpc_connection_port(port)) {
			/*
			 * There are 3 ways to reach here.
			 * 1. A libxpc client successfully sent this receive right to a named service
			 *    and we are copying out in that service's ipc space.
			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
			 *
			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
			 */
			port->ip_immovable_receive = 1;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/*
			 * This flag gets reset during all 3 ways described above for libxpc connection port.
			 * The only difference is launchd acts as an initiator instead of a libxpc client.
			 */
			if (service_port_defense_enabled) {
				port->ip_immovable_receive = 1;
			}

			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesnt end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}

			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			/* on RELEASE, only enforce the name/guard restore for initproc (launchd) */
			if (get_bsdtask_info(current_task()) != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				/* restore the strict guard launchd originally had on the port */
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		/* The entry may hold a send right, but never an existing receive right. */
		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		/* Propagate a pending sync bootstrap checkin unless this is a pseudo-receive. */
		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * The entry previously held a send right, so it already
			 * had its own object ref; the donated ref is redundant.
			 */
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			/* name no longer needs reverse lookup: it now names the receive right */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port.  They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		panic("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
3137