xref: /xnu-11215.1.10/osfmk/ipc/ipc_right.c (revision 8d741a5de7ff4191bf97d57b9f54c2f6d4a15585)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <libkern/coreanalytics/coreanalytics.h>
82 #include <ipc/port.h>
83 #include <ipc/ipc_entry.h>
84 #include <ipc/ipc_space.h>
85 #include <ipc/ipc_object.h>
86 #include <ipc/ipc_hash.h>
87 #include <ipc/ipc_port.h>
88 #include <ipc/ipc_pset.h>
89 #include <ipc/ipc_right.h>
90 #include <ipc/ipc_notify.h>
91 #include <ipc/ipc_importance.h>
92 #include <ipc/ipc_service_port.h>
93 #include <security/mac_mach_internal.h>
94 
95 extern struct proc *current_proc(void);
96 extern int csproc_hardened_runtime(struct proc* p);
97 
98 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
99 
100 TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
101 static TUNABLE(bool, reply_port_semantics, "reply_port_semantics", true);
102 
/*
 *	Routine:	ipc_right_lookup_read
 *	Purpose:
 *		Finds an entry in a space, given the name.
 *
 *		The lookup runs under SMR (safe memory reclamation)
 *		instead of taking the space lock, so it never blocks
 *		behind concurrent space mutations.
 *	Conditions:
 *		Nothing locked.
 *		If an object is found, it is locked and active,
 *		and *bitsp / *objectp are filled in.
 *	Returns:
 *		KERN_SUCCESS		Found an entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t       *bitsp,
	ipc_object_t           *objectp)
{
	mach_port_index_t index;
	ipc_entry_table_t table;
	ipc_entry_t entry;
	ipc_object_t object;
	kern_return_t kr;

	/* index 0 (MACH_PORT_NULL) never names a valid entry */
	index = MACH_PORT_INDEX(name);
	if (__improbable(index == 0)) {
		*bitsp = 0;
		*objectp = IO_NULL;
		return KERN_INVALID_NAME;
	}

	smr_ipc_enter();

	/*
	 * Acquire a (possibly stale) pointer to the table,
	 * and guard it so that it can't be deallocated while we use it.
	 *
	 * smr_ipc_enter() has the property that it strongly serializes
	 * after any store-release. This is important because it means that if
	 * one considers this (broken) userspace usage:
	 *
	 * Thread 1:
	 *   - makes a semaphore, gets name 0x1003
	 *   - stores that name to a global `sema` in userspace
	 *
	 * Thread 2:
	 *   - spins to observe `sema` becoming non 0
	 *   - calls semaphore_wait() on 0x1003
	 *
	 * Then, because in order to return 0x1003 this thread issued
	 * a store-release (when calling is_write_unlock()),
	 * then this smr_entered_load() can't possibly observe a table
	 * pointer that is older than the one that was current when the
	 * semaphore was made.
	 *
	 * This fundamental property allows us to never loop.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		/* a NULL table means the space has been terminated */
		kr = KERN_INVALID_TASK;
		goto out_put;
	}
	entry = ipc_entry_table_get(table, index);
	if (__improbable(entry == NULL)) {
		/* index is out of bounds for this table */
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Note: this should be an atomic load, but PAC and atomics
	 *       don't interact well together.
	 */
	object = entry->ie_volatile_object;

	/*
	 * Attempt to lock an object that lives in this entry.
	 * It might fail or be a completely different object by now.
	 *
	 * Make sure that acquiring the lock is fully ordered after any
	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
	 * This allows us to always reliably observe space termination below.
	 */
	os_atomic_barrier_before_lock_acquire();
	if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Now that we hold the object lock, we are preventing any entry
	 * in this space for this object to be mutated.
	 *
	 * If the space didn't grow after we acquired our hazardous reference,
	 * and before a mutation of the entry, then holding the object lock
	 * guarantees we will observe the truth of ie_bits, ie_object and
	 * ie_request (those are always mutated with the object lock held).
	 *
	 * However this ordering is problematic:
	 * - [A]cquisition of the table pointer
	 * - [G]rowth of the space (making the table pointer in [A] stale)
	 * - [M]utation of the entry
	 * - [L]ocking of the object read through [A].
	 *
	 * The space lock is held for both [G] and [M], and the object lock
	 * is held for [M], which means that once we lock the object we can
	 * observe if [G] happened by reloading the table pointer.
	 *
	 * We might still fail to observe any growth operation that happened
	 * after the last mutation of this object's entry, because holding
	 * an object lock doesn't guarantee anything about the liveness
	 * of the space table pointer. This is not a problem at all: by
	 * definition, those didn't affect the state of the entry.
	 *
	 * TODO: a data-structure where the entries are grown by "slabs",
	 *       would allow for the address of an ipc_entry_t to never
	 *       change once it exists in a space and would avoid a reload
	 *       (as well as making space growth faster).
	 *       We however still need to check for termination.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		kr = KERN_INVALID_TASK;
		goto out_put_unlock;
	}

	/*
	 * Tables never shrink so we don't need to validate the length twice.
	 */
	entry = ipc_entry_table_get_nocheck(table, index);

	/*
	 * Now that we hold the lock and have a "fresh enough" table pointer,
	 * validate if this entry is what we think it is.
	 *
	 * To the risk of being repetitive, we still need to protect
	 * those accesses under SMR, because subsequent
	 * table growths might retire the memory. However we know
	 * those growths will have left our entry unchanged.
	 */
	if (__improbable(entry->ie_object != object)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* reject stale names (generation mismatch) and empty entries */
	ipc_entry_bits_t bits = entry->ie_bits;
	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
	    IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* Done with hazardous accesses to the table */
	smr_ipc_leave();

	*bitsp = bits;
	*objectp = object;
	return KERN_SUCCESS;

out_put_unlock:
	ipc_object_unlock(object);
out_put:
	smr_ipc_leave();
	return kr;
}
268 
269 /*
270  *	Routine:	ipc_right_lookup_write
271  *	Purpose:
272  *		Finds an entry in a space, given the name.
273  *	Conditions:
274  *		Nothing locked.  If successful, the space is write-locked.
275  *	Returns:
276  *		KERN_SUCCESS		Found an entry.
277  *		KERN_INVALID_TASK	The space is dead.
278  *		KERN_INVALID_NAME	Name doesn't exist in space.
279  */
280 
281 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)282 ipc_right_lookup_write(
283 	ipc_space_t             space,
284 	mach_port_name_t        name,
285 	ipc_entry_t             *entryp)
286 {
287 	ipc_entry_t entry;
288 
289 	assert(space != IS_NULL);
290 
291 	is_write_lock(space);
292 
293 	if (!is_active(space)) {
294 		is_write_unlock(space);
295 		return KERN_INVALID_TASK;
296 	}
297 
298 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
299 		is_write_unlock(space);
300 		return KERN_INVALID_NAME;
301 	}
302 
303 	*entryp = entry;
304 	return KERN_SUCCESS;
305 }
306 
307 /*
308  *	Routine:	ipc_right_lookup_two_write
309  *	Purpose:
310  *		Like ipc_right_lookup except that it returns two
311  *		entries for two different names that were looked
312  *		up under the same space lock.
313  *	Conditions:
314  *		Nothing locked.  If successful, the space is write-locked.
315  *	Returns:
316  *		KERN_INVALID_TASK	The space is dead.
317  *		KERN_INVALID_NAME	Name doesn't exist in space.
318  */
319 
320 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)321 ipc_right_lookup_two_write(
322 	ipc_space_t             space,
323 	mach_port_name_t        name1,
324 	ipc_entry_t             *entryp1,
325 	mach_port_name_t        name2,
326 	ipc_entry_t             *entryp2)
327 {
328 	ipc_entry_t entry1;
329 	ipc_entry_t entry2;
330 
331 	assert(space != IS_NULL);
332 
333 	is_write_lock(space);
334 
335 	if (!is_active(space)) {
336 		is_write_unlock(space);
337 		return KERN_INVALID_TASK;
338 	}
339 
340 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
341 		is_write_unlock(space);
342 		mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
343 		return KERN_INVALID_NAME;
344 	}
345 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
346 		is_write_unlock(space);
347 		mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
348 		return KERN_INVALID_NAME;
349 	}
350 	*entryp1 = entry1;
351 	*entryp2 = entry2;
352 	return KERN_SUCCESS;
353 }
354 
355 /*
356  *	Routine:	ipc_right_reverse
357  *	Purpose:
358  *		Translate (space, object) -> (name, entry).
359  *		Only finds send/receive rights.
360  *		Returns TRUE if an entry is found; if so,
361  *		the object active.
362  *	Conditions:
363  *		The space must be locked (read or write) and active.
364  *		The port is locked and active
365  */
366 
367 bool
ipc_right_reverse(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)368 ipc_right_reverse(
369 	ipc_space_t             space,
370 	ipc_object_t            object,
371 	mach_port_name_t        *namep,
372 	ipc_entry_t             *entryp)
373 {
374 	ipc_port_t port;
375 	mach_port_name_t name;
376 	ipc_entry_t entry;
377 
378 	/* would switch on io_otype to handle multiple types of object */
379 
380 	assert(is_active(space));
381 	assert(io_otype(object) == IOT_PORT);
382 
383 	port = ip_object_to_port(object);
384 	require_ip_active(port);
385 
386 	ip_mq_lock_held(port);
387 
388 	if (ip_in_space(port, space)) {
389 		name = ip_get_receiver_name(port);
390 		assert(name != MACH_PORT_NULL);
391 
392 		entry = ipc_entry_lookup(space, name);
393 
394 		assert(entry != IE_NULL);
395 		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
396 		assert(port == ip_object_to_port(entry->ie_object));
397 
398 		*namep = name;
399 		*entryp = entry;
400 		return true;
401 	}
402 
403 	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
404 		entry = *entryp;
405 		assert(entry != IE_NULL);
406 		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
407 		assert(port == ip_object_to_port(entry->ie_object));
408 
409 		return true;
410 	}
411 
412 	return false;
413 }
414 
415 /*
416  *	Routine:	ipc_right_request_cancel
417  *	Purpose:
418  *		Cancel a notification request and return the send-once right.
419  *		Afterwards, entry->ie_request == 0.
420  *	Conditions:
421  *		The space must be write-locked; the port must be locked.
422  *		The port must be active.
423  */
424 
425 static inline ipc_port_t
ipc_right_request_cancel(ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)426 ipc_right_request_cancel(
427 	ipc_port_t              port,
428 	mach_port_name_t        name,
429 	ipc_entry_t             entry)
430 {
431 	ipc_port_request_index_t request = entry->ie_request;
432 
433 	if (request != IE_REQ_NONE) {
434 		entry->ie_request = IE_REQ_NONE;
435 		return ipc_port_request_cancel(port, name, request);
436 	}
437 	return IP_NULL;
438 }
439 
/*
 *	Routine:	ipc_right_request_alloc
 *	Purpose:
 *		Make a dead-name request, returning the previously
 *		registered send-once right.  If notify is IP_NULL,
 *		just cancels the previously registered request.
 *
 *	Conditions:
 *		Nothing locked.  May allocate memory.
 *		Only consumes/returns refs if successful.
 *	Returns:
 *		KERN_SUCCESS		Made/canceled dead-name request.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
 *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
 *			immediate is FALSE or notify is IP_NULL.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: we may have to drop all locks to grow the port's
	 * request table, after which the lookup must be redone from scratch.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && entry->ie_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/*
				 * No matter what, we need to cancel any
				 * previous request.
				 */
				previous = ipc_right_request_cancel(port, name, entry);

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) ||
				    !ip_full(port))) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there was a previous request, freeing it
				 * above guarantees that the subsequent
				 * allocation will find a slot and succeed,
				 * thus assuring an atomic swap.
				 */
#if IMPORTANCE_INHERITANCE
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					/*
					 * Table full: drop the space lock, grow
					 * the request table, and retry the whole
					 * lookup (state may have changed).
					 */
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				/*
				 * First send-possible request armed on this port:
				 * possibly boost it with an importance donation.
				 * On TRUE, ipc_port_importance_delta() consumed
				 * the port lock; otherwise we unlock here.
				 */
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* balance the ref left behind by ipc_right_check() */
			if (port != IP_NULL) {
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	*previousp = previous;
	return KERN_SUCCESS;
}
620 
621 /*
622  *	Routine:	ipc_right_inuse
623  *	Purpose:
624  *		Check if an entry is being used.
625  *		Returns TRUE if it is.
626  *	Conditions:
627  *		The space is write-locked and active.
628  */
629 
630 bool
ipc_right_inuse(ipc_entry_t entry)631 ipc_right_inuse(
632 	ipc_entry_t entry)
633 {
634 	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
635 }
636 
/*
 *	Routine:	ipc_right_check
 *	Purpose:
 *		Check if the port has died.  If it has,
 *              and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
 *              passed and it is not a send once right then
 *		clean up the entry and return TRUE.
 *	Conditions:
 *		The space is write-locked; the port is not locked.
 *		If returns FALSE, the port is also locked.
 *		Otherwise, entry is converted to a dead name.
 *
 *		Caller is responsible for a reference to port if it
 *		had died (returns TRUE).
 */

boolean_t
ipc_right_check(
	ipc_space_t              space,
	ipc_port_t               port,
	mach_port_name_t         name,
	ipc_entry_t              entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		/* port still usable for this caller: return with it locked */
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* drop the entry's contribution to the port's right counts */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		ip_srights_dec(port);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		ip_sorights_dec(port);
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* caller now owns the port reference the entry used to hold */
	return TRUE;
}
735 
/*
 *	Routine:	ipc_right_terminate
 *	Purpose:
 *		Cleans up an entry in a terminated space.
 *		The entry isn't deallocated or removed
 *		from reverse hash tables.
 *	Conditions:
 *		The space is dead and unlocked.
 */

void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type   = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() does, as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	entry->ie_object = IO_NULL;
	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* dead names hold no object and no request: nothing to do */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		if (!ip_active(port)) {
			/* port already destroyed: just drop the entry's ref */
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		request = ipc_right_request_cancel(port, name, entry);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
868 
/*
 *	Routine:	ipc_right_destroy
 *	Purpose:
 *		Destroys an entry in a space.
 *	Conditions:
 *		The space is write-locked (returns unlocked).
 *		The space must be active.
 *	Returns:
 *		KERN_SUCCESS		The entry was destroyed.
 *		KERN_INVALID_CAPABILITY	The port is pinned.
 *		KERN_INVALID_RIGHT	Port guard violation.
 */

kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* nothing but the entry itself to tear down */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed by userspace */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		if (!ip_active(port)) {
			/* port died: treat the entry like a dead name and free it */
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}

		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1016 
1017 /*
1018  *	Routine:	ipc_right_dealloc
1019  *	Purpose:
1020  *		Releases a send/send-once/dead-name/port_set user ref.
1021  *		Like ipc_right_delta with a delta of -1,
1022  *		but looks at the entry to determine the right.
1023  *	Conditions:
1024  *		The space is write-locked, and is unlocked upon return.
1025  *		The space must be active.
1026  *	Returns:
1027  *		KERN_SUCCESS		A user ref was released.
1028  *		KERN_INVALID_RIGHT	Entry has wrong type.
 *		KERN_INVALID_CAPABILITY	Deallocating a pinned right.
1030  */
1031 
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	/*
	 * `port` is set by the SEND / SEND_ONCE cases before they may
	 * jump to the dead_name label; the dead-name path drops the
	 * reference left behind by ipc_right_check() only after the
	 * space lock has been released.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);


	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port sets carry no user refs and no notification requests */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		/* remove the name from the space before tearing down the set */
		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:

		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last user ref: free the name entirely */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always carries exactly one uref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died: the entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* deliver the send-once notification; consumes the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died: the entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* dropping the last uref destroys the send right */
			if (ip_is_pinned(port)) {
				/* pinned send rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right: arm a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel(port, name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* emitted only after both locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/*
			 * Last send uref: drop the send right but keep the
			 * receive right; the entry stays, minus SEND + urefs.
			 */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		/* entry holds no releasable user ref (e.g. receive-only) */
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1230 
1231 /*
1232  *	Routine:	ipc_right_delta
1233  *	Purpose:
1234  *		Modifies the user-reference count for a right.
1235  *		May deallocate the right, if the count goes to zero.
1236  *	Conditions:
1237  *		The space is write-locked, and is unlocked upon return.
1238  *		The space must be active.
1239  *	Returns:
1240  *		KERN_SUCCESS		Count was modified.
1241  *		KERN_INVALID_RIGHT	Entry has wrong type.
1242  *		KERN_INVALID_VALUE	Bad delta for the right.
1243  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1244  */
1245 
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	/* set by cases that may bail to invalid_right with a ref to drop */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

/*
 *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
 *	switch below. It is used to keep track of those cases (in DIPC)
 *	where we have postponed the dropping of a port reference. Since
 *	the dropping of the reference could cause the port to disappear
 *	we postpone doing so when we are holding the space lock.
 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		/* only deltas of 0 (no-op) and -1 (destroy set) are legal */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* stay silent if the entry once held receive (EX_RECEIVE) */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* only deltas of 0 (no-op) and -1 (destroy receive) are legal */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 *	The port lock is needed for ipc_right_dncancel;
		 *	otherwise, we wouldn't have to take the lock
		 *	until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			/* a guarded receive right cannot be destroyed this way */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				ip_reference(port);
			} else {
				/*
				 *	The remaining send right turns into a
				 *	dead name.  Notice we don't decrement
				 *	ip_srights, generate a no-senders notif,
				 *	or use ipc_right_dncancel, because the
				 *	port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			/* receive-only entry: the name goes away entirely */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel(port, name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died under us; entry is now a dead name */
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* only deltas of 0 (no-op) and -1 (consume right) are legal */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		/* port ref to release once the space lock is dropped */
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* the right died: refresh bits and defer the release */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* all refs dropped: free the dead name entry */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* delta removes every uref: the send right goes away */
			if (ip_is_pinned(port)) {
				/* pinned send rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* keep the receive right; just strip SEND + urefs */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* defer ip_release until both locks are dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* guard exception was already raised; locks already dropped */
	return KERN_INVALID_RIGHT;
}
1678 
1679 /*
1680  *	Routine:	ipc_right_destruct
1681  *	Purpose:
1682  *		Deallocates the receive right and modifies the
1683  *		user-reference count for the send rights as requested.
1684  *	Conditions:
1685  *		The space is write-locked, and is unlocked upon return.
1686  *		The space must be active.
1687  *	Returns:
1688  *		KERN_SUCCESS		Count was modified.
1689  *		KERN_INVALID_RIGHT	Entry has wrong type.
1690  *		KERN_INVALID_VALUE	Bad delta for the right.
1691  */
1692 
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-right delta is only meaningful if we also hold send */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send-right deltas are accepted */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		/* guard mismatch: raise exception, KERN_INVALID_ARGUMENT */
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* all send urefs gone: drop our send right too */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
		} else {
			/*
			 *	The remaining send right turns into a
			 *	dead name.  Notice we don't decrement
			 *	ip_srights, generate a no-senders notif,
			 *	or use ipc_right_dncancel, because the
			 *	port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		/* receive-only entry: the name is removed entirely */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1865 
1866 
1867 /*
1868  *	Routine:	ipc_right_info
1869  *	Purpose:
1870  *		Retrieves information about the right.
1871  *	Conditions:
1872  *		The space is active and write-locked.
1873  *	        The space is unlocked upon return.
1874  *	Returns:
1875  *		KERN_SUCCESS		Retrieved info
1876  */
1877 
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* fold any registered-notification bits into the type */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked.  Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* the right died; entry is now a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			/* drop the ref ipc_right_check() left us */
			ip_release(port);
		}
	} else {
		/* dead name or port set: no port-side state to query */
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1933 
1934 /*
1935  *	Routine:	ipc_right_copyin_check_reply
1936  *	Purpose:
1937  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1938  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1939  *		If the reply port is an immovable send right, it errors out.
1940  *	Conditions:
1941  *		The space is locked (read or write) and active.
1942  */
1943 
boolean_t
ipc_right_copyin_check_reply(
	__assert_only ipc_space_t       space,
	mach_port_name_t                reply_name,
	ipc_entry_t                     reply_entry,
	mach_msg_type_name_t            reply_type,
	ipc_entry_t                     dest_entry,
	int                             *reply_port_semantics_violation)
{
	ipc_entry_bits_t bits;
	ipc_port_t reply_port;
	ipc_port_t dest_port;

	bits = reply_entry->ie_bits;
	assert(is_active(space));

	/* First: does the entry hold a right compatible with reply_type? */
	switch (reply_type) {
	case MACH_MSG_TYPE_MAKE_SEND:
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MOVE_RECEIVE:
		/* ipc_kmsg_copyin_header already filters it out */
		return FALSE;

	case MACH_MSG_TYPE_COPY_SEND:
	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* dead names are allowed through; copyin handles them */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			break;
		}

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			return FALSE;
		}

		reply_port = ip_object_to_port(reply_entry->ie_object);
		assert(reply_port != IP_NULL);

		/*
		 * active status peek to avoid checks that will be skipped
		 * on copyin for dead ports.  Lock not held, so will not be
		 * atomic (but once dead, there's no going back).
		 */
		if (!ip_active(reply_port)) {
			break;
		}

		/*
		 * Can't copyin a send right that is marked immovable. This bit
		 * is set only during port creation and never unset. So it can
		 * be read without a lock.
		 */
		if (ip_is_immovable_send(reply_port)) {
			mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
			return FALSE;
		}

		/* the held right kind must match the requested disposition */
		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
				return FALSE;
			}
		} else {
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				return FALSE;
			}
		}

		break;
	}

	default:
		panic("ipc_right_copyin_check: strange rights");
	}

	/* port-set entries skip the reply-port-semantics checks below */
	if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
	    (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
		return TRUE;
	}

	/* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
	reply_port = ip_object_to_port(reply_entry->ie_object);
	assert(reply_port != IP_NULL);

	if (ip_active(reply_port)) {
		if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
			return FALSE;
		}

		/* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
		dest_port = ip_object_to_port(dest_entry->ie_object);
		if (IP_VALID(dest_port) && ip_active(dest_port)) {
			/* populates reply_port_semantics_violation if we need to send telemetry */
			if (ip_violates_rigid_reply_port_semantics(dest_port, reply_port, reply_port_semantics_violation) ||
			    ip_violates_reply_port_semantics(dest_port, reply_port, reply_port_semantics_violation)) {
				if (reply_port_semantics && (*reply_port_semantics_violation == REPLY_PORT_SEMANTICS_VIOLATOR)) {
					/* Don't crash for rigid reply ports */
					mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
					return FALSE;
				}
			}
		}
	}

	return TRUE;
}
2058 
2059 /*
2060  *	Routine:	ipc_right_copyin_check_guard_locked
2061  *	Purpose:
2062  *		Check if the port is guarded and the guard
2063  *		value matches the one passed in the arguments.
2064  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2065  *		check if the port is unguarded.
2066  *	Conditions:
2067  *		The port is locked.
2068  *	Returns:
2069  *		KERN_SUCCESS		Port is either unguarded
2070  *					or guarded with expected value
2071  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2072  *					This also raises a EXC_GUARD exception.
2073  */
2074 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2075 ipc_right_copyin_check_guard_locked(
2076 	mach_port_name_t name,
2077 	ipc_port_t port,
2078 	mach_port_context_t context,
2079 	mach_msg_guard_flags_t *guard_flags)
2080 {
2081 	mach_msg_guard_flags_t flags = *guard_flags;
2082 	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2083 		return KERN_SUCCESS;
2084 	} else if (port->ip_guarded && (port->ip_context == context)) {
2085 		return KERN_SUCCESS;
2086 	}
2087 
2088 	/* Incorrect guard; Raise exception */
2089 	mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2090 	return KERN_INVALID_ARGUMENT;
2091 }
2092 
2093 /*
2094  *	Routine:	ipc_right_copyin
2095  *	Purpose:
2096  *		Copyin a capability from a space.
2097  *		If successful, the caller gets a ref
2098  *		for the resulting object, unless it is IO_DEAD,
2099  *		and possibly a send-once right which should
2100  *		be used in a port-deleted notification.
2101  *
2102  *		If deadok is not TRUE, the copyin operation
2103  *		will fail instead of producing IO_DEAD.
2104  *
2105  *		The entry is deallocated if the entry type becomes
2106  *		MACH_PORT_TYPE_NONE.
2107  *	Conditions:
2108  *		The space is write-locked and active.
2109  *	Returns:
2110  *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
2111  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2112  *		KERN_INVALID_CAPABILITY	Trying to move a kobject port or an immovable right,
2113  *								or moving the last ref of pinned right
2114  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2115  */
2116 
kern_return_t
ipc_right_copyin(
	ipc_space_t                space,
	mach_port_name_t           name,
	ipc_entry_t                entry,
	mach_msg_type_name_t       msgt_name,
	ipc_object_copyin_flags_t  flags,
	ipc_object_t               *objectp,
	ipc_port_t                 *sorightp,
	ipc_port_t                 *releasep,
	int                        *assertcntp,
	mach_port_context_t        context,
	mach_msg_guard_flags_t     *guard_flags)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	kern_return_t kr;
	/* Unpack the copyin policy flags once up front. */
	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
	boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
	boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);

	*releasep = IP_NULL;
	*assertcntp = 0;

	bits = entry->ie_bits;

	assert(is_active(space));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		/* MAKE_SEND requires the caller to hold the receive right for name. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* Reply ports never allow minting plain send rights. */
		if (ip_is_reply_port(port)) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mint a new send right (bumps srights and mscount). */
		ipc_port_make_send_any_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		/* MAKE_SEND_ONCE also requires the receive right. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 * Reply ports only allow MAKE_SEND_ONCE when the caller
		 * explicitly opted in (e.g. for the msg local port).
		 */
		if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mint a new send-once right. */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		bool allow_imm_recv = false;
		ipc_port_t request = IP_NULL;
		waitq_link_list_t free_l = { };

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
		 * The ipc_port structure uses the kdata union of kobject and
		 * imp_task exclusively. Thus, general use of a kobject port as
		 * a receive right can cause type confusion in the importance
		 * code.
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			/*
			 * Distinguish an invalid right, e.g., trying to move
			 * a send right as a receive right, from this
			 * situation which is, "This is a valid receive right,
			 * but it's also a kobject and you can't move it."
			 */
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * Service ports (other than the bootstrap port) and libxpc
		 * connection ports may override ip_immovable_receive, but only
		 * when the caller passed the corresponding copyin flag.
		 */
		if (port->ip_service_port && port->ip_splabel &&
		    !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
		} else if (ip_is_libxpc_connection_port(port)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
		}

		if ((!allow_imm_recv && port->ip_immovable_receive) ||
		    ip_is_reply_port(port) ||     /* never move reply port rcv right */
		    port->ip_specialreply) {
			assert(!ip_in_space(port, ipc_space_kernel));
			ip_mq_unlock(port);
			assert(current_task() != kernel_task);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Guarded receive rights must present a matching guard to move. */
		if (guard_flags != NULL) {
			kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock(port);
				return kr;
			}
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * Entry also holds send rights: keep the entry alive as a
			 * pure send entry (EX_RECEIVE marks that the receive right
			 * was moved out) and re-hash it, donating a port ref.
			 */
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			/* Pure receive right: the entry dies with the move. */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel(port, name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
		(void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
		if (guard_flags != NULL) {
			/* this flag will be cleared during copyout */
			*guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in.  Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, We'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			*assertcntp = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		ip_mq_unlock(port);

		/*
		 * This is unfortunate to do this while the space is locked,
		 * but plumbing it through all callers really hurts.
		 */
		waitq_link_free_list(WQT_PORT_SET, &free_l);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* May turn the entry into a dead name; then treat as copy-dead. */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto copy_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* A lone send-once right can't be COPY_SENDed. */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Duplicate the send right; the entry's urefs are untouched. */
		ipc_port_copy_send_any_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);
			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == 1) {
			/* Moving the last uref: the entry's naked send right leaves. */
			assert(port->ip_srights > 0);
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* Keep the entry for the receive right; donate a ref. */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);
				assert(!ip_is_pinned(port));

				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* Can't move the last ref of a pinned right. */
				if (ip_is_pinned(port)) {
					ip_mq_unlock(port);
					mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
					return KERN_INVALID_CAPABILITY;
				}

				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			/* More urefs remain: behave like a copy plus a uref drop. */
			ipc_port_copy_send_any_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);
		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		ipc_port_t request;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, flags)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port) && !allow_reply_move_so) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Send-once rights always have exactly one uref; entry dies. */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

copy_dead:
	/* COPY_SEND of a dead name: produce IO_DEAD without touching the entry. */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	/* NOTE(review): `0` here vs `IO_NULL` in move_dead — same value, inconsistent spelling */
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;

move_dead:
	/* MOVE of a dead name: consume one uref (dealloc entry on the last). */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IO_NULL);

	if (!deadok) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == 1) {
		ipc_entry_dealloc(space, IO_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 1; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;
}
2568 
2569 /*
2570  *	Routine:	ipc_right_copyin_two_move_sends
2571  *	Purpose:
2572  *		Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2573  *		and deadok == FALSE, except that this moves two
2574  *		send rights at once.
2575  *	Conditions:
2576  *		The space is write-locked and active.
2577  *		The object is returned with two refs/send rights.
2578  *	Returns:
2579  *		KERN_SUCCESS					Acquired an object.
2580  *		KERN_INVALID_RIGHT				Name doesn't denote correct right.
2581  *		KERN_INVALID_CAPABILITY			Name does not allow copyin move send capability.
2582  */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	/* Need an actual send right ... */
	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	/* ... with at least two urefs, since we consume two. */
	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	if (ip_is_reply_port(port)) {
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_CAPABILITY;
	}

	/* May convert the entry to a dead name; then the right is invalid. */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 * that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_any_locked(port);
		ipc_port_copy_send_any_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_any_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel(port, name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2692 
2693 
2694 /*
2695  *	Routine:	ipc_right_copyin_two
2696  *	Purpose:
2697  *		Like ipc_right_copyin with two dispositions,
2698  *		each of which results in a send or send-once right,
2699  *		and deadok = FALSE.
2700  *	Conditions:
2701  *		The space is write-locked and active.
2702  *		The object is returned with two refs/rights.
2703  *		Msgt_one refers to the dest_type.
2704  *		Copyin flags are currently only used in the context of send-once rights.
2705  *	Returns:
2706  *		KERN_SUCCESS		Acquired an object.
2707  *		KERN_INVALID_RIGHT	Name doesn't denote correct right(s).
2708  *		KERN_INVALID_CAPABILITY	Name doesn't denote correct right for msgt_two.
2709  */
kern_return_t
ipc_right_copyin_two(
	ipc_space_t               space,
	mach_port_name_t          name,
	ipc_entry_t               entry,
	mach_msg_type_name_t      msgt_one,
	mach_msg_type_name_t      msgt_two,
	ipc_object_copyin_flags_t flags_one, /* Used only for send once rights. */
	ipc_object_copyin_flags_t flags_two, /* Used only for send once rights. */
	ipc_object_t              *objectp,
	ipc_port_t                *sorightp,
	ipc_port_t                *releasep)
{
	ipc_port_t port;
	kern_return_t kr;
	int assertcnt = 0;

	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));

	/*
	 *	This is a little tedious to make atomic, because
	 *	there are 25 combinations of valid dispositions.
	 *	However, most are easy.
	 */

	/*
	 *	If either is move-sonce, then there must be an error.
	 */
	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		return KERN_INVALID_RIGHT;
	}

	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
		/*
		 *	One of the dispositions needs a receive right.
		 *
		 *	If the copyin below succeeds, we know the receive
		 *	right is there (because the pre-validation of
		 *	the second disposition already succeeded in our
		 *	caller).
		 *
		 *	Hence the port is not in danger of dying.
		 */
		ipc_object_t object_two;

		/* First copyin may be MOVE_SEND of an immovable right; allow it. */
		flags_one = flags_one | IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
		kr = ipc_right_copyin(space, name, entry,
		    msgt_one, flags_one,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		assert(IO_VALID(*objectp));
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);

		/*
		 *	Now copyin the second (previously validated)
		 *	disposition.  The result can't be a dead port,
		 *	as no valid disposition can make us lose our
		 *	receive right.
		 */
		kr = ipc_right_copyin(space, name, entry,
		    msgt_two, flags_two,
		    &object_two, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		assert(kr == KERN_SUCCESS);
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);
		assert(object_two == *objectp);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
		/*
		 *	This is an easy case.  Just use our
		 *	handy-dandy special-purpose copyin call
		 *	to get two send rights for the price of one.
		 */
		kr = ipc_right_copyin_two_move_sends(space, name, entry,
		    objectp, sorightp,
		    releasep);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	} else {
		mach_msg_type_name_t msgt_name;

		/*
		 *	Must be either a single move-send and a
		 *	copy-send, or two copy-send dispositions.
		 *	Use the disposition with the greatest side
		 *	effects for the actual copyin - then just
		 *	duplicate the send right you get back.
		 */
		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
		} else {
			msgt_name = MACH_MSG_TYPE_COPY_SEND;
		}

		kr = ipc_right_copyin(space, name, entry,
		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 *	Copy the right we got back.  If it is dead now,
		 *	that's OK.  Neither right will be usable to send
		 *	a message anyway.
		 *
		 *	Note that the port could be concurrently moved
		 *	outside of the space as a descriptor, and then
		 *	destroyed, which would not happen under the space lock.
		 *
		 *	It means we can't use ipc_port_copy_send() which
		 *	may fail if the port died.
		 */
		port = ip_object_to_port(*objectp);
		ip_mq_lock(port);
		ipc_port_copy_send_any_locked(port);
		ip_mq_unlock(port);
	}

	return KERN_SUCCESS;
}
2849 
2850 
2851 /*
2852  *	Routine:	ipc_right_copyout
2853  *	Purpose:
2854  *		Copyout a capability to a space.
2855  *		If successful, consumes a ref for the object.
2856  *
2857  *		Always succeeds when given a newly-allocated entry,
2858  *		because user-reference overflow isn't a possibility.
2859  *
2860  *		If copying out the object would cause the user-reference
2861  *		count in the entry to overflow, then the user-reference
2862  *		count is left pegged to its maximum value and the copyout
2863  *		succeeds anyway.
2864  *	Conditions:
2865  *		The space is write-locked and active.
2866  *		The object is locked and active.
2867  *		The object is unlocked; the space isn't.
2868  *	Returns:
2869  *		KERN_SUCCESS		Copied out capability.
2870  */
2871 
2872 kern_return_t
ipc_right_copyout(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_name,ipc_object_copyout_flags_t flags,mach_port_context_t * context,mach_msg_guard_flags_t * guard_flags,ipc_object_t object)2873 ipc_right_copyout(
2874 	ipc_space_t             space,
2875 	mach_port_name_t        name,
2876 	ipc_entry_t             entry,
2877 	mach_msg_type_name_t    msgt_name,
2878 	ipc_object_copyout_flags_t flags,
2879 	mach_port_context_t     *context,
2880 	mach_msg_guard_flags_t  *guard_flags,
2881 	ipc_object_t            object)
2882 {
2883 	ipc_entry_bits_t bits;
2884 	ipc_port_t port;
2885 	mach_port_name_t sp_name = MACH_PORT_NULL;
2886 	mach_port_context_t sp_context = 0;
2887 
2888 	bits = entry->ie_bits;
2889 
2890 	assert(IO_VALID(object));
2891 	assert(io_otype(object) == IOT_PORT);
2892 	assert(io_active(object));
2893 	assert(entry->ie_object == object);
2894 
2895 	port = ip_object_to_port(object);
2896 
2897 	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
2898 		assert(!ip_is_pinned(port));
2899 		assert(ip_is_immovable_send(port));
2900 		assert(task_is_immovable(space->is_task));
2901 		assert(task_is_pinned(space->is_task));
2902 		port->ip_pinned = 1;
2903 	}
2904 
2905 	switch (msgt_name) {
2906 	case MACH_MSG_TYPE_PORT_SEND_ONCE:
2907 
2908 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
2909 		assert(IE_BITS_UREFS(bits) == 0);
2910 		assert(port->ip_sorights > 0);
2911 
2912 		if (port->ip_specialreply) {
2913 			ipc_port_adjust_special_reply_port_locked(port,
2914 			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
2915 			/* port unlocked on return */
2916 		} else {
2917 			ip_mq_unlock(port);
2918 		}
2919 
2920 		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
2921 		ipc_entry_modified(space, name, entry);
2922 		break;
2923 
2924 	case MACH_MSG_TYPE_PORT_SEND:
2925 		assert(port->ip_srights > 0);
2926 
2927 		if (bits & MACH_PORT_TYPE_SEND) {
2928 			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);
2929 
2930 			assert(port->ip_srights > 1);
2931 			assert(urefs > 0);
2932 			assert(urefs <= MACH_PORT_UREFS_MAX);
2933 
2934 			if (urefs == MACH_PORT_UREFS_MAX) {
2935 				/*
2936 				 * leave urefs pegged to maximum,
2937 				 * consume send right and ref
2938 				 */
2939 
2940 				ip_srights_dec(port);
2941 				ip_mq_unlock(port);
2942 				ip_release_live(port);
2943 				return KERN_SUCCESS;
2944 			}
2945 
2946 			/* consume send right and ref */
2947 			ip_srights_dec(port);
2948 			ip_mq_unlock(port);
2949 			ip_release_live(port);
2950 		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
2951 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
2952 			assert(IE_BITS_UREFS(bits) == 0);
2953 
2954 			/* transfer send right to entry, consume ref */
2955 			ip_mq_unlock(port);
2956 			ip_release_live(port);
2957 		} else {
2958 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
2959 			assert(IE_BITS_UREFS(bits) == 0);
2960 
2961 			/* transfer send right and ref to entry */
2962 			ip_mq_unlock(port);
2963 
2964 			/* entry is locked holding ref, so can use port */
2965 
2966 			ipc_hash_insert(space, ip_to_object(port), name, entry);
2967 		}
2968 
2969 		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
2970 		ipc_entry_modified(space, name, entry);
2971 		break;
2972 
2973 	case MACH_MSG_TYPE_PORT_RECEIVE: {
2974 		ipc_port_t dest;
2975 #if IMPORTANCE_INHERITANCE
2976 		natural_t assertcnt = port->ip_impcount;
2977 #endif /* IMPORTANCE_INHERITANCE */
2978 
2979 		assert(port->ip_mscount == 0);
2980 		assert(!ip_in_a_space(port));
2981 
2982 		/*
2983 		 * Don't copyout kobjects or kolabels as receive right
2984 		 */
2985 		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
2986 			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
2987 		}
2988 
2989 		dest = ip_get_destination(port);
2990 
2991 		/* port transitions to IN-SPACE state */
2992 		port->ip_receiver_name = name;
2993 		port->ip_receiver = space;
2994 
2995 		struct knote *kn = current_thread()->ith_knote;
2996 
2997 		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
2998 			assert(port->ip_immovable_receive == 0);
2999 			port->ip_guarded = 1;
3000 			port->ip_strict_guard = 0;
3001 			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
3002 			if (kn != ITH_KNOTE_PSEUDO) {
3003 				port->ip_immovable_receive = 1;
3004 			}
3005 			port->ip_context = current_thread()->ith_recv_bufs.recv_msg_addr;
3006 			*context = port->ip_context;
3007 			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
3008 		}
3009 
3010 		if (ip_is_libxpc_connection_port(port)) {
3011 			/*
3012 			 * There are 3 ways to reach here.
3013 			 * 1. A libxpc client successfully sent this receive right to a named service
3014 			 *    and we are copying out in that service's ipc space.
3015 			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
3016 			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
3017 			 *
3018 			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
3019 			 */
3020 			port->ip_immovable_receive = 1;
3021 		}
3022 
3023 		/* Check if this is a service port */
3024 		if (port->ip_service_port) {
3025 			assert(port->ip_splabel != NULL);
3026 			/*
3027 			 * This flag gets reset during all 3 ways described above for libxpc connection port.
3028 			 * The only difference is launchd acts as an initiator instead of a libxpc client.
3029 			 */
3030 			if (service_port_defense_enabled) {
3031 				port->ip_immovable_receive = 1;
3032 			}
3033 
3034 			/* Check if this is a port-destroyed notification to ensure
3035 			 * that initproc doesnt end up with a guarded service port
3036 			 * sent in a regular message
3037 			 */
3038 			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
3039 				goto skip_sp_check;
3040 			}
3041 
3042 			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
3043 #if !(DEVELOPMENT || DEBUG)
3044 			if (get_bsdtask_info(current_task()) != initproc) {
3045 				goto skip_sp_check;
3046 			}
3047 #endif /* !(DEVELOPMENT || DEBUG) */
3048 			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
3049 			assert(sp_name != MACH_PORT_NULL);
3050 			/* Verify the port name and restore the guard value, if any */
3051 			if (name != sp_name) {
3052 				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
3053 			}
3054 			if (sp_context) {
3055 				port->ip_guarded = 1;
3056 				port->ip_strict_guard = 1;
3057 				port->ip_context = sp_context;
3058 			}
3059 		}
3060 skip_sp_check:
3061 
3062 		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
3063 		if (bits & MACH_PORT_TYPE_SEND) {
3064 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
3065 			assert(IE_BITS_UREFS(bits) > 0);
3066 			assert(port->ip_srights > 0);
3067 		} else {
3068 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
3069 			assert(IE_BITS_UREFS(bits) == 0);
3070 		}
3071 		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
3072 		ipc_entry_modified(space, name, entry);
3073 
3074 		boolean_t sync_bootstrap_checkin = FALSE;
3075 		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
3076 			sync_bootstrap_checkin = TRUE;
3077 		}
3078 		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
3079 			kn = NULL;
3080 		}
3081 		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
3082 		/* port unlocked */
3083 
3084 		if (bits & MACH_PORT_TYPE_SEND) {
3085 			ip_release_live(port);
3086 
3087 			/* entry is locked holding ref, so can use port */
3088 			ipc_hash_delete(space, ip_to_object(port), name, entry);
3089 		}
3090 
3091 		if (dest != IP_NULL) {
3092 #if IMPORTANCE_INHERITANCE
3093 			/*
3094 			 * Deduct the assertion counts we contributed to
3095 			 * the old destination port.  They've already
3096 			 * been reflected into the task as a result of
3097 			 * getting enqueued.
3098 			 */
3099 			ip_mq_lock(dest);
3100 			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
3101 			ip_mq_unlock(dest);
3102 #endif /* IMPORTANCE_INHERITANCE */
3103 
3104 			/* Drop turnstile ref on dest */
3105 			ipc_port_send_turnstile_complete(dest);
3106 			/* space lock is held */
3107 			ip_release_safe(dest);
3108 		}
3109 		break;
3110 	}
3111 
3112 	default:
3113 		panic("ipc_right_copyout: strange rights");
3114 	}
3115 	return KERN_SUCCESS;
3116 }
3117