xref: /xnu-8796.121.2/osfmk/ipc/ipc_right.c (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <ipc/port.h>
82 #include <ipc/ipc_entry.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_object.h>
85 #include <ipc/ipc_hash.h>
86 #include <ipc/ipc_port.h>
87 #include <ipc/ipc_pset.h>
88 #include <ipc/ipc_right.h>
89 #include <ipc/ipc_notify.h>
90 #include <ipc/ipc_importance.h>
91 #include <ipc/ipc_service_port.h>
92 #include <security/mac_mach_internal.h>
93 
94 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
95 
96 TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
97 static TUNABLE(bool, reply_port_semantics, "reply_port_semantics", true);
98 
99 /*
100  *	Routine:	ipc_right_lookup_read
101  *	Purpose:
102  *		Finds an entry in a space, given the name.
103  *	Conditions:
104  *		Nothing locked.
105  *		If an object is found, it is locked and active.
106  *	Returns:
107  *		KERN_SUCCESS		Found an entry.
108  *		KERN_INVALID_TASK	The space is dead.
109  *		KERN_INVALID_NAME	Name doesn't exist in space.
110  */
111 kern_return_t
ipc_right_lookup_read(ipc_space_t space,mach_port_name_t name,ipc_entry_bits_t * bitsp,ipc_object_t * objectp)112 ipc_right_lookup_read(
113 	ipc_space_t             space,
114 	mach_port_name_t        name,
115 	ipc_entry_bits_t       *bitsp,
116 	ipc_object_t           *objectp)
117 {
118 	mach_port_index_t index;
119 	ipc_entry_table_t table;
120 	ipc_entry_t entry;
121 	ipc_object_t object;
122 	kern_return_t kr;
123 
124 	index = MACH_PORT_INDEX(name);
	/* index 0 never denotes a valid entry: fail fast without entering SMR */
125 	if (__improbable(index == 0)) {
126 		*bitsp = 0;
127 		*objectp = IO_NULL;
128 		return KERN_INVALID_NAME;
129 	}
130 
131 	smr_global_enter();
132 
133 	/*
134 	 * Acquire a (possibly stale) pointer to the table,
135 	 * and guard it so that it can't be deallocated while we use it.
136 	 *
137 	 * smr_global_enter() has the property that it strongly serializes
138 	 * after any store-release. This is important because it means that if
139 	 * one considers this (broken) userspace usage:
140 	 *
141 	 * Thread 1:
142 	 *   - makes a semaphore, gets name 0x1003
143 	 *   - stores that name to a global `sema` in userspace
144 	 *
145 	 * Thread 2:
146 	 *   - spins to observe `sema` becoming non 0
147 	 *   - calls semaphore_wait() on 0x1003
148 	 *
149 	 * Then, because in order to return 0x1003 this thread issued
150 	 * a store-release (when calling is_write_unlock()),
151 	 * then this smr_entered_load() can't possibly observe a table
152 	 * pointer that is older than the one that was current when the
153 	 * semaphore was made.
154 	 *
155 	 * This fundamental property allows us to never loop.
156 	 */
157 	table = smr_entered_load(&space->is_table);
158 	if (__improbable(table == NULL)) {
159 		kr = KERN_INVALID_TASK;
160 		goto out_put;
161 	}
162 	entry = ipc_entry_table_get(table, index);
163 	if (__improbable(entry == NULL)) {
164 		kr = KERN_INVALID_NAME;
165 		goto out_put;
166 	}
167 
168 	/*
169 	 * Note: this should be an atomic load, but PAC and atomics
170 	 *       don't interact well together.
171 	 */
172 	object = entry->ie_volatile_object;
173 
174 	/*
175 	 * Attempt to lock an object that lives in this entry.
176 	 * It might fail or be a completely different object by now.
177 	 *
178 	 * Make sure that acquiring the lock is fully ordered after any
179 	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
180 	 * This allows us to always reliably observe space termination below.
181 	 */
182 	os_atomic_barrier_before_lock_acquire();
183 	if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
184 		kr = KERN_INVALID_NAME;
185 		goto out_put;
186 	}
187 
188 	/*
189 	 * Now that we hold the object lock, we are preventing any entry
190 	 * in this space for this object to be mutated.
191 	 *
192 	 * If the space didn't grow after we acquired our hazardous reference,
193 	 * and before a mutation of the entry, then holding the object lock
194 	 * guarantees we will observe the truth of ie_bits, ie_object and
195 	 * ie_request (those are always mutated with the object lock held).
196 	 *
197 	 * However this ordering is problematic:
198 	 * - [A]cquisition of the table pointer
199 	 * - [G]rowth of the space (making the table pointer in [A] stale)
200 	 * - [M]utation of the entry
201 	 * - [L]ocking of the object read through [A].
202 	 *
203 	 * The space lock is held for both [G] and [M], and the object lock
204 	 * is held for [M], which means that once we lock the object we can
205 	 * observe if [G] happened by reloading the table pointer.
206 	 *
207 	 * We might still fail to observe any growth operation that happened
208 	 * after the last mutation of this object's entry, because holding
209 	 * an object lock doesn't guarantee anything about the liveness
210 	 * of the space table pointer. This is not a problem at all: by
211 	 * definition, those didn't affect the state of the entry.
212 	 *
213 	 * TODO: a data-structure where the entries are grown by "slabs",
214 	 *       would allow for the address of an ipc_entry_t to never
215 	 *       change once it exists in a space and would avoid a reload
216 	 *       (as well as making space growth faster).
217 	 *       We however still need to check for termination.
218 	 */
219 	table = smr_entered_load(&space->is_table);
220 	if (__improbable(table == NULL)) {
221 		kr = KERN_INVALID_TASK;
222 		goto out_put_unlock;
223 	}
224 
225 	/*
226 	 * Tables never shrink so we don't need to validate the length twice.
227 	 */
228 	entry = ipc_entry_table_get_nocheck(table, index);
229 
230 	/*
231 	 * Now that we hold the lock and have a "fresh enough" table pointer,
232 	 * validate if this entry is what we think it is.
233 	 *
234 	 * To the risk of being repetitive, we still need to protect
235 	 * those accesses under SMR, because subsequent
236 	 * table growths might retire the memory. However we know
237 	 * those growths will have left our entry unchanged.
238 	 */
239 	if (__improbable(entry->ie_object != object)) {
240 		kr = KERN_INVALID_NAME;
241 		goto out_put_unlock;
242 	}
243 
	/* re-validate generation and type under the object lock */
244 	ipc_entry_bits_t bits = entry->ie_bits;
245 	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
246 	    IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE)) {
247 		kr = KERN_INVALID_NAME;
248 		goto out_put_unlock;
249 	}
250 
251 	/* Done with hazardous accesses to the table */
252 	smr_global_leave();
253 
254 	*bitsp = bits;
255 	*objectp = object;
256 	return KERN_SUCCESS;
257 
258 out_put_unlock:
259 	ipc_object_unlock(object);
260 out_put:
261 	smr_global_leave();
262 	return kr;
263 }
264 
265 /*
266  *	Routine:	ipc_right_lookup_write
267  *	Purpose:
268  *		Finds an entry in a space, given the name.
269  *	Conditions:
270  *		Nothing locked.  If successful, the space is write-locked.
271  *	Returns:
272  *		KERN_SUCCESS		Found an entry.
273  *		KERN_INVALID_TASK	The space is dead.
274  *		KERN_INVALID_NAME	Name doesn't exist in space.
275  */
276 
277 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)278 ipc_right_lookup_write(
279 	ipc_space_t             space,
280 	mach_port_name_t        name,
281 	ipc_entry_t             *entryp)
282 {
283 	ipc_entry_t entry;
284 
285 	assert(space != IS_NULL);
286 
287 	is_write_lock(space);
288 
289 	if (!is_active(space)) {
290 		is_write_unlock(space);
291 		return KERN_INVALID_TASK;
292 	}
293 
294 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
295 		is_write_unlock(space);
296 		return KERN_INVALID_NAME;
297 	}
298 
299 	*entryp = entry;
300 	return KERN_SUCCESS;
301 }
302 
303 /*
304  *	Routine:	ipc_right_lookup_two_write
305  *	Purpose:
306  *		Like ipc_right_lookup except that it returns two
307  *		entries for two different names that were looked
308  *		up under the same space lock.
309  *	Conditions:
310  *		Nothing locked.  If successful, the space is write-locked.
311  *	Returns:
312  *		KERN_INVALID_TASK	The space is dead.
313  *		KERN_INVALID_NAME	Name doesn't exist in space.
314  */
315 
316 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)317 ipc_right_lookup_two_write(
318 	ipc_space_t             space,
319 	mach_port_name_t        name1,
320 	ipc_entry_t             *entryp1,
321 	mach_port_name_t        name2,
322 	ipc_entry_t             *entryp2)
323 {
324 	ipc_entry_t entry1;
325 	ipc_entry_t entry2;
326 
327 	assert(space != IS_NULL);
328 
329 	is_write_lock(space);
330 
331 	if (!is_active(space)) {
332 		is_write_unlock(space);
333 		return KERN_INVALID_TASK;
334 	}
335 
336 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
337 		is_write_unlock(space);
338 		mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
339 		return KERN_INVALID_NAME;
340 	}
341 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
342 		is_write_unlock(space);
343 		mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
344 		return KERN_INVALID_NAME;
345 	}
346 	*entryp1 = entry1;
347 	*entryp2 = entry2;
348 	return KERN_SUCCESS;
349 }
350 
351 /*
352  *	Routine:	ipc_right_reverse
353  *	Purpose:
354  *		Translate (space, object) -> (name, entry).
355  *		Only finds send/receive rights.
356  *		Returns TRUE if an entry is found; if so,
357  *		the object is active.
358  *	Conditions:
359  *		The space must be locked (read or write) and active.
360  *		The port is locked and active
361  */
362 
363 bool
ipc_right_reverse(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)364 ipc_right_reverse(
365 	ipc_space_t             space,
366 	ipc_object_t            object,
367 	mach_port_name_t        *namep,
368 	ipc_entry_t             *entryp)
369 {
370 	ipc_port_t port;
371 	mach_port_name_t name;
372 	ipc_entry_t entry;
373 
374 	/* would switch on io_otype to handle multiple types of object */
375 
376 	assert(is_active(space));
377 	assert(io_otype(object) == IOT_PORT);
378 
379 	port = ip_object_to_port(object);
380 	require_ip_active(port);
381 
382 	ip_mq_lock_held(port);
383 
	/* receive right: the port itself records its name in this space */
384 	if (ip_in_space(port, space)) {
385 		name = ip_get_receiver_name(port);
386 		assert(name != MACH_PORT_NULL);
387 
388 		entry = ipc_entry_lookup(space, name);
389 
390 		assert(entry != IE_NULL);
391 		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
392 		assert(port == ip_object_to_port(entry->ie_object));
393 
394 		*namep = name;
395 		*entryp = entry;
396 		return true;
397 	}
398 
	/* send right: consult the space's reverse (object -> name) hash */
399 	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
400 		entry = *entryp;
401 		assert(entry != IE_NULL);
402 		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
403 		assert(port == ip_object_to_port(entry->ie_object));
404 
405 		return true;
406 	}
407 
408 	return false;
409 }
410 
411 /*
412  *	Routine:	ipc_right_request_alloc
413  *	Purpose:
414  *		Make a dead-name request, returning the previously
415  *		registered send-once right.  If notify is IP_NULL,
416  *		just cancels the previously registered request.
417  *
418  *	Conditions:
419  *		Nothing locked.  May allocate memory.
420  *		Only consumes/returns refs if successful.
421  *	Returns:
422  *		KERN_SUCCESS		Made/canceled dead-name request.
423  *		KERN_INVALID_TASK	The space is dead.
424  *		KERN_INVALID_NAME	Name doesn't exist in space.
425  *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
426  *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
427  *			immediate is FALSE or notify is IP_NULL.
428  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
429  */
430 
431 kern_return_t
ipc_right_request_alloc(ipc_space_t space,mach_port_name_t name,ipc_port_request_opts_t options,ipc_port_t notify,ipc_port_t * previousp)432 ipc_right_request_alloc(
433 	ipc_space_t             space,
434 	mach_port_name_t        name,
435 	ipc_port_request_opts_t options,
436 	ipc_port_t              notify,
437 	ipc_port_t              *previousp)
438 {
439 	ipc_port_request_index_t prev_request;
440 	ipc_port_t previous = IP_NULL;
441 	ipc_entry_t entry;
442 	kern_return_t kr;
443 #if IMPORTANCE_INHERITANCE
444 	bool will_arm = false;
445 #endif /* IMPORTANCE_INHERITANCE */
446 
	/* retry loop: only re-entered when the port's request table must grow */
447 	for (;;) {
448 		ipc_port_t port = IP_NULL;
449 
450 		kr = ipc_right_lookup_write(space, name, &entry);
451 		if (kr != KERN_SUCCESS) {
452 			return kr;
453 		}
454 
455 		/* space is write-locked and active */
456 
457 		prev_request = entry->ie_request;
458 
459 		/* if nothing to do or undo, we're done */
460 		if (notify == IP_NULL && prev_request == IE_REQ_NONE) {
461 			is_write_unlock(space);
462 			*previousp = IP_NULL;
463 			return KERN_SUCCESS;
464 		}
465 
466 		/* see if the entry is of proper type for requests */
467 		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
468 			ipc_port_request_index_t new_request;
469 
470 			port = ip_object_to_port(entry->ie_object);
471 			assert(port != IP_NULL);
472 
473 			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
474 				/* port is locked and active */
475 
476 				/* if no new request, just cancel previous */
477 				if (notify == IP_NULL) {
478 					if (prev_request != IE_REQ_NONE) {
479 						previous = ipc_port_request_cancel(port, name, prev_request);
480 						entry->ie_request = IE_REQ_NONE;
481 					}
482 					ip_mq_unlock(port);
483 					ipc_entry_modified(space, name, entry);
484 					is_write_unlock(space);
485 					break;
486 				}
487 
488 				/*
489 				 * send-once rights, kernel objects, and non-full other queues
490 				 * fire immediately (if immediate specified).
491 				 */
492 				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
493 				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
494 				    ip_in_space(port, ipc_space_kernel) || !ip_full(port))) {
495 					if (prev_request != IE_REQ_NONE) {
496 						previous = ipc_port_request_cancel(port, name, prev_request);
497 						entry->ie_request = IE_REQ_NONE;
498 					}
499 					ip_mq_unlock(port);
500 					ipc_entry_modified(space, name, entry);
501 					is_write_unlock(space);
502 
503 					ipc_notify_send_possible(notify, name);
504 					break;
505 				}
506 
507 				/*
508 				 * If there is a previous request, free it.  Any subsequent
509 				 * allocation cannot fail, thus assuring an atomic swap.
510 				 */
511 				if (prev_request != IE_REQ_NONE) {
512 					previous = ipc_port_request_cancel(port, name, prev_request);
513 				}
514 
515 #if IMPORTANCE_INHERITANCE
				/* arming the first send-possible request may donate importance below */
516 				will_arm = port->ip_sprequests == 0 &&
517 				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
518 #endif /* IMPORTANCE_INHERITANCE */
519 				kr = ipc_port_request_alloc(port, name, notify,
520 				    options, &new_request);
521 
522 				if (kr != KERN_SUCCESS) {
523 					assert(previous == IP_NULL);
524 					is_write_unlock(space);
525 
526 					kr = ipc_port_request_grow(port);
527 					/* port is unlocked */
528 
529 					if (kr != KERN_SUCCESS) {
530 						return kr;
531 					}
532 
					/* table grown: redo the lookup/alloc sequence from scratch */
533 					continue;
534 				}
535 
536 				assert(new_request != IE_REQ_NONE);
537 				entry->ie_request = new_request;
538 				ipc_entry_modified(space, name, entry);
539 				is_write_unlock(space);
540 
541 #if IMPORTANCE_INHERITANCE
542 				if (will_arm &&
543 				    port->ip_impdonation != 0 &&
544 				    port->ip_spimportant == 0 &&
545 				    task_is_importance_donor(current_task())) {
					/* on TRUE, ipc_port_importance_delta() consumed the port lock */
546 					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
547 						ip_mq_unlock(port);
548 					}
549 				} else
550 #endif /* IMPORTANCE_INHERITANCE */
551 				ip_mq_unlock(port);
552 
553 				break;
554 			}
555 			/* entry may have changed to dead-name by ipc_right_check() */
556 		}
557 
558 		/* treat send_possible requests as immediate w.r.t. dead-name */
559 		if (options && notify != IP_NULL &&
560 		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
561 			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);
562 
563 			assert(urefs > 0);
564 
565 			/* leave urefs pegged to maximum if it overflowed */
566 			if (urefs < MACH_PORT_UREFS_MAX) {
567 				(entry->ie_bits)++; /* increment urefs */
568 			}
569 			ipc_entry_modified(space, name, entry);
570 
571 			is_write_unlock(space);
572 
			/* drop the ref ipc_right_check() left us with for the dead port */
573 			if (port != IP_NULL) {
574 				ip_release(port);
575 			}
576 
577 			ipc_notify_dead_name(notify, name);
578 			previous = IP_NULL;
579 			break;
580 		}
581 
582 		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
583 		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;
584 
585 		is_write_unlock(space);
586 
587 		if (port != IP_NULL) {
588 			ip_release(port);
589 		}
590 
591 		return kr;
592 	}
593 
594 	*previousp = previous;
595 	return KERN_SUCCESS;
596 }
597 
598 /*
599  *	Routine:	ipc_right_request_cancel
600  *	Purpose:
601  *		Cancel a notification request and return the send-once right.
602  *		Afterwards, entry->ie_request == 0.
603  *	Conditions:
604  *		The space must be write-locked; the port must be locked.
605  *		The port and space must be active.
606  */
607 
608 ipc_port_t
ipc_right_request_cancel(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)609 ipc_right_request_cancel(
610 	ipc_space_t                     space,
611 	ipc_port_t                      port,
612 	mach_port_name_t                name,
613 	ipc_entry_t                     entry)
614 {
615 	ipc_port_t previous;
616 
617 	require_ip_active(port);
618 	assert(is_active(space));
619 	assert(port == ip_object_to_port(entry->ie_object));
620 
621 	if (entry->ie_request == IE_REQ_NONE) {
622 		return IP_NULL;
623 	}
624 
625 	previous = ipc_port_request_cancel(port, name, entry->ie_request);
626 	entry->ie_request = IE_REQ_NONE;
627 	ipc_entry_modified(space, name, entry);
628 	return previous;
629 }
630 
631 /*
632  *	Routine:	ipc_right_inuse
633  *	Purpose:
634  *		Check if an entry is being used.
635  *		Returns TRUE if it is.
636  *	Conditions:
637  *		The space is write-locked and active.
638  */
639 
640 bool
ipc_right_inuse(ipc_entry_t entry)641 ipc_right_inuse(
642 	ipc_entry_t entry)
643 {
644 	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
645 }
646 
647 /*
648  *	Routine:	ipc_right_check
649  *	Purpose:
650  *		Check if the port has died.  If it has,
651  *              and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
652  *              passed and it is not a send once right then
653  *		clean up the entry and return TRUE.
654  *	Conditions:
655  *		The space is write-locked; the port is not locked.
656  *		If returns FALSE, the port is also locked.
657  *		Otherwise, entry is converted to a dead name.
658  *
659  *		Caller is responsible for a reference to port if it
660  *		had died (returns TRUE).
661  */
662 
663 boolean_t
ipc_right_check(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry,ipc_object_copyin_flags_t flags)664 ipc_right_check(
665 	ipc_space_t              space,
666 	ipc_port_t               port,
667 	mach_port_name_t         name,
668 	ipc_entry_t              entry,
669 	ipc_object_copyin_flags_t flags)
670 {
671 	ipc_entry_bits_t bits;
672 
673 	assert(is_active(space));
674 	assert(port == ip_object_to_port(entry->ie_object));
675 
676 	ip_mq_lock(port);
677 	if (ip_active(port) ||
678 	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
679 	    entry->ie_request == IE_REQ_NONE &&
680 	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		/* port remains locked for the caller (the FALSE contract) */
681 		return FALSE;
682 	}
683 
684 	/* this was either a pure send right or a send-once right */
685 
686 	bits = entry->ie_bits;
687 	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
688 	assert(IE_BITS_UREFS(bits) > 0);
689 
690 	if (bits & MACH_PORT_TYPE_SEND) {
691 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
692 		assert(IE_BITS_UREFS(bits) > 0);
693 		ip_srights_dec(port);
694 	} else {
695 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
696 		assert(IE_BITS_UREFS(bits) == 1);
697 		ip_sorights_dec(port);
698 	}
699 
700 	/*
701 	 * delete SEND rights from ipc hash.
702 	 */
703 
704 	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
705 		ipc_hash_delete(space, ip_to_object(port), name, entry);
706 	}
707 
708 	/* convert entry to dead name */
709 	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;
710 
711 	/*
712 	 * If there was a notification request outstanding on this
713 	 * name, and the port went dead, that notification
714 	 * must already be on its way up from the port layer.
715 	 *
716 	 * Add the reference that the notification carries. It
717 	 * is done here, and not in the notification delivery,
718 	 * because the latter doesn't have a space reference and
719 	 * trying to actually move a send-right reference would
720 	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
721 	 * all calls that deal with the right eventually come
722 	 * through here, it has the same result.
723 	 *
724 	 * Once done, clear the request index so we only account
725 	 * for it once.
726 	 */
727 	if (entry->ie_request != IE_REQ_NONE) {
728 		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
729 			/* if urefs are pegged due to overflow, leave them pegged */
730 			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
731 				bits++; /* increment urefs */
732 			}
733 		}
734 		entry->ie_request = IE_REQ_NONE;
735 	}
736 	entry->ie_bits = bits;
737 	entry->ie_object = IO_NULL;
738 
739 	ip_mq_unlock(port);
740 
741 	ipc_entry_modified(space, name, entry);
742 
	/* caller now owns the port reference the entry used to hold */
743 	return TRUE;
744 }
745 
746 /*
747  *	Routine:	ipc_right_terminate
748  *	Purpose:
749  *		Cleans up an entry in a terminated space.
750  *		The entry isn't deallocated or removed
751  *		from reverse hash tables.
752  *	Conditions:
753  *		The space is dead and unlocked.
754  */
755 
756 void
ipc_right_terminate(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry)757 ipc_right_terminate(
758 	ipc_space_t             space,
759 	mach_port_name_t        name,
760 	ipc_entry_t             entry)
761 {
762 	mach_port_type_t type;
763 	ipc_object_t object;
764 
765 	assert(!is_active(space));
766 
767 	type   = IE_BITS_TYPE(entry->ie_bits);
768 	object = entry->ie_object;
769 
770 	/*
771 	 * Hollow the entry under the port lock,
772 	 * in order to avoid dangling pointers.
773 	 *
774 	 * ipc_right_lookup_read() doesn't need it for correctness,
775 	 * but ipc_space_terminate() does, as it now goes through 2 rounds
776 	 * of termination (receive rights first, the rest second).
777 	 */
778 
779 	if (type != MACH_PORT_TYPE_DEAD_NAME) {
780 		assert(object != IO_NULL);
781 		io_lock(object);
782 	}
783 	entry->ie_object = IO_NULL;
784 	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);
785 
786 	switch (type) {
787 	case MACH_PORT_TYPE_DEAD_NAME:
788 		assert(entry->ie_request == IE_REQ_NONE);
789 		assert(object == IO_NULL);
790 		break;
791 
792 	case MACH_PORT_TYPE_PORT_SET: {
793 		ipc_pset_t pset = ips_object_to_pset(object);
794 
795 		assert(entry->ie_request == IE_REQ_NONE);
796 		assert(ips_active(pset));
797 
798 		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
799 		break;
800 	}
801 
802 	case MACH_PORT_TYPE_SEND:
803 	case MACH_PORT_TYPE_RECEIVE:
804 	case MACH_PORT_TYPE_SEND_RECEIVE:
805 	case MACH_PORT_TYPE_SEND_ONCE: {
806 		ipc_port_t port = ip_object_to_port(object);
807 		ipc_port_t request = IP_NULL;
808 		ipc_notify_nsenders_t nsrequest = { };
809 
		/* dead port: nothing to cancel or notify, just drop our ref */
810 		if (!ip_active(port)) {
811 			ip_mq_unlock(port);
812 			ip_release(port);
813 			break;
814 		}
815 
816 		/*
817 		 * same as ipc_right_request_cancel(),
818 		 * except for calling ipc_entry_modified()
819 		 * as the space is now table-less.
820 		 */
821 		if (entry->ie_request != IE_REQ_NONE) {
822 			request = ipc_port_request_cancel(port, name,
823 			    entry->ie_request);
824 			entry->ie_request = IE_REQ_NONE;
825 		}
826 
827 		if (type & MACH_PORT_TYPE_SEND) {
828 			ip_srights_dec(port);
829 			if (port->ip_srights == 0) {
830 				nsrequest = ipc_notify_no_senders_prepare(port);
831 			}
832 		}
833 
834 		if (type & MACH_PORT_TYPE_RECEIVE) {
835 			assert(ip_get_receiver_name(port) == name);
836 			assert(ip_in_space(port, space));
837 
838 			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
839 		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
840 			assert(port->ip_sorights > 0);
841 			port->ip_reply_context = 0;
842 
843 			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
844 		} else {
845 			/* port could be dead, in-transit, or in a foreign space */
846 			assert(!ip_in_space(port, space));
847 
848 			ip_mq_unlock(port);
849 			ip_release(port);
850 		}
851 
852 		/*
853 		 * For both no-senders and port-deleted notifications,
854 		 * look at whether the destination is still active.
855 		 * If it isn't, just swallow the send-once right.
856 		 *
857 		 * This is a racy check, but this is ok because we can only
858 		 * fail to notice that the port is now inactive, which
859 		 * only causes us to fail at an optimization.
860 		 *
861 		 * The purpose here is to avoid sending messages
862 		 * to receive rights that used to be in this space,
863 		 * which we can't fail to observe.
864 		 */
865 		if (nsrequest.ns_notify != IP_NULL) {
866 			if (ip_active(nsrequest.ns_notify)) {
867 				ipc_notify_no_senders_emit(nsrequest);
868 			} else {
869 				ipc_notify_no_senders_consume(nsrequest);
870 			}
871 		}
872 
873 		if (request != IP_NULL) {
874 			if (ip_active(request)) {
875 				ipc_notify_port_deleted(request, name);
876 			} else {
877 				ipc_port_release_sonce(request);
878 			}
879 		}
880 		break;
881 	}
882 
883 	default:
884 		panic("ipc_right_terminate: strange type - 0x%x", type);
885 	}
886 }
887 
888 /*
889  *	Routine:	ipc_right_destroy
890  *	Purpose:
891  *		Destroys an entry in a space.
892  *	Conditions:
893  *		The space is write-locked (returns unlocked).
894  *		The space must be active.
895  *	Returns:
896  *		KERN_SUCCESS		      The entry was destroyed.
897  *      KERN_INVALID_CAPABILITY   The port is pinned.
898  *      KERN_INVALID_RIGHT        Port guard violation.
899  */
900 
901 kern_return_t
ipc_right_destroy(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,boolean_t check_guard,uint64_t guard)902 ipc_right_destroy(
903 	ipc_space_t             space,
904 	mach_port_name_t        name,
905 	ipc_entry_t             entry,
906 	boolean_t               check_guard,
907 	uint64_t                guard)
908 {
909 	ipc_entry_bits_t bits;
910 	mach_port_type_t type;
911 
	/* snapshot the bits, then strip the type; entry is dead or freed below */
912 	bits = entry->ie_bits;
913 	entry->ie_bits &= ~IE_BITS_TYPE_MASK;
914 	type = IE_BITS_TYPE(bits);
915 
916 	assert(is_active(space));
917 
918 	switch (type) {
919 	case MACH_PORT_TYPE_DEAD_NAME:
920 		assert(entry->ie_request == IE_REQ_NONE);
921 		assert(entry->ie_object == IO_NULL);
922 
923 		ipc_entry_dealloc(space, IO_NULL, name, entry);
924 		is_write_unlock(space);
925 		break;
926 
927 	case MACH_PORT_TYPE_PORT_SET: {
928 		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);
929 
930 		assert(entry->ie_request == IE_REQ_NONE);
931 		assert(pset != IPS_NULL);
932 
933 		ips_mq_lock(pset);
934 		assert(ips_active(pset));
935 
936 		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);
937 
938 		is_write_unlock(space);
939 
940 		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
941 		break;
942 	}
943 
944 	case MACH_PORT_TYPE_SEND:
945 	case MACH_PORT_TYPE_RECEIVE:
946 	case MACH_PORT_TYPE_SEND_RECEIVE:
947 	case MACH_PORT_TYPE_SEND_ONCE: {
948 		ipc_port_t port = ip_object_to_port(entry->ie_object);
949 		ipc_notify_nsenders_t nsrequest = { };
950 		ipc_port_t request;
951 
952 		assert(port != IP_NULL);
953 
954 		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed */
955 			if (ip_is_pinned(port)) {
956 				assert(ip_active(port));
957 				is_write_unlock(space);
958 				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
959 				return KERN_INVALID_CAPABILITY;
960 			}
961 			ipc_hash_delete(space, ip_to_object(port), name, entry);
962 		}
963 
964 		ip_mq_lock(port);
965 
		/* port already died: just free the entry and drop our ref */
966 		if (!ip_active(port)) {
967 			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
968 			entry->ie_request = IE_REQ_NONE;
969 			assert(!ip_is_pinned(port));
970 			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
971 			ip_mq_unlock(port);
972 			is_write_unlock(space);
973 			ip_release(port);
974 			break;
975 		}
976 
977 		/* For receive rights, check for guarding */
978 		if ((type & MACH_PORT_TYPE_RECEIVE) &&
979 		    (check_guard) && (port->ip_guarded) &&
980 		    (guard != port->ip_context)) {
981 			/* Guard Violation */
982 			uint64_t portguard = port->ip_context;
983 			ip_mq_unlock(port);
984 			is_write_unlock(space);
985 			/* Raise mach port guard exception */
986 			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
987 			return KERN_INVALID_RIGHT;
988 		}
989 
990 
991 		request = ipc_right_request_cancel_macro(space, port,
992 		    name, entry);
993 		assert(!ip_is_pinned(port));
994 		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
995 
996 		is_write_unlock(space);
997 
998 		if (type & MACH_PORT_TYPE_SEND) {
999 			ip_srights_dec(port);
1000 			if (port->ip_srights == 0) {
1001 				nsrequest = ipc_notify_no_senders_prepare(port);
1002 			}
1003 		}
1004 
1005 		if (type & MACH_PORT_TYPE_RECEIVE) {
1006 			require_ip_active(port);
1007 			assert(ip_in_space(port, space));
1008 
1009 			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
1010 		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
1011 			assert(port->ip_sorights > 0);
1012 			port->ip_reply_context = 0;
1013 			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
1014 		} else {
1015 			assert(!ip_in_space(port, space));
1016 
1017 			ip_mq_unlock(port);
1018 			ip_release(port);
1019 		}
1020 
1021 		ipc_notify_no_senders_emit(nsrequest);
1022 
1023 		if (request != IP_NULL) {
1024 			ipc_notify_port_deleted(request, name);
1025 		}
1026 
1027 
1028 		break;
1029 	}
1030 
1031 	default:
1032 		panic("ipc_right_destroy: strange type");
1033 	}
1034 
1035 	return KERN_SUCCESS;
1036 }
1037 
1038 /*
1039  *	Routine:	ipc_right_dealloc
1040  *	Purpose:
1041  *		Releases a send/send-once/dead-name/port_set user ref.
1042  *		Like ipc_right_delta with a delta of -1,
1043  *		but looks at the entry to determine the right.
1044  *	Conditions:
1045  *		The space is write-locked, and is unlocked upon return.
1046  *		The space must be active.
1047  *	Returns:
 *		KERN_SUCCESS		A user ref was released.
 *		KERN_INVALID_RIGHT	Entry has wrong type.
 *		KERN_INVALID_CAPABILITY	Deallocating a pinned right.
1051  */
1052 
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	/*
	 * Entered with the space write-locked and active (see routine
	 * header).  Every path below drops the space lock exactly once
	 * before returning.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);


	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* Port sets carry no user refs and never have a dn-request. */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		/* Remove the name from the space while both locks are held. */
		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Also reached via goto from the SEND / SEND_ONCE cases
		 * when ipc_right_check() discovers the port died; in that
		 * case `port` holds a reference to release after unlock.
		 */
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* Last uref: the entry itself goes away. */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* A send-once right always has exactly one uref. */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* Port died: entry was turned into a dead name. */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* Deallocation of a send-once right fires its notification. */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* Port died: fall through to dead-name handling. */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/*
			 * Dropping the last uref would destroy the send
			 * right; pinned rights may not be dropped this way.
			 */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* Last send right anywhere: prep no-senders. */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			/* Entry's reference on the port, dropped unlocked. */
			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* Notifications are sent only after all locks are dropped. */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/*
			 * Last uref on the send right: drop it but keep the
			 * receive right; the entry stays, minus the SEND type
			 * bit and all urefs.
			 */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		/* RECEIVE-only entries have no urefs to release. */
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1253 
1254 /*
1255  *	Routine:	ipc_right_delta
1256  *	Purpose:
1257  *		Modifies the user-reference count for a right.
1258  *		May deallocate the right, if the count goes to zero.
1259  *	Conditions:
1260  *		The space is write-locked, and is unlocked upon return.
1261  *		The space must be active.
1262  *	Returns:
1263  *		KERN_SUCCESS		Count was modified.
1264  *		KERN_INVALID_RIGHT	Entry has wrong type.
1265  *		KERN_INVALID_VALUE	Bad delta for the right.
1266  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1267  */
1268 
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	/*
	 * Entered with the space write-locked and active (see routine
	 * header); unlocked on every exit path.  `port` doubles as the
	 * "deferred release" slot for the invalid_right path.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

/*
 *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
 *	switch below. It is used to keep track of those cases (in DIPC)
 *	where we have postponed the dropping of a port reference. Since
 *	the dropping of the reference could cause the port to disappear
 *	we postpone doing so when we are holding the space lock.
 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		/* Port sets only accept deltas of 0 (no-op) or -1 (destroy). */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* No exception if the entry used to hold receive. */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* Receive rights only accept deltas of 0 or -1. */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 *	The port lock is needed for ipc_right_dncancel;
		 *	otherwise, we wouldn't have to take the lock
		 *	until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				ip_reference(port);
			} else {
				/*
				 *	The remaining send right turns into a
				 *	dead name.  Notice we don't decrement
				 *	ip_srights, generate a no-senders notif,
				 *	or use ipc_right_dncancel, because the
				 *	port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			/* Pure receive right: the whole entry goes away. */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* Port died under us; entry is now a dead name. */
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* Send-once rights only accept deltas of 0 or -1. */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			/*
			 * A send/send-once right can satisfy a DEAD_NAME
			 * delta only if the port has actually died.
			 */
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* Entry became a dead name; defer the port release. */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* All urefs gone: remove the dead-name entry. */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		/* Release postponed until the space lock is dropped. */
		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* Dropping the last uref destroys the send right. */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* Keep the entry: we still hold receive. */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* Release deferred until locks are dropped. */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* Locks already dropped and exception already raised above. */
	return KERN_INVALID_RIGHT;
}
1703 
1704 /*
1705  *	Routine:	ipc_right_destruct
1706  *	Purpose:
1707  *		Deallocates the receive right and modifies the
1708  *		user-reference count for the send rights as requested.
1709  *	Conditions:
1710  *		The space is write-locked, and is unlocked upon return.
1711  *		The space must be active.
1712  *	Returns:
1713  *		KERN_SUCCESS		Count was modified.
1714  *		KERN_INVALID_RIGHT	Entry has wrong type.
1715  *		KERN_INVALID_VALUE	Bad delta for the right.
1716  */
1717 
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	/*
	 * Entered with the space write-locked and active (see routine
	 * header).  srdelta adjusts send urefs (must be <= 0); guard must
	 * match the port's context when the port is guarded.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* A send-uref delta requires that a send right actually exists. */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* Only zero or negative send-uref deltas are meaningful here. */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* All send urefs gone: drop our send right. */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	/* Re-read: the srdelta handling above may have rewritten ie_bits. */
	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
		} else {
			/*
			 *	The remaining send right turns into a
			 *	dead name.  Notice we don't decrement
			 *	ip_srights, generate a no-senders notif,
			 *	or use ipc_right_dncancel, because the
			 *	port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		/* Pure receive right: remove the entry entirely. */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	/* Emit notifications only after the space lock is dropped. */
	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1891 
1892 
1893 /*
1894  *	Routine:	ipc_right_info
1895  *	Purpose:
1896  *		Retrieves information about the right.
1897  *	Conditions:
1898  *		The space is active and write-locked.
1899  *	        The space is unlocked upon return.
1900  *	Returns:
1901  *		KERN_SUCCESS		Retrieved info
1902  */
1903 
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	/*
	 * Entered with the space write-locked and active (see routine
	 * header); the space lock is dropped on every path below.
	 * Returns the entry's type bits (plus any pending request-
	 * notification type bits) and its uref count.
	 */
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* We hold the receive right, so the port cannot die. */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked.  Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port is locked and active */
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* Port died: entry converted to a dead name. */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			ip_release(port);
		}
	} else {
		/* Dead name or port set: nothing to look at on the port. */
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1959 
1960 /*
1961  *	Routine:	ipc_right_copyin_check_reply
1962  *	Purpose:
1963  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1964  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1965  *		If the reply port is an immovable send right, it errors out.
1966  *	Conditions:
1967  *		The space is locked (read or write) and active.
1968  */
1969 
boolean_t
ipc_right_copyin_check_reply(
	__assert_only ipc_space_t       space,
	mach_port_name_t                reply_name,
	ipc_entry_t                     reply_entry,
	mach_msg_type_name_t            reply_type,
	ipc_entry_t                     dest_entry,
	boolean_t                       *reply_port_semantics_violation)
{
	ipc_entry_bits_t bits;
	ipc_port_t reply_port;
	ipc_port_t dest_port;

	bits = reply_entry->ie_bits;
	assert(is_active(space));

	switch (reply_type) {
	case MACH_MSG_TYPE_MAKE_SEND:
		/* Minting a send right requires holding the receive right. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		/* Same requirement as MAKE_SEND: must hold the receive right. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MOVE_RECEIVE:
		/* ipc_kmsg_copyin_header already filters it out */
		return FALSE;

	case MACH_MSG_TYPE_COPY_SEND:
	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* Dead names need no further right-type validation here. */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			break;
		}

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			return FALSE;
		}

		reply_port = ip_object_to_port(reply_entry->ie_object);
		assert(reply_port != IP_NULL);

		/*
		 * active status peek to avoid checks that will be skipped
		 * on copyin for dead ports.  Lock not held, so will not be
		 * atomic (but once dead, there's no going back).
		 */
		if (!ip_active(reply_port)) {
			break;
		}

		/*
		 * Can't copyin a send right that is marked immovable. This bit
		 * is set only during port creation and never unset. So it can
		 * be read without a lock.
		 */
		if (ip_is_immovable_send(reply_port)) {
			mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
			return FALSE;
		}

		/* The entry must carry the specific flavor of right being moved. */
		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
				return FALSE;
			}
		} else {
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				return FALSE;
			}
		}

		break;
	}

	default:
		panic("ipc_right_copyin_check: strange rights");
	}

	/* Port-set entries skip the reply-port semantics checks below. */
	if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
	    (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
		return TRUE;
	}

	/* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
	/*
	 * NOTE(review): the DEAD_NAME break above can reach this dereference;
	 * dead-name entries carry ie_object == IO_NULL elsewhere in this file
	 * (see the move_dead assertions in ipc_right_copyin) — confirm that a
	 * dead reply name cannot actually get here.
	 */
	reply_port = ip_object_to_port(reply_entry->ie_object);
	assert(reply_port != IP_NULL);

	if (ip_active(reply_port)) {
		if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
			return FALSE;
		}

		/* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
		dest_port = ip_object_to_port(dest_entry->ie_object);
		if (IP_VALID(dest_port) && ip_active(dest_port) && ip_require_reply_port_semantics(dest_port)
		    && !ip_is_reply_port(reply_port) && !ip_is_provisional_reply_port(reply_port)) {
			*reply_port_semantics_violation = TRUE;

			/*
			 * NOTE(review): reply_port_semantics is a policy flag defined
			 * outside this view (presumably a boot-arg); when it is off the
			 * violation is only recorded, not enforced — confirm against its
			 * definition.
			 */
			if (reply_port_semantics) {
				mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
				return FALSE;
			}
		}
	}

	return TRUE;
}
2082 
2083 /*
2084  *	Routine:	ipc_right_copyin_check_guard_locked
2085  *	Purpose:
2086  *		Check if the port is guarded and the guard
2087  *		value matches the one passed in the arguments.
2088  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2089  *		check if the port is unguarded.
2090  *	Conditions:
2091  *		The port is locked.
2092  *	Returns:
2093  *		KERN_SUCCESS		Port is either unguarded
2094  *					or guarded with expected value
2095  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2096  *					This also raises a EXC_GUARD exception.
2097  */
2098 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2099 ipc_right_copyin_check_guard_locked(
2100 	mach_port_name_t name,
2101 	ipc_port_t port,
2102 	mach_port_context_t context,
2103 	mach_msg_guard_flags_t *guard_flags)
2104 {
2105 	mach_msg_guard_flags_t flags = *guard_flags;
2106 	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2107 		return KERN_SUCCESS;
2108 	} else if (port->ip_guarded && (port->ip_context == context)) {
2109 		return KERN_SUCCESS;
2110 	}
2111 
2112 	/* Incorrect guard; Raise exception */
2113 	mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2114 	return KERN_INVALID_ARGUMENT;
2115 }
2116 
2117 /*
2118  *	Routine:	ipc_right_copyin
2119  *	Purpose:
2120  *		Copyin a capability from a space.
2121  *		If successful, the caller gets a ref
2122  *		for the resulting object, unless it is IO_DEAD,
2123  *		and possibly a send-once right which should
2124  *		be used in a port-deleted notification.
2125  *
2126  *		If deadok is not TRUE, the copyin operation
2127  *		will fail instead of producing IO_DEAD.
2128  *
2129  *		The entry is deallocated if the entry type becomes
2130  *		MACH_PORT_TYPE_NONE.
2131  *	Conditions:
2132  *		The space is write-locked and active.
2133  *	Returns:
2134  *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
2135  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2136  *		KERN_INVALID_CAPABILITY	Trying to move an kobject port or an immovable right,
2137  *								or moving the last ref of pinned right
2138  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2139  */
2140 
kern_return_t
ipc_right_copyin(
	ipc_space_t                space,
	mach_port_name_t           name,
	ipc_entry_t                entry,
	mach_msg_type_name_t       msgt_name,
	ipc_object_copyin_flags_t  flags,
	ipc_object_t               *objectp,
	ipc_port_t                 *sorightp,
	ipc_port_t                 *releasep,
	int                        *assertcntp,
	mach_port_context_t        context,
	mach_msg_guard_flags_t     *guard_flags)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	kern_return_t kr;
	/* Decode the copyin policy flags once up front. */
	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
	boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
	boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);

	*releasep = IP_NULL;
	*assertcntp = 0;

	bits = entry->ie_bits;

	assert(is_active(space));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		/* Minting a send right requires holding the receive right. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* Reply ports never yield full send rights. */
		if (ip_is_reply_port(port)) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mint a new send right; entry bits are unchanged. */
		ipc_port_make_send_any_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		/* Minting a send-once right also requires the receive right. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* MAKE_SEND_ONCE on a reply port is allowed only when the caller opted in. */
		if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		bool allow_imm_recv = false;
		ipc_port_t request = IP_NULL;
		waitq_link_list_t free_l = { };

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
		 * The ipc_port structure uses the kdata union of kobject and
		 * imp_task exclusively. Thus, general use of a kobject port as
		 * a receive right can cause type confusion in the importance
		 * code.
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			/*
			 * Distinguish an invalid right, e.g., trying to move
			 * a send right as a receive right, from this
			 * situation which is, "This is a valid receive right,
			 * but it's also a kobject and you can't move it."
			 */
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * Immovable-receive overrides are flag-gated and depend on the
		 * kind of port: service ports (excluding the bootstrap port's
		 * label) vs. libxpc connection ports use distinct copyin flags.
		 */
		if (port->ip_service_port && port->ip_splabel &&
		    !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
		} else if (ip_is_libxpc_connection_port(port)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
		}

		/* Special reply ports can never be moved, regardless of flags. */
		if ((!allow_imm_recv && port->ip_immovable_receive) || port->ip_specialreply) {
			assert(!ip_in_space(port, ipc_space_kernel));
			ip_mq_unlock(port);
			assert(current_task() != kernel_task);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Validate the caller-supplied guard before mutating anything. */
		if (guard_flags != NULL) {
			kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock(port);
				return kr;
			}
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * Entry also holds send rights: keep the entry alive as a
			 * pure send entry (EX_RECEIVE remembers it once held the
			 * receive right) and re-hash it under the port object.
			 */
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			/* Receive-only entry dies; its port ref moves to the caller. */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
		(void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
		if (guard_flags != NULL) {
			/* this flag will be cleared during copyout */
			*guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in.  Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, We'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			*assertcntp = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		ip_mq_unlock(port);

		/*
		 * This is unfortunate to do this while the space is locked,
		 * but plumbing it through all callers really hurts.
		 */
		waitq_link_free_list(WQT_PORT_SET, &free_l);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		/* Dead names may copy in as IO_DEAD (when deadok permits). */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* The port may have died; re-check and convert the entry if so. */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto copy_dead;
		}
		/* port is locked and active */

		/* COPY_SEND cannot duplicate a send-once right. */
		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Duplicate the send right; the entry's urefs are untouched. */
		ipc_port_copy_send_any_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/* port is locked and active */

		/* MOVE_SEND cannot consume a send-once right. */
		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);
			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == 1) {
			/* Moving the last uref: the entry loses its send right. */
			assert(port->ip_srights > 0);
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* Entry survives as receive-only; donate a port ref to the caller. */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);
				assert(!ip_is_pinned(port));

				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* A pinned right's last send ref may not leave the space. */
				if (ip_is_pinned(port)) {
					ip_mq_unlock(port);
					mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
					return KERN_INVALID_CAPABILITY;
				}

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			/* More urefs remain: behave like COPY_SEND plus one uref drop. */
			ipc_port_copy_send_any_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);
		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		ipc_port_t request;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* Note: this case forwards the caller's flags to ipc_right_check. */
		if (ipc_right_check(space, port, name, entry, flags)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		/* Reply ports may be moved as send-once only when the caller opted in. */
		if (ip_is_reply_port(port) && !allow_reply_move_so) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Send-once rights always live in a single-uref, sonce-only entry. */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

copy_dead:
	/* COPY_SEND of a dead name: no entry mutation, result is IO_DEAD. */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;

move_dead:
	/* MOVE_* of a dead name: consume one uref (or the whole entry). */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IO_NULL);

	if (!deadok) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == 1) {
		ipc_entry_dealloc(space, IO_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 1; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;
}
2592 
2593 /*
2594  *	Routine:	ipc_right_copyin_two_move_sends
2595  *	Purpose:
2596  *		Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2597  *		and deadok == FALSE, except that this moves two
2598  *		send rights at once.
2599  *	Conditions:
2600  *		The space is write-locked and active.
2601  *		The object is returned with two refs/send rights.
2602  *	Returns:
2603  *		KERN_SUCCESS					Acquired an object.
2604  *		KERN_INVALID_RIGHT				Name doesn't denote correct right.
2605  *		KERN_INVALID_CAPABILITY			Name does not allow copyin move send capability.
2606  */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	/* Moving two send rights requires a send entry with at least 2 urefs. */
	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	if (ip_is_reply_port(port)) {
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_CAPABILITY;
	}

	/* The port may have died; no dead-name fallback for this path. */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 * that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_any_locked(port);
		ipc_port_copy_send_any_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_any_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	/* Caller receives the object with two refs/send rights (see header). */
	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2717 
2718 
2719 /*
2720  *	Routine:	ipc_right_copyin_two
2721  *	Purpose:
2722  *		Like ipc_right_copyin with two dispositions,
2723  *		each of which results in a send or send-once right,
2724  *		and deadok = FALSE.
2725  *	Conditions:
2726  *		The space is write-locked and active.
2727  *		The object is returned with two refs/rights.
2728  *		Msgt_one refers to the dest_type.
2729  *      Copyin flags are currently only used in the context of send once rights.
2730  *	Returns:
2731  *		KERN_SUCCESS		Acquired an object.
2732  *		KERN_INVALID_RIGHT	Name doesn't denote correct right(s).
2733  *		KERN_INVALID_CAPABILITY	Name doesn't denote correct right for msgt_two.
2734  */
kern_return_t
ipc_right_copyin_two(
	ipc_space_t               space,
	mach_port_name_t          name,
	ipc_entry_t               entry,
	mach_msg_type_name_t      msgt_one,
	mach_msg_type_name_t      msgt_two,
	ipc_object_copyin_flags_t flags_one, /* Used only for send once rights. */
	ipc_object_copyin_flags_t flags_two, /* Used only for send once rights. */
	ipc_object_t              *objectp,
	ipc_port_t                *sorightp,
	ipc_port_t                *releasep)
{
	kern_return_t kr;
	int assertcnt = 0;

	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));

	/*
	 *	This is a little tedious to make atomic, because
	 *	there are 25 combinations of valid dispositions.
	 *	However, most are easy.
	 */

	/*
	 *	If either is move-sonce, then there must be an error.
	 */
	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		return KERN_INVALID_RIGHT;
	}

	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
		/*
		 *	One of the dispositions needs a receive right.
		 *
		 *	If the copyin below succeeds, we know the receive
		 *	right is there (because the pre-validation of
		 *	the second disposition already succeeded in our
		 *	caller).
		 *
		 *	Hence the port is not in danger of dying.
		 */
		ipc_object_t object_two;

		/* MAKE_* proved the receive right is local, so immovable-send is moot. */
		flags_one = flags_one | IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
		kr = ipc_right_copyin(space, name, entry,
		    msgt_one, flags_one,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		assert(IO_VALID(*objectp));
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);

		/*
		 *	Now copyin the second (previously validated)
		 *	disposition.  The result can't be a dead port,
		 *	as no valid disposition can make us lose our
		 *	receive right.
		 */
		kr = ipc_right_copyin(space, name, entry,
		    msgt_two, flags_two,
		    &object_two, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		assert(kr == KERN_SUCCESS);
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);
		assert(object_two == *objectp);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
		/*
		 *	This is an easy case.  Just use our
		 *	handy-dandy special-purpose copyin call
		 *	to get two send rights for the price of one.
		 */
		kr = ipc_right_copyin_two_move_sends(space, name, entry,
		    objectp, sorightp,
		    releasep);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	} else {
		mach_msg_type_name_t msgt_name;

		/*
		 *	Must be either a single move-send and a
		 *	copy-send, or two copy-send dispositions.
		 *	Use the disposition with the greatest side
		 *	effects for the actual copyin - then just
		 *	duplicate the send right you get back.
		 */
		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
		} else {
			msgt_name = MACH_MSG_TYPE_COPY_SEND;
		}

		kr = ipc_right_copyin(space, name, entry,
		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 *	Copy the right we got back.  If it is dead now,
		 *	that's OK.  Neither right will be usable to send
		 *	a message anyway.
		 *
		 *	Note that the port could be concurrently moved
		 *	outside of the space as a descriptor, and then
		 *	destroyed, which would not happen under the space lock.
		 *
		 *	It means we can't use ipc_port_copy_send() which
		 *	may fail if the port died.
		 */
		io_lock(*objectp);
		ipc_port_copy_send_any_locked(ip_object_to_port(*objectp));
		io_unlock(*objectp);
	}

	return KERN_SUCCESS;
}
2872 
2873 
2874 /*
2875  *	Routine:	ipc_right_copyout
2876  *	Purpose:
2877  *		Copyout a capability to a space.
2878  *		If successful, consumes a ref for the object.
2879  *
2880  *		Always succeeds when given a newly-allocated entry,
2881  *		because user-reference overflow isn't a possibility.
2882  *
2883  *		If copying out the object would cause the user-reference
2884  *		count in the entry to overflow, then the user-reference
2885  *		count is left pegged to its maximum value and the copyout
2886  *		succeeds anyway.
2887  *	Conditions:
2888  *		The space is write-locked and active.
2889  *		The object is locked and active.
2890  *		The object is unlocked; the space isn't.
2891  *	Returns:
2892  *		KERN_SUCCESS		Copied out capability.
2893  */
2894 
kern_return_t
ipc_right_copyout(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	ipc_object_t            object)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	bits = entry->ie_bits;

	/* only live ports ever reach this copyout path */
	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = ip_object_to_port(object);

	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		/*
		 * Pin the port in the receiving space: only valid for an
		 * immovable send right landing in an immovable, pinned task.
		 */
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		/* send-once rights never coalesce: the entry must be empty */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (port->ip_specialreply) {
			/*
			 * Link the special reply port to the receiving
			 * thread's knote/workloop (if any).
			 */
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			/* entry already names a send right: coalesce urefs */
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				ip_srights_dec(port);
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			ip_srights_dec(port);
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			/* entry holds the receive right: fold the send right in */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			/* empty entry: the right and its ref both move in */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			/* a send-only entry is findable via the reverse hash */
			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		/* snapshot importance assertions before the port changes hands */
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
			/*
			 * Sender requested an immovable-receive guard: apply a
			 * non-strict guard using the receiving thread's message
			 * address as the guard context, and report it back to
			 * the caller through *context / *guard_flags.
			 */
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_msg_addr;
			*context = port->ip_context;
			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (ip_is_libxpc_connection_port(port)) {
			/*
			 * There are 3 ways to reach here.
			 * 1. A libxpc client successfully sent this receive right to a named service
			 *    and we are copying out in that service's ipc space.
			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
			 *
			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
			 */
			port->ip_immovable_receive = 1;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/*
			 * This flag gets reset during all 3 ways described above for libxpc connection port.
			 * The only difference is launchd acts as an initiator instead of a libxpc client.
			 */
			if (service_port_defense_enabled) {
				port->ip_immovable_receive = 1;
			}

			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesn't end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}

			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			/* on RELEASE builds only initproc gets the guard restored */
			if (get_bsdtask_info(current_task()) != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				/* re-arm the strict guard launchd had on the port */
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		/* entry may hold a send right (or nothing), never a receive right */
		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		/* a pseudo-receive must not complete a sync bootstrap checkin */
		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * Entry now coalesces send + receive: drop the extra
			 * object ref and the send-only reverse-hash mapping.
			 */
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port.  They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		panic("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
3140