xref: /xnu-11215.81.4/osfmk/ipc/ipc_right.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <libkern/coreanalytics/coreanalytics.h>
82 #include <ipc/port.h>
83 #include <ipc/ipc_entry.h>
84 #include <ipc/ipc_space.h>
85 #include <ipc/ipc_object.h>
86 #include <ipc/ipc_hash.h>
87 #include <ipc/ipc_port.h>
88 #include <ipc/ipc_pset.h>
89 #include <ipc/ipc_right.h>
90 #include <ipc/ipc_notify.h>
91 #include <ipc/ipc_importance.h>
92 #include <ipc/ipc_service_port.h>
93 #include <security/mac_mach_internal.h>
94 
95 extern struct proc *current_proc(void);
96 extern int csproc_hardened_runtime(struct proc* p);
97 
98 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
99 
100 TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
101 static TUNABLE(bool, reply_port_semantics, "reply_port_semantics", true);
102 
/*
 *	Routine:	ipc_right_lookup_read
 *	Purpose:
 *		Finds an entry in a space, given the name.
 *		This is the lock-free fast path: the space itself is never
 *		locked; the lookup is protected by SMR (safe memory
 *		reclamation) and validated under the object lock.
 *	Conditions:
 *		Nothing locked.
 *		If an object is found, it is locked and active.
 *	Returns:
 *		KERN_SUCCESS		Found an entry.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t       *bitsp,
	ipc_object_t           *objectp)
{
	mach_port_index_t index;
	ipc_entry_table_t table;
	ipc_entry_t entry;
	ipc_object_t object;
	kern_return_t kr;

	/* index 0 is reserved and never denotes a valid entry */
	index = MACH_PORT_INDEX(name);
	if (__improbable(index == 0)) {
		*bitsp = 0;
		*objectp = IO_NULL;
		return KERN_INVALID_NAME;
	}

	smr_ipc_enter();

	/*
	 * Acquire a (possibly stale) pointer to the table,
	 * and guard it so that it can't be deallocated while we use it.
	 *
	 * smr_ipc_enter() has the property that it strongly serializes
	 * after any store-release. This is important because it means that if
	 * one considers this (broken) userspace usage:
	 *
	 * Thread 1:
	 *   - makes a semaphore, gets name 0x1003
	 *   - stores that name to a global `sema` in userspace
	 *
	 * Thread 2:
	 *   - spins to observe `sema` becoming non 0
	 *   - calls semaphore_wait() on 0x1003
	 *
	 * Then, because in order to return 0x1003 this thread issued
	 * a store-release (when calling is_write_unlock()),
	 * then this smr_entered_load() can't possibly observe a table
	 * pointer that is older than the one that was current when the
	 * semaphore was made.
	 *
	 * This fundamental property allows us to never loop.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		/* a NULL table means the space has been terminated */
		kr = KERN_INVALID_TASK;
		goto out_put;
	}
	entry = ipc_entry_table_get(table, index);
	if (__improbable(entry == NULL)) {
		/* index out of bounds for the current table size */
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Note: this should be an atomic load, but PAC and atomics
	 *       don't interact well together.
	 */
	object = entry->ie_volatile_object;

	/*
	 * Attempt to lock an object that lives in this entry.
	 * It might fail or be a completely different object by now.
	 *
	 * Make sure that acquiring the lock is fully ordered after any
	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
	 * This allows us to always reliably observe space termination below.
	 */
	os_atomic_barrier_before_lock_acquire();
	if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Now that we hold the object lock, we are preventing any entry
	 * in this space for this object to be mutated.
	 *
	 * If the space didn't grow after we acquired our hazardous reference,
	 * and before a mutation of the entry, then holding the object lock
	 * guarantees we will observe the truth of ie_bits, ie_object and
	 * ie_request (those are always mutated with the object lock held).
	 *
	 * However this ordering is problematic:
	 * - [A]cquisition of the table pointer
	 * - [G]rowth of the space (making the table pointer in [A] stale)
	 * - [M]utation of the entry
	 * - [L]ocking of the object read through [A].
	 *
	 * The space lock is held for both [G] and [M], and the object lock
	 * is held for [M], which means that once we lock the object we can
	 * observe if [G] happened by reloading the table pointer.
	 *
	 * We might still fail to observe any growth operation that happened
	 * after the last mutation of this object's entry, because holding
	 * an object lock doesn't guarantee anything about the liveness
	 * of the space table pointer. This is not a problem at all: by
	 * definition, those didn't affect the state of the entry.
	 *
	 * TODO: a data-structure where the entries are grown by "slabs",
	 *       would allow for the address of an ipc_entry_t to never
	 *       change once it exists in a space and would avoid a reload
	 *       (as well as making space growth faster).
	 *       We however still need to check for termination.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		kr = KERN_INVALID_TASK;
		goto out_put_unlock;
	}

	/*
	 * Tables never shrink so we don't need to validate the length twice.
	 */
	entry = ipc_entry_table_get_nocheck(table, index);

	/*
	 * Now that we hold the lock and have a "fresh enough" table pointer,
	 * validate if this entry is what we think it is.
	 *
	 * To the risk of being repetitive, we still need to protect
	 * those accesses under SMR, because subsequent
	 * table growths might retire the memory. However we know
	 * those growths will have left our entry unchanged.
	 */
	if (__improbable(entry->ie_object != object)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* reject stale names (wrong generation) and type-less entries */
	ipc_entry_bits_t bits = entry->ie_bits;
	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
	    IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* Done with hazardous accesses to the table */
	smr_ipc_leave();

	*bitsp = bits;
	*objectp = object;
	return KERN_SUCCESS;

out_put_unlock:
	ipc_object_unlock(object);
out_put:
	smr_ipc_leave();
	return kr;
}
268 
269 /*
270  *	Routine:	ipc_right_lookup_write
271  *	Purpose:
272  *		Finds an entry in a space, given the name.
273  *	Conditions:
274  *		Nothing locked.  If successful, the space is write-locked.
275  *	Returns:
276  *		KERN_SUCCESS		Found an entry.
277  *		KERN_INVALID_TASK	The space is dead.
278  *		KERN_INVALID_NAME	Name doesn't exist in space.
279  */
280 
281 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)282 ipc_right_lookup_write(
283 	ipc_space_t             space,
284 	mach_port_name_t        name,
285 	ipc_entry_t             *entryp)
286 {
287 	ipc_entry_t entry;
288 
289 	assert(space != IS_NULL);
290 
291 	is_write_lock(space);
292 
293 	if (!is_active(space)) {
294 		is_write_unlock(space);
295 		return KERN_INVALID_TASK;
296 	}
297 
298 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
299 		is_write_unlock(space);
300 		return KERN_INVALID_NAME;
301 	}
302 
303 	*entryp = entry;
304 	return KERN_SUCCESS;
305 }
306 
307 /*
308  *	Routine:	ipc_right_lookup_two_write
309  *	Purpose:
310  *		Like ipc_right_lookup except that it returns two
311  *		entries for two different names that were looked
312  *		up under the same space lock.
313  *	Conditions:
314  *		Nothing locked.  If successful, the space is write-locked.
315  *	Returns:
316  *		KERN_INVALID_TASK	The space is dead.
317  *		KERN_INVALID_NAME	Name doesn't exist in space.
318  */
319 
320 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)321 ipc_right_lookup_two_write(
322 	ipc_space_t             space,
323 	mach_port_name_t        name1,
324 	ipc_entry_t             *entryp1,
325 	mach_port_name_t        name2,
326 	ipc_entry_t             *entryp2)
327 {
328 	ipc_entry_t entry1;
329 	ipc_entry_t entry2;
330 
331 	assert(space != IS_NULL);
332 
333 	is_write_lock(space);
334 
335 	if (!is_active(space)) {
336 		is_write_unlock(space);
337 		return KERN_INVALID_TASK;
338 	}
339 
340 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
341 		is_write_unlock(space);
342 		mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
343 		return KERN_INVALID_NAME;
344 	}
345 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
346 		is_write_unlock(space);
347 		mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
348 		return KERN_INVALID_NAME;
349 	}
350 	*entryp1 = entry1;
351 	*entryp2 = entry2;
352 	return KERN_SUCCESS;
353 }
354 
/*
 *	Routine:	ipc_right_reverse
 *	Purpose:
 *		Translate (space, object) -> (name, entry).
 *		Only finds send/receive rights.
 *		Returns TRUE if an entry is found; if so,
 *		the object active.
 *	Conditions:
 *		The space must be locked (read or write) and active.
 *		The port is locked and active
 */

bool
ipc_right_reverse(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_port_name_t        *namep,
	ipc_entry_t             *entryp)
{
	ipc_port_t port;
	mach_port_name_t name;
	ipc_entry_t entry;

	/* would switch on io_otype to handle multiple types of object */

	assert(is_active(space));
	assert(io_otype(object) == IOT_PORT);

	port = ip_object_to_port(object);
	require_ip_active(port);

	ip_mq_lock_held(port);

	/*
	 * Fast case: the receive right lives in this space, so the port
	 * itself records its name; no hash lookup is needed.
	 */
	if (ip_in_space(port, space)) {
		name = ip_get_receiver_name(port);
		assert(name != MACH_PORT_NULL);

		entry = ipc_entry_lookup(space, name);

		assert(entry != IE_NULL);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
		assert(port == ip_object_to_port(entry->ie_object));

		*namep = name;
		*entryp = entry;
		return true;
	}

	/*
	 * Otherwise, a pure send right (if any) is found through the
	 * space's reverse hash table.
	 */
	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
		entry = *entryp;
		assert(entry != IE_NULL);
		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
		assert(port == ip_object_to_port(entry->ie_object));

		return true;
	}

	/* no send or receive right for this object in this space */
	return false;
}
414 
415 /*
416  *	Routine:	ipc_right_request_cancel
417  *	Purpose:
418  *		Cancel a notification request and return the send-once right.
419  *		Afterwards, entry->ie_request == 0.
420  *	Conditions:
421  *		The space must be write-locked; the port must be locked.
422  *		The port must be active.
423  */
424 
425 static inline ipc_port_t
ipc_right_request_cancel(ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)426 ipc_right_request_cancel(
427 	ipc_port_t              port,
428 	mach_port_name_t        name,
429 	ipc_entry_t             entry)
430 {
431 	ipc_port_request_index_t request = entry->ie_request;
432 
433 	if (request != IE_REQ_NONE) {
434 		entry->ie_request = IE_REQ_NONE;
435 		return ipc_port_request_cancel(port, name, request);
436 	}
437 	return IP_NULL;
438 }
439 
/*
 *	Routine:	ipc_right_request_alloc
 *	Purpose:
 *		Make a dead-name request, returning the previously
 *		registered send-once right.  If notify is IP_NULL,
 *		just cancels the previously registered request.
 *
 *	Conditions:
 *		Nothing locked.  May allocate memory.
 *		Only consumes/returns refs if successful.
 *	Returns:
 *		KERN_SUCCESS		Made/canceled dead-name request.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
 *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
 *			immediate is FALSE or notify is IP_NULL.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: the only way back to the top is via
	 * ipc_port_request_grow(), which drops all locks; every other
	 * path breaks or returns.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && entry->ie_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/*
				 * No matter what, we need to cancel any
				 * previous request.
				 */
				previous = ipc_right_request_cancel(port, name, entry);

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) ||
				    !ip_full(port))) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there was a previous request, freeing it
				 * above guarantees that the subsequent
				 * allocation will find a slot and succeed,
				 * thus assuring an atomic swap.
				 */
#if IMPORTANCE_INHERITANCE
				/*
				 * Arm importance donation only when this is the
				 * first send-possible request on the port.
				 */
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					/*
					 * Request table is full: drop the space
					 * lock, grow the table (unlocks the
					 * port), and retry from the top.
					 */
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					/*
					 * ipc_port_importance_delta() consumes
					 * the port lock when it returns TRUE.
					 */
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the ref left over from ipc_right_check() */
			if (port != IP_NULL) {
				ip_release(port);
			}

			/* deliver the dead-name notification immediately */
			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	*previousp = previous;
	return KERN_SUCCESS;
}
620 
621 /*
622  *	Routine:	ipc_right_inuse
623  *	Purpose:
624  *		Check if an entry is being used.
625  *		Returns TRUE if it is.
626  *	Conditions:
627  *		The space is write-locked and active.
628  */
629 
630 bool
ipc_right_inuse(ipc_entry_t entry)631 ipc_right_inuse(
632 	ipc_entry_t entry)
633 {
634 	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
635 }
636 
/*
 *	Routine:	ipc_right_check
 *	Purpose:
 *		Check if the port has died.  If it has,
 *              and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
 *              passed and it is not a send once right then
 *		clean up the entry and return TRUE.
 *	Conditions:
 *		The space is write-locked; the port is not locked.
 *		If returns FALSE, the port is also locked.
 *		Otherwise, entry is converted to a dead name.
 *
 *		Caller is responsible for a reference to port if it
 *		had died (returns TRUE).
 */

boolean_t
ipc_right_check(
	ipc_space_t              space,
	ipc_port_t               port,
	mach_port_name_t         name,
	ipc_entry_t              entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	/*
	 * Nothing to do if the port is still alive, or if the caller
	 * explicitly allows a dead send-once right (with no outstanding
	 * notification request) to pass through untouched.
	 */
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* give back the right's count on the (dead) port */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		ip_srights_dec(port);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		ip_sorights_dec(port);
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* TRUE: caller now owns the reference the entry used to hold */
	return TRUE;
}
735 
/*
 *	Routine:	ipc_right_terminate
 *	Purpose:
 *		Cleans up an entry in a terminated space.
 *		The entry isn't deallocated or removed
 *		from reverse hash tables.
 *	Conditions:
 *		The space is dead and unlocked.
 */

void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type   = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	/* keep only the generation and roll bits; the right is gone */
	entry->ie_object = IO_NULL;
	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* dead names carry no object and no request to clean up */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		if (!ip_active(port)) {
			/* port already died elsewhere: just drop our ref */
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		request = ipc_right_request_cancel(port, name, entry);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right: prepare no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
868 
/*
 *	Routine:	ipc_right_destroy
 *	Purpose:
 *		Destroys an entry in a space.
 *	Conditions:
 *		The space is write-locked (returns unlocked).
 *		The space must be active.
 *	Returns:
 *		KERN_SUCCESS		      The entry was destroyed.
 *      KERN_INVALID_CAPABILITY   The port is pinned.
 *      KERN_INVALID_RIGHT        Port guard violation.
 */

kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* no object backing a dead name; just free the entry */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		/* free the entry before unlocking the space */
		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed by the task */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		if (!ip_active(port)) {
			/* dead port: release the entry and our reference */
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right: prepare no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port is not in this space: just drop our reference */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1016 
1017 /*
1018  *	Routine:	ipc_right_dealloc
1019  *	Purpose:
1020  *		Releases a send/send-once/dead-name/port_set user ref.
1021  *		Like ipc_right_delta with a delta of -1,
1022  *		but looks at the entry to determine the right.
1023  *	Conditions:
1024  *		The space is write-locked, and is unlocked upon return.
1025  *		The space must be active.
1026  *	Returns:
1027  *		KERN_SUCCESS		A user ref was released.
1028  *		KERN_INVALID_RIGHT	Entry has wrong type.
1029  *		KERN_INVALID_CAPABILITY	Deallocating a pinned right.
1030  */
1031 
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	/*
	 * `port` is set by the SEND / SEND_ONCE cases when ipc_right_check()
	 * discovers the port died; the reference it holds is released only
	 * after the space lock has been dropped (see the dead_name case).
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);


	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port sets carry no urefs and no dead-name requests */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Also reached via goto from the SEND and SEND_ONCE cases
		 * below, after ipc_right_check() turned the entry into a
		 * dead name (in which case `port` holds a ref to release).
		 */

		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: free the entry itself */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always carries exactly one uref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; the entry has become a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes our ref, unlocks the port */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; the entry has become a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name;     /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* releasing the last uref deletes the whole right */
			if (ip_is_pinned(port)) {
				/* pinned send rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right anywhere: arm a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel(port, name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* deliver notifications only after every lock is dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			/*
			 * strip the send bit and the urefs from the entry;
			 * the receive right remains under this name
			 */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		/* entry holds no right type that carries a releasable uref */
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1230 
1231 /*
1232  *	Routine:	ipc_right_delta
1233  *	Purpose:
1234  *		Modifies the user-reference count for a right.
1235  *		May deallocate the right, if the count goes to zero.
1236  *	Conditions:
1237  *		The space is write-locked, and is unlocked upon return.
1238  *		The space must be active.
1239  *	Returns:
1240  *		KERN_SUCCESS		Count was modified.
1241  *		KERN_INVALID_RIGHT	Entry has wrong type.
1242  *		KERN_INVALID_VALUE	Bad delta for the right.
1243  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1244  */
1245 
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	/*
	 * `port` doubles as the "release this reference after the space
	 * lock is dropped" slot used by the invalid_right exit path.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

/*
 *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
 *	switch below. It is used to keep track of those cases (in DIPC)
 *	where we have postponed the dropping of a port reference. Since
 *	the dropping of the reference could cause the port to disappear
 *	we postpone doing so when we are holding the space lock.
 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		/* port sets only accept a delta of 0 (no-op) or -1 (destroy) */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* no guard exception if we once held receive here (EX_RECEIVE) */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* receive rights only accept a delta of 0 (no-op) or -1 (destroy) */
		if (delta == 0) {
			goto success;
		}

		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 *	The port lock is needed for ipc_right_dncancel;
		 *	otherwise, we wouldn't have to take the lock
		 *	until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				ip_reference(port);
			} else {
				/*
				 *	The remaining send right turns into a
				 *	dead name.  Notice we don't decrement
				 *	ip_srights, generate a no-senders notif,
				 *	or use ipc_right_dncancel, because the
				 *	port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			/* pure receive right: drop the entry entirely */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel(port, name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died underneath us; entry is now a dead name */
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* send-once rights only accept a delta of 0 (no-op) or -1 */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes our ref, unlocks the port */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			/*
			 * A live send/send-once right only counts as a dead
			 * name if the underlying port has actually died.
			 */
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* dead: recapture bits; defer the ref release to relport */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* all urefs gone: free the dead-name entry */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		/* drop the dead port's ref only after the space is unlocked */
		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; the invalid_right path releases the ref */
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* dropping the last uref removes the send right */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right anywhere: arm a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* keep the receive right; just clear send + urefs */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* defer the release until all locks are dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	/* release a ref taken while the space lock was still held */
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* locks were already dropped and the guard exception raised */
	return KERN_INVALID_RIGHT;
}
1678 
1679 /*
1680  *	Routine:	ipc_right_destruct
1681  *	Purpose:
1682  *		Deallocates the receive right and modifies the
1683  *		user-reference count for the send rights as requested.
1684  *	Conditions:
1685  *		The space is write-locked, and is unlocked upon return.
1686  *		The space must be active.
1687  *	Returns:
1688  *		KERN_SUCCESS		Count was modified.
1689  *		KERN_INVALID_RIGHT	Entry has wrong type.
1690  *		KERN_INVALID_VALUE	Bad delta for the right.
1691  */
1692 
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-uref delta is only meaningful if we also hold send rights */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send-uref deltas are accepted */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* all send urefs removed: drop our send right too */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right anywhere: arm a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
		} else {
			/*
			 *	The remaining send right turns into a
			 *	dead name.  Notice we don't decrement
			 *	ip_srights, generate a no-senders notif,
			 *	or use ipc_right_dncancel, because the
			 *	port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		/* no send rights remain: free the entry entirely */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1865 
1866 
1867 /*
1868  *	Routine:	ipc_right_info
1869  *	Purpose:
1870  *		Retrieves information about the right.
1871  *	Conditions:
1872  *		The space is active and write-locked.
1873  *	        The space is unlocked upon return.
1874  *	Returns:
1875  *		KERN_SUCCESS		Retrieved info
1876  */
1877 
1878 kern_return_t
ipc_right_info(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_port_type_t * typep,mach_port_urefs_t * urefsp)1879 ipc_right_info(
1880 	ipc_space_t             space,
1881 	mach_port_name_t        name,
1882 	ipc_entry_t             entry,
1883 	mach_port_type_t        *typep,
1884 	mach_port_urefs_t       *urefsp)
1885 {
1886 	ipc_port_t port;
1887 	ipc_entry_bits_t bits;
1888 	mach_port_type_t type = 0;
1889 	ipc_port_request_index_t request;
1890 
1891 	bits = entry->ie_bits;
1892 	request = entry->ie_request;
1893 	port = ip_object_to_port(entry->ie_object);
1894 
1895 	if (bits & MACH_PORT_TYPE_RECEIVE) {
1896 		assert(IP_VALID(port));
1897 
1898 		if (request != IE_REQ_NONE) {
1899 			ip_mq_lock(port);
1900 			require_ip_active(port);
1901 			type |= ipc_port_request_type(port, name, request);
1902 			ip_mq_unlock(port);
1903 		}
1904 		is_write_unlock(space);
1905 	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
1906 		/*
1907 		 * validate port is still alive - if so, get request
1908 		 * types while we still have it locked.  Otherwise,
1909 		 * recapture the (now dead) bits.
1910 		 */
1911 		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
1912 			if (request != IE_REQ_NONE) {
1913 				type |= ipc_port_request_type(port, name, request);
1914 			}
1915 			ip_mq_unlock(port);
1916 			is_write_unlock(space);
1917 		} else {
1918 			bits = entry->ie_bits;
1919 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
1920 			is_write_unlock(space);
1921 			ip_release(port);
1922 		}
1923 	} else {
1924 		is_write_unlock(space);
1925 	}
1926 
1927 	type |= IE_BITS_TYPE(bits);
1928 
1929 	*typep = type;
1930 	*urefsp = IE_BITS_UREFS(bits);
1931 	return KERN_SUCCESS;
1932 }
1933 
1934 /*
1935  *	Routine:	ipc_right_copyin_check_reply
1936  *	Purpose:
1937  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1938  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1939  *		If the reply port is an immovable send right, it errors out.
1940  *	Conditions:
1941  *		The space is locked (read or write) and active.
1942  */
1943 
boolean_t
ipc_right_copyin_check_reply(
	__assert_only ipc_space_t       space,
	mach_port_name_t                reply_name,
	ipc_entry_t                     reply_entry,
	mach_msg_type_name_t            reply_type,
	ipc_entry_t                     dest_entry,
	int                             *reply_port_semantics_violation)
{
	ipc_entry_bits_t bits;
	ipc_port_t reply_port;
	ipc_port_t dest_port;

	bits = reply_entry->ie_bits;
	assert(is_active(space));

	switch (reply_type) {
	case MACH_MSG_TYPE_MAKE_SEND:
		/* making a send requires holding the receive right */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		/* making a send-once requires holding the receive right */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MOVE_RECEIVE:
		/* ipc_kmsg_copyin_header already filters it out */
		return FALSE;

	case MACH_MSG_TYPE_COPY_SEND:
	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* a dead name passes the per-type checks trivially */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			break;
		}

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			return FALSE;
		}

		reply_port = ip_object_to_port(reply_entry->ie_object);
		assert(reply_port != IP_NULL);

		/*
		 * active status peek to avoid checks that will be skipped
		 * on copyin for dead ports.  Lock not held, so will not be
		 * atomic (but once dead, there's no going back).
		 */
		if (!ip_active(reply_port)) {
			break;
		}

		/*
		 * Can't copyin a send right that is marked immovable. This bit
		 * is set only during port creation and never unset. So it can
		 * be read without a lock.
		 */
		if (ip_is_immovable_send(reply_port)) {
			mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
			return FALSE;
		}

		/* the entry must actually hold the right the disposition moves */
		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
				return FALSE;
			}
		} else {
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				return FALSE;
			}
		}

		break;
	}

	default:
		panic("ipc_right_copyin_check: strange rights");
	}

	/* port sets cannot be reply/dest ports; skip the semantics checks */
	if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
	    (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
		return TRUE;
	}

	/* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
	reply_port = ip_object_to_port(reply_entry->ie_object);
	assert(reply_port != IP_NULL);

	if (ip_active(reply_port)) {
		if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
			return FALSE;
		}

		/* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
		dest_port = ip_object_to_port(dest_entry->ie_object);
		if (IP_VALID(dest_port) && ip_active(dest_port)) {
			/* populates reply_port_semantics_violation if we need to send telemetry */
			if (ip_violates_rigid_reply_port_semantics(dest_port, reply_port, reply_port_semantics_violation) ||
			    ip_violates_reply_port_semantics(dest_port, reply_port, reply_port_semantics_violation)) {
				/*
				 * NOTE(review): reply_port_semantics is not declared in this
				 * block — presumably a file-scope tunable gating enforcement
				 * versus telemetry-only; confirm its definition elsewhere.
				 */
				if (reply_port_semantics) {
					mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
					return FALSE;
				}
			}
		}
	}

	return TRUE;
}
2057 
2058 /*
2059  *	Routine:	ipc_right_copyin_check_guard_locked
2060  *	Purpose:
2061  *		Check if the port is guarded and the guard
2062  *		value matches the one passed in the arguments.
2063  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2064  *		check if the port is unguarded.
2065  *	Conditions:
2066  *		The port is locked.
2067  *	Returns:
2068  *		KERN_SUCCESS		Port is either unguarded
2069  *					or guarded with expected value
2070  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2071  *					This also raises a EXC_GUARD exception.
2072  */
2073 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2074 ipc_right_copyin_check_guard_locked(
2075 	mach_port_name_t name,
2076 	ipc_port_t port,
2077 	mach_port_context_t context,
2078 	mach_msg_guard_flags_t *guard_flags)
2079 {
2080 	mach_msg_guard_flags_t flags = *guard_flags;
2081 	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2082 		return KERN_SUCCESS;
2083 	} else if (port->ip_guarded && (port->ip_context == context)) {
2084 		return KERN_SUCCESS;
2085 	}
2086 
2087 	/* Incorrect guard; Raise exception */
2088 	mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2089 	return KERN_INVALID_ARGUMENT;
2090 }
2091 
2092 /*
2093  *	Routine:	ipc_right_copyin
2094  *	Purpose:
2095  *		Copyin a capability from a space.
2096  *		If successful, the caller gets a ref
2097  *		for the resulting object, unless it is IO_DEAD,
2098  *		and possibly a send-once right which should
2099  *		be used in a port-deleted notification.
2100  *
2101  *		If deadok is not TRUE, the copyin operation
2102  *		will fail instead of producing IO_DEAD.
2103  *
2104  *		The entry is deallocated if the entry type becomes
2105  *		MACH_PORT_TYPE_NONE.
2106  *	Conditions:
2107  *		The space is write-locked and active.
2108  *	Returns:
2109  *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
2110  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2111  *		KERN_INVALID_CAPABILITY	Trying to move an kobject port or an immovable right,
2112  *								or moving the last ref of pinned right
2113  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2114  */
2115 
kern_return_t
ipc_right_copyin(
	ipc_space_t                space,
	mach_port_name_t           name,
	ipc_entry_t                entry,
	mach_msg_type_name_t       msgt_name,
	ipc_object_copyin_flags_t  flags,
	ipc_object_t               *objectp,
	ipc_port_t                 *sorightp,
	ipc_port_t                 *releasep,
	int                        *assertcntp,
	mach_port_context_t        context,
	mach_msg_guard_flags_t     *guard_flags)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	kern_return_t kr;
	/* Decode the copyin behavior flags once up front. */
	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
	boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
	boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);

	*releasep = IP_NULL;
	*assertcntp = 0;

	bits = entry->ie_bits;

	assert(is_active(space));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		/* Minting a send right requires holding the receive right here. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* Reply ports never allow minting plain send rights. */
		if (ip_is_reply_port(port)) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mint a new naked send right for the caller. */
		ipc_port_make_send_any_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		/* Minting a send-once right also requires the receive right. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 * MAKE_SEND_ONCE on a reply port is only allowed when the
		 * caller explicitly opted in (the normal reply-port usage).
		 */
		if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mint a new naked send-once right for the caller. */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		bool allow_imm_recv = false;
		ipc_port_t request = IP_NULL;
		waitq_link_list_t free_l = { };

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
		 * The ipc_port structure uses the kdata union of kobject and
		 * imp_task exclusively. Thus, general use of a kobject port as
		 * a receive right can cause type confusion in the importance
		 * code.
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			/*
			 * Distinguish an invalid right, e.g., trying to move
			 * a send right as a receive right, from this
			 * situation which is, "This is a valid receive right,
			 * but it's also a kobject and you can't move it."
			 */
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * Certain port classes (non-bootstrap service ports, libxpc
		 * connection ports) may override the immovable-receive bit,
		 * but only when the corresponding copyin flag was passed.
		 */
		if (port->ip_service_port && port->ip_splabel &&
		    !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
		} else if (ip_is_libxpc_connection_port(port)) {
			allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
		}

		if ((!allow_imm_recv && port->ip_immovable_receive) ||
		    ip_is_reply_port(port) ||     /* never move reply port rcv right */
		    port->ip_specialreply) {
			assert(!ip_in_space(port, ipc_space_kernel));
			ip_mq_unlock(port);
			assert(current_task() != kernel_task);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* A guarded receive right may only move with a matching guard. */
		if (guard_flags != NULL) {
			kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock(port);
				return kr;
			}
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * The entry keeps its send right(s): strip only the
			 * receive bit, remember it was once a receive right
			 * (EX_RECEIVE), and re-hash the entry as a pure
			 * send entry.  The entry now needs its own port ref.
			 */
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			/*
			 * Pure receive right: the entry dies; cancel any
			 * dead-name request (returned for a port-deleted
			 * notification) and free the entry.
			 */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel(port, name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
		(void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
		if (guard_flags != NULL) {
			/* this flag will be cleared during copyout */
			*guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in.  Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, We'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			*assertcntp = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		ip_mq_unlock(port);

		/*
		 * This is unfortunate to do this while the space is locked,
		 * but plumbing it through all callers really hurts.
		 */
		waitq_link_free_list(WQT_PORT_SET, &free_l);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* If the port died, the entry turns into a dead name. */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto copy_dead;
		}
		/* port is locked and active */

		/* COPY_SEND needs a true send right, not a send-once right. */
		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Duplicate the send right; the entry's urefs are untouched. */
		ipc_port_copy_send_any_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* If the port died, the entry turns into a dead name. */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/* port is locked and active */

		/* MOVE_SEND needs a true send right, not a send-once right. */
		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);
			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (ip_is_reply_port(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == 1) {
			/* Moving the last user reference. */
			assert(port->ip_srights > 0);
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/*
				 * Entry also holds the receive right: keep the
				 * entry, drop just the send type bit, and give
				 * the caller a new port reference for the
				 * liberated naked send right.
				 */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);
				assert(!ip_is_pinned(port));

				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				/*
				 * Send-only entry: the entry dies, cancel any
				 * dead-name request and transfer the entry's
				 * port reference to the caller.
				 */
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* Can't strip the last ref of a pinned right. */
				if (ip_is_pinned(port)) {
					ip_mq_unlock(port);
					mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
					return KERN_INVALID_CAPABILITY;
				}

				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			/* More urefs remain: behave like COPY_SEND + uref drop. */
			ipc_port_copy_send_any_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);
		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		ipc_port_t request;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, flags)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		/* MOVE_SEND_ONCE needs a send-once right, not a send right. */
		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		/* Reply-port send-once rights move only when explicitly allowed. */
		if (ip_is_reply_port(port) && !allow_reply_move_so) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* A send-once entry always has exactly one uref. */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		/* Entry dies; its reference travels with the moved right. */
		request = ipc_right_request_cancel(port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

copy_dead:
	/* COPY_SEND of a dead name: no uref is consumed. */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;

move_dead:
	/* MOVE of a dead name: consume one uref, freeing the entry at zero. */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IO_NULL);

	if (!deadok) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == 1) {
		ipc_entry_dealloc(space, IO_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 1; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;
}
2567 
2568 /*
2569  *	Routine:	ipc_right_copyin_two_move_sends
2570  *	Purpose:
2571  *		Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2572  *		and deadok == FALSE, except that this moves two
2573  *		send rights at once.
2574  *	Conditions:
2575  *		The space is write-locked and active.
2576  *		The object is returned with two refs/send rights.
2577  *	Returns:
2578  *		KERN_SUCCESS					Acquired an object.
2579  *		KERN_INVALID_RIGHT				Name doesn't denote correct right.
2580  *		KERN_INVALID_CAPABILITY			Name does not allow copyin move send capability.
2581  */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	/* Need a send right with at least two urefs to move two sends. */
	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	/* Reply-port send rights can never be moved this way. */
	if (ip_is_reply_port(port)) {
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_CAPABILITY;
	}

	/* If the port died, the whole operation fails. */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 * that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_any_locked(port);
		ipc_port_copy_send_any_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_any_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel(port, name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	/* Caller receives the object with two refs/send rights. */
	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2691 
2692 
2693 /*
2694  *	Routine:	ipc_right_copyin_two
2695  *	Purpose:
2696  *		Like ipc_right_copyin with two dispositions,
2697  *		each of which results in a send or send-once right,
2698  *		and deadok = FALSE.
2699  *	Conditions:
2700  *		The space is write-locked and active.
2701  *		The object is returned with two refs/rights.
2702  *		Msgt_one refers to the dest_type.
2703  *      Copyin flags are currently only used in the context of send once rights.
2704  *	Returns:
2705  *		KERN_SUCCESS		Acquired an object.
2706  *		KERN_INVALID_RIGHT	Name doesn't denote correct right(s).
2707  *		KERN_INVALID_CAPABILITY	Name doesn't denote correct right for msgt_two.
2708  */
kern_return_t
ipc_right_copyin_two(
	ipc_space_t               space,
	mach_port_name_t          name,
	ipc_entry_t               entry,
	mach_msg_type_name_t      msgt_one,
	mach_msg_type_name_t      msgt_two,
	ipc_object_copyin_flags_t flags_one, /* Used only for send once rights. */
	ipc_object_copyin_flags_t flags_two, /* Used only for send once rights. */
	ipc_object_t              *objectp,
	ipc_port_t                *sorightp,
	ipc_port_t                *releasep)
{
	ipc_port_t port;
	kern_return_t kr;
	int assertcnt = 0;

	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));

	/*
	 *	This is a little tedious to make atomic, because
	 *	there are 25 combinations of valid dispositions.
	 *	However, most are easy.
	 */

	/*
	 *	If either is move-sonce, then there must be an error.
	 */
	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		return KERN_INVALID_RIGHT;
	}

	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
		/*
		 *	One of the dispositions needs a receive right.
		 *
		 *	If the copyin below succeeds, we know the receive
		 *	right is there (because the pre-validation of
		 *	the second disposition already succeeded in our
		 *	caller).
		 *
		 *	Hence the port is not in danger of dying.
		 */
		ipc_object_t object_two;

		/* First copyin may move an immovable send; the check ran earlier. */
		flags_one = flags_one | IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
		kr = ipc_right_copyin(space, name, entry,
		    msgt_one, flags_one,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		assert(IO_VALID(*objectp));
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);

		/*
		 *	Now copyin the second (previously validated)
		 *	disposition.  The result can't be a dead port,
		 *	as no valid disposition can make us lose our
		 *	receive right.
		 */
		kr = ipc_right_copyin(space, name, entry,
		    msgt_two, flags_two,
		    &object_two, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		assert(kr == KERN_SUCCESS);
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);
		assert(object_two == *objectp);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
		/*
		 *	This is an easy case.  Just use our
		 *	handy-dandy special-purpose copyin call
		 *	to get two send rights for the price of one.
		 */
		kr = ipc_right_copyin_two_move_sends(space, name, entry,
		    objectp, sorightp,
		    releasep);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	} else {
		mach_msg_type_name_t msgt_name;

		/*
		 *	Must be either a single move-send and a
		 *	copy-send, or two copy-send dispositions.
		 *	Use the disposition with the greatest side
		 *	effects for the actual copyin - then just
		 *	duplicate the send right you get back.
		 */
		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
		} else {
			msgt_name = MACH_MSG_TYPE_COPY_SEND;
		}

		kr = ipc_right_copyin(space, name, entry,
		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 *	Copy the right we got back.  If it is dead now,
		 *	that's OK.  Neither right will be usable to send
		 *	a message anyway.
		 *
		 *	Note that the port could be concurrently moved
		 *	outside of the space as a descriptor, and then
		 *	destroyed, which would not happen under the space lock.
		 *
		 *	It means we can't use ipc_port_copy_send() which
		 *	may fail if the port died.
		 */
		port = ip_object_to_port(*objectp);
		ip_mq_lock(port);
		ipc_port_copy_send_any_locked(port);
		ip_mq_unlock(port);
	}

	return KERN_SUCCESS;
}
2848 
2849 
2850 /*
2851  *	Routine:	ipc_right_copyout
2852  *	Purpose:
2853  *		Copyout a capability to a space.
2854  *		If successful, consumes a ref for the object.
2855  *
2856  *		Always succeeds when given a newly-allocated entry,
2857  *		because user-reference overflow isn't a possibility.
2858  *
2859  *		If copying out the object would cause the user-reference
2860  *		count in the entry to overflow, then the user-reference
2861  *		count is left pegged to its maximum value and the copyout
2862  *		succeeds anyway.
2863  *	Conditions:
2864  *		The space is write-locked and active.
2865  *		The object is locked and active.
2866  *		The object is unlocked; the space isn't.
2867  *	Returns:
2868  *		KERN_SUCCESS		Copied out capability.
2869  */
2870 
2871 kern_return_t
ipc_right_copyout(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_name,ipc_object_copyout_flags_t flags,mach_port_context_t * context,mach_msg_guard_flags_t * guard_flags,ipc_object_t object)2872 ipc_right_copyout(
2873 	ipc_space_t             space,
2874 	mach_port_name_t        name,
2875 	ipc_entry_t             entry,
2876 	mach_msg_type_name_t    msgt_name,
2877 	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	ipc_object_t            object)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	bits = entry->ie_bits;

	/*
	 * The object must be a live port, and the (already-allocated)
	 * entry must point at it.  NOTE(review): the port is locked on
	 * entry (every arm below unlocks it) and the space lock is held
	 * throughout — confirm against the caller's contract above.
	 */
	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = ip_object_to_port(object);

	/*
	 * Pinned copyout: mark the port pinned in this space.  Only
	 * valid for an immovable send right landing in a task that is
	 * itself immovable and pinned.
	 */
	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		/* a send-once right always lands in a fresh entry */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (port->ip_specialreply) {
			/*
			 * Special reply port: link it to the receiving
			 * thread's knote/workloop for sync IPC push.
			 */
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		/* entry consumes the message's so-right and port ref */
		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			/* entry already holds send rights: coalesce urefs */
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				ip_srights_dec(port);
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			ip_srights_dec(port);
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			/* receive right already here: no extra ref needed */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			/* brand-new entry: it keeps the message's port ref */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			/* make the (port -> name) reverse mapping findable */
			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		/*
		 * Snapshot the importance assertions this port carried
		 * while enqueued; they are backed out of the old
		 * destination once the port is unlocked (below).
		 */
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		/*
		 * Guarded receive right requested by the receiver: guard
		 * the port with the receive buffer address.  A pseudo-
		 * receive (failed send bounced back to the sender) must
		 * not make the right immovable in the sender's space.
		 */
		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_recv_bufs.recv_msg_addr;
			*context = port->ip_context;
			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (ip_is_libxpc_connection_port(port)) {
			/*
			 * There are 3 ways to reach here.
			 * 1. A libxpc client successfully sent this receive right to a named service
			 *    and we are copying out in that service's ipc space.
			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
			 *
			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
			 */
			port->ip_immovable_receive = 1;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/*
			 * This flag gets reset during all 3 ways described above for libxpc connection port.
			 * The only difference is launchd acts as an initiator instead of a libxpc client.
			 */
			if (service_port_defense_enabled) {
				port->ip_immovable_receive = 1;
			}

			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesnt end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}

			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			/* on RELEASE, only initproc gets the name/guard restore below */
			if (get_bsdtask_info(current_task()) != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		/* entry may hold send rights (coalesced), never receive */
		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		/*
		 * Propagate a pending sync bootstrap checkin to the port
		 * adjustment, except during pseudo-receive.
		 */
		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			/* entry already held a ref for its send rights; drop the extra one */
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			/* receive entries are found by name, not by reverse hash */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port.  They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		panic("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
3116