xref: /xnu-12377.81.4/osfmk/ipc/ipc_right.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_right.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC capabilities.
71  */
72 
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/coalition.h>
81 #include <kern/policy_internal.h>
82 
83 #include <ipc/ipc_policy.h>
84 #include <ipc/ipc_entry.h>
85 #include <ipc/ipc_space.h>
86 #include <ipc/ipc_object.h>
87 #include <ipc/ipc_hash.h>
88 #include <ipc/ipc_policy.h>
89 #include <ipc/ipc_port.h>
90 #include <ipc/ipc_pset.h>
91 #include <ipc/ipc_right.h>
92 #include <ipc/ipc_notify.h>
93 #include <ipc/ipc_importance.h>
94 #include <ipc/ipc_service_port.h>
95 
96 /*
97  *	Routine:	ipc_right_lookup_read
98  *	Purpose:
99  *		Finds an entry in a space, given the name.
100  *	Conditions:
101  *		Nothing locked.
102  *		If an object is found, it is locked and active.
103  *	Returns:
104  *		KERN_SUCCESS		Found an entry.
105  *		KERN_INVALID_TASK	The space is dead.
106  *		KERN_INVALID_NAME	Name doesn't exist in space.
107  */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t       *bitsp,
	ipc_object_t           *objectp)
{
	mach_port_index_t index;
	ipc_entry_table_t table;
	ipc_entry_t entry;
	ipc_object_t object;
	kern_return_t kr;

	index = MACH_PORT_INDEX(name);
	if (__improbable(index == 0)) {
		/* index 0 is never a valid entry */
		*bitsp = 0;
		*objectp = IPC_OBJECT_NULL;
		return KERN_INVALID_NAME;
	}

	smr_ipc_enter();

	/*
	 * Acquire a (possibly stale) pointer to the table,
	 * and guard it so that it can't be deallocated while we use it.
	 *
	 * smr_ipc_enter() has the property that it strongly serializes
	 * after any store-release. This is important because it means that if
	 * one considers this (broken) userspace usage:
	 *
	 * Thread 1:
	 *   - makes a semaphore, gets name 0x1003
	 *   - stores that name to a global `sema` in userspace
	 *
	 * Thread 2:
	 *   - spins to observe `sema` becoming non 0
	 *   - calls semaphore_wait() on 0x1003
	 *
	 * Then, because in order to return 0x1003 this thread issued
	 * a store-release (when calling is_write_unlock()),
	 * then this smr_entered_load() can't possibly observe a table
	 * pointer that is older than the one that was current when the
	 * semaphore was made.
	 *
	 * This fundamental property allows us to never loop.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		/* a NULL table means the space has been terminated */
		kr = KERN_INVALID_TASK;
		goto out_put;
	}
	entry = ipc_entry_table_get(table, index);
	if (__improbable(entry == NULL)) {
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Note: this should be an atomic load, but PAC and atomics
	 *       don't interact well together.
	 */
	object = entry->ie_volatile_object;

	/*
	 * Attempt to lock an object that lives in this entry.
	 * It might fail or be a completely different object by now.
	 *
	 * Make sure that acquiring the lock is fully ordered after any
	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
	 * This allows us to always reliably observe space termination below.
	 */
	os_atomic_barrier_before_lock_acquire();
	if (__improbable(object == IPC_OBJECT_NULL ||
	    !io_lock_allow_invalid(object))) {
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Now that we hold the object lock, we are preventing any entry
	 * in this space for this object to be mutated.
	 *
	 * If the space didn't grow after we acquired our hazardous reference,
	 * and before a mutation of the entry, then holding the object lock
	 * guarantees we will observe the truth of ie_bits, ie_object and
	 * ie_request (those are always mutated with the object lock held).
	 *
	 * However this ordering is problematic:
	 * - [A]cquisition of the table pointer
	 * - [G]rowth of the space (making the table pointer in [A] stale)
	 * - [M]utation of the entry
	 * - [L]ocking of the object read through [A].
	 *
	 * The space lock is held for both [G] and [M], and the object lock
	 * is held for [M], which means that once we lock the object we can
	 * observe if [G] happened by reloading the table pointer.
	 *
	 * We might still fail to observe any growth operation that happened
	 * after the last mutation of this object's entry, because holding
	 * an object lock doesn't guarantee anything about the liveness
	 * of the space table pointer. This is not a problem at all: by
	 * definition, those didn't affect the state of the entry.
	 *
	 * TODO: a data-structure where the entries are grown by "slabs",
	 *       would allow for the address of an ipc_entry_t to never
	 *       change once it exists in a space and would avoid a reload
	 *       (as well as making space growth faster).
	 *       We however still need to check for termination.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		kr = KERN_INVALID_TASK;
		goto out_put_unlock;
	}

	/*
	 * Tables never shrink so we don't need to validate the length twice.
	 */
	entry = ipc_entry_table_get_nocheck(table, index);

	/*
	 * Now that we hold the lock and have a "fresh enough" table pointer,
	 * validate if this entry is what we think it is.
	 *
	 * To the risk of being repetitive, we still need to protect
	 * those accesses under SMR, because subsequent
	 * table growths might retire the memory. However we know
	 * those growths will have left our entry unchanged.
	 */
	if (__improbable(entry->ie_object != object)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	ipc_entry_bits_t bits = entry->ie_bits;
	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
	    IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE)) {
		/* stale generation or unused entry: the name was freed/reused */
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* Done with hazardous accesses to the table */
	smr_ipc_leave();

	*bitsp = bits;
	*objectp = object;
	return KERN_SUCCESS;

out_put_unlock:
	io_unlock(object);
out_put:
	smr_ipc_leave();
	return kr;
}
262 
263 /*
264  *	Routine:	ipc_right_lookup_write
265  *	Purpose:
266  *		Finds an entry in a space, given the name.
267  *	Conditions:
268  *		Nothing locked.  If successful, the space is write-locked.
269  *	Returns:
270  *		KERN_SUCCESS		Found an entry.
271  *		KERN_INVALID_TASK	The space is dead.
272  *		KERN_INVALID_NAME	Name doesn't exist in space.
273  */
274 
275 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)276 ipc_right_lookup_write(
277 	ipc_space_t             space,
278 	mach_port_name_t        name,
279 	ipc_entry_t             *entryp)
280 {
281 	ipc_entry_t entry;
282 
283 	assert(space != IS_NULL);
284 
285 	is_write_lock(space);
286 
287 	if (!is_active(space)) {
288 		is_write_unlock(space);
289 		return KERN_INVALID_TASK;
290 	}
291 
292 	if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
293 		is_write_unlock(space);
294 		return KERN_INVALID_NAME;
295 	}
296 
297 	*entryp = entry;
298 	return KERN_SUCCESS;
299 }
300 
301 /*
302  *	Routine:	ipc_right_lookup_two_write
303  *	Purpose:
304  *		Like ipc_right_lookup except that it returns two
305  *		entries for two different names that were looked
306  *		up under the same space lock.
307  *	Conditions:
308  *		Nothing locked.  If successful, the space is write-locked.
309  *	Returns:
310  *		KERN_INVALID_TASK	The space is dead.
311  *		KERN_INVALID_NAME	Name doesn't exist in space.
312  */
313 
314 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)315 ipc_right_lookup_two_write(
316 	ipc_space_t             space,
317 	mach_port_name_t        name1,
318 	ipc_entry_t             *entryp1,
319 	mach_port_name_t        name2,
320 	ipc_entry_t             *entryp2)
321 {
322 	ipc_entry_t entry1;
323 	ipc_entry_t entry2;
324 
325 	assert(space != IS_NULL);
326 
327 	is_write_lock(space);
328 
329 	if (!is_active(space)) {
330 		is_write_unlock(space);
331 		return KERN_INVALID_TASK;
332 	}
333 
334 	if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
335 		is_write_unlock(space);
336 		mach_port_guard_exception(name1, 0, kGUARD_EXC_INVALID_NAME);
337 		return KERN_INVALID_NAME;
338 	}
339 	if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
340 		is_write_unlock(space);
341 		mach_port_guard_exception(name2, 0, kGUARD_EXC_INVALID_NAME);
342 		return KERN_INVALID_NAME;
343 	}
344 	*entryp1 = entry1;
345 	*entryp2 = entry2;
346 	return KERN_SUCCESS;
347 }
348 
349 /*
350  *	Routine:	ipc_right_reverse
351  *	Purpose:
352  *		Translate (space, port) -> (name, entry).
353  *		Only finds send/receive rights.
354  *		Returns TRUE if an entry is found; if so,
355  *		the port active.
356  *	Conditions:
357  *		The space must be locked (read or write) and active.
358  *		The port is locked and active
359  */
360 
361 bool
ipc_right_reverse(ipc_space_t space,ipc_port_t port,mach_port_name_t * namep,ipc_entry_t * entryp)362 ipc_right_reverse(
363 	ipc_space_t             space,
364 	ipc_port_t              port,
365 	mach_port_name_t       *namep,
366 	ipc_entry_t            *entryp)
367 {
368 	mach_port_name_t name;
369 	ipc_entry_t entry;
370 
371 	assert(is_active(space));
372 
373 	require_ip_active(port);
374 
375 	ip_mq_lock_held(port);
376 
377 	if (ip_in_space(port, space)) {
378 		name = ip_get_receiver_name(port);
379 		assert(name != MACH_PORT_NULL);
380 
381 		entry = ipc_entry_lookup(space, name);
382 
383 		assert(entry != IE_NULL);
384 		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
385 		assert(port == entry->ie_port);
386 
387 		*namep = name;
388 		*entryp = entry;
389 		return true;
390 	}
391 
392 	if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
393 		entry = *entryp;
394 		assert(entry != IE_NULL);
395 		assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
396 		assert(port == entry->ie_port);
397 
398 		return true;
399 	}
400 
401 	return false;
402 }
403 
404 /*
405  *	Routine:	ipc_right_request_cancel
406  *	Purpose:
407  *		Cancel a notification request and return the send-once right.
408  *		Afterwards, entry->ie_request == 0.
409  *	Conditions:
410  *		The space must be write-locked; the port must be locked.
411  *		The port must be active.
412  */
413 
414 static inline ipc_port_t
ipc_right_request_cancel(ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)415 ipc_right_request_cancel(
416 	ipc_port_t              port,
417 	mach_port_name_t        name,
418 	ipc_entry_t             entry)
419 {
420 	ipc_port_request_index_t request = entry->ie_request;
421 
422 	if (request != IE_REQ_NONE) {
423 		entry->ie_request = IE_REQ_NONE;
424 		return ipc_port_request_cancel(port, name, request);
425 	}
426 	return IP_NULL;
427 }
428 
429 /*
430  *	Routine:	ipc_right_dnrequest
431  *	Purpose:
432  *		Make a dead-name request, returning the previously
433  *		registered send-once right.  If notify is IP_NULL,
434  *		just cancels the previously registered request.
435  *
436  *	Conditions:
437  *		Nothing locked.  May allocate memory.
438  *		Only consumes/returns refs if successful.
439  *	Returns:
440  *		KERN_SUCCESS		Made/canceled dead-name request.
441  *		KERN_INVALID_TASK	The space is dead.
442  *		KERN_INVALID_NAME	Name doesn't exist in space.
443  *		KERN_INVALID_RIGHT	Name doesn't denote port/dead rights.
444  *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
445  *			immediate is FALSE or notify is IP_NULL.
446  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
447  */
448 
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	mach_msg_id_t           id,
	ipc_port_t             *previousp)
{
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Loop because the port's request table may need to be grown
	 * with all locks dropped, after which the right must be
	 * looked up and validated again.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && entry->ie_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = entry->ie_port;
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
				/* port is locked and active */

				/*
				 * If this port doesn't allow send_possible /
				 * dead-name notifications, fail.  We only need
				 * to protect send-once rights since they do not
				 * coalesce and allow for repeated notification
				 * requests/allocations.
				 */
				if ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) &&
				    !ipc_policy(port)->pol_notif_dead_name) {
					ip_mq_unlock(port);
					is_write_unlock(space);
					*previousp = IP_NULL;
					mach_port_guard_exception(ip_type(port), id, kGUARD_EXC_INVALID_NOTIFICATION_REQ);
					return KERN_DENIED;
				}

				/*
				 * No matter what, we need to cancel any
				 * previous request.
				 */
				previous = ipc_right_request_cancel(port, name, entry);

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) ||
				    !ip_full(port))) {
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there was a previous request, freeing it
				 * above guarantees that the subsequent
				 * allocation will find a slot and succeed,
				 * thus assuring an atomic swap.
				 */
#if IMPORTANCE_INHERITANCE
				/* arming: first armed send-possible request on this port */
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					/* retry with the grown table */
					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					/* on TRUE, the delta call consumed the port lock */
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the reference ipc_right_check() left us */
			if (port != IP_NULL) {
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	*previousp = previous;
	return KERN_SUCCESS;
}
626 
627 /*
628  *	Routine:	ipc_right_inuse
629  *	Purpose:
630  *		Check if an entry is being used.
631  *		Returns TRUE if it is.
632  *	Conditions:
633  *		The space is write-locked and active.
634  */
635 
bool
ipc_right_inuse(
	ipc_entry_t entry)
{
	/* an entry is in use iff its type bits record some right */
	return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
}
642 
643 /*
644  *	Routine:	ipc_right_check
645  *	Purpose:
646  *      Check if the port has died.  If it has — and this is not
647  *      a send-once right with no notification request being copied
648  *      in as a message destination — then
649  *	    clean up the entry and return TRUE.
650  *	Conditions:
651  *		The space is write-locked; the port is not locked.
652  *		If returns FALSE, the port is also locked.
653  *		Otherwise, entry is converted to a dead name.
654  *
655  *		Caller is responsible for a reference to port if it
656  *		had died (returns TRUE).
657  */
658 
bool
ipc_right_check(
	ipc_space_t              space,
	ipc_port_t               port,
	mach_port_name_t         name,
	ipc_entry_t              entry,
	ipc_copyin_op_t          copyin_reason)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == entry->ie_port);

	ip_mq_lock(port);
	/*
	 * Leave the right alone (return false with the port locked) if
	 * the port is still active, or if this is a send-once right with
	 * no notification request being copied in as a message destination.
	 */
	if (ip_active(port) ||
	    ((copyin_reason == IPC_COPYIN_KMSG_DESTINATION) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return false;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		ip_srights_dec(port);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		ip_sorights_dec(port);
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits &= ~(IE_BITS_TYPE_MASK | IE_BITS_PINNED_SEND);
	bits |= MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	entry->ie_bits = bits;
	entry->ie_object = IPC_OBJECT_NULL;

	ip_mq_unlock(port);

	/* note: the caller now owns a reference on the (dead) port */
	ipc_entry_modified(space, name, entry);

	return true;
}
742 
743 /*
744  *	Routine:	ipc_right_terminate
745  *	Purpose:
746  *		Cleans up an entry in a terminated space.
747  *		The entry isn't deallocated or removed
748  *		from reverse hash tables.
749  *	Conditions:
750  *		The space is dead and unlocked.
751  */
752 
void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type = IE_BITS_TYPE(entry->ie_bits);

	assert(!is_active(space));

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() does, as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* dead names hold no object and no request: nothing to release */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IPC_OBJECT_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = entry->ie_pset;

		assert(entry->ie_request == IE_REQ_NONE);
		ips_mq_lock(pset);
		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = entry->ie_port;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		ip_mq_lock(port);

		if (ip_active(port)) {
			request = ipc_right_request_cancel(port, name, entry);
		}

		if (type & MACH_PORT_TYPE_SEND) {
			if (ip_srights_dec(port) == 0) {
				/* we dropped the last send right */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			/* the name died with the space: send port-deleted */
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	default:
		ipc_unreachable("ipc_right_terminate: strange type");
	}

	/* hollow the entry: keep only the generation and roll bits */
	entry->ie_object = IPC_OBJECT_NULL;
	entry->ie_bits  &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);
}
837 
838 /*
839  *	Routine:	ipc_right_destroy
840  *	Purpose:
841  *		Destroys an entry in a space.
842  *	Conditions:
843  *		The space is write-locked (returns unlocked).
844  *		The space must be active.
845  *	Returns:
846  *		KERN_SUCCESS		  The entry was destroyed.
847  *		KERN_INVALID_CAPABILITY   The port is pinned.
848  *		KERN_INVALID_RIGHT        Port guard violation.
849  */
850 
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IPC_OBJECT_NULL);

		ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = entry->ie_pset;

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = entry->ie_port;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request = IP_NULL;

		ip_mq_lock(port);

		/* destroying a guarded receive right is a guard violation */
		if ((type & MACH_PORT_TYPE_RECEIVE) && port->ip_guarded &&
		    port->ip_context != 0) {
			uint64_t portguard = port->ip_context;

			ip_mq_unlock(port);
			is_write_unlock(space);
			mach_port_guard_exception(name, portguard,
			    kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}

		/* pinned send rights cannot be destroyed while the port is active */
		if ((bits & IE_BITS_PINNED_SEND) && ip_active(port)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			mach_port_guard_exception_pinned(space, name,
			    MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
			return KERN_INVALID_CAPABILITY;
		}

		/* point of no return */

		if (ip_active(port)) {
			request = ipc_right_request_cancel(port, name, entry);
		} else {
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
		}

		if (type == MACH_PORT_TYPE_SEND) {
			/* pure send rights live in the reverse hash: remove them */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		ipc_unreachable("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
973 
974 /*
975  *	Routine:	ipc_right_dealloc
976  *	Purpose:
977  *		Releases a send/send-once/dead-name/port_set user ref.
978  *		Like ipc_right_delta with a delta of -1,
979  *		but looks at the entry to determine the right.
980  *	Conditions:
981  *		The space is write-locked, and is unlocked upon return.
982  *		The space must be active.
983  *	Returns:
984  *		KERN_SUCCESS		A user ref was released.
985  *		KERN_INVALID_RIGHT	Entry has wrong type.
986  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
987  */
988 
989 kern_return_t
ipc_right_dealloc(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry)990 ipc_right_dealloc(
991 	ipc_space_t             space,
992 	mach_port_name_t        name,
993 	ipc_entry_t             entry)
994 {
	/*
	 * `port` is set by the SEND and SEND_ONCE cases; if ipc_right_check()
	 * finds the port died, control jumps to dead_name: which releases
	 * the reference that the conversion left behind.
	 */
995 	ipc_port_t port = IP_NULL;
996 	ipc_entry_bits_t bits;
997 	mach_port_type_t type;
998 
999 	bits = entry->ie_bits;
1000 	type = IE_BITS_TYPE(bits);
1001 
1002 	assert(is_active(space));
1003 
1004 	switch (type) {
1005 	case MACH_PORT_TYPE_PORT_SET: {
1006 		ipc_pset_t pset;
1007 
		/* Port sets carry no user refs: one dealloc removes the entry and destroys the set. */
1008 		assert(IE_BITS_UREFS(bits) == 0);
1009 		assert(entry->ie_request == IE_REQ_NONE);
1010 
1011 		pset = entry->ie_pset;
1012 		ips_mq_lock(pset);
1013 		assert(ips_active(pset));
1014 
1015 		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);
1016 
1017 		is_write_unlock(space);
1018 
1019 		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
1020 		break;
1021 	}
1022 
1023 	case MACH_PORT_TYPE_DEAD_NAME: {
	/*
	 * Also entered via goto from the SEND and SEND_ONCE cases below
	 * once ipc_right_check() has converted the entry to a dead name.
	 */
1024 dead_name:
1025 		assert(IE_BITS_UREFS(bits) > 0);
1026 		assert(entry->ie_request == IE_REQ_NONE);
1027 		assert(entry->ie_object == IPC_OBJECT_NULL);
1028 
1029 		if (IE_BITS_UREFS(bits) == 1) {
1030 			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
1031 		} else {
1032 			/* if urefs are pegged due to overflow, leave them pegged */
1033 			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
1034 				entry->ie_bits = bits - 1; /* decrement urefs */
1035 			}
1036 			ipc_entry_modified(space, name, entry);
1037 		}
1038 		is_write_unlock(space);
1039 
1040 		/* release any port that got converted to dead name below */
1041 		if (port != IP_NULL) {
1042 			ip_release(port);
1043 		}
1044 		break;
1045 	}
1046 
1047 	case MACH_PORT_TYPE_SEND_ONCE: {
1048 		ipc_port_t request;
1049 
		/* Send-once rights always have exactly one uref. */
1050 		assert(IE_BITS_UREFS(bits) == 1);
1051 
1052 		port = entry->ie_port;
1053 		assert(port != IP_NULL);
1054 
1055 		if (ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
1056 			bits = entry->ie_bits;
1057 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
1058 			goto dead_name;     /* it will release port */
1059 		}
1060 		/* port is locked and active */
1061 
		/* Removing a send-once right consumes it: deliver the send-once notification. */
1062 		request = ipc_right_request_cancel(port, name, entry);
1063 		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
1064 
1065 		is_write_unlock(space);
1066 
1067 		ipc_notify_send_once_and_unlock(port);
1068 
1069 		if (request != IP_NULL) {
1070 			ipc_notify_port_deleted(request, name);
1071 		}
1072 		break;
1073 	}
1074 
1075 	case MACH_PORT_TYPE_SEND: {
1076 		ipc_port_t request = IP_NULL;
1077 		ipc_notify_nsenders_t nsrequest = { };
1078 
1079 		assert(IE_BITS_UREFS(bits) > 0);
1080 
1081 		port = entry->ie_port;
1082 		assert(port != IP_NULL);
1083 
1084 		if (ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
1085 			bits = entry->ie_bits;
1086 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
1087 			goto dead_name;     /* it will release port */
1088 		}
1089 		/* port is locked and active */
1090 
1091 		assert(port->ip_srights > 0);
1092 
		/* Last uref: the send right itself is deleted (unless pinned). */
1093 		if (IE_BITS_UREFS(bits) == 1) {
1094 			if (bits & IE_BITS_PINNED_SEND) {
1095 				ip_mq_unlock(port);
1096 				goto destroy_pinned;
1097 			}
1098 
1099 			ip_srights_dec(port);
1100 			if (port->ip_srights == 0) {
				/* no-senders notification is only emitted after all locks are dropped */
1101 				nsrequest = ipc_notify_no_senders_prepare(port);
1102 			}
1103 
1104 			request = ipc_right_request_cancel(port, name, entry);
1105 			ipc_hash_delete(space, ip_to_object(port), name, entry);
1106 			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
1107 			ip_mq_unlock(port);
1108 			is_write_unlock(space);
1109 
1110 			ip_release(port);
1111 		} else {
1112 			/* if urefs are pegged due to overflow, leave them pegged */
1113 			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
1114 				entry->ie_bits = bits - 1; /* decrement urefs */
1115 			}
1116 			ip_mq_unlock(port);
1117 			ipc_entry_modified(space, name, entry);
1118 			is_write_unlock(space);
1119 		}
1120 
1121 		ipc_notify_no_senders_emit(nsrequest);
1122 
1123 		if (request != IP_NULL) {
1124 			ipc_notify_port_deleted(request, name);
1125 		}
1126 		break;
1127 	}
1128 
1129 	case MACH_PORT_TYPE_SEND_RECEIVE: {
1130 		ipc_notify_nsenders_t nsrequest = { };
1131 
1132 		assert(IE_BITS_UREFS(bits) > 0);
1133 
1134 		port = entry->ie_port;
1135 		assert(port != IP_NULL);
1136 
1137 		ip_mq_lock(port);
1138 		require_ip_active(port);
1139 		assert(ip_get_receiver_name(port) == name);
1140 		assert(ip_in_space(port, space));
1141 		assert(port->ip_srights > 0);
1142 
		/*
		 * Only the send right is affected here; the receive right (and
		 * therefore the entry) always survives, so the entry is only
		 * modified, never deallocated.
		 */
1143 		if (IE_BITS_UREFS(bits) == 1) {
1144 			if (bits & IE_BITS_PINNED_SEND) {
1145 				ip_mq_unlock(port);
1146 				goto destroy_pinned;
1147 			}
1148 
1149 			ip_srights_dec(port);
1150 			if (port->ip_srights == 0) {
1151 				nsrequest = ipc_notify_no_senders_prepare(port);
1152 			}
1153 
1154 			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
1155 			    MACH_PORT_TYPE_SEND);
1156 		} else {
1157 			/* if urefs are pegged due to overflow, leave them pegged */
1158 			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
1159 				entry->ie_bits = bits - 1; /* decrement urefs */
1160 			}
1161 		}
1162 		ip_mq_unlock(port);
1163 
1164 		ipc_entry_modified(space, name, entry);
1165 		is_write_unlock(space);
1166 
1167 		ipc_notify_no_senders_emit(nsrequest);
1168 		break;
1169 	}
1170 
1171 	default:
1172 		is_write_unlock(space);
1173 		mach_port_guard_exception(name,
1174 		    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_DEALLOC, bits),
1175 		    kGUARD_EXC_INVALID_RIGHT);
1176 		return KERN_INVALID_RIGHT;
1177 	}
1178 
1179 	return KERN_SUCCESS;
1180 
1181 destroy_pinned:
	/* Pinned send rights may not lose their last uref: raise a guard exception instead. */
1182 	is_write_unlock(space);
1183 	mach_port_guard_exception_pinned(space, name,
1184 	    MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
1185 	return KERN_INVALID_CAPABILITY;
1186 }
1187 
1188 /*
1189  *	Routine:	ipc_right_delta
1190  *	Purpose:
1191  *		Modifies the user-reference count for a right.
1192  *		May deallocate the right, if the count goes to zero.
1193  *	Conditions:
1194  *		The space is write-locked, and is unlocked upon return.
1195  *		The space must be active.
1196  *	Returns:
1197  *		KERN_SUCCESS		Count was modified.
1198  *		KERN_INVALID_RIGHT	Entry has wrong type.
1199  *		KERN_INVALID_VALUE	Bad delta for the right.
1200  *		KERN_INVALID_CAPABILITY Deallocating a pinned right.
1201  */
1202 
1203 kern_return_t
ipc_right_delta(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_port_right_t right,mach_port_delta_t delta)1204 ipc_right_delta(
1205 	ipc_space_t             space,
1206 	mach_port_name_t        name,
1207 	ipc_entry_t             entry,
1208 	mach_port_right_t       right,
1209 	mach_port_delta_t       delta)
1210 {
1211 	ipc_port_t port = IP_NULL;
1212 	ipc_port_t port_to_release = IP_NULL;
1213 	ipc_entry_bits_t bits = entry->ie_bits;
1214 
1215 	/* Debugging information in case a mach port guard exception is raised */
1216 	bool throw_exception = false;
1217 
1218 /*
1219  *	The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
1220  *	switch below. It is used to keep track of those cases (in DIPC)
1221  *	where we have postponed the dropping of a port reference. Since
1222  *	the dropping of the reference could cause the port to disappear
1223  *	we postpone doing so when we are holding the space lock.
1224  */
1225 
1226 	assert(is_active(space));
1227 	assert(right < MACH_PORT_RIGHT_NUMBER);
1228 
1229 	/* Rights-specific restrictions and operations. */
1230 
1231 	switch (right) {
1232 	case MACH_PORT_RIGHT_PORT_SET: {
1233 		ipc_pset_t pset;
1234 
1235 		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
1236 			throw_exception = true;
1237 			goto invalid_right;
1238 		}
1239 
1240 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
1241 		assert(IE_BITS_UREFS(bits) == 0);
1242 		assert(entry->ie_request == IE_REQ_NONE);
1243 
		/* Port sets only accept a delta of 0 (no-op) or -1 (destroy). */
1244 		if (delta == 0) {
1245 			goto success;
1246 		}
1247 
1248 		if (delta != -1) {
1249 			goto invalid_value;
1250 		}
1251 
1252 		pset = entry->ie_pset;
1253 		ips_mq_lock(pset);
1254 		assert(ips_active(pset));
1255 
1256 		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);
1257 
1258 		is_write_unlock(space);
1259 
1260 		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
1261 		break;
1262 	}
1263 
1264 	case MACH_PORT_RIGHT_RECEIVE: {
1265 		ipc_port_t request = IP_NULL;
1266 
1267 		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* no exception if we previously had (and lost) the receive right */
1268 			if ((bits & IE_BITS_EX_RECEIVE) == 0) {
1269 				throw_exception = true;
1270 			}
1271 			goto invalid_right;
1272 		}
1273 
		/* Receive rights only accept a delta of 0 (no-op) or -1 (destroy). */
1274 		if (delta == 0) {
1275 			goto success;
1276 		}
1277 
1278 		if (delta != -1) {
1279 			goto invalid_value;
1280 		}
1281 
1282 		port = entry->ie_port;
1283 		assert(port != IP_NULL);
1284 
1285 		/*
1286 		 *	The port lock is needed for ipc_right_dncancel;
1287 		 *	otherwise, we wouldn't have to take the lock
1288 		 *	until just before dropping the space lock.
1289 		 */
1290 
1291 		ip_mq_lock(port);
1292 		require_ip_active(port);
1293 		assert(ip_get_receiver_name(port) == name);
1294 		assert(ip_in_space(port, space));
1295 
1296 		/* Mach Port Guard Checking */
1297 		if (port->ip_guarded) {
1298 			uint64_t portguard = port->ip_context;
1299 			ip_mq_unlock(port);
1300 			is_write_unlock(space);
1301 			/* Raise mach port guard exception */
1302 			mach_port_guard_exception(name, portguard, kGUARD_EXC_DESTROY);
1303 			goto guard_failure;
1304 		}
1305 
1306 		if (bits & MACH_PORT_TYPE_SEND) {
1307 			assert(IE_BITS_TYPE(bits) ==
1308 			    MACH_PORT_TYPE_SEND_RECEIVE);
1309 			assert(IE_BITS_UREFS(bits) > 0);
1310 			assert(port->ip_srights > 0);
1311 
1312 			if (ipc_port_has_prdrequest(port)) {
1313 				/*
1314 				 * Since another task has requested a
1315 				 * destroy notification for this port, it
1316 				 * isn't actually being destroyed - the receive
1317 				 * right is just being moved to another task.
1318 				 * Since we still have one or more send rights,
1319 				 * we need to record the loss of the receive
1320 				 * right and enter the remaining send right
1321 				 * into the hash table.
1322 				 */
1323 				bits &= ~MACH_PORT_TYPE_RECEIVE;
1324 				bits |= IE_BITS_EX_RECEIVE;
1325 				ipc_hash_insert(space, ip_to_object(port),
1326 				    name, entry);
1327 				ip_reference(port);
1328 			} else {
1329 				/*
1330 				 *	The remaining send right turns into a
1331 				 *	dead name.  Notice we don't decrement
1332 				 *	ip_srights, generate a no-senders notif,
1333 				 *	or use ipc_right_dncancel, because the
1334 				 *	port is destroyed "first".
1335 				 */
1336 				bits &= ~(IE_BITS_TYPE_MASK | IE_BITS_PINNED_SEND | IE_BITS_IMMOVABLE_SEND);
1337 				bits |= (MACH_PORT_TYPE_DEAD_NAME | IE_BITS_EX_RECEIVE);
1338 				if (entry->ie_request) {
1339 					entry->ie_request = IE_REQ_NONE;
1340 					/* if urefs are pegged due to overflow, leave them pegged */
1341 					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
1342 						bits++; /* increment urefs */
1343 					}
1344 				}
1345 				entry->ie_object = IPC_OBJECT_NULL;
1346 			}
1347 			entry->ie_bits = bits;
1348 			ipc_entry_modified(space, name, entry);
1349 		} else {
1350 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
1351 			assert(IE_BITS_UREFS(bits) == 0);
1352 
1353 			request = ipc_right_request_cancel(port, name, entry);
1354 			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
1355 		}
1356 		is_write_unlock(space);
1357 
1358 		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */
1359 
1360 		if (request != IP_NULL) {
1361 			ipc_notify_port_deleted(request, name);
1362 		}
1363 		break;
1364 	}
1365 
1366 	case MACH_PORT_RIGHT_SEND_ONCE: {
1367 		ipc_port_t request;
1368 
1369 		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
1370 			goto invalid_right;
1371 		}
1372 
1373 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
1374 		assert(IE_BITS_UREFS(bits) == 1);
1375 
1376 		port = entry->ie_port;
1377 		assert(port != IP_NULL);
1378 
1379 		if (ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
1380 			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
1381 			bits = entry->ie_bits;
1382 			throw_exception = true;
1383 			/* port has died and removed from entry, release port */
1384 			goto invalid_right;
1385 		}
1386 		/* port is locked and active */
1387 
1388 		assert(port->ip_sorights > 0);
1389 
		/* Send-once rights only accept a delta of 0 (no-op) or -1 (consume). */
1390 		if ((delta > 0) || (delta < -1)) {
1391 			ip_mq_unlock(port);
1392 			goto invalid_value;
1393 		}
1394 
1395 		if (delta == 0) {
1396 			ip_mq_unlock(port);
1397 			goto success;
1398 		}
1399 
		/* Consuming the right generates the send-once notification. */
1400 		request = ipc_right_request_cancel(port, name, entry);
1401 		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
1402 
1403 		is_write_unlock(space);
1404 
1405 		ipc_notify_send_once_and_unlock(port);
1406 
1407 		if (request != IP_NULL) {
1408 			ipc_notify_port_deleted(request, name);
1409 		}
1410 		break;
1411 	}
1412 
1413 	case MACH_PORT_RIGHT_DEAD_NAME: {
1414 		mach_port_urefs_t urefs;
1415 
		/*
		 * A name holding live send rights may have died since the caller
		 * looked it up; ipc_right_check() converts the entry to a dead
		 * name in that case, and the stale port ref is released after
		 * the space lock is dropped (see port_to_release).
		 */
1416 		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
1417 			port = entry->ie_port;
1418 			assert(port != IP_NULL);
1419 
1420 			if (!ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
1421 				/* port is locked and active */
1422 				ip_mq_unlock(port);
1423 				port = IP_NULL;
1424 				throw_exception = true;
1425 				goto invalid_right;
1426 			}
1427 			bits = entry->ie_bits;
1428 			/* port has died and removed from entry, release port */
1429 			port_to_release = port;
1430 			port = IP_NULL;
1431 		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
1432 			throw_exception = true;
1433 			goto invalid_right;
1434 		}
1435 
1436 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
1437 		assert(IE_BITS_UREFS(bits) > 0);
1438 		assert(entry->ie_object == IPC_OBJECT_NULL);
1439 		assert(entry->ie_request == IE_REQ_NONE);
1440 
1441 		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
1442 		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
1443 			/* this will release port */
1444 			goto invalid_value;
1445 		}
1446 
1447 		urefs = IE_BITS_UREFS(bits);
1448 
1449 		if (urefs == MACH_PORT_UREFS_MAX) {
1450 			/*
1451 			 * urefs are pegged due to an overflow
1452 			 * only a delta removing all refs at once can change it
1453 			 */
1454 
1455 			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
1456 				delta = 0;
1457 			}
1458 		} else {
1459 			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
1460 				/* this will release port */
1461 				goto invalid_value;
1462 			}
1463 			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
1464 				/* leave urefs pegged to maximum if it overflowed */
1465 				delta = MACH_PORT_UREFS_MAX - urefs;
1466 			}
1467 		}
1468 
		/* Zero remaining urefs removes the entry entirely. */
1469 		if ((urefs + delta) == 0) {
1470 			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
1471 		} else if (delta != 0) {
1472 			entry->ie_bits = bits + delta;
1473 			ipc_entry_modified(space, name, entry);
1474 		}
1475 
1476 		is_write_unlock(space);
1477 
1478 		if (port_to_release != IP_NULL) {
1479 			ip_release(port_to_release);
1480 			port_to_release = IP_NULL;
1481 		}
1482 
1483 		break;
1484 	}
1485 
1486 	case MACH_PORT_RIGHT_SEND: {
1487 		mach_port_urefs_t urefs;
1488 		ipc_port_t request = IP_NULL;
1489 		ipc_notify_nsenders_t nsrequest = { };
1490 
1491 		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
1492 			/* invalid right exception only when not live/dead confusion */
1493 			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
1494 #if !defined(AE_MAKESENDRIGHT_FIXED)
1495 			    /*
1496 			     * AE tries to add single send right without knowing if it already owns one.
1497 			     * But if it doesn't, it should own the receive right and delta should be 1.
1498 			     */
1499 			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
1500 #endif
1501 			    ) {
1502 				throw_exception = true;
1503 			}
1504 			goto invalid_right;
1505 		}
1506 
1507 		/* maximum urefs for send is MACH_PORT_UREFS_MAX */
1508 
1509 		port = entry->ie_port;
1510 		assert(port != IP_NULL);
1511 
1512 		if (ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
1513 			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
1514 			bits = entry->ie_bits;
1515 			/* port has died and removed from entry, release port */
1516 			goto invalid_right;
1517 		}
1518 		/* port is locked and active */
1519 
1520 		assert(port->ip_srights > 0);
1521 
1522 		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
1523 		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
1524 			ip_mq_unlock(port);
1525 			goto invalid_value;
1526 		}
1527 
1528 		urefs = IE_BITS_UREFS(bits);
1529 
1530 		if (urefs == MACH_PORT_UREFS_MAX) {
1531 			/*
1532 			 * urefs are pegged due to an overflow
1533 			 * only a delta removing all refs at once can change it
1534 			 */
1535 
1536 			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
1537 				delta = 0;
1538 			}
1539 		} else {
1540 			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
1541 				ip_mq_unlock(port);
1542 				goto invalid_value;
1543 			}
1544 			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
1545 				/* leave urefs pegged to maximum if it overflowed */
1546 				delta = MACH_PORT_UREFS_MAX - urefs;
1547 			}
1548 		}
1549 
		/* Delta consumed every uref: the send right itself is deleted (unless pinned). */
1550 		if ((urefs + delta) == 0) {
1551 			if (bits & IE_BITS_PINNED_SEND) {
1552 				ip_mq_unlock(port);
1553 				goto destroy_pinned;
1554 			}
1555 			ip_srights_dec(port);
1556 			if (port->ip_srights == 0) {
1557 				nsrequest = ipc_notify_no_senders_prepare(port);
1558 			}
1559 
1560 			if (bits & MACH_PORT_TYPE_RECEIVE) {
1561 				assert(ip_get_receiver_name(port) == name);
1562 				assert(ip_in_space(port, space));
1563 				assert(IE_BITS_TYPE(bits) ==
1564 				    MACH_PORT_TYPE_SEND_RECEIVE);
1565 
				/* receive right remains: strip only the send bits from the entry */
1566 				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
1567 				    MACH_PORT_TYPE_SEND);
1568 				ipc_entry_modified(space, name, entry);
1569 			} else {
1570 				assert(IE_BITS_TYPE(bits) ==
1571 				    MACH_PORT_TYPE_SEND);
1572 
1573 				request = ipc_right_request_cancel(port, name, entry);
1574 				ipc_hash_delete(space, ip_to_object(port),
1575 				    name, entry);
1576 				ipc_entry_dealloc(space, ip_to_object(port),
1577 				    name, entry);
1578 				port_to_release = port;
1579 			}
1580 		} else if (delta != 0) {
1581 			entry->ie_bits = bits + delta;
1582 			ipc_entry_modified(space, name, entry);
1583 		}
1584 
1585 		ip_mq_unlock(port);
1586 
1587 		is_write_unlock(space);
1588 
		/* ref drop and notifications deferred until all locks are released */
1589 		if (port_to_release != IP_NULL) {
1590 			ip_release(port_to_release);
1591 			port_to_release = IP_NULL;
1592 		}
1593 
1594 		ipc_notify_no_senders_emit(nsrequest);
1595 
1596 		if (request != IP_NULL) {
1597 			ipc_notify_port_deleted(request, name);
1598 		}
1599 		break;
1600 	}
1601 
1602 	case MACH_PORT_RIGHT_LABELH:
1603 		goto invalid_right;
1604 
1605 	default:
1606 		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
1607 		    right, name, (void *)entry, (void *)space);
1608 	}
1609 
1610 	return KERN_SUCCESS;
1611 
1612 success:
1613 	is_write_unlock(space);
1614 	return KERN_SUCCESS;
1615 
1616 invalid_right:
	/* `port` is only non-NULL here when a dead-port ref still needs releasing. */
1617 	is_write_unlock(space);
1618 	if (port != IP_NULL) {
1619 		ip_release(port);
1620 	}
1621 	if (throw_exception) {
1622 		mach_port_guard_exception(name,
1623 		    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_DELTA, right, bits),
1624 		    kGUARD_EXC_INVALID_RIGHT);
1625 	}
1626 	return KERN_INVALID_RIGHT;
1627 
1628 invalid_value:
1629 	is_write_unlock(space);
1630 	if (port_to_release) {
1631 		ip_release(port_to_release);
1632 	}
1633 	mach_port_guard_exception(name,
1634 	    MPG_PAYLOAD(MPG_FLAGS_INVALID_VALUE_DELTA, right, (uint16_t)delta,
1635 	    IE_BITS_UREFS(bits)),
1636 	    kGUARD_EXC_INVALID_VALUE);
1637 	return KERN_INVALID_VALUE;
1638 
1639 guard_failure:
	/* the guard exception was already raised before jumping here */
1640 	return KERN_INVALID_RIGHT;
1641 
1642 destroy_pinned:
	/* Pinned send rights may not lose their last uref: raise a guard exception instead. */
1643 	is_write_unlock(space);
1644 	mach_port_guard_exception_pinned(space, name,
1645 	    MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
1646 	return KERN_INVALID_CAPABILITY;
1647 }
1648 
1649 /*
1650  *	Routine:	ipc_right_destruct
1651  *	Purpose:
1652  *		Deallocates the receive right and modifies the
1653  *		user-reference count for the send rights as requested.
1654  *	Conditions:
1655  *		The space is write-locked, and is unlocked upon return.
1656  *		The space must be active.
1657  *	Returns:
1658  *		KERN_SUCCESS		Count was modified.
1659  *		KERN_INVALID_RIGHT	Entry has wrong type.
1660  *		KERN_INVALID_VALUE	Bad delta for the right.
1661  */
1662 
1663 kern_return_t
ipc_right_destruct(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_port_delta_t srdelta,uint64_t guard)1664 ipc_right_destruct(
1665 	ipc_space_t             space,
1666 	mach_port_name_t        name,
1667 	ipc_entry_t             entry,
1668 	mach_port_delta_t       srdelta,
1669 	uint64_t                guard)
1670 {
1671 	ipc_port_t port = IP_NULL;
1672 	ipc_entry_bits_t bits;
1673 
1674 	mach_port_urefs_t urefs;
1675 	ipc_port_t request = IP_NULL;
1676 	ipc_notify_nsenders_t nsrequest = { };
1677 
1678 	bits = entry->ie_bits;
1679 
1680 	assert(is_active(space));
1681 
1682 	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1683 		is_write_unlock(space);
1684 
1685 		/* No exception if we used to have receive and held entry since */
1686 		if ((bits & IE_BITS_EX_RECEIVE) == 0) {
1687 			mach_port_guard_exception(name,
1688 			    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_DESTRUCT, bits),
1689 			    kGUARD_EXC_INVALID_RIGHT);
1690 		}
1691 		return KERN_INVALID_RIGHT;
1692 	}
1693 
	/* A non-zero send-right delta requires that a send right actually exists. */
1694 	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
1695 		is_write_unlock(space);
1696 		mach_port_guard_exception(name,
1697 		    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_DESTRUCT, bits),
1698 		    kGUARD_EXC_INVALID_RIGHT);
1699 		return KERN_INVALID_RIGHT;
1700 	}
1701 
	/* Only zero or negative send-uref deltas make sense when destroying. */
1702 	if (srdelta > 0) {
1703 		goto invalid_value;
1704 	}
1705 
1706 	port = entry->ie_port;
1707 	assert(port != IP_NULL);
1708 
1709 	ip_mq_lock(port);
1710 	require_ip_active(port);
1711 	assert(ip_get_receiver_name(port) == name);
1712 	assert(ip_in_space(port, space));
1713 
1714 	/* Mach Port Guard Checking */
	/* Destroying a guarded port requires presenting the matching guard value. */
1715 	if (port->ip_guarded && (guard != port->ip_context)) {
1716 		uint64_t portguard = port->ip_context;
1717 		ip_mq_unlock(port);
1718 		is_write_unlock(space);
1719 		mach_port_guard_exception(name, portguard, kGUARD_EXC_DESTROY);
1720 		return KERN_INVALID_ARGUMENT;
1721 	}
1722 
1723 	/*
1724 	 * First reduce the send rights as requested and
1725 	 * adjust the entry->ie_bits accordingly. The
1726 	 * ipc_entry_modified() call is made once the receive
1727 	 * right is destroyed too.
1728 	 */
1729 
1730 	if (srdelta) {
1731 		assert(port->ip_srights > 0);
1732 
1733 		urefs = IE_BITS_UREFS(bits);
1734 
1735 		/*
1736 		 * Since we made sure that srdelta is negative,
1737 		 * the check for urefs overflow is not required.
1738 		 */
1739 		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
1740 			ip_mq_unlock(port);
1741 			goto invalid_value;
1742 		}
1743 
1744 		if (urefs == MACH_PORT_UREFS_MAX) {
1745 			/*
1746 			 * urefs are pegged due to an overflow
1747 			 * only a delta removing all refs at once can change it
1748 			 */
1749 			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
1750 				srdelta = 0;
1751 			}
1752 		}
1753 
1754 		if ((urefs + srdelta) == 0) {
1755 			ip_srights_dec(port);
1756 			if (port->ip_srights == 0) {
				/* emitted below, after the space lock is dropped */
1757 				nsrequest = ipc_notify_no_senders_prepare(port);
1758 			}
1759 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
1760 			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
1761 			    MACH_PORT_TYPE_SEND);
1762 		} else {
1763 			entry->ie_bits = bits + srdelta;
1764 		}
1765 	}
1766 
1767 	/*
1768 	 * Now destroy the receive right. Update space and
1769 	 * entry accordingly.
1770 	 */
1771 
1772 	bits = entry->ie_bits;
1773 	if (bits & MACH_PORT_TYPE_SEND) {
1774 		assert(IE_BITS_UREFS(bits) > 0);
1775 		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);
1776 
1777 		if (ipc_port_has_prdrequest(port)) {
1778 			/*
1779 			 * Since another task has requested a
1780 			 * destroy notification for this port, it
1781 			 * isn't actually being destroyed - the receive
1782 			 * right is just being moved to another task.
1783 			 * Since we still have one or more send rights,
1784 			 * we need to record the loss of the receive
1785 			 * right and enter the remaining send right
1786 			 * into the hash table.
1787 			 */
1788 			bits &= ~MACH_PORT_TYPE_RECEIVE;
1789 			bits |= IE_BITS_EX_RECEIVE;
1790 			ipc_hash_insert(space, ip_to_object(port),
1791 			    name, entry);
1792 			ip_reference(port);
1793 		} else {
1794 			/*
1795 			 *	The remaining send right turns into a
1796 			 *	dead name.  Notice we don't decrement
1797 			 *	ip_srights, generate a no-senders notif,
1798 			 *	or use ipc_right_dncancel, because the
1799 			 *	port is destroyed "first".
1800 			 */
			/*
			 * NOTE(review): the analogous conversion in ipc_right_delta
			 * (MACH_PORT_RIGHT_RECEIVE case) also clears
			 * IE_BITS_IMMOVABLE_SEND here; this path does not —
			 * confirm against upstream whether that is intentional.
			 */
1801 			bits &= ~(IE_BITS_TYPE_MASK | IE_BITS_PINNED_SEND);
1802 			bits |= (MACH_PORT_TYPE_DEAD_NAME | IE_BITS_EX_RECEIVE);
1803 			if (entry->ie_request) {
1804 				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
1805 				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
1806 					bits++; /* increment urefs */
1807 				}
1808 			}
1809 			entry->ie_object = IPC_OBJECT_NULL;
1810 		}
1811 		entry->ie_bits = bits;
1812 		ipc_entry_modified(space, name, entry);
1813 	} else {
1814 		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
1815 		assert(IE_BITS_UREFS(bits) == 0);
1816 		request = ipc_right_request_cancel(port, name, entry);
1817 		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
1818 	}
1819 
1820 	/* Unlock space */
1821 	is_write_unlock(space);
1822 
1823 	ipc_notify_no_senders_emit(nsrequest);
1824 
1825 	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */
1826 
1827 	if (request != IP_NULL) {
1828 		ipc_notify_port_deleted(request, name);
1829 	}
1830 
1831 	return KERN_SUCCESS;
1832 
1833 invalid_value:
1834 	is_write_unlock(space);
1835 	mach_port_guard_exception(name,
1836 	    MPG_PAYLOAD(MPG_FLAGS_INVALID_VALUE_DESTRUCT, srdelta,
1837 	    IE_BITS_UREFS(bits)),
1838 	    kGUARD_EXC_INVALID_VALUE);
1839 	return KERN_INVALID_VALUE;
1840 }
1841 
1842 
1843 /*
1844  *	Routine:	ipc_right_info
1845  *	Purpose:
1846  *		Retrieves information about the right.
1847  *	Conditions:
1848  *		The space is active and write-locked.
1849  *	        The space is unlocked upon return.
1850  *	Returns:
1851  *		KERN_SUCCESS		Retrieved info
1852  */
1853 
1854 kern_return_t
ipc_right_info(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_port_type_t * typep,mach_port_urefs_t * urefsp)1855 ipc_right_info(
1856 	ipc_space_t             space,
1857 	mach_port_name_t        name,
1858 	ipc_entry_t             entry,
1859 	mach_port_type_t        *typep,
1860 	mach_port_urefs_t       *urefsp)
1861 {
1862 	ipc_port_t port;
1863 	ipc_entry_bits_t bits;
1864 	mach_port_type_t type = 0;
1865 	ipc_port_request_index_t request;
1866 
1867 	bits = entry->ie_bits;
1868 	request = entry->ie_request;
1869 	port = entry->ie_port;
1870 
	/* Holding the receive right: the port must still be active. */
1871 	if (bits & MACH_PORT_TYPE_RECEIVE) {
1872 		assert(IP_VALID(port));
1873 
1874 		if (request != IE_REQ_NONE) {
1875 			ip_mq_lock(port);
1876 			require_ip_active(port);
			/* fold any dead-name/port-destroyed request flags into the type */
1877 			type |= ipc_port_request_type(port, name, request);
1878 			ip_mq_unlock(port);
1879 		}
1880 		is_write_unlock(space);
1881 	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
1882 		/*
1883 		 * validate port is still alive - if so, get request
1884 		 * types while we still have it locked.  Otherwise,
1885 		 * recapture the (now dead) bits.
1886 		 */
1887 		if (!ipc_right_check(space, port, name, entry, IPC_COPYIN_REASON_NONE)) {
1888 			if (request != IE_REQ_NONE) {
1889 				type |= ipc_port_request_type(port, name, request);
1890 			}
1891 			ip_mq_unlock(port);
1892 			is_write_unlock(space);
1893 		} else {
			/* entry was converted to a dead name: drop the stale port ref */
1894 			bits = entry->ie_bits;
1895 			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
1896 			is_write_unlock(space);
1897 			ip_release(port);
1898 		}
1899 	} else {
		/* dead name or port set: nothing to look at on the object side */
1900 		is_write_unlock(space);
1901 	}
1902 
1903 	type |= IE_BITS_TYPE(bits);
1904 
1905 	*typep = type;
1906 	*urefsp = IE_BITS_UREFS(bits);
1907 	return KERN_SUCCESS;
1908 }
1909 
1910 /*
1911  *	Routine:	ipc_right_copyin_check_reply
1912  *	Purpose:
1913  *		Check if a subsequent ipc_right_copyin would succeed. Used only
1914  *		by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1915  *		If the reply port is an immovable send right, it errors out.
1916  *	Conditions:
1917  *		The space is locked (read or write) and active.
1918  */
1919 
1920 bool
ipc_right_copyin_check_reply(__assert_only ipc_space_t space,mach_port_name_t reply_name,ipc_entry_t reply_entry,mach_msg_type_name_t reply_type)1921 ipc_right_copyin_check_reply(
1922 	__assert_only ipc_space_t       space,
1923 	mach_port_name_t                reply_name,
1924 	ipc_entry_t                     reply_entry,
1925 	mach_msg_type_name_t            reply_type)
1926 {
1927 	ipc_entry_bits_t reply_bits = reply_entry->ie_bits;
1928 	ipc_port_t reply_port = reply_entry->ie_port;
1929 
1930 	assert(is_active(space));
1931 
	/* Ports with reply-port semantics may only be sent as send-once rights. */
1932 	if (ip_is_reply_port(reply_port) &&
1933 	    !MACH_MSG_TYPE_PORT_ANY_SEND_ONCE(reply_type)) {
1934 		return false;
1935 	}
1936 
1937 	switch (reply_type) {
	/* MAKE_* dispositions mint new rights, which requires the receive right. */
1938 	case MACH_MSG_TYPE_MAKE_SEND:
1939 		if ((reply_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1940 			return false;
1941 		}
1942 		break;
1943 
1944 	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
1945 		if ((reply_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1946 			return false;
1947 		}
1948 		break;
1949 
1950 	case MACH_MSG_TYPE_MOVE_RECEIVE:
1951 		/* ipc_kmsg_copyin_header already filters it out */
1952 		return false;
1953 
1954 	case MACH_MSG_TYPE_MOVE_SEND:
		/* Moving the only uref of a pinned send right is forbidden. */
1955 		if ((reply_bits & IE_BITS_PINNED_SEND) &&
1956 		    ip_active(reply_port) &&
1957 		    IE_BITS_UREFS(reply_bits) == 1) {
1958 			mach_port_guard_exception_pinned(space, reply_name,
1959 			    MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
1960 			return false;
1961 		}
1962 		OS_FALLTHROUGH;
1963 
1964 	case MACH_MSG_TYPE_COPY_SEND:
1965 	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* Dead names pass here; copyin handles them separately. */
1966 		if (reply_bits & MACH_PORT_TYPE_DEAD_NAME) {
1967 			break;
1968 		}
1969 
1970 		if ((reply_bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
1971 			return false;
1972 		}
1973 
1974 		assert(reply_port != IP_NULL);
1975 
1976 		/*
1977 		 * active status peek to avoid checks that will be skipped
1978 		 * on copyin for dead ports.  Lock not held, so will not be
1979 		 * atomic (but once dead, there's no going back).
1980 		 */
1981 		if (!ip_active(reply_port)) {
1982 			break;
1983 		}
1984 
1985 		/*
1986 		 * Can't copyin a send right that is marked immovable. This bit is on
1987 		 * the entry and protected by the space lock.
1988 		 */
1989 		if (reply_entry->ie_bits & IE_BITS_IMMOVABLE_SEND) {
1990 			mach_port_guard_exception_immovable(space, reply_name, reply_port, MACH_MSG_TYPE_MOVE_SEND_ONCE, reply_entry);
1991 			return false;
1992 		}
1993 
		/* Finally, the entry must actually hold the right being copied in. */
1994 		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
1995 			if ((reply_bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
1996 				return false;
1997 			}
1998 		} else {
1999 			if ((reply_bits & MACH_PORT_TYPE_SEND) == 0) {
2000 				return false;
2001 			}
2002 		}
2003 
2004 		break;
2005 	}
2006 
2007 	default:
2008 		panic("ipc_right_copyin_check: strange rights");
2009 	}
2010 
2011 	return true;
2012 }
2013 
2014 /*
2015  *	Routine:	ipc_right_copyin_check_guard_locked
2016  *	Purpose:
2017  *		Check if the port is guarded and the guard
2018  *		value matches the one passed in the arguments.
2019  *		If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2020  *		check if the port is unguarded.
2021  *	Conditions:
2022  *		The port is locked.
2023  *	Returns:
2024  *		KERN_SUCCESS		Port is either unguarded
2025  *					or guarded with expected value
2026  *		KERN_INVALID_ARGUMENT	Port is either unguarded already or guard mismatch.
2027  *					This also raises a EXC_GUARD exception.
2028  */
2029 static kern_return_t
ipc_right_copyin_check_guard_locked(ipc_port_t port,mach_port_name_t name,mach_msg_guarded_port_descriptor_t * gdesc)2030 ipc_right_copyin_check_guard_locked(
2031 	ipc_port_t              port,
2032 	mach_port_name_t        name,
2033 	mach_msg_guarded_port_descriptor_t *gdesc)
2034 {
2035 	mach_port_context_t    context = gdesc->u_context;
2036 	mach_msg_guard_flags_t flags   = gdesc->flags;
2037 
2038 	if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2039 		return KERN_SUCCESS;
2040 	} else if (port->ip_guarded && (port->ip_context == context)) {
2041 		return KERN_SUCCESS;
2042 	}
2043 
2044 	/* Incorrect guard; Raise exception */
2045 	mach_port_guard_exception(name, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2046 	return KERN_INVALID_ARGUMENT;
2047 }
2048 
2049 void
ipc_right_copyin_rcleanup_init(ipc_copyin_rcleanup_t * icrc,mach_msg_guarded_port_descriptor_t * gdesc)2050 ipc_right_copyin_rcleanup_init(
2051 	ipc_copyin_rcleanup_t  *icrc,
2052 	mach_msg_guarded_port_descriptor_t *gdesc)
2053 {
2054 	*icrc = (ipc_copyin_rcleanup_t){
2055 		.icrc_guarded_desc = gdesc,
2056 	};
2057 }
2058 
2059 void
ipc_right_copyin_cleanup_destroy(ipc_copyin_cleanup_t * icc,mach_port_name_t name)2060 ipc_right_copyin_cleanup_destroy(
2061 	ipc_copyin_cleanup_t   *icc,
2062 	mach_port_name_t        name)
2063 {
2064 	if (icc->icc_release_port) {
2065 		ip_release(icc->icc_release_port);
2066 	}
2067 	if (icc->icc_deleted_port) {
2068 		ipc_notify_port_deleted(icc->icc_deleted_port, name);
2069 	}
2070 }
2071 
2072 void
ipc_right_copyin_rcleanup_destroy(ipc_copyin_rcleanup_t * icrc)2073 ipc_right_copyin_rcleanup_destroy(ipc_copyin_rcleanup_t *icrc)
2074 {
2075 #if IMPORTANCE_INHERITANCE
2076 	if (icrc->icrc_assert_count) {
2077 		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base,
2078 		    icrc->icrc_assert_count);
2079 	}
2080 #endif /* IMPORTANCE_INHERITANCE */
2081 	if (icrc->icrc_free_list.next) {
2082 		waitq_link_free_list(WQT_PORT_SET, &icrc->icrc_free_list);
2083 	}
2084 }
2085 
2086 /*
2087  *	Routine:	ipc_right_copyin
2088  *	Purpose:
2089  *		Copyin a capability from a space.
2090  *		If successful, the caller gets a ref
2091  *		for the resulting port, unless it is IP_DEAD,
2092  *		and possibly a send-once right which should
2093  *		be used in a port-deleted notification.
2094  *
2095  *		If deadok is not TRUE, the copyin operation
2096  *		will fail instead of producing IO_DEAD.
2097  *
2098  *		The entry is deallocated if the entry type becomes
2099  *		MACH_PORT_TYPE_NONE.
2100  *	Conditions:
2101  *		The space is write-locked and active.
2102  *	Returns:
2103  *		KERN_SUCCESS		Acquired a port, possibly IP_DEAD.
2104  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
2105  *		KERN_INVALID_CAPABILITY	Trying to move a kobject port,
2106  *					an immovable right or
2107  *					the last ref of a pinned right
2108  *		KERN_INVALID_ARGUMENT	Port is unguarded or guard mismatch
2109  */
2110 
kern_return_t
ipc_right_copyin(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyin_flags_t  flags,
	ipc_copyin_op_t         copyin_reason,
	ipc_entry_t             entry,
	ipc_port_t             *portp,
	ipc_copyin_cleanup_t   *icc,
	ipc_copyin_rcleanup_t  *icrc)
{
	ipc_entry_bits_t bits = entry->ie_bits;
	ipc_port_t port = entry->ie_port;
	ipc_object_label_t label;
	kern_return_t kr;

	/*
	 * Number of user references a MOVE_SEND consumes from the entry:
	 * two when the destination also performs an extra move, one otherwise.
	 */
	uint32_t moves = (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_MOVE) ? 2 : 1;
	bool deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	bool allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);

	/* EXTRA_MOVE and EXTRA_COPY are mutually exclusive */
	if (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_MOVE) {
		assert((flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY) == 0);
		assert(msgt_name == MACH_MSG_TYPE_MOVE_SEND);
	}
	if (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY) {
		assert(msgt_name == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_name == MACH_MSG_TYPE_COPY_SEND);
	}

	*portp = IP_NULL;
	icc->icc_release_port = IP_NULL;
	icc->icc_deleted_port = IP_NULL;

	assert(is_active(space));

	/* Only allow send_once disposition on certain ports */
	if (IP_VALID(port) && ip_is_reply_port(port) &&
	    !MACH_MSG_TYPE_PORT_ANY_SEND_ONCE(msgt_name)) {
		mach_port_guard_exception(name,
		    MPG_PAYLOAD(MPG_FLAGS_NONE, ip_type(port), msgt_name),
		    kGUARD_EXC_IMMOVABLE);
		return KERN_INVALID_CAPABILITY;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		/* requires holding the receive right locally */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		assert(port != IP_NULL);

		ip_mq_lock(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* mint a fresh naked send right for the caller */
		ipc_port_make_send_any_locked(port);
		ip_mq_unlock(port);

		*portp = port;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		bool send_telemetry = false;

		/* requires holding the receive right locally */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}
		assert(port != IP_NULL);

		/*
		 * Reply ports may only have send-once rights made from them
		 * on the kmsg reply/destination copyin paths.
		 */
		if (ip_is_reply_port(port) &&
		    (copyin_reason != IPC_COPYIN_KMSG_REPLY &&
		    copyin_reason != IPC_COPYIN_KMSG_DESTINATION)) {
			mach_port_guard_exception(name,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_COPYIN, copyin_reason,
			    msgt_name),
			    kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Reply ports can extend one single
		 * send-once right at any given moment.
		 */
#if DEVELOPMENT || DEBUG
		if (ip_is_reply_port(port) && (port->ip_sorights > 0)) {
			send_telemetry = true;
		}
#endif /* DEVELOPMENT || DEBUG */

		/* mint a fresh naked send-once right for the caller */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		if (__improbable(send_telemetry)) {
			mach_port_guard_exception(name,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_COPYIN, copyin_reason,
			    msgt_name),
			    kGUARD_EXC_REPLY_PORT_SINGLE_SO_RIGHT);
		}

		*portp = port;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}
		assert(port != IP_NULL);

		/*
		 * ipc_move_receive_allowed raises the appropriate
		 * Guard exception if needed
		 */
		if (!ipc_move_receive_allowed(space, port, name)) {
			return KERN_INVALID_CAPABILITY;
		}

		label = ip_mq_lock_label_get(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* enforce the (optional) guard carried in the descriptor */
		if (icrc->icrc_guarded_desc) {
			kr = ipc_right_copyin_check_guard_locked(port, name,
			    icrc->icrc_guarded_desc);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock_label_put(port, &label);
				return kr;
			}
			/* this flag will be cleared during copyout */
			icrc->icrc_guarded_desc->flags |=
			    MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			/* entry keeps the send right; strip the receive bit */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= IE_BITS_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port), name, entry);
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* entry dies; harvest a pending dead-name request, if any */
			request = ipc_right_request_cancel(port, name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* This will unguard the port and make it movable. */
		ipc_port_mark_in_limbo(port, &label, &icrc->icrc_free_list);

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in.  Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, We'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			icrc->icrc_assert_count = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		/* We already set the label above */
		ip_mq_unlock(port);

		*portp = port;
		icc->icc_deleted_port = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		/* turns the entry into a dead name if the port died */
		if (ipc_right_check(space, port, name, entry, copyin_reason)) {
			bits = entry->ie_bits;
			icc->icc_release_port = port;
			goto copy_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && (entry->ie_bits & IE_BITS_IMMOVABLE_SEND)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MACH_MSG_TYPE_COPY_SEND, entry);
			return KERN_INVALID_CAPABILITY;
		}

		/* the entry keeps its urefs; just grow naked send rights */
		ipc_port_copy_send_any_locked(port);
		if (flags & IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY) {
			ipc_port_copy_send_any_locked(port);
		}
		ip_mq_unlock(port);

		*portp = port;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);
		assert(port != IP_NULL);

		/* turns the entry into a dead name if the port died */
		if (ipc_right_check(space, port, name, entry, copyin_reason)) {
			bits = entry->ie_bits;
			icc->icc_release_port = port;
			goto move_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0 ||
		    IE_BITS_UREFS(bits) < moves) {
			ip_mq_unlock(port);
			goto invalid_right;
		}

		/* moving the last uref(s) of a pinned right is forbidden */
		if ((bits & IE_BITS_PINNED_SEND) && IE_BITS_UREFS(bits) == moves) {
			ip_mq_unlock(port);
			mach_port_guard_exception_pinned(space, name,
			    MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && (entry->ie_bits & IE_BITS_IMMOVABLE_SEND)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MACH_MSG_TYPE_MOVE_SEND, entry);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == moves) {
			assert(port->ip_srights > 0);

			/*
			 * We have exactly "moves" send rights for this port
			 * in this space, which means that we will liberate the
			 * naked send right held by this entry.
			 *
			 * However refcounting rules around entries are that
			 * naked send rights on behalf of spaces do not have an
			 * associated port reference, so we need to donate one
			 * ...
			 */
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				/*
				 * ... that we inject manually when the entry
				 * stays alive
				 */
				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* ... that we steal from the entry when it dies */
				request = ipc_right_request_cancel(port, name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			ipc_port_copy_send_any_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - moves; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		/* the destination may consume one more right than the caller */
		if (flags & (IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_COPY |
		    IPC_OBJECT_COPYIN_FLAGS_DEST_EXTRA_MOVE)) {
			ipc_port_copy_send_any_locked(port);
		}

		ip_mq_unlock(port);
		*portp = port;
		icc->icc_deleted_port = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, copyin_reason)) {
			bits = entry->ie_bits;
			icc->icc_release_port = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		/* a reply port's send-once right may only move as a destination */
		if (ip_is_reply_port(port) && copyin_reason != IPC_COPYIN_KMSG_DESTINATION) {
			ip_mq_unlock(port);
			mach_port_guard_exception(name,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_COPYIN, copyin_reason, msgt_name),
			    kGUARD_EXC_INVALID_RIGHT);
			return KERN_INVALID_CAPABILITY;
		}

		if (!allow_imm_send && (entry->ie_bits & IE_BITS_IMMOVABLE_SEND)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MACH_MSG_TYPE_MOVE_SEND_ONCE, entry);
			return KERN_INVALID_CAPABILITY;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		/* moving a send-once right always leaves the entry dead */
		request = ipc_right_request_cancel(port, name, entry);
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*portp = port;
		icc->icc_deleted_port = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

copy_dead:
	/* copying a dead name: entry is left untouched */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*portp = IP_DEAD;
	return KERN_SUCCESS;

move_dead:
	/* moving a dead name: consume "moves" urefs from the entry */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IPC_OBJECT_NULL);

	if (!deadok || IE_BITS_UREFS(bits) < moves) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == moves) {
		ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - moves; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*portp = IP_DEAD;
	return KERN_SUCCESS;
}
2555 
2556 /*
2557  *	Routine:	ipc_right_copyout_any_send
2558  *	Purpose:
2559  *		Copyout a capability to a space.
2560  *		If successful, consumes a ref for the port.
2561  *
2562  *		Always succeeds when given a newly-allocated entry,
2563  *		because user-reference overflow isn't a possibility.
2564  *
2565  *		If copying out the port would cause the user-reference
2566  *		count in the entry to overflow, then the user-reference
2567  *		count is left pegged to its maximum value and the copyout
2568  *		succeeds anyway.
2569  *	Conditions:
2570  *		The space is write-locked and active.
2571  *		The port is locked and active.
2572  *		The port is unlocked; the space isn't.
2573  */
2574 
void
ipc_right_copyout_any_send(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_entry_bits_t bits = entry->ie_bits;

	assert(IP_VALID(port));
	assert(ip_active(port));
	assert(entry->ie_port == port);
	ipc_object_label_t label = ip_label_get(port);

	/* decide whether the resulting entry must be marked immovable-send */
	if (ipc_should_mark_immovable_send(space->is_task, port, label)) {
		bits |= IE_BITS_IMMOVABLE_SEND;
	}
	ip_label_put(port, &label);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (ip_is_special_reply_port(port)) {
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			__assert_only mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			/* consume send right and ref */
			ip_srights_dec(port);
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
			bits |= IE_BITS_PINNED_SEND;
		}
		/* if urefs are pegged at the max, leave them pegged */
		if (IE_BITS_UREFS(bits) != MACH_PORT_UREFS_MAX) {
			bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
		break;

	default:
		ipc_unreachable("ipc_right_copyout_any_send: strange rights");
	}
}
2662 
void
ipc_right_copyout_recv_and_unlock_space(
	ipc_space_t             space,
	ipc_port_t              port,
	ipc_object_label_t     *label,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_guarded_port_descriptor_t *gdesc)
{
	struct knote *kn;
	ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
	/* snapshot the boosts this port contributed while in transit */
	natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */
	ipc_object_state_t in_space = IO_STATE_INACTIVE; /* means default */
	ipc_entry_bits_t bits = entry->ie_bits;

	assert(IP_VALID(port));
	assert(ip_active(port));
	assert(entry->ie_port == port);
	assert(port->ip_mscount == 0);

	kn = current_thread()->ith_knote;

	/*
	 * Receiving with MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE re-guards
	 * the port (non-strict) using the receive buffer address as context.
	 */
	if (gdesc && gdesc->flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) {
		port->ip_guarded = 1;
		port->ip_strict_guard = 0;
		port->ip_context = current_thread()->ith_recv_bufs.recv_msg_addr;
		gdesc->u_context = port->ip_context;
		gdesc->flags &= ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		in_space = IO_STATE_IN_SPACE_IMMOVABLE;
	}

	if (ip_is_any_service_port_type(label->io_type) &&
	    label->io_state == IO_STATE_IN_TRANSIT_PD) {
		ipc_service_port_label_t sp_label = label->iol_service;

		/*
		 * Check if this is a special port-destroyed
		 * notification to ensure that initproc doesnt end up
		 * with a guarded service port sent in a regular message
		 */

#if !(DEVELOPMENT || DEBUG)
#if CONFIG_COALITIONS
		if (!task_is_in_privileged_coalition(current_task(), COALITION_TYPE_JETSAM)) {
			panic("Service port not sent back to launchd");
		}
#else /* CONFIG_COALITIONS */
		if (!task_is_initproc(current_task())) {
			panic("Service port not sent back to launchd");
		}
#endif /* CONFIG_COALITIONS */
#endif /* !(DEVELOPMENT || DEBUG) */

		/*
		 * If the service port was guarded, verify the port name
		 * and restore the guard value.
		 *
		 * See mach_port_construct().
		 */
		if (sp_label->ispl_launchd_name) {
			if (name != sp_label->ispl_launchd_name) {
				panic("Service port name = 0x%x doesnt match "
				    "the stored launchd port name = 0x%x",
				    name, sp_label->ispl_launchd_name);
			}

			port->ip_guarded = 1;
			port->ip_strict_guard = 1;
			port->ip_context = sp_label->ispl_launchd_context;
		}
	}

	/*
	 * pseudo receive shouldn't set the receive right
	 * as immovable in the sender's space, it clearly moved once.
	 */
	dest = ipc_port_mark_in_space(port, label, space, name,
	    (kn == ITH_KNOTE_PSEUDO) ? IO_STATE_IN_SPACE : in_space);

	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(port->ip_srights > 0);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
	}
	/* the entry now also denotes the receive right */
	entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
	ipc_entry_modified(space, name, entry);

	boolean_t sync_bootstrap_checkin = FALSE;
	if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
		sync_bootstrap_checkin = TRUE;
	}
	if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
		kn = NULL;
	}
	ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
	/* port unlocked */

	if (bits & MACH_PORT_TYPE_SEND) {
		ip_release_live(port);

		/* entry is locked holding ref, so can use port */
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	is_write_unlock(space);

	if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
		/*
		 * Deduct the assertion counts we contributed to
		 * the old destination port.  They've already
		 * been reflected into the task as a result of
		 * getting enqueued.
		 */
		ip_mq_lock(dest);
		ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
		ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

		/* Drop turnstile ref on dest */
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
2792 }
2793