xref: /xnu-8019.80.24/osfmk/ipc/ipc_object.c (revision a325d9c4a84054e40bbe985afedcb50ab80993ea)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_object.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC objects.
71  */
72 
73 #include <mach/mach_types.h>
74 #include <mach/boolean.h>
75 #include <mach/kern_return.h>
76 #include <mach/port.h>
77 #include <mach/message.h>
78 
79 #include <kern/kern_types.h>
80 #include <kern/misc_protos.h>
81 #include <kern/ipc_kobject.h>
82 #include <kern/zalloc_internal.h> // zone_id_for_native_element
83 
84 #include <ipc/ipc_types.h>
85 #include <ipc/ipc_importance.h>
86 #include <ipc/port.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_entry.h>
89 #include <ipc/ipc_object.h>
90 #include <ipc/ipc_hash.h>
91 #include <ipc/ipc_right.h>
92 #include <ipc/ipc_notify.h>
93 #include <ipc/ipc_port.h>
94 #include <ipc/ipc_pset.h>
95 
96 #include <security/mac_mach_internal.h>
97 
/* deferred-free queue, drained by the thread_deallocate daemon */
static struct mpsc_daemon_queue ipc_object_deallocate_queue;

/* one zone per IPC object type: IOT_PORT and IOT_PORT_SET */
SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER];

/*
 * In order to do lockfree lookups in the IPC space, we combine two schemes:
 *
 * - the ipc table pointer is protected with hazard pointers to allow
 *   dereferencing it with only holding a ref on a task or space;
 *
 * - we use ipc_object_lock_allow_invalid in order to lock locks and validate
 *   that they are the droid we're looking for.
 *
 * The second half requires that virtual addresses assigned that ever held
 * a port, either hold a port, or nothing, forever. To get this property,
 * we just piggy back on the zone sequestering security feature which gives
 * us exactly that.
 *
 * However, sequestering really only "works" on a sufficiently large address
 * space, especially for a resource that can be made by userspace at will,
 * so we can't do lockless lookups on ILP32.
 *
 * Note: this scheme is incompatible with gzalloc (because it doesn't sequester)
 *       and kasan quarantines (because it uses elements to store backtraces
 *       in them which lets the waitq lock appear "valid" by accident when
 *       elements are freed).
 */
#if MACH_LOCKFREE_SPACE
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER | \
	ZC_NOGZALLOC | ZC_KASAN_NOQUARANTINE)
#else
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM)
#endif

ZONE_INIT(&ipc_object_zones[IOT_PORT],
    "ipc ports", sizeof(struct ipc_port),
    IPC_OBJECT_ZC_BASE | ZC_CACHING, ZONE_ID_IPC_PORT, NULL);

ZONE_INIT(&ipc_object_zones[IOT_PORT_SET],
    "ipc port sets", sizeof(struct ipc_pset),
    IPC_OBJECT_ZC_BASE, ZONE_ID_IPC_PORT_SET, NULL);
138 
139 __attribute__((noinline))
140 static void
ipc_object_free(unsigned int otype,ipc_object_t object,bool last_ref)141 ipc_object_free(unsigned int otype, ipc_object_t object, bool last_ref)
142 {
143 	if (last_ref && otype == IOT_PORT) {
144 		ipc_port_finalize(ip_object_to_port(object));
145 	}
146 	zfree(ipc_object_zones[otype], object);
147 }
148 
/*
 *	Routine:	ipc_object_free_safe
 *	Purpose:
 *		Defer the final free of an object to the deallocate daemon,
 *		for contexts where freeing inline is not allowed (see
 *		ipc_object_release_safe, which uses this path when the
 *		preemption level is non-zero).
 *
 *		The object must already be dead: its waitq is invalid and
 *		waitq_tspriv is clear, so that field can be reused in place
 *		as the MPSC queue linkage.
 */
__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	assert(!waitq_is_valid(wq));
	assert(wq->waitq_tspriv == NULL);
	/* waitq_tspriv doubles as the queue linkage; sizes must agree */
	assert(sizeof(wq->waitq_tspriv) == sizeof(struct mpsc_queue_chain));
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    (mpsc_queue_chain_t)&wq->waitq_tspriv, MPSC_QUEUE_NONE);
}
161 
/*
 *	Routine:	ipc_object_deallocate_queue_invoke
 *	Purpose:
 *		Daemon callback for ipc_object_deallocate_queue: recover the
 *		object from the waitq_tspriv field used as MPSC linkage by
 *		ipc_object_free_safe(), then perform the final free.
 */
static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct waitq *wq;
	ipc_object_t io;

	assert(dq == &ipc_object_deallocate_queue);

	/* invert the (mpsc_queue_chain_t)&wq->waitq_tspriv cast */
	wq = __container_of((void **)e, struct waitq, waitq_tspriv);
	io = io_from_waitq(wq);
	ipc_object_free(io_otype(io), io, true);
}
175 
/*
 *	Routine:	ipc_object_deallocate_register_queue
 *	Purpose:
 *		Register the deferred-free queue with the thread_deallocate
 *		daemon so queued objects get freed from daemon context.
 */
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}
182 
/*
 *	Routine:	ipc_object_reference
 *	Purpose:
 *		Take a reference to an object.
 */

void
ipc_object_reference(
	ipc_object_t    io)
{
	/* io_references is retained through the raw os_ref API; make sure
	 * the field really has the layout that API expects */
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
196 
/*
 *	Routine:	ipc_object_release
 *	Purpose:
 *		Release a reference to an object,
 *		freeing it when the last reference is dropped.
 *	Conditions:
 *		Must be called with preemption enabled, because dropping
 *		the last reference frees the object inline.
 */

void
ipc_object_release(
	ipc_object_t    io)
{
#if DEBUG
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io_otype(io), io, true);
	}
}
216 
/*
 *	Routine:	ipc_object_release_safe
 *	Purpose:
 *		Release a reference to an object safely:
 *		if this is the last reference and preemption is disabled,
 *		the free is deferred to the deallocate daemon instead of
 *		being performed inline.
 */

void
ipc_object_release_safe(
	ipc_object_t    io)
{
	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		if (get_preemption_level() == 0) {
			/* safe to free inline */
			ipc_object_free(io_otype(io), io, true);
		} else {
			/* cannot free here; punt to the daemon */
			ipc_object_free_safe(io);
		}
	}
}
235 
/*
 *	Routine:	ipc_object_release_live
 *	Purpose:
 *		Release a reference to an object that isn't the last one.
 *		(os_ref_release_live_raw enforces that invariant.)
 */

void
ipc_object_release_live(
	ipc_object_t    io)
{
	os_ref_release_live_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
248 
/*
 *	Routine:	ipc_object_translate
 *	Purpose:
 *		Look up an object in a space by name and right type.
 *	Conditions:
 *		Nothing locked before.  If successful, the object
 *		is returned active and locked.  The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Object returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right
 *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
 */
kern_return_t
ipc_object_translate(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_port_right_t       right,
	ipc_object_t            *objectp)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	kern_return_t kr;

	/* reject right values that cannot be translated at all */
	if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
		return KERN_INVALID_RIGHT;
	}

	kr = ipc_right_lookup_read(space, name, &bits, &object);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* object is locked and active */

	/* the entry must currently hold the requested right type */
	if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		io_unlock(object);
		return KERN_INVALID_RIGHT;
	}

	*objectp = object;
	return KERN_SUCCESS;
}
291 
/*
 *	Routine:	ipc_object_translate_two
 *	Purpose:
 *		Look up two objects in a space.
 *	Conditions:
 *		Nothing locked before.  If successful, the objects
 *		are returned locked.  The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Objects returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	A name doesn't denote a right.
 *		KERN_INVALID_RIGHT	A name doesn't denote the correct right.
 */

kern_return_t
ipc_object_translate_two(
	ipc_space_t             space,
	mach_port_name_t        name1,
	mach_port_right_t       right1,
	ipc_object_t            *objectp1,
	mach_port_name_t        name2,
	mach_port_right_t       right2,
	ipc_object_t            *objectp2)
{
	ipc_entry_t entry1;
	ipc_entry_t entry2;
	ipc_object_t object1, object2;
	kern_return_t kr;
	boolean_t doguard = TRUE;

	kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right1 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry1->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right2 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry2->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* lock object1 first, then object2 — both under the space read lock */
	object1 = entry1->ie_object;
	assert(object1 != IO_NULL);
	io_lock(object1);
	if (!io_active(object1)) {
		io_unlock(object1);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	object2 = entry2->ie_object;
	assert(object2 != IO_NULL);
	io_lock(object2);
	if (!io_active(object2)) {
		/* back out both object locks before dropping the space lock */
		io_unlock(object1);
		io_unlock(object2);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	*objectp1 = object1;
	*objectp2 = object2;

	is_read_unlock(space);
	return KERN_SUCCESS;
}
379 
/*
 *	Routine:	ipc_object_alloc_dead
 *	Purpose:
 *		Allocate a dead-name entry (an entry with no object).
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		The dead name is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc_dead(
	ipc_space_t             space,
	mach_port_name_t        *namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc(space, IO_NULL, namep, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */

	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
	ipc_entry_modified(space, *namep, entry);
	is_write_unlock(space);
	return KERN_SUCCESS;
}
413 
/*
 *	Routine:	ipc_object_alloc_dead_name
 *	Purpose:
 *		Allocate a dead-name entry, with a specific name.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		The dead name is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 */

kern_return_t
ipc_object_alloc_dead_name(
	ipc_space_t             space,
	mach_port_name_t        name)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* the caller-chosen name must not already denote a right */
	if (ipc_right_inuse(entry)) {
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	}

	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */

	assert(entry->ie_object == IO_NULL);
	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);
	return KERN_SUCCESS;
}
453 
/*
 *	Routine:	ipc_object_alloc
 *	Purpose:
 *		Allocate an object (port or port set) and an entry for it.
 *	Conditions:
 *		Nothing locked.
 *		The space is write locked on successful return.
 *		The caller doesn't get a reference for the object;
 *		the single initial reference belongs to the entry.
 *	Returns:
 *		KERN_SUCCESS		The object is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc(
	ipc_space_t             space,
	ipc_object_type_t       otype,
	mach_port_type_t        type,
	mach_port_urefs_t       urefs,
	mach_port_name_t        *namep,
	ipc_object_t            *objectp)
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(TRUE, otype, 0));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	/* seed the name from the object's address; ipc_entry_alloc may remap it */
	*namep = CAST_MACH_PORT_TO_NAME(object);
	kr = ipc_entry_alloc(space, object, namep, &entry);
	if (kr != KERN_SUCCESS) {
		/* entry never existed, so this is not the "last ref" teardown */
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, *namep, entry);

	*objectp = object;
	return KERN_SUCCESS;
}
504 
/*
 *	Routine:	ipc_object_alloc_name
 *	Purpose:
 *		Allocate an object, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the object is returned locked.
 *		The caller doesn't get a reference for the object.
 *
 *		finish_init() must call an ipc_*_init function
 *		that will return the object locked (using IPC_PORT_INIT_LOCKED,
 *		or SYNC_POLICY_INIT_LOCKED, or equivalent).
 *
 *	Returns:
 *		KERN_SUCCESS		The object is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 */

kern_return_t
ipc_object_alloc_name(
	ipc_space_t             space,
	ipc_object_type_t       otype,
	mach_port_type_t        type,
	mach_port_urefs_t       urefs,
	mach_port_name_t        name,
	ipc_object_t            *objectp,
	void                    (^finish_init)(ipc_object_t))
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(TRUE, otype, 0));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		/* entry never held the object, so not a "last ref" teardown */
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	if (ipc_right_inuse(entry)) {
		is_write_unlock(space);
		ipc_object_free(otype, object, false);
		return KERN_NAME_EXISTS;
	}

	entry->ie_bits |= type | urefs;
	entry->ie_object = object;

	/* the block is expected to leave the object locked (see above) */
	finish_init(object);
	/* object is locked */
	io_lock_held(object);

	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);

	*objectp = object;
	return KERN_SUCCESS;
}
572 
573 /*	Routine:	ipc_object_validate
574  *	Purpose:
575  *		Validates an ipc port or port set as belonging to the correct
576  *		zone.
577  */
578 
579 void
ipc_object_validate(ipc_object_t object)580 ipc_object_validate(
581 	ipc_object_t    object)
582 {
583 	if (io_otype(object) != IOT_PORT_SET) {
584 		zone_id_require(ZONE_ID_IPC_PORT,
585 		    sizeof(struct ipc_port), object);
586 	} else {
587 		zone_id_require(ZONE_ID_IPC_PORT_SET,
588 		    sizeof(struct ipc_pset), object);
589 	}
590 }
591 
592 /*
593  *	Routine:	ipc_object_copyin_type
594  *	Purpose:
595  *		Convert a send type name to a received type name.
596  */
597 
598 mach_msg_type_name_t
ipc_object_copyin_type(mach_msg_type_name_t msgt_name)599 ipc_object_copyin_type(
600 	mach_msg_type_name_t    msgt_name)
601 {
602 	switch (msgt_name) {
603 	case MACH_MSG_TYPE_MOVE_RECEIVE:
604 		return MACH_MSG_TYPE_PORT_RECEIVE;
605 
606 	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
607 	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
608 		return MACH_MSG_TYPE_PORT_SEND_ONCE;
609 
610 	case MACH_MSG_TYPE_MOVE_SEND:
611 	case MACH_MSG_TYPE_MAKE_SEND:
612 	case MACH_MSG_TYPE_COPY_SEND:
613 		return MACH_MSG_TYPE_PORT_SEND;
614 
615 	case MACH_MSG_TYPE_DISPOSE_RECEIVE:
616 	case MACH_MSG_TYPE_DISPOSE_SEND:
617 	case MACH_MSG_TYPE_DISPOSE_SEND_ONCE:
618 	/* fall thru */
619 	default:
620 		return MACH_MSG_TYPE_PORT_NONE;
621 	}
622 }
623 
/*
 *	Routine:	ipc_object_copyin
 *	Purpose:
 *		Copyin a capability from a space.
 *		If successful, the caller gets a ref
 *		for the resulting object, unless it is IO_DEAD.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
 */

kern_return_t
ipc_object_copyin(
	ipc_space_t                space,
	mach_port_name_t           name,
	mach_msg_type_name_t       msgt_name,
	ipc_object_t               *objectp,
	mach_port_context_t        context,
	mach_msg_guard_flags_t     *guard_flags,
	ipc_object_copyin_flags_t  copyin_flags)
{
	ipc_entry_t entry;
	ipc_port_t soright;
	ipc_port_t release_port;
	kern_return_t kr;
	int assertcnt = 0;

	/*
	 * Only the ALLOW_IMMOVABLE_SEND flag is honored from the caller;
	 * DEADOK is always set so dead names copy in as IO_DEAD.
	 */
	ipc_object_copyin_flags_t irc_flags = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
	irc_flags = (copyin_flags & irc_flags) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;
	/*
	 *	Could first try a read lock when doing
	 *	MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 *	and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	release_port = IP_NULL;
	kr = ipc_right_copyin(space, name, entry,
	    msgt_name, irc_flags,
	    objectp, &soright,
	    &release_port,
	    &assertcnt,
	    context,
	    guard_flags);
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/* drop any importance assertions the copyin transferred out */
	if (0 < assertcnt && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, assertcnt);
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* deferred reference drop, done outside the space lock */
	if (release_port != IP_NULL) {
		ip_release(release_port);
	}

	/* a send-once right displaced by the copyin gets a port-deleted notification */
	if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) {
		ipc_notify_port_deleted(soright, name);
	}

	return kr;
}
695 
/*
 *	Routine:	ipc_object_copyin_from_kernel
 *	Purpose:
 *		Copyin a naked capability from the kernel.
 *
 *		MACH_MSG_TYPE_MOVE_RECEIVE
 *			The receiver must be ipc_space_kernel
 *			or the receive right must already be in limbo.
 *			Consumes the naked receive right.
 *		MACH_MSG_TYPE_COPY_SEND
 *			A naked send right must be supplied.
 *			The port gains a reference, and a send right
 *			if the port is still active.
 *		MACH_MSG_TYPE_MAKE_SEND
 *			The receiver must be ipc_space_kernel.
 *			The port gains a reference and a send right.
 *		MACH_MSG_TYPE_MOVE_SEND
 *			Consumes a naked send right.
 *		MACH_MSG_TYPE_MAKE_SEND_ONCE
 *			The port gains a reference and a send-once right.
 *			Receiver also be the caller of device subsystem,
 *			so no assertion.
 *		MACH_MSG_TYPE_MOVE_SEND_ONCE
 *			Consumes a naked send-once right.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_copyin_from_kernel(
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name)
{
	assert(IO_VALID(object));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		require_ip_active(port);
		if (ip_in_a_space(port)) {
			/* only the kernel space may hold this receive right */
			assert(ip_in_space(port, ipc_space_kernel));
			assert(port->ip_immovable_receive == 0);

			/* relevant part of ipc_port_clear_receiver */
			port->ip_mscount = 0;

			/* port transtions to IN-LIMBO state */
			port->ip_receiver_name = MACH_PORT_NULL;
			port->ip_destination = IP_NULL;
		}
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			/* copying requires an existing naked send right */
			assert(port->ip_srights > 0);
		}
		/* srights is bumped even for a dead port; see header comment */
		port->ip_srights++;
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			/* receiver is the kernel, or a remote node's proxy */
			assert((ip_in_space(port, ipc_space_kernel)) ||
			    (port->ip_receiver->is_node_id != HOST_LOCAL_NODE));
			port->ip_mscount++;
		}

		port->ip_srights++;
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		/* move naked send right into the message */
		assert(ip_object_to_port(object)->ip_srights);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* move naked send-once right into the message */
		assert(ip_object_to_port(object)->ip_sorights);
		break;
	}

	default:
		panic("ipc_object_copyin_from_kernel: strange rights");
	}
}
810 
/*
 *	Routine:	ipc_object_destroy
 *	Purpose:
 *		Destroys a naked capability.
 *		Consumes a ref for the object.
 *
 *		A receive right should be in limbo or in transit.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_destroy(
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name)
{
	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(ip_object_to_port(object));
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		/* a destroyed send-once right must fire its notification */
		io_lock(object);
		ipc_notify_send_once_and_unlock(ip_object_to_port(object));
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE:
		ipc_port_release_receive(ip_object_to_port(object));
		break;

	default:
		panic("ipc_object_destroy: strange rights");
	}
}
848 
/*
 *	Routine:	ipc_object_destroy_dest
 *	Purpose:
 *		Destroys a naked capability for the destination of
 *		of a message. Consumes a ref for the object.
 *		(Only send and send-once are valid destination rights.)
 *
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_destroy_dest(
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name)
{
	ipc_port_t port = ip_object_to_port(object);

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		/* a destroyed send-once right must fire its notification */
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	default:
		panic("ipc_object_destroy_dest: strange rights");
	}
}
883 
/*
 *	Routine:	ipc_object_insert_send_right
 *	Purpose:
 *		Insert a send right into an object already in the space.
 *		The specified name must already point to a valid object.
 *
 *		Note: This really is a combined copyin()/copyout(),
 *		that avoids most of the overhead of being implemented that way.
 *
 *		This is the fastpath for mach_port_insert_right.
 *
 *	Conditions:
 *		Nothing locked.
 *
 *		msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
 *		MACH_MSG_TYPE_COPY_SEND (enforced by assert below).
 *
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	if (!IO_VALID(entry->ie_object)) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	io_lock(object);
	if (!io_active(object)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		/* MAKE_SEND requires the receive right under this name */
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port = ip_object_to_port(object);
			port->ip_mscount++;
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				/* first send right for this entry */
				port->ip_srights++;
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		/* COPY_SEND requires an existing send right under this name */
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	io_unlock(object);
	is_write_unlock(space);

	return kr;
}
975 
976 /*
977  *	Routine:	ipc_object_copyout
978  *	Purpose:
979  *		Copyout a capability, placing it into a space.
980  *		Always consumes a ref for the object.
981  *	Conditions:
982  *		Nothing locked.
983  *	Returns:
984  *		KERN_SUCCESS		Copied out object, consumed ref.
985  *		KERN_INVALID_TASK	The space is dead.
986  *		KERN_INVALID_CAPABILITY	The object is dead.
987  *		KERN_NO_SPACE		No room in space for another right.
988  *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
989  *			and overflow wasn't specified.
990  */
991 
kern_return_t
ipc_object_copyout(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	mach_port_name_t        *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	mach_port_name_t name;
	ipc_port_t port = ip_object_to_port(object);
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	/*
	 * If this copyout is happening under a machport knote
	 * (kqueue delivery), set up turnstile state before any
	 * IPC locks are taken.
	 */
	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	/*
	 * Loop until we hold the space write-locked with a spare entry
	 * reserved and the (possibly substituted) object locked.  Each
	 * `continue` below re-runs the loop because a lock was dropped.
	 */
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		/* reserve room for one new entry before locking the object */
		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		io_lock(object);
		if (!io_active(object)) {
			io_unlock(object);
			is_write_unlock(space);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* Don't actually copyout rights we aren't allowed to */
		if (!ip_label_check(space, port, msgt_name, &flags, &port_subst)) {
			io_unlock(object);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				/* substitute is dead; nothing left to copy out */
				object = IO_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			/* restart the whole dance with the substituted port */
			object = ip_to_object(port);
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, object is locked and active */

	/*
	 * Send/receive rights coalesce with an existing entry for the same
	 * port (found via reverse lookup); send-once rights, and ports the
	 * space doesn't already hold, claim the entry reserved above.
	 */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		ipc_entry_claim(space, object, &name, &entry);
	}

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, flags, context, guard_flags, object);

	/* object is unlocked */
	is_write_unlock(space);

out:
	/*
	 * This routine always consumes the caller's object ref: on success
	 * it was donated to the entry, on failure we destroy the right here
	 * (if the object is still valid).
	 */
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IO_VALID(object)) {
		ipc_object_destroy(object, msgt_name);
	}

	return kr;
}
1101 
1102 /*
1103  *	Routine:	ipc_object_copyout_name
1104  *	Purpose:
1105  *		Copyout a capability, placing it into a space.
1106  *		The specified name is used for the capability.
1107  *		If successful, consumes a ref for the object.
1108  *	Conditions:
1109  *		Nothing locked.
1110  *	Returns:
1111  *		KERN_SUCCESS		Copied out object, consumed ref.
1112  *		KERN_INVALID_TASK	The space is dead.
1113  *		KERN_INVALID_CAPABILITY	The object is dead.
1114  *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
1115  *			and overflow wasn't specified.
1116  *		KERN_RIGHT_EXISTS	Space has rights under another name.
1117  *		KERN_NAME_EXISTS	Name is already used.
 *		KERN_INVALID_VALUE	Supplied port name is invalid.
1119  */
1120 
kern_return_t
ipc_object_copyout_name(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        name)
{
	ipc_port_t port = ip_object_to_port(object);
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	/* make (or find) an entry under the caller-chosen name */
	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	io_lock(object);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed objects do not allow callers
	 * to pick the name they end up with.
	 */
	if (!io_active(object) || ip_is_kolabeled(port)) {
		io_unlock(object);
		/* undo the allocation above, but only if the entry was fresh */
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, object is locked and active */

	/*
	 * Send/receive rights must coalesce: if the space already holds
	 * this port under a different name, the requested name can't be
	 * honored.
	 */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &oname, &oentry)) {
		if (name != oname) {
			io_unlock(object);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IO_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		/* the name is occupied by an unrelated right */
		io_unlock(object);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_object == IO_NULL);

		entry->ie_object = object;
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * We are slamming a receive right into the space, without
	 * first having been enqueued on a port destined there.  So,
	 * we have to arrange to boost the task appropriately if this
	 * port has assertions (and the task wants them).
	 *
	 * Capture the count and a task_imp reference here (under the
	 * locks); the assertions are applied after the locks drop.
	 */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;
	}

#endif /* IMPORTANCE_INHERITANCE */

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, object);

	/* object is unlocked */
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return kr;
}
1232 
1233 /*
1234  *	Routine:	ipc_object_copyout_dest
1235  *	Purpose:
1236  *		Translates/consumes the destination right of a message.
1237  *		This is unlike normal copyout because the right is consumed
1238  *		in a funny way instead of being given to the receiving space.
1239  *		The receiver gets his name for the port, if he has receive
1240  *		rights, otherwise MACH_PORT_NULL.
1241  *	Conditions:
1242  *		The object is locked and active.  Nothing else locked.
1243  *		The object is unlocked and loses a reference.
1244  */
1245 
void
ipc_object_copyout_dest(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        *namep)
{
	mach_port_name_t name;

	assert(IO_VALID(object));
	assert(io_active(object));

	/*
	 *	If the space is the receiver/owner of the object,
	 *	then we quietly consume the right and return
	 *	the space's name for the object.  Otherwise
	 *	we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_notify_nsenders_t nsrequest = { };

		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}

		/*
		 * Drop the send right; if it was the last one, prepare a
		 * no-senders notification while the port is still locked,
		 * and emit it only after the port lock is released.
		 */
		assert(port->ip_srights > 0);
		if (--port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		ipc_notify_no_senders_emit(nsrequest);

		/* consume the caller's object reference */
		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		assert(port->ip_sorights > 0);

		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			port->ip_sorights--;
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 *	A very bizarre case.  The message
			 *	was received, but before this copyout
			 *	happened the space lost receive rights.
			 *	We can't quietly consume the soright
			 *	out from underneath some other task,
			 *	so generate a send-once notification.
			 */

			/* consumes the soright and the port lock */
			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		panic("ipc_object_copyout_dest: strange rights");
		/* not reached; keeps `name` initialized for the compiler */
		name = MACH_PORT_DEAD;
	}

	*namep = name;
}
1325 
/*
 * io_waitq() is applied to generic ipc_object_t values below; these
 * asserts pin the layout invariant that makes that safe: the embedded
 * waitq lives at the same offset in both concrete IPC object types
 * (ports and port-sets).
 */
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset.wqset_q));
1330 
1331 /*
1332  *	Routine:        ipc_object_lock
1333  *	Purpose:
1334  *		Validate, then acquire a lock on an ipc object
1335  */
1336 void
ipc_object_lock(ipc_object_t io)1337 ipc_object_lock(ipc_object_t io)
1338 {
1339 	ipc_object_validate(io);
1340 	waitq_lock(io_waitq(io));
1341 }
1342 
1343 #if MACH_LOCKFREE_SPACE
/*
 * Out-of-line panic helper for the zone preflight check in
 * ipc_object_lock_allow_invalid(); __abortlike marks it as
 * never returning.
 */
__abortlike
static void
ipc_object_validate_preflight_panic(ipc_object_t io)
{
	panic("ipc object %p is neither a port or a port-set", io);
}
1350 
1351 /*
1352  *	Routine:	ipc_object_lock_allow_invalid
1353  *	Purpose:
1354  *		Speculatively try to lock an object in an undefined state.
1355  *
1356  *		This relies on the fact that IPC object memory is allocated
1357  *		from sequestered zones, so at a given address, one can find:
1358  *		1. a valid object,
1359  *		2. a freed or invalid (uninitialized) object,
1360  *		3. unmapped memory.
1361  *
1362  *		(2) is possible because the zone is made with ZC_ZFREE_CLEARMEM which
1363  *		    ensures freed elements are always zeroed.
1364  *
1365  *		(3) is a direct courtesy of waitq_lock_allow_invalid().
1366  *
1367  *		In order to disambiguate (1) from (2), we use the "waitq valid"
1368  *		bit which is part of the lock. When that bit is absent,
1369  *		waitq_lock() will function as expected, but
1370  *		waitq_lock_allow_invalid() will not.
1371  *
1372  *		Objects are then initialized and destroyed carefully so that
1373  *		this "valid bit" is only set when the object invariants are
1374  *		respected.
1375  *
1376  *	Returns:
1377  *		true:  the lock was acquired
1378  *		false: the object was freed or not initialized.
1379  */
1380 bool
ipc_object_lock_allow_invalid(ipc_object_t io)1381 ipc_object_lock_allow_invalid(ipc_object_t io)
1382 {
1383 	struct waitq *wq = io_waitq(io);
1384 
1385 	switch (zone_id_for_native_element(wq, sizeof(*wq))) {
1386 	case ZONE_ID_IPC_PORT:
1387 	case ZONE_ID_IPC_PORT_SET:
1388 		break;
1389 	default:
1390 		ipc_object_validate_preflight_panic(io);
1391 	}
1392 
1393 	if (__probable(waitq_lock_allow_invalid(wq))) {
1394 		ipc_object_validate(io);
1395 		return true;
1396 	}
1397 	return false;
1398 }
1399 #endif /* MACH_LOCKFREE_SPACE */
1400 
1401 /*
1402  *	Routine:	ipc_object_lock_try
1403  *	Purpose:
1404  *		Validate, then try to acquire a lock on an object,
1405  *		fail if there is an existing busy lock
1406  */
1407 bool
ipc_object_lock_try(ipc_object_t io)1408 ipc_object_lock_try(ipc_object_t io)
1409 {
1410 	ipc_object_validate(io);
1411 	return waitq_lock_try(io_waitq(io));
1412 }
1413 
1414 /*
1415  *	Routine:        ipc_object_unlock
1416  *	Purpose:
1417  *	    Unlocks the given object.
1418  */
1419 void
ipc_object_unlock(ipc_object_t io)1420 ipc_object_unlock(ipc_object_t io)
1421 {
1422 	waitq_unlock(io_waitq(io));
1423 }
1424