xref: /xnu-12377.61.12/osfmk/ipc/ipc_object.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005-2006 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	ipc/ipc_object.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions to manipulate IPC objects.
71  */
72 
73 #include <mach/mach_types.h>
74 #include <mach/boolean.h>
75 #include <mach/kern_return.h>
76 #include <mach/port.h>
77 #include <mach/message.h>
78 
79 #include <kern/kern_types.h>
80 #include <kern/misc_protos.h>
81 #include <kern/ipc_kobject.h>
82 #include <kern/zalloc_internal.h> // zone_id_for_element
83 
84 #include <ipc/ipc_types.h>
85 #include <ipc/ipc_importance.h>
86 #include <ipc/port.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_entry.h>
89 #include <ipc/ipc_object.h>
90 #include <ipc/ipc_hash.h>
91 #include <ipc/ipc_kmsg.h>
92 #include <ipc/ipc_right.h>
93 #include <ipc/ipc_notify.h>
94 #include <ipc/ipc_policy.h>
95 #include <ipc/ipc_port.h>
96 #include <ipc/ipc_pset.h>
97 
98 #include <security/mac_mach_internal.h>
99 
100 static struct mpsc_daemon_queue ipc_object_deallocate_queue;
101 
102 /*
103  * In order to do lockfree lookups in the IPC space, we combine two schemes:
104  *
105  * - the ipc table pointer is protected with hazard pointers to allow
106  *   dereferencing it with only holding a ref on a task or space;
107  *
 * - we use ipc_object_lock_allow_invalid in order to lock objects and
 *   validate that they are the droid we're looking for.
110  *
111  * The second half requires that virtual addresses assigned that ever held
112  * a port, either hold a port, or nothing, forever. To get this property,
113  * we just piggy back on the zone sequestering security feature which gives
114  * us exactly that.
115  *
116  * However, sequestering really only "works" on a sufficiently large address
117  * space, especially for a resource that can be made by userspace at will,
118  * so we can't do lockless lookups on ILP32.
119  *
120  * Note: this scheme is incompatible with kasan quarantines
121  *       (because it uses elements to store backtraces in them
122  *       which lets the waitq lock appear "valid" by accident when
123  *       elements are freed).
124  */
125 #define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER)
126 
127 ZONE_DEFINE_ID(ZONE_ID_IPC_PORT, "ipc ports", struct ipc_port,
128     IPC_OBJECT_ZC_BASE | ZC_CACHING);
129 
130 ZONE_DEFINE_ID(ZONE_ID_IPC_PORT_SET, "ipc port sets", struct ipc_pset,
131     IPC_OBJECT_ZC_BASE);
132 
133 __attribute__((noinline))
134 static void
ipc_object_free(ipc_object_t object)135 ipc_object_free(ipc_object_t object)
136 {
137 	if (io_is_any_port(object)) {
138 		ipc_port_free(ip_object_to_port(object));
139 	} else {
140 		ipc_pset_free(ips_object_to_pset(object));
141 	}
142 }
143 
__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	/*
	 * Defer the actual free to the deallocate daemon, reusing the
	 * object's embedded waitq_defer field as the MPSC queue linkage.
	 */
	assert(!waitq_is_valid(wq));
	/* the linkage must be idle: an object may only be enqueued once */
	assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    &wq->waitq_defer, MPSC_QUEUE_NONE);
}
155 
/*
 * Daemon-side callback for ipc_object_free_safe(): recovers the object
 * from its embedded waitq_defer linkage and frees it for real.
 */
static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
	ipc_object_t  io = io_from_waitq(wq);

	assert(dq == &ipc_object_deallocate_queue);

	/* clear the linkage, restoring the invariant ipc_object_free_safe asserts */
	os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
	ipc_object_free(io);
}
168 
/*
 * Hook the deferred-free queue up to the thread deallocate daemon so
 * that ipc_object_free_safe() enqueues get drained.
 */
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}
175 
176 /*
177  *	Routine:	ipc_object_reference
178  *	Purpose:
179  *		Take a reference to an object.
180  */
181 
void
ipc_object_reference(
	ipc_object_t    io)
{
	/* io_references is driven through the raw os_refcnt API */
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw(&io->io_references, NULL);
}
189 
190 /*
191  *	Routine:	ipc_object_release
192  *	Purpose:
193  *		Release a reference to an object.
194  */
195 
void
ipc_object_release(
	ipc_object_t    io)
{
#if DEBUG
	/*
	 * This variant frees inline on last-reference; callers must be
	 * preemptible (see ipc_object_release_safe for the other case).
	 */
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw(&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io);
	}
}
209 
210 /*
211  *	Routine:	ipc_object_release_safe
212  *	Purpose:
213  *		Release a reference to an object safely
214  */
215 
216 void
ipc_object_release_safe(ipc_object_t io)217 ipc_object_release_safe(
218 	ipc_object_t    io)
219 {
220 	if (os_ref_release_raw(&io->io_references, NULL) == 0) {
221 		if (get_preemption_level() == 0) {
222 			ipc_object_free(io);
223 		} else {
224 			ipc_object_free_safe(io);
225 		}
226 	}
227 }
228 
229 /*
230  *	Routine:	ipc_object_release_live
231  *	Purpose:
232  *		Release a reference to an object that isn't the last one.
233  */
234 
void
ipc_object_release_live(
	ipc_object_t    io)
{
	/* caller guarantees this is not the last reference */
	os_ref_release_live_raw(&io->io_references, NULL);
}
241 
242 /*
243  *	Routine:	ipc_object_translate
244  *	Purpose:
245  *		Look up an object in a space.
246  *	Conditions:
247  *		Nothing locked before.  If successful, the object
248  *		is returned active and locked.  The caller doesn't get a ref.
249  *	Returns:
250  *		KERN_SUCCESS		Object returned locked.
251  *		KERN_INVALID_TASK	The space is dead.
252  *		KERN_INVALID_NAME	The name doesn't denote a right
253  *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
254  */
kern_return_t
ipc_object_translate(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_port_right_t       right,
	ipc_object_t            *objectp)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	kern_return_t kr;

	/* reject right kinds that cannot be translated at all */
	if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
		return KERN_INVALID_RIGHT;
	}

	kr = ipc_right_lookup_read(space, name, &bits, &object);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* object is locked and active */

	/* the entry must actually hold the requested right type */
	if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		io_unlock(object);
		return KERN_INVALID_RIGHT;
	}

	/* success: hand the locked object back, no reference taken */
	*objectp = object;
	return KERN_SUCCESS;
}
284 
285 /*
286  *	Routine:	ipc_object_translate_pset_receive
287  *	Purpose:
288  *		Look up two objects in a space (a port set and a receive right)
289  *	Conditions:
290  *		Nothing locked before.  If successful, the objects
291  *		are returned locked.  The caller doesn't get a ref.
292  *	Returns:
293  *		KERN_SUCCESS		Objects returned locked.
294  *		KERN_INVALID_TASK	The space is dead.
295  *		KERN_INVALID_NAME	A name doesn't denote a right.
296  *		KERN_INVALID_RIGHT	A name doesn't denote the correct right.
297  */
298 
kern_return_t
ipc_object_translate_port_pset(
	ipc_space_t             space,
	mach_port_name_t        port_name,
	ipc_port_t             *portp,
	mach_port_name_t        pset_name,
	ipc_pset_t             *psetp)
{
	ipc_entry_t port_entry;
	ipc_entry_t pset_entry;
	ipc_port_t port;
	ipc_pset_t pset;
	kern_return_t kr;

	/* look up both names under a single space read lock */
	kr = ipc_right_lookup_two_read(space,
	    port_name, &port_entry, pset_name, &pset_entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((port_entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		/*
		 * Suppress the guard exception when IE_BITS_EX_RECEIVE is
		 * set — presumably the entry previously held a receive
		 * right (TODO confirm against ipc_entry definitions).
		 */
		bool guard = !(port_entry->ie_bits & IE_BITS_EX_RECEIVE);

		is_read_unlock(space);
		if (guard) {
			/*
			 * NOTE(review): port_entry->ie_bits is read after
			 * dropping the space lock — confirm entry lifetime
			 * guarantees make this safe.
			 */
			mach_port_guard_exception(port_name,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_TRANSLATE_PORT,
			    port_entry->ie_bits),
			    kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((pset_entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
		is_read_unlock(space);
		mach_port_guard_exception(pset_name,
		    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_TRANSLATE_PSET,
		    pset_entry->ie_bits),
		    kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* lock order: port first, then pset, both under the space lock */
	port = port_entry->ie_port;
	assert(port != IP_NULL);
	ip_mq_lock(port);
	if (!ip_active(port)) {
		ip_mq_unlock(port);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	pset = pset_entry->ie_pset;
	assert(pset != IPS_NULL);
	ips_mq_lock(pset);
	if (!ips_active(pset)) {
		ip_mq_unlock(port);
		ips_mq_unlock(pset);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	/* both objects returned locked; no references taken */
	*portp = port;
	*psetp = pset;

	is_read_unlock(space);
	return KERN_SUCCESS;
}
367 
368 /*
369  *	Routine:	ipc_object_alloc_dead
370  *	Purpose:
371  *		Allocate a dead-name entry.
372  *	Conditions:
373  *		Nothing locked.
374  *	Returns:
375  *		KERN_SUCCESS		The dead name is allocated.
376  *		KERN_INVALID_TASK	The space is dead.
377  *		KERN_NO_SPACE		No room for an entry in the space.
378  */
379 
kern_return_t
ipc_object_alloc_dead(
	ipc_space_t             space,
	mach_port_name_t        *namep)
{
	ipc_entry_t entry;
	kern_return_t kr;
	mach_port_type_t type = MACH_PORT_TYPE_DEAD_NAME;
	mach_port_urefs_t urefs = 1;

	kr = ipc_entry_alloc(space, IPC_OBJECT_NULL, namep, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* a dead name carries no object: the entry bits fully describe it */
	ipc_entry_init(space, IPC_OBJECT_NULL, type, entry, urefs, *namep);

	is_write_unlock(space);
	return KERN_SUCCESS;
}
401 
402 /*
403  *	Routine:	ipc_object_alloc_entry
404  *	Purpose:
 *		Allocate an uninitialized entry for a new object.
406  *      That entry must later be initialized by `ipc_entry_init` after
407  *      the object has been initialized
408  *	Conditions:
409  *		Nothing locked.
410  *		The space is write locked on successful return.
411  *	Returns:
412  *		KERN_SUCCESS		The object is allocated.
413  *		KERN_INVALID_TASK	The space is dead.
414  *		KERN_NO_SPACE		No room for an entry in the space.
415  */
416 
417 kern_return_t
ipc_object_alloc_entry(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entry)418 ipc_object_alloc_entry(
419 	ipc_space_t         space,
420 	ipc_object_t        object,
421 	mach_port_name_t    *namep,
422 	ipc_entry_t         *entry)
423 {
424 	kern_return_t kr;
425 
426 	kr = ipc_entry_alloc(space, object, namep, entry);
427 
428 	return kr;
429 }
430 
431 /*
432  *	Routine:	ipc_object_alloc_entry_with_name
433  *	Purpose:
 *		Allocate an uninitialized entry for a new object, with a specific name.
435  *      That entry must later be initialized by `ipc_entry_init` after
436  *      the object has been initialized
437  *	Conditions:
438  *		Nothing locked.
439  *		The space is write locked on successful return.
440  *
441  *	Returns:
442  *		KERN_SUCCESS		The object is allocated.
443  *		KERN_INVALID_TASK	The space is dead.
444  *		KERN_NAME_EXISTS	The name already denotes a right.
445  */
446 
447 kern_return_t
ipc_object_alloc_entry_with_name(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entry)448 ipc_object_alloc_entry_with_name(
449 	ipc_space_t         space,
450 	mach_port_name_t    name,
451 	ipc_entry_t         *entry)
452 {
453 	kern_return_t kr;
454 
455 	kr = ipc_entry_alloc_name(space, name, entry);
456 	if (kr == KERN_SUCCESS && ipc_right_inuse(*entry)) {
457 		is_write_unlock(space);
458 		kr = KERN_NAME_EXISTS;
459 	}
460 
461 	return kr;
462 }
463 
464 /*	Routine:	ipc_object_validate
465  *	Purpose:
466  *		Validates an ipc port or port set as belonging to the correct
467  *		zone.
468  */
469 
470 void
ipc_object_validate(ipc_object_t object,ipc_object_type_t type)471 ipc_object_validate(
472 	ipc_object_t            object,
473 	ipc_object_type_t       type)
474 {
475 	if (io_is_any_port_type(type)) {
476 		ip_validate(object);
477 	} else {
478 		ips_validate(object);
479 	}
480 }
481 
482 /*
483  *	Routine:	ipc_object_copyin_type
484  *	Purpose:
485  *		Convert a send type name to a received type name.
486  */
487 
488 mach_msg_type_name_t
ipc_object_copyin_type(mach_msg_type_name_t msgt_name)489 ipc_object_copyin_type(
490 	mach_msg_type_name_t    msgt_name)
491 {
492 	switch (msgt_name) {
493 	case MACH_MSG_TYPE_MOVE_RECEIVE:
494 		return MACH_MSG_TYPE_PORT_RECEIVE;
495 
496 	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
497 	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
498 		return MACH_MSG_TYPE_PORT_SEND_ONCE;
499 
500 	case MACH_MSG_TYPE_MOVE_SEND:
501 	case MACH_MSG_TYPE_MAKE_SEND:
502 	case MACH_MSG_TYPE_COPY_SEND:
503 		return MACH_MSG_TYPE_PORT_SEND;
504 
505 	default:
506 		return MACH_MSG_TYPE_PORT_NONE;
507 	}
508 }
509 
510 /*
511  *	Routine:	ipc_object_copyin
512  *	Purpose:
513  *		Copyin a capability from a space.
514  *		If successful, the caller gets a ref
515  *		for the resulting port, unless it is IO_DEAD.
516  *	Conditions:
517  *		Nothing locked.
518  *	Returns:
519  *		KERN_SUCCESS		Acquired a port, possibly IP_DEAD.
520  *		KERN_INVALID_TASK	The space is dead.
521  *		KERN_INVALID_NAME	Name doesn't exist in space.
522  *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
523  */
524 
kern_return_t
ipc_object_copyin(
	ipc_space_t                space,
	mach_port_name_t           name,
	mach_msg_type_name_t       msgt_name,
	ipc_object_copyin_flags_t  copyin_flags,
	ipc_copyin_op_t            copyin_reason,
	mach_msg_guarded_port_descriptor_t *gdesc,
	ipc_port_t                *portp)
{
	ipc_copyin_rcleanup_t icrc;
	ipc_copyin_cleanup_t icc;
	ipc_entry_t entry;
	kern_return_t kr;

	/*
	 * Only ALLOW_IMMOVABLE_SEND is honored from the caller's flags;
	 * DEADOK is always added so dead names copy in as IO_DEAD.
	 */
	ipc_object_copyin_flags_t copyin_mask = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
	copyin_mask = (copyin_flags & copyin_mask) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;

	/*
	 *	Could first try a read lock when doing
	 *	MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 *	and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	/* set up the receive-right cleanup state before taking locks */
	ipc_right_copyin_rcleanup_init(&icrc, gdesc);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	kr = ipc_right_copyin(space, name, msgt_name, copyin_mask, copyin_reason,
	    entry, portp, &icc, &icrc);
	is_write_unlock(space);

	/* run deferred cleanups outside the space lock */
	ipc_right_copyin_cleanup_destroy(&icc, name);
	ipc_right_copyin_rcleanup_destroy(&icrc);

	return kr;
}
566 
567 /*
568  *	Routine:	ipc_object_copyin_from_kernel
569  *	Purpose:
570  *		Copyin a naked capability from the kernel.
571  *
572  *		MACH_MSG_TYPE_MOVE_RECEIVE
573  *			The receiver must be ipc_space_kernel
574  *			or the receive right must already be in limbo.
575  *			Consumes the naked receive right.
576  *		MACH_MSG_TYPE_COPY_SEND
577  *			A naked send right must be supplied.
578  *			The port gains a reference, and a send right
579  *			if the port is still active.
580  *		MACH_MSG_TYPE_MAKE_SEND
581  *			The receiver must be ipc_space_kernel.
582  *			The port gains a reference and a send right.
583  *		MACH_MSG_TYPE_MOVE_SEND
584  *			Consumes a naked send right.
585  *		MACH_MSG_TYPE_MAKE_SEND_ONCE
586  *			The port gains a reference and a send-once right.
 *			The receiver may also be the caller of the device
 *			subsystem, so no receiver assertion is made.
589  *		MACH_MSG_TYPE_MOVE_SEND_ONCE
590  *			Consumes a naked send-once right.
591  *	Conditions:
592  *		Nothing locked.
593  */
594 
void
ipc_object_copyin_from_kernel(
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name)
{
	ipc_object_label_t label;
	assert(IP_VALID(port));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE:
		label = ip_mq_lock_label_get(port);

		if (label.io_state == IO_STATE_IN_SPACE) {
			/* only the kernel space may donate its receive right */
			assert(ip_in_space(port, ipc_space_kernel));

			/*
			 * Ports in kernel are never hooked to a pset,
			 * so we don't need to pass a waitq linkage free list.
			 */
			ipc_port_mark_in_limbo(port, &label, NULL);
			ip_mq_unlock(port);
		} else {
			/* a receive right not in a space must be in limbo */
			ipc_release_assert(io_state_in_limbo(label.io_state));
			ip_mq_unlock_label_put(port, &label);
		}
		break;

	case MACH_MSG_TYPE_COPY_SEND:
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* copying requires an existing send right */
			assert(port->ip_srights > 0);
		}
		/* mint a new send right and a reference for the message */
		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MAKE_SEND:
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			assert(ip_in_space(port, ipc_space_kernel));
			/* made-send count only tracked while active */
			port->ip_mscount++;
		}

		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MOVE_SEND:
		/* move naked send right into the message */
		assert(port->ip_srights);
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
		/* move naked send-once right into the message */
		assert(port->ip_sorights);
		break;

	default:
		ipc_unreachable("ipc_object_copyin_from_kernel: strange rights");
	}
}
668 
669 /*
670  *	Routine:	ipc_object_destroy
671  *	Purpose:
672  *		Destroys a naked capability.
673  *		Consumes a ref for the port.
674  *
675  *		A receive right should be in limbo or in transit.
676  *	Conditions:
677  *		Nothing locked.
678  */
679 
void
ipc_object_destroy(
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name)
{
	assert(IP_VALID(port));

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		/* a dying send-once right must fire its notification */
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE:
		ipc_port_release_receive(port);
		break;

	default:
		ipc_unreachable("ipc_object_destroy: strange rights");
	}
}
705 
706 /*
707  *	Routine:	ipc_object_destroy_dest
708  *	Purpose:
709  *		Destroys a naked capability for the destination of
710  *		of a message. Consumes a ref for the port.
711  *
712  *	Conditions:
713  *		Nothing locked.
714  */
715 
void
ipc_object_destroy_dest(
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name)
{
	assert(IP_VALID(port));

	/* like ipc_object_destroy, but a destination is never a receive right */
	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		/* a dying send-once right must fire its notification */
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	default:
		ipc_unreachable("ipc_object_destroy_dest: strange rights");
	}
}
737 
738 /*
739  *	Routine:	ipc_object_insert_send_right
740  *	Purpose:
741  *		Insert a send right into an object already in the space.
742  *		The specified name must already point to a valid object.
743  *
744  *		Note: This really is a combined copyin()/copyout(),
745  *		that avoids most of the overhead of being implemented that way.
746  *
747  *		This is the fastpath for mach_port_insert_right.
748  *
749  *	Conditions:
750  *		Nothing locked.
751  *
752  *		msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
753  *		MACH_MSG_TYPE_COPY_SEND.
754  *
755  *	Returns:
756  *		KERN_SUCCESS		Copied out object, consumed ref.
757  *		KERN_INVALID_TASK	The space is dead.
758  *		KERN_INVALID_NAME	Name doesn't exist in space.
759  *		KERN_INVALID_CAPABILITY	The object is dead.
760  *		KERN_RIGHT_EXISTS	Space has rights under another name.
761  */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	bits   = entry->ie_bits;
	object = entry->ie_object;

	if (object == IPC_OBJECT_NULL) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}
	if ((bits & MACH_PORT_TYPE_PORT_RIGHTS) == 0) {
		is_write_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port   = ip_object_to_port(object);

	ip_mq_lock(port);
	if (!ip_active(port)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		/* MAKE_SEND requires the receive right under this name */
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			port->ip_mscount++;
			/* mint the entry's send right if it had none */
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				ip_srights_inc(port);
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		/* COPY_SEND requires an existing send right under this name */
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	ip_mq_unlock(port);
	is_write_unlock(space);

	return kr;
}
835 
836 /*
837  *	Routine:	ipc_object_copyout
838  *	Purpose:
839  *		Copyout a capability, placing it into a space.
840  *		Always consumes a ref for the port.
841  *	Conditions:
842  *		Nothing locked.
843  *	Returns:
844  *		KERN_SUCCESS		Copied out port, consumed ref.
845  *		KERN_INVALID_TASK	The space is dead.
846  *		KERN_INVALID_CAPABILITY	The port is dead.
847  *		KERN_NO_SPACE		No room in space for another right.
848  *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
849  *			and overflow wasn't specified.
850  */
851 
kern_return_t
ipc_object_copyout(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_msg_guarded_port_descriptor_t *gdesc,
	mach_port_name_t        *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	ipc_object_label_t label;
	mach_port_name_t name;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IP_VALID(port));

	/* let filt_machport prepare its turnstile before we take locks */
	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	/*
	 * Loop because both table growth and kolabel substitution drop
	 * the space lock and force a restart.
	 */
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		/* reserve an entry before locking the port */
		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		label = ip_mq_lock_check_aligned(port);

		/* Don't actually copyout rights we aren't allowed to */
		if (!io_state_active(label.io_state) ||
		    !ip_label_check_or_substitute(space, port, &label,
		    msgt_name, &port_subst)) {
			ip_mq_unlock_label_put(port, &label);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				port = IP_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			/* retry the whole copyout with the substituted port */
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, port is locked and active */

	/* send rights coalesce into an existing name for the same port */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, port, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		/* consumes the reservation made by ipc_entries_hold() above */
		ipc_entry_claim(space, ip_to_object(port), &name, &entry);
	}

	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		ipc_right_copyout_recv_and_unlock_space(space, port, &label,
		    name, entry, gdesc);
	} else {
		ip_label_put(port, &label);
		ipc_right_copyout_any_send(space, port, msgt_name,
		    flags, name, entry);
		is_write_unlock(space);
	}

	/* port is unlocked */

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IP_VALID(port)) {
		/* the right is consumed even on failure */
		ipc_object_destroy(port, msgt_name);
	}

	return kr;
}
960 
961 /*
962  *	Routine:	ipc_object_copyout_name
963  *	Purpose:
964  *		Copyout a capability, placing it into a space.
965  *		The specified name is used for the capability.
966  *		If successful, consumes a ref for the port.
967  *	Conditions:
968  *		Nothing locked.
969  *	Returns:
970  *		KERN_SUCCESS		Copied out port, consumed ref.
971  *		KERN_INVALID_TASK	The space is dead.
972  *		KERN_INVALID_CAPABILITY	The port is dead.
973  *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
974  *			and overflow wasn't specified.
975  *		KERN_RIGHT_EXISTS	Space has rights under another name.
976  *		KERN_NAME_EXISTS	Name is already used.
977  *		KERN_INVALID_VALUE	Supplied port name is invalid.
978  */
979 
kern_return_t
ipc_object_copyout_name(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        name)
{
	ipc_object_label_t label;
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IP_VALID(port));

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	label = ip_mq_lock_check_aligned(port);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed ports do not allow callers
	 * to pick the name they end up with.
	 */
	if (!io_state_active(label.io_state) ||
	    (io_is_kobject_type(label.io_type) && label.iol_kobject)) {
		ip_mq_unlock_label_put(port, &label);
		/* undo the allocation unless the name was already in use */
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, port is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, port, &oname, &oentry)) {
		/* the port already has a name in this space: it must match */
		if (name != oname) {
			ip_mq_unlock_label_put(port, &label);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		/* the requested name denotes some other right */
		ip_mq_unlock_label_put(port, &label);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_port == IP_NULL);
		entry->ie_port = port;
	}

	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
#if IMPORTANCE_INHERITANCE
		/*
		 * We are slamming a receive right into the space, without
		 * first having been enqueued on a port destined there.  So,
		 * we have to arrange to boost the task appropriately if this
		 * port has assertions (and the task wants them).
		 */
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;

#endif /* IMPORTANCE_INHERITANCE */
		ipc_right_copyout_recv_and_unlock_space(space, port, &label,
		    name, entry, NULL);
	} else {
		ip_label_put(port, &label);
		ipc_right_copyout_any_send(space, port, msgt_name,
		    IPC_OBJECT_COPYOUT_FLAGS_NONE, name, entry);
		is_write_unlock(space);
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return KERN_SUCCESS;
}
1091 
/*
 *	Routine:	ipc_object_copyout_dest
 *	Purpose:
 *		Translates/consumes the destination right of a message.
 *		This is unlike normal copyout because the right is consumed
 *		in a funny way instead of being given to the receiving space.
 *		The receiver gets its own name for the port, if it has
 *		receive rights, otherwise MACH_PORT_NULL.
 *	Conditions:
 *		The port is locked and active.  Nothing else locked.
 *		The port is unlocked and loses a reference.
 */

void
ipc_object_copyout_dest(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t       *namep)
{
	mach_port_name_t name;

	assert(IP_VALID(port));
	assert(ip_active(port));

	/*
	 *	If the space is the receiver/owner of the port,
	 *	then we quietly consume the right and return
	 *	the space's name for the port.  Otherwise
	 *	we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_notify_nsenders_t nsrequest = { };

		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}
		/* drop the send right; the last one arms a no-senders notification */
		ip_srights_dec(port);
		if (port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		/* emit only after the port lock is dropped */
		ipc_notify_no_senders_emit(nsrequest);

		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			ip_sorights_dec(port);
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 *	A very bizarre case.  The message
			 *	was received, but before this copyout
			 *	happened the space lost receive rights.
			 *	We can't quietly consume the soright
			 *	out from underneath some other task,
			 *	so generate a send-once notification.
			 */

			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		ipc_unreachable("ipc_object_copyout_dest: strange rights");
	}

	*namep = name;
}
1177 
1178 void
ipc_object_unpin(ipc_space_t space,ipc_port_t port)1179 ipc_object_unpin(
1180 	ipc_space_t             space,
1181 	ipc_port_t              port)
1182 {
1183 	mach_port_name_t name;
1184 	ipc_entry_t entry;
1185 
1186 	if (IP_VALID(port)) {
1187 		is_write_lock(space);
1188 		ip_mq_lock(port);
1189 
1190 		if (is_active(space) &&
1191 		    ipc_right_reverse(space, port, &name, &entry) &&
1192 		    (entry->ie_bits & IE_BITS_PINNED_SEND)) {
1193 			assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
1194 			entry->ie_bits &= ~IE_BITS_PINNED_SEND;
1195 
1196 			ipc_entry_modified(space, name, entry);
1197 		}
1198 
1199 		ip_mq_unlock(port);
1200 		is_write_unlock(space);
1201 	}
1202 }
1203 
/*
 * Both ports and port-sets must embed their waitq at the offset that
 * struct ipc_object_waitq expects, so the waitq can be located from an
 * ipc_object_t without knowing which of the two kinds it is.
 */
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));
1208 
1209 __abortlike
1210 static void
ipc_object_validate_preflight_panic(ipc_object_t io)1211 ipc_object_validate_preflight_panic(ipc_object_t io)
1212 {
1213 	panic("ipc object %p is neither a port or a port-set", io);
1214 }
1215 
/*
 *	Routine:	ipc_object_lock_allow_invalid
 *	Purpose:
 *		Speculatively try to lock an object in an undefined state.
 *
 *		This relies on the fact that IPC object memory is allocated
 *		from sequestered zones, so at a given address, one can find:
 *		1. a valid object,
 *		2. a freed or invalid (uninitialized) object,
 *		3. unmapped memory.
 *
 *		(2) is possible because the zone is made with ZC_ZFREE_CLEARMEM which
 *		    ensures freed elements are always zeroed.
 *
 *		(3) is a direct courtesy of waitq_lock_allow_invalid().
 *
 *		In order to disambiguate (1) from (2), we use the "waitq valid"
 *		bit which is part of the lock. When that bit is absent,
 *		waitq_lock() will function as expected, but
 *		waitq_lock_allow_invalid() will not.
 *
 *		Objects are then initialized and destroyed carefully so that
 *		this "valid bit" is only set when the object invariants are
 *		respected.
 *
 *	Returns:
 *		true:  the lock was acquired
 *		false: the object was freed or not initialized.
 */
bool
ipc_object_lock_allow_invalid(ipc_object_t orig_io)
{
	struct waitq *wq = io_waitq(orig_io);

	/* the address must at least lie in the port or port-set zone */
	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
		ipc_object_validate_preflight_panic(orig_io);
	}

	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_t io = io_from_waitq(wq);

		/* lock held and valid bit seen: verify object invariants */
		ipc_object_validate(io, io_type(io));
		return true;
	}
	return false;
}
1266 
__attribute__((always_inline))
void
ipc_object_unlock(ipc_object_t object)
{
	/* the object must not be unlocked while its label lock is held */
	release_assert(!object->io_label_lock);
	io_unlock_nocheck(object);
}
1274