1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_object.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC objects.
71 */
72
73 #include <mach/mach_types.h>
74 #include <mach/boolean.h>
75 #include <mach/kern_return.h>
76 #include <mach/port.h>
77 #include <mach/message.h>
78
79 #include <kern/kern_types.h>
80 #include <kern/misc_protos.h>
81 #include <kern/ipc_kobject.h>
82 #include <kern/zalloc_internal.h> // zone_id_for_element
83
84 #include <ipc/ipc_types.h>
85 #include <ipc/ipc_importance.h>
86 #include <ipc/port.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_entry.h>
89 #include <ipc/ipc_object.h>
90 #include <ipc/ipc_hash.h>
91 #include <ipc/ipc_kmsg.h>
92 #include <ipc/ipc_right.h>
93 #include <ipc/ipc_notify.h>
94 #include <ipc/ipc_policy.h>
95 #include <ipc/ipc_port.h>
96 #include <ipc/ipc_pset.h>
97
98 #include <security/mac_mach_internal.h>
99
/*
 * Deferred-release queue: collects IPC objects whose last reference was
 * dropped while preemption was disabled; it is drained by the thread
 * deallocate daemon (registered in ipc_object_deallocate_register_queue()).
 */
static struct mpsc_daemon_queue ipc_object_deallocate_queue;

/*
 * In order to do lockfree lookups in the IPC space, we combine two schemes:
 *
 * - the ipc table pointer is protected with hazard pointers to allow
 *   dereferencing it with only holding a ref on a task or space;
 *
 * - we use ipc_object_lock_allow_invalid in order to lock locks and validate
 *   that they are the droid we're looking for.
 *
 * The second half requires that virtual addresses assigned that ever held
 * a port, either hold a port, or nothing, forever. To get this property,
 * we just piggy back on the zone sequestering security feature which gives
 * us exactly that.
 *
 * However, sequestering really only "works" on a sufficiently large address
 * space, especially for a resource that can be made by userspace at will,
 * so we can't do lockless lookups on ILP32.
 *
 * Note: this scheme is incompatible with kasan quarantines
 *       (because it uses elements to store backtraces in them
 *       which lets the waitq lock appear "valid" by accident when
 *       elements are freed).
 */
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER)

/* ports get per-CPU zone caching (ZC_CACHING); psets are rarer and do not */
ZONE_DEFINE_ID(ZONE_ID_IPC_PORT, "ipc ports", struct ipc_port,
    IPC_OBJECT_ZC_BASE | ZC_CACHING);

ZONE_DEFINE_ID(ZONE_ID_IPC_PORT_SET, "ipc port sets", struct ipc_pset,
    IPC_OBJECT_ZC_BASE);
132
133 __attribute__((noinline))
134 static void
ipc_object_free(ipc_object_t object)135 ipc_object_free(ipc_object_t object)
136 {
137 if (io_is_any_port(object)) {
138 ipc_port_free(ip_object_to_port(object));
139 } else {
140 ipc_pset_free(ips_object_to_pset(object));
141 }
142 }
143
/*
 * Slow path used when the final reference is dropped in a context where
 * freeing inline is not allowed (see ipc_object_release_safe): hand the
 * object to the deallocate daemon instead.  The object's waitq — which
 * must already be invalidated — embeds the MPSC linkage for the hand-off.
 */
__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	/* the object must be torn down, and not already enqueued */
	assert(!waitq_is_valid(wq));
	assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    &wq->waitq_defer, MPSC_QUEUE_NONE);
}
155
/*
 * Daemon-side callback for ipc_object_deallocate_queue: recover the IPC
 * object from its waitq linkage, reset the linkage for reuse, and perform
 * the actual free now that we run in a context where that is allowed.
 */
static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
	ipc_object_t io = io_from_waitq(wq);

	assert(dq == &ipc_object_deallocate_queue);

	/* clear linkage to satisfy the assertion in ipc_object_free_safe() */
	os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
	ipc_object_free(io);
}
168
/*
 *	Routine:	ipc_object_deallocate_register_queue
 *	Purpose:
 *		Register the deferred-release queue with the thread
 *		deallocate daemon (called once during bootstrap).
 */
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}
175
/*
 *	Routine:	ipc_object_reference
 *	Purpose:
 *		Take a reference to an object.
 */

void
ipc_object_reference(
	ipc_object_t    io)
{
	/* io_references is managed through the raw os_refcnt interface */
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw(&io->io_references, NULL);
}
189
/*
 *	Routine:	ipc_object_release
 *	Purpose:
 *		Release a reference to an object.
 *	Conditions:
 *		Must be called with preemption enabled: dropping the last
 *		reference frees the object inline (use
 *		ipc_object_release_safe() otherwise).
 */

void
ipc_object_release(
	ipc_object_t    io)
{
#if DEBUG
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw(&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io);
	}
}
209
210 /*
211 * Routine: ipc_object_release_safe
212 * Purpose:
213 * Release a reference to an object safely
214 */
215
216 void
ipc_object_release_safe(ipc_object_t io)217 ipc_object_release_safe(
218 ipc_object_t io)
219 {
220 if (os_ref_release_raw(&io->io_references, NULL) == 0) {
221 if (get_preemption_level() == 0) {
222 ipc_object_free(io);
223 } else {
224 ipc_object_free_safe(io);
225 }
226 }
227 }
228
/*
 *	Routine:	ipc_object_release_live
 *	Purpose:
 *		Release a reference to an object that isn't the last one.
 *		The caller guarantees at least one other reference remains,
 *		so no freeing can happen here (the _live refcount variant
 *		enforces this).
 */

void
ipc_object_release_live(
	ipc_object_t    io)
{
	os_ref_release_live_raw(&io->io_references, NULL);
}
241
/*
 *	Routine:	ipc_object_translate
 *	Purpose:
 *		Look up an object in a space.
 *	Conditions:
 *		Nothing locked before. If successful, the object
 *		is returned active and locked. The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Object returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	The name doesn't denote a right
 *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
 */
kern_return_t
ipc_object_translate(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_port_right_t       right,
	ipc_object_t            *objectp)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	kern_return_t kr;

	/* reject right types that cannot be translated at all */
	if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
		return KERN_INVALID_RIGHT;
	}

	kr = ipc_right_lookup_read(space, name, &bits, &object);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* object is locked and active */

	/* the entry must actually hold the requested kind of right */
	if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
		io_unlock(object);
		return KERN_INVALID_RIGHT;
	}

	*objectp = object;
	return KERN_SUCCESS;
}
284
/*
 *	Routine:	ipc_object_translate_port_pset
 *	Purpose:
 *		Look up two objects in a space (a port set and a receive right)
 *	Conditions:
 *		Nothing locked before. If successful, the objects
 *		are returned locked. The caller doesn't get a ref.
 *	Returns:
 *		KERN_SUCCESS		Objects returned locked.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	A name doesn't denote a right.
 *		KERN_INVALID_RIGHT	A name doesn't denote the correct right.
 */

kern_return_t
ipc_object_translate_port_pset(
	ipc_space_t             space,
	mach_port_name_t        port_name,
	ipc_port_t              *portp,
	mach_port_name_t        pset_name,
	ipc_pset_t              *psetp)
{
	ipc_entry_t port_entry;
	ipc_entry_t pset_entry;
	ipc_port_t port;
	ipc_pset_t pset;
	kern_return_t kr;

	kr = ipc_right_lookup_two_read(space,
	    port_name, &port_entry, pset_name, &pset_entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((port_entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		/*
		 * Raise a guard exception unless the entry is marked as
		 * having had its receive right moved out (IE_BITS_EX_RECEIVE),
		 * in which case the failure is expected and stays quiet.
		 */
		bool guard = !(port_entry->ie_bits & IE_BITS_EX_RECEIVE);

		is_read_unlock(space);
		if (guard) {
			mach_port_guard_exception(port_name,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_TRANSLATE_PORT,
			    port_entry->ie_bits),
			    kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((pset_entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
		is_read_unlock(space);
		mach_port_guard_exception(pset_name,
		    MPG_PAYLOAD(MPG_FLAGS_INVALID_RIGHT_TRANSLATE_PSET,
		    pset_entry->ie_bits),
		    kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* lock order: port first, then pset, both under the space read lock */
	port = port_entry->ie_port;
	assert(port != IP_NULL);
	ip_mq_lock(port);
	if (!ip_active(port)) {
		ip_mq_unlock(port);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	pset = pset_entry->ie_pset;
	assert(pset != IPS_NULL);
	ips_mq_lock(pset);
	if (!ips_active(pset)) {
		ip_mq_unlock(port);
		ips_mq_unlock(pset);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	/* both objects locked and active: hand them back to the caller */
	*portp = port;
	*psetp = pset;

	is_read_unlock(space);
	return KERN_SUCCESS;
}
367
368 /*
369 * Routine: ipc_object_alloc_dead
370 * Purpose:
371 * Allocate a dead-name entry.
372 * Conditions:
373 * Nothing locked.
374 * Returns:
375 * KERN_SUCCESS The dead name is allocated.
376 * KERN_INVALID_TASK The space is dead.
377 * KERN_NO_SPACE No room for an entry in the space.
378 */
379
380 kern_return_t
ipc_object_alloc_dead(ipc_space_t space,mach_port_name_t * namep)381 ipc_object_alloc_dead(
382 ipc_space_t space,
383 mach_port_name_t *namep)
384 {
385 ipc_entry_t entry;
386 kern_return_t kr;
387 mach_port_type_t type = MACH_PORT_TYPE_DEAD_NAME;
388 mach_port_urefs_t urefs = 1;
389
390 kr = ipc_entry_alloc(space, IPC_OBJECT_NULL, namep, &entry);
391 if (kr != KERN_SUCCESS) {
392 return kr;
393 }
394 /* space is write-locked */
395
396 ipc_entry_init(space, IPC_OBJECT_NULL, type, entry, urefs, *namep);
397
398 is_write_unlock(space);
399 return KERN_SUCCESS;
400 }
401
402 /*
403 * Routine: ipc_object_alloc_entry
404 * Purpose:
405 * Allocate an unitialized entry for a new object.
406 * That entry must later be initialized by `ipc_entry_init` after
407 * the object has been initialized
408 * Conditions:
409 * Nothing locked.
410 * The space is write locked on successful return.
411 * Returns:
412 * KERN_SUCCESS The object is allocated.
413 * KERN_INVALID_TASK The space is dead.
414 * KERN_NO_SPACE No room for an entry in the space.
415 */
416
417 kern_return_t
ipc_object_alloc_entry(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entry)418 ipc_object_alloc_entry(
419 ipc_space_t space,
420 ipc_object_t object,
421 mach_port_name_t *namep,
422 ipc_entry_t *entry)
423 {
424 kern_return_t kr;
425
426 kr = ipc_entry_alloc(space, object, namep, entry);
427
428 return kr;
429 }
430
431 /*
432 * Routine: ipc_object_alloc_entry_with_name
433 * Purpose:
434 * Allocate an uninitalized entry for a new object, with a specific name.
435 * That entry must later be initialized by `ipc_entry_init` after
436 * the object has been initialized
437 * Conditions:
438 * Nothing locked.
439 * The space is write locked on successful return.
440 *
441 * Returns:
442 * KERN_SUCCESS The object is allocated.
443 * KERN_INVALID_TASK The space is dead.
444 * KERN_NAME_EXISTS The name already denotes a right.
445 */
446
447 kern_return_t
ipc_object_alloc_entry_with_name(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entry)448 ipc_object_alloc_entry_with_name(
449 ipc_space_t space,
450 mach_port_name_t name,
451 ipc_entry_t *entry)
452 {
453 kern_return_t kr;
454
455 kr = ipc_entry_alloc_name(space, name, entry);
456 if (kr == KERN_SUCCESS && ipc_right_inuse(*entry)) {
457 is_write_unlock(space);
458 kr = KERN_NAME_EXISTS;
459 }
460
461 return kr;
462 }
463
464 /* Routine: ipc_object_validate
465 * Purpose:
466 * Validates an ipc port or port set as belonging to the correct
467 * zone.
468 */
469
470 void
ipc_object_validate(ipc_object_t object,ipc_object_type_t type)471 ipc_object_validate(
472 ipc_object_t object,
473 ipc_object_type_t type)
474 {
475 if (io_is_any_port_type(type)) {
476 ip_validate(object);
477 } else {
478 ips_validate(object);
479 }
480 }
481
/*
 *	Routine:	ipc_object_copyin_type
 *	Purpose:
 *		Convert a send type name (a copyin disposition) to the
 *		received type name (the kind of right it produces).
 */

mach_msg_type_name_t
ipc_object_copyin_type(
	mach_msg_type_name_t    msgt_name)
{
	/* moving a receive right yields a receive right */
	if (msgt_name == MACH_MSG_TYPE_MOVE_RECEIVE) {
		return MACH_MSG_TYPE_PORT_RECEIVE;
	}

	/* both send-once dispositions yield a send-once right */
	if (msgt_name == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_name == MACH_MSG_TYPE_MAKE_SEND_ONCE) {
		return MACH_MSG_TYPE_PORT_SEND_ONCE;
	}

	/* all three send dispositions yield a send right */
	if (msgt_name == MACH_MSG_TYPE_MOVE_SEND ||
	    msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND) {
		return MACH_MSG_TYPE_PORT_SEND;
	}

	/* anything else carries no port right */
	return MACH_MSG_TYPE_PORT_NONE;
}
509
/*
 *	Routine:	ipc_object_copyin
 *	Purpose:
 *		Copyin a capability from a space.
 *		If successful, the caller gets a ref
 *		for the resulting port, unless it is IO_DEAD.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Acquired a port, possibly IP_DEAD.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_RIGHT	Name doesn't denote correct right.
 */

kern_return_t
ipc_object_copyin(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyin_flags_t copyin_flags,
	ipc_copyin_op_t         copyin_reason,
	mach_msg_guarded_port_descriptor_t *gdesc,
	ipc_port_t              *portp)
{
	ipc_copyin_rcleanup_t icrc;
	ipc_copyin_cleanup_t icc;
	ipc_entry_t entry;
	kern_return_t kr;

	/*
	 * Only the immovable-send override is forwarded from the caller's
	 * flags; dead names are always tolerated (DEADOK).
	 */
	ipc_object_copyin_flags_t copyin_mask = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
	copyin_mask = (copyin_flags & copyin_mask) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;

	/*
	 * Could first try a read lock when doing
	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	ipc_right_copyin_rcleanup_init(&icrc, gdesc);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	kr = ipc_right_copyin(space, name, msgt_name, copyin_mask, copyin_reason,
	    entry, portp, &icc, &icrc);
	is_write_unlock(space);

	/* run the deferred cleanups outside the space lock */
	ipc_right_copyin_cleanup_destroy(&icc, name);
	ipc_right_copyin_rcleanup_destroy(&icrc);

	/*
	 * Moving a service port's receive right out of a space is a policy
	 * violation worth reporting (except for the init process); stash
	 * telemetry at most once per space.
	 */
	if (IP_VALID(*portp) &&
	    ip_type(*portp) == IOT_SERVICE_PORT &&
	    msgt_name == MACH_MSG_TYPE_MOVE_RECEIVE &&
	    !task_is_initproc(space->is_task) &&
	    !ipc_space_has_telemetry_type(space, IS_HAS_SERVICE_PORT_TELEMETRY)) {
		ipc_stash_policy_violations_telemetry(IPCPV_MOVE_SERVICE_PORT,
		    *portp, name);
	}

	return kr;
}
575
/*
 *	Routine:	ipc_object_copyin_from_kernel
 *	Purpose:
 *		Copyin a naked capability from the kernel.
 *
 *		MACH_MSG_TYPE_MOVE_RECEIVE
 *			The receiver must be ipc_space_kernel
 *			or the receive right must already be in limbo.
 *			Consumes the naked receive right.
 *		MACH_MSG_TYPE_COPY_SEND
 *			A naked send right must be supplied.
 *			The port gains a reference, and a send right
 *			if the port is still active.
 *		MACH_MSG_TYPE_MAKE_SEND
 *			The receiver must be ipc_space_kernel.
 *			The port gains a reference and a send right.
 *		MACH_MSG_TYPE_MOVE_SEND
 *			Consumes a naked send right.
 *		MACH_MSG_TYPE_MAKE_SEND_ONCE
 *			The port gains a reference and a send-once right.
 *			Receiver also be the caller of device subsystem,
 *			so no assertion.
 *		MACH_MSG_TYPE_MOVE_SEND_ONCE
 *			Consumes a naked send-once right.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_object_copyin_from_kernel(
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name)
{
	ipc_object_label_t label;
	assert(IP_VALID(port));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE:
		label = ip_mq_lock_label_get(port);

		if (label.io_state == IO_STATE_IN_SPACE) {
			/* only the kernel space's receive rights may be moved here */
			assert(ip_in_space(port, ipc_space_kernel));

			/*
			 * Ports in kernel are never hooked to a pset,
			 * so we don't need to pass a waitq linkage free list.
			 */
			ipc_port_mark_in_limbo(port, &label, NULL);
			ip_mq_unlock(port);
		} else {
			/* otherwise the right must already be in limbo */
			ipc_release_assert(io_state_in_limbo(label.io_state));
			ip_mq_unlock_label_put(port, &label);
		}
		break;

	case MACH_MSG_TYPE_COPY_SEND:
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* copying requires an existing send right */
			assert(port->ip_srights > 0);
		}
		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MAKE_SEND:
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			assert(ip_in_space(port, ipc_space_kernel));
			/* made-send count feeds no-senders notifications */
			port->ip_mscount++;
		}

		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MOVE_SEND:
		/* move naked send right into the message */
		assert(port->ip_srights);
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
		/* move naked send-once right into the message */
		assert(port->ip_sorights);
		break;

	default:
		ipc_unreachable("ipc_object_copyin_from_kernel: strange rights");
	}
}
677
678 /*
679 * Routine: ipc_object_destroy
680 * Purpose:
681 * Destroys a naked capability.
682 * Consumes a ref for the port.
683 *
684 * A receive right should be in limbo or in transit.
685 * Conditions:
686 * Nothing locked.
687 */
688
689 void
ipc_object_destroy(ipc_port_t port,mach_msg_type_name_t msgt_name)690 ipc_object_destroy(
691 ipc_port_t port,
692 mach_msg_type_name_t msgt_name)
693 {
694 assert(IP_VALID(port));
695
696 switch (msgt_name) {
697 case MACH_MSG_TYPE_PORT_SEND:
698 ipc_port_release_send(port);
699 break;
700
701 case MACH_MSG_TYPE_PORT_SEND_ONCE:
702 ip_mq_lock(port);
703 ipc_notify_send_once_and_unlock(port);
704 break;
705
706 case MACH_MSG_TYPE_PORT_RECEIVE:
707 ipc_port_release_receive(port);
708 break;
709
710 default:
711 ipc_unreachable("ipc_object_destroy: strange rights");
712 }
713 }
714
715 /*
716 * Routine: ipc_object_destroy_dest
717 * Purpose:
718 * Destroys a naked capability for the destination of
719 * of a message. Consumes a ref for the port.
720 *
721 * Conditions:
722 * Nothing locked.
723 */
724
725 void
ipc_object_destroy_dest(ipc_port_t port,mach_msg_type_name_t msgt_name)726 ipc_object_destroy_dest(
727 ipc_port_t port,
728 mach_msg_type_name_t msgt_name)
729 {
730 assert(IP_VALID(port));
731
732 switch (msgt_name) {
733 case MACH_MSG_TYPE_PORT_SEND:
734 ipc_port_release_send(port);
735 break;
736
737 case MACH_MSG_TYPE_PORT_SEND_ONCE:
738 ip_mq_lock(port);
739 ipc_notify_send_once_and_unlock(port);
740 break;
741
742 default:
743 ipc_unreachable("ipc_object_destroy_dest: strange rights");
744 }
745 }
746
/*
 *	Routine:	ipc_object_insert_send_right
 *	Purpose:
 *		Insert a send right into an object already in the space.
 *		The specified name must already point to a valid object.
 *
 *		Note: This really is a combined copyin()/copyout(),
 *		that avoids most of the overhead of being implemented that way.
 *
 *		This is the fastpath for mach_port_insert_right.
 *
 *	Conditions:
 *		Nothing locked.
 *
 *		msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
 *		MACH_MSG_TYPE_COPY_SEND.
 *
 *	Returns:
 *		KERN_SUCCESS		Copied out object, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_NAME	Name doesn't exist in space.
 *		KERN_INVALID_CAPABILITY	The object is dead.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	bits = entry->ie_bits;
	object = entry->ie_object;

	/* the entry must hold an object, through a port-type right */
	if (object == IPC_OBJECT_NULL) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}
	if ((bits & MACH_PORT_TYPE_PORT_RIGHTS) == 0) {
		is_write_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = ip_object_to_port(object);

	ip_mq_lock(port);
	if (!ip_active(port)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		/* MAKE_SEND requires the receive right to live here */
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			port->ip_mscount++;
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				/* first send right under this name */
				ip_srights_inc(port);
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		/* COPY_SEND requires an existing send right to copy */
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	ip_mq_unlock(port);
	is_write_unlock(space);

	return kr;
}
844
/*
 *	Routine:	ipc_object_copyout
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		Always consumes a ref for the port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out port, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The port is dead.
 *		KERN_NO_SPACE		No room in space for another right.
 *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *			and overflow wasn't specified.
 */

kern_return_t
ipc_object_copyout(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_msg_guarded_port_descriptor_t *gdesc,
	mach_port_name_t        *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	ipc_object_label_t label;
	mach_port_name_t name;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IP_VALID(port));

	/* prepare the turnstile before taking any locks */
	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	/*
	 * Retry loop: restarts whenever growing the entry table or a label
	 * substitution forced us to drop the space lock.
	 */
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		/* reserve one entry before locking the port */
		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		label = ip_mq_lock_check_aligned(port);

		/* Don't actually copyout rights we aren't allowed to */
		if (!io_state_active(label.io_state) ||
		    !ip_label_check_or_substitute(space, port, &label,
		    msgt_name, &port_subst)) {
			ip_mq_unlock_label_put(port, &label);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				port = IP_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			/* restart the copyout with the substituted port */
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, port is locked and active */

	/* reuse an existing entry for this port if the space has one */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, port, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		ipc_entry_claim(space, ip_to_object(port), &name, &entry);
	}

	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		ipc_right_copyout_recv_and_unlock_space(space, port, &label,
		    name, entry, gdesc);
	} else {
		ip_label_put(port, &label);
		ipc_right_copyout_any_send(space, port, msgt_name,
		    flags, name, entry);
		is_write_unlock(space);
	}

	/* port is unlocked */

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IP_VALID(port)) {
		/* on failure the right we were given still must be consumed */
		ipc_object_destroy(port, msgt_name);
	}

	return kr;
}
969
/*
 *	Routine:	ipc_object_copyout_name
 *	Purpose:
 *		Copyout a capability, placing it into a space.
 *		The specified name is used for the capability.
 *		If successful, consumes a ref for the port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		KERN_SUCCESS		Copied out port, consumed ref.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_INVALID_CAPABILITY	The port is dead.
 *		KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *			and overflow wasn't specified.
 *		KERN_RIGHT_EXISTS	Space has rights under another name.
 *		KERN_NAME_EXISTS	Name is already used.
 *		KERN_INVALID_VALUE	Supplied port name is invalid.
 */

kern_return_t
ipc_object_copyout_name(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        name)
{
	ipc_object_label_t label;
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IP_VALID(port));

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	label = ip_mq_lock_check_aligned(port);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed ports do not allow callers
	 * to pick the name they end up with.
	 */
	if (!io_state_active(label.io_state) ||
	    (io_is_kobject_type(label.io_type) && label.iol_kobject)) {
		ip_mq_unlock_label_put(port, &label);
		/* undo the allocation if we created this entry ourselves */
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, port is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, port, &oname, &oentry)) {
		/* port already has a name in this space: it must match */
		if (name != oname) {
			ip_mq_unlock_label_put(port, &label);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		/* requested name is taken by an unrelated right */
		ip_mq_unlock_label_put(port, &label);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		/* fresh entry: bind it to this port */
		assert(entry->ie_port == IP_NULL);
		entry->ie_port = port;
	}

	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
#if IMPORTANCE_INHERITANCE
		/*
		 * We are slamming a receive right into the space, without
		 * first having been enqueued on a port destined there.  So,
		 * we have to arrange to boost the task appropriately if this
		 * port has assertions (and the task wants them).
		 */
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;

#endif /* IMPORTANCE_INHERITANCE */
		ipc_right_copyout_recv_and_unlock_space(space, port, &label,
		    name, entry, NULL);
	} else {
		ip_label_put(port, &label);
		ipc_right_copyout_any_send(space, port, msgt_name,
		    IPC_OBJECT_COPYOUT_FLAGS_NONE, name, entry);
		is_write_unlock(space);
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return KERN_SUCCESS;
}
1100
1101 /*
1102 * Routine: ipc_object_copyout_dest
1103 * Purpose:
1104 * Translates/consumes the destination right of a message.
1105 * This is unlike normal copyout because the right is consumed
1106 * in a funny way instead of being given to the receiving space.
1107 * The receiver gets his name for the port, if he has receive
1108 * rights, otherwise MACH_PORT_NULL.
1109 * Conditions:
1110 * The port is locked and active. Nothing else locked.
1111 * The port is unlocked and loses a reference.
1112 */
1113
void
ipc_object_copyout_dest(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        *namep)
{
	mach_port_name_t name;

	assert(IP_VALID(port));
	assert(ip_active(port));

	/*
	 * If the space is the receiver/owner of the port,
	 * then we quietly consume the right and return
	 * the space's name for the port. Otherwise
	 * we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_notify_nsenders_t nsrequest = { };

		/* receiver's own name only if the port lives in this space */
		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}
		ip_srights_dec(port);
		/* dropping the last send right may arm a no-senders notification */
		if (port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		/* must be emitted only after the port lock has been dropped */
		ipc_notify_no_senders_emit(nsrequest);

		/* consume the caller's port reference */
		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			ip_sorights_dec(port);
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 * A very bizarre case.  The message
			 * was received, but before this copyout
			 * happened the space lost receive rights.
			 * We can't quietly consume the soright
			 * out from underneath some other task,
			 * so generate a send-once notification.
			 */

			/* consumes the send-once right and unlocks the port */
			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		/* destination rights can only be send or send-once */
		ipc_unreachable("ipc_object_copyout_dest: strange rights");
	}

	*namep = name;
}
1186
1187 void
ipc_object_unpin(ipc_space_t space,ipc_port_t port)1188 ipc_object_unpin(
1189 ipc_space_t space,
1190 ipc_port_t port)
1191 {
1192 mach_port_name_t name;
1193 ipc_entry_t entry;
1194
1195 if (IP_VALID(port)) {
1196 is_write_lock(space);
1197 ip_mq_lock(port);
1198
1199 if (is_active(space) &&
1200 ipc_right_reverse(space, port, &name, &entry) &&
1201 (entry->ie_bits & IE_BITS_PINNED_SEND)) {
1202 assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
1203 entry->ie_bits &= ~IE_BITS_PINNED_SEND;
1204
1205 ipc_entry_modified(space, name, entry);
1206 }
1207
1208 ip_mq_unlock(port);
1209 is_write_unlock(space);
1210 }
1211 }
1212
/*
 * ipc_object_lock_allow_invalid() reaches the embedded waitq through
 * io_waitq() without knowing whether the object is a port or a port-set,
 * so the waitq must live at the same offset in both layouts.
 */
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));
1217
1218 __abortlike
1219 static void
ipc_object_validate_preflight_panic(ipc_object_t io)1220 ipc_object_validate_preflight_panic(ipc_object_t io)
1221 {
1222 panic("ipc object %p is neither a port or a port-set", io);
1223 }
1224
1225 /*
1226 * Routine: ipc_object_lock_allow_invalid
1227 * Purpose:
1228 * Speculatively try to lock an object in an undefined state.
1229 *
1230 * This relies on the fact that IPC object memory is allocated
1231 * from sequestered zones, so at a given address, one can find:
1232 * 1. a valid object,
1233 * 2. a freed or invalid (uninitialized) object,
1234 * 3. unmapped memory.
1235 *
1236 * (2) is possible because the zone is made with ZC_ZFREE_CLEARMEM which
1237 * ensures freed elements are always zeroed.
1238 *
1239 * (3) is a direct courtesy of waitq_lock_allow_invalid().
1240 *
1241 * In order to disambiguate (1) from (2), we use the "waitq valid"
1242 * bit which is part of the lock. When that bit is absent,
1243 * waitq_lock() will function as expected, but
1244 * waitq_lock_allow_invalid() will not.
1245 *
1246 * Objects are then initialized and destroyed carefully so that
1247 * this "valid bit" is only set when the object invariants are
1248 * respected.
1249 *
1250 * Returns:
1251 * true: the lock was acquired
1252 * false: the object was freed or not initialized.
1253 */
bool
ipc_object_lock_allow_invalid(ipc_object_t orig_io)
{
	struct waitq *wq = io_waitq(orig_io);

	/*
	 * Preflight: the address must come from one of the two IPC
	 * object zones; anything else is a caller bug and panics.
	 */
	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
		ipc_object_validate_preflight_panic(orig_io);
	}

	/*
	 * The speculative lock only succeeds when the waitq "valid"
	 * bit is set (see the routine comment above); a freed or
	 * uninitialized object is zeroed and fails here.
	 */
	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_t io = io_from_waitq(wq);

		/* locked and valid: check invariants for its actual type */
		ipc_object_validate(io, io_type(io));
		return true;
	}
	return false;
}
1275
__attribute__((always_inline))
void
ipc_object_unlock(ipc_object_t object)
{
	/* the label lock must never still be held when the object is unlocked */
	release_assert(!object->io_label_lock);
	io_unlock_nocheck(object);
}
1283