1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_object.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC objects.
71 */
72
73 #include <mach/mach_types.h>
74 #include <mach/boolean.h>
75 #include <mach/kern_return.h>
76 #include <mach/port.h>
77 #include <mach/message.h>
78
79 #include <kern/kern_types.h>
80 #include <kern/misc_protos.h>
81 #include <kern/ipc_kobject.h>
82 #include <kern/zalloc_internal.h> // zone_id_for_element
83
84 #include <ipc/ipc_types.h>
85 #include <ipc/ipc_importance.h>
86 #include <ipc/port.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_entry.h>
89 #include <ipc/ipc_object.h>
90 #include <ipc/ipc_hash.h>
91 #include <ipc/ipc_kmsg.h>
92 #include <ipc/ipc_right.h>
93 #include <ipc/ipc_notify.h>
94 #include <ipc/ipc_policy.h>
95 #include <ipc/ipc_port.h>
96 #include <ipc/ipc_pset.h>
97
98 #include <security/mac_mach_internal.h>
99
/* deferred-free queue serviced by the thread-deallocate daemon */
static struct mpsc_daemon_queue ipc_object_deallocate_queue;
/* one zone per IPC object type (ports, port sets); fixed after boot */
SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER];

/*
 * In order to do lockfree lookups in the IPC space, we combine two schemes:
 *
 * - the ipc table pointer is protected with hazard pointers to allow
 *   dereferencing it with only holding a ref on a task or space;
 *
 * - we use ipc_object_lock_allow_invalid in order to lock objects and
 *   validate that they are the droid we're looking for.
 *
 * The second half requires that virtual addresses that ever held
 * a port, either hold a port, or nothing, forever. To get this property,
 * we just piggy back on the zone sequestering security feature which gives
 * us exactly that.
 *
 * However, sequestering really only "works" on a sufficiently large address
 * space, especially for a resource that can be made by userspace at will,
 * so we can't do lockless lookups on ILP32.
 *
 * Note: this scheme is incompatible with kasan quarantines
 *       (because it uses elements to store backtraces in them
 *       which lets the waitq lock appear "valid" by accident when
 *       elements are freed).
 */
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER)

ZONE_INIT(&ipc_object_zones[IOT_PORT],
    "ipc ports", sizeof(struct ipc_port),
    IPC_OBJECT_ZC_BASE | ZC_CACHING, ZONE_ID_IPC_PORT, NULL);

ZONE_INIT(&ipc_object_zones[IOT_PORT_SET],
    "ipc port sets", sizeof(struct ipc_pset),
    IPC_OBJECT_ZC_BASE, ZONE_ID_IPC_PORT_SET, NULL);
135
136 __attribute__((noinline))
137 static void
ipc_object_free(unsigned int otype,ipc_object_t object,bool last_ref)138 ipc_object_free(unsigned int otype, ipc_object_t object, bool last_ref)
139 {
140 if (last_ref) {
141 if (otype == IOT_PORT) {
142 ipc_port_finalize(ip_object_to_port(object));
143 } else {
144 ipc_pset_finalize(ips_object_to_pset(object));
145 }
146 }
147 zfree(ipc_object_zones[otype], object);
148 }
149
/*
 *	Routine:	ipc_object_free_safe
 *	Purpose:
 *		Defer freeing of an object to the deallocate daemon,
 *		for contexts where freeing inline is not allowed
 *		(e.g. preemption disabled).  The object's waitq embeds
 *		an mpsc linkage (waitq_defer) that is reused to chain
 *		it onto ipc_object_deallocate_queue.
 */
__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	/* the waitq must already be torn down ... */
	assert(!waitq_is_valid(wq));
	/* ... and must not already be enqueued for deferred free */
	assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    &wq->waitq_defer, MPSC_QUEUE_NONE);
}
161
/*
 *	Routine:	ipc_object_deallocate_queue_invoke
 *	Purpose:
 *		Daemon callback: performs the freeing that
 *		ipc_object_free_safe() deferred.  Runs in the
 *		thread-deallocate daemon's context.
 */
static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	/* recover the object from its embedded waitq_defer linkage */
	struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
	ipc_object_t io = io_from_waitq(wq);

	assert(dq == &ipc_object_deallocate_queue);

	/* clear the linkage before freeing, mirroring the enqueue-side assert */
	os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
	ipc_object_free(io_otype(io), io, true);
}
174
/*
 *	Routine:	ipc_object_deallocate_register_queue
 *	Purpose:
 *		Register the deferred-deallocation queue with the
 *		thread-deallocate daemon (called once during startup).
 */
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}
181
182 /*
183 * Routine: ipc_object_reference
184 * Purpose:
185 * Take a reference to an object.
186 */
187
void
ipc_object_reference(
	ipc_object_t    io)
{
	/* io_references is layout-compatible with os_ref_atomic_t */
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
195
196 /*
197 * Routine: ipc_object_release
198 * Purpose:
199 * Release a reference to an object.
200 */
201
void
ipc_object_release(
	ipc_object_t    io)
{
#if DEBUG
	/*
	 * Dropping the last ref frees inline (may block in zfree);
	 * callers must therefore be preemptible here.
	 */
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io_otype(io), io, true);
	}
}
215
216 /*
217 * Routine: ipc_object_release_safe
218 * Purpose:
219 * Release a reference to an object safely
220 */
221
void
ipc_object_release_safe(
	ipc_object_t    io)
{
	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		if (get_preemption_level() == 0) {
			/* safe to free inline */
			ipc_object_free(io_otype(io), io, true);
		} else {
			/* preemption disabled: defer to the deallocate daemon */
			ipc_object_free_safe(io);
		}
	}
}
234
235 /*
236 * Routine: ipc_object_release_live
237 * Purpose:
238 * Release a reference to an object that isn't the last one.
239 */
240
void
ipc_object_release_live(
	ipc_object_t    io)
{
	/* asserts (in os_refcnt) that this was not the last reference */
	os_ref_release_live_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
247
248 /*
249 * Routine: ipc_object_translate
250 * Purpose:
251 * Look up an object in a space.
252 * Conditions:
253 * Nothing locked before. If successful, the object
254 * is returned active and locked. The caller doesn't get a ref.
255 * Returns:
256 * KERN_SUCCESS Object returned locked.
257 * KERN_INVALID_TASK The space is dead.
258 * KERN_INVALID_NAME The name doesn't denote a right
259 * KERN_INVALID_RIGHT Name doesn't denote the correct right
260 */
261 kern_return_t
ipc_object_translate(ipc_space_t space,mach_port_name_t name,mach_port_right_t right,ipc_object_t * objectp)262 ipc_object_translate(
263 ipc_space_t space,
264 mach_port_name_t name,
265 mach_port_right_t right,
266 ipc_object_t *objectp)
267 {
268 ipc_entry_bits_t bits;
269 ipc_object_t object;
270 kern_return_t kr;
271
272 if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
273 return KERN_INVALID_RIGHT;
274 }
275
276 kr = ipc_right_lookup_read(space, name, &bits, &object);
277 if (kr != KERN_SUCCESS) {
278 return kr;
279 }
280 /* object is locked and active */
281
282 if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
283 io_unlock(object);
284 return KERN_INVALID_RIGHT;
285 }
286
287 *objectp = object;
288 return KERN_SUCCESS;
289 }
290
291 /*
 * Routine: ipc_object_translate_port_pset
293 * Purpose:
294 * Look up two objects in a space (a port set and a receive right)
295 * Conditions:
296 * Nothing locked before. If successful, the objects
297 * are returned locked. The caller doesn't get a ref.
298 * Returns:
299 * KERN_SUCCESS Objects returned locked.
300 * KERN_INVALID_TASK The space is dead.
301 * KERN_INVALID_NAME A name doesn't denote a right.
302 * KERN_INVALID_RIGHT A name doesn't denote the correct right.
303 */
304
kern_return_t
ipc_object_translate_port_pset(
	ipc_space_t             space,
	mach_port_name_t        port_name,
	ipc_port_t              *portp,
	mach_port_name_t        pset_name,
	ipc_pset_t              *psetp)
{
	ipc_entry_t port_entry;
	ipc_entry_t pset_entry;
	ipc_port_t port;
	ipc_pset_t pset;
	kern_return_t kr;

	/* look up both names at once under a single space read lock */
	kr = ipc_right_lookup_two_read(space,
	    port_name, &port_entry, pset_name, &pset_entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((port_entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		/*
		 * Suppress the guard exception when the entry carries the
		 * EX_RECEIVE bit — NOTE(review): presumably marking rights
		 * for which this failure is expected/tolerated; confirm.
		 */
		bool guard = !(port_entry->ie_bits & MACH_PORT_TYPE_EX_RECEIVE);
		is_read_unlock(space);
		if (guard) {
			mach_port_guard_exception(port_name, 0,
			    kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((pset_entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
		is_read_unlock(space);
		mach_port_guard_exception(pset_name, 0,
		    kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* lock order: port first, then pset, space read lock held throughout */
	port = port_entry->ie_port;
	assert(port != IP_NULL);
	ip_mq_lock(port);
	if (!ip_active(port)) {
		ip_mq_unlock(port);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	pset = pset_entry->ie_pset;
	assert(pset != IPS_NULL);
	ips_mq_lock(pset);
	if (!ips_active(pset)) {
		/* unwind both object locks before failing */
		ip_mq_unlock(port);
		ips_mq_unlock(pset);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	/* both objects returned locked; caller gets no references */
	*portp = port;
	*psetp = pset;

	is_read_unlock(space);
	return KERN_SUCCESS;
}
368
369 /*
370 * Routine: ipc_object_alloc_dead
371 * Purpose:
372 * Allocate a dead-name entry.
373 * Conditions:
374 * Nothing locked.
375 * Returns:
376 * KERN_SUCCESS The dead name is allocated.
377 * KERN_INVALID_TASK The space is dead.
378 * KERN_NO_SPACE No room for an entry in the space.
379 */
380
381 kern_return_t
ipc_object_alloc_dead(ipc_space_t space,mach_port_name_t * namep)382 ipc_object_alloc_dead(
383 ipc_space_t space,
384 mach_port_name_t *namep)
385 {
386 ipc_entry_t entry;
387 kern_return_t kr;
388
389 kr = ipc_entry_alloc(space, IPC_OBJECT_NULL, namep, &entry);
390 if (kr != KERN_SUCCESS) {
391 return kr;
392 }
393 /* space is write-locked */
394
395 /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
396
397 entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
398 ipc_entry_modified(space, *namep, entry);
399 is_write_unlock(space);
400 return KERN_SUCCESS;
401 }
402
403 /*
404 * Routine: ipc_object_alloc
405 * Purpose:
406 * Allocate an object.
407 * Conditions:
408 * Nothing locked.
409 * The space is write locked on successful return.
410 * The caller doesn't get a reference for the object.
411 * Returns:
412 * KERN_SUCCESS The object is allocated.
413 * KERN_INVALID_TASK The space is dead.
414 * KERN_NO_SPACE No room for an entry in the space.
415 */
416
417 kern_return_t
ipc_object_alloc(ipc_space_t space,ipc_object_type_t otype,mach_port_type_t type,mach_port_urefs_t urefs,mach_port_name_t * namep,ipc_object_t * objectp)418 ipc_object_alloc(
419 ipc_space_t space,
420 ipc_object_type_t otype,
421 mach_port_type_t type,
422 mach_port_urefs_t urefs,
423 mach_port_name_t *namep,
424 ipc_object_t *objectp)
425 {
426 ipc_object_t object;
427 ipc_entry_t entry;
428 kern_return_t kr;
429
430 assert(otype < IOT_NUMBER);
431 assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
432 assert(type != MACH_PORT_TYPE_NONE);
433 assert(urefs <= MACH_PORT_UREFS_MAX);
434
435 object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
436 os_atomic_init(&object->io_bits, io_makebits(otype));
437 os_atomic_init(&object->io_references, 1); /* for entry, not caller */
438
439 *namep = CAST_MACH_PORT_TO_NAME(object);
440 kr = ipc_entry_alloc(space, object, namep, &entry);
441 if (kr != KERN_SUCCESS) {
442 ipc_object_free(otype, object, false);
443 return kr;
444 }
445 /* space is write-locked */
446
447 entry->ie_bits |= type | urefs;
448 ipc_entry_modified(space, *namep, entry);
449
450 *objectp = object;
451 return KERN_SUCCESS;
452 }
453
454 /*
455 * Routine: ipc_object_alloc_name
456 * Purpose:
457 * Allocate an object, with a specific name.
458 * Conditions:
459 * Nothing locked. If successful, the object is returned locked.
460 * The caller doesn't get a reference for the object.
461 *
462 * finish_init() must call an ipc_*_init function
463 * that will return the object locked (using IPC_PORT_INIT_LOCKED,
464 * or SYNC_POLICY_INIT_LOCKED, or equivalent).
465 *
466 * Returns:
467 * KERN_SUCCESS The object is allocated.
468 * KERN_INVALID_TASK The space is dead.
469 * KERN_NAME_EXISTS The name already denotes a right.
470 */
471
kern_return_t
ipc_object_alloc_name(
	ipc_space_t             space,
	ipc_object_type_t       otype,
	mach_port_type_t        type,
	mach_port_urefs_t       urefs,
	mach_port_name_t        name,
	ipc_object_t            *objectp,
	void                    (^finish_init)(ipc_object_t))
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	/* allocate eagerly, before taking the space lock (Z_NOFAIL) */
	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(otype));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		/* never reached an entry: not a last-ref free */
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	if (ipc_right_inuse(entry)) {
		/* requested name already denotes a right */
		is_write_unlock(space);
		ipc_object_free(otype, object, false);
		return KERN_NAME_EXISTS;
	}

	entry->ie_bits |= type | urefs;
	entry->ie_object = object;

	/* caller's init must leave the object locked (see contract above) */
	finish_init(object);
	/* object is locked */
	io_lock_held(object);

	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);

	/* object is returned locked; caller gets no reference */
	*objectp = object;
	return KERN_SUCCESS;
}
521
522 /* Routine: ipc_object_validate
523 * Purpose:
524 * Validates an ipc port or port set as belonging to the correct
525 * zone.
526 */
527
528 void
ipc_object_validate(ipc_object_t object,ipc_object_type_t type)529 ipc_object_validate(
530 ipc_object_t object,
531 ipc_object_type_t type)
532 {
533 if (type != IOT_PORT_SET) {
534 ip_validate(object);
535 } else {
536 ips_validate(object);
537 }
538 }
539
540 /*
541 * Routine: ipc_object_copyin_type
542 * Purpose:
543 * Convert a send type name to a received type name.
544 */
545
546 mach_msg_type_name_t
ipc_object_copyin_type(mach_msg_type_name_t msgt_name)547 ipc_object_copyin_type(
548 mach_msg_type_name_t msgt_name)
549 {
550 switch (msgt_name) {
551 case MACH_MSG_TYPE_MOVE_RECEIVE:
552 return MACH_MSG_TYPE_PORT_RECEIVE;
553
554 case MACH_MSG_TYPE_MOVE_SEND_ONCE:
555 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
556 return MACH_MSG_TYPE_PORT_SEND_ONCE;
557
558 case MACH_MSG_TYPE_MOVE_SEND:
559 case MACH_MSG_TYPE_MAKE_SEND:
560 case MACH_MSG_TYPE_COPY_SEND:
561 return MACH_MSG_TYPE_PORT_SEND;
562
563 default:
564 return MACH_MSG_TYPE_PORT_NONE;
565 }
566 }
567
568 /*
569 * Routine: ipc_object_copyin
570 * Purpose:
571 * Copyin a capability from a space.
572 * If successful, the caller gets a ref
573 * for the resulting port, unless it is IO_DEAD.
574 * Conditions:
575 * Nothing locked.
576 * Returns:
577 * KERN_SUCCESS Acquired a port, possibly IP_DEAD.
578 * KERN_INVALID_TASK The space is dead.
579 * KERN_INVALID_NAME Name doesn't exist in space.
580 * KERN_INVALID_RIGHT Name doesn't denote correct right.
581 */
582
kern_return_t
ipc_object_copyin(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyin_flags_t copyin_flags,
	mach_msg_guarded_port_descriptor_t *gdesc,
	ipc_port_t              *portp)
{
	ipc_copyin_rcleanup_t icrc;
	ipc_copyin_cleanup_t icc;
	ipc_entry_t entry;
	kern_return_t kr;

	/*
	 * Only let the caller-supplied "allow immovable" flags through;
	 * dead names are always tolerated (DEADOK).
	 */
	ipc_object_copyin_flags_t copyin_mask = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND
	    | IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE;
	copyin_mask = (copyin_flags & copyin_mask) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;

	/*
	 * We allow moving of immovable receive right of a service port when it is from launchd.
	 */
	task_t task = current_task_early();
#ifdef MACH_BSD
	if (task && proc_isinitproc(get_bsdtask_info(task))) {
		copyin_mask |= IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE;
	}
#endif

	/*
	 * Could first try a read lock when doing
	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	/* prime the receive-right cleanup state (ties in gdesc) */
	ipc_right_copyin_rcleanup_init(&icrc, gdesc);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	kr = ipc_right_copyin(space, name, msgt_name, copyin_mask, entry,
	    portp, &icc, &icrc);
	is_write_unlock(space);

	/* record telemetry when a provisional reply port was moved */
	if (moved_provisional_reply_port(msgt_name, icc.icc_deleted_port)) {
		send_prp_telemetry(-1);
	}

	/* deferred teardown must run with all locks dropped */
	ipc_right_copyin_cleanup_destroy(&icc, name);
	ipc_right_copyin_rcleanup_destroy(&icrc);

	return kr;
}
638
639 /*
640 * Routine: ipc_object_copyin_from_kernel
641 * Purpose:
642 * Copyin a naked capability from the kernel.
643 *
644 * MACH_MSG_TYPE_MOVE_RECEIVE
645 * The receiver must be ipc_space_kernel
646 * or the receive right must already be in limbo.
647 * Consumes the naked receive right.
648 * MACH_MSG_TYPE_COPY_SEND
649 * A naked send right must be supplied.
650 * The port gains a reference, and a send right
651 * if the port is still active.
652 * MACH_MSG_TYPE_MAKE_SEND
653 * The receiver must be ipc_space_kernel.
654 * The port gains a reference and a send right.
655 * MACH_MSG_TYPE_MOVE_SEND
656 * Consumes a naked send right.
657 * MACH_MSG_TYPE_MAKE_SEND_ONCE
658 * The port gains a reference and a send-once right.
 *              The receiver may also be the caller of the device
 *              subsystem, so no assertion is made.
661 * MACH_MSG_TYPE_MOVE_SEND_ONCE
662 * Consumes a naked send-once right.
663 * Conditions:
664 * Nothing locked.
665 */
666
void
ipc_object_copyin_from_kernel(
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name)
{
	assert(IP_VALID(port));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE:
		ip_mq_lock(port);
		require_ip_active(port);
		if (ip_in_a_space(port)) {
			/* may only move receive rights out of the kernel space */
			assert(ip_in_space(port, ipc_space_kernel));
			assert(port->ip_immovable_receive == 0);

			/* relevant part of ipc_port_clear_receiver */
			port->ip_mscount = 0;

			/* port transitions to IN-LIMBO state */
			port->ip_receiver_name = MACH_PORT_NULL;
			port->ip_destination = IP_NULL;
		}
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_COPY_SEND:
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* copying requires an existing naked send right */
			assert(port->ip_srights > 0);
		}
		/* port gains a send right and a reference */
		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MAKE_SEND:
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			/* receiver is the kernel, or a remote node's proxy */
			assert((ip_in_space(port, ipc_space_kernel)) ||
			    (port->ip_receiver->is_node_id != HOST_LOCAL_NODE));
			port->ip_mscount++;
		}

		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MOVE_SEND:
		/* move naked send right into the message */
		assert(port->ip_srights);
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		/* takes a ref and bumps ip_sorights while locked */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;

	case MACH_MSG_TYPE_MOVE_SEND_ONCE:
		/* move naked send-once right into the message */
		assert(port->ip_sorights);
		break;

	default:
		ipc_unreachable("ipc_object_copyin_from_kernel: strange rights");
	}
}
739
740 /*
741 * Routine: ipc_object_destroy
742 * Purpose:
743 * Destroys a naked capability.
744 * Consumes a ref for the port.
745 *
746 * A receive right should be in limbo or in transit.
747 * Conditions:
748 * Nothing locked.
749 */
750
751 void
ipc_object_destroy(ipc_port_t port,mach_msg_type_name_t msgt_name)752 ipc_object_destroy(
753 ipc_port_t port,
754 mach_msg_type_name_t msgt_name)
755 {
756 assert(IP_VALID(port));
757
758 switch (msgt_name) {
759 case MACH_MSG_TYPE_PORT_SEND:
760 ipc_port_release_send(port);
761 break;
762
763 case MACH_MSG_TYPE_PORT_SEND_ONCE:
764 ip_mq_lock(port);
765 ipc_notify_send_once_and_unlock(port);
766 break;
767
768 case MACH_MSG_TYPE_PORT_RECEIVE:
769 ipc_port_release_receive(port);
770 break;
771
772 default:
773 ipc_unreachable("ipc_object_destroy: strange rights");
774 }
775 }
776
777 /*
778 * Routine: ipc_object_destroy_dest
779 * Purpose:
 *	Destroys a naked capability for the destination
 *	of a message. Consumes a ref for the port.
782 *
783 * Conditions:
784 * Nothing locked.
785 */
786
787 void
ipc_object_destroy_dest(ipc_port_t port,mach_msg_type_name_t msgt_name)788 ipc_object_destroy_dest(
789 ipc_port_t port,
790 mach_msg_type_name_t msgt_name)
791 {
792 assert(IP_VALID(port));
793
794 switch (msgt_name) {
795 case MACH_MSG_TYPE_PORT_SEND:
796 ipc_port_release_send(port);
797 break;
798
799 case MACH_MSG_TYPE_PORT_SEND_ONCE:
800 ip_mq_lock(port);
801 ipc_notify_send_once_and_unlock(port);
802 break;
803
804 default:
805 ipc_unreachable("ipc_object_destroy_dest: strange rights");
806 }
807 }
808
809 /*
810 * Routine: ipc_object_insert_send_right
811 * Purpose:
812 * Insert a send right into an object already in the space.
813 * The specified name must already point to a valid object.
814 *
815 * Note: This really is a combined copyin()/copyout(),
816 * that avoids most of the overhead of being implemented that way.
817 *
818 * This is the fastpath for mach_port_insert_right.
819 *
820 * Conditions:
821 * Nothing locked.
822 *
823 * msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
824 * MACH_MSG_TYPE_COPY_SEND.
825 *
826 * Returns:
827 * KERN_SUCCESS Copied out object, consumed ref.
828 * KERN_INVALID_TASK The space is dead.
829 * KERN_INVALID_NAME Name doesn't exist in space.
830 * KERN_INVALID_CAPABILITY The object is dead.
831 * KERN_RIGHT_EXISTS Space has rights under another name.
832 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_msg_type_name_t    msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	ipc_port_t port;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (object == IPC_OBJECT_NULL) {
		/* dead name: nothing to make/copy a send right from */
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}
	if ((bits & MACH_PORT_TYPE_PORT_RIGHTS) == 0) {
		/* name denotes something other than a port right */
		is_write_unlock(space);
		return KERN_INVALID_RIGHT;
	}

	port = ip_object_to_port(object);

	ip_mq_lock(port);
	if (!ip_active(port)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		/* MAKE_SEND requires the receive right under this name */
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			port->ip_mscount++;
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				/* first send right under this name */
				ip_srights_inc(port);
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		/* COPY_SEND requires an existing send right under this name */
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	ip_mq_unlock(port);
	is_write_unlock(space);

	return kr;
}
906
907 /*
908 * Routine: ipc_object_copyout
909 * Purpose:
910 * Copyout a capability, placing it into a space.
911 * Always consumes a ref for the port.
912 * Conditions:
913 * Nothing locked.
914 * Returns:
915 * KERN_SUCCESS Copied out port, consumed ref.
916 * KERN_INVALID_TASK The space is dead.
917 * KERN_INVALID_CAPABILITY The port is dead.
918 * KERN_NO_SPACE No room in space for another right.
919 * KERN_UREFS_OVERFLOW Urefs limit exceeded
920 * and overflow wasn't specified.
921 */
922
kern_return_t
ipc_object_copyout(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_msg_guarded_port_descriptor_t *gdesc,
	mach_port_name_t        *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	mach_port_name_t name;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IP_VALID(port));

	/* set up turnstile inheritance before taking any IPC locks */
	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	/*
	 * Retry loop: each iteration may drop the space lock (to grow
	 * the entry table) or swap `port` for a label substitution,
	 * either of which requires re-validating everything.
	 */
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		/* reserve one entry up front */
		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		ip_mq_lock_check_aligned(port);
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* Don't actually copyout rights we aren't allowed to */
		if (!ip_label_check(space, port, msgt_name, &flags, &port_subst)) {
			ip_mq_unlock(port);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				port = IP_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			/* restart with the substituted port */
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, port is locked and active */

	/*
	 * Reuse an existing name for this port if the space already
	 * holds send/receive rights to it (not for send-once rights,
	 * which always get a fresh name).
	 */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, port, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		ipc_entry_claim(space, ip_to_object(port), &name, &entry);
	}

	kr = ipc_right_copyout(space, port, msgt_name,
	    flags, name, entry, gdesc);

	/* port is unlocked */
	is_write_unlock(space);

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IP_VALID(port)) {
		/* on failure the right is still consumed, per contract */
		ipc_object_destroy(port, msgt_name);
	}

	return kr;
}
1028
1029 /*
1030 * Routine: ipc_object_copyout_name
1031 * Purpose:
1032 * Copyout a capability, placing it into a space.
1033 * The specified name is used for the capability.
1034 * If successful, consumes a ref for the port.
1035 * Conditions:
1036 * Nothing locked.
1037 * Returns:
1038 * KERN_SUCCESS Copied out port, consumed ref.
1039 * KERN_INVALID_TASK The space is dead.
1040 * KERN_INVALID_CAPABILITY The port is dead.
1041 * KERN_UREFS_OVERFLOW Urefs limit exceeded
1042 * and overflow wasn't specified.
1043 * KERN_RIGHT_EXISTS Space has rights under another name.
1044 * KERN_NAME_EXISTS Name is already used.
1045 * KERN_INVALID_VALUE Supplied port name is invalid.
1046 */
1047
kern_return_t
ipc_object_copyout_name(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        name)
{
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IP_VALID(port));

	/* get (or create) the entry for the requested name */
	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	ip_mq_lock_check_aligned(port);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed ports do not allow callers
	 * to pick the name they end up with.
	 */
	if (!ip_active(port) || ip_is_kolabeled(port)) {
		ip_mq_unlock(port);
		/* only free the entry if we created it above */
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, port is locked and active */

	/*
	 * If the space already holds rights for this port, the caller's
	 * chosen name must match the existing one (except for send-once
	 * rights, which always use a fresh name).
	 */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, port, &oname, &oentry)) {
		if (name != oname) {
			ip_mq_unlock(port);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IPC_OBJECT_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		/* the name is taken by an unrelated right */
		ip_mq_unlock(port);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_port == IP_NULL);
		entry->ie_port = port;
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * We are slamming a receive right into the space, without
	 * first having been enqueued on a port destined there.  So,
	 * we have to arrange to boost the task appropriately if this
	 * port has assertions (and the task wants them).
	 */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				/* capture the count now; apply after unlocking */
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;
	}

#endif /* IMPORTANCE_INHERITANCE */

	kr = ipc_right_copyout(space, port, msgt_name,
	    IPC_OBJECT_COPYOUT_FLAGS_NONE, name, entry, NULL);

	/* port is unlocked */
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return kr;
}
1156
1157 /*
1158 * Routine: ipc_object_copyout_dest
1159 * Purpose:
1160 * Translates/consumes the destination right of a message.
1161 * This is unlike normal copyout because the right is consumed
1162 * in a funny way instead of being given to the receiving space.
 * The receiver gets its name for the port, if it has receive
 * rights, otherwise MACH_PORT_NULL.
1165 * Conditions:
1166 * The port is locked and active. Nothing else locked.
1167 * The port is unlocked and loses a reference.
1168 */
1169
void
ipc_object_copyout_dest(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        *namep)
{
	mach_port_name_t name;

	assert(IP_VALID(port));
	assert(ip_active(port));

	/*
	 * If the space is the receiver/owner of the port,
	 * then we quietly consume the right and return
	 * the space's name for the port. Otherwise
	 * we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_notify_nsenders_t nsrequest = { };

		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}
		/*
		 * Consume the send right; if it was the last one,
		 * arm a no-senders notification while still locked,
		 * but emit it only after the port lock is dropped.
		 */
		ip_srights_dec(port);
		if (port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		ipc_notify_no_senders_emit(nsrequest);

		/* drop the ref the consumed right carried */
		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		assert(port->ip_sorights > 0);

		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			ip_sorights_dec(port);
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 * A very bizarre case. The message
			 * was received, but before this copyout
			 * happened the space lost receive rights.
			 * We can't quietly consume the soright
			 * out from underneath some other task,
			 * so generate a send-once notification.
			 */

			/* consumes the soright and unlocks the port */
			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		ipc_unreachable("ipc_object_copyout_dest: strange rights");
	}

	*namep = name;
}
1244
/*
 * Both ports and port-sets embed their waitq at the same offset as the
 * generic ipc_object_waitq wrapper, so code that only has an ipc_object_t
 * (e.g. ipc_object_lock_allow_invalid below) can reach the waitq without
 * knowing which of the two types it holds. Pin that layout invariant here.
 */
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));
1249
1250 __abortlike
1251 static void
ipc_object_validate_preflight_panic(ipc_object_t io)1252 ipc_object_validate_preflight_panic(ipc_object_t io)
1253 {
1254 panic("ipc object %p is neither a port or a port-set", io);
1255 }
1256
1257 /*
1258 * Routine: ipc_object_lock_allow_invalid
1259 * Purpose:
1260 * Speculatively try to lock an object in an undefined state.
1261 *
1262 * This relies on the fact that IPC object memory is allocated
1263 * from sequestered zones, so at a given address, one can find:
1264 * 1. a valid object,
1265 * 2. a freed or invalid (uninitialized) object,
1266 * 3. unmapped memory.
1267 *
1268 * (2) is possible because the zone is made with ZC_ZFREE_CLEARMEM which
1269 * ensures freed elements are always zeroed.
1270 *
1271 * (3) is a direct courtesy of waitq_lock_allow_invalid().
1272 *
1273 * In order to disambiguate (1) from (2), we use the "waitq valid"
1274 * bit which is part of the lock. When that bit is absent,
1275 * waitq_lock() will function as expected, but
1276 * waitq_lock_allow_invalid() will not.
1277 *
1278 * Objects are then initialized and destroyed carefully so that
1279 * this "valid bit" is only set when the object invariants are
1280 * respected.
1281 *
1282 * Returns:
1283 * true: the lock was acquired
1284 * false: the object was freed or not initialized.
1285 */
bool
ipc_object_lock_allow_invalid(ipc_object_t orig_io)
{
	struct waitq *orig_wq = io_waitq(orig_io);
	struct waitq *wq = pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY);

	/*
	 * Preflight: the (possibly PGZ-translated) pointer must at least
	 * live in one of the two IPC object zones; anything else can never
	 * have been a port or port-set, which is a caller bug.
	 */
	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
#if CONFIG_PROB_GZALLOC
		if (orig_wq != wq) {
			/*
			 * The element was PGZ protected, and the translation
			 * returned another type than port or port-set, or
			 * ZONE_ID_INVALID (wq is NULL).
			 *
			 * We have to allow this skew, and assume the slot
			 * has held a now freed port/port-set.
			 */
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		ipc_object_validate_preflight_panic(orig_io);
	}

	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_t io = io_from_waitq(wq);

		/* lock held: the object must now satisfy its type invariants */
		ipc_object_validate(io, io_otype(io));
#if CONFIG_PROB_GZALLOC
		/*
		 * Re-translate after taking the lock: if the PGZ slot no
		 * longer maps to the object we locked, it was recycled
		 * underneath us.
		 */
		if (__improbable(wq != orig_wq &&
		    wq != pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY))) {
			/*
			 * This object is no longer held in the slot,
			 * whatever this object is, it's not the droid
			 * we're looking for. Pretend we failed the lock.
			 */
			waitq_unlock(wq);
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		return true;
	}
	/* freed or uninitialized object: the "waitq valid" bit was clear */
	return false;
}
1333