1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_object.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC objects.
71 */
72
73 #include <mach/mach_types.h>
74 #include <mach/boolean.h>
75 #include <mach/kern_return.h>
76 #include <mach/port.h>
77 #include <mach/message.h>
78
79 #include <kern/kern_types.h>
80 #include <kern/misc_protos.h>
81 #include <kern/ipc_kobject.h>
82 #include <kern/zalloc_internal.h> // zone_id_for_element
83
84 #include <ipc/ipc_types.h>
85 #include <ipc/ipc_importance.h>
86 #include <ipc/port.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_entry.h>
89 #include <ipc/ipc_object.h>
90 #include <ipc/ipc_hash.h>
91 #include <ipc/ipc_right.h>
92 #include <ipc/ipc_notify.h>
93 #include <ipc/ipc_port.h>
94 #include <ipc/ipc_pset.h>
95
96 #include <security/mac_mach_internal.h>
97
/* Queue of objects whose last-reference free had to be deferred (see
 * ipc_object_free_safe); drained by ipc_object_deallocate_queue_invoke. */
static struct mpsc_daemon_queue ipc_object_deallocate_queue;
/* One zone per IPC object type (IOT_PORT, IOT_PORT_SET); initialized at
 * boot via the ZONE_INIT registrations below and read-only thereafter. */
SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER];
100
101 /*
102 * In order to do lockfree lookups in the IPC space, we combine two schemes:
103 *
104 * - the ipc table pointer is protected with hazard pointers to allow
105 * dereferencing it with only holding a ref on a task or space;
106 *
107 * - we use ipc_object_lock_allow_invalid in order to lock locks and validate
108 * that they are the droid we're looking for.
109 *
110 * The second half requires that virtual addresses assigned that ever held
111 * a port, either hold a port, or nothing, forever. To get this property,
112 * we just piggy back on the zone sequestering security feature which gives
113 * us exactly that.
114 *
115 * However, sequestering really only "works" on a sufficiently large address
116 * space, especially for a resource that can be made by userspace at will,
117 * so we can't do lockless lookups on ILP32.
118 *
119 * Note: this scheme is incompatible with gzalloc (because it doesn't sequester)
120 * and kasan quarantines (because it uses elements to store backtraces
121 * in them which lets the waitq lock appear "valid" by accident when
122 * elements are freed).
123 */
124 #if MACH_LOCKFREE_SPACE
125 #define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER | \
126 ZC_NOGZALLOC | ZC_KASAN_NOQUARANTINE)
127 #else
128 #define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM)
129 #endif
130
/* Port zone: IPC_OBJECT_ZC_BASE supplies the clear-on-free (and, with
 * MACH_LOCKFREE_SPACE, sequestering) semantics required by the lockfree
 * lookup scheme described above; ZC_CACHING adds per-CPU caching. */
ZONE_INIT(&ipc_object_zones[IOT_PORT],
    "ipc ports", sizeof(struct ipc_port),
    IPC_OBJECT_ZC_BASE | ZC_CACHING, ZONE_ID_IPC_PORT, NULL);

/* Port-set zone: same base flags, no per-CPU caching. */
ZONE_INIT(&ipc_object_zones[IOT_PORT_SET],
    "ipc port sets", sizeof(struct ipc_pset),
    IPC_OBJECT_ZC_BASE, ZONE_ID_IPC_PORT_SET, NULL);
138
139 __attribute__((noinline))
140 static void
ipc_object_free(unsigned int otype,ipc_object_t object,bool last_ref)141 ipc_object_free(unsigned int otype, ipc_object_t object, bool last_ref)
142 {
143 if (last_ref) {
144 if (otype == IOT_PORT) {
145 ipc_port_finalize(ip_object_to_port(object));
146 } else {
147 ipc_pset_finalize(ips_object_to_pset(object));
148 }
149 }
150 zfree(ipc_object_zones[otype], object);
151 }
152
/*
 * Defer the last-reference free of an IPC object: enqueue it on
 * ipc_object_deallocate_queue, whose daemon callback performs the
 * actual ipc_object_free().  Used when the current context cannot
 * free directly (see ipc_object_release_safe).  The object's waitq
 * must already be invalidated, which makes its mpsc linkage reusable.
 */
__attribute__((noinline))
static void
ipc_object_free_safe(ipc_object_t object)
{
	struct waitq *wq = io_waitq(object);

	assert(!waitq_is_valid(wq));
	/* linkage must be idle: the object can only be queued once */
	assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
	mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
	    &wq->waitq_defer, MPSC_QUEUE_NONE);
}
164
/*
 * Daemon callback for ipc_object_deallocate_queue: completes the
 * last-reference free that ipc_object_free_safe() deferred.
 */
static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
    __assert_only mpsc_daemon_queue_t dq)
{
	/* recover the object from its embedded waitq_defer linkage */
	struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
	ipc_object_t io = io_from_waitq(wq);

	assert(dq == &ipc_object_deallocate_queue);

	/* reset the linkage before freeing (mirrors the idle-linkage
	 * assertion in ipc_object_free_safe) */
	os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
	ipc_object_free(io_otype(io), io, true);
}
177
/*
 * Register ipc_object_deallocate_queue with the thread-deallocate
 * daemon so deferred object frees get serviced.  Called once during
 * bring-up (before any ipc_object_free_safe enqueue can occur).
 */
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}
184
/*
 * Routine: ipc_object_reference
 * Purpose:
 *	Take a reference to an object.
 */

void
ipc_object_reference(
	ipc_object_t io)
{
	/* io_references is managed through the raw os_ref API; the
	 * static_assert guards the cast below against layout drift */
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
198
/*
 * Routine: ipc_object_release
 * Purpose:
 *	Release a reference to an object, freeing it if this was the
 *	last one.  Callers must be preemptible (the free path may
 *	block); use ipc_object_release_safe() otherwise.
 */

void
ipc_object_release(
	ipc_object_t io)
{
#if DEBUG
	/* freeing with preemption disabled is not allowed on this path */
	assert(get_preemption_level() == 0);
#endif

	if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
		/* Free the object */
		ipc_object_free(io_otype(io), io, true);
	}
}
218
219 /*
220 * Routine: ipc_object_release_safe
221 * Purpose:
222 * Release a reference to an object safely
223 */
224
225 void
ipc_object_release_safe(ipc_object_t io)226 ipc_object_release_safe(
227 ipc_object_t io)
228 {
229 if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
230 if (get_preemption_level() == 0) {
231 ipc_object_free(io_otype(io), io, true);
232 } else {
233 ipc_object_free_safe(io);
234 }
235 }
236 }
237
/*
 * Routine: ipc_object_release_live
 * Purpose:
 *	Release a reference to an object that isn't the last one.
 *	(os_ref_release_live_* enforces that invariant, so no free
 *	path is needed here.)
 */

void
ipc_object_release_live(
	ipc_object_t io)
{
	os_ref_release_live_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
250
251 /*
252 * Routine: ipc_object_translate
253 * Purpose:
254 * Look up an object in a space.
255 * Conditions:
256 * Nothing locked before. If successful, the object
257 * is returned active and locked. The caller doesn't get a ref.
258 * Returns:
259 * KERN_SUCCESS Object returned locked.
260 * KERN_INVALID_TASK The space is dead.
261 * KERN_INVALID_NAME The name doesn't denote a right
262 * KERN_INVALID_RIGHT Name doesn't denote the correct right
263 */
264 kern_return_t
ipc_object_translate(ipc_space_t space,mach_port_name_t name,mach_port_right_t right,ipc_object_t * objectp)265 ipc_object_translate(
266 ipc_space_t space,
267 mach_port_name_t name,
268 mach_port_right_t right,
269 ipc_object_t *objectp)
270 {
271 ipc_entry_bits_t bits;
272 ipc_object_t object;
273 kern_return_t kr;
274
275 if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
276 return KERN_INVALID_RIGHT;
277 }
278
279 kr = ipc_right_lookup_read(space, name, &bits, &object);
280 if (kr != KERN_SUCCESS) {
281 return kr;
282 }
283 /* object is locked and active */
284
285 if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
286 io_unlock(object);
287 return KERN_INVALID_RIGHT;
288 }
289
290 *objectp = object;
291 return KERN_SUCCESS;
292 }
293
/*
 * Routine: ipc_object_translate_two
 * Purpose:
 *	Look up two objects in a space.
 * Conditions:
 *	Nothing locked before. If successful, the objects
 *	are returned locked. The caller doesn't get a ref.
 * Returns:
 *	KERN_SUCCESS		Objects returned locked.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_INVALID_NAME	A name doesn't denote a right.
 *	KERN_INVALID_RIGHT	A name doesn't denote the correct right.
 */

kern_return_t
ipc_object_translate_two(
	ipc_space_t space,
	mach_port_name_t name1,
	mach_port_right_t right1,
	ipc_object_t *objectp1,
	mach_port_name_t name2,
	mach_port_right_t right2,
	ipc_object_t *objectp2)
{
	ipc_entry_t entry1;
	ipc_entry_t entry2;
	ipc_object_t object1, object2;
	kern_return_t kr;
	boolean_t doguard = TRUE;

	kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	/* check that entry1 holds the right type requested for it */
	if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right1 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry1->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* same check for entry2 */
	if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right2 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry2->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/*
	 * Lock both objects in entry order while still holding the space
	 * read lock, validating each is still active.  On failure every
	 * lock taken so far is dropped before returning.
	 */
	object1 = entry1->ie_object;
	assert(object1 != IO_NULL);
	io_lock(object1);
	if (!io_active(object1)) {
		io_unlock(object1);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	object2 = entry2->ie_object;
	assert(object2 != IO_NULL);
	io_lock(object2);
	if (!io_active(object2)) {
		io_unlock(object1);
		io_unlock(object2);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	*objectp1 = object1;
	*objectp2 = object2;

	/* both objects are returned locked; space lock is dropped */
	is_read_unlock(space);
	return KERN_SUCCESS;
}
381
/*
 * Routine: ipc_object_alloc_dead
 * Purpose:
 *	Allocate a dead-name entry.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	KERN_SUCCESS		The dead name is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc_dead(
	ipc_space_t space,
	mach_port_name_t *namep)
{
	ipc_entry_t entry;
	kern_return_t kr;

	/* IO_NULL object: the entry denotes a dead name, not a port */
	kr = ipc_entry_alloc(space, IO_NULL, namep, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */

	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
	ipc_entry_modified(space, *namep, entry);
	is_write_unlock(space);
	return KERN_SUCCESS;
}
415
/*
 * Routine: ipc_object_alloc_dead_name
 * Purpose:
 *	Allocate a dead-name entry, with a specific name.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	KERN_SUCCESS		The dead name is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NAME_EXISTS	The name already denotes a right.
 */

kern_return_t
ipc_object_alloc_dead_name(
	ipc_space_t space,
	mach_port_name_t name)
{
	ipc_entry_t entry;
	kern_return_t kr;

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked */

	/* ipc_entry_alloc_name may return an existing entry: reject it */
	if (ipc_right_inuse(entry)) {
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	}

	/* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */

	assert(entry->ie_object == IO_NULL);
	entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);
	return KERN_SUCCESS;
}
455
/*
 * Routine: ipc_object_alloc
 * Purpose:
 *	Allocate an object.
 * Conditions:
 *	Nothing locked.
 *	The space is write locked on successful return.
 *	The caller doesn't get a reference for the object.
 * Returns:
 *	KERN_SUCCESS		The object is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NO_SPACE		No room for an entry in the space.
 */

kern_return_t
ipc_object_alloc(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t *namep,
	ipc_object_t *objectp)
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	/* Z_NOFAIL: allocation cannot return NULL, no error path needed */
	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(TRUE, otype, 0));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	/* seed the name from the object address; the entry allocator
	 * may pick a different final name (returned through namep) */
	*namep = CAST_MACH_PORT_TO_NAME(object);
	kr = ipc_entry_alloc(space, object, namep, &entry);
	if (kr != KERN_SUCCESS) {
		/* last_ref == false: object was never initialized/finalized */
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, *namep, entry);

	*objectp = object;
	return KERN_SUCCESS;
}
506
/*
 * Routine: ipc_object_alloc_name
 * Purpose:
 *	Allocate an object, with a specific name.
 * Conditions:
 *	Nothing locked. If successful, the object is returned locked.
 *	The caller doesn't get a reference for the object.
 *
 *	finish_init() must call an ipc_*_init function
 *	that will return the object locked (using IPC_PORT_INIT_LOCKED,
 *	or SYNC_POLICY_INIT_LOCKED, or equivalent).
 *
 * Returns:
 *	KERN_SUCCESS		The object is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NAME_EXISTS	The name already denotes a right.
 */

kern_return_t
ipc_object_alloc_name(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t name,
	ipc_object_t *objectp,
	void (^finish_init)(ipc_object_t))
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	/* Z_NOFAIL: allocation cannot return NULL, no error path needed */
	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(TRUE, otype, 0));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		/* last_ref == false: object was never initialized/finalized */
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	/* the requested name may already be taken */
	if (ipc_right_inuse(entry)) {
		is_write_unlock(space);
		ipc_object_free(otype, object, false);
		return KERN_NAME_EXISTS;
	}

	entry->ie_bits |= type | urefs;
	entry->ie_object = object;

	/* caller-supplied init must leave the object locked (see header) */
	finish_init(object);
	/* object is locked */
	io_lock_held(object);

	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);

	*objectp = object;
	return KERN_SUCCESS;
}
574
575 /* Routine: ipc_object_validate
576 * Purpose:
577 * Validates an ipc port or port set as belonging to the correct
578 * zone.
579 */
580
581 void
ipc_object_validate(ipc_object_t object)582 ipc_object_validate(
583 ipc_object_t object)
584 {
585 if (io_otype(object) != IOT_PORT_SET) {
586 zone_id_require(ZONE_ID_IPC_PORT,
587 sizeof(struct ipc_port), object);
588 } else {
589 zone_id_require(ZONE_ID_IPC_PORT_SET,
590 sizeof(struct ipc_pset), object);
591 }
592 }
593
594 /*
595 * Routine: ipc_object_copyin_type
596 * Purpose:
597 * Convert a send type name to a received type name.
598 */
599
600 mach_msg_type_name_t
ipc_object_copyin_type(mach_msg_type_name_t msgt_name)601 ipc_object_copyin_type(
602 mach_msg_type_name_t msgt_name)
603 {
604 switch (msgt_name) {
605 case MACH_MSG_TYPE_MOVE_RECEIVE:
606 return MACH_MSG_TYPE_PORT_RECEIVE;
607
608 case MACH_MSG_TYPE_MOVE_SEND_ONCE:
609 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
610 return MACH_MSG_TYPE_PORT_SEND_ONCE;
611
612 case MACH_MSG_TYPE_MOVE_SEND:
613 case MACH_MSG_TYPE_MAKE_SEND:
614 case MACH_MSG_TYPE_COPY_SEND:
615 return MACH_MSG_TYPE_PORT_SEND;
616
617 case MACH_MSG_TYPE_DISPOSE_RECEIVE:
618 case MACH_MSG_TYPE_DISPOSE_SEND:
619 case MACH_MSG_TYPE_DISPOSE_SEND_ONCE:
620 /* fall thru */
621 default:
622 return MACH_MSG_TYPE_PORT_NONE;
623 }
624 }
625
/*
 * Routine: ipc_object_copyin
 * Purpose:
 *	Copyin a capability from a space.
 *	If successful, the caller gets a ref
 *	for the resulting object, unless it is IO_DEAD.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	KERN_SUCCESS		Acquired an object, possibly IO_DEAD.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_INVALID_NAME	Name doesn't exist in space.
 *	KERN_INVALID_RIGHT	Name doesn't denote correct right.
 */

kern_return_t
ipc_object_copyin(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name,
	ipc_object_t *objectp,
	mach_port_context_t context,
	mach_msg_guard_flags_t *guard_flags,
	ipc_object_copyin_flags_t copyin_flags)
{
	ipc_entry_t entry;
	ipc_port_t soright;
	ipc_port_t release_port;
	kern_return_t kr;
	int assertcnt = 0;

	/* pass through only ALLOW_IMMOVABLE_SEND from the caller's flags;
	 * always tolerate dead names (DEADOK) on this path */
	ipc_object_copyin_flags_t irc_flags = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
	irc_flags = (copyin_flags & irc_flags) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;
	/*
	 * Could first try a read lock when doing
	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	release_port = IP_NULL;
	kr = ipc_right_copyin(space, name, entry,
	    msgt_name, irc_flags,
	    objectp, &soright,
	    &release_port,
	    &assertcnt,
	    context,
	    guard_flags);
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/* drop any importance assertions the copyin released */
	if (0 < assertcnt && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, assertcnt);
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* deferred reference drop, done outside the space lock */
	if (release_port != IP_NULL) {
		ip_release(release_port);
	}

	/* the right went away under this name: deliver port-deleted
	 * notification to the registered send-once right */
	if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) {
		ipc_notify_port_deleted(soright, name);
	}

	return kr;
}
697
/*
 * Routine: ipc_object_copyin_from_kernel
 * Purpose:
 *	Copyin a naked capability from the kernel.
 *
 *	MACH_MSG_TYPE_MOVE_RECEIVE
 *		The receiver must be ipc_space_kernel
 *		or the receive right must already be in limbo.
 *		Consumes the naked receive right.
 *	MACH_MSG_TYPE_COPY_SEND
 *		A naked send right must be supplied.
 *		The port gains a reference, and a send right
 *		if the port is still active.
 *	MACH_MSG_TYPE_MAKE_SEND
 *		The receiver must be ipc_space_kernel.
 *		The port gains a reference and a send right.
 *	MACH_MSG_TYPE_MOVE_SEND
 *		Consumes a naked send right.
 *	MACH_MSG_TYPE_MAKE_SEND_ONCE
 *		The port gains a reference and a send-once right.
 *		Receiver also be the caller of device subsystem,
 *		so no assertion.
 *	MACH_MSG_TYPE_MOVE_SEND_ONCE
 *		Consumes a naked send-once right.
 * Conditions:
 *	Nothing locked.
 */

void
ipc_object_copyin_from_kernel(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	assert(IO_VALID(object));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		require_ip_active(port);
		if (ip_in_a_space(port)) {
			/* only the kernel space may hold the receive right
			 * here, and it must be a movable one */
			assert(ip_in_space(port, ipc_space_kernel));
			assert(port->ip_immovable_receive == 0);

			/* relevant part of ipc_port_clear_receiver */
			port->ip_mscount = 0;

			/* port transtions to IN-LIMBO state */
			port->ip_receiver_name = MACH_PORT_NULL;
			port->ip_destination = IP_NULL;
		}
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			/* caller supplied a naked send right, so at least
			 * one send right must exist on an active port */
			assert(port->ip_srights > 0);
		}
		/* note: srights is bumped even on a dead port */
		port->ip_srights++;
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
			/* receiver is the kernel, or a remote node */
			assert((ip_in_space(port, ipc_space_kernel)) ||
			    (port->ip_receiver->is_node_id != HOST_LOCAL_NODE));
			port->ip_mscount++;
		}

		port->ip_srights++;
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		/* move naked send right into the message */
		assert(ip_object_to_port(object)->ip_srights);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		/* takes the ref and sorights bump for us */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* move naked send-once right into the message */
		assert(ip_object_to_port(object)->ip_sorights);
		break;
	}

	default:
		panic("ipc_object_copyin_from_kernel: strange rights");
	}
}
812
/*
 * Routine: ipc_object_destroy
 * Purpose:
 *	Destroys a naked capability.
 *	Consumes a ref for the object.
 *
 *	A receive right should be in limbo or in transit.
 * Conditions:
 *	Nothing locked.
 */

void
ipc_object_destroy(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(ip_object_to_port(object));
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		/* a send-once right must be honored: fire the
		 * send-once notification (consumes lock and right) */
		io_lock(object);
		ipc_notify_send_once_and_unlock(ip_object_to_port(object));
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE:
		ipc_port_release_receive(ip_object_to_port(object));
		break;

	default:
		panic("ipc_object_destroy: strange rights");
	}
}
850
/*
 * Routine: ipc_object_destroy_dest
 * Purpose:
 *	Destroys a naked capability for the destination of
 *	of a message. Consumes a ref for the object.
 *	(Only send and send-once rights are legal destinations.)
 *
 * Conditions:
 *	Nothing locked.
 */

void
ipc_object_destroy_dest(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	ipc_port_t port = ip_object_to_port(object);

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		/* honor the send-once right with a notification
		 * (consumes the lock and the right) */
		ip_mq_lock(port);
		ipc_notify_send_once_and_unlock(port);
		break;

	default:
		panic("ipc_object_destroy_dest: strange rights");
	}
}
885
/*
 * Routine: ipc_object_insert_send_right
 * Purpose:
 *	Insert a send right into an object already in the space.
 *	The specified name must already point to a valid object.
 *
 *	Note: This really is a combined copyin()/copyout(),
 *	that avoids most of the overhead of being implemented that way.
 *
 *	This is the fastpath for mach_port_insert_right.
 *
 * Conditions:
 *	Nothing locked.
 *
 *	msgt_name must be MACH_MSG_TYPE_MAKE_SEND or
 *	MACH_MSG_TYPE_COPY_SEND (enforced by the assert below).
 *
 * Returns:
 *	KERN_SUCCESS		Copied out object, consumed ref.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_INVALID_NAME	Name doesn't exist in space.
 *	KERN_INVALID_CAPABILITY	The object is dead.
 *	KERN_RIGHT_EXISTS	Space has rights under another name.
 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	if (!IO_VALID(entry->ie_object)) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	io_lock(object);
	if (!io_active(object)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		/* MAKE_SEND requires the receive right at this name */
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port = ip_object_to_port(object);
			port->ip_mscount++;
			/* materialize a send right if none existed yet */
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				port->ip_srights++;
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		/* COPY_SEND requires an existing send right at this name */
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	io_unlock(object);
	is_write_unlock(space);

	return kr;
}
977
/*
 * Routine: ipc_object_copyout
 * Purpose:
 *	Copyout a capability, placing it into a space.
 *	Always consumes a ref for the object.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	KERN_SUCCESS		Copied out object, consumed ref.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_INVALID_CAPABILITY	The object is dead.
 *	KERN_NO_SPACE		No room in space for another right.
 *	KERN_UREFS_OVERFLOW	Urefs limit exceeded
 *		and overflow wasn't specified.
 */

kern_return_t
ipc_object_copyout(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t *context,
	mach_msg_guard_flags_t *guard_flags,
	mach_port_name_t *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	mach_port_name_t name;
	ipc_port_t port = ip_object_to_port(object);
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	/* pre-arm turnstile state for the receiving knote before taking
	 * any IPC locks */
	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	/*
	 * Retry loop: each iteration must (re)validate the space,
	 * reserve an entry, and lock/label-check the object, because
	 * growing the table or a label substitution drops the locks.
	 */
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		/* reserve room for one entry before locking the object */
		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		io_lock(object);
		if (!io_active(object)) {
			io_unlock(object);
			is_write_unlock(space);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* Don't actually copyout rights we aren't allowed to */
		if (!ip_label_check(space, port, msgt_name, &flags, &port_subst)) {
			io_unlock(object);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				object = IO_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			/* restart the whole dance with the substituted port */
			object = ip_to_object(port);
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, object is locked and active */

	/* reuse an existing entry if the space already holds this right
	 * under some name (not applicable to send-once rights) */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		/* consume the entry reserved by ipc_entries_hold */
		ipc_entry_claim(space, object, &name, &entry);
	}

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, flags, context, guard_flags, object);

	/* object is unlocked */
	is_write_unlock(space);

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IO_VALID(object)) {
		/* copyout failed: consume the caller's ref/right */
		ipc_object_destroy(object, msgt_name);
	}

	return kr;
}
1103
1104 /*
1105 * Routine: ipc_object_copyout_name
1106 * Purpose:
1107 * Copyout a capability, placing it into a space.
1108 * The specified name is used for the capability.
1109 * If successful, consumes a ref for the object.
1110 * Conditions:
1111 * Nothing locked.
1112 * Returns:
1113 * KERN_SUCCESS Copied out object, consumed ref.
1114 * KERN_INVALID_TASK The space is dead.
1115 * KERN_INVALID_CAPABILITY The object is dead.
1116 * KERN_UREFS_OVERFLOW Urefs limit exceeded
1117 * and overflow wasn't specified.
1118 * KERN_RIGHT_EXISTS Space has rights under another name.
1119 * KERN_NAME_EXISTS Name is already used.
1120 * KERN_INVALID_VALUE Supplied port name is invalid.
1121 */
1122
kern_return_t
ipc_object_copyout_name(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        name)
{
	ipc_port_t port = ip_object_to_port(object);
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	/* assertion count captured under the port lock, applied after unlock */
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	/* reserve (or find already in use) the entry for the caller-chosen name */
	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	io_lock(object);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed objects do not allow callers
	 * to pick the name they end up with.
	 */
	if (!io_active(object) || ip_is_kolabeled(port)) {
		io_unlock(object);
		/* only free the entry if we just allocated it above */
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, object is locked and active */

	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &oname, &oentry)) {
		/*
		 * The space already holds a right for this port under
		 * another name; a second name is only acceptable if it
		 * is that very same name.
		 */
		if (name != oname) {
			io_unlock(object);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IO_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		/* the requested name is occupied by an unrelated right */
		io_unlock(object);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_object == IO_NULL);

		entry->ie_object = object;
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * We are slamming a receive right into the space, without
	 * first having been enqueued on a port destined there.  So,
	 * we have to arrange to boost the task appropriately if this
	 * port has assertions (and the task wants them).
	 */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				assertcnt = port->ip_impcount;
				/* ref held so task_imp stays valid past the unlock */
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;
	}

#endif /* IMPORTANCE_INHERITANCE */

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, object);

	/* object is unlocked */
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 * (done here, with no IPC locks held, to respect lock ordering)
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return kr;
}
1234
1235 /*
1236 * Routine: ipc_object_copyout_dest
1237 * Purpose:
1238 * Translates/consumes the destination right of a message.
1239 * This is unlike normal copyout because the right is consumed
1240 * in a funny way instead of being given to the receiving space.
1241 * The receiver gets his name for the port, if he has receive
1242 * rights, otherwise MACH_PORT_NULL.
1243 * Conditions:
1244 * The object is locked and active. Nothing else locked.
1245 * The object is unlocked and loses a reference.
1246 */
1247
void
ipc_object_copyout_dest(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        *namep)
{
	mach_port_name_t name;

	assert(IO_VALID(object));
	assert(io_active(object));

	/*
	 * If the space is the receiver/owner of the object,
	 * then we quietly consume the right and return
	 * the space's name for the object. Otherwise
	 * we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_notify_nsenders_t nsrequest = { };

		if (ip_in_space(port, space)) {
			/* receiver's own name for its receive right */
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}

		assert(port->ip_srights > 0);
		if (--port->ip_srights == 0) {
			/* last send right gone: arm a no-senders notification */
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		/* deliver the (possibly empty) notification without the lock */
		ipc_notify_no_senders_emit(nsrequest);

		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		assert(port->ip_sorights > 0);

		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			port->ip_sorights--;
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 * A very bizarre case.  The message
			 * was received, but before this copyout
			 * happened the space lost receive rights.
			 * We can't quietly consume the soright
			 * out from underneath some other task,
			 * so generate a send-once notification.
			 */

			/* consumes the soright and unlocks the port */
			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		panic("ipc_object_copyout_dest: strange rights");
		/* NOTREACHED: assignment only quiets the compiler */
		name = MACH_PORT_DEAD;
	}

	*namep = name;
}
1327
/*
 * The lock routines below reach the embedded waitq through io_waitq(),
 * treating ports and port-sets uniformly via struct ipc_object_waitq.
 * That aliasing is only sound if the waitq lives at the same offset in
 * all three layouts — enforce it at compile time.
 */
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));
1332
1333 /*
1334 * Routine: ipc_object_lock
1335 * Purpose:
1336 * Validate, then acquire a lock on an ipc object
1337 */
1338 void
ipc_object_lock(ipc_object_t io)1339 ipc_object_lock(ipc_object_t io)
1340 {
1341 ipc_object_validate(io);
1342 waitq_lock(io_waitq(io));
1343 }
1344
1345 #if MACH_LOCKFREE_SPACE
/*
 * Out-of-line abort path for ipc_object_lock_allow_invalid():
 * taken when the candidate address is not backed by the port or
 * port-set zones, so it can never have been a valid IPC object.
 */
__abortlike
static void
ipc_object_validate_preflight_panic(ipc_object_t io)
{
	panic("ipc object %p is neither a port or a port-set", io);
}
1352
1353 /*
1354 * Routine: ipc_object_lock_allow_invalid
1355 * Purpose:
1356 * Speculatively try to lock an object in an undefined state.
1357 *
1358 * This relies on the fact that IPC object memory is allocated
1359 * from sequestered zones, so at a given address, one can find:
1360 * 1. a valid object,
1361 * 2. a freed or invalid (uninitialized) object,
1362 * 3. unmapped memory.
1363 *
1364 * (2) is possible because the zone is made with ZC_ZFREE_CLEARMEM which
1365 * ensures freed elements are always zeroed.
1366 *
1367 * (3) is a direct courtesy of waitq_lock_allow_invalid().
1368 *
1369 * In order to disambiguate (1) from (2), we use the "waitq valid"
1370 * bit which is part of the lock. When that bit is absent,
1371 * waitq_lock() will function as expected, but
1372 * waitq_lock_allow_invalid() will not.
1373 *
1374 * Objects are then initialized and destroyed carefully so that
1375 * this "valid bit" is only set when the object invariants are
1376 * respected.
1377 *
1378 * Returns:
1379 * true: the lock was acquired
1380 * false: the object was freed or not initialized.
1381 */
bool
ipc_object_lock_allow_invalid(ipc_object_t io)
{
	struct waitq *orig_wq = io_waitq(io);
	struct waitq *wq = pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY);

	/*
	 * Preflight: the address must at least lie inside the port or
	 * port-set zones; anything else can never have been an IPC
	 * object and is a fatal error (except for a stale PGZ slot).
	 */
	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
#if CONFIG_PROB_GZALLOC
		if (orig_wq != wq) {
			/*
			 * The element was PGZ protected, and the translation
			 * returned another type than port or port-set.
			 *
			 * We have to allow this skew, and assumed the slot
			 * has held a now freed port/port-set.
			 */
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		ipc_object_validate_preflight_panic(io);
	}

	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_validate(io);
#if CONFIG_PROB_GZALLOC
		/* re-decode after acquiring the lock to detect slot reuse */
		if (__improbable(wq != orig_wq &&
		    wq != pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY))) {
			/*
			 * This object is no longer held in the slot,
			 * whatever this object is, it's not the droid
			 * we're looking for. Pretend we failed the lock.
			 */
			waitq_unlock(orig_wq);
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		return true;
	}
	return false;
}
1426 #endif /* MACH_LOCKFREE_SPACE */
1427
1428 /*
1429 * Routine: ipc_object_lock_try
1430 * Purpose:
1431 * Validate, then try to acquire a lock on an object,
1432 * fail if there is an existing busy lock
1433 */
1434 bool
ipc_object_lock_try(ipc_object_t io)1435 ipc_object_lock_try(ipc_object_t io)
1436 {
1437 ipc_object_validate(io);
1438 return waitq_lock_try(io_waitq(io));
1439 }
1440
1441 /*
1442 * Routine: ipc_object_unlock
1443 * Purpose:
1444 * Unlocks the given object.
1445 */
1446 void
ipc_object_unlock(ipc_object_t io)1447 ipc_object_unlock(ipc_object_t io)
1448 {
1449 waitq_unlock(io_waitq(io));
1450 }
1451