1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_object.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC objects.
71 */
72
73 #include <mach/mach_types.h>
74 #include <mach/boolean.h>
75 #include <mach/kern_return.h>
76 #include <mach/port.h>
77 #include <mach/message.h>
78
79 #include <kern/kern_types.h>
80 #include <kern/misc_protos.h>
81 #include <kern/ipc_kobject.h>
82 #include <kern/zalloc_internal.h> // zone_id_for_element
83
84 #include <ipc/ipc_types.h>
85 #include <ipc/ipc_importance.h>
86 #include <ipc/port.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_entry.h>
89 #include <ipc/ipc_object.h>
90 #include <ipc/ipc_hash.h>
91 #include <ipc/ipc_right.h>
92 #include <ipc/ipc_notify.h>
93 #include <ipc/ipc_port.h>
94 #include <ipc/ipc_pset.h>
95
96 #include <security/mac_mach_internal.h>
97
/* Deferred-free queue drained by the thread-deallocate daemon (see
 * ipc_object_free_safe / ipc_object_deallocate_queue_invoke below). */
static struct mpsc_daemon_queue ipc_object_deallocate_queue;
/* One zone per IPC object type (ports, port sets); initialized via ZONE_INIT. */
SECURITY_READ_ONLY_LATE(zone_t) ipc_object_zones[IOT_NUMBER];
100
101 /*
102 * In order to do lockfree lookups in the IPC space, we combine two schemes:
103 *
104 * - the ipc table pointer is protected with hazard pointers to allow
105 * dereferencing it with only holding a ref on a task or space;
106 *
107 * - we use ipc_object_lock_allow_invalid in order to lock locks and validate
108 * that they are the droid we're looking for.
109 *
110 * The second half requires that virtual addresses assigned that ever held
111 * a port, either hold a port, or nothing, forever. To get this property,
112 * we just piggy back on the zone sequestering security feature which gives
113 * us exactly that.
114 *
115 * However, sequestering really only "works" on a sufficiently large address
116 * space, especially for a resource that can be made by userspace at will,
117 * so we can't do lockless lookups on ILP32.
118 *
119 * Note: this scheme is incompatible with kasan quarantines
120 * (because it uses elements to store backtraces in them
121 * which lets the waitq lock appear "valid" by accident when
122 * elements are freed).
123 */
/* Common zone flags: zero on free, and sequester VAs (see block comment above). */
#define IPC_OBJECT_ZC_BASE (ZC_ZFREE_CLEARMEM | ZC_SEQUESTER)

/* Port zone is per-CPU cached (ZC_CACHING): ports are made at high rates. */
ZONE_INIT(&ipc_object_zones[IOT_PORT],
    "ipc ports", sizeof(struct ipc_port),
    IPC_OBJECT_ZC_BASE | ZC_CACHING, ZONE_ID_IPC_PORT, NULL);

/* Port sets are rarer; no caching. */
ZONE_INIT(&ipc_object_zones[IOT_PORT_SET],
    "ipc port sets", sizeof(struct ipc_pset),
    IPC_OBJECT_ZC_BASE, ZONE_ID_IPC_PORT_SET, NULL);
133
134 __attribute__((noinline))
135 static void
ipc_object_free(unsigned int otype,ipc_object_t object,bool last_ref)136 ipc_object_free(unsigned int otype, ipc_object_t object, bool last_ref)
137 {
138 if (last_ref) {
139 if (otype == IOT_PORT) {
140 ipc_port_finalize(ip_object_to_port(object));
141 } else {
142 ipc_pset_finalize(ips_object_to_pset(object));
143 }
144 }
145 zfree(ipc_object_zones[otype], object);
146 }
147
148 __attribute__((noinline))
149 static void
ipc_object_free_safe(ipc_object_t object)150 ipc_object_free_safe(ipc_object_t object)
151 {
152 struct waitq *wq = io_waitq(object);
153
154 assert(!waitq_is_valid(wq));
155 assert(os_atomic_load(&wq->waitq_defer.mpqc_next, relaxed) == NULL);
156 mpsc_daemon_enqueue(&ipc_object_deallocate_queue,
157 &wq->waitq_defer, MPSC_QUEUE_NONE);
158 }
159
160 static void
ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,__assert_only mpsc_daemon_queue_t dq)161 ipc_object_deallocate_queue_invoke(mpsc_queue_chain_t e,
162 __assert_only mpsc_daemon_queue_t dq)
163 {
164 struct waitq *wq = __container_of(e, struct waitq, waitq_defer);
165 ipc_object_t io = io_from_waitq(wq);
166
167 assert(dq == &ipc_object_deallocate_queue);
168
169 os_atomic_store(&wq->waitq_defer.mpqc_next, NULL, relaxed);
170 ipc_object_free(io_otype(io), io, true);
171 }
172
/*
 * Register ipc_object_deallocate_queue with the thread-deallocate daemon,
 * wiring ipc_object_deallocate_queue_invoke() as its drain callback so
 * objects queued by ipc_object_free_safe() eventually get freed.
 */
void
ipc_object_deallocate_register_queue(void)
{
	thread_deallocate_daemon_register_queue(&ipc_object_deallocate_queue,
	    ipc_object_deallocate_queue_invoke);
}
179
180 /*
181 * Routine: ipc_object_reference
182 * Purpose:
183 * Take a reference to an object.
184 */
185
void
ipc_object_reference(
	ipc_object_t io)
{
	/* io_references must be layout-compatible with os_ref_atomic_t
	 * for the raw-refcount cast below to be valid. */
	static_assert(sizeof(os_ref_atomic_t) == sizeof(io->io_references));
	os_ref_retain_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
193
194 /*
195 * Routine: ipc_object_release
196 * Purpose:
197 * Release a reference to an object.
198 */
199
200 void
ipc_object_release(ipc_object_t io)201 ipc_object_release(
202 ipc_object_t io)
203 {
204 #if DEBUG
205 assert(get_preemption_level() == 0);
206 #endif
207
208 if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
209 /* Free the object */
210 ipc_object_free(io_otype(io), io, true);
211 }
212 }
213
214 /*
215 * Routine: ipc_object_release_safe
216 * Purpose:
217 * Release a reference to an object safely
218 */
219
220 void
ipc_object_release_safe(ipc_object_t io)221 ipc_object_release_safe(
222 ipc_object_t io)
223 {
224 if (os_ref_release_raw((os_ref_atomic_t *)&io->io_references, NULL) == 0) {
225 if (get_preemption_level() == 0) {
226 ipc_object_free(io_otype(io), io, true);
227 } else {
228 ipc_object_free_safe(io);
229 }
230 }
231 }
232
233 /*
234 * Routine: ipc_object_release_live
235 * Purpose:
236 * Release a reference to an object that isn't the last one.
237 */
238
void
ipc_object_release_live(
	ipc_object_t io)
{
	/* "live" variant: the count must not reach zero here */
	os_ref_release_live_raw((os_ref_atomic_t *)&io->io_references, NULL);
}
245
246 /*
247 * Routine: ipc_object_translate
248 * Purpose:
249 * Look up an object in a space.
250 * Conditions:
251 * Nothing locked before. If successful, the object
252 * is returned active and locked. The caller doesn't get a ref.
253 * Returns:
254 * KERN_SUCCESS Object returned locked.
255 * KERN_INVALID_TASK The space is dead.
256 * KERN_INVALID_NAME The name doesn't denote a right
257 * KERN_INVALID_RIGHT Name doesn't denote the correct right
258 */
259 kern_return_t
ipc_object_translate(ipc_space_t space,mach_port_name_t name,mach_port_right_t right,ipc_object_t * objectp)260 ipc_object_translate(
261 ipc_space_t space,
262 mach_port_name_t name,
263 mach_port_right_t right,
264 ipc_object_t *objectp)
265 {
266 ipc_entry_bits_t bits;
267 ipc_object_t object;
268 kern_return_t kr;
269
270 if (!MACH_PORT_RIGHT_VALID_TRANSLATE(right)) {
271 return KERN_INVALID_RIGHT;
272 }
273
274 kr = ipc_right_lookup_read(space, name, &bits, &object);
275 if (kr != KERN_SUCCESS) {
276 return kr;
277 }
278 /* object is locked and active */
279
280 if ((bits & MACH_PORT_TYPE(right)) == MACH_PORT_TYPE_NONE) {
281 io_unlock(object);
282 return KERN_INVALID_RIGHT;
283 }
284
285 *objectp = object;
286 return KERN_SUCCESS;
287 }
288
289 /*
290 * Routine: ipc_object_translate_two
291 * Purpose:
292 * Look up two objects in a space.
293 * Conditions:
294 * Nothing locked before. If successful, the objects
295 * are returned locked. The caller doesn't get a ref.
296 * Returns:
297 * KERN_SUCCESS Objects returned locked.
298 * KERN_INVALID_TASK The space is dead.
299 * KERN_INVALID_NAME A name doesn't denote a right.
300 * KERN_INVALID_RIGHT A name doesn't denote the correct right.
301 */
302
kern_return_t
ipc_object_translate_two(
	ipc_space_t space,
	mach_port_name_t name1,
	mach_port_right_t right1,
	ipc_object_t *objectp1,
	mach_port_name_t name2,
	mach_port_right_t right2,
	ipc_object_t *objectp2)
{
	ipc_entry_t entry1;
	ipc_entry_t entry2;
	ipc_object_t object1, object2;
	kern_return_t kr;
	boolean_t doguard = TRUE;

	/* resolves both names under a single read lock of the space */
	kr = ipc_right_lookup_two_read(space, name1, &entry1, name2, &entry2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is read-locked and active */

	if ((entry1->ie_bits & MACH_PORT_TYPE(right1)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right1 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry1->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			/* raise a guard exception for a genuinely wrong right */
			mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	if ((entry2->ie_bits & MACH_PORT_TYPE(right2)) == MACH_PORT_TYPE_NONE) {
		/* If looking for receive, and the entry used to hold one, give a pass on EXC_GUARD */
		if ((right2 & MACH_PORT_RIGHT_RECEIVE) == MACH_PORT_RIGHT_RECEIVE &&
		    (entry2->ie_bits & MACH_PORT_TYPE_EX_RECEIVE) == MACH_PORT_TYPE_EX_RECEIVE) {
			doguard = FALSE;
		}
		is_read_unlock(space);
		if (doguard) {
			mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/*
	 * Lock both objects (in entry order) before dropping the space
	 * lock, so callers receive them locked and still active.
	 */
	object1 = entry1->ie_object;
	assert(object1 != IO_NULL);
	io_lock(object1);
	if (!io_active(object1)) {
		io_unlock(object1);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	object2 = entry2->ie_object;
	assert(object2 != IO_NULL);
	io_lock(object2);
	if (!io_active(object2)) {
		/* back out: both object locks and the space lock */
		io_unlock(object1);
		io_unlock(object2);
		is_read_unlock(space);
		return KERN_INVALID_NAME;
	}

	*objectp1 = object1;
	*objectp2 = object2;

	/* both objects remain locked for the caller */
	is_read_unlock(space);
	return KERN_SUCCESS;
}
376
377 /*
378 * Routine: ipc_object_alloc_dead
379 * Purpose:
380 * Allocate a dead-name entry.
381 * Conditions:
382 * Nothing locked.
383 * Returns:
384 * KERN_SUCCESS The dead name is allocated.
385 * KERN_INVALID_TASK The space is dead.
386 * KERN_NO_SPACE No room for an entry in the space.
387 */
388
389 kern_return_t
ipc_object_alloc_dead(ipc_space_t space,mach_port_name_t * namep)390 ipc_object_alloc_dead(
391 ipc_space_t space,
392 mach_port_name_t *namep)
393 {
394 ipc_entry_t entry;
395 kern_return_t kr;
396
397 kr = ipc_entry_alloc(space, IO_NULL, namep, &entry);
398 if (kr != KERN_SUCCESS) {
399 return kr;
400 }
401 /* space is write-locked */
402
403 /* null object, MACH_PORT_TYPE_DEAD_NAME, 1 uref */
404
405 entry->ie_bits |= MACH_PORT_TYPE_DEAD_NAME | 1;
406 ipc_entry_modified(space, *namep, entry);
407 is_write_unlock(space);
408 return KERN_SUCCESS;
409 }
410
411 /*
412 * Routine: ipc_object_alloc
413 * Purpose:
414 * Allocate an object.
415 * Conditions:
416 * Nothing locked.
417 * The space is write locked on successful return.
418 * The caller doesn't get a reference for the object.
419 * Returns:
420 * KERN_SUCCESS The object is allocated.
421 * KERN_INVALID_TASK The space is dead.
422 * KERN_NO_SPACE No room for an entry in the space.
423 */
424
425 kern_return_t
ipc_object_alloc(ipc_space_t space,ipc_object_type_t otype,mach_port_type_t type,mach_port_urefs_t urefs,mach_port_name_t * namep,ipc_object_t * objectp)426 ipc_object_alloc(
427 ipc_space_t space,
428 ipc_object_type_t otype,
429 mach_port_type_t type,
430 mach_port_urefs_t urefs,
431 mach_port_name_t *namep,
432 ipc_object_t *objectp)
433 {
434 ipc_object_t object;
435 ipc_entry_t entry;
436 kern_return_t kr;
437
438 assert(otype < IOT_NUMBER);
439 assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
440 assert(type != MACH_PORT_TYPE_NONE);
441 assert(urefs <= MACH_PORT_UREFS_MAX);
442
443 object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
444 os_atomic_init(&object->io_bits, io_makebits(otype));
445 os_atomic_init(&object->io_references, 1); /* for entry, not caller */
446
447 *namep = CAST_MACH_PORT_TO_NAME(object);
448 kr = ipc_entry_alloc(space, object, namep, &entry);
449 if (kr != KERN_SUCCESS) {
450 ipc_object_free(otype, object, false);
451 return kr;
452 }
453 /* space is write-locked */
454
455 entry->ie_bits |= type | urefs;
456 ipc_entry_modified(space, *namep, entry);
457
458 *objectp = object;
459 return KERN_SUCCESS;
460 }
461
462 /*
463 * Routine: ipc_object_alloc_name
464 * Purpose:
465 * Allocate an object, with a specific name.
466 * Conditions:
467 * Nothing locked. If successful, the object is returned locked.
468 * The caller doesn't get a reference for the object.
469 *
470 * finish_init() must call an ipc_*_init function
471 * that will return the object locked (using IPC_PORT_INIT_LOCKED,
472 * or SYNC_POLICY_INIT_LOCKED, or equivalent).
473 *
474 * Returns:
475 * KERN_SUCCESS The object is allocated.
476 * KERN_INVALID_TASK The space is dead.
477 * KERN_NAME_EXISTS The name already denotes a right.
478 */
479
kern_return_t
ipc_object_alloc_name(
	ipc_space_t space,
	ipc_object_type_t otype,
	mach_port_type_t type,
	mach_port_urefs_t urefs,
	mach_port_name_t name,
	ipc_object_t *objectp,
	void (^finish_init)(ipc_object_t))
{
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(otype < IOT_NUMBER);
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);

	/* Z_NOFAIL: io_alloc cannot return NULL here */
	object = io_alloc(otype, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	os_atomic_init(&object->io_bits, io_makebits(otype));
	os_atomic_init(&object->io_references, 1); /* for entry, not caller */

	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		/* object never entered the space: free without finalizing */
		ipc_object_free(otype, object, false);
		return kr;
	}
	/* space is write-locked */

	if (ipc_right_inuse(entry)) {
		/* the requested name is taken; give up */
		is_write_unlock(space);
		ipc_object_free(otype, object, false);
		return KERN_NAME_EXISTS;
	}

	entry->ie_bits |= type | urefs;
	entry->ie_object = object;

	/* per contract, finish_init must return the object locked */
	finish_init(object);
	/* object is locked */
	io_lock_held(object);

	ipc_entry_modified(space, name, entry);
	is_write_unlock(space);

	/* caller receives the object locked, without a reference */
	*objectp = object;
	return KERN_SUCCESS;
}
529
530 /* Routine: ipc_object_validate
531 * Purpose:
532 * Validates an ipc port or port set as belonging to the correct
533 * zone.
534 */
535
536 void
ipc_object_validate(ipc_object_t object)537 ipc_object_validate(
538 ipc_object_t object)
539 {
540 if (io_otype(object) != IOT_PORT_SET) {
541 ip_validate(object);
542 } else {
543 ips_validate(object);
544 }
545 }
546
547 /*
548 * Routine: ipc_object_copyin_type
549 * Purpose:
550 * Convert a send type name to a received type name.
551 */
552
553 mach_msg_type_name_t
ipc_object_copyin_type(mach_msg_type_name_t msgt_name)554 ipc_object_copyin_type(
555 mach_msg_type_name_t msgt_name)
556 {
557 switch (msgt_name) {
558 case MACH_MSG_TYPE_MOVE_RECEIVE:
559 return MACH_MSG_TYPE_PORT_RECEIVE;
560
561 case MACH_MSG_TYPE_MOVE_SEND_ONCE:
562 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
563 return MACH_MSG_TYPE_PORT_SEND_ONCE;
564
565 case MACH_MSG_TYPE_MOVE_SEND:
566 case MACH_MSG_TYPE_MAKE_SEND:
567 case MACH_MSG_TYPE_COPY_SEND:
568 return MACH_MSG_TYPE_PORT_SEND;
569
570 case MACH_MSG_TYPE_DISPOSE_RECEIVE:
571 case MACH_MSG_TYPE_DISPOSE_SEND:
572 case MACH_MSG_TYPE_DISPOSE_SEND_ONCE:
573 /* fall thru */
574 default:
575 return MACH_MSG_TYPE_PORT_NONE;
576 }
577 }
578
579 /*
580 * Routine: ipc_object_copyin
581 * Purpose:
582 * Copyin a capability from a space.
583 * If successful, the caller gets a ref
584 * for the resulting object, unless it is IO_DEAD.
585 * Conditions:
586 * Nothing locked.
587 * Returns:
588 * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
589 * KERN_INVALID_TASK The space is dead.
590 * KERN_INVALID_NAME Name doesn't exist in space.
591 * KERN_INVALID_RIGHT Name doesn't denote correct right.
592 */
593
kern_return_t
ipc_object_copyin(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name,
	ipc_object_t *objectp,
	mach_port_context_t context,
	mach_msg_guard_flags_t *guard_flags,
	ipc_object_copyin_flags_t copyin_flags)
{
	ipc_entry_t entry;
	ipc_port_t soright;
	ipc_port_t release_port;
	kern_return_t kr;
	int assertcnt = 0;

	/* only these two caller flags pass through; DEADOK is always set */
	ipc_object_copyin_flags_t copyin_mask = IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND
	    | IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE;
	copyin_mask = (copyin_flags & copyin_mask) | IPC_OBJECT_COPYIN_FLAGS_DEADOK;

	/*
	 * We allow moving of immovable receive right of a service port when it is from launchd.
	 */
	task_t task = current_task_early();
#ifdef MACH_BSD
	if (task && proc_isinitproc(get_bsdtask_info(task))) {
		copyin_mask |= IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE;
	}
#endif

	/*
	 * Could first try a read lock when doing
	 * MACH_MSG_TYPE_COPY_SEND, MACH_MSG_TYPE_MAKE_SEND,
	 * and MACH_MSG_TYPE_MAKE_SEND_ONCE.
	 */

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	/* ipc_right_copyin does the real work and unlocks nothing itself */
	release_port = IP_NULL;
	kr = ipc_right_copyin(space, name, entry,
	    msgt_name, copyin_mask,
	    objectp, &soright,
	    &release_port,
	    &assertcnt,
	    context,
	    guard_flags);
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/* drop any importance assertions the copyin detached from the entry */
	if (0 < assertcnt && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
		ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, assertcnt);
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* release happens after all locks are dropped */
	if (release_port != IP_NULL) {
		ip_release(release_port);
	}

	/* deliver a port-deleted notification if one was armed on the entry */
	if ((kr == KERN_SUCCESS) && (soright != IP_NULL)) {
		ipc_notify_port_deleted(soright, name);
	}

	return kr;
}
662
663 /*
664 * Routine: ipc_object_copyin_from_kernel
665 * Purpose:
666 * Copyin a naked capability from the kernel.
667 *
668 * MACH_MSG_TYPE_MOVE_RECEIVE
669 * The receiver must be ipc_space_kernel
670 * or the receive right must already be in limbo.
671 * Consumes the naked receive right.
672 * MACH_MSG_TYPE_COPY_SEND
673 * A naked send right must be supplied.
674 * The port gains a reference, and a send right
675 * if the port is still active.
676 * MACH_MSG_TYPE_MAKE_SEND
677 * The receiver must be ipc_space_kernel.
678 * The port gains a reference and a send right.
679 * MACH_MSG_TYPE_MOVE_SEND
680 * Consumes a naked send right.
681 * MACH_MSG_TYPE_MAKE_SEND_ONCE
682 * The port gains a reference and a send-once right.
683 * Receiver also be the caller of device subsystem,
684 * so no assertion.
685 * MACH_MSG_TYPE_MOVE_SEND_ONCE
686 * Consumes a naked send-once right.
687 * Conditions:
688 * Nothing locked.
689 */
690
void
ipc_object_copyin_from_kernel(
	ipc_object_t object,
	mach_msg_type_name_t msgt_name)
{
	assert(IO_VALID(object));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		require_ip_active(port);
		if (ip_in_a_space(port)) {
			/* only the kernel space may hold it at this point */
			assert(ip_in_space(port, ipc_space_kernel));
			assert(port->ip_immovable_receive == 0);

			/* relevant part of ipc_port_clear_receiver */
			port->ip_mscount = 0;

			/* port transtions to IN-LIMBO state */
			port->ip_receiver_name = MACH_PORT_NULL;
			port->ip_destination = IP_NULL;
		}
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		/* add a send right and a reference; works even on a dead port */
		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(port->ip_srights > 0);
		}
		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			/* receiver must be the kernel, or a remote node's space */
			assert(ip_in_a_space(port));
			assert((ip_in_space(port, ipc_space_kernel)) ||
			    (port->ip_receiver->is_node_id != HOST_LOCAL_NODE));
			port->ip_mscount++;
		}

		ip_srights_inc(port);
		ip_reference(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		/* move naked send right into the message */
		assert(ip_object_to_port(object)->ip_srights);
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		ip_mq_lock(port);
		if (ip_active(port)) {
			assert(ip_in_a_space(port));
		}
		/* mints the send-once right and takes a reference */
		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* move naked send-once right into the message */
		assert(ip_object_to_port(object)->ip_sorights);
		break;
	}

	default:
		panic("ipc_object_copyin_from_kernel: strange rights");
	}
}
777
778 /*
779 * Routine: ipc_object_destroy
780 * Purpose:
781 * Destroys a naked capability.
782 * Consumes a ref for the object.
783 *
784 * A receive right should be in limbo or in transit.
785 * Conditions:
786 * Nothing locked.
787 */
788
789 void
ipc_object_destroy(ipc_object_t object,mach_msg_type_name_t msgt_name)790 ipc_object_destroy(
791 ipc_object_t object,
792 mach_msg_type_name_t msgt_name)
793 {
794 assert(IO_VALID(object));
795 assert(io_otype(object) == IOT_PORT);
796
797 switch (msgt_name) {
798 case MACH_MSG_TYPE_PORT_SEND:
799 ipc_port_release_send(ip_object_to_port(object));
800 break;
801
802 case MACH_MSG_TYPE_PORT_SEND_ONCE:
803 io_lock(object);
804 ipc_notify_send_once_and_unlock(ip_object_to_port(object));
805 break;
806
807 case MACH_MSG_TYPE_PORT_RECEIVE:
808 ipc_port_release_receive(ip_object_to_port(object));
809 break;
810
811 default:
812 panic("ipc_object_destroy: strange rights");
813 }
814 }
815
816 /*
817 * Routine: ipc_object_destroy_dest
818 * Purpose:
819 * Destroys a naked capability for the destination of
820 * of a message. Consumes a ref for the object.
821 *
822 * Conditions:
823 * Nothing locked.
824 */
825
826 void
ipc_object_destroy_dest(ipc_object_t object,mach_msg_type_name_t msgt_name)827 ipc_object_destroy_dest(
828 ipc_object_t object,
829 mach_msg_type_name_t msgt_name)
830 {
831 ipc_port_t port = ip_object_to_port(object);
832
833 assert(IO_VALID(object));
834 assert(io_otype(object) == IOT_PORT);
835
836 switch (msgt_name) {
837 case MACH_MSG_TYPE_PORT_SEND:
838 ipc_port_release_send(port);
839 break;
840
841 case MACH_MSG_TYPE_PORT_SEND_ONCE:
842 ip_mq_lock(port);
843 ipc_notify_send_once_and_unlock(port);
844 break;
845
846 default:
847 panic("ipc_object_destroy_dest: strange rights");
848 }
849 }
850
851 /*
852 * Routine: ipc_object_insert_send_right
853 * Purpose:
854 * Insert a send right into an object already in the space.
855 * The specified name must already point to a valid object.
856 *
857 * Note: This really is a combined copyin()/copyout(),
858 * that avoids most of the overhead of being implemented that way.
859 *
860 * This is the fastpath for mach_port_insert_right.
861 *
862 * Conditions:
863 * Nothing locked.
864 *
865 * msgt_name must be MACH_MSG_TYPE_MAKE_SEND_ONCE or
866 * MACH_MSG_TYPE_MOVE_SEND_ONCE.
867 *
868 * Returns:
869 * KERN_SUCCESS Copied out object, consumed ref.
870 * KERN_INVALID_TASK The space is dead.
871 * KERN_INVALID_NAME Name doesn't exist in space.
872 * KERN_INVALID_CAPABILITY The object is dead.
873 * KERN_RIGHT_EXISTS Space has rights under another name.
874 */
kern_return_t
ipc_object_insert_send_right(
	ipc_space_t space,
	mach_port_name_t name,
	mach_msg_type_name_t msgt_name)
{
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_entry_t entry;
	kern_return_t kr;

	assert(msgt_name == MACH_MSG_TYPE_MAKE_SEND ||
	    msgt_name == MACH_MSG_TYPE_COPY_SEND);

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	if (!IO_VALID(entry->ie_object)) {
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	io_lock(object);
	if (!io_active(object)) {
		kr = KERN_INVALID_CAPABILITY;
	} else if (msgt_name == MACH_MSG_TYPE_MAKE_SEND) {
		/* MAKE_SEND requires the caller to hold the receive right */
		if (bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port = ip_object_to_port(object);
			port->ip_mscount++;
			/* mint the send right if the entry didn't have one */
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				ip_srights_inc(port);
				bits |= MACH_PORT_TYPE_SEND;
			}
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits += 1; /* increment urefs */
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	} else { // MACH_MSG_TYPE_COPY_SEND
		/* COPY_SEND requires an existing send right to duplicate */
		if (bits & MACH_PORT_TYPE_SEND) {
			/* leave urefs pegged to maximum if it overflowed */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits + 1; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);
			kr = KERN_SUCCESS;
		} else {
			kr = KERN_INVALID_RIGHT;
		}
	}

	io_unlock(object);
	is_write_unlock(space);

	return kr;
}
942
943 /*
944 * Routine: ipc_object_copyout
945 * Purpose:
946 * Copyout a capability, placing it into a space.
947 * Always consumes a ref for the object.
948 * Conditions:
949 * Nothing locked.
950 * Returns:
951 * KERN_SUCCESS Copied out object, consumed ref.
952 * KERN_INVALID_TASK The space is dead.
953 * KERN_INVALID_CAPABILITY The object is dead.
954 * KERN_NO_SPACE No room in space for another right.
955 * KERN_UREFS_OVERFLOW Urefs limit exceeded
956 * and overflow wasn't specified.
957 */
958
kern_return_t
ipc_object_copyout(
	ipc_space_t space,
	ipc_object_t object,
	mach_msg_type_name_t msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t *context,
	mach_msg_guard_flags_t *guard_flags,
	mach_port_name_t *namep)
{
	struct knote *kn = current_thread()->ith_knote;
	mach_port_name_t name;
	ipc_port_t port = ip_object_to_port(object);
	ipc_entry_t entry;
	kern_return_t kr;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	/* pre-arm the receive knote's turnstile before taking any locks */
	if (ITH_KNOTE_VALID(kn, msgt_name)) {
		filt_machport_turnstile_prepare_lazily(kn, msgt_name, port);
	}

	is_write_lock(space);

	/*
	 * Retry loop: each pass must re-validate the space because
	 * growing the entry table, or a label substitution, drops
	 * the space lock.
	 */
	for (;;) {
		ipc_port_t port_subst = IP_NULL;

		if (!is_active(space)) {
			is_write_unlock(space);
			kr = KERN_INVALID_TASK;
			goto out;
		}

		/* reserve room for one entry before locking the object */
		kr = ipc_entries_hold(space, 1);
		if (kr != KERN_SUCCESS) {
			/* unlocks/locks space, so must start again */

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (kr != KERN_SUCCESS) {
				/* space is unlocked */
				goto out;
			}
			continue;
		}

		io_lock(object);
		if (!io_active(object)) {
			io_unlock(object);
			is_write_unlock(space);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* Don't actually copyout rights we aren't allowed to */
		if (!ip_label_check(space, port, msgt_name, &flags, &port_subst)) {
			io_unlock(object);
			is_write_unlock(space);
			assert(port_subst == IP_NULL);
			kr = KERN_INVALID_CAPABILITY;
			goto out;
		}

		/* is the kolabel requesting a substitution */
		if (port_subst != IP_NULL) {
			/*
			 * port is unlocked, its right consumed
			 * space is unlocked
			 */
			assert(msgt_name == MACH_MSG_TYPE_PORT_SEND);
			port = port_subst;
			if (!IP_VALID(port)) {
				/* substituted with a dead right: fail */
				object = IO_DEAD;
				kr = KERN_INVALID_CAPABILITY;
				goto out;
			}

			/* restart the copyout with the substituted port */
			object = ip_to_object(port);
			is_write_lock(space);
			continue;
		}

		break;
	}

	/* space is write-locked and active, object is locked and active */

	/* reuse an existing name for this right if the space has one */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &name, &entry)) {
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else {
		/* consumes the entry reserved by ipc_entries_hold above */
		ipc_entry_claim(space, object, &name, &entry);
	}

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, flags, context, guard_flags, object);

	/* object is unlocked */
	is_write_unlock(space);

out:
	if (kr == KERN_SUCCESS) {
		*namep = name;
	} else if (IO_VALID(object)) {
		/* on failure the right must still be consumed */
		ipc_object_destroy(object, msgt_name);
	}

	return kr;
}
1068
1069 /*
1070 * Routine: ipc_object_copyout_name
1071 * Purpose:
1072 * Copyout a capability, placing it into a space.
1073 * The specified name is used for the capability.
1074 * If successful, consumes a ref for the object.
1075 * Conditions:
1076 * Nothing locked.
1077 * Returns:
1078 * KERN_SUCCESS Copied out object, consumed ref.
1079 * KERN_INVALID_TASK The space is dead.
1080 * KERN_INVALID_CAPABILITY The object is dead.
1081 * KERN_UREFS_OVERFLOW Urefs limit exceeded
1082 * and overflow wasn't specified.
1083 * KERN_RIGHT_EXISTS Space has rights under another name.
1084 * KERN_NAME_EXISTS Name is already used.
1085 * KERN_INVALID_VALUE Supplied port name is invalid.
1086 */
1087
kern_return_t
ipc_object_copyout_name(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        name)
{
	ipc_port_t port = ip_object_to_port(object);
	mach_port_name_t oname;
	ipc_entry_t oentry;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	/* captured under the locks, applied after both locks are dropped */
	int assertcnt = 0;
	ipc_importance_task_t task_imp = IIT_NULL;
#endif /* IMPORTANCE_INHERITANCE */

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);

	/* Reserve (or locate) an entry under the caller-chosen name. */
	kr = ipc_entry_alloc_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	io_lock(object);

	/*
	 * Don't actually copyout rights we aren't allowed to
	 *
	 * In particular, kolabel-ed objects do not allow callers
	 * to pick the name they end up with.
	 */
	if (!io_active(object) || ip_is_kolabeled(port)) {
		io_unlock(object);
		/* undo our entry reservation, unless the name was already in use */
		if (!ipc_right_inuse(entry)) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		}
		is_write_unlock(space);
		return KERN_INVALID_CAPABILITY;
	}

	/* space is write-locked and active, object is locked and active */

	/*
	 * If the space already holds rights for this port (send-once
	 * rights are exempt from this check), the right must be copied
	 * out under that pre-existing name, or the operation fails.
	 */
	if ((msgt_name != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
	    ipc_right_reverse(space, object, &oname, &oentry)) {
		if (name != oname) {
			/* rights already live under a different name */
			io_unlock(object);
			if (!ipc_right_inuse(entry)) {
				ipc_entry_dealloc(space, IO_NULL, name, entry);
			}
			is_write_unlock(space);
			return KERN_RIGHT_EXISTS;
		}

		assert(entry == oentry);
		assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
	} else if (ipc_right_inuse(entry)) {
		/* the requested name is taken by some unrelated right */
		io_unlock(object);
		is_write_unlock(space);
		return KERN_NAME_EXISTS;
	} else {
		assert(entry->ie_object == IO_NULL);

		entry->ie_object = object;
	}

#if IMPORTANCE_INHERITANCE
	/*
	 * We are slamming a receive right into the space, without
	 * first having been enqueued on a port destined there. So,
	 * we have to arrange to boost the task appropriately if this
	 * port has assertions (and the task wants them).
	 */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		if (space->is_task != TASK_NULL) {
			task_imp = space->is_task->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(task_imp)) {
				/* snapshot the count now; the boost happens below */
				assertcnt = port->ip_impcount;
				ipc_importance_task_reference(task_imp);
			} else {
				task_imp = IIT_NULL;
			}
		}

		/* take port out of limbo */
		port->ip_tempowner = 0;
	}

#endif /* IMPORTANCE_INHERITANCE */

	kr = ipc_right_copyout(space, name, entry,
	    msgt_name, IPC_OBJECT_COPYOUT_FLAGS_NONE, NULL, NULL, object);

	/* object is unlocked */
	is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
	/*
	 * Add the assertions to the task that we captured before
	 * (must be done with no IPC locks held).
	 */
	if (task_imp != IIT_NULL) {
		ipc_importance_task_hold_internal_assertion(task_imp, assertcnt);
		ipc_importance_task_release(task_imp);
	}
#endif /* IMPORTANCE_INHERITANCE */

	return kr;
}
1199
1200 /*
1201 * Routine: ipc_object_copyout_dest
1202 * Purpose:
1203 * Translates/consumes the destination right of a message.
1204 * This is unlike normal copyout because the right is consumed
1205 * in a funny way instead of being given to the receiving space.
1206 * The receiver gets his name for the port, if he has receive
1207 * rights, otherwise MACH_PORT_NULL.
1208 * Conditions:
1209 * The object is locked and active. Nothing else locked.
1210 * The object is unlocked and loses a reference.
1211 */
1212
void
ipc_object_copyout_dest(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_name_t        *namep)
{
	mach_port_name_t name;

	assert(IO_VALID(object));
	assert(io_active(object));

	/*
	 * If the space is the receiver/owner of the object,
	 * then we quietly consume the right and return
	 * the space's name for the object. Otherwise
	 * we destroy the right and return MACH_PORT_NULL.
	 */

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_notify_nsenders_t nsrequest = { };

		/* the receiver knows the port by its receive-right name */
		if (ip_in_space(port, space)) {
			name = ip_get_receiver_name(port);
		} else {
			name = MACH_PORT_NULL;
		}
		/* consume the send right; the last one arms a no-senders note */
		ip_srights_dec(port);
		if (port->ip_srights == 0) {
			nsrequest = ipc_notify_no_senders_prepare(port);
		}
		ipc_port_clear_sync_rcv_thread_boost_locked(port);
		/* port unlocked */

		/* deliver the notification only after dropping the port lock */
		ipc_notify_no_senders_emit(nsrequest);

		ip_release(port);
		break;
	}

	case MACH_MSG_TYPE_PORT_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);

		assert(port->ip_sorights > 0);

		if (ip_in_space(port, space)) {
			/* quietly consume the send-once right */
			ip_sorights_dec(port);
			name = ip_get_receiver_name(port);
			ipc_port_clear_sync_rcv_thread_boost_locked(port);
			/* port unlocked */
			ip_release(port);
		} else {
			/*
			 * A very bizarre case. The message
			 * was received, but before this copyout
			 * happened the space lost receive rights.
			 * We can't quietly consume the soright
			 * out from underneath some other task,
			 * so generate a send-once notification.
			 */

			ipc_notify_send_once_and_unlock(port);
			name = MACH_PORT_NULL;
		}

		break;
	}

	default:
		panic("ipc_object_copyout_dest: strange rights");
		/* NOTREACHED — panic does not return */
		name = MACH_PORT_DEAD;
	}

	*namep = name;
}
1291
/*
 * The waitq embedded in struct ipc_object_waitq must sit at the same
 * offset as the waitq in ports (ip_waitq) and port sets (ips_wqset),
 * so either object can be viewed through its common waitq header.
 */
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_port, ip_waitq));
static_assert(offsetof(struct ipc_object_waitq, iowq_waitq) ==
    offsetof(struct ipc_pset, ips_wqset));
1296
1297 /*
1298 * Routine: ipc_object_lock
1299 * Purpose:
1300 * Validate, then acquire a lock on an ipc object
1301 */
1302 void
ipc_object_lock(ipc_object_t io)1303 ipc_object_lock(ipc_object_t io)
1304 {
1305 ipc_object_validate(io);
1306 waitq_lock(io_waitq(io));
1307 }
1308
1309 __abortlike
1310 static void
ipc_object_validate_preflight_panic(ipc_object_t io)1311 ipc_object_validate_preflight_panic(ipc_object_t io)
1312 {
1313 panic("ipc object %p is neither a port or a port-set", io);
1314 }
1315
1316 /*
1317 * Routine: ipc_object_lock_allow_invalid
1318 * Purpose:
1319 * Speculatively try to lock an object in an undefined state.
1320 *
1321 * This relies on the fact that IPC object memory is allocated
1322 * from sequestered zones, so at a given address, one can find:
1323 * 1. a valid object,
1324 * 2. a freed or invalid (uninitialized) object,
1325 * 3. unmapped memory.
1326 *
1327 * (2) is possible because the zone is made with ZC_ZFREE_CLEARMEM which
1328 * ensures freed elements are always zeroed.
1329 *
1330 * (3) is a direct courtesy of waitq_lock_allow_invalid().
1331 *
1332 * In order to disambiguate (1) from (2), we use the "waitq valid"
1333 * bit which is part of the lock. When that bit is absent,
1334 * waitq_lock() will function as expected, but
1335 * waitq_lock_allow_invalid() will not.
1336 *
1337 * Objects are then initialized and destroyed carefully so that
1338 * this "valid bit" is only set when the object invariants are
1339 * respected.
1340 *
1341 * Returns:
1342 * true: the lock was acquired
1343 * false: the object was freed or not initialized.
1344 */
bool
ipc_object_lock_allow_invalid(ipc_object_t orig_io)
{
	struct waitq *orig_wq = io_waitq(orig_io);
	/* under PGZ, translate a possibly-protected address to its backing slot */
	struct waitq *wq = pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY);

	/* the pointer must land in the port or port-set zone, else it is bogus */
	switch (zone_id_for_element(wq, sizeof(*wq))) {
	case ZONE_ID_IPC_PORT:
	case ZONE_ID_IPC_PORT_SET:
		break;
	default:
#if CONFIG_PROB_GZALLOC
		if (orig_wq != wq) {
			/*
			 * The element was PGZ protected, and the translation
			 * returned another type than port or port-set, or
			 * ZONE_ID_INVALID (wq is NULL).
			 *
			 * We have to allow this skew, and assume the slot
			 * held a now freed port/port-set.
			 */
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		ipc_object_validate_preflight_panic(orig_io);
	}

	if (__probable(waitq_lock_allow_invalid(wq))) {
		ipc_object_validate(io_from_waitq(wq));
#if CONFIG_PROB_GZALLOC
		/* re-translate to detect the slot being recycled under us */
		if (__improbable(wq != orig_wq &&
		    wq != pgz_decode_allow_invalid(orig_wq, ZONE_ID_ANY))) {
			/*
			 * This object is no longer held in the slot,
			 * whatever this object is, it's not the droid
			 * we're looking for. Pretend we failed the lock.
			 */
			waitq_unlock(wq);
			return false;
		}
#endif /* CONFIG_PROB_GZALLOC */
		return true;
	}
	return false;
}
1390
1391 /*
1392 * Routine: ipc_object_lock_try
1393 * Purpose:
1394 * Validate, then try to acquire a lock on an object,
1395 * fail if there is an existing busy lock
1396 */
1397 bool
ipc_object_lock_try(ipc_object_t io)1398 ipc_object_lock_try(ipc_object_t io)
1399 {
1400 ipc_object_validate(io);
1401 return waitq_lock_try(io_waitq(io));
1402 }
1403
1404 /*
1405 * Routine: ipc_object_unlock
1406 * Purpose:
1407 * Unlocks the given object.
1408 */
1409 void
ipc_object_unlock(ipc_object_t io)1410 ipc_object_unlock(ipc_object_t io)
1411 {
1412 waitq_unlock(io_waitq(io));
1413 }
1414