1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_right.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC capabilities.
71 */
72
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <ipc/port.h>
81 #include <ipc/ipc_entry.h>
82 #include <ipc/ipc_space.h>
83 #include <ipc/ipc_object.h>
84 #include <ipc/ipc_hash.h>
85 #include <ipc/ipc_port.h>
86 #include <ipc/ipc_pset.h>
87 #include <ipc/ipc_right.h>
88 #include <ipc/ipc_notify.h>
89 #include <ipc/ipc_table.h>
90 #include <ipc/ipc_importance.h>
91 #include <ipc/ipc_service_port.h>
92 #include <security/mac_mach_internal.h>
93
94 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
95
96 /*
97 * Routine: ipc_right_lookup_read
98 * Purpose:
99 * Finds an entry in a space, given the name.
100 * Conditions:
101 * Nothing locked.
102 * If an object is found, it is locked and active.
103 * Returns:
104 * KERN_SUCCESS Found an entry.
105 * KERN_INVALID_TASK The space is dead.
106 * KERN_INVALID_NAME Name doesn't exist in space.
107 */
108 #if MACH_LOCKFREE_SPACE
109 kern_return_t
ipc_right_lookup_read(ipc_space_t space,mach_port_name_t name,ipc_entry_bits_t * bitsp,ipc_object_t * objectp)110 ipc_right_lookup_read(
111 ipc_space_t space,
112 mach_port_name_t name,
113 ipc_entry_bits_t *bitsp,
114 ipc_object_t *objectp)
115 {
116 mach_port_index_t index;
117 ipc_entry_t table;
118 hazard_guard_t guard;
119 ipc_object_t object;
120 kern_return_t kr;
121
122 index = MACH_PORT_INDEX(name);
123 if (__improbable(index == 0)) {
124 *bitsp = 0;
125 *objectp = IO_NULL;
126 return KERN_INVALID_NAME;
127 }
128
129 guard = hazard_guard_get(0);
130
131 /*
132 * Acquire a (possibly stale) pointer to the table,
133 * and guard it so that it can't be deallocated while we use it.
134 *
135 * hazard_guard_acquire() has the property that it strongly serializes
136 * after any store-release. This is important because it means that if
137 * one considers this (broken) userspace usage:
138 *
139 * Thread 1:
140 * - makes a semaphore, gets name 0x1003
141 * - stores that name to a global `sema` in userspace
142 *
143 * Thread 2:
144 * - spins to observe `sema` becoming non 0
145 * - calls semaphore_wait() on 0x1003
146 *
147 * Then, because in order to return 0x1003 this thread issued
148 * a store-release (when calling is_write_unlock()),
149 * then this hazard_guard_acquire() can't possibly observe a table
150 * pointer that is older than the one that was current when the
151 * semaphore was made.
152 *
153 * This fundamental property allows us to never loop (though arguably
154 * that is because the loop is inside hazard_guard_acquire()).
155 */
156 table = hazard_guard_acquire(guard, &space->is_table);
157 if (__improbable(table == NULL)) {
158 kr = KERN_INVALID_TASK;
159 goto out_put;
160 }
161 if (__improbable(index >= table->ie_size)) {
162 kr = KERN_INVALID_NAME;
163 goto out_put;
164 }
165
166 /*
167 * Note: this should be an atomic load, but PAC and atomics
168 * don't work interact well together.
169 */
170 object = table[index].ie_volatile_object;
171
172 /*
173 * Attempt to lock an object that lives in this entry.
174 * It might fail or be a completely different object by now.
175 *
176 * Make sure that acquiring the lock is fully ordered after any
177 * lock-release (using os_atomic_barrier_before_lock_acquire()).
178 * This allows us to always reliably observe space termination below.
179 */
180 os_atomic_barrier_before_lock_acquire();
181 if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
182 kr = KERN_INVALID_NAME;
183 goto out_put;
184 }
185
186 /*
187 * Now that we hold the object lock, we are preventing any entry
188 * in this space for this object to be mutated.
189 *
190 * If the space didn't grow after we acquired our hazardous reference,
191 * and before a mutation of the entry, then holding the object lock
192 * guarantees we will observe the truth of ie_bits, ie_object and
193 * ie_request (those are always mutated with the object lock held).
194 *
195 * However this ordering is problematic:
196 * - [A]cquisition of the table pointer
197 * - [G]rowth of the space (making the table pointer in [A] stale)
198 * - [M]utation of the entry
199 * - [L]ocking of the object read through [A].
200 *
201 * The space lock is held for both [G] and [M], and the object lock
202 * is held for [M], which means that once we lock the object we can
203 * observe if [G] happenend by reloading the table pointer.
204 *
205 * We might still fail to observe any growth operation that happened
206 * after the last mutation of this object's entry, because holding
207 * an object lock doesn't guarantee anything about the liveness
208 * of the space table pointer. This is not a problem at all: by
209 * definition, those didn't affect the state of the entry.
210 *
211 * TODO: a data-structure where the entries are grown by "slabs",
212 * would allow for the address of an ipc_entry_t to never
213 * change once it exists in a space and would avoid a full
214 * hazard reacquire (as well as making space growth faster).
215 * We however still need to check for termination.
216 */
217 ipc_entry_t tmp = hazard_ptr_load(&space->is_table);
218 if (__improbable(tmp != table)) {
219 table = hazard_guard_reacquire_val(guard, &space->is_table, tmp);
220 if (__improbable(table == NULL)) {
221 kr = KERN_INVALID_TASK;
222 goto out_put_unlock;
223 }
224 }
225
226 /*
227 * Now that we hold the lock and have a "fresh enough" table pointer,
228 * validate if this entry is what we think it is.
229 *
230 * To the risk of being repetitive, we still need to protect
231 * those accesses under the hazard guard, because subsequent
232 * table growths might retire the memory. However we know
233 * those growths will have left our entry unchanged.
234 */
235 if (__improbable(table[index].ie_object != object)) {
236 kr = KERN_INVALID_NAME;
237 goto out_put_unlock;
238 }
239
240 ipc_entry_bits_t bits = table[index].ie_bits;
241 if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
242 IE_BITS_TYPE(bits == MACH_PORT_TYPE_NONE))) {
243 kr = KERN_INVALID_NAME;
244 goto out_put_unlock;
245 }
246
247 /* Done with hazardous accesses to the table */
248 hazard_guard_put(guard);
249
250 *bitsp = bits;
251 *objectp = object;
252 return KERN_SUCCESS;
253
254 out_put_unlock:
255 ipc_object_unlock(object);
256 out_put:
257 hazard_guard_put(guard);
258 return kr;
259 }
260 #else /* !MACH_LOCKFREE_SPACE */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t        *bitsp,
	ipc_object_t            *objectp)
{
	kern_return_t kr;
	ipc_entry_t entry;
	ipc_object_t obj;
	ipc_entry_bits_t bits;

	kr = ipc_right_lookup_write(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		return kr;
	}
	/* space is write-locked and active */

	/* snapshot the entry while the space lock pins it in place */
	obj = entry->ie_object;
	bits = entry->ie_bits;
	if (obj == IO_NULL) {
		is_write_unlock(space);
		return KERN_INVALID_NAME;
	}

	/*
	 * Take the object lock before dropping the space lock, so the
	 * bits we captured above stay associated with this object.
	 */
	io_lock(obj);
	is_write_unlock(space);

	/* object may have been destroyed since the entry was created */
	if (!io_active(obj)) {
		io_unlock(obj);
		return KERN_INVALID_NAME;
	}

	/* success: object is returned locked and active */
	*objectp = obj;
	*bitsp = bits;
	return KERN_SUCCESS;
}
298 #endif
299
300 /*
301 * Routine: ipc_right_lookup_write
302 * Purpose:
303 * Finds an entry in a space, given the name.
304 * Conditions:
305 * Nothing locked. If successful, the space is write-locked.
306 * Returns:
307 * KERN_SUCCESS Found an entry.
308 * KERN_INVALID_TASK The space is dead.
309 * KERN_INVALID_NAME Name doesn't exist in space.
310 */
311
312 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)313 ipc_right_lookup_write(
314 ipc_space_t space,
315 mach_port_name_t name,
316 ipc_entry_t *entryp)
317 {
318 ipc_entry_t entry;
319
320 assert(space != IS_NULL);
321
322 is_write_lock(space);
323
324 if (!is_active(space)) {
325 is_write_unlock(space);
326 return KERN_INVALID_TASK;
327 }
328
329 if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
330 is_write_unlock(space);
331 return KERN_INVALID_NAME;
332 }
333
334 *entryp = entry;
335 return KERN_SUCCESS;
336 }
337
338 /*
339 * Routine: ipc_right_lookup_two_write
340 * Purpose:
341 * Like ipc_right_lookup except that it returns two
342 * entries for two different names that were looked
343 * up under the same space lock.
344 * Conditions:
345 * Nothing locked. If successful, the space is write-locked.
346 * Returns:
347 * KERN_INVALID_TASK The space is dead.
348 * KERN_INVALID_NAME Name doesn't exist in space.
349 */
350
351 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)352 ipc_right_lookup_two_write(
353 ipc_space_t space,
354 mach_port_name_t name1,
355 ipc_entry_t *entryp1,
356 mach_port_name_t name2,
357 ipc_entry_t *entryp2)
358 {
359 ipc_entry_t entry1;
360 ipc_entry_t entry2;
361
362 assert(space != IS_NULL);
363
364 is_write_lock(space);
365
366 if (!is_active(space)) {
367 is_write_unlock(space);
368 return KERN_INVALID_TASK;
369 }
370
371 if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
372 is_write_unlock(space);
373 mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
374 return KERN_INVALID_NAME;
375 }
376 if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
377 is_write_unlock(space);
378 mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
379 return KERN_INVALID_NAME;
380 }
381 *entryp1 = entry1;
382 *entryp2 = entry2;
383 return KERN_SUCCESS;
384 }
385
386 /*
387 * Routine: ipc_right_reverse
388 * Purpose:
389 * Translate (space, object) -> (name, entry).
390 * Only finds send/receive rights.
391 * Returns TRUE if an entry is found; if so,
392 * the object active.
393 * Conditions:
394 * The space must be locked (read or write) and active.
395 * The port is locked and active
396 */
397
398 bool
ipc_right_reverse(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)399 ipc_right_reverse(
400 ipc_space_t space,
401 ipc_object_t object,
402 mach_port_name_t *namep,
403 ipc_entry_t *entryp)
404 {
405 ipc_port_t port;
406 mach_port_name_t name;
407 ipc_entry_t entry;
408
409 /* would switch on io_otype to handle multiple types of object */
410
411 assert(is_active(space));
412 assert(io_otype(object) == IOT_PORT);
413
414 port = ip_object_to_port(object);
415 require_ip_active(port);
416
417 ip_mq_lock_held(port);
418
419 if (ip_in_space(port, space)) {
420 name = ip_get_receiver_name(port);
421 assert(name != MACH_PORT_NULL);
422
423 entry = ipc_entry_lookup(space, name);
424
425 assert(entry != IE_NULL);
426 assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
427 assert(port == ip_object_to_port(entry->ie_object));
428
429 *namep = name;
430 *entryp = entry;
431 return true;
432 }
433
434 if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
435 entry = *entryp;
436 assert(entry != IE_NULL);
437 assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
438 assert(port == ip_object_to_port(entry->ie_object));
439
440 return true;
441 }
442
443 return false;
444 }
445
446 /*
 *	Routine:	ipc_right_request_alloc
448 * Purpose:
449 * Make a dead-name request, returning the previously
450 * registered send-once right. If notify is IP_NULL,
451 * just cancels the previously registered request.
452 *
453 * Conditions:
454 * Nothing locked. May allocate memory.
455 * Only consumes/returns refs if successful.
456 * Returns:
457 * KERN_SUCCESS Made/canceled dead-name request.
458 * KERN_INVALID_TASK The space is dead.
459 * KERN_INVALID_NAME Name doesn't exist in space.
460 * KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
461 * KERN_INVALID_ARGUMENT Name denotes dead name, but
462 * immediate is FALSE or notify is IP_NULL.
463 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
464 */
465
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	boolean_t               immediate,
	boolean_t               send_possible,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_request_index_t prev_request;
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;

#if IMPORTANCE_INHERITANCE
	boolean_t needboost = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: we may have to drop all locks to grow the port's
	 * request table, after which the entry must be looked up afresh.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		prev_request = entry->ie_request;

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && prev_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (send_possible && immediate &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) || !ip_full(port))) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					/* deliver the notification right away */
					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there is a previous request, free it. Any subsequent
				 * allocation cannot fail, thus assuring an atomic swap.
				 */
				if (prev_request != IE_REQ_NONE) {
					previous = ipc_port_request_cancel(port, name, prev_request);
				}

#if IMPORTANCE_INHERITANCE
				kr = ipc_port_request_alloc(port, name, notify,
				    send_possible, immediate,
				    &new_request, &needboost);
#else
				kr = ipc_port_request_alloc(port, name, notify,
				    send_possible, immediate,
				    &new_request);
#endif /* IMPORTANCE_INHERITANCE */
				if (kr != KERN_SUCCESS) {
					/* table is full: grow it and start over */
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}


				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				if (needboost == TRUE) {
					/*
					 * ipc_port_importance_delta() consumes
					 * the port lock when it returns TRUE.
					 */
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if ((send_possible || immediate) && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/*
			 * The dead-name notification carries a user reference
			 * on the name; account for it here.
			 * Leave urefs pegged to maximum if it overflowed.
			 */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the ref ipc_right_check() left us, if any */
			if (port != IP_NULL) {
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		/*
		 * Right exists but is of the wrong flavor: a live dead name
		 * without immediate semantics is an argument error, anything
		 * else is simply not a port/dead-name right.
		 */
		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	/* hand the previously-registered send-once right back to the caller */
	*previousp = previous;
	return KERN_SUCCESS;
}
634
635 /*
636 * Routine: ipc_right_request_cancel
637 * Purpose:
638 * Cancel a notification request and return the send-once right.
639 * Afterwards, entry->ie_request == 0.
640 * Conditions:
641 * The space must be write-locked; the port must be locked.
642 * The port and space must be active.
643 */
644
645 ipc_port_t
ipc_right_request_cancel(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)646 ipc_right_request_cancel(
647 ipc_space_t space,
648 ipc_port_t port,
649 mach_port_name_t name,
650 ipc_entry_t entry)
651 {
652 ipc_port_t previous;
653
654 require_ip_active(port);
655 assert(is_active(space));
656 assert(port == ip_object_to_port(entry->ie_object));
657
658 if (entry->ie_request == IE_REQ_NONE) {
659 return IP_NULL;
660 }
661
662 previous = ipc_port_request_cancel(port, name, entry->ie_request);
663 entry->ie_request = IE_REQ_NONE;
664 ipc_entry_modified(space, name, entry);
665 return previous;
666 }
667
668 /*
669 * Routine: ipc_right_inuse
670 * Purpose:
671 * Check if an entry is being used.
672 * Returns TRUE if it is.
673 * Conditions:
674 * The space is write-locked and active.
675 */
676
677 bool
ipc_right_inuse(ipc_entry_t entry)678 ipc_right_inuse(
679 ipc_entry_t entry)
680 {
681 return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
682 }
683
684 /*
685 * Routine: ipc_right_check
686 * Purpose:
687 * Check if the port has died. If it has,
688 * and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
689 * passed and it is not a send once right then
690 * clean up the entry and return TRUE.
691 * Conditions:
692 * The space is write-locked; the port is not locked.
693 * If returns FALSE, the port is also locked.
694 * Otherwise, entry is converted to a dead name.
695 *
696 * Caller is responsible for a reference to port if it
697 * had died (returns TRUE).
698 */
699
boolean_t
ipc_right_check(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	/*
	 * Still-active ports (and, when the caller allows it, dead
	 * send-once rights with no outstanding request) need no
	 * cleanup: return FALSE with the port left locked.
	 */
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* give back the (now meaningless) right count to the dead port */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(port->ip_srights > 0);
		port->ip_srights--;
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);
		port->ip_sorights--;
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	/* publish the dead-name conversion */
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* caller now owns a reference on the dead port */
	return TRUE;
}
784
785 /*
786 * Routine: ipc_right_terminate
787 * Purpose:
788 * Cleans up an entry in a terminated space.
789 * The entry isn't deallocated or removed
790 * from reverse hash tables.
791 * Conditions:
792 * The space is dead and unlocked.
793 */
794
void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() does, as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	/* keep only the generation/roll bits; the entry no longer denotes a right */
	entry->ie_object = IO_NULL;
	entry->ie_bits &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* nothing to release: dead names hold no object */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		/* dead port: just drop our reference */
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		/*
		 * same as ipc_right_request_cancel(),
		 * except it skips calling ipc_entry_modified()
		 * as the space is now table-less.
		 */
		if (entry->ie_request != IE_REQ_NONE) {
			request = ipc_port_request_cancel(port, name,
			    entry->ie_request);
			entry->ie_request = IE_REQ_NONE;
		}

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				/* last send right: arm the no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this is ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
926
927 /*
928 * Routine: ipc_right_destroy
929 * Purpose:
930 * Destroys an entry in a space.
931 * Conditions:
932 * The space is write-locked (returns unlocked).
933 * The space must be active.
934 * Returns:
935 * KERN_SUCCESS The entry was destroyed.
936 * KERN_INVALID_CAPABILITY The port is pinned.
937 * KERN_INVALID_RIGHT Port guard violation.
938 */
939
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	/* strip the type from the entry up front; `bits` keeps the original */
	bits = entry->ie_bits;
	entry->ie_bits &= ~IE_BITS_TYPE_MASK;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		/* dead port: free the entry and drop our reference */
		if (!ip_active(port)) {
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		/* cancel any notification request before freeing the entry */
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				/* last send right: arm the no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port is in-transit or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1076
1077 /*
1078 * Routine: ipc_right_dealloc
1079 * Purpose:
1080 * Releases a send/send-once/dead-name/port_set user ref.
1081 * Like ipc_right_delta with a delta of -1,
1082 * but looks at the entry to determine the right.
1083 * Conditions:
1084 * The space is write-locked, and is unlocked upon return.
1085 * The space must be active.
1086 * Returns:
1087 * KERN_SUCCESS A user ref was released.
1088 * KERN_INVALID_RIGHT Entry has wrong type.
1089 * KERN_INVALID_CAPABILITY Deallocating a pinned right.
1090 */
1091
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port sets carry no user refs and can have no dn-requests */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Entered directly for a plain dead name, or via goto from
		 * the SEND/SEND_ONCE cases below when ipc_right_check()
		 * discovered the port died.  In the goto case, `port` still
		 * holds a reference that is released after the space lock
		 * is dropped (see below).
		 */

		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: the entry itself goes away */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always has exactly one uref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name; /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes our ref and the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name; /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* dropping the last uref deletes the send right itself */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* notifications are sent only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/*
			 * Last send uref: drop the send right but keep the
			 * receive right; the entry stays, minus the SEND type
			 * bit and all urefs.
			 */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1290
1291 /*
1292 * Routine: ipc_right_delta
1293 * Purpose:
1294 * Modifies the user-reference count for a right.
1295 * May deallocate the right, if the count goes to zero.
1296 * Conditions:
1297 * The space is write-locked, and is unlocked upon return.
1298 * The space must be active.
1299 * Returns:
1300 * KERN_SUCCESS Count was modified.
1301 * KERN_INVALID_RIGHT Entry has wrong type.
1302 * KERN_INVALID_VALUE Bad delta for the right.
1303 * KERN_INVALID_CAPABILITY Deallocating a pinned right.
1304 */
1305
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

	/*
	 * The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
	 * switch below. It is used to keep track of those cases (in DIPC)
	 * where we have postponed the dropping of a port reference. Since
	 * the dropping of the reference could cause the port to disappear
	 * we postpone doing so when we are holding the space lock.
	 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta == 0) {
			goto success;
		}

		/* port sets hold no urefs: only -1 (destroy) is meaningful */
		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* No exception if we used to have receive and held entry since */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		if (delta == 0) {
			goto success;
		}

		/* receive rights hold no urefs: only -1 (destroy) is meaningful */
		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 * The port lock is needed for ipc_right_dncancel;
		 * otherwise, we wouldn't have to take the lock
		 * until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				/* entry keeps a ref for the remaining send right */
				ip_reference(port);
			} else {
				/*
				 * The remaining send right turns into a
				 * dead name. Notice we don't decrement
				 * ip_srights, generate a no-senders notif,
				 * or use ipc_right_dncancel, because the
				 * port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* send-once rights hold exactly one uref: only 0 or -1 is valid */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes our ref and the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			/*
			 * A send/send-once right may have died and thus be
			 * eligible for treatment as a dead name; probe it.
			 */
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* the right died: entry was converted to a dead name;
			 * postpone the ref release until the space is unlocked */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			    /*
			     * AE tries to add single send right without knowing if it already owns one.
			     * But if it doesn't, it should own the receive right and delta should be 1.
			     */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* all send urefs dropped: delete the send right */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				/* keep the receive right; strip SEND and urefs */
				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* ref release postponed until both locks dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* space already unlocked; exception already raised */
	return KERN_INVALID_RIGHT;
}
1739
1740 /*
1741 * Routine: ipc_right_destruct
1742 * Purpose:
1743 * Deallocates the receive right and modifies the
1744 * user-reference count for the send rights as requested.
1745 * Conditions:
1746 * The space is write-locked, and is unlocked upon return.
1747 * The space must be active.
1748 * Returns:
1749 * KERN_SUCCESS Count was modified.
1750 * KERN_INVALID_RIGHT Entry has wrong type.
1751 * KERN_INVALID_VALUE Bad delta for the right.
1752 */
1753
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-right delta is only meaningful if we hold send rights */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send-right deltas are accepted */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			/* entry keeps a ref for the remaining send right */
			ip_reference(port);
		} else {
			/*
			 * The remaining send right turns into a
			 * dead name. Notice we don't decrement
			 * ip_srights, generate a no-senders notif,
			 * or use ipc_right_dncancel, because the
			 * port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1926
1927
1928 /*
1929 * Routine: ipc_right_info
1930 * Purpose:
1931 * Retrieves information about the right.
1932 * Conditions:
1933 * The space is active and write-locked.
1934 * The space is unlocked upon return.
1935 * Returns:
1936 * KERN_SUCCESS Retrieved info
1937 */
1938
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* fold any outstanding notification-request types into the result */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked. Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* ipc_right_check turned the entry into a dead name
			 * and left us a port ref to drop */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			ip_release(port);
		}
	} else {
		/* dead name or port set: nothing more to look at */
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1994
1995 /*
1996 * Routine: ipc_right_copyin_check_reply
1997 * Purpose:
1998 * Check if a subsequent ipc_right_copyin would succeed. Used only
1999 * by ipc_kmsg_copyin_header to check if reply_port can be copied in.
2000 * If the reply port is an immovable send right, it errors out.
2001 * Conditions:
2002 * The space is locked (read or write) and active.
2003 */
2004
2005 boolean_t
ipc_right_copyin_check_reply(__assert_only ipc_space_t space,mach_port_name_t reply_name,ipc_entry_t reply_entry,mach_msg_type_name_t reply_type)2006 ipc_right_copyin_check_reply(
2007 __assert_only ipc_space_t space,
2008 mach_port_name_t reply_name,
2009 ipc_entry_t reply_entry,
2010 mach_msg_type_name_t reply_type)
2011 {
2012 ipc_entry_bits_t bits;
2013 ipc_port_t reply_port;
2014
2015 bits = reply_entry->ie_bits;
2016 assert(is_active(space));
2017
2018 switch (reply_type) {
2019 case MACH_MSG_TYPE_MAKE_SEND:
2020 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2021 return FALSE;
2022 }
2023 break;
2024
2025 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
2026 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2027 return FALSE;
2028 }
2029 break;
2030
2031 case MACH_MSG_TYPE_MOVE_RECEIVE:
2032 /* ipc_kmsg_copyin_header already filters it out */
2033 return FALSE;
2034
2035 case MACH_MSG_TYPE_COPY_SEND:
2036 case MACH_MSG_TYPE_MOVE_SEND:
2037 case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
2038 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2039 break;
2040 }
2041
2042 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2043 return FALSE;
2044 }
2045
2046 reply_port = ip_object_to_port(reply_entry->ie_object);
2047 assert(reply_port != IP_NULL);
2048
2049 /*
2050 * active status peek to avoid checks that will be skipped
2051 * on copyin for dead ports. Lock not held, so will not be
2052 * atomic (but once dead, there's no going back).
2053 */
2054 if (!ip_active(reply_port)) {
2055 break;
2056 }
2057
2058 /*
2059 * Can't copyin a send right that is marked immovable. This bit
2060 * is set only during port creation and never unset. So it can
2061 * be read without a lock.
2062 */
2063 if (ip_is_immovable_send(reply_port)) {
2064 mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
2065 return FALSE;
2066 }
2067
2068 if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2069 if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2070 return FALSE;
2071 }
2072 } else {
2073 if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2074 return FALSE;
2075 }
2076 }
2077
2078 break;
2079 }
2080
2081 default:
2082 panic("ipc_right_copyin_check: strange rights");
2083 }
2084
2085 return TRUE;
2086 }
2087
2088 /*
2089 * Routine: ipc_right_copyin_check_guard_locked
2090 * Purpose:
2091 * Check if the port is guarded and the guard
2092 * value matches the one passed in the arguments.
2093 * If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2094 * check if the port is unguarded.
2095 * Conditions:
2096 * The port is locked.
2097 * Returns:
2098 * KERN_SUCCESS Port is either unguarded
2099 * or guarded with expected value
2100 * KERN_INVALID_ARGUMENT Port is either unguarded already or guard mismatch.
2101 * This also raises a EXC_GUARD exception.
2102 */
2103 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2104 ipc_right_copyin_check_guard_locked(
2105 mach_port_name_t name,
2106 ipc_port_t port,
2107 mach_port_context_t context,
2108 mach_msg_guard_flags_t *guard_flags)
2109 {
2110 mach_msg_guard_flags_t flags = *guard_flags;
2111 if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2112 return KERN_SUCCESS;
2113 } else if (port->ip_guarded && (port->ip_context == context)) {
2114 return KERN_SUCCESS;
2115 }
2116
2117 /* Incorrect guard; Raise exception */
2118 mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2119 return KERN_INVALID_ARGUMENT;
2120 }
2121
2122 /*
2123 * Routine: ipc_right_copyin
2124 * Purpose:
2125 * Copyin a capability from a space.
2126 * If successful, the caller gets a ref
2127 * for the resulting object, unless it is IO_DEAD,
2128 * and possibly a send-once right which should
2129 * be used in a port-deleted notification.
2130 *
2131 * If deadok is not TRUE, the copyin operation
2132 * will fail instead of producing IO_DEAD.
2133 *
2134 * The entry is deallocated if the entry type becomes
2135 * MACH_PORT_TYPE_NONE.
2136 * Conditions:
2137 * The space is write-locked and active.
2138 * Returns:
2139 * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
2140 * KERN_INVALID_RIGHT Name doesn't denote correct right.
2141 * KERN_INVALID_CAPABILITY Trying to move an kobject port or an immovable right,
2142 * or moving the last ref of pinned right
2143 * KERN_INVALID_ARGUMENT Port is unguarded or guard mismatch
2144 */
2145
kern_return_t
ipc_right_copyin(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyin_flags_t flags,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep,
	int                     *assertcntp,
	mach_port_context_t     context,
	mach_msg_guard_flags_t  *guard_flags)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	kern_return_t kr;
	/* may this copyin yield IO_DEAD instead of failing outright? */
	boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
	/* is copying in an immovable send right explicitly permitted? */
	boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);

	*releasep = IP_NULL;
	*assertcntp = 0;

	bits = entry->ie_bits;

	assert(is_active(space));

	switch (msgt_name) {
	case MACH_MSG_TYPE_MAKE_SEND: {
		/* Mint a fresh send right from the receive right held at this entry. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		/* holding the receive right implies the port lives in this space */
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		ipc_port_make_send_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
		/* Mint a fresh send-once right from the receive right held here. */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		ipc_port_make_sonce_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_RECEIVE: {
		/* possible port-deleted notification right to hand back */
		ipc_port_t request = IP_NULL;
		waitq_link_list_t free_l = { };

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			goto invalid_right;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/*
		 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
		 * The ipc_port structure uses the kdata union of kobject and
		 * imp_task exclusively. Thus, general use of a kobject port as
		 * a receive right can cause type confusion in the importance
		 * code.
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			/*
			 * Distinguish an invalid right, e.g., trying to move
			 * a send right as a receive right, from this
			 * situation which is, "This is a valid receive right,
			 * but it's also a kobject and you can't move it."
			 */
			ip_mq_unlock(port);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* Immovable receive rights and special reply ports may never be moved. */
		if (port->ip_immovable_receive || port->ip_specialreply) {
			assert(!ip_in_space(port, ipc_space_kernel));
			ip_mq_unlock(port);
			assert(current_task() != kernel_task);
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
			return KERN_INVALID_CAPABILITY;
		}

		/* caller supplied guard flags: validate the guard before moving */
		if (guard_flags != NULL) {
			kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
			if (kr != KERN_SUCCESS) {
				ip_mq_unlock(port);
				return kr;
			}
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			/*
			 * Entry also holds send rights: it survives, demoted to a
			 * send-only entry, and must be re-hashed so copyout of a
			 * send right can find it again by object.
			 */
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			entry->ie_bits = bits;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			/* the surviving entry keeps its own port reference */
			ip_reference(port);
			ipc_entry_modified(space, name, entry);
		} else {
			/* Receive-only entry: cancel any dead-name request and free it. */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}

		/* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
		(void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
		if (guard_flags != NULL) {
			/* this flag will be cleared during copyout */
			*guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

#if IMPORTANCE_INHERITANCE
		/*
		 * Account for boosts the current task is going to lose when
		 * copying this right in. Tempowner ports have either not
		 * been accounting to any task (and therefore are already in
		 * "limbo" state w.r.t. assertions) or to some other specific
		 * task. As we have no way to drop the latter task's assertions
		 * here, we'll deduct those when we enqueue it on its
		 * destination port (see ipc_port_check_circularity()).
		 */
		if (port->ip_tempowner == 0) {
			assert(IIT_NULL == ip_get_imp_task(port));

			/* ports in limbo have to be tempowner */
			port->ip_tempowner = 1;
			*assertcntp = port->ip_impcount;
		}
#endif /* IMPORTANCE_INHERITANCE */

		ip_mq_unlock(port);

		/*
		 * It is unfortunate to do this while the space is locked,
		 * but plumbing it through all callers really hurts.
		 */
		waitq_link_free_list(WQT_PORT_SET, &free_l);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_COPY_SEND: {
		/* Duplicate an existing send right; the entry keeps its urefs. */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto copy_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/* may turn the entry into a dead name; port returned for release */
		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto copy_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* a send-once right cannot be copied */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		ipc_port_copy_send_locked(port);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = IP_NULL;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND: {
		/* Transfer a send right out of the space, consuming one uref. */
		ipc_port_t request = IP_NULL;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send-once rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/* port is locked and active */

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* a send-once right cannot satisfy MOVE_SEND */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
			assert(port->ip_sorights > 0);
			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		if (IE_BITS_UREFS(bits) == 1) {
			/* moving the last uref: the entry's own naked right leaves */
			assert(port->ip_srights > 0);
			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* entry survives as receive-only; donate a port ref */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);
				assert(!ip_is_pinned(port));

				entry->ie_bits = bits & ~
				    (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
				ip_reference(port);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				/* the last uref of a pinned right may not leave the space */
				if (ip_is_pinned(port)) {
					ip_mq_unlock(port);
					mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
					return KERN_INVALID_CAPABILITY;
				}

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* transfer entry's reference to caller */
			}
		} else {
			/* more urefs remain: behave like COPY_SEND plus a uref decrement */
			ipc_port_copy_send_locked(port);
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);
		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/* Transfer the (single) send-once right; the entry always dies. */
		ipc_port_t request;

		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			goto move_dead;
		}

		/* allow for dead send rights */

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, flags)) {
			bits = entry->ie_bits;
			*releasep = port;
			goto move_dead;
		}
		/*
		 * port is locked, but may not be active:
		 * Allow copyin of inactive ports with no dead name request and treat it
		 * as if the copyin of the port was successful and port became inactive
		 * later.
		 */

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			/* a send right cannot satisfy MOVE_SEND_ONCE */
			assert(bits & MACH_PORT_TYPE_SEND);
			assert(port->ip_srights > 0);

			ip_mq_unlock(port);
			goto invalid_right;
		}

		if (!allow_imm_send && ip_is_immovable_send(port)) {
			ip_mq_unlock(port);
			mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
			return KERN_INVALID_CAPABILITY;
		}

		/* send-once entries always carry exactly one uref */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		ip_mq_unlock(port);

		*objectp = ip_to_object(port);
		*sorightp = request;
		break;
	}

	default:
invalid_right:
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;

copy_dead:
	/* COPY_SEND of a dead name: entry is untouched, IO_DEAD returned if allowed */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == 0);

	if (!deadok) {
		goto invalid_right;
	}

	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;

move_dead:
	/* MOVE of a dead name: consume one uref, deallocating the entry on the last */
	assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
	assert(IE_BITS_UREFS(bits) > 0);
	assert(entry->ie_request == IE_REQ_NONE);
	assert(entry->ie_object == IO_NULL);

	if (!deadok) {
		goto invalid_right;
	}

	if (IE_BITS_UREFS(bits) == 1) {
		ipc_entry_dealloc(space, IO_NULL, name, entry);
	} else {
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 1; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	}
	*objectp = IO_DEAD;
	*sorightp = IP_NULL;
	return KERN_SUCCESS;
}
2559
2560 /*
2561 * Routine: ipc_right_copyin_two_move_sends
2562 * Purpose:
2563 * Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2564 * and deadok == FALSE, except that this moves two
2565 * send rights at once.
2566 * Conditions:
2567 * The space is write-locked and active.
2568 * The object is returned with two refs/send rights.
2569 * Returns:
2570 * KERN_SUCCESS Acquired an object.
2571 * KERN_INVALID_RIGHT Name doesn't denote correct right.
2572 */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t     space,
	mach_port_name_t name,
	ipc_entry_t     entry,
	ipc_object_t    *objectp,
	ipc_port_t      *sorightp,
	ipc_port_t      *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	/* possible port-deleted notification right, handed back via sorightp */
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	/* moving two send rights requires at least two urefs */
	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	/* may convert the entry to a dead name; port returned for release */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 *     that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_locked(port);
		ipc_port_copy_send_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			/* entry survives as receive-only */
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2678
2679
2680 /*
2681 * Routine: ipc_right_copyin_two
2682 * Purpose:
2683 * Like ipc_right_copyin with two dispositions,
2684 * each of which results in a send or send-once right,
2685 * and deadok = FALSE.
2686 * Conditions:
2687 * The space is write-locked and active.
2688 * The object is returned with two refs/rights.
2689 * Msgt_one refers to the dest_type
2690 * Returns:
2691 * KERN_SUCCESS Acquired an object.
2692 * KERN_INVALID_RIGHT Name doesn't denote correct right(s).
2693 * KERN_INVALID_CAPABILITY Name doesn't denote correct right for msgt_two.
2694 */
kern_return_t
ipc_right_copyin_two(
	ipc_space_t     space,
	mach_port_name_t name,
	ipc_entry_t     entry,
	mach_msg_type_name_t msgt_one,
	mach_msg_type_name_t msgt_two,
	ipc_object_t    *objectp,
	ipc_port_t      *sorightp,
	ipc_port_t      *releasep)
{
	kern_return_t kr;
	int assertcnt = 0;

	/* both dispositions must yield a send or send-once right */
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));

	/*
	 * This is a little tedious to make atomic, because
	 * there are 25 combinations of valid dispositions.
	 * However, most are easy.
	 */

	/*
	 * If either is move-sonce, then there must be an error:
	 * a send-once right cannot be duplicated, so it cannot
	 * satisfy two dispositions at once.
	 */
	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		return KERN_INVALID_RIGHT;
	}

	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
		/*
		 * One of the dispositions needs a receive right.
		 *
		 * If the copyin below succeeds, we know the receive
		 * right is there (because the pre-validation of
		 * the second disposition already succeeded in our
		 * caller).
		 *
		 * Hence the port is not in danger of dying.
		 */
		ipc_object_t object_two;

		kr = ipc_right_copyin(space, name, entry,
		    msgt_one, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		assert(IO_VALID(*objectp));
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);

		/*
		 * Now copyin the second (previously validated)
		 * disposition. The result can't be a dead port,
		 * as no valid disposition can make us lose our
		 * receive right.
		 */
		kr = ipc_right_copyin(space, name, entry,
		    msgt_two, IPC_OBJECT_COPYIN_FLAGS_NONE,
		    &object_two, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		assert(kr == KERN_SUCCESS);
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);
		/* both copyins must have resolved to the same port */
		assert(object_two == *objectp);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
		/*
		 * This is an easy case. Just use our
		 * handy-dandy special-purpose copyin call
		 * to get two send rights for the price of one.
		 */
		kr = ipc_right_copyin_two_move_sends(space, name, entry,
		    objectp, sorightp,
		    releasep);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	} else {
		mach_msg_type_name_t msgt_name;

		/*
		 * Must be either a single move-send and a
		 * copy-send, or two copy-send dispositions.
		 * Use the disposition with the greatest side
		 * effects for the actual copyin - then just
		 * duplicate the send right you get back.
		 */
		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
		} else {
			msgt_name = MACH_MSG_TYPE_COPY_SEND;
		}

		kr = ipc_right_copyin(space, name, entry,
		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 * Copy the right we got back. If it is dead now,
		 * that's OK. Neither right will be usable to send
		 * a message anyway.
		 *
		 * Note that the port could be concurrently moved
		 * outside of the space as a descriptor, and then
		 * destroyed, which would not happen under the space lock.
		 *
		 * It means we can't use ipc_port_copy_send() which
		 * may fail if the port died.
		 */
		io_lock(*objectp);
		ipc_port_copy_send_locked(ip_object_to_port(*objectp));
		io_unlock(*objectp);
	}

	return KERN_SUCCESS;
}
2829
2830
2831 /*
2832 * Routine: ipc_right_copyout
2833 * Purpose:
2834 * Copyout a capability to a space.
2835 * If successful, consumes a ref for the object.
2836 *
2837 * Always succeeds when given a newly-allocated entry,
2838 * because user-reference overflow isn't a possibility.
2839 *
2840 * If copying out the object would cause the user-reference
2841 * count in the entry to overflow, then the user-reference
2842 * count is left pegged to its maximum value and the copyout
2843 * succeeds anyway.
2844 * Conditions:
2845 * The space is write-locked and active.
2846 * The object is locked and active.
2847 * The object is unlocked; the space isn't.
2848 * Returns:
2849 * KERN_SUCCESS Copied out capability.
2850 */
2851
kern_return_t
ipc_right_copyout(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	ipc_object_t            object)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	/* name/guard recorded on a service-port label, restored below */
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	bits = entry->ie_bits;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = ip_object_to_port(object);

	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		/* pinning is only legal for immovable sends in pinned tasks */
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		/* send-once rights always land in a brand new entry */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (port->ip_specialreply) {
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			/* entry already holds send rights: just bump its urefs */
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				port->ip_srights--;
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			port->ip_srights--;
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			/* fresh entry: it inherits both the right and the ref */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		/* old destination port, if the receive right was in transit */
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		/* re-arm the guard that copyin marked UNGUARDED_ON_SEND */
		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_msg_addr;
			*context = port->ip_context;
			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesn't end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}
			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			/* on RELEASE kernels only initproc is subject to the check below */
			if (current_task()->bsd_info != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			/* entry merged with existing send rights: drop the extra ref */
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port. They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		panic("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
3075