1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_right.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC capabilities.
71 */
72
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <ipc/port.h>
82 #include <ipc/ipc_entry.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_object.h>
85 #include <ipc/ipc_hash.h>
86 #include <ipc/ipc_port.h>
87 #include <ipc/ipc_pset.h>
88 #include <ipc/ipc_right.h>
89 #include <ipc/ipc_notify.h>
90 #include <ipc/ipc_importance.h>
91 #include <ipc/ipc_service_port.h>
92 #include <security/mac_mach_internal.h>
93
94 extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;
95
96 TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
97 static TUNABLE(bool, reply_port_semantics, "reply_port_semantics", true);
98
99 /*
100 * Routine: ipc_right_lookup_read
101 * Purpose:
102 * Finds an entry in a space, given the name.
103 * Conditions:
104 * Nothing locked.
105 * If an object is found, it is locked and active.
106 * Returns:
107 * KERN_SUCCESS Found an entry.
108 * KERN_INVALID_TASK The space is dead.
109 * KERN_INVALID_NAME Name doesn't exist in space.
110 */
111 kern_return_t
ipc_right_lookup_read(ipc_space_t space,mach_port_name_t name,ipc_entry_bits_t * bitsp,ipc_object_t * objectp)112 ipc_right_lookup_read(
113 ipc_space_t space,
114 mach_port_name_t name,
115 ipc_entry_bits_t *bitsp,
116 ipc_object_t *objectp)
117 {
118 mach_port_index_t index;
119 ipc_entry_table_t table;
120 ipc_entry_t entry;
121 ipc_object_t object;
122 kern_return_t kr;
123
124 index = MACH_PORT_INDEX(name);
125 if (__improbable(index == 0)) {
126 *bitsp = 0;
127 *objectp = IO_NULL;
128 return KERN_INVALID_NAME;
129 }
130
131 smr_global_enter();
132
133 /*
134 * Acquire a (possibly stale) pointer to the table,
135 * and guard it so that it can't be deallocated while we use it.
136 *
137 * smr_global_enter() has the property that it strongly serializes
138 * after any store-release. This is important because it means that if
139 * one considers this (broken) userspace usage:
140 *
141 * Thread 1:
142 * - makes a semaphore, gets name 0x1003
143 * - stores that name to a global `sema` in userspace
144 *
145 * Thread 2:
146 * - spins to observe `sema` becoming non 0
147 * - calls semaphore_wait() on 0x1003
148 *
149 * Then, because in order to return 0x1003 this thread issued
150 * a store-release (when calling is_write_unlock()),
151 * then this smr_entered_load() can't possibly observe a table
152 * pointer that is older than the one that was current when the
153 * semaphore was made.
154 *
155 * This fundamental property allows us to never loop.
156 */
157 table = smr_entered_load(&space->is_table);
158 if (__improbable(table == NULL)) {
159 kr = KERN_INVALID_TASK;
160 goto out_put;
161 }
162 entry = ipc_entry_table_get(table, index);
163 if (__improbable(entry == NULL)) {
164 kr = KERN_INVALID_NAME;
165 goto out_put;
166 }
167
168 /*
169 * Note: this should be an atomic load, but PAC and atomics
170 * don't work interact well together.
171 */
172 object = entry->ie_volatile_object;
173
174 /*
175 * Attempt to lock an object that lives in this entry.
176 * It might fail or be a completely different object by now.
177 *
178 * Make sure that acquiring the lock is fully ordered after any
179 * lock-release (using os_atomic_barrier_before_lock_acquire()).
180 * This allows us to always reliably observe space termination below.
181 */
182 os_atomic_barrier_before_lock_acquire();
183 if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
184 kr = KERN_INVALID_NAME;
185 goto out_put;
186 }
187
188 /*
189 * Now that we hold the object lock, we are preventing any entry
190 * in this space for this object to be mutated.
191 *
192 * If the space didn't grow after we acquired our hazardous reference,
193 * and before a mutation of the entry, then holding the object lock
194 * guarantees we will observe the truth of ie_bits, ie_object and
195 * ie_request (those are always mutated with the object lock held).
196 *
197 * However this ordering is problematic:
198 * - [A]cquisition of the table pointer
199 * - [G]rowth of the space (making the table pointer in [A] stale)
200 * - [M]utation of the entry
201 * - [L]ocking of the object read through [A].
202 *
203 * The space lock is held for both [G] and [M], and the object lock
204 * is held for [M], which means that once we lock the object we can
205 * observe if [G] happenend by reloading the table pointer.
206 *
207 * We might still fail to observe any growth operation that happened
208 * after the last mutation of this object's entry, because holding
209 * an object lock doesn't guarantee anything about the liveness
210 * of the space table pointer. This is not a problem at all: by
211 * definition, those didn't affect the state of the entry.
212 *
213 * TODO: a data-structure where the entries are grown by "slabs",
214 * would allow for the address of an ipc_entry_t to never
215 * change once it exists in a space and would avoid a reload
216 * (as well as making space growth faster).
217 * We however still need to check for termination.
218 */
219 table = smr_entered_load(&space->is_table);
220 if (__improbable(table == NULL)) {
221 kr = KERN_INVALID_TASK;
222 goto out_put_unlock;
223 }
224
225 /*
226 * Tables never shrink so we don't need to validate the length twice.
227 */
228 entry = ipc_entry_table_get_nocheck(table, index);
229
230 /*
231 * Now that we hold the lock and have a "fresh enough" table pointer,
232 * validate if this entry is what we think it is.
233 *
234 * To the risk of being repetitive, we still need to protect
235 * those accesses under SMR, because subsequent
236 * table growths might retire the memory. However we know
237 * those growths will have left our entry unchanged.
238 */
239 if (__improbable(entry->ie_object != object)) {
240 kr = KERN_INVALID_NAME;
241 goto out_put_unlock;
242 }
243
244 ipc_entry_bits_t bits = entry->ie_bits;
245 if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
246 IE_BITS_TYPE(bits == MACH_PORT_TYPE_NONE))) {
247 kr = KERN_INVALID_NAME;
248 goto out_put_unlock;
249 }
250
251 /* Done with hazardous accesses to the table */
252 smr_global_leave();
253
254 *bitsp = bits;
255 *objectp = object;
256 return KERN_SUCCESS;
257
258 out_put_unlock:
259 ipc_object_unlock(object);
260 out_put:
261 smr_global_leave();
262 return kr;
263 }
264
265 /*
266 * Routine: ipc_right_lookup_write
267 * Purpose:
268 * Finds an entry in a space, given the name.
269 * Conditions:
270 * Nothing locked. If successful, the space is write-locked.
271 * Returns:
272 * KERN_SUCCESS Found an entry.
273 * KERN_INVALID_TASK The space is dead.
274 * KERN_INVALID_NAME Name doesn't exist in space.
275 */
276
277 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)278 ipc_right_lookup_write(
279 ipc_space_t space,
280 mach_port_name_t name,
281 ipc_entry_t *entryp)
282 {
283 ipc_entry_t entry;
284
285 assert(space != IS_NULL);
286
287 is_write_lock(space);
288
289 if (!is_active(space)) {
290 is_write_unlock(space);
291 return KERN_INVALID_TASK;
292 }
293
294 if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
295 is_write_unlock(space);
296 return KERN_INVALID_NAME;
297 }
298
299 *entryp = entry;
300 return KERN_SUCCESS;
301 }
302
303 /*
304 * Routine: ipc_right_lookup_two_write
305 * Purpose:
306 * Like ipc_right_lookup except that it returns two
307 * entries for two different names that were looked
308 * up under the same space lock.
309 * Conditions:
310 * Nothing locked. If successful, the space is write-locked.
311 * Returns:
312 * KERN_INVALID_TASK The space is dead.
313 * KERN_INVALID_NAME Name doesn't exist in space.
314 */
315
316 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)317 ipc_right_lookup_two_write(
318 ipc_space_t space,
319 mach_port_name_t name1,
320 ipc_entry_t *entryp1,
321 mach_port_name_t name2,
322 ipc_entry_t *entryp2)
323 {
324 ipc_entry_t entry1;
325 ipc_entry_t entry2;
326
327 assert(space != IS_NULL);
328
329 is_write_lock(space);
330
331 if (!is_active(space)) {
332 is_write_unlock(space);
333 return KERN_INVALID_TASK;
334 }
335
336 if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
337 is_write_unlock(space);
338 mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
339 return KERN_INVALID_NAME;
340 }
341 if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
342 is_write_unlock(space);
343 mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
344 return KERN_INVALID_NAME;
345 }
346 *entryp1 = entry1;
347 *entryp2 = entry2;
348 return KERN_SUCCESS;
349 }
350
351 /*
352 * Routine: ipc_right_reverse
353 * Purpose:
354 * Translate (space, object) -> (name, entry).
355 * Only finds send/receive rights.
356 * Returns TRUE if an entry is found; if so,
357 * the object active.
358 * Conditions:
359 * The space must be locked (read or write) and active.
360 * The port is locked and active
361 */
362
363 bool
ipc_right_reverse(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)364 ipc_right_reverse(
365 ipc_space_t space,
366 ipc_object_t object,
367 mach_port_name_t *namep,
368 ipc_entry_t *entryp)
369 {
370 ipc_port_t port;
371 mach_port_name_t name;
372 ipc_entry_t entry;
373
374 /* would switch on io_otype to handle multiple types of object */
375
376 assert(is_active(space));
377 assert(io_otype(object) == IOT_PORT);
378
379 port = ip_object_to_port(object);
380 require_ip_active(port);
381
382 ip_mq_lock_held(port);
383
384 if (ip_in_space(port, space)) {
385 name = ip_get_receiver_name(port);
386 assert(name != MACH_PORT_NULL);
387
388 entry = ipc_entry_lookup(space, name);
389
390 assert(entry != IE_NULL);
391 assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
392 assert(port == ip_object_to_port(entry->ie_object));
393
394 *namep = name;
395 *entryp = entry;
396 return true;
397 }
398
399 if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
400 entry = *entryp;
401 assert(entry != IE_NULL);
402 assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
403 assert(port == ip_object_to_port(entry->ie_object));
404
405 return true;
406 }
407
408 return false;
409 }
410
411 /*
412 * Routine: ipc_right_dnrequest
413 * Purpose:
414 * Make a dead-name request, returning the previously
415 * registered send-once right. If notify is IP_NULL,
416 * just cancels the previously registered request.
417 *
418 * Conditions:
419 * Nothing locked. May allocate memory.
420 * Only consumes/returns refs if successful.
421 * Returns:
422 * KERN_SUCCESS Made/canceled dead-name request.
423 * KERN_INVALID_TASK The space is dead.
424 * KERN_INVALID_NAME Name doesn't exist in space.
425 * KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
426 * KERN_INVALID_ARGUMENT Name denotes dead name, but
427 * immediate is FALSE or notify is IP_NULL.
428 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
429 */
430
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_request_index_t prev_request;
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: we may have to drop the space lock to grow the port's
	 * request table, after which the whole lookup must be redone.
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		prev_request = entry->ie_request;

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && prev_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			/*
			 * ipc_right_check() locks the port; if it returns TRUE the
			 * port was dead and the entry is now a dead name (handled
			 * after this if-block), with a port ref left for us.
			 */
			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) || !ip_full(port))) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					/* deliver the send-possible notification right away */
					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there is a previous request, free it. Any subsequent
				 * allocation cannot fail, thus assuring an atomic swap.
				 */
				if (prev_request != IE_REQ_NONE) {
					previous = ipc_port_request_cancel(port, name, prev_request);
				}

#if IMPORTANCE_INHERITANCE
				/* first armed send-possible request may donate importance below */
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					/* request table full: grow it (drops all locks) and retry */
					assert(previous == IP_NULL);
					is_write_unlock(space);

					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					/* ipc_port_importance_delta() unlocks the port when it returns TRUE */
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the ref ipc_right_check() left us when the port died */
			if (port != IP_NULL) {
				ip_release(port);
			}

			/* fire the dead-name notification immediately */
			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		/* wrong entry type for a notification request */
		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	/* return the previously-registered send-once right (may be IP_NULL) */
	*previousp = previous;
	return KERN_SUCCESS;
}
597
598 /*
599 * Routine: ipc_right_request_cancel
600 * Purpose:
601 * Cancel a notification request and return the send-once right.
602 * Afterwards, entry->ie_request == 0.
603 * Conditions:
604 * The space must be write-locked; the port must be locked.
605 * The port and space must be active.
606 */
607
608 ipc_port_t
ipc_right_request_cancel(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)609 ipc_right_request_cancel(
610 ipc_space_t space,
611 ipc_port_t port,
612 mach_port_name_t name,
613 ipc_entry_t entry)
614 {
615 ipc_port_t previous;
616
617 require_ip_active(port);
618 assert(is_active(space));
619 assert(port == ip_object_to_port(entry->ie_object));
620
621 if (entry->ie_request == IE_REQ_NONE) {
622 return IP_NULL;
623 }
624
625 previous = ipc_port_request_cancel(port, name, entry->ie_request);
626 entry->ie_request = IE_REQ_NONE;
627 ipc_entry_modified(space, name, entry);
628 return previous;
629 }
630
631 /*
632 * Routine: ipc_right_inuse
633 * Purpose:
634 * Check if an entry is being used.
635 * Returns TRUE if it is.
636 * Conditions:
637 * The space is write-locked and active.
638 */
639
640 bool
ipc_right_inuse(ipc_entry_t entry)641 ipc_right_inuse(
642 ipc_entry_t entry)
643 {
644 return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
645 }
646
647 /*
648 * Routine: ipc_right_check
649 * Purpose:
650 * Check if the port has died. If it has,
651 * and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
652 * passed and it is not a send once right then
653 * clean up the entry and return TRUE.
654 * Conditions:
655 * The space is write-locked; the port is not locked.
656 * If returns FALSE, the port is also locked.
657 * Otherwise, entry is converted to a dead name.
658 *
659 * Caller is responsible for a reference to port if it
660 * had died (returns TRUE).
661 */
662
boolean_t
ipc_right_check(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	/*
	 * Port still alive (or caller tolerates a dead send-once right):
	 * return FALSE with the port left locked.
	 */
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return FALSE;
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* give back the right's count on the (dead) port */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(port->ip_srights > 0);
		port->ip_srights--;
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		assert(port->ip_sorights > 0);
		port->ip_sorights--;
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* TRUE: port was dead; caller now owns the port reference */
	return TRUE;
}
747
748 /*
749 * Routine: ipc_right_terminate
750 * Purpose:
751 * Cleans up an entry in a terminated space.
752 * The entry isn't deallocated or removed
753 * from reverse hash tables.
754 * Conditions:
755 * The space is dead and unlocked.
756 */
757
void
ipc_right_terminate(
	ipc_space_t space,
	mach_port_name_t name,
	ipc_entry_t entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	/* strip everything but generation/roll bits: entry is now empty */
	entry->ie_object = IO_NULL;
	entry->ie_bits &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* dead names hold no object and no request: nothing to clean */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		/* port already died elsewhere: just drop our reference */
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		/*
		 * same as ipc_right_request_cancel(),
		 * except for calling ipc_entry_modified()
		 * as the space is now table-less.
		 */
		if (entry->ie_request != IE_REQ_NONE) {
			request = ipc_port_request_cancel(port, name,
			    entry->ie_request);
			entry->ie_request = IE_REQ_NONE;
		}

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				/* last send right gone: may owe a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
889
890 /*
891 * Routine: ipc_right_destroy
892 * Purpose:
893 * Destroys an entry in a space.
894 * Conditions:
895 * The space is write-locked (returns unlocked).
896 * The space must be active.
897 * Returns:
898 * KERN_SUCCESS The entry was destroyed.
899 * KERN_INVALID_CAPABILITY The port is pinned.
900 * KERN_INVALID_RIGHT Port guard violation.
901 */
902
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	/* capture the type, then mark the entry type-less up front */
	bits = entry->ie_bits;
	entry->ie_bits &= ~IE_BITS_TYPE_MASK;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed: raise a guard exception */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		/* dead port: free the entry and drop our reference, nothing to notify */
		if (!ip_active(port)) {
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			assert(port->ip_srights > 0);
			if (--port->ip_srights == 0) {
				/* last send right gone: may owe a no-senders notification */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1039
1040 /*
1041 * Routine: ipc_right_dealloc
1042 * Purpose:
1043 * Releases a send/send-once/dead-name/port_set user ref.
1044 * Like ipc_right_delta with a delta of -1,
1045 * but looks at the entry to determine the right.
1046 * Conditions:
1047 * The space is write-locked, and is unlocked upon return.
1048 * The space must be active.
1049 * Returns:
1050 * KERN_SUCCESS A user ref was released.
1051 * KERN_INVALID_RIGHT Entry has wrong type.
1052 * KERN_INVALID_CAPABILITY Deallocating a pinned right.
1053 */
1054
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port-set entries carry no user refs and no dead-name request */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		/* drop the space's entry first, then tear down the set itself */
		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Also reached via goto from the SEND / SEND_ONCE cases
		 * below when ipc_right_check() discovers the port died
		 * (in which case `port` holds a reference to release).
		 */

		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: the entry itself goes away */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always has exactly one uref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name; /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name; /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* releasing the last uref deallocates the send right */
			if (ip_is_pinned(port)) {
				/* pinned rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			if (--port->ip_srights == 0) {
				/* last send right anywhere: arm no-senders */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			/* release the entry's reference on the port */
			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* emit notifications only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: drop the send right, keep the receive right */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1253
1254 /*
1255 * Routine: ipc_right_delta
1256 * Purpose:
1257 * Modifies the user-reference count for a right.
1258 * May deallocate the right, if the count goes to zero.
1259 * Conditions:
1260 * The space is write-locked, and is unlocked upon return.
1261 * The space must be active.
1262 * Returns:
1263 * KERN_SUCCESS Count was modified.
1264 * KERN_INVALID_RIGHT Entry has wrong type.
1265 * KERN_INVALID_VALUE Bad delta for the right.
1266 * KERN_INVALID_CAPABILITY Deallocating a pinned right.
1267 */
1268
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

	/*
	 * The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
	 * switch below. It is used to keep track of those cases (in DIPC)
	 * where we have postponed the dropping of a port reference. Since
	 * the dropping of the reference could cause the port to disappear
	 * we postpone doing so when we are holding the space lock.
	 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta == 0) {
			goto success;
		}

		/* port sets have a single implicit ref; only -1 is meaningful */
		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* No exception if we used to have receive and held entry since */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		if (delta == 0) {
			goto success;
		}

		/* receive rights also have a single ref; only -1 is meaningful */
		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 * The port lock is needed for ipc_right_dncancel;
		 * otherwise, we wouldn't have to take the lock
		 * until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				ip_reference(port);
			} else {
				/*
				 * The remaining send right turns into a
				 * dead name. Notice we don't decrement
				 * ip_srights, generate a no-senders notif,
				 * or use ipc_right_dncancel, because the
				 * port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			/* pure receive right: the whole entry goes away */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; `port` is released at invalid_right */
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* a send-once right has exactly one ref; only 0 or -1 are valid */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and the port lock */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active: it is a live right,
				 * not a dead name, so this request is invalid */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* port died: entry became a dead name; defer the
			 * reference drop until the space lock is released */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* all refs removed: free the entry */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		/* now safe to drop the postponed port reference */
		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			/*
			 * AE tries to add single send right without knowing if it already owns one.
			 * But if it doesn't, it should own the receive right and delta should be 1.
			 */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; `port` is released at invalid_right */
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* dropping all send urefs deallocates the send right */
			if (ip_is_pinned(port)) {
				/* pinned rights may not be deallocated */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			if (--port->ip_srights == 0) {
				/* last send right anywhere: arm no-senders */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* keep the receive right, strip the send bits */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				/* pure send right: the whole entry goes away */
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		/* emit notifications only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	/* drop the ref left by a true ipc_right_check() above, if any */
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* locks already dropped and exception already raised above */
	return KERN_INVALID_RIGHT;
}
1702
1703 /*
1704 * Routine: ipc_right_destruct
1705 * Purpose:
1706 * Deallocates the receive right and modifies the
1707 * user-reference count for the send rights as requested.
1708 * Conditions:
1709 * The space is write-locked, and is unlocked upon return.
1710 * The space must be active.
1711 * Returns:
1712 * KERN_SUCCESS Count was modified.
1713 * KERN_INVALID_RIGHT Entry has wrong type.
1714 * KERN_INVALID_VALUE Bad delta for the right.
1715 */
1716
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-uref delta is only meaningful if we hold send rights too */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* destruct may only remove send urefs, never add them */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* all send urefs gone: drop the send right itself */
			if (--port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			ip_reference(port);
		} else {
			/*
			 * The remaining send right turns into a
			 * dead name. Notice we don't decrement
			 * ip_srights, generate a no-senders notif,
			 * or use ipc_right_dncancel, because the
			 * port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		/* no send rights left: the whole entry goes away */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1889
1890
1891 /*
1892 * Routine: ipc_right_info
1893 * Purpose:
1894 * Retrieves information about the right.
1895 * Conditions:
1896 * The space is active and write-locked.
1897 * The space is unlocked upon return.
1898 * Returns:
1899 * KERN_SUCCESS Retrieved info
1900 */
1901
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		/* fold any registered notification-request types into `type` */
		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked. Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port is locked and active */
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			/* port died: entry was converted to a dead name,
			 * and we owe a release on the port reference */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			ip_release(port);
		}
	} else {
		/* dead name or port set: nothing further to look up */
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1957
1958 /*
1959 * Routine: ipc_right_copyin_check_reply
1960 * Purpose:
1961 * Check if a subsequent ipc_right_copyin would succeed. Used only
1962 * by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1963 * If the reply port is an immovable send right, it errors out.
1964 * Conditions:
1965 * The space is locked (read or write) and active.
1966 */
1967
boolean_t
ipc_right_copyin_check_reply(
	__assert_only ipc_space_t       space,
	mach_port_name_t                reply_name,
	ipc_entry_t                     reply_entry,
	mach_msg_type_name_t            reply_type,
	ipc_entry_t                     dest_entry,
	boolean_t                       *reply_port_semantics_violation)
{
	ipc_entry_bits_t bits;
	ipc_port_t reply_port;
	ipc_port_t dest_port;

	bits = reply_entry->ie_bits;
	assert(is_active(space));

	switch (reply_type) {
	case MACH_MSG_TYPE_MAKE_SEND:
		/* making a send requires holding the receive right */
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MAKE_SEND_ONCE:
		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			return FALSE;
		}
		break;

	case MACH_MSG_TYPE_MOVE_RECEIVE:
		/* ipc_kmsg_copyin_header already filters it out */
		return FALSE;

	case MACH_MSG_TYPE_COPY_SEND:
	case MACH_MSG_TYPE_MOVE_SEND:
	case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
		/*
		 * Dead names copy in as MACH_PORT_DEAD; skip the
		 * liveness/immovability checks below.
		 * NOTE(review): a dead-name entry reaching the tail of this
		 * function would have ie_object == IO_NULL — presumably the
		 * later IE_BITS_TYPE/ip_active checks never see that case;
		 * confirm against ipc_kmsg_copyin_header.
		 */
		if (bits & MACH_PORT_TYPE_DEAD_NAME) {
			break;
		}

		if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
			return FALSE;
		}

		reply_port = ip_object_to_port(reply_entry->ie_object);
		assert(reply_port != IP_NULL);

		/*
		 * active status peek to avoid checks that will be skipped
		 * on copyin for dead ports. Lock not held, so will not be
		 * atomic (but once dead, there's no going back).
		 */
		if (!ip_active(reply_port)) {
			break;
		}

		/*
		 * Can't copyin a send right that is marked immovable. This bit
		 * is set only during port creation and never unset. So it can
		 * be read without a lock.
		 */
		if (ip_is_immovable_send(reply_port)) {
			mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
			return FALSE;
		}

		/* the entry must actually carry the right being moved/copied */
		if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
			if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
				return FALSE;
			}
		} else {
			if ((bits & MACH_PORT_TYPE_SEND) == 0) {
				return FALSE;
			}
		}

		break;
	}

	default:
		panic("ipc_right_copyin_check: strange rights");
	}

	/* port sets can't participate in reply-port semantics checks */
	if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
	    (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
		return TRUE;
	}

	/* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
	reply_port = ip_object_to_port(reply_entry->ie_object);
	assert(reply_port != IP_NULL);

	if (ip_active(reply_port)) {
		if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
			return FALSE;
		}

		/* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
		dest_port = ip_object_to_port(dest_entry->ie_object);
		if (IP_VALID(dest_port) && ip_active(dest_port) && ip_require_reply_port_semantics(dest_port)
		    && !ip_is_reply_port(reply_port) && !ip_is_provisional_reply_port(reply_port)) {
			/* record the violation for telemetry even when not enforcing */
			*reply_port_semantics_violation = TRUE;

			/* NOTE(review): `reply_port_semantics` appears to be a
			 * global enforcement switch defined elsewhere in the
			 * file — when set, the violation becomes fatal. */
			if (reply_port_semantics) {
				mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
				return FALSE;
			}
		}
	}

	return TRUE;
}
2080
2081 /*
2082 * Routine: ipc_right_copyin_check_guard_locked
2083 * Purpose:
2084 * Check if the port is guarded and the guard
2085 * value matches the one passed in the arguments.
2086 * If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2087 * check if the port is unguarded.
2088 * Conditions:
2089 * The port is locked.
2090 * Returns:
2091 * KERN_SUCCESS Port is either unguarded
2092 * or guarded with expected value
2093 * KERN_INVALID_ARGUMENT Port is either unguarded already or guard mismatch.
2094 * This also raises a EXC_GUARD exception.
2095 */
2096 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2097 ipc_right_copyin_check_guard_locked(
2098 mach_port_name_t name,
2099 ipc_port_t port,
2100 mach_port_context_t context,
2101 mach_msg_guard_flags_t *guard_flags)
2102 {
2103 mach_msg_guard_flags_t flags = *guard_flags;
2104 if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2105 return KERN_SUCCESS;
2106 } else if (port->ip_guarded && (port->ip_context == context)) {
2107 return KERN_SUCCESS;
2108 }
2109
2110 /* Incorrect guard; Raise exception */
2111 mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2112 return KERN_INVALID_ARGUMENT;
2113 }
2114
2115 /*
2116 * Routine: ipc_right_copyin
2117 * Purpose:
2118 * Copyin a capability from a space.
2119 * If successful, the caller gets a ref
2120 * for the resulting object, unless it is IO_DEAD,
2121 * and possibly a send-once right which should
2122 * be used in a port-deleted notification.
2123 *
2124 * If deadok is not TRUE, the copyin operation
2125 * will fail instead of producing IO_DEAD.
2126 *
2127 * The entry is deallocated if the entry type becomes
2128 * MACH_PORT_TYPE_NONE.
2129 * Conditions:
2130 * The space is write-locked and active.
2131 * Returns:
2132 * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
2133 * KERN_INVALID_RIGHT Name doesn't denote correct right.
2134 * KERN_INVALID_CAPABILITY Trying to move an kobject port or an immovable right,
2135 * or moving the last ref of pinned right
2136 * KERN_INVALID_ARGUMENT Port is unguarded or guard mismatch
2137 */
2138
2139 kern_return_t
ipc_right_copyin(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_name,ipc_object_copyin_flags_t flags,ipc_object_t * objectp,ipc_port_t * sorightp,ipc_port_t * releasep,int * assertcntp,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2140 ipc_right_copyin(
2141 ipc_space_t space,
2142 mach_port_name_t name,
2143 ipc_entry_t entry,
2144 mach_msg_type_name_t msgt_name,
2145 ipc_object_copyin_flags_t flags,
2146 ipc_object_t *objectp,
2147 ipc_port_t *sorightp,
2148 ipc_port_t *releasep,
2149 int *assertcntp,
2150 mach_port_context_t context,
2151 mach_msg_guard_flags_t *guard_flags)
2152 {
2153 ipc_entry_bits_t bits;
2154 ipc_port_t port;
2155 kern_return_t kr;
2156 boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
2157 boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2158 boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
2159 boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);
2160
2161 *releasep = IP_NULL;
2162 *assertcntp = 0;
2163
2164 bits = entry->ie_bits;
2165
2166 assert(is_active(space));
2167
2168 switch (msgt_name) {
2169 case MACH_MSG_TYPE_MAKE_SEND: {
2170 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2171 goto invalid_right;
2172 }
2173
2174 port = ip_object_to_port(entry->ie_object);
2175 assert(port != IP_NULL);
2176
2177 if (ip_is_reply_port(port)) {
2178 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2179 return KERN_INVALID_CAPABILITY;
2180 }
2181
2182 ip_mq_lock(port);
2183 assert(ip_get_receiver_name(port) == name);
2184 assert(ip_in_space(port, space));
2185
2186 ipc_port_make_send_any_locked(port);
2187 ip_mq_unlock(port);
2188
2189 *objectp = ip_to_object(port);
2190 *sorightp = IP_NULL;
2191 break;
2192 }
2193
2194 case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
2195 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2196 goto invalid_right;
2197 }
2198
2199 port = ip_object_to_port(entry->ie_object);
2200 assert(port != IP_NULL);
2201
2202 if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
2203 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2204 return KERN_INVALID_CAPABILITY;
2205 }
2206
2207 ip_mq_lock(port);
2208 require_ip_active(port);
2209 assert(ip_get_receiver_name(port) == name);
2210 assert(ip_in_space(port, space));
2211
2212 ipc_port_make_sonce_locked(port);
2213 ip_mq_unlock(port);
2214
2215 *objectp = ip_to_object(port);
2216 *sorightp = IP_NULL;
2217 break;
2218 }
2219
2220 case MACH_MSG_TYPE_MOVE_RECEIVE: {
2221 bool allow_imm_recv = false;
2222 ipc_port_t request = IP_NULL;
2223 waitq_link_list_t free_l = { };
2224
2225 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2226 goto invalid_right;
2227 }
2228
2229 port = ip_object_to_port(entry->ie_object);
2230 assert(port != IP_NULL);
2231
2232 ip_mq_lock(port);
2233 require_ip_active(port);
2234 assert(ip_get_receiver_name(port) == name);
2235 assert(ip_in_space(port, space));
2236
2237 /*
2238 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
2239 * The ipc_port structure uses the kdata union of kobject and
2240 * imp_task exclusively. Thus, general use of a kobject port as
2241 * a receive right can cause type confusion in the importance
2242 * code.
2243 */
2244 if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
2245 /*
2246 * Distinguish an invalid right, e.g., trying to move
2247 * a send right as a receive right, from this
2248 * situation which is, "This is a valid receive right,
2249 * but it's also a kobject and you can't move it."
2250 */
2251 ip_mq_unlock(port);
2252 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
2253 return KERN_INVALID_CAPABILITY;
2254 }
2255
2256 if (port->ip_service_port && port->ip_splabel &&
2257 !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
2258 allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
2259 } else if (ip_is_libxpc_connection_port(port)) {
2260 allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
2261 }
2262
2263 if ((!allow_imm_recv && port->ip_immovable_receive) || port->ip_specialreply) {
2264 assert(!ip_in_space(port, ipc_space_kernel));
2265 ip_mq_unlock(port);
2266 assert(current_task() != kernel_task);
2267 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
2268 return KERN_INVALID_CAPABILITY;
2269 }
2270
2271 if (guard_flags != NULL) {
2272 kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
2273 if (kr != KERN_SUCCESS) {
2274 ip_mq_unlock(port);
2275 return kr;
2276 }
2277 }
2278
2279 if (bits & MACH_PORT_TYPE_SEND) {
2280 assert(IE_BITS_TYPE(bits) ==
2281 MACH_PORT_TYPE_SEND_RECEIVE);
2282 assert(IE_BITS_UREFS(bits) > 0);
2283 assert(port->ip_srights > 0);
2284
2285 bits &= ~MACH_PORT_TYPE_RECEIVE;
2286 bits |= MACH_PORT_TYPE_EX_RECEIVE;
2287 entry->ie_bits = bits;
2288 ipc_hash_insert(space, ip_to_object(port),
2289 name, entry);
2290 ip_reference(port);
2291 ipc_entry_modified(space, name, entry);
2292 } else {
2293 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
2294 assert(IE_BITS_UREFS(bits) == 0);
2295
2296 request = ipc_right_request_cancel_macro(space, port,
2297 name, entry);
2298 assert(!ip_is_pinned(port));
2299 ipc_entry_dealloc(space, ip_to_object(port), name, entry);
2300 }
2301
2302 /* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
2303 (void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
2304 if (guard_flags != NULL) {
2305 /* this flag will be cleared during copyout */
2306 *guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
2307 }
2308
2309 #if IMPORTANCE_INHERITANCE
2310 /*
2311 * Account for boosts the current task is going to lose when
2312 * copying this right in. Tempowner ports have either not
2313 * been accounting to any task (and therefore are already in
2314 * "limbo" state w.r.t. assertions) or to some other specific
2315 * task. As we have no way to drop the latter task's assertions
2316 * here, We'll deduct those when we enqueue it on its
2317 * destination port (see ipc_port_check_circularity()).
2318 */
2319 if (port->ip_tempowner == 0) {
2320 assert(IIT_NULL == ip_get_imp_task(port));
2321
2322 /* ports in limbo have to be tempowner */
2323 port->ip_tempowner = 1;
2324 *assertcntp = port->ip_impcount;
2325 }
2326 #endif /* IMPORTANCE_INHERITANCE */
2327
2328 ip_mq_unlock(port);
2329
2330 /*
2331 * This is unfortunate to do this while the space is locked,
2332 * but plumbing it through all callers really hurts.
2333 */
2334 waitq_link_free_list(WQT_PORT_SET, &free_l);
2335
2336 *objectp = ip_to_object(port);
2337 *sorightp = request;
2338 break;
2339 }
2340
2341 case MACH_MSG_TYPE_COPY_SEND: {
2342 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2343 goto copy_dead;
2344 }
2345
2346 /* allow for dead send-once rights */
2347
2348 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2349 goto invalid_right;
2350 }
2351
2352 assert(IE_BITS_UREFS(bits) > 0);
2353
2354 port = ip_object_to_port(entry->ie_object);
2355 assert(port != IP_NULL);
2356
2357 if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2358 bits = entry->ie_bits;
2359 *releasep = port;
2360 goto copy_dead;
2361 }
2362 /* port is locked and active */
2363
2364 if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2365 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2366 assert(port->ip_sorights > 0);
2367
2368 ip_mq_unlock(port);
2369 goto invalid_right;
2370 }
2371
2372 if (ip_is_reply_port(port)) {
2373 ip_mq_unlock(port);
2374 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2375 return KERN_INVALID_CAPABILITY;
2376 }
2377
2378 if (!allow_imm_send && ip_is_immovable_send(port)) {
2379 ip_mq_unlock(port);
2380 mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2381 return KERN_INVALID_CAPABILITY;
2382 }
2383
2384 ipc_port_copy_send_any_locked(port);
2385 ip_mq_unlock(port);
2386
2387 *objectp = ip_to_object(port);
2388 *sorightp = IP_NULL;
2389 break;
2390 }
2391
2392 case MACH_MSG_TYPE_MOVE_SEND: {
2393 ipc_port_t request = IP_NULL;
2394
2395 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2396 goto move_dead;
2397 }
2398
2399 /* allow for dead send-once rights */
2400
2401 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2402 goto invalid_right;
2403 }
2404
2405 assert(IE_BITS_UREFS(bits) > 0);
2406
2407 port = ip_object_to_port(entry->ie_object);
2408 assert(port != IP_NULL);
2409
2410 if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2411 bits = entry->ie_bits;
2412 *releasep = port;
2413 goto move_dead;
2414 }
2415 /* port is locked and active */
2416
2417 if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2418 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2419 assert(port->ip_sorights > 0);
2420 ip_mq_unlock(port);
2421 goto invalid_right;
2422 }
2423
2424 if (ip_is_reply_port(port)) {
2425 ip_mq_unlock(port);
2426 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2427 return KERN_INVALID_CAPABILITY;
2428 }
2429
2430 if (!allow_imm_send && ip_is_immovable_send(port)) {
2431 ip_mq_unlock(port);
2432 mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2433 return KERN_INVALID_CAPABILITY;
2434 }
2435
2436 if (IE_BITS_UREFS(bits) == 1) {
2437 assert(port->ip_srights > 0);
2438 if (bits & MACH_PORT_TYPE_RECEIVE) {
2439 assert(ip_get_receiver_name(port) == name);
2440 assert(ip_in_space(port, space));
2441 assert(IE_BITS_TYPE(bits) ==
2442 MACH_PORT_TYPE_SEND_RECEIVE);
2443 assert(!ip_is_pinned(port));
2444
2445 entry->ie_bits = bits & ~
2446 (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
2447 ipc_entry_modified(space, name, entry);
2448 ip_reference(port);
2449 } else {
2450 assert(IE_BITS_TYPE(bits) ==
2451 MACH_PORT_TYPE_SEND);
2452
2453 if (ip_is_pinned(port)) {
2454 ip_mq_unlock(port);
2455 mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
2456 return KERN_INVALID_CAPABILITY;
2457 }
2458
2459 request = ipc_right_request_cancel_macro(space, port,
2460 name, entry);
2461 ipc_hash_delete(space, ip_to_object(port),
2462 name, entry);
2463 ipc_entry_dealloc(space, ip_to_object(port),
2464 name, entry);
2465 /* transfer entry's reference to caller */
2466 }
2467 } else {
2468 ipc_port_copy_send_any_locked(port);
2469 /* if urefs are pegged due to overflow, leave them pegged */
2470 if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2471 entry->ie_bits = bits - 1; /* decrement urefs */
2472 }
2473 ipc_entry_modified(space, name, entry);
2474 }
2475
2476 ip_mq_unlock(port);
2477 *objectp = ip_to_object(port);
2478 *sorightp = request;
2479 break;
2480 }
2481
2482 case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
2483 ipc_port_t request;
2484
2485 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2486 goto move_dead;
2487 }
2488
2489 /* allow for dead send rights */
2490
2491 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2492 goto invalid_right;
2493 }
2494
2495 assert(IE_BITS_UREFS(bits) > 0);
2496
2497 port = ip_object_to_port(entry->ie_object);
2498 assert(port != IP_NULL);
2499
2500 if (ipc_right_check(space, port, name, entry, flags)) {
2501 bits = entry->ie_bits;
2502 *releasep = port;
2503 goto move_dead;
2504 }
2505 /*
2506 * port is locked, but may not be active:
2507 * Allow copyin of inactive ports with no dead name request and treat it
2508 * as if the copyin of the port was successful and port became inactive
2509 * later.
2510 */
2511
2512 if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2513 assert(bits & MACH_PORT_TYPE_SEND);
2514 assert(port->ip_srights > 0);
2515
2516 ip_mq_unlock(port);
2517 goto invalid_right;
2518 }
2519
2520 if (ip_is_reply_port(port) && !allow_reply_move_so) {
2521 ip_mq_unlock(port);
2522 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2523 return KERN_INVALID_CAPABILITY;
2524 }
2525
2526 if (!allow_imm_send && ip_is_immovable_send(port)) {
2527 ip_mq_unlock(port);
2528 mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2529 return KERN_INVALID_CAPABILITY;
2530 }
2531
2532 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2533 assert(IE_BITS_UREFS(bits) == 1);
2534 assert(port->ip_sorights > 0);
2535
2536 request = ipc_right_request_cancel_macro(space, port, name, entry);
2537 assert(!ip_is_pinned(port));
2538 ipc_entry_dealloc(space, ip_to_object(port), name, entry);
2539 ip_mq_unlock(port);
2540
2541 *objectp = ip_to_object(port);
2542 *sorightp = request;
2543 break;
2544 }
2545
2546 default:
2547 invalid_right:
2548 return KERN_INVALID_RIGHT;
2549 }
2550
2551 return KERN_SUCCESS;
2552
2553 copy_dead:
2554 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
2555 assert(IE_BITS_UREFS(bits) > 0);
2556 assert(entry->ie_request == IE_REQ_NONE);
2557 assert(entry->ie_object == 0);
2558
2559 if (!deadok) {
2560 goto invalid_right;
2561 }
2562
2563 *objectp = IO_DEAD;
2564 *sorightp = IP_NULL;
2565 return KERN_SUCCESS;
2566
2567 move_dead:
2568 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
2569 assert(IE_BITS_UREFS(bits) > 0);
2570 assert(entry->ie_request == IE_REQ_NONE);
2571 assert(entry->ie_object == IO_NULL);
2572
2573 if (!deadok) {
2574 goto invalid_right;
2575 }
2576
2577 if (IE_BITS_UREFS(bits) == 1) {
2578 ipc_entry_dealloc(space, IO_NULL, name, entry);
2579 } else {
2580 /* if urefs are pegged due to overflow, leave them pegged */
2581 if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2582 entry->ie_bits = bits - 1; /* decrement urefs */
2583 }
2584 ipc_entry_modified(space, name, entry);
2585 }
2586 *objectp = IO_DEAD;
2587 *sorightp = IP_NULL;
2588 return KERN_SUCCESS;
2589 }
2590
2591 /*
2592 * Routine: ipc_right_copyin_two_move_sends
2593 * Purpose:
2594 * Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2595 * and deadok == FALSE, except that this moves two
2596 * send rights at once.
2597 * Conditions:
2598 * The space is write-locked and active.
2599 * The object is returned with two refs/send rights.
2600 * Returns:
2601 * KERN_SUCCESS Acquired an object.
2602 * KERN_INVALID_RIGHT Name doesn't denote correct right.
2603 * KERN_INVALID_CAPABILITY Name does not allow copyin move send capability.
2604 */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	/* Moving two send rights requires a send right ... */
	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	/* ... with at least two user references to consume. */
	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	if (ip_is_reply_port(port)) {
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_CAPABILITY;
	}

	/* The port may have died; if so, release it and fail. */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 *     that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_any_locked(port);
		ipc_port_copy_send_any_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_any_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	/* Caller receives the object with two send rights/refs. */
	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2715
2716
2717 /*
2718 * Routine: ipc_right_copyin_two
2719 * Purpose:
2720 * Like ipc_right_copyin with two dispositions,
2721 * each of which results in a send or send-once right,
2722 * and deadok = FALSE.
2723 * Conditions:
2724 * The space is write-locked and active.
2725 * The object is returned with two refs/rights.
2726 * Msgt_one refers to the dest_type.
2727 * Copyin flags are currently only used in the context of send once rights.
2728 * Returns:
2729 * KERN_SUCCESS Acquired an object.
2730 * KERN_INVALID_RIGHT Name doesn't denote correct right(s).
2731 * KERN_INVALID_CAPABILITY Name doesn't denote correct right for msgt_two.
2732 */
2733 kern_return_t
ipc_right_copyin_two(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_one,mach_msg_type_name_t msgt_two,ipc_object_copyin_flags_t flags_one,ipc_object_copyin_flags_t flags_two,ipc_object_t * objectp,ipc_port_t * sorightp,ipc_port_t * releasep)2734 ipc_right_copyin_two(
2735 ipc_space_t space,
2736 mach_port_name_t name,
2737 ipc_entry_t entry,
2738 mach_msg_type_name_t msgt_one,
2739 mach_msg_type_name_t msgt_two,
2740 ipc_object_copyin_flags_t flags_one, /* Used only for send once rights. */
2741 ipc_object_copyin_flags_t flags_two, /* Used only for send once rights. */
2742 ipc_object_t *objectp,
2743 ipc_port_t *sorightp,
2744 ipc_port_t *releasep)
2745 {
2746 kern_return_t kr;
2747 int assertcnt = 0;
2748
2749 assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
2750 assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));
2751
2752 /*
2753 * This is a little tedious to make atomic, because
2754 * there are 25 combinations of valid dispositions.
2755 * However, most are easy.
2756 */
2757
2758 /*
2759 * If either is move-sonce, then there must be an error.
2760 */
2761 if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
2762 msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2763 return KERN_INVALID_RIGHT;
2764 }
2765
2766 if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
2767 (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
2768 (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
2769 (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
2770 /*
2771 * One of the dispositions needs a receive right.
2772 *
2773 * If the copyin below succeeds, we know the receive
2774 * right is there (because the pre-validation of
2775 * the second disposition already succeeded in our
2776 * caller).
2777 *
2778 * Hence the port is not in danger of dying.
2779 */
2780 ipc_object_t object_two;
2781
2782 flags_one = flags_one | IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
2783 kr = ipc_right_copyin(space, name, entry,
2784 msgt_one, flags_one,
2785 objectp, sorightp, releasep,
2786 &assertcnt, 0, NULL);
2787 assert(assertcnt == 0);
2788 if (kr != KERN_SUCCESS) {
2789 return kr;
2790 }
2791
2792 assert(IO_VALID(*objectp));
2793 assert(*sorightp == IP_NULL);
2794 assert(*releasep == IP_NULL);
2795
2796 /*
2797 * Now copyin the second (previously validated)
2798 * disposition. The result can't be a dead port,
2799 * as no valid disposition can make us lose our
2800 * receive right.
2801 */
2802 kr = ipc_right_copyin(space, name, entry,
2803 msgt_two, flags_two,
2804 &object_two, sorightp, releasep,
2805 &assertcnt, 0, NULL);
2806 assert(assertcnt == 0);
2807 assert(kr == KERN_SUCCESS);
2808 assert(*sorightp == IP_NULL);
2809 assert(*releasep == IP_NULL);
2810 assert(object_two == *objectp);
2811 assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
2812 } else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
2813 (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
2814 /*
2815 * This is an easy case. Just use our
2816 * handy-dandy special-purpose copyin call
2817 * to get two send rights for the price of one.
2818 */
2819 kr = ipc_right_copyin_two_move_sends(space, name, entry,
2820 objectp, sorightp,
2821 releasep);
2822 if (kr != KERN_SUCCESS) {
2823 return kr;
2824 }
2825 } else {
2826 mach_msg_type_name_t msgt_name;
2827
2828 /*
2829 * Must be either a single move-send and a
2830 * copy-send, or two copy-send dispositions.
2831 * Use the disposition with the greatest side
2832 * effects for the actual copyin - then just
2833 * duplicate the send right you get back.
2834 */
2835 if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
2836 msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
2837 msgt_name = MACH_MSG_TYPE_MOVE_SEND;
2838 } else {
2839 msgt_name = MACH_MSG_TYPE_COPY_SEND;
2840 }
2841
2842 kr = ipc_right_copyin(space, name, entry,
2843 msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
2844 objectp, sorightp, releasep,
2845 &assertcnt, 0, NULL);
2846 assert(assertcnt == 0);
2847 if (kr != KERN_SUCCESS) {
2848 return kr;
2849 }
2850
2851 /*
2852 * Copy the right we got back. If it is dead now,
2853 * that's OK. Neither right will be usable to send
2854 * a message anyway.
2855 *
2856 * Note that the port could be concurrently moved
2857 * outside of the space as a descriptor, and then
2858 * destroyed, which would not happen under the space lock.
2859 *
2860 * It means we can't use ipc_port_copy_send() which
2861 * may fail if the port died.
2862 */
2863 io_lock(*objectp);
2864 ipc_port_copy_send_any_locked(ip_object_to_port(*objectp));
2865 io_unlock(*objectp);
2866 }
2867
2868 return KERN_SUCCESS;
2869 }
2870
2871
2872 /*
2873 * Routine: ipc_right_copyout
2874 * Purpose:
2875 * Copyout a capability to a space.
2876 * If successful, consumes a ref for the object.
2877 *
2878 * Always succeeds when given a newly-allocated entry,
2879 * because user-reference overflow isn't a possibility.
2880 *
2881 * If copying out the object would cause the user-reference
2882 * count in the entry to overflow, then the user-reference
2883 * count is left pegged to its maximum value and the copyout
2884 * succeeds anyway.
2885 * Conditions:
2886 * The space is write-locked and active.
2887 * The object is locked and active.
2888 * The object is unlocked; the space isn't.
2889 * Returns:
2890 * KERN_SUCCESS Copied out capability.
2891 */
2892
kern_return_t
ipc_right_copyout(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	ipc_object_t            object)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	bits = entry->ie_bits;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = ip_object_to_port(object);

	/*
	 * Pinned copyout: only legal for an immovable send right landing
	 * in an immovable, pinned task; mark the port pinned in it.
	 */
	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		/* Send-once rights always land in a fresh (NONE) entry. */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (port->ip_specialreply) {
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			/* Entry already holds a send right: merge urefs. */
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				port->ip_srights--;
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			port->ip_srights--;
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		/* snapshot boosts to deduct from the old destination below */
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		/* Re-guard the moved receive right in the new space. */
		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_msg_addr;
			*context = port->ip_context;
			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (ip_is_libxpc_connection_port(port)) {
			/*
			 * There are 3 ways to reach here.
			 * 1. A libxpc client successfully sent this receive right to a named service
			 * and we are copying out in that service's ipc space.
			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
			 *
			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
			 */
			port->ip_immovable_receive = 1;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/*
			 * This flag gets reset during all 3 ways described above for libxpc connection port.
			 * The only difference is launchd acts as an initiator instead of a libxpc client.
			 */
			if (service_port_defense_enabled) {
				port->ip_immovable_receive = 1;
			}

			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesn't end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}

			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			/* release builds only enforce the check for initproc */
			if (get_bsdtask_info(current_task()) != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port. They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		panic("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
3138