1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005-2006 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_right.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions to manipulate IPC capabilities.
71 */
72
73 #include <mach/boolean.h>
74 #include <mach/kern_return.h>
75 #include <mach/port.h>
76 #include <mach/message.h>
77 #include <kern/assert.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/misc_protos.h>
80 #include <kern/policy_internal.h>
81 #include <ipc/port.h>
82 #include <ipc/ipc_entry.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_object.h>
85 #include <ipc/ipc_hash.h>
86 #include <ipc/ipc_port.h>
87 #include <ipc/ipc_pset.h>
88 #include <ipc/ipc_right.h>
89 #include <ipc/ipc_notify.h>
90 #include <ipc/ipc_importance.h>
91 #include <ipc/ipc_service_port.h>
92 #include <security/mac_mach_internal.h>
93
/*
 * NOTE(review): presumably the init/launchd process handle declared by BSD —
 * confirm against bsd/kern; only the declaration is visible here.
 */
extern void * XNU_PTRAUTH_SIGNED_PTR("initproc") initproc;

/* Boot-arg tunables controlling service/reply port hardening behavior. */
TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", false);
static TUNABLE(bool, reply_port_semantics, "reply_port_semantics", true);
98
99 /*
100 * Routine: ipc_right_lookup_read
101 * Purpose:
102 * Finds an entry in a space, given the name.
103 * Conditions:
104 * Nothing locked.
105 * If an object is found, it is locked and active.
106 * Returns:
107 * KERN_SUCCESS Found an entry.
108 * KERN_INVALID_TASK The space is dead.
109 * KERN_INVALID_NAME Name doesn't exist in space.
110 */
kern_return_t
ipc_right_lookup_read(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_bits_t        *bitsp,
	ipc_object_t            *objectp)
{
	mach_port_index_t index;
	ipc_entry_table_t table;
	ipc_entry_t entry;
	ipc_object_t object;
	kern_return_t kr;

	/* index 0 is never a valid entry index; reject it before touching SMR */
	index = MACH_PORT_INDEX(name);
	if (__improbable(index == 0)) {
		*bitsp = 0;
		*objectp = IO_NULL;
		return KERN_INVALID_NAME;
	}

	smr_global_enter();

	/*
	 * Acquire a (possibly stale) pointer to the table,
	 * and guard it so that it can't be deallocated while we use it.
	 *
	 * smr_global_enter() has the property that it strongly serializes
	 * after any store-release. This is important because it means that if
	 * one considers this (broken) userspace usage:
	 *
	 *	Thread 1:
	 *	- makes a semaphore, gets name 0x1003
	 *	- stores that name to a global `sema` in userspace
	 *
	 *	Thread 2:
	 *	- spins to observe `sema` becoming non 0
	 *	- calls semaphore_wait() on 0x1003
	 *
	 * Then, because in order to return 0x1003 this thread issued
	 * a store-release (when calling is_write_unlock()),
	 * then this smr_entered_load() can't possibly observe a table
	 * pointer that is older than the one that was current when the
	 * semaphore was made.
	 *
	 * This fundamental property allows us to never loop.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		/* a NULL table means the space has been terminated */
		kr = KERN_INVALID_TASK;
		goto out_put;
	}
	entry = ipc_entry_table_get(table, index);
	if (__improbable(entry == NULL)) {
		/* name's index is beyond the table's current size */
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Note: this should be an atomic load, but PAC and atomics
	 * don't interact well together.
	 */
	object = entry->ie_volatile_object;

	/*
	 * Attempt to lock an object that lives in this entry.
	 * It might fail or be a completely different object by now.
	 *
	 * Make sure that acquiring the lock is fully ordered after any
	 * lock-release (using os_atomic_barrier_before_lock_acquire()).
	 * This allows us to always reliably observe space termination below.
	 */
	os_atomic_barrier_before_lock_acquire();
	if (__improbable(object == IO_NULL || !io_lock_allow_invalid(object))) {
		kr = KERN_INVALID_NAME;
		goto out_put;
	}

	/*
	 * Now that we hold the object lock, we are preventing any entry
	 * in this space for this object to be mutated.
	 *
	 * If the space didn't grow after we acquired our hazardous reference,
	 * and before a mutation of the entry, then holding the object lock
	 * guarantees we will observe the truth of ie_bits, ie_object and
	 * ie_request (those are always mutated with the object lock held).
	 *
	 * However this ordering is problematic:
	 * - [A]cquisition of the table pointer
	 * - [G]rowth of the space (making the table pointer in [A] stale)
	 * - [M]utation of the entry
	 * - [L]ocking of the object read through [A].
	 *
	 * The space lock is held for both [G] and [M], and the object lock
	 * is held for [M], which means that once we lock the object we can
	 * observe if [G] happened by reloading the table pointer.
	 *
	 * We might still fail to observe any growth operation that happened
	 * after the last mutation of this object's entry, because holding
	 * an object lock doesn't guarantee anything about the liveness
	 * of the space table pointer. This is not a problem at all: by
	 * definition, those didn't affect the state of the entry.
	 *
	 * TODO: a data-structure where the entries are grown by "slabs",
	 *       would allow for the address of an ipc_entry_t to never
	 *       change once it exists in a space and would avoid a reload
	 *       (as well as making space growth faster).
	 *       We however still need to check for termination.
	 */
	table = smr_entered_load(&space->is_table);
	if (__improbable(table == NULL)) {
		kr = KERN_INVALID_TASK;
		goto out_put_unlock;
	}

	/*
	 * Tables never shrink so we don't need to validate the length twice.
	 */
	entry = ipc_entry_table_get_nocheck(table, index);

	/*
	 * Now that we hold the lock and have a "fresh enough" table pointer,
	 * validate if this entry is what we think it is.
	 *
	 * To the risk of being repetitive, we still need to protect
	 * those accesses under SMR, because subsequent
	 * table growths might retire the memory. However we know
	 * those growths will have left our entry unchanged.
	 */
	if (__improbable(entry->ie_object != object)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* the generation must match and the entry must hold a live right */
	ipc_entry_bits_t bits = entry->ie_bits;
	if (__improbable(IE_BITS_GEN(bits) != MACH_PORT_GEN(name) ||
	    IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE)) {
		kr = KERN_INVALID_NAME;
		goto out_put_unlock;
	}

	/* Done with hazardous accesses to the table */
	smr_global_leave();

	/* success: object is returned locked and active */
	*bitsp = bits;
	*objectp = object;
	return KERN_SUCCESS;

out_put_unlock:
	ipc_object_unlock(object);
out_put:
	smr_global_leave();
	return kr;
}
264
265 /*
266 * Routine: ipc_right_lookup_write
267 * Purpose:
268 * Finds an entry in a space, given the name.
269 * Conditions:
270 * Nothing locked. If successful, the space is write-locked.
271 * Returns:
272 * KERN_SUCCESS Found an entry.
273 * KERN_INVALID_TASK The space is dead.
274 * KERN_INVALID_NAME Name doesn't exist in space.
275 */
276
277 kern_return_t
ipc_right_lookup_write(ipc_space_t space,mach_port_name_t name,ipc_entry_t * entryp)278 ipc_right_lookup_write(
279 ipc_space_t space,
280 mach_port_name_t name,
281 ipc_entry_t *entryp)
282 {
283 ipc_entry_t entry;
284
285 assert(space != IS_NULL);
286
287 is_write_lock(space);
288
289 if (!is_active(space)) {
290 is_write_unlock(space);
291 return KERN_INVALID_TASK;
292 }
293
294 if ((entry = ipc_entry_lookup(space, name)) == IE_NULL) {
295 is_write_unlock(space);
296 return KERN_INVALID_NAME;
297 }
298
299 *entryp = entry;
300 return KERN_SUCCESS;
301 }
302
303 /*
304 * Routine: ipc_right_lookup_two_write
305 * Purpose:
306 * Like ipc_right_lookup except that it returns two
307 * entries for two different names that were looked
308 * up under the same space lock.
309 * Conditions:
310 * Nothing locked. If successful, the space is write-locked.
311 * Returns:
312 * KERN_INVALID_TASK The space is dead.
313 * KERN_INVALID_NAME Name doesn't exist in space.
314 */
315
316 kern_return_t
ipc_right_lookup_two_write(ipc_space_t space,mach_port_name_t name1,ipc_entry_t * entryp1,mach_port_name_t name2,ipc_entry_t * entryp2)317 ipc_right_lookup_two_write(
318 ipc_space_t space,
319 mach_port_name_t name1,
320 ipc_entry_t *entryp1,
321 mach_port_name_t name2,
322 ipc_entry_t *entryp2)
323 {
324 ipc_entry_t entry1;
325 ipc_entry_t entry2;
326
327 assert(space != IS_NULL);
328
329 is_write_lock(space);
330
331 if (!is_active(space)) {
332 is_write_unlock(space);
333 return KERN_INVALID_TASK;
334 }
335
336 if ((entry1 = ipc_entry_lookup(space, name1)) == IE_NULL) {
337 is_write_unlock(space);
338 mach_port_guard_exception(name1, 0, 0, kGUARD_EXC_INVALID_NAME);
339 return KERN_INVALID_NAME;
340 }
341 if ((entry2 = ipc_entry_lookup(space, name2)) == IE_NULL) {
342 is_write_unlock(space);
343 mach_port_guard_exception(name2, 0, 0, kGUARD_EXC_INVALID_NAME);
344 return KERN_INVALID_NAME;
345 }
346 *entryp1 = entry1;
347 *entryp2 = entry2;
348 return KERN_SUCCESS;
349 }
350
351 /*
352 * Routine: ipc_right_reverse
353 * Purpose:
354 * Translate (space, object) -> (name, entry).
355 * Only finds send/receive rights.
356 * Returns TRUE if an entry is found; if so,
357 * the object active.
358 * Conditions:
359 * The space must be locked (read or write) and active.
360 * The port is locked and active
361 */
362
363 bool
ipc_right_reverse(ipc_space_t space,ipc_object_t object,mach_port_name_t * namep,ipc_entry_t * entryp)364 ipc_right_reverse(
365 ipc_space_t space,
366 ipc_object_t object,
367 mach_port_name_t *namep,
368 ipc_entry_t *entryp)
369 {
370 ipc_port_t port;
371 mach_port_name_t name;
372 ipc_entry_t entry;
373
374 /* would switch on io_otype to handle multiple types of object */
375
376 assert(is_active(space));
377 assert(io_otype(object) == IOT_PORT);
378
379 port = ip_object_to_port(object);
380 require_ip_active(port);
381
382 ip_mq_lock_held(port);
383
384 if (ip_in_space(port, space)) {
385 name = ip_get_receiver_name(port);
386 assert(name != MACH_PORT_NULL);
387
388 entry = ipc_entry_lookup(space, name);
389
390 assert(entry != IE_NULL);
391 assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
392 assert(port == ip_object_to_port(entry->ie_object));
393
394 *namep = name;
395 *entryp = entry;
396 return true;
397 }
398
399 if (ipc_hash_lookup(space, ip_to_object(port), namep, entryp)) {
400 entry = *entryp;
401 assert(entry != IE_NULL);
402 assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_SEND);
403 assert(port == ip_object_to_port(entry->ie_object));
404
405 return true;
406 }
407
408 return false;
409 }
410
411 /*
 *	Routine:	ipc_right_request_alloc
413 * Purpose:
414 * Make a dead-name request, returning the previously
415 * registered send-once right. If notify is IP_NULL,
416 * just cancels the previously registered request.
417 *
418 * Conditions:
419 * Nothing locked. May allocate memory.
420 * Only consumes/returns refs if successful.
421 * Returns:
422 * KERN_SUCCESS Made/canceled dead-name request.
423 * KERN_INVALID_TASK The space is dead.
424 * KERN_INVALID_NAME Name doesn't exist in space.
425 * KERN_INVALID_RIGHT Name doesn't denote port/dead rights.
 *		KERN_INVALID_ARGUMENT	Name denotes dead name, but
 *			options is 0 or notify is IP_NULL.
428 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
429 */
430
kern_return_t
ipc_right_request_alloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_port_request_opts_t options,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_request_index_t prev_request;
	ipc_port_t previous = IP_NULL;
	ipc_entry_t entry;
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	bool will_arm = false;
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Retry loop: the only way around is `continue` after growing the
	 * port's request table (which requires dropping all locks).
	 */
	for (;;) {
		ipc_port_t port = IP_NULL;

		kr = ipc_right_lookup_write(space, name, &entry);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* space is write-locked and active */

		prev_request = entry->ie_request;

		/* if nothing to do or undo, we're done */
		if (notify == IP_NULL && prev_request == IE_REQ_NONE) {
			is_write_unlock(space);
			*previousp = IP_NULL;
			return KERN_SUCCESS;
		}

		/* see if the entry is of proper type for requests */
		if (entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) {
			ipc_port_request_index_t new_request;

			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active */

				/* if no new request, just cancel previous */
				if (notify == IP_NULL) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);
					break;
				}

				/*
				 * send-once rights, kernel objects, and non-full other queues
				 * fire immediately (if immediate specified).
				 */
				if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
				    ((entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE) ||
				    ip_in_space(port, ipc_space_kernel) || !ip_full(port))) {
					if (prev_request != IE_REQ_NONE) {
						previous = ipc_port_request_cancel(port, name, prev_request);
						entry->ie_request = IE_REQ_NONE;
					}
					ip_mq_unlock(port);
					ipc_entry_modified(space, name, entry);
					is_write_unlock(space);

					ipc_notify_send_possible(notify, name);
					break;
				}

				/*
				 * If there is a previous request, free it. Any subsequent
				 * allocation cannot fail, thus assuring an atomic swap.
				 */
				if (prev_request != IE_REQ_NONE) {
					previous = ipc_port_request_cancel(port, name, prev_request);
				}

#if IMPORTANCE_INHERITANCE
				/* arming the first send-possible request may donate importance */
				will_arm = port->ip_sprequests == 0 &&
				    options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK);
#endif /* IMPORTANCE_INHERITANCE */
				kr = ipc_port_request_alloc(port, name, notify,
				    options, &new_request);

				if (kr != KERN_SUCCESS) {
					assert(previous == IP_NULL);
					is_write_unlock(space);

					/* grow the request table and retry from the top */
					kr = ipc_port_request_grow(port);
					/* port is unlocked */

					if (kr != KERN_SUCCESS) {
						return kr;
					}

					continue;
				}

				assert(new_request != IE_REQ_NONE);
				entry->ie_request = new_request;
				ipc_entry_modified(space, name, entry);
				is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
				if (will_arm &&
				    port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    task_is_importance_donor(current_task())) {
					/*
					 * NOTE(review): ipc_port_importance_delta() appears to
					 * consume the port lock when it returns TRUE; we only
					 * unlock here on FALSE — confirm against its contract.
					 */
					if (ipc_port_importance_delta(port, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
						ip_mq_unlock(port);
					}
				} else
#endif /* IMPORTANCE_INHERITANCE */
				ip_mq_unlock(port);

				break;
			}
			/* entry may have changed to dead-name by ipc_right_check() */
		}

		/* treat send_possible requests as immediate w.r.t. dead-name */
		if (options && notify != IP_NULL &&
		    (entry->ie_bits & MACH_PORT_TYPE_DEAD_NAME)) {
			mach_port_urefs_t urefs = IE_BITS_UREFS(entry->ie_bits);

			assert(urefs > 0);

			/* leave urefs pegged to maximum if it overflowed */
			if (urefs < MACH_PORT_UREFS_MAX) {
				(entry->ie_bits)++; /* increment urefs */
			}
			ipc_entry_modified(space, name, entry);

			is_write_unlock(space);

			/* drop the ref ipc_right_check() left us for the dead port */
			if (port != IP_NULL) {
				ip_release(port);
			}

			ipc_notify_dead_name(notify, name);
			previous = IP_NULL;
			break;
		}

		/* wrong right type: dead/port rights get INVALID_ARGUMENT here */
		kr = (entry->ie_bits & MACH_PORT_TYPE_PORT_OR_DEAD) ?
		    KERN_INVALID_ARGUMENT : KERN_INVALID_RIGHT;

		is_write_unlock(space);

		if (port != IP_NULL) {
			ip_release(port);
		}

		return kr;
	}

	*previousp = previous;
	return KERN_SUCCESS;
}
597
598 /*
599 * Routine: ipc_right_request_cancel
600 * Purpose:
601 * Cancel a notification request and return the send-once right.
602 * Afterwards, entry->ie_request == 0.
603 * Conditions:
604 * The space must be write-locked; the port must be locked.
605 * The port and space must be active.
606 */
607
608 ipc_port_t
ipc_right_request_cancel(ipc_space_t space,ipc_port_t port,mach_port_name_t name,ipc_entry_t entry)609 ipc_right_request_cancel(
610 ipc_space_t space,
611 ipc_port_t port,
612 mach_port_name_t name,
613 ipc_entry_t entry)
614 {
615 ipc_port_t previous;
616
617 require_ip_active(port);
618 assert(is_active(space));
619 assert(port == ip_object_to_port(entry->ie_object));
620
621 if (entry->ie_request == IE_REQ_NONE) {
622 return IP_NULL;
623 }
624
625 previous = ipc_port_request_cancel(port, name, entry->ie_request);
626 entry->ie_request = IE_REQ_NONE;
627 ipc_entry_modified(space, name, entry);
628 return previous;
629 }
630
631 /*
632 * Routine: ipc_right_inuse
633 * Purpose:
634 * Check if an entry is being used.
635 * Returns TRUE if it is.
636 * Conditions:
637 * The space is write-locked and active.
638 */
639
640 bool
ipc_right_inuse(ipc_entry_t entry)641 ipc_right_inuse(
642 ipc_entry_t entry)
643 {
644 return IE_BITS_TYPE(entry->ie_bits) != MACH_PORT_TYPE_NONE;
645 }
646
647 /*
648 * Routine: ipc_right_check
649 * Purpose:
650 * Check if the port has died. If it has,
651 * and IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE is not
652 * passed and it is not a send once right then
653 * clean up the entry and return TRUE.
654 * Conditions:
655 * The space is write-locked; the port is not locked.
656 * If returns FALSE, the port is also locked.
657 * Otherwise, entry is converted to a dead name.
658 *
659 * Caller is responsible for a reference to port if it
660 * had died (returns TRUE).
661 */
662
boolean_t
ipc_right_check(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_copyin_flags_t flags)
{
	ipc_entry_bits_t bits;

	assert(is_active(space));
	assert(port == ip_object_to_port(entry->ie_object));

	ip_mq_lock(port);
	/*
	 * Nothing to clean up if the port is still active.  A dead
	 * send-once right may also be left intact when the caller
	 * explicitly allows it and no notification request is armed.
	 */
	if (ip_active(port) ||
	    ((flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE) &&
	    entry->ie_request == IE_REQ_NONE &&
	    (entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE))) {
		return FALSE; /* port stays locked */
	}

	/* this was either a pure send right or a send-once right */

	bits = entry->ie_bits;
	assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
	assert(IE_BITS_UREFS(bits) > 0);

	/* drop the port-side count matching the right type held in the entry */
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
		assert(IE_BITS_UREFS(bits) > 0);
		ip_srights_dec(port);
	} else {
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);
		ip_sorights_dec(port);
	}

	/*
	 * delete SEND rights from ipc hash.
	 */

	if ((bits & MACH_PORT_TYPE_SEND) != 0) {
		ipc_hash_delete(space, ip_to_object(port), name, entry);
	}

	/* convert entry to dead name */
	bits = (bits & ~IE_BITS_TYPE_MASK) | MACH_PORT_TYPE_DEAD_NAME;

	/*
	 * If there was a notification request outstanding on this
	 * name, and the port went dead, that notification
	 * must already be on its way up from the port layer.
	 *
	 * Add the reference that the notification carries. It
	 * is done here, and not in the notification delivery,
	 * because the latter doesn't have a space reference and
	 * trying to actually move a send-right reference would
	 * get short-circuited into a MACH_PORT_DEAD by IPC. Since
	 * all calls that deal with the right eventually come
	 * through here, it has the same result.
	 *
	 * Once done, clear the request index so we only account
	 * for it once.
	 */
	if (entry->ie_request != IE_REQ_NONE) {
		if (ipc_port_request_type(port, name, entry->ie_request) != 0) {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				bits++; /* increment urefs */
			}
		}
		entry->ie_request = IE_REQ_NONE;
	}
	/* publish the dead-name conversion; ie_object goes NULL */
	entry->ie_bits = bits;
	entry->ie_object = IO_NULL;

	ip_mq_unlock(port);

	ipc_entry_modified(space, name, entry);

	/* caller now owns a reference on the (dead) port */
	return TRUE;
}
745
746 /*
747 * Routine: ipc_right_terminate
748 * Purpose:
749 * Cleans up an entry in a terminated space.
750 * The entry isn't deallocated or removed
751 * from reverse hash tables.
752 * Conditions:
753 * The space is dead and unlocked.
754 */
755
void
ipc_right_terminate(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	mach_port_type_t type;
	ipc_object_t object;

	assert(!is_active(space));

	type = IE_BITS_TYPE(entry->ie_bits);
	object = entry->ie_object;

	/*
	 * Hollow the entry under the port lock,
	 * in order to avoid dangling pointers.
	 *
	 * ipc_right_lookup_read() doesn't need it for correctness,
	 * but ipc_space_terminate() as it now goes through 2 rounds
	 * of termination (receive rights first, the rest second).
	 */

	if (type != MACH_PORT_TYPE_DEAD_NAME) {
		assert(object != IO_NULL);
		io_lock(object);
	}
	/* keep only generation/roll bits: the entry no longer holds a right */
	entry->ie_object = IO_NULL;
	entry->ie_bits &= (IE_BITS_GEN_MASK | IE_BITS_ROLL_MASK);

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		/* nothing to release: dead names carry no object or request */
		assert(entry->ie_request == IE_REQ_NONE);
		assert(object == IO_NULL);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(ips_active(pset));

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(object);
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		/* a dead port needs no bookkeeping, just drop our ref */
		if (!ip_active(port)) {
			ip_mq_unlock(port);
			ip_release(port);
			break;
		}

		/*
		 * same as ipc_right_request_cancel(),
		 * except for calling ipc_entry_modified()
		 * as the space is now table-less.
		 */
		if (entry->ie_request != IE_REQ_NONE) {
			request = ipc_port_request_cancel(port, name,
			    entry->ie_request);
			entry->ie_request = IE_REQ_NONE;
		}

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;

			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port could be dead, in-transit, or in a foreign space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/*
		 * For both no-senders and port-deleted notifications,
		 * look at whether the destination is still active.
		 * If it isn't, just swallow the send-once right.
		 *
		 * This is a racy check, but this ok because we can only
		 * fail to notice that the port is now inactive, which
		 * only causes us to fail at an optimization.
		 *
		 * The purpose here is to avoid sending messages
		 * to receive rights that used to be in this space,
		 * which we can't fail to observe.
		 */
		if (nsrequest.ns_notify != IP_NULL) {
			if (ip_active(nsrequest.ns_notify)) {
				ipc_notify_no_senders_emit(nsrequest);
			} else {
				ipc_notify_no_senders_consume(nsrequest);
			}
		}

		if (request != IP_NULL) {
			if (ip_active(request)) {
				ipc_notify_port_deleted(request, name);
			} else {
				ipc_port_release_sonce(request);
			}
		}
		break;
	}

	default:
		panic("ipc_right_terminate: strange type - 0x%x", type);
	}
}
887
888 /*
889 * Routine: ipc_right_destroy
890 * Purpose:
891 * Destroys an entry in a space.
892 * Conditions:
893 * The space is write-locked (returns unlocked).
894 * The space must be active.
895 * Returns:
896 * KERN_SUCCESS The entry was destroyed.
897 * KERN_INVALID_CAPABILITY The port is pinned.
898 * KERN_INVALID_RIGHT Port guard violation.
899 */
900
kern_return_t
ipc_right_destroy(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	boolean_t               check_guard,
	uint64_t                guard)
{
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	/* capture the right's type, then strip it from the live entry */
	bits = entry->ie_bits;
	entry->ie_bits &= ~IE_BITS_TYPE_MASK;
	type = IE_BITS_TYPE(bits);

	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_DEAD_NAME:
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		ipc_entry_dealloc(space, IO_NULL, name, entry);
		is_write_unlock(space);
		break;

	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset = ips_object_to_pset(entry->ie_object);

		assert(entry->ie_request == IE_REQ_NONE);
		assert(pset != IPS_NULL);

		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_SEND:
	case MACH_PORT_TYPE_RECEIVE:
	case MACH_PORT_TYPE_SEND_RECEIVE:
	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t port = ip_object_to_port(entry->ie_object);
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t request;

		assert(port != IP_NULL);

		if (type == MACH_PORT_TYPE_SEND) {
			/* pinned send rights may not be destroyed: raise a guard */
			if (ip_is_pinned(port)) {
				assert(ip_active(port));
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DESTROY);
				return KERN_INVALID_CAPABILITY;
			}
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		ip_mq_lock(port);

		/* dead port: just free the entry and drop our reference */
		if (!ip_active(port)) {
			assert((type & MACH_PORT_TYPE_RECEIVE) == 0);
			entry->ie_request = IE_REQ_NONE;
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);
			ip_release(port);
			break;
		}

		/* For receive rights, check for guarding */
		if ((type & MACH_PORT_TYPE_RECEIVE) &&
		    (check_guard) && (port->ip_guarded) &&
		    (guard != port->ip_context)) {
			/* Guard Violation */
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
			return KERN_INVALID_RIGHT;
		}


		/* cancel any notification request before freeing the entry */
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		if (type & MACH_PORT_TYPE_SEND) {
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
		}

		if (type & MACH_PORT_TYPE_RECEIVE) {
			require_ip_active(port);
			assert(ip_in_space(port, space));

			ipc_port_destroy(port); /* clears receiver, consumes our ref, unlocks */
		} else if (type & MACH_PORT_TYPE_SEND_ONCE) {
			assert(port->ip_sorights > 0);
			port->ip_reply_context = 0;
			ipc_notify_send_once_and_unlock(port); /* consumes our ref */
		} else {
			/* port is dead, in-transit, or in another space */
			assert(!ip_in_space(port, space));

			ip_mq_unlock(port);
			ip_release(port);
		}

		/* deliver notifications only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}


		break;
	}

	default:
		panic("ipc_right_destroy: strange type");
	}

	return KERN_SUCCESS;
}
1037
1038 /*
1039 * Routine: ipc_right_dealloc
1040 * Purpose:
1041 * Releases a send/send-once/dead-name/port_set user ref.
1042 * Like ipc_right_delta with a delta of -1,
1043 * but looks at the entry to determine the right.
1044 * Conditions:
1045 * The space is write-locked, and is unlocked upon return.
1046 * The space must be active.
1047 * Returns:
1048 * KERN_SUCCESS A user ref was released.
1049 * KERN_INVALID_RIGHT Entry has wrong type.
1050 * KERN_INVALID_CAPABILITY Deallocating a pinned right.
1051 */
1052
kern_return_t
ipc_right_dealloc(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry)
{
	/*
	 * "port" is set by the send/send-once cases before jumping to
	 * "dead_name"; it carries a reference (left by ipc_right_check)
	 * that must be dropped only after the space lock is released.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;
	mach_port_type_t type;

	bits = entry->ie_bits;
	type = IE_BITS_TYPE(bits);


	assert(is_active(space));

	switch (type) {
	case MACH_PORT_TYPE_PORT_SET: {
		ipc_pset_t pset;

		/* port sets carry no user refs and no dead-name request */
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		/* free the name while both the space and pset are locked */
		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_TYPE_DEAD_NAME: {
dead_name:
		/*
		 * Reached directly for an entry that is already a dead name,
		 * or via goto from the send/send-once cases when
		 * ipc_right_check() discovered the port died; in the latter
		 * case "bits" was refreshed and "port" holds a reference
		 * released below once the space is unlocked.
		 */

		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_request == IE_REQ_NONE);
		assert(entry->ie_object == IO_NULL);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last user ref: the entry itself goes away */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ipc_entry_modified(space, name, entry);
		}
		is_write_unlock(space);

		/* release any port that got converted to dead name below */
		if (port != IP_NULL) {
			ip_release(port);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_ONCE: {
		ipc_port_t request;

		/* a send-once right always has exactly one user ref */
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died: entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name; /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		/* cancel any dead-name request registered under this name */
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and our ref, unlocks the port */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND: {
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died: entry was converted to a dead name */
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			goto dead_name; /* it will release port */
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* dropping the last uref destroys the send right */
			if (ip_is_pinned(port)) {
				/* pinned rights may not lose their last send right */
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				/* last send right anywhere: arm no-senders */
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			/* remove the (port -> name) reverse mapping, then the entry */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
			ip_mq_unlock(port);
			is_write_unlock(space);

			ip_release(port);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
			ip_mq_unlock(port);
			ipc_entry_modified(space, name, entry);
			is_write_unlock(space);
		}

		/* emitted only after all locks are dropped */
		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_TYPE_SEND_RECEIVE: {
		ipc_notify_nsenders_t nsrequest = { };

		assert(IE_BITS_UREFS(bits) > 0);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));
		assert(port->ip_srights > 0);

		if (IE_BITS_UREFS(bits) == 1) {
			/* last uref: drop the send right, keep the receive right */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			/* if urefs are pegged due to overflow, leave them pegged */
			if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
				entry->ie_bits = bits - 1; /* decrement urefs */
			}
		}
		ip_mq_unlock(port);

		ipc_entry_modified(space, name, entry);
		is_write_unlock(space);

		ipc_notify_no_senders_emit(nsrequest);
		break;
	}

	default:
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	return KERN_SUCCESS;
}
1253
1254 /*
1255 * Routine: ipc_right_delta
1256 * Purpose:
1257 * Modifies the user-reference count for a right.
1258 * May deallocate the right, if the count goes to zero.
1259 * Conditions:
1260 * The space is write-locked, and is unlocked upon return.
1261 * The space must be active.
1262 * Returns:
1263 * KERN_SUCCESS Count was modified.
1264 * KERN_INVALID_RIGHT Entry has wrong type.
1265 * KERN_INVALID_VALUE Bad delta for the right.
1266 * KERN_INVALID_CAPABILITY Deallocating a pinned right.
1267 */
1268
kern_return_t
ipc_right_delta(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_right_t       right,
	mach_port_delta_t       delta)
{
	/*
	 * "port" doubles as the deferred-release pointer for the
	 * invalid_right exit path: a reference taken while the space
	 * lock was held is dropped only after it is released.
	 */
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	bits = entry->ie_bits;

	/*
	 * The following is used (for case MACH_PORT_RIGHT_DEAD_NAME) in the
	 * switch below. It is used to keep track of those cases (in DIPC)
	 * where we have postponed the dropping of a port reference. Since
	 * the dropping of the reference could cause the port to disappear
	 * we postpone doing so when we are holding the space lock.
	 */

	assert(is_active(space));
	assert(right < MACH_PORT_RIGHT_NUMBER);

	/* Rights-specific restrictions and operations. */

	switch (right) {
	case MACH_PORT_RIGHT_PORT_SET: {
		ipc_pset_t pset;

		if ((bits & MACH_PORT_TYPE_PORT_SET) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_PORT_SET);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta == 0) {
			goto success;
		}

		/* a port set has no urefs; only a -1 delta (destroy) is legal */
		if (delta != -1) {
			goto invalid_value;
		}

		pset = ips_object_to_pset(entry->ie_object);
		ips_mq_lock(pset);
		assert(ips_active(pset));

		ipc_entry_dealloc(space, ips_to_object(pset), name, entry);

		is_write_unlock(space);

		ipc_pset_destroy(space, pset); /* consumes ref, unlocks */
		break;
	}

	case MACH_PORT_RIGHT_RECEIVE: {
		ipc_port_t request = IP_NULL;

		if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
			/* no exception if the entry used to hold receive (EX_RECEIVE) */
			if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		if (delta == 0) {
			goto success;
		}

		/* a receive right has no urefs; only -1 (destroy) is legal */
		if (delta != -1) {
			goto invalid_value;
		}

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		/*
		 * The port lock is needed for ipc_right_dncancel;
		 * otherwise, we wouldn't have to take the lock
		 * until just before dropping the space lock.
		 */

		ip_mq_lock(port);
		require_ip_active(port);
		assert(ip_get_receiver_name(port) == name);
		assert(ip_in_space(port, space));

		/* Mach Port Guard Checking */
		if (port->ip_guarded) {
			uint64_t portguard = port->ip_context;
			ip_mq_unlock(port);
			is_write_unlock(space);
			/* Raise mach port guard exception */
			mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_MOD_REFS);
			goto guard_failure;
		}

		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);

			if (ipc_port_has_prdrequest(port)) {
				/*
				 * Since another task has requested a
				 * destroy notification for this port, it
				 * isn't actually being destroyed - the receive
				 * right is just being moved to another task.
				 * Since we still have one or more send rights,
				 * we need to record the loss of the receive
				 * right and enter the remaining send right
				 * into the hash table.
				 */
				bits &= ~MACH_PORT_TYPE_RECEIVE;
				bits |= MACH_PORT_TYPE_EX_RECEIVE;
				ipc_hash_insert(space, ip_to_object(port),
				    name, entry);
				/* the entry keeps a send-right reference on the port */
				ip_reference(port);
			} else {
				/*
				 * The remaining send right turns into a
				 * dead name. Notice we don't decrement
				 * ip_srights, generate a no-senders notif,
				 * or use ipc_right_dncancel, because the
				 * port is destroyed "first".
				 */
				bits &= ~IE_BITS_TYPE_MASK;
				bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
				if (entry->ie_request) {
					entry->ie_request = IE_REQ_NONE;
					/* if urefs are pegged due to overflow, leave them pegged */
					if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
						bits++; /* increment urefs */
					}
				}
				entry->ie_object = IO_NULL;
			}
			entry->ie_bits = bits;
			ipc_entry_modified(space, name, entry);
		} else {
			/* pure receive right: the whole entry goes away */
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			assert(!ip_is_pinned(port));
			ipc_entry_dealloc(space, ip_to_object(port), name, entry);
		}
		is_write_unlock(space);

		ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_SEND_ONCE: {
		ipc_port_t request;

		if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
		assert(IE_BITS_UREFS(bits) == 1);

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; entry is now a dead name */
			assert(!(entry->ie_bits & MACH_PORT_TYPE_SEND_ONCE));
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_sorights > 0);

		/* a send-once right has exactly one uref: only 0 or -1 is legal */
		if ((delta > 0) || (delta < -1)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (delta == 0) {
			ip_mq_unlock(port);
			goto success;
		}

		/*
		 * clear any reply context:
		 * no one will be sending the response b/c we are destroying
		 * the single, outstanding send once right.
		 */
		port->ip_reply_context = 0;

		request = ipc_right_request_cancel_macro(space, port, name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);

		is_write_unlock(space);

		/* consumes the send-once right and our ref, unlocks the port */
		ipc_notify_send_once_and_unlock(port);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_DEAD_NAME: {
		ipc_port_t relport = IP_NULL;
		mach_port_urefs_t urefs;

		if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
			port = ip_object_to_port(entry->ie_object);
			assert(port != IP_NULL);

			if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
				/* port is locked and active: not actually a dead name */
				ip_mq_unlock(port);
				port = IP_NULL;
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
				goto invalid_right;
			}
			/* entry was just converted; drop the check's ref after unlock */
			bits = entry->ie_bits;
			relport = port;
			port = IP_NULL;
		} else if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			goto invalid_right;
		}

		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
		assert(IE_BITS_UREFS(bits) > 0);
		assert(entry->ie_object == IO_NULL);
		assert(entry->ie_request == IE_REQ_NONE);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* all refs gone: free the dead name entry */
			ipc_entry_dealloc(space, IO_NULL, name, entry);
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		is_write_unlock(space);

		if (relport != IP_NULL) {
			ip_release(relport);
		}

		break;
	}

	case MACH_PORT_RIGHT_SEND: {
		mach_port_urefs_t urefs;
		ipc_port_t request = IP_NULL;
		ipc_notify_nsenders_t nsrequest = { };
		ipc_port_t port_to_release = IP_NULL;

		if ((bits & MACH_PORT_TYPE_SEND) == 0) {
			/* invalid right exception only when not live/dead confusion */
			if ((bits & MACH_PORT_TYPE_DEAD_NAME) == 0
#if !defined(AE_MAKESENDRIGHT_FIXED)
			/*
			 * AE tries to add single send right without knowing if it already owns one.
			 * But if it doesn't, it should own the receive right and delta should be 1.
			 */
			    && (((bits & MACH_PORT_TYPE_RECEIVE) == 0) || (delta != 1))
#endif
			    ) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
			}
			goto invalid_right;
		}

		/* maximum urefs for send is MACH_PORT_UREFS_MAX */

		port = ip_object_to_port(entry->ie_object);
		assert(port != IP_NULL);

		if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			/* port died; "port" now holds a ref released at invalid_right */
			assert((entry->ie_bits & MACH_PORT_TYPE_SEND) == 0);
			goto invalid_right;
		}
		/* port is locked and active */

		assert(port->ip_srights > 0);

		if (delta > ((mach_port_delta_t)MACH_PORT_UREFS_MAX) ||
		    delta < (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		urefs = IE_BITS_UREFS(bits);

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */

			if (delta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				delta = 0;
			}
		} else {
			if (MACH_PORT_UREFS_UNDERFLOW(urefs, delta)) {
				ip_mq_unlock(port);
				goto invalid_value;
			}
			if (MACH_PORT_UREFS_OVERFLOW(urefs, delta)) {
				/* leave urefs pegged to maximum if it overflowed */
				delta = MACH_PORT_UREFS_MAX - urefs;
			}
		}

		if ((urefs + delta) == 0) {
			/* dropping the last uref destroys the send right */
			if (ip_is_pinned(port)) {
				ip_mq_unlock(port);
				is_write_unlock(space);
				mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_DEALLOC);
				return KERN_INVALID_CAPABILITY;
			}

			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}

			if (bits & MACH_PORT_TYPE_RECEIVE) {
				/* keep the receive right, clear only the send bits */
				assert(ip_get_receiver_name(port) == name);
				assert(ip_in_space(port, space));
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND_RECEIVE);

				entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
				    MACH_PORT_TYPE_SEND);
				ipc_entry_modified(space, name, entry);
			} else {
				assert(IE_BITS_TYPE(bits) ==
				    MACH_PORT_TYPE_SEND);

				request = ipc_right_request_cancel_macro(space, port,
				    name, entry);
				ipc_hash_delete(space, ip_to_object(port),
				    name, entry);
				assert(!ip_is_pinned(port));
				ipc_entry_dealloc(space, ip_to_object(port),
				    name, entry);
				/* released below, after all locks are dropped */
				port_to_release = port;
			}
		} else if (delta != 0) {
			entry->ie_bits = bits + delta;
			ipc_entry_modified(space, name, entry);
		}

		ip_mq_unlock(port);

		is_write_unlock(space);

		if (port_to_release != IP_NULL) {
			ip_release(port_to_release);
		}

		ipc_notify_no_senders_emit(nsrequest);

		if (request != IP_NULL) {
			ipc_notify_port_deleted(request, name);
		}
		break;
	}

	case MACH_PORT_RIGHT_LABELH:
		goto invalid_right;

	default:
		panic("ipc_right_delta: strange right %d for 0x%x (%p) in space:%p",
		    right, name, (void *)entry, (void *)space);
	}

	return KERN_SUCCESS;

success:
	is_write_unlock(space);
	return KERN_SUCCESS;

invalid_right:
	is_write_unlock(space);
	/* drop any reference acquired while the space lock was held */
	if (port != IP_NULL) {
		ip_release(port);
	}
	return KERN_INVALID_RIGHT;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;

guard_failure:
	/* locks already dropped and the guard exception already raised */
	return KERN_INVALID_RIGHT;
}
1703
1704 /*
1705 * Routine: ipc_right_destruct
1706 * Purpose:
1707 * Deallocates the receive right and modifies the
1708 * user-reference count for the send rights as requested.
1709 * Conditions:
1710 * The space is write-locked, and is unlocked upon return.
1711 * The space must be active.
1712 * Returns:
1713 * KERN_SUCCESS Count was modified.
1714 * KERN_INVALID_RIGHT Entry has wrong type.
1715 * KERN_INVALID_VALUE Bad delta for the right.
1716 */
1717
kern_return_t
ipc_right_destruct(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_delta_t       srdelta,
	uint64_t                guard)
{
	ipc_port_t port = IP_NULL;
	ipc_entry_bits_t bits;

	mach_port_urefs_t urefs;
	ipc_port_t request = IP_NULL;
	ipc_notify_nsenders_t nsrequest = { };

	bits = entry->ie_bits;

	assert(is_active(space));

	if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
		is_write_unlock(space);

		/* No exception if we used to have receive and held entry since */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		}
		return KERN_INVALID_RIGHT;
	}

	/* a send-right delta was requested but the entry has no send right */
	if (srdelta && (bits & MACH_PORT_TYPE_SEND) == 0) {
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_RIGHT;
	}

	/* only zero or negative send-right deltas make sense when destroying */
	if (srdelta > 0) {
		goto invalid_value;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	ip_mq_lock(port);
	require_ip_active(port);
	assert(ip_get_receiver_name(port) == name);
	assert(ip_in_space(port, space));

	/* Mach Port Guard Checking */
	if (port->ip_guarded && (guard != port->ip_context)) {
		uint64_t portguard = port->ip_context;
		ip_mq_unlock(port);
		is_write_unlock(space);
		mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_DESTROY);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * First reduce the send rights as requested and
	 * adjust the entry->ie_bits accordingly. The
	 * ipc_entry_modified() call is made once the receive
	 * right is destroyed too.
	 */

	if (srdelta) {
		assert(port->ip_srights > 0);

		urefs = IE_BITS_UREFS(bits);

		/*
		 * Since we made sure that srdelta is negative,
		 * the check for urefs overflow is not required.
		 */
		if (MACH_PORT_UREFS_UNDERFLOW(urefs, srdelta)) {
			ip_mq_unlock(port);
			goto invalid_value;
		}

		if (urefs == MACH_PORT_UREFS_MAX) {
			/*
			 * urefs are pegged due to an overflow
			 * only a delta removing all refs at once can change it
			 */
			if (srdelta != (-((mach_port_delta_t)MACH_PORT_UREFS_MAX))) {
				srdelta = 0;
			}
		}

		if ((urefs + srdelta) == 0) {
			/* all send urefs gone: drop the send right itself */
			ip_srights_dec(port);
			if (port->ip_srights == 0) {
				nsrequest = ipc_notify_no_senders_prepare(port);
			}
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_RECEIVE);
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK |
			    MACH_PORT_TYPE_SEND);
		} else {
			entry->ie_bits = bits + srdelta;
		}
	}

	/*
	 * Now destroy the receive right. Update space and
	 * entry accordingly.
	 */

	/* re-read: the srdelta handling above may have rewritten the bits */
	bits = entry->ie_bits;
	if (bits & MACH_PORT_TYPE_SEND) {
		assert(IE_BITS_UREFS(bits) > 0);
		assert(IE_BITS_UREFS(bits) <= MACH_PORT_UREFS_MAX);

		if (ipc_port_has_prdrequest(port)) {
			/*
			 * Since another task has requested a
			 * destroy notification for this port, it
			 * isn't actually being destroyed - the receive
			 * right is just being moved to another task.
			 * Since we still have one or more send rights,
			 * we need to record the loss of the receive
			 * right and enter the remaining send right
			 * into the hash table.
			 */
			bits &= ~MACH_PORT_TYPE_RECEIVE;
			bits |= MACH_PORT_TYPE_EX_RECEIVE;
			ipc_hash_insert(space, ip_to_object(port),
			    name, entry);
			/* the entry keeps a send-right reference on the port */
			ip_reference(port);
		} else {
			/*
			 * The remaining send right turns into a
			 * dead name. Notice we don't decrement
			 * ip_srights, generate a no-senders notif,
			 * or use ipc_right_dncancel, because the
			 * port is destroyed "first".
			 */
			bits &= ~IE_BITS_TYPE_MASK;
			bits |= (MACH_PORT_TYPE_DEAD_NAME | MACH_PORT_TYPE_EX_RECEIVE);
			if (entry->ie_request) {
				entry->ie_request = IE_REQ_NONE;
				/* if urefs are pegged due to overflow, leave them pegged */
				if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
					bits++; /* increment urefs */
				}
			}
			entry->ie_object = IO_NULL;
		}
		entry->ie_bits = bits;
		ipc_entry_modified(space, name, entry);
	} else {
		/* pure receive right: the whole entry goes away */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
		assert(IE_BITS_UREFS(bits) == 0);
		request = ipc_right_request_cancel_macro(space, port,
		    name, entry);
		assert(!ip_is_pinned(port));
		ipc_entry_dealloc(space, ip_to_object(port), name, entry);
	}

	/* Unlock space */
	is_write_unlock(space);

	ipc_notify_no_senders_emit(nsrequest);

	ipc_port_destroy(port); /* clears receiver, consumes ref, unlocks */

	if (request != IP_NULL) {
		ipc_notify_port_deleted(request, name);
	}

	return KERN_SUCCESS;

invalid_value:
	is_write_unlock(space);
	mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
	return KERN_INVALID_VALUE;
}
1891
1892
1893 /*
1894 * Routine: ipc_right_info
1895 * Purpose:
1896 * Retrieves information about the right.
1897 * Conditions:
1898 * The space is active and write-locked.
1899 * The space is unlocked upon return.
1900 * Returns:
1901 * KERN_SUCCESS Retrieved info
1902 */
1903
kern_return_t
ipc_right_info(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_port_type_t        *typep,
	mach_port_urefs_t       *urefsp)
{
	ipc_port_t port;
	ipc_entry_bits_t bits;
	mach_port_type_t type = 0;
	ipc_port_request_index_t request;

	bits = entry->ie_bits;
	request = entry->ie_request;
	port = ip_object_to_port(entry->ie_object);

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		assert(IP_VALID(port));

		if (request != IE_REQ_NONE) {
			ip_mq_lock(port);
			require_ip_active(port);
			/* fold in notification-request flags (dnrequest etc.) */
			type |= ipc_port_request_type(port, name, request);
			ip_mq_unlock(port);
		}
		is_write_unlock(space);
	} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
		/*
		 * validate port is still alive - if so, get request
		 * types while we still have it locked. Otherwise,
		 * recapture the (now dead) bits.
		 */
		if (!ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
			if (request != IE_REQ_NONE) {
				type |= ipc_port_request_type(port, name, request);
			}
			ip_mq_unlock(port);
			is_write_unlock(space);
		} else {
			bits = entry->ie_bits;
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
			is_write_unlock(space);
			/* drop the reference ipc_right_check() left us */
			ip_release(port);
		}
	} else {
		/* dead name or port set: nothing to look at beyond the bits */
		is_write_unlock(space);
	}

	type |= IE_BITS_TYPE(bits);

	*typep = type;
	*urefsp = IE_BITS_UREFS(bits);
	return KERN_SUCCESS;
}
1959
1960 /*
1961 * Routine: ipc_right_copyin_check_reply
1962 * Purpose:
1963 * Check if a subsequent ipc_right_copyin would succeed. Used only
1964 * by ipc_kmsg_copyin_header to check if reply_port can be copied in.
1965 * If the reply port is an immovable send right, it errors out.
1966 * Conditions:
1967 * The space is locked (read or write) and active.
1968 */
1969
1970 boolean_t
ipc_right_copyin_check_reply(__assert_only ipc_space_t space,mach_port_name_t reply_name,ipc_entry_t reply_entry,mach_msg_type_name_t reply_type,ipc_entry_t dest_entry,boolean_t * reply_port_semantics_violation)1971 ipc_right_copyin_check_reply(
1972 __assert_only ipc_space_t space,
1973 mach_port_name_t reply_name,
1974 ipc_entry_t reply_entry,
1975 mach_msg_type_name_t reply_type,
1976 ipc_entry_t dest_entry,
1977 boolean_t *reply_port_semantics_violation)
1978 {
1979 ipc_entry_bits_t bits;
1980 ipc_port_t reply_port;
1981 ipc_port_t dest_port;
1982
1983 bits = reply_entry->ie_bits;
1984 assert(is_active(space));
1985
1986 switch (reply_type) {
1987 case MACH_MSG_TYPE_MAKE_SEND:
1988 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1989 return FALSE;
1990 }
1991 break;
1992
1993 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
1994 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
1995 return FALSE;
1996 }
1997 break;
1998
1999 case MACH_MSG_TYPE_MOVE_RECEIVE:
2000 /* ipc_kmsg_copyin_header already filters it out */
2001 return FALSE;
2002
2003 case MACH_MSG_TYPE_COPY_SEND:
2004 case MACH_MSG_TYPE_MOVE_SEND:
2005 case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
2006 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2007 break;
2008 }
2009
2010 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2011 return FALSE;
2012 }
2013
2014 reply_port = ip_object_to_port(reply_entry->ie_object);
2015 assert(reply_port != IP_NULL);
2016
2017 /*
2018 * active status peek to avoid checks that will be skipped
2019 * on copyin for dead ports. Lock not held, so will not be
2020 * atomic (but once dead, there's no going back).
2021 */
2022 if (!ip_active(reply_port)) {
2023 break;
2024 }
2025
2026 /*
2027 * Can't copyin a send right that is marked immovable. This bit
2028 * is set only during port creation and never unset. So it can
2029 * be read without a lock.
2030 */
2031 if (ip_is_immovable_send(reply_port)) {
2032 mach_port_guard_exception_immovable(space, reply_name, reply_port, MPG_FLAGS_NONE);
2033 return FALSE;
2034 }
2035
2036 if (reply_type == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
2037 if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2038 return FALSE;
2039 }
2040 } else {
2041 if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2042 return FALSE;
2043 }
2044 }
2045
2046 break;
2047 }
2048
2049 default:
2050 panic("ipc_right_copyin_check: strange rights");
2051 }
2052
2053 if ((IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET) ||
2054 (IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_PORT_SET)) {
2055 return TRUE;
2056 }
2057
2058 /* The only disp allowed when a reply port is a local port of mach msg is MAKE_SO. */
2059 reply_port = ip_object_to_port(reply_entry->ie_object);
2060 assert(reply_port != IP_NULL);
2061
2062 if (ip_active(reply_port)) {
2063 if (ip_is_reply_port(reply_port) && (reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
2064 return FALSE;
2065 }
2066
2067 *reply_port_semantics_violation = FALSE;
2068 /* When sending a msg to remote port that requires reply port semantics enforced the local port of that msg needs to be a reply port. */
2069 dest_port = ip_object_to_port(dest_entry->ie_object);
2070 if (IP_VALID(dest_port) && ip_active(dest_port) && reply_port_semantics) {
2071 if (ip_violates_strict_reply_port_semantics(dest_port, reply_port) ||
2072 ip_violates_reply_port_semantics(dest_port, reply_port)) {
2073 mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
2074 return FALSE;
2075 }
2076 }
2077 }
2078
2079 return TRUE;
2080 }
2081
2082 /*
2083 * Routine: ipc_right_copyin_check_guard_locked
2084 * Purpose:
2085 * Check if the port is guarded and the guard
2086 * value matches the one passed in the arguments.
2087 * If MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND is set,
2088 * check if the port is unguarded.
2089 * Conditions:
2090 * The port is locked.
2091 * Returns:
2092 * KERN_SUCCESS Port is either unguarded
2093 * or guarded with expected value
2094 * KERN_INVALID_ARGUMENT Port is either unguarded already or guard mismatch.
2095 * This also raises a EXC_GUARD exception.
2096 */
2097 static kern_return_t
ipc_right_copyin_check_guard_locked(mach_port_name_t name,ipc_port_t port,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2098 ipc_right_copyin_check_guard_locked(
2099 mach_port_name_t name,
2100 ipc_port_t port,
2101 mach_port_context_t context,
2102 mach_msg_guard_flags_t *guard_flags)
2103 {
2104 mach_msg_guard_flags_t flags = *guard_flags;
2105 if ((flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && !port->ip_guarded && !context) {
2106 return KERN_SUCCESS;
2107 } else if (port->ip_guarded && (port->ip_context == context)) {
2108 return KERN_SUCCESS;
2109 }
2110
2111 /* Incorrect guard; Raise exception */
2112 mach_port_guard_exception(name, context, port->ip_context, kGUARD_EXC_INCORRECT_GUARD);
2113 return KERN_INVALID_ARGUMENT;
2114 }
2115
2116 /*
2117 * Routine: ipc_right_copyin
2118 * Purpose:
2119 * Copyin a capability from a space.
2120 * If successful, the caller gets a ref
2121 * for the resulting object, unless it is IO_DEAD,
2122 * and possibly a send-once right which should
2123 * be used in a port-deleted notification.
2124 *
2125 * If deadok is not TRUE, the copyin operation
2126 * will fail instead of producing IO_DEAD.
2127 *
2128 * The entry is deallocated if the entry type becomes
2129 * MACH_PORT_TYPE_NONE.
2130 * Conditions:
2131 * The space is write-locked and active.
2132 * Returns:
2133 * KERN_SUCCESS Acquired an object, possibly IO_DEAD.
2134 * KERN_INVALID_RIGHT Name doesn't denote correct right.
 *		KERN_INVALID_CAPABILITY	Trying to move a kobject port or an immovable right,
2136 * or moving the last ref of pinned right
2137 * KERN_INVALID_ARGUMENT Port is unguarded or guard mismatch
2138 */
2139
2140 kern_return_t
ipc_right_copyin(ipc_space_t space,mach_port_name_t name,ipc_entry_t entry,mach_msg_type_name_t msgt_name,ipc_object_copyin_flags_t flags,ipc_object_t * objectp,ipc_port_t * sorightp,ipc_port_t * releasep,int * assertcntp,mach_port_context_t context,mach_msg_guard_flags_t * guard_flags)2141 ipc_right_copyin(
2142 ipc_space_t space,
2143 mach_port_name_t name,
2144 ipc_entry_t entry,
2145 mach_msg_type_name_t msgt_name,
2146 ipc_object_copyin_flags_t flags,
2147 ipc_object_t *objectp,
2148 ipc_port_t *sorightp,
2149 ipc_port_t *releasep,
2150 int *assertcntp,
2151 mach_port_context_t context,
2152 mach_msg_guard_flags_t *guard_flags)
2153 {
2154 ipc_entry_bits_t bits;
2155 ipc_port_t port;
2156 kern_return_t kr;
2157 boolean_t deadok = !!(flags & IPC_OBJECT_COPYIN_FLAGS_DEADOK);
2158 boolean_t allow_imm_send = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND);
2159 boolean_t allow_reply_make_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MAKE_SEND_ONCE);
2160 boolean_t allow_reply_move_so = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_REPLY_MOVE_SEND_ONCE);
2161
2162 *releasep = IP_NULL;
2163 *assertcntp = 0;
2164
2165 bits = entry->ie_bits;
2166
2167 assert(is_active(space));
2168
2169 switch (msgt_name) {
2170 case MACH_MSG_TYPE_MAKE_SEND: {
2171 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2172 goto invalid_right;
2173 }
2174
2175 port = ip_object_to_port(entry->ie_object);
2176 assert(port != IP_NULL);
2177
2178 if (ip_is_reply_port(port)) {
2179 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2180 return KERN_INVALID_CAPABILITY;
2181 }
2182
2183 ip_mq_lock(port);
2184 assert(ip_get_receiver_name(port) == name);
2185 assert(ip_in_space(port, space));
2186
2187 ipc_port_make_send_any_locked(port);
2188 ip_mq_unlock(port);
2189
2190 *objectp = ip_to_object(port);
2191 *sorightp = IP_NULL;
2192 break;
2193 }
2194
2195 case MACH_MSG_TYPE_MAKE_SEND_ONCE: {
2196 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2197 goto invalid_right;
2198 }
2199
2200 port = ip_object_to_port(entry->ie_object);
2201 assert(port != IP_NULL);
2202
2203 if ((ip_is_reply_port(port)) && !allow_reply_make_so) {
2204 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2205 return KERN_INVALID_CAPABILITY;
2206 }
2207
2208 ip_mq_lock(port);
2209 require_ip_active(port);
2210 assert(ip_get_receiver_name(port) == name);
2211 assert(ip_in_space(port, space));
2212
2213 ipc_port_make_sonce_locked(port);
2214 ip_mq_unlock(port);
2215
2216 *objectp = ip_to_object(port);
2217 *sorightp = IP_NULL;
2218 break;
2219 }
2220
2221 case MACH_MSG_TYPE_MOVE_RECEIVE: {
2222 bool allow_imm_recv = false;
2223 ipc_port_t request = IP_NULL;
2224 waitq_link_list_t free_l = { };
2225
2226 if ((bits & MACH_PORT_TYPE_RECEIVE) == 0) {
2227 goto invalid_right;
2228 }
2229
2230 port = ip_object_to_port(entry->ie_object);
2231 assert(port != IP_NULL);
2232
2233 ip_mq_lock(port);
2234 require_ip_active(port);
2235 assert(ip_get_receiver_name(port) == name);
2236 assert(ip_in_space(port, space));
2237
2238 /*
2239 * Disallow moving receive-right kobjects/kolabel, e.g. mk_timer ports
2240 * The ipc_port structure uses the kdata union of kobject and
2241 * imp_task exclusively. Thus, general use of a kobject port as
2242 * a receive right can cause type confusion in the importance
2243 * code.
2244 */
2245 if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
2246 /*
2247 * Distinguish an invalid right, e.g., trying to move
2248 * a send right as a receive right, from this
2249 * situation which is, "This is a valid receive right,
2250 * but it's also a kobject and you can't move it."
2251 */
2252 ip_mq_unlock(port);
2253 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
2254 return KERN_INVALID_CAPABILITY;
2255 }
2256
2257 if (port->ip_service_port && port->ip_splabel &&
2258 !ipc_service_port_label_is_bootstrap_port((ipc_service_port_label_t)port->ip_splabel)) {
2259 allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_RECEIVE);
2260 } else if (ip_is_libxpc_connection_port(port)) {
2261 allow_imm_recv = !!(flags & IPC_OBJECT_COPYIN_FLAGS_ALLOW_CONN_IMMOVABLE_RECEIVE);
2262 }
2263
2264 if ((!allow_imm_recv && port->ip_immovable_receive) ||
2265 ip_is_reply_port(port) || /* never move reply port rcv right */
2266 port->ip_specialreply) {
2267 assert(!ip_in_space(port, ipc_space_kernel));
2268 ip_mq_unlock(port);
2269 assert(current_task() != kernel_task);
2270 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_IMMOVABLE);
2271 return KERN_INVALID_CAPABILITY;
2272 }
2273
2274 if (guard_flags != NULL) {
2275 kr = ipc_right_copyin_check_guard_locked(name, port, context, guard_flags);
2276 if (kr != KERN_SUCCESS) {
2277 ip_mq_unlock(port);
2278 return kr;
2279 }
2280 }
2281
2282 if (bits & MACH_PORT_TYPE_SEND) {
2283 assert(IE_BITS_TYPE(bits) ==
2284 MACH_PORT_TYPE_SEND_RECEIVE);
2285 assert(IE_BITS_UREFS(bits) > 0);
2286 assert(port->ip_srights > 0);
2287
2288 bits &= ~MACH_PORT_TYPE_RECEIVE;
2289 bits |= MACH_PORT_TYPE_EX_RECEIVE;
2290 entry->ie_bits = bits;
2291 ipc_hash_insert(space, ip_to_object(port),
2292 name, entry);
2293 ip_reference(port);
2294 ipc_entry_modified(space, name, entry);
2295 } else {
2296 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
2297 assert(IE_BITS_UREFS(bits) == 0);
2298
2299 request = ipc_right_request_cancel_macro(space, port,
2300 name, entry);
2301 assert(!ip_is_pinned(port));
2302 ipc_entry_dealloc(space, ip_to_object(port), name, entry);
2303 }
2304
2305 /* ipc_port_clear_receiver unguards the port and clears the ip_immovable_receive bit */
2306 (void)ipc_port_clear_receiver(port, FALSE, &free_l); /* don't destroy the port/mqueue */
2307 if (guard_flags != NULL) {
2308 /* this flag will be cleared during copyout */
2309 *guard_flags = *guard_flags | MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
2310 }
2311
2312 #if IMPORTANCE_INHERITANCE
2313 /*
2314 * Account for boosts the current task is going to lose when
2315 * copying this right in. Tempowner ports have either not
2316 * been accounting to any task (and therefore are already in
2317 * "limbo" state w.r.t. assertions) or to some other specific
2318 * task. As we have no way to drop the latter task's assertions
2319 * here, We'll deduct those when we enqueue it on its
2320 * destination port (see ipc_port_check_circularity()).
2321 */
2322 if (port->ip_tempowner == 0) {
2323 assert(IIT_NULL == ip_get_imp_task(port));
2324
2325 /* ports in limbo have to be tempowner */
2326 port->ip_tempowner = 1;
2327 *assertcntp = port->ip_impcount;
2328 }
2329 #endif /* IMPORTANCE_INHERITANCE */
2330
2331 ip_mq_unlock(port);
2332
2333 /*
2334 * This is unfortunate to do this while the space is locked,
2335 * but plumbing it through all callers really hurts.
2336 */
2337 waitq_link_free_list(WQT_PORT_SET, &free_l);
2338
2339 *objectp = ip_to_object(port);
2340 *sorightp = request;
2341 break;
2342 }
2343
2344 case MACH_MSG_TYPE_COPY_SEND: {
2345 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2346 goto copy_dead;
2347 }
2348
2349 /* allow for dead send-once rights */
2350
2351 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2352 goto invalid_right;
2353 }
2354
2355 assert(IE_BITS_UREFS(bits) > 0);
2356
2357 port = ip_object_to_port(entry->ie_object);
2358 assert(port != IP_NULL);
2359
2360 if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2361 bits = entry->ie_bits;
2362 *releasep = port;
2363 goto copy_dead;
2364 }
2365 /* port is locked and active */
2366
2367 if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2368 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2369 assert(port->ip_sorights > 0);
2370
2371 ip_mq_unlock(port);
2372 goto invalid_right;
2373 }
2374
2375 if (ip_is_reply_port(port)) {
2376 ip_mq_unlock(port);
2377 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2378 return KERN_INVALID_CAPABILITY;
2379 }
2380
2381 if (!allow_imm_send && ip_is_immovable_send(port)) {
2382 ip_mq_unlock(port);
2383 mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2384 return KERN_INVALID_CAPABILITY;
2385 }
2386
2387 ipc_port_copy_send_any_locked(port);
2388 ip_mq_unlock(port);
2389
2390 *objectp = ip_to_object(port);
2391 *sorightp = IP_NULL;
2392 break;
2393 }
2394
2395 case MACH_MSG_TYPE_MOVE_SEND: {
2396 ipc_port_t request = IP_NULL;
2397
2398 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2399 goto move_dead;
2400 }
2401
2402 /* allow for dead send-once rights */
2403
2404 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2405 goto invalid_right;
2406 }
2407
2408 assert(IE_BITS_UREFS(bits) > 0);
2409
2410 port = ip_object_to_port(entry->ie_object);
2411 assert(port != IP_NULL);
2412
2413 if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
2414 bits = entry->ie_bits;
2415 *releasep = port;
2416 goto move_dead;
2417 }
2418 /* port is locked and active */
2419
2420 if ((bits & MACH_PORT_TYPE_SEND) == 0) {
2421 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2422 assert(port->ip_sorights > 0);
2423 ip_mq_unlock(port);
2424 goto invalid_right;
2425 }
2426
2427 if (ip_is_reply_port(port)) {
2428 ip_mq_unlock(port);
2429 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2430 return KERN_INVALID_CAPABILITY;
2431 }
2432
2433 if (!allow_imm_send && ip_is_immovable_send(port)) {
2434 ip_mq_unlock(port);
2435 mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2436 return KERN_INVALID_CAPABILITY;
2437 }
2438
2439 if (IE_BITS_UREFS(bits) == 1) {
2440 assert(port->ip_srights > 0);
2441 if (bits & MACH_PORT_TYPE_RECEIVE) {
2442 assert(ip_get_receiver_name(port) == name);
2443 assert(ip_in_space(port, space));
2444 assert(IE_BITS_TYPE(bits) ==
2445 MACH_PORT_TYPE_SEND_RECEIVE);
2446 assert(!ip_is_pinned(port));
2447
2448 entry->ie_bits = bits & ~
2449 (IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
2450 ipc_entry_modified(space, name, entry);
2451 ip_reference(port);
2452 } else {
2453 assert(IE_BITS_TYPE(bits) ==
2454 MACH_PORT_TYPE_SEND);
2455
2456 if (ip_is_pinned(port)) {
2457 ip_mq_unlock(port);
2458 mach_port_guard_exception_pinned(space, name, port, MPG_FLAGS_MOD_REFS_PINNED_COPYIN);
2459 return KERN_INVALID_CAPABILITY;
2460 }
2461
2462 request = ipc_right_request_cancel_macro(space, port,
2463 name, entry);
2464 ipc_hash_delete(space, ip_to_object(port),
2465 name, entry);
2466 ipc_entry_dealloc(space, ip_to_object(port),
2467 name, entry);
2468 /* transfer entry's reference to caller */
2469 }
2470 } else {
2471 ipc_port_copy_send_any_locked(port);
2472 /* if urefs are pegged due to overflow, leave them pegged */
2473 if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2474 entry->ie_bits = bits - 1; /* decrement urefs */
2475 }
2476 ipc_entry_modified(space, name, entry);
2477 }
2478
2479 ip_mq_unlock(port);
2480 *objectp = ip_to_object(port);
2481 *sorightp = request;
2482 break;
2483 }
2484
2485 case MACH_MSG_TYPE_MOVE_SEND_ONCE: {
2486 ipc_port_t request;
2487
2488 if (bits & MACH_PORT_TYPE_DEAD_NAME) {
2489 goto move_dead;
2490 }
2491
2492 /* allow for dead send rights */
2493
2494 if ((bits & MACH_PORT_TYPE_SEND_RIGHTS) == 0) {
2495 goto invalid_right;
2496 }
2497
2498 assert(IE_BITS_UREFS(bits) > 0);
2499
2500 port = ip_object_to_port(entry->ie_object);
2501 assert(port != IP_NULL);
2502
2503 if (ipc_right_check(space, port, name, entry, flags)) {
2504 bits = entry->ie_bits;
2505 *releasep = port;
2506 goto move_dead;
2507 }
2508 /*
2509 * port is locked, but may not be active:
2510 * Allow copyin of inactive ports with no dead name request and treat it
2511 * as if the copyin of the port was successful and port became inactive
2512 * later.
2513 */
2514
2515 if ((bits & MACH_PORT_TYPE_SEND_ONCE) == 0) {
2516 assert(bits & MACH_PORT_TYPE_SEND);
2517 assert(port->ip_srights > 0);
2518
2519 ip_mq_unlock(port);
2520 goto invalid_right;
2521 }
2522
2523 if (ip_is_reply_port(port) && !allow_reply_move_so) {
2524 ip_mq_unlock(port);
2525 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
2526 return KERN_INVALID_CAPABILITY;
2527 }
2528
2529 if (!allow_imm_send && ip_is_immovable_send(port)) {
2530 ip_mq_unlock(port);
2531 mach_port_guard_exception_immovable(space, name, port, MPG_FLAGS_NONE);
2532 return KERN_INVALID_CAPABILITY;
2533 }
2534
2535 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND_ONCE);
2536 assert(IE_BITS_UREFS(bits) == 1);
2537 assert(port->ip_sorights > 0);
2538
2539 request = ipc_right_request_cancel_macro(space, port, name, entry);
2540 assert(!ip_is_pinned(port));
2541 ipc_entry_dealloc(space, ip_to_object(port), name, entry);
2542 ip_mq_unlock(port);
2543
2544 *objectp = ip_to_object(port);
2545 *sorightp = request;
2546 break;
2547 }
2548
2549 default:
2550 invalid_right:
2551 return KERN_INVALID_RIGHT;
2552 }
2553
2554 return KERN_SUCCESS;
2555
2556 copy_dead:
2557 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
2558 assert(IE_BITS_UREFS(bits) > 0);
2559 assert(entry->ie_request == IE_REQ_NONE);
2560 assert(entry->ie_object == 0);
2561
2562 if (!deadok) {
2563 goto invalid_right;
2564 }
2565
2566 *objectp = IO_DEAD;
2567 *sorightp = IP_NULL;
2568 return KERN_SUCCESS;
2569
2570 move_dead:
2571 assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_DEAD_NAME);
2572 assert(IE_BITS_UREFS(bits) > 0);
2573 assert(entry->ie_request == IE_REQ_NONE);
2574 assert(entry->ie_object == IO_NULL);
2575
2576 if (!deadok) {
2577 goto invalid_right;
2578 }
2579
2580 if (IE_BITS_UREFS(bits) == 1) {
2581 ipc_entry_dealloc(space, IO_NULL, name, entry);
2582 } else {
2583 /* if urefs are pegged due to overflow, leave them pegged */
2584 if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
2585 entry->ie_bits = bits - 1; /* decrement urefs */
2586 }
2587 ipc_entry_modified(space, name, entry);
2588 }
2589 *objectp = IO_DEAD;
2590 *sorightp = IP_NULL;
2591 return KERN_SUCCESS;
2592 }
2593
2594 /*
2595 * Routine: ipc_right_copyin_two_move_sends
2596 * Purpose:
2597 * Like ipc_right_copyin with MACH_MSG_TYPE_MOVE_SEND
2598 * and deadok == FALSE, except that this moves two
2599 * send rights at once.
2600 * Conditions:
2601 * The space is write-locked and active.
2602 * The object is returned with two refs/send rights.
2603 * Returns:
2604 * KERN_SUCCESS Acquired an object.
2605 * KERN_INVALID_RIGHT Name doesn't denote correct right.
2606 * KERN_INVALID_CAPABILITY Name does not allow copyin move send capability.
2607 */
static
kern_return_t
ipc_right_copyin_two_move_sends(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	ipc_entry_bits_t bits;
	mach_port_urefs_t urefs;
	ipc_port_t port;
	ipc_port_t request = IP_NULL;

	*releasep = IP_NULL;

	assert(is_active(space));

	bits = entry->ie_bits;

	/* both rights must come from a send right with at least two urefs */
	if ((bits & MACH_PORT_TYPE_SEND) == 0) {
		goto invalid_right;
	}

	urefs = IE_BITS_UREFS(bits);
	if (urefs < 2) {
		goto invalid_right;
	}

	port = ip_object_to_port(entry->ie_object);
	assert(port != IP_NULL);

	/* reply port send rights cannot be moved in pairs */
	if (ip_is_reply_port(port)) {
		mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_RIGHT);
		return KERN_INVALID_CAPABILITY;
	}

	/* the port may have died; then the entry denotes a dead name */
	if (ipc_right_check(space, port, name, entry, IPC_OBJECT_COPYIN_FLAGS_NONE)) {
		*releasep = port;
		goto invalid_right;
	}
	/* port is locked and active */

	/*
	 * To reach here we either have:
	 * (1) reply_name == voucher_name, but voucher is not immovable send right.
	 * (2) reply_name == dest_name, but ipc_right_copyin_check_reply() guaranteed
	 *     that we can't use MOVE_SEND on reply port marked as immovable send right.
	 */
	assert(!ip_is_immovable_send(port));
	assert(!ip_is_pinned(port));

	if (urefs > 2) {
		/*
		 * We are moving 2 urefs as naked send rights, which is decomposed as:
		 * - two copy sends (which doesn't affect the make send count)
		 * - decrementing the local urefs twice.
		 */
		ipc_port_copy_send_any_locked(port);
		ipc_port_copy_send_any_locked(port);
		/* if urefs are pegged due to overflow, leave them pegged */
		if (IE_BITS_UREFS(bits) < MACH_PORT_UREFS_MAX) {
			entry->ie_bits = bits - 2; /* decrement urefs */
		}
		ipc_entry_modified(space, name, entry);
	} else {
		/*
		 * We have exactly 2 send rights for this port in this space,
		 * which means that we will liberate the naked send right held
		 * by this entry.
		 *
		 * However refcounting rules around entries are that naked send rights
		 * on behalf of spaces do not have an associated port reference,
		 * so we need to donate one ...
		 */
		ipc_port_copy_send_any_locked(port);

		if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(ip_get_receiver_name(port) == name);
			assert(ip_in_space(port, space));
			assert(IE_BITS_TYPE(bits) ==
			    MACH_PORT_TYPE_SEND_RECEIVE);

			/* ... that we inject manually when the entry stays alive */
			entry->ie_bits = bits & ~(IE_BITS_UREFS_MASK | MACH_PORT_TYPE_SEND);
			ipc_entry_modified(space, name, entry);
			ip_reference(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);

			/* ... that we steal from the entry when it dies */
			request = ipc_right_request_cancel_macro(space, port,
			    name, entry);
			ipc_hash_delete(space, ip_to_object(port),
			    name, entry);
			ipc_entry_dealloc(space, ip_to_object(port),
			    name, entry);
		}
	}

	ip_mq_unlock(port);

	/* caller receives two refs/send rights and a possible port-deleted soright */
	*objectp = ip_to_object(port);
	*sorightp = request;
	return KERN_SUCCESS;

invalid_right:
	return KERN_INVALID_RIGHT;
}
2718
2719
2720 /*
2721 * Routine: ipc_right_copyin_two
2722 * Purpose:
2723 * Like ipc_right_copyin with two dispositions,
2724 * each of which results in a send or send-once right,
2725 * and deadok = FALSE.
2726 * Conditions:
2727 * The space is write-locked and active.
2728 * The object is returned with two refs/rights.
2729 * Msgt_one refers to the dest_type.
2730 * Copyin flags are currently only used in the context of send once rights.
2731 * Returns:
2732 * KERN_SUCCESS Acquired an object.
2733 * KERN_INVALID_RIGHT Name doesn't denote correct right(s).
2734 * KERN_INVALID_CAPABILITY Name doesn't denote correct right for msgt_two.
2735 */
kern_return_t
ipc_right_copyin_two(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_one,
	mach_msg_type_name_t    msgt_two,
	ipc_object_copyin_flags_t flags_one, /* Used only for send once rights. */
	ipc_object_copyin_flags_t flags_two, /* Used only for send once rights. */
	ipc_object_t            *objectp,
	ipc_port_t              *sorightp,
	ipc_port_t              *releasep)
{
	kern_return_t kr;
	int assertcnt = 0;

	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_one));
	assert(MACH_MSG_TYPE_PORT_ANY_SEND(msgt_two));

	/*
	 * This is a little tedious to make atomic, because
	 * there are 25 combinations of valid dispositions.
	 * However, most are easy.
	 */

	/*
	 * If either is move-sonce, then there must be an error.
	 * (A send-once right cannot be copied in twice.)
	 */
	if (msgt_one == MACH_MSG_TYPE_MOVE_SEND_ONCE ||
	    msgt_two == MACH_MSG_TYPE_MOVE_SEND_ONCE) {
		return KERN_INVALID_RIGHT;
	}

	if ((msgt_one == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_one == MACH_MSG_TYPE_MAKE_SEND_ONCE) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND) ||
	    (msgt_two == MACH_MSG_TYPE_MAKE_SEND_ONCE)) {
		/*
		 * One of the dispositions needs a receive right.
		 *
		 * If the copyin below succeeds, we know the receive
		 * right is there (because the pre-validation of
		 * the second disposition already succeeded in our
		 * caller).
		 *
		 * Hence the port is not in danger of dying.
		 */
		ipc_object_t object_two;

		/* the first copyin may legitimately see an immovable send */
		flags_one = flags_one | IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND;
		kr = ipc_right_copyin(space, name, entry,
		    msgt_one, flags_one,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		assert(IO_VALID(*objectp));
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);

		/*
		 * Now copyin the second (previously validated)
		 * disposition.  The result can't be a dead port,
		 * as no valid disposition can make us lose our
		 * receive right.
		 */
		kr = ipc_right_copyin(space, name, entry,
		    msgt_two, flags_two,
		    &object_two, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		assert(kr == KERN_SUCCESS);
		assert(*sorightp == IP_NULL);
		assert(*releasep == IP_NULL);
		assert(object_two == *objectp);
		assert(entry->ie_bits & MACH_PORT_TYPE_RECEIVE);
	} else if ((msgt_one == MACH_MSG_TYPE_MOVE_SEND) &&
	    (msgt_two == MACH_MSG_TYPE_MOVE_SEND)) {
		/*
		 * This is an easy case.  Just use our
		 * handy-dandy special-purpose copyin call
		 * to get two send rights for the price of one.
		 */
		kr = ipc_right_copyin_two_move_sends(space, name, entry,
		    objectp, sorightp,
		    releasep);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	} else {
		mach_msg_type_name_t msgt_name;

		/*
		 * Must be either a single move-send and a
		 * copy-send, or two copy-send dispositions.
		 * Use the disposition with the greatest side
		 * effects for the actual copyin - then just
		 * duplicate the send right you get back.
		 */
		if (msgt_one == MACH_MSG_TYPE_MOVE_SEND ||
		    msgt_two == MACH_MSG_TYPE_MOVE_SEND) {
			msgt_name = MACH_MSG_TYPE_MOVE_SEND;
		} else {
			msgt_name = MACH_MSG_TYPE_COPY_SEND;
		}

		kr = ipc_right_copyin(space, name, entry,
		    msgt_name, IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND,
		    objectp, sorightp, releasep,
		    &assertcnt, 0, NULL);
		assert(assertcnt == 0);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 * Copy the right we got back.  If it is dead now,
		 * that's OK.  Neither right will be usable to send
		 * a message anyway.
		 *
		 * Note that the port could be concurrently moved
		 * outside of the space as a descriptor, and then
		 * destroyed, which would not happen under the space lock.
		 *
		 * It means we can't use ipc_port_copy_send() which
		 * may fail if the port died.
		 */
		io_lock(*objectp);
		ipc_port_copy_send_any_locked(ip_object_to_port(*objectp));
		io_unlock(*objectp);
	}

	return KERN_SUCCESS;
}
2873
2874
2875 /*
2876 * Routine: ipc_right_copyout
2877 * Purpose:
2878 * Copyout a capability to a space.
2879 * If successful, consumes a ref for the object.
2880 *
2881 * Always succeeds when given a newly-allocated entry,
2882 * because user-reference overflow isn't a possibility.
2883 *
2884 * If copying out the object would cause the user-reference
2885 * count in the entry to overflow, then the user-reference
2886 * count is left pegged to its maximum value and the copyout
2887 * succeeds anyway.
2888 * Conditions:
2889 * The space is write-locked and active.
2890 * The object is locked and active.
2891 * The object is unlocked; the space isn't.
2892 * Returns:
2893 * KERN_SUCCESS Copied out capability.
2894 */
2895
kern_return_t
ipc_right_copyout(
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_entry_t             entry,
	mach_msg_type_name_t    msgt_name,
	ipc_object_copyout_flags_t flags,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	ipc_object_t            object)
{
	ipc_entry_bits_t bits;
	ipc_port_t port;
	mach_port_name_t sp_name = MACH_PORT_NULL;
	mach_port_context_t sp_context = 0;

	bits = entry->ie_bits;

	assert(IO_VALID(object));
	assert(io_otype(object) == IOT_PORT);
	assert(io_active(object));
	assert(entry->ie_object == object);

	port = ip_object_to_port(object);

	if (flags & IPC_OBJECT_COPYOUT_FLAGS_PINNED) {
		/* pinning only applies to immovable sends in pinned/immovable tasks */
		assert(!ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
		assert(task_is_immovable(space->is_task));
		assert(task_is_pinned(space->is_task));
		port->ip_pinned = 1;
	}

	switch (msgt_name) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:

		/* a send-once right always lands in a fresh entry */
		assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
		assert(IE_BITS_UREFS(bits) == 0);
		assert(port->ip_sorights > 0);

		if (port->ip_specialreply) {
			ipc_port_adjust_special_reply_port_locked(port,
			    current_thread()->ith_knote, IPC_PORT_ADJUST_SR_LINK_WORKLOOP, FALSE);
			/* port unlocked on return */
		} else {
			ip_mq_unlock(port);
		}

		entry->ie_bits = bits | (MACH_PORT_TYPE_SEND_ONCE | 1); /* set urefs to 1 */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_SEND:
		assert(port->ip_srights > 0);

		if (bits & MACH_PORT_TYPE_SEND) {
			/* coalesce with the send right the entry already holds */
			mach_port_urefs_t urefs = IE_BITS_UREFS(bits);

			assert(port->ip_srights > 1);
			assert(urefs > 0);
			assert(urefs <= MACH_PORT_UREFS_MAX);

			if (urefs == MACH_PORT_UREFS_MAX) {
				/*
				 * leave urefs pegged to maximum,
				 * consume send right and ref
				 */

				ip_srights_dec(port);
				ip_mq_unlock(port);
				ip_release_live(port);
				return KERN_SUCCESS;
			}

			/* consume send right and ref */
			ip_srights_dec(port);
			ip_mq_unlock(port);
			ip_release_live(port);
		} else if (bits & MACH_PORT_TYPE_RECEIVE) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_RECEIVE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right to entry, consume ref */
			ip_mq_unlock(port);
			ip_release_live(port);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);

			/* transfer send right and ref to entry */
			ip_mq_unlock(port);

			/* entry is locked holding ref, so can use port */

			ipc_hash_insert(space, ip_to_object(port), name, entry);
		}

		entry->ie_bits = (bits | MACH_PORT_TYPE_SEND) + 1; /* increment urefs */
		ipc_entry_modified(space, name, entry);
		break;

	case MACH_MSG_TYPE_PORT_RECEIVE: {
		ipc_port_t dest;
#if IMPORTANCE_INHERITANCE
		/* snapshot boosts carried while the right was in transit */
		natural_t assertcnt = port->ip_impcount;
#endif /* IMPORTANCE_INHERITANCE */

		assert(port->ip_mscount == 0);
		assert(!ip_in_a_space(port));

		/*
		 * Don't copyout kobjects or kolabels as receive right
		 */
		if (ip_is_kobject(port) || ip_is_kolabeled(port)) {
			panic("ipc_right_copyout: Copyout kobject/kolabel as receive right");
		}

		dest = ip_get_destination(port);

		/* port transitions to IN-SPACE state */
		port->ip_receiver_name = name;
		port->ip_receiver = space;

		struct knote *kn = current_thread()->ith_knote;

		/* re-arm the guard that was suspended while the right was in transit */
		if ((guard_flags != NULL) && ((*guard_flags & MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE) != 0)) {
			assert(port->ip_immovable_receive == 0);
			port->ip_guarded = 1;
			port->ip_strict_guard = 0;
			/* pseudo receive shouldn't set the receive right as immovable in the sender's space */
			if (kn != ITH_KNOTE_PSEUDO) {
				port->ip_immovable_receive = 1;
			}
			port->ip_context = current_thread()->ith_msg_addr;
			*context = port->ip_context;
			*guard_flags = *guard_flags & ~MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND;
		}

		if (ip_is_libxpc_connection_port(port)) {
			/*
			 * There are 3 ways to reach here.
			 * 1. A libxpc client successfully sent this receive right to a named service
			 *    and we are copying out in that service's ipc space.
			 * 2. A libxpc client tried doing (1) but failed so we are doing pseudo-receive.
			 * 3. Kernel sent this receive right to a libxpc client as a part of port destroyed notification.
			 *
			 * This flag needs to be set again in all 3 cases as they reset it as part of their flow.
			 */
			port->ip_immovable_receive = 1;
		}

		/* Check if this is a service port */
		if (port->ip_service_port) {
			assert(port->ip_splabel != NULL);
			/*
			 * This flag gets reset during all 3 ways described above for libxpc connection port.
			 * The only difference is launchd acts as an initiator instead of a libxpc client.
			 */
			if (service_port_defense_enabled) {
				port->ip_immovable_receive = 1;
			}

			/* Check if this is a port-destroyed notification to ensure
			 * that initproc doesn't end up with a guarded service port
			 * sent in a regular message
			 */
			if (!ipc_service_port_label_is_pd_notification((ipc_service_port_label_t)port->ip_splabel)) {
				goto skip_sp_check;
			}

			ipc_service_port_label_clear_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
#if !(DEVELOPMENT || DEBUG)
			if (get_bsdtask_info(current_task()) != initproc) {
				goto skip_sp_check;
			}
#endif /* !(DEVELOPMENT || DEBUG) */
			ipc_service_port_label_get_attr(port->ip_splabel, &sp_name, &sp_context);
			assert(sp_name != MACH_PORT_NULL);
			/* Verify the port name and restore the guard value, if any */
			if (name != sp_name) {
				panic("Service port name = 0x%x doesnt match the stored launchd port name = 0x%x", name, sp_name);
			}
			if (sp_context) {
				port->ip_guarded = 1;
				port->ip_strict_guard = 1;
				port->ip_context = sp_context;
			}
		}
skip_sp_check:

		assert((bits & MACH_PORT_TYPE_RECEIVE) == 0);
		if (bits & MACH_PORT_TYPE_SEND) {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_SEND);
			assert(IE_BITS_UREFS(bits) > 0);
			assert(port->ip_srights > 0);
		} else {
			assert(IE_BITS_TYPE(bits) == MACH_PORT_TYPE_NONE);
			assert(IE_BITS_UREFS(bits) == 0);
		}
		entry->ie_bits = bits | MACH_PORT_TYPE_RECEIVE;
		ipc_entry_modified(space, name, entry);

		boolean_t sync_bootstrap_checkin = FALSE;
		if (kn != ITH_KNOTE_PSEUDO && port->ip_sync_bootstrap_checkin) {
			sync_bootstrap_checkin = TRUE;
		}
		if (!ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_RECEIVE)) {
			kn = NULL;
		}
		ipc_port_adjust_port_locked(port, kn, sync_bootstrap_checkin);
		/* port unlocked */

		if (bits & MACH_PORT_TYPE_SEND) {
			ip_release_live(port);

			/* entry is locked holding ref, so can use port */
			ipc_hash_delete(space, ip_to_object(port), name, entry);
		}

		if (dest != IP_NULL) {
#if IMPORTANCE_INHERITANCE
			/*
			 * Deduct the assertion counts we contributed to
			 * the old destination port.  They've already
			 * been reflected into the task as a result of
			 * getting enqueued.
			 */
			ip_mq_lock(dest);
			ipc_port_impcount_delta(dest, 0 - assertcnt, IP_NULL);
			ip_mq_unlock(dest);
#endif /* IMPORTANCE_INHERITANCE */

			/* Drop turnstile ref on dest */
			ipc_port_send_turnstile_complete(dest);
			/* space lock is held */
			ip_release_safe(dest);
		}
		break;
	}

	default:
		panic("ipc_right_copyout: strange rights");
	}
	return KERN_SUCCESS;
}
3141