1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <mach/boolean.h>
73 #include <mach_assert.h>
74
75 #include <mach/port.h>
76 #include <mach/kern_return.h>
77 #include <kern/backtrace.h>
78 #include <kern/debug.h>
79 #include <kern/ipc_kobject.h>
80 #include <kern/kcdata.h>
81 #include <kern/misc_protos.h>
82 #include <kern/policy_internal.h>
83 #include <kern/thread.h>
84 #include <kern/waitq.h>
85 #include <kern/host_notify.h>
86 #include <ipc/ipc_entry.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_object.h>
89 #include <ipc/ipc_right.h>
90 #include <ipc/ipc_port.h>
91 #include <ipc/ipc_pset.h>
92 #include <ipc/ipc_kmsg.h>
93 #include <ipc/ipc_mqueue.h>
94 #include <ipc/ipc_notify.h>
95 #include <ipc/ipc_importance.h>
96 #include <ipc/ipc_policy.h>
97 #include <machine/limits.h>
98 #include <kern/turnstile.h>
99 #include <kern/machine.h>
100
101 #include <security/mac_mach_internal.h>
102 #include <ipc/ipc_service_port.h>
103
104 #include <string.h>
105
106 extern bool proc_is_simulated(struct proc *);
107 extern struct proc *current_proc(void);
108 extern int csproc_hardened_runtime(struct proc* p);
109
110 static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
111 TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);
112
113 extern zone_t ipc_kobject_label_zone;
114
115 LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
116 ipc_port_timestamp_t ipc_port_timestamp_data;
117
118 KALLOC_ARRAY_TYPE_DEFINE(ipc_port_request_table,
119 struct ipc_port_request, KT_DEFAULT);
120
121 #if MACH_ASSERT
122 static void ipc_port_init_debug(ipc_port_t, void *fp);
123 #endif /* MACH_ASSERT */
124
/*
 * Routine:	__ipc_port_inactive_panic
 * Purpose:
 *	Panic helper invoked when an inactive (dead) port is used
 *	where an active one is required.  Never returns (__abortlike).
 */
void __abortlike
__ipc_port_inactive_panic(ipc_port_t port)
{
	panic("Using inactive port %p", port);
}
130
/*
 * Routine:	__ipc_port_translate_receive_panic
 * Purpose:
 *	Panic helper for ipc_port_translate_receive(): a receive right
 *	was found in `space` for a port whose receiver is a different
 *	space, which indicates entry tampering.  Never returns.
 */
static __abortlike void
__ipc_port_translate_receive_panic(ipc_space_t space, ipc_port_t port)
{
	panic("found receive right in space %p for port %p owned by space %p",
	    space, port, ip_get_receiver(port));
}
137
138 __abortlike void
__ipc_right_delta_overflow_panic(ipc_port_t port,natural_t * field,int delta)139 __ipc_right_delta_overflow_panic(ipc_port_t port, natural_t *field, int delta)
140 {
141 const char *what;
142 if (field == &port->ip_srights) {
143 what = "send right";
144 } else {
145 what = "send-once right";
146 }
147 panic("port %p %s count overflow (delta: %d)", port, what, delta);
148 }
149
150 static void
151 ipc_port_send_turnstile_recompute_push_locked(
152 ipc_port_t port);
153
154 static thread_t
155 ipc_port_get_watchport_inheritor(
156 ipc_port_t port);
157
158 static kern_return_t
159 ipc_port_update_qos_n_iotier(
160 ipc_port_t port,
161 uint8_t qos,
162 uint8_t iotier);
163
/*
 * Routine:	ipc_port_lock
 * Purpose:
 *	Validate the port pointer, then take the port lock,
 *	which lives in the port's embedded waitq.
 */
void
ipc_port_lock(ipc_port_t port)
{
	ip_validate(port);
	waitq_lock(&port->ip_waitq);
}
170
/*
 * Routine:	ipc_port_lock_check_aligned
 * Purpose:
 *	Like ipc_port_lock(), but requires that the pointer is a
 *	properly aligned element of the ipc port zone (stronger check
 *	than ip_validate) before taking the lock.
 */
void
ipc_port_lock_check_aligned(ipc_port_t port)
{
	zone_id_require_aligned(ZONE_ID_IPC_PORT, port);
	waitq_lock(&port->ip_waitq);
}
177
/*
 * Routine:	ipc_port_lock_try
 * Purpose:
 *	Validate the port pointer and attempt to take the port lock
 *	without blocking.  Returns true if the lock was acquired.
 */
bool
ipc_port_lock_try(ipc_port_t port)
{
	ip_validate(port);
	return waitq_lock_try(&port->ip_waitq);
}
184
/*
 * Routine:	ipc_port_release
 * Purpose:
 *	Drop one reference on the port (out-of-line wrapper
 *	around ip_release for external callers).
 */
void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}
190
/*
 * Routine:	ipc_port_reference
 * Purpose:
 *	Validate the port pointer and take one reference on it
 *	(out-of-line wrapper around ip_reference).
 */
void
ipc_port_reference(ipc_port_t port)
{
	ip_validate(port);
	ip_reference(port);
}
197
198 /*
199 * Routine: ipc_port_timestamp
200 * Purpose:
201 * Retrieve a timestamp value.
202 */
203
ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	/* atomically post-increment the global timestamp counter */
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
209
210
211 /*
212 * Routine: ipc_port_translate_send
213 * Purpose:
214 * Look up a send right in a space.
215 * Conditions:
216 * Nothing locked before. If successful, the object
217 * is returned active and locked. The caller doesn't get a ref.
218 * Returns:
219 * KERN_SUCCESS Object returned locked.
220 * KERN_INVALID_TASK The space is dead.
221 * KERN_INVALID_NAME The name doesn't denote a right
222 * KERN_INVALID_RIGHT Name doesn't denote the correct right
223 */
224 kern_return_t
ipc_port_translate_send(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)225 ipc_port_translate_send(
226 ipc_space_t space,
227 mach_port_name_t name,
228 ipc_port_t *portp)
229 {
230 ipc_port_t port = IP_NULL;
231 ipc_object_t object;
232 kern_return_t kr;
233
234 kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
235 if (kr == KERN_SUCCESS) {
236 port = ip_object_to_port(object);
237 }
238 *portp = port;
239 return kr;
240 }
241
242
243 /*
244 * Routine: ipc_port_translate_receive
245 * Purpose:
246 * Look up a receive right in a space.
247 * Performs some minimal security checks against tampering.
248 * Conditions:
249 * Nothing locked before. If successful, the object
250 * is returned active and locked. The caller doesn't get a ref.
251 * Returns:
252 * KERN_SUCCESS Object returned locked.
253 * KERN_INVALID_TASK The space is dead.
254 * KERN_INVALID_NAME The name doesn't denote a right
255 * KERN_INVALID_RIGHT Name doesn't denote the correct right
256 */
257 kern_return_t
ipc_port_translate_receive(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)258 ipc_port_translate_receive(
259 ipc_space_t space,
260 mach_port_name_t name,
261 ipc_port_t *portp)
262 {
263 ipc_port_t port = IP_NULL;
264 ipc_object_t object;
265 kern_return_t kr;
266
267 kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_RECEIVE, &object);
268 if (kr == KERN_SUCCESS) {
269 /* object is locked */
270 port = ip_object_to_port(object);
271 if (!ip_in_space(port, space)) {
272 __ipc_port_translate_receive_panic(space, port);
273 }
274 }
275 *portp = port;
276 return kr;
277 }
278
279
280 /*
281 * Routine: ipc_port_request_alloc
282 * Purpose:
283 * Try to allocate a request slot.
284 * If successful, returns the request index.
285 * Otherwise returns zero.
286 * Conditions:
287 * The port is locked and active.
288 * Returns:
289 * KERN_SUCCESS A request index was found.
290 * KERN_NO_SPACE No index allocated.
291 */
292
293 kern_return_t
ipc_port_request_alloc(ipc_port_t port,mach_port_name_t name,ipc_port_t soright,ipc_port_request_opts_t options,ipc_port_request_index_t * indexp)294 ipc_port_request_alloc(
295 ipc_port_t port,
296 mach_port_name_t name,
297 ipc_port_t soright,
298 ipc_port_request_opts_t options,
299 ipc_port_request_index_t *indexp)
300 {
301 ipc_port_request_table_t table;
302 ipc_port_request_index_t index;
303 ipc_port_request_t ipr, base;
304
305 require_ip_active(port);
306 assert(name != MACH_PORT_NULL);
307 assert(soright != IP_NULL);
308
309 table = port->ip_requests;
310 if (table == NULL) {
311 return KERN_NO_SPACE;
312 }
313
314 base = ipc_port_request_table_base(table);
315 index = base->ipr_next;
316 if (index == 0) {
317 return KERN_NO_SPACE;
318 }
319
320 ipr = ipc_port_request_table_get(table, index);
321 assert(ipr->ipr_soright == IP_NULL);
322
323 base->ipr_next = ipr->ipr_next;
324 ipr->ipr_name = name;
325 ipr->ipr_soright = IPR_SOR_MAKE(soright, options);
326
327 if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
328 port->ip_sprequests == 0) {
329 port->ip_sprequests = 1;
330 }
331
332 *indexp = index;
333
334 return KERN_SUCCESS;
335 }
336
337
338 /*
339 * Routine: ipc_port_request_hnotify_alloc
340 * Purpose:
341 * Try to allocate a request slot.
342 * If successful, returns the request index.
343 * Otherwise returns zero.
344 * Conditions:
345 * The port is locked and active.
346 * Returns:
347 * KERN_SUCCESS A request index was found.
348 * KERN_NO_SPACE No index allocated.
349 * KERN_INVALID_CAPABILITY A host notify registration already
350 * existed
351 */
352
353 kern_return_t
ipc_port_request_hnotify_alloc(ipc_port_t port,struct host_notify_entry * hnotify,ipc_port_request_index_t * indexp)354 ipc_port_request_hnotify_alloc(
355 ipc_port_t port,
356 struct host_notify_entry *hnotify,
357 ipc_port_request_index_t *indexp)
358 {
359 ipc_port_request_table_t table;
360 ipc_port_request_index_t index;
361 ipc_port_request_t ipr, base;
362
363 require_ip_active(port);
364
365 table = port->ip_requests;
366 if (table == NULL) {
367 return KERN_NO_SPACE;
368 }
369
370 base = ipc_port_request_table_base(table);
371 if (base->ipr_hn_slot) {
372 return KERN_INVALID_CAPABILITY;
373 }
374 index = base->ipr_next;
375 if (index == 0) {
376 return KERN_NO_SPACE;
377 }
378
379 ipr = ipc_port_request_table_get(table, index);
380 assert(ipr->ipr_soright == IP_NULL);
381
382 base->ipr_hn_slot = ipr;
383 base->ipr_next = ipr->ipr_next;
384 ipr->ipr_hnotify = hnotify;
385 ipr->ipr_name = IPR_HOST_NOTIFY;
386
387 *indexp = index;
388
389 return KERN_SUCCESS;
390 }
391
392 /*
393 * Routine: ipc_port_request_grow
394 * Purpose:
395 * Grow a port's table of requests.
396 * Conditions:
397 * The port must be locked and active.
398 * Nothing else locked; will allocate memory.
399 * Upon return the port is unlocked.
400 * Returns:
401 * KERN_SUCCESS Grew the table.
402 * KERN_SUCCESS Somebody else grew the table.
403 * KERN_SUCCESS The port died.
404 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
405 * KERN_NO_SPACE Couldn't grow to desired size
406 */
407
408 kern_return_t
ipc_port_request_grow(ipc_port_t port)409 ipc_port_request_grow(
410 ipc_port_t port)
411 {
412 ipc_port_request_table_t otable, ntable;
413 uint32_t osize, nsize;
414 uint32_t ocount, ncount;
415
416 require_ip_active(port);
417
418 otable = port->ip_requests;
419 if (otable) {
420 osize = ipc_port_request_table_size(otable);
421 } else {
422 osize = 0;
423 }
424 nsize = ipc_port_request_table_next_size(2, osize, 16);
425 if (nsize > CONFIG_IPC_TABLE_REQUEST_SIZE_MAX) {
426 nsize = CONFIG_IPC_TABLE_REQUEST_SIZE_MAX;
427 }
428 if (nsize == osize) {
429 return KERN_RESOURCE_SHORTAGE;
430 }
431
432 ip_reference(port);
433 ip_mq_unlock(port);
434
435 ntable = ipc_port_request_table_alloc_by_size(nsize, Z_WAITOK | Z_ZERO);
436 if (ntable == NULL) {
437 ip_release(port);
438 return KERN_RESOURCE_SHORTAGE;
439 }
440
441 ip_mq_lock(port);
442
443 /*
444 * Check that port is still active and that nobody else
445 * has slipped in and grown the table on us. Note that
446 * just checking if the current table pointer == otable
447 * isn't sufficient; must check ipr_size.
448 */
449
450 ocount = ipc_port_request_table_size_to_count(osize);
451 ncount = ipc_port_request_table_size_to_count(nsize);
452
453 if (ip_active(port) && port->ip_requests == otable) {
454 ipc_port_request_index_t free, i;
455
456 /* copy old table to new table */
457
458 if (otable != NULL) {
459 ipc_port_request_t obase, nbase, ohn, nhn;
460
461 obase = ipc_port_request_table_base(otable);
462 nbase = ipc_port_request_table_base(ntable);
463 memcpy(nbase, obase, osize);
464
465 /*
466 * if there is a host-notify registration,
467 * fixup dPAC for the registration's ipr_hnotify field,
468 * and the ipr_hn_slot sentinel.
469 */
470 ohn = obase->ipr_hn_slot;
471 if (ohn) {
472 nhn = nbase + (ohn - obase);
473 nhn->ipr_hnotify = ohn->ipr_hnotify;
474 nbase->ipr_hn_slot = nhn;
475 }
476 } else {
477 ocount = 1;
478 free = 0;
479 }
480
481 /* add new elements to the new table's free list */
482
483 for (i = ocount; i < ncount; i++) {
484 ipc_port_request_table_get_nocheck(ntable, i)->ipr_next = free;
485 free = i;
486 }
487
488 ipc_port_request_table_base(ntable)->ipr_next = free;
489 port->ip_requests = ntable;
490 ip_mq_unlock(port);
491 ip_release(port);
492
493 if (otable != NULL) {
494 ipc_port_request_table_free(&otable);
495 }
496 } else {
497 ip_mq_unlock(port);
498 ip_release(port);
499 ipc_port_request_table_free(&ntable);
500 }
501
502 return KERN_SUCCESS;
503 }
504
505 /*
506 * Routine: ipc_port_request_sparm
507 * Purpose:
508 * Arm delayed send-possible request.
509 * Conditions:
510 * The port must be locked and active.
511 *
512 * Returns TRUE if the request was armed with importance.
513 */
514
bool
ipc_port_request_sparm(
	ipc_port_t                      port,
	__assert_only mach_port_name_t  name,
	ipc_port_request_index_t        index,
	mach_msg_option64_t             option,
	mach_msg_priority_t             priority)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_table_t table;
		ipc_port_request_t ipr;

		require_ip_active(port);

		table = port->ip_requests;
		assert(table != NULL);

		ipr = ipc_port_request_table_get(table, index);
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			/* arm the send-possible request and mark the port */
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				mach_msg_qos_t qos_ovr;
				/* priority may be either a pthread priority or a raw QoS encoding */
				if (mach_msg_priority_is_pthread_priority(priority)) {
					qos_ovr = _pthread_priority_thread_qos(priority);
				} else {
					qos_ovr = mach_msg_priority_overide_qos(priority);
				}
				if (qos_ovr) {
					ipc_mqueue_override_send_locked(&port->ip_messages, qos_ovr);
				}
			}

#if IMPORTANCE_INHERITANCE
			/*
			 * Return true (request armed with importance) only when
			 * importance donation applies: not explicitly suppressed,
			 * the port accepts donation, no importance already boosted
			 * for a send-possible request, and the sender either forces
			 * importance or is an importance donor task.
			 */
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			    (task_is_importance_donor(current_task())))) {
				return true;
			}
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return false;
}
566
567 /*
568 * Routine: ipc_port_request_type
569 * Purpose:
570 * Determine the type(s) of port requests enabled for a name.
571 * Conditions:
572 * The port must be locked or inactive (to avoid table growth).
573 * The index must not be IE_REQ_NONE and for the name in question.
574 */
575 mach_port_type_t
ipc_port_request_type(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)576 ipc_port_request_type(
577 ipc_port_t port,
578 __assert_only mach_port_name_t name,
579 ipc_port_request_index_t index)
580 {
581 ipc_port_request_table_t table;
582 ipc_port_request_t ipr;
583 mach_port_type_t type = 0;
584
585 table = port->ip_requests;
586 assert(table != NULL);
587
588 assert(index != IE_REQ_NONE);
589 ipr = ipc_port_request_table_get(table, index);
590 assert(ipr->ipr_name == name);
591
592 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
593 type |= MACH_PORT_TYPE_DNREQUEST;
594
595 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
596 type |= MACH_PORT_TYPE_SPREQUEST;
597
598 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
599 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
600 }
601 }
602 }
603 return type;
604 }
605
606 /*
607 * Routine: ipc_port_request_cancel
608 * Purpose:
609 * Cancel a dead-name/send-possible request and return the send-once right.
610 * Conditions:
611 * The port must be locked and active.
612 * The index must not be IPR_REQ_NONE and must correspond with name.
613 */
614
615 ipc_port_t
ipc_port_request_cancel(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)616 ipc_port_request_cancel(
617 ipc_port_t port,
618 __assert_only mach_port_name_t name,
619 ipc_port_request_index_t index)
620 {
621 ipc_port_request_table_t table;
622 ipc_port_request_t base, ipr;
623 ipc_port_t request = IP_NULL;
624
625 require_ip_active(port);
626 table = port->ip_requests;
627 base = ipc_port_request_table_base(table);
628 assert(table != NULL);
629
630 assert(index != IE_REQ_NONE);
631 ipr = ipc_port_request_table_get(table, index);
632 assert(ipr->ipr_name == name);
633 request = IPR_SOR_PORT(ipr->ipr_soright);
634
635 /* return ipr to the free list inside the table */
636 ipr->ipr_next = base->ipr_next;
637 ipr->ipr_soright = IP_NULL;
638 if (base->ipr_hn_slot == ipr) {
639 base->ipr_hn_slot = NULL;
640 }
641 base->ipr_next = index;
642
643 return request;
644 }
645
646
647 /*
648 * Routine: ipc_port_nsrequest
649 * Purpose:
650 * Make a no-senders request, returning the
651 * previously registered send-once right.
652 * Just cancels the previous request if notify is IP_NULL.
653 * Conditions:
654 * The port is locked and active. It is unlocked.
655 * Consumes a ref for notify (if non-null), and
656 * returns previous with a ref (if non-null).
657 */
658
void
ipc_port_nsrequest(
	ipc_port_t              port,
	mach_port_mscount_t     sync,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous;
	mach_port_mscount_t mscount;
	require_ip_active(port);

	/* kernel ports arm no-senders differently (see ip_nsrequest states) */
	assert(!ip_in_space(port, ipc_space_kernel));
	assert(port->ip_nsrequest != IP_KOBJECT_NSREQUEST_ARMED);

	previous = port->ip_nsrequest;
	mscount = port->ip_mscount;

	/*
	 * If there are already no senders and the caller's sync point
	 * has been reached, fire the notification immediately instead
	 * of arming it.  The port lock is dropped before the (possibly
	 * blocking) notification send.
	 */
	if ((port->ip_srights == 0) && (sync <= mscount) &&
	    (notify != IP_NULL)) {
		port->ip_nsrequest = IP_NULL;
		ip_mq_unlock(port);
		ipc_notify_no_senders(notify, mscount, /* kobject */ false);
	} else {
		/* arm (or disarm, if notify is IP_NULL) the request */
		port->ip_nsrequest = notify;
		ip_mq_unlock(port);
	}

	/* caller receives the previously armed right, with its ref */
	*previousp = previous;
}
688
689
690 /*
691 * Routine: ipc_port_clear_receiver
692 * Purpose:
693 * Prepares a receive right for transmission/destruction,
694 * optionally performs mqueue destruction (with port lock held)
695 *
696 * Conditions:
697 * The port is locked and active.
698 * Returns:
699 * If should_destroy is TRUE, then the return value indicates
700 * whether the caller needs to reap kmsg structures that should
701 * be destroyed (by calling ipc_kmsg_reap_delayed)
702 *
703 * If should_destroy is FALSE, this always returns FALSE
704 */
705
boolean_t
ipc_port_clear_receiver(
	ipc_port_t              port,
	boolean_t               should_destroy,
	waitq_link_list_t       *free_l)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	boolean_t reap_messages = FALSE;

	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the write space lock or the receive entry has
	 * been deleted, so even though this acquires and releases
	 * the port lock, we know we won't be added to any other sets.
	 */
	if (ip_in_pset(port)) {
		waitq_unlink_all_locked(&port->ip_waitq, NULL, free_l);
		assert(!ip_in_pset(port));
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount, seqno, guard bits
	 */
	if (ip_in_a_space(port)) {
		ipc_mqueue_changed(ip_get_receiver(port), &port->ip_waitq);
	} else {
		ipc_mqueue_changed(NULL, &port->ip_waitq);
	}
	port->ip_mscount = 0;
	mqueue->imq_seqno = 0;
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;

	if (should_destroy) {
		/*
		 * Mark the port and mqueue invalid, preventing further send/receive
		 * operations from succeeding. It's important for this to be
		 * done under the same lock hold as the ipc_mqueue_changed
		 * call to avoid additional threads blocking on an mqueue
		 * that's being destroyed.
		 *
		 * The port active bit needs to be guarded under mqueue lock for
		 * turnstiles
		 */

		/* port transitions to INACTIVE state */
		io_bits_andnot(ip_to_object(port), IO_BITS_ACTIVE);
		port->ip_receiver_name = MACH_PORT_NULL;
		/* timestamp lets dead-name lookups order death vs. right copyin */
		port->ip_timestamp = ipc_port_timestamp();

		/* returns TRUE when the caller must run ipc_kmsg_reap_delayed() */
		reap_messages = ipc_mqueue_destroy_locked(mqueue, free_l);
	} else {
		/*
		 * clear the immovable bit so the port can move back to anyone
		 * listening for the port destroy notification.
		 */
		port->ip_immovable_receive = 0;

		/* port transtions to IN-LIMBO state */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
	}

	return reap_messages;
}
771
772
773 /*
774 * Routine: ipc_port_init_validate_flags
775 * Purpose:
776 * Validates the flag arguments for ipc_port_init
777 * so that overlapping flags are not accidentally used together
778 */
779
780 static kern_return_t
ipc_port_init_validate_flags(ipc_port_init_flags_t flags)781 ipc_port_init_validate_flags(ipc_port_init_flags_t flags)
782 {
783 uint32_t at_most_one_flags = flags & (IPC_PORT_ENFORCE_REPLY_PORT_SEMANTICS |
784 IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS |
785 IPC_PORT_INIT_EXCEPTION_PORT |
786 IPC_PORT_INIT_PROVISIONAL_REPLY);
787
788 if (at_most_one_flags & (at_most_one_flags - 1)) {
789 /* at most one of the listed flags can be set */
790 return KERN_INVALID_ARGUMENT;
791 }
792 return KERN_SUCCESS;
793 }
794
795
796 /*
797 * Routine: ipc_port_init
798 * Purpose:
799 * Initializes a newly-allocated port.
800 *
801 * The memory is expected to be zero initialized (allocated with Z_ZERO).
802 */
803
void
ipc_port_init(
	ipc_port_t              port,
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name)
{
	int policy = SYNC_POLICY_FIFO;
	task_t task = TASK_NULL;

	/* the port has been 0 initialized when called */

	if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
		io_bits_or(ip_to_object(port), IP_BIT_FILTER_MSG);
	}
	if (flags & IPC_PORT_INIT_LOCKED) {
		/* the waitq (and hence the port) is born locked */
		policy |= SYNC_POLICY_INIT_LOCKED;
	}

	/* must be done first, many ip_* bits live inside the waitq */
	waitq_init(&port->ip_waitq, WQT_PORT, policy);
	if (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) {
		port->ip_tg_block_tracking = true;
	}
	if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
		port->ip_specialreply = true;
	}
	if ((flags & IPC_PORT_INIT_REPLY) || (flags & IPC_PORT_INIT_SPECIAL_REPLY)) {
		task = current_task_early();

		/* Strict enforcement of reply port semantics are disabled for 3p - rdar://97441265. */
		if (task && task_is_hardened_binary(task)) {
			/* hardened binaries get full (immovable) reply ports */
			port->ip_immovable_receive = true;
			ip_mark_reply_port(port);
		} else {
			ip_mark_provisional_reply_port(port);
		}
	}
	if (flags & IPC_PORT_ENFORCE_REPLY_PORT_SEMANTICS) {
		ip_enforce_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS) {
		ip_enforce_rigid_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_INIT_PROVISIONAL_REPLY) {
		ip_mark_provisional_reply_port(port);
	}

	if (flags & IPC_PORT_INIT_EXCEPTION_PORT) {
		/* exception ports are always immovable */
		ip_mark_exception_port(port);
		port->ip_immovable_receive = true;
	}

	/* no kernel-applied QoS / IO tier overrides yet */
	port->ip_kernel_qos_override = THREAD_QOS_UNSPECIFIED;
	port->ip_kernel_iotier_override = THROTTLE_LEVEL_END;

	ipc_mqueue_init(&port->ip_messages);
#if MACH_ASSERT
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif /* MACH_ASSERT */

	/* port transitions to IN-SPACE state */
	port->ip_receiver_name = name;
	port->ip_receiver = space;

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		/* caller is handed a made send right along with the receive right */
		port->ip_srights = 1;
		port->ip_mscount = 1;
	}
}
874
875 /*
876 * Routine: ipc_port_alloc
877 * Purpose:
878 * Allocate a port.
879 * Conditions:
880 * Nothing locked. If successful, the port is returned
881 * locked. (The caller doesn't have a reference.)
882 * Returns:
883 * KERN_SUCCESS The port is allocated.
884 * KERN_INVALID_TASK The space is dead.
885 * KERN_NO_SPACE No room for an entry in the space.
886 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
887 */
888
kern_return_t
ipc_port_alloc(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        *namep,
	ipc_port_t              *portp)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	/* reject mutually exclusive flag combinations up front */
	kr = ipc_port_init_validate_flags(flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		/* the entry also records a send right with one uref */
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
	    &name, (ipc_object_t *) &port);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* space is locked */
	ipc_port_init(port, space, flags | IPC_PORT_INIT_LOCKED, name);
	/* port is locked */
#if MACH_ASSERT
	/*
	 * NOTE(review): ipc_port_init() above already calls
	 * ipc_port_init_debug() under MACH_ASSERT; this second call looks
	 * redundant (it would re-record the creation frame) — confirm
	 * whether it is intentional.
	 */
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif /* MACH_ASSERT */

	/* unlock space after init */
	is_write_unlock(space);

	*namep = name;
	*portp = port;

	return KERN_SUCCESS;
}
932
933 /*
934 * Routine: ipc_port_alloc_name
935 * Purpose:
936 * Allocate a port, with a specific name.
937 * Conditions:
938 * Nothing locked. If successful, the port is returned
939 * locked. (The caller doesn't have a reference.)
940 * Returns:
941 * KERN_SUCCESS The port is allocated.
942 * KERN_INVALID_TASK The space is dead.
943 * KERN_NAME_EXISTS The name already denotes a right.
944 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
945 */
946
kern_return_t
ipc_port_alloc_name(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	/* reject mutually exclusive flag combinations up front */
	kern_return_t kr = ipc_port_init_validate_flags(flags);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		/* the entry also records a send right with one uref */
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	/* the port must come back locked to the caller */
	flags |= IPC_PORT_INIT_LOCKED;

	/*
	 * The init block runs from inside ipc_object_alloc_name,
	 * once the entry for `name` has been claimed.
	 */
	return ipc_object_alloc_name(space, IOT_PORT, type, urefs,
	           name, (ipc_object_t *)portp, ^(ipc_object_t object){
		ipc_port_init(ip_object_to_port(object), space, flags, name);
	});
}
973
974 /*
975 * Routine: ipc_port_spnotify
976 * Purpose:
977 * Generate send-possible port notifications.
978 * Conditions:
979 * Nothing locked, reference held on port.
980 */
void
ipc_port_spnotify(
	ipc_port_t      port)
{
	ipc_port_request_index_t index = 0;
	ipc_table_elems_t size = 0;

	/*
	 * If the port has no send-possible request
	 * armed, don't bother to lock the port.
	 */
	if (port->ip_sprequests == 0) {
		return;
	}

	ip_mq_lock(port);

#if IMPORTANCE_INHERITANCE
	if (port->ip_spimportant != 0) {
		port->ip_spimportant = 0;
		/*
		 * ipc_port_importance_delta() may drop the port lock;
		 * it returns TRUE when it did NOT reacquire it.
		 */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
			ip_mq_lock(port);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* re-check after the potential lock drop above */
	if (port->ip_sprequests == 0) {
		ip_mq_unlock(port);
		return;
	}
	port->ip_sprequests = 0;

revalidate:
	if (ip_active(port)) {
		ipc_port_request_table_t requests;

		/* table may change each time port unlocked (reload) */
		requests = port->ip_requests;
		assert(requests != NULL);

		/*
		 * no need to go beyond table size when first
		 * we entered - those are future notifications.
		 */
		if (size == 0) {
			size = ipc_port_request_table_count(requests);
		}

		/* no need to backtrack either */
		while (++index < size) {
			ipc_port_request_t ipr = ipc_port_request_table_get_nocheck(requests, index);
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
			boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

			if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
				/* claim send-once right - slot still inuse */
				assert(name != IPR_HOST_NOTIFY);
				ipr->ipr_soright = IP_NULL;
				ip_mq_unlock(port);

				/* consumes the claimed send-once right */
				ipc_notify_send_possible(soright, name);

				/* lock was dropped: restart validation from `index` */
				ip_mq_lock(port);
				goto revalidate;
			}
		}
	}
	ip_mq_unlock(port);
	return;
}
1052
1053 /*
1054 * Routine: ipc_port_dnnotify
1055 * Purpose:
1056 * Generate dead name notifications for
1057 * all outstanding dead-name and send-
1058 * possible requests.
1059 * Conditions:
1060 * Nothing locked.
1061 * Port must be inactive.
1062 * Reference held on port.
1063 */
1064 void
ipc_port_dnnotify(ipc_port_t port)1065 ipc_port_dnnotify(
1066 ipc_port_t port)
1067 {
1068 ipc_port_request_table_t requests = port->ip_requests;
1069
1070 assert(!ip_active(port));
1071 if (requests != NULL) {
1072 ipc_port_request_t ipr, base;
1073
1074 base = ipr = ipc_port_request_table_base(requests);
1075
1076 while ((ipr = ipc_port_request_table_next_elem(requests, ipr))) {
1077 mach_port_name_t name = ipr->ipr_name;
1078 ipc_port_t soright;
1079
1080 switch (name) {
1081 case MACH_PORT_DEAD:
1082 case MACH_PORT_NULL:
1083 break;
1084 case IPR_HOST_NOTIFY:
1085 assert(base->ipr_hn_slot == ipr);
1086 host_notify_cancel(ipr->ipr_hnotify);
1087 break;
1088 default:
1089 soright = IPR_SOR_PORT(ipr->ipr_soright);
1090 if (IP_VALID(soright)) {
1091 ipc_notify_dead_name(soright, name);
1092 }
1093 break;
1094 }
1095 }
1096 }
1097 }
1098
1099 /*
1100 * Routine: ipc_port_destroy
1101 * Purpose:
1102 * Destroys a port. Cleans up queued messages.
1103 *
1104 * If the port has a backup, it doesn't get destroyed,
1105 * but is sent in a port-destroyed notification to the backup.
1106 * Conditions:
1107 * The port is locked and alive; nothing else locked.
1108 * The caller has a reference, which is consumed.
1109 * Afterwards, the port is unlocked and dead.
1110 */
1111
void
ipc_port_destroy(ipc_port_t port)
{
	bool special_reply = port->ip_specialreply;
	bool service_port = port->ip_service_port;
	bool reap_msgs;

	ipc_port_t pdrequest = IP_NULL;
	struct task_watchport_elem *twe = NULL;
	/* collects port-set wait queue links to free after unlocking */
	waitq_link_list_t free_l = { };

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	/* "top" == this thread currently carries no importance assertions */
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	require_ip_active(port);
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* clear any reply-port context */
	port->ip_reply_context = 0;

	/* must be done before we access ip_pdrequest (unioned with ip_twe) */
	twe = ipc_port_clear_watchport_elem_internal(port);
	assert(!port->ip_has_watchport);

	if (!special_reply) {
		/* we assume the ref for pdrequest */
		pdrequest = port->ip_pdrequest;
		port->ip_pdrequest = IP_NULL;
	} else if (port->ip_tempowner) {
		/* special reply ports must never be importance tempowners */
		panic("ipc_port_destroy: invalid state");
	}

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = ip_get_imp_task(port);
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL) {
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
		}
	}

	if (top) {
		/* park the count on this thread until we drop it below */
		self->ith_assertions = assertcnt;
	}
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * If no port-destroyed notification is armed, calling
	 * ipc_port_clear_receiver() will mark the port inactive
	 * and will wakeup any threads which may be blocked receiving on it.
	 */
	reap_msgs = ipc_port_clear_receiver(port, pdrequest == IP_NULL, &free_l);
	assert(!ip_in_pset(port));
	assert(port->ip_mscount == 0);

	/*
	 * Handle port-destroyed notification
	 */
	if (pdrequest != IP_NULL) {
		/* port stays alive: messages are not reaped on this path */
		assert(reap_msgs == false);

		if (service_port) {
			assert(port->ip_splabel != NULL);
			if (ipc_service_port_label_is_special_pdrequest((ipc_service_port_label_t)port->ip_splabel)) {
				ipc_service_port_label_set_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
			}
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
	} else {
		ipc_service_port_label_t splabel = NULL;
		ipc_notify_nsenders_t nsrequest;

		/* must be snapshotted while still locked */
		nsrequest = ipc_notify_no_senders_prepare(port);

		if (!ip_is_kolabeled(port)) {
			/* detach the service/connection port label for later dealloc */
			splabel = port->ip_splabel;
			port->ip_splabel = NULL;
			port->ip_service_port = false;
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* unlink the kmsg from special reply port */
		if (special_reply) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
		}

		/* Deallocate the service/connection port label */
		if (splabel) {
			ipc_service_port_label_dealloc(splabel, service_port);
			splabel = NULL;
		}

		if (reap_msgs) {
			ipc_kmsg_reap_delayed();
		}

		if (nsrequest.ns_notify) {
			/*
			 * ipc_notify_no_senders_prepare will consume
			 * the reference for kobjects.
			 */
			assert(!nsrequest.ns_is_kobject);
			ip_mq_lock(nsrequest.ns_notify);
			ipc_notify_send_once_and_unlock(nsrequest.ns_notify); /* consumes ref */
		}

		/* generate dead-name notifications */
		ipc_port_dnnotify(port);

		ipc_kobject_destroy(port);

		ip_release(port); /* consume caller's ref */
	}

	if (twe) {
		task_watchport_elem_deallocate(twe);
		twe = NULL;
	}

	/* safe to free the pset links now that no port lock is held */
	waitq_link_free_list(WQT_PORT_SET, &free_l);

#if IMPORTANCE_INHERITANCE
	if (release_imp_task != IIT_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);
	} else if (assertcnt > 0) {
		if (top) {
			self->ith_assertions = 0;
			/* no tempowner task: drop against the current task instead */
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
1275
1276 /*
1277 * Routine: ipc_port_destination_chain_lock
1278 * Purpose:
1279 * Search for the end of the chain (a port not in transit),
1280 * acquiring locks along the way, and return it in `base`.
1281 *
1282 * Returns true if a reference was taken on `base`
1283 *
1284 * Conditions:
1285 * No ports locked.
1286 * ipc_port_multiple_lock held.
1287 */
boolean_t
ipc_port_destination_chain_lock(
	ipc_port_t port,
	ipc_port_t *base)
{
	/* hand-over-hand walk: each port stays locked while we advance */
	for (;;) {
		ip_mq_lock(port);

		if (!ip_active(port)) {
			/*
			 * Active ports that are ip_mq_lock()ed cannot go away.
			 *
			 * But inactive ports at the end of walking
			 * an ip_destination chain are only protected
			 * from space termination cleanup while the entire
			 * chain of ports leading to them is held.
			 *
			 * Callers of this code tend to unlock the chain
			 * in the same order than this walk which doesn't
			 * protect `base` properly when it's inactive.
			 *
			 * In that case, take a reference that the caller
			 * is responsible for releasing.
			 */
			ip_reference(port);
			*base = port;
			return true;
		}

		/* port is active */
		if (!ip_in_transit(port)) {
			/* found the end of the chain; no reference taken */
			*base = port;
			return false;
		}

		/* still in transit: follow the destination link, keeping locks */
		port = ip_get_destination(port);
	}
}
1326
1327
1328 /*
1329 * Routine: ipc_port_check_circularity
1330 * Purpose:
1331 * Check if queueing "port" in a message for "dest"
1332 * would create a circular group of ports and messages.
1333 *
1334 * If no circularity (FALSE returned), then "port"
1335 * is changed from "in limbo" to "in transit".
1336 *
1337 * That is, we want to set port->ip_destination == dest,
1338 * but guaranteeing that this doesn't create a circle
1339 * port->ip_destination->ip_destination->... == port
1340 *
1341 * Conditions:
1342 * No ports locked. References held for "port" and "dest".
1343 */
1344
boolean_t
ipc_port_check_circularity(
	ipc_port_t port,
	ipc_port_t dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* sending a port to itself is trivially circular */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);
	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			/* both port and dest remain locked on this path */
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 * Search for the end of the chain (a port not in transit),
	 * acquiring locks along the way.
	 */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */
		require_ip_active(port);
		assert(ip_in_limbo(port));
		assert(!took_base_ref);

		/* unlock the chain in walk order, dest through base */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */
			require_ip_active(base);
			assert(!ip_in_a_space(base));

			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		/* drop the turnstile ref taken by the prepare above */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee:  lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Clear the watchport boost */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	/* reference held by port->ip_destination below */
	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/*
		 * What ipc_port_adjust_port_locked would do,
		 * but we need to also drop even more locks before
		 * calling turnstile_update_inheritor_complete().
		 */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	for (;;) {
		ipc_port_t next;

		if (dest == base) {
			break;
		}

		/* port is IN-TRANSIT */
		require_ip_active(dest);
		assert(ip_in_transit(dest));

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not IN-TRANSIT */
	assert(!ip_in_transit(base));

	ip_mq_unlock(base);
	if (took_base_ref) {
		/* ref taken by ipc_port_destination_chain_lock on an inactive base */
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
1515
1516 /*
1517 * Routine: ipc_port_watchport_elem
1518 * Purpose:
1519 * Get the port's watchport elem field
1520 *
1521 * Conditions:
1522 * port locked
1523 */
1524 static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)1525 ipc_port_watchport_elem(ipc_port_t port)
1526 {
1527 if (port->ip_has_watchport) {
1528 assert(!port->ip_specialreply);
1529 return port->ip_twe;
1530 }
1531 return NULL;
1532 }
1533
1534 /*
1535 * Routine: ipc_port_update_watchport_elem
1536 * Purpose:
1537 * Set the port's watchport elem field
1538 *
1539 * Conditions:
1540 * port locked and is not a special reply port.
1541 */
1542 static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port,struct task_watchport_elem * we)1543 ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
1544 {
1545 struct task_watchport_elem *old_we;
1546 ipc_port_t pdrequest;
1547
1548 assert(!port->ip_specialreply);
1549
1550 /*
1551 * Note: ip_pdrequest and ip_twe are unioned.
1552 * and ip_has_watchport controls the union "type"
1553 */
1554 if (port->ip_has_watchport) {
1555 old_we = port->ip_twe;
1556 pdrequest = old_we->twe_pdrequest;
1557 old_we->twe_pdrequest = IP_NULL;
1558 } else {
1559 old_we = NULL;
1560 pdrequest = port->ip_pdrequest;
1561 }
1562
1563 if (we) {
1564 port->ip_has_watchport = true;
1565 we->twe_pdrequest = pdrequest;
1566 port->ip_twe = we;
1567 } else {
1568 port->ip_has_watchport = false;
1569 port->ip_pdrequest = pdrequest;
1570 }
1571
1572 return old_we;
1573 }
1574
1575 /*
1576 * Routine: ipc_special_reply_stash_pid_locked
1577 * Purpose:
1578 * Set the pid of process that copied out send once right to special reply port.
1579 *
1580 * Conditions:
1581 * port locked
1582 */
1583 static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port,int pid)1584 ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
1585 {
1586 assert(port->ip_specialreply);
1587 port->ip_pid = pid;
1588 }
1589
1590 /*
1591 * Routine: ipc_special_reply_get_pid_locked
1592 * Purpose:
1593 * Get the pid of process that copied out send once right to special reply port.
1594 *
1595 * Conditions:
1596 * port locked
1597 */
1598 int
ipc_special_reply_get_pid_locked(ipc_port_t port)1599 ipc_special_reply_get_pid_locked(ipc_port_t port)
1600 {
1601 assert(port->ip_specialreply);
1602 return port->ip_pid;
1603 }
1604
1605 /*
1606 * Update the recv turnstile inheritor for a port.
1607 *
1608 * Sync IPC through the port receive turnstile only happens for the special
1609 * reply port case. It has three sub-cases:
1610 *
1611 * 1. a send-once right is in transit, and pushes on the send turnstile of its
1612 * destination mqueue.
1613 *
1614 * 2. a send-once right has been stashed on a knote it was copied out "through",
1615 * as the first such copied out port.
1616 *
1617 * 3. a send-once right has been stashed on a knote it was copied out "through",
1618 * as the second or more copied out port.
1619 */
1620 void
ipc_port_recv_update_inheritor(ipc_port_t port,struct turnstile * rcv_turnstile,turnstile_update_flags_t flags)1621 ipc_port_recv_update_inheritor(
1622 ipc_port_t port,
1623 struct turnstile *rcv_turnstile,
1624 turnstile_update_flags_t flags)
1625 {
1626 struct turnstile *inheritor = TURNSTILE_NULL;
1627 struct knote *kn;
1628
1629 if (ip_active(port) && port->ip_specialreply) {
1630 ip_mq_lock_held(port);
1631
1632 switch (port->ip_sync_link_state) {
1633 case PORT_SYNC_LINK_PORT:
1634 if (port->ip_sync_inheritor_port != NULL) {
1635 inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1636 }
1637 break;
1638
1639 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1640 kn = port->ip_sync_inheritor_knote;
1641 inheritor = filt_ipc_kqueue_turnstile(kn);
1642 break;
1643
1644 case PORT_SYNC_LINK_WORKLOOP_STASH:
1645 inheritor = port->ip_sync_inheritor_ts;
1646 break;
1647 }
1648 }
1649
1650 turnstile_update_inheritor(rcv_turnstile, inheritor,
1651 flags | TURNSTILE_INHERITOR_TURNSTILE);
1652 }
1653
1654 /*
1655 * Update the send turnstile inheritor for a port.
1656 *
1657 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
1658 *
1659 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
1660 * to push on thread doing the sync ipc.
1661 *
1662 * 2. a receive right is in transit, and pushes on the send turnstile of its
1663 * destination mqueue.
1664 *
1665 * 3. port was passed as an exec watchport and port is pushing on main thread
1666 * of the task.
1667 *
1668 * 4. a receive right has been stashed on a knote it was copied out "through",
1669 * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
1670 * for the special reply port)
1671 *
1672 * 5. a receive right has been stashed on a knote it was copied out "through",
1673 * as the second or more copied out port (same as
1674 * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
1675 *
1676 * 6. a receive right has been copied out as a part of sync bootstrap checkin
1677 * and needs to push on thread doing the sync bootstrap checkin.
1678 *
1679 * 7. the receive right is monitored by a knote, and pushes on any that is
1680 * registered on a workloop. filt_machport makes sure that if such a knote
1681 * exists, it is kept as the first item in the knote list, so we never need
1682 * to walk.
1683 */
void
ipc_port_send_update_inheritor(
	ipc_port_t port,
	struct turnstile *send_turnstile,
	turnstile_update_flags_t flags)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct knote *kn;
	turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

	ip_mq_lock_held(port);

	/* the cases below are evaluated in strict priority order */
	if (!ip_active(port)) {
		/* this port is no longer active, it should not push anywhere */
	} else if (port->ip_specialreply) {
		/* Case 1. */
		if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
			inheritor = port->ip_messages.imq_srp_owner_thread;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (ip_in_transit(port)) {
		/* Case 2. */
		inheritor = port_send_turnstile(ip_get_destination(port));
	} else if (port->ip_has_watchport) {
		/* Case 3. */
		if (prioritize_launch) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = ipc_port_get_watchport_inheritor(port);
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
		/* Case 4. */
		inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
		/* Case 5. */
		inheritor = mqueue->imq_inheritor_turnstile;
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
		/* Case 6. */
		if (prioritize_launch) {
			inheritor = port->ip_messages.imq_inheritor_thread_ref;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if ((kn = SLIST_FIRST(&port->ip_klist))) {
		/* Case 7. Push on a workloop that is interested */
		if (filt_machport_kqueue_has_turnstile(kn)) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = filt_ipc_kqueue_turnstile(kn);
		}
	}

	turnstile_update_inheritor(send_turnstile, inheritor,
	    flags | inheritor_flags);
}
1738
1739 /*
1740 * Routine: ipc_port_send_turnstile_prepare
1741 * Purpose:
1742 * Get a reference on port's send turnstile, if
1743 * port does not have a send turnstile then allocate one.
1744 *
1745 * Conditions:
1746 * Nothing is locked.
1747 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;
	struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
	ip_mq_lock(port);

	if (port_send_turnstile(port) == NULL ||
	    port_send_turnstile(port)->ts_prim_count == 0) {
		if (turnstile == TURNSTILE_NULL) {
			/*
			 * Cannot allocate while holding the port lock:
			 * drop it, allocate, and re-check from the top
			 * (another thread may have installed one meanwhile).
			 */
			ip_mq_unlock(port);
			turnstile = turnstile_alloc();
			goto retry_alloc;
		}

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    turnstile, TURNSTILE_SYNC_IPC);
		/* ownership of the allocation passed to turnstile_prepare */
		turnstile = TURNSTILE_NULL;

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);

		/* turnstile complete will be called in ipc_port_send_turnstile_complete */
	}

	/* Increment turnstile counter */
	port_send_turnstile(port)->ts_prim_count++;
	ip_mq_unlock(port);

	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
	}
	if (turnstile != TURNSTILE_NULL) {
		/* raced with another thread: our allocation went unused */
		turnstile_deallocate(turnstile);
	}
}
1788
1789
1790 /*
1791 * Routine: ipc_port_send_turnstile_complete
1792 * Purpose:
1793 * Drop a ref on the port's send turnstile, if the
1794 * ref becomes zero, deallocate the turnstile.
1795 *
1796 * Conditions:
1797 * The space might be locked
1798 */
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;

	/* Drop turnstile count on dest port */
	ip_mq_lock(port);

	port_send_turnstile(port)->ts_prim_count--;
	if (port_send_turnstile(port)->ts_prim_count == 0) {
		/* last ref: detach the turnstile from the port */
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
		    &turnstile, TURNSTILE_SYNC_IPC);
		assert(turnstile != TURNSTILE_NULL);
	}
	ip_mq_unlock(port);
	/* must run after dropping the port lock */
	turnstile_cleanup();

	if (turnstile != TURNSTILE_NULL) {
		turnstile_deallocate(turnstile);
		turnstile = TURNSTILE_NULL;
	}
}
1821
1822 /*
1823 * Routine: ipc_port_rcv_turnstile
1824 * Purpose:
1825 * Get the port's receive turnstile
1826 *
1827 * Conditions:
1828 * mqueue locked or thread waiting on turnstile is locked.
1829 */
1830 static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)1831 ipc_port_rcv_turnstile(ipc_port_t port)
1832 {
1833 return *port_rcv_turnstile_address(port);
1834 }
1835
1836
1837 /*
1838 * Routine: ipc_port_link_special_reply_port
1839 * Purpose:
1840 * Link the special reply port with the destination port.
1841 * Allocates turnstile to dest port.
1842 *
1843 * Conditions:
1844 * Nothing is locked.
1845 */
1846 void
ipc_port_link_special_reply_port(ipc_port_t special_reply_port,ipc_port_t dest_port,boolean_t sync_bootstrap_checkin)1847 ipc_port_link_special_reply_port(
1848 ipc_port_t special_reply_port,
1849 ipc_port_t dest_port,
1850 boolean_t sync_bootstrap_checkin)
1851 {
1852 boolean_t drop_turnstile_ref = FALSE;
1853 boolean_t special_reply = FALSE;
1854
1855 /* Check if dest_port needs a turnstile */
1856 ipc_port_send_turnstile_prepare(dest_port);
1857
1858 /* Lock the special reply port and establish the linkage */
1859 ip_mq_lock(special_reply_port);
1860
1861 special_reply = special_reply_port->ip_specialreply;
1862
1863 if (sync_bootstrap_checkin && special_reply) {
1864 special_reply_port->ip_sync_bootstrap_checkin = 1;
1865 }
1866
1867 /* Check if we need to drop the acquired turnstile ref on dest port */
1868 if (!special_reply ||
1869 special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
1870 special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
1871 drop_turnstile_ref = TRUE;
1872 } else {
1873 /* take a reference on dest_port */
1874 ip_reference(dest_port);
1875 special_reply_port->ip_sync_inheritor_port = dest_port;
1876 special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
1877 }
1878
1879 ip_mq_unlock(special_reply_port);
1880
1881 if (special_reply) {
1882 /*
1883 * For special reply ports, if the destination port is
1884 * marked with the thread group blocked tracking flag,
1885 * callout to the performance controller.
1886 */
1887 ipc_port_thread_group_blocked(dest_port);
1888 }
1889
1890 if (drop_turnstile_ref) {
1891 ipc_port_send_turnstile_complete(dest_port);
1892 }
1893
1894 return;
1895 }
1896
1897 /*
1898 * Routine: ipc_port_thread_group_blocked
1899 * Purpose:
1900 * Call thread_group_blocked callout if the port
1901 * has ip_tg_block_tracking bit set and the thread
1902 * has not made this callout already.
1903 *
1904 * Conditions:
1905 * Nothing is locked.
1906 */
1907 void
ipc_port_thread_group_blocked(ipc_port_t port __unused)1908 ipc_port_thread_group_blocked(ipc_port_t port __unused)
1909 {
1910 #if CONFIG_THREAD_GROUPS
1911 bool port_tg_block_tracking = false;
1912 thread_t self = current_thread();
1913
1914 if (self->thread_group == NULL ||
1915 (self->options & TH_OPT_IPC_TG_BLOCKED)) {
1916 return;
1917 }
1918
1919 port_tg_block_tracking = port->ip_tg_block_tracking;
1920 if (!port_tg_block_tracking) {
1921 return;
1922 }
1923
1924 machine_thread_group_blocked(self->thread_group, NULL,
1925 PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1926
1927 self->options |= TH_OPT_IPC_TG_BLOCKED;
1928 #endif
1929 }
1930
1931 /*
1932 * Routine: ipc_port_thread_group_unblocked
1933 * Purpose:
1934 * Call thread_group_unblocked callout if the
1935 * thread had previously made a thread_group_blocked
1936 * callout before (indicated by TH_OPT_IPC_TG_BLOCKED
1937 * flag on the thread).
1938 *
1939 * Conditions:
1940 * Nothing is locked.
1941 */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
	thread_t self = current_thread();

	if ((self->options & TH_OPT_IPC_TG_BLOCKED) == 0) {
		/* no prior blocked callout to pair with */
		return;
	}

	machine_thread_group_unblocked(self->thread_group, NULL,
	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

	self->options &= ~TH_OPT_IPC_TG_BLOCKED;
#endif
}
1958
1959 #if DEVELOPMENT || DEBUG
1960 inline void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)1961 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1962 {
1963 special_reply_port->ip_srp_lost_link = 0;
1964 special_reply_port->ip_srp_msg_sent = 0;
1965 }
1966
1967 static inline void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)1968 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1969 {
1970 if (special_reply_port->ip_specialreply == 1) {
1971 special_reply_port->ip_srp_msg_sent = 0;
1972 }
1973 }
1974
1975 inline void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)1976 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1977 {
1978 if (special_reply_port->ip_specialreply == 1) {
1979 special_reply_port->ip_srp_msg_sent = 1;
1980 }
1981 }
1982
1983 static inline void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)1984 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1985 {
1986 if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
1987 special_reply_port->ip_srp_lost_link = 1;
1988 }
1989 }
1990
1991 #else /* DEVELOPMENT || DEBUG */
1992 inline void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)1993 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
1994 {
1995 return;
1996 }
1997
1998 static inline void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)1999 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
2000 {
2001 return;
2002 }
2003
2004 inline void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)2005 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
2006 {
2007 return;
2008 }
2009
2010 static inline void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)2011 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
2012 {
2013 return;
2014 }
2015 #endif /* DEVELOPMENT || DEBUG */
2016
2017 /*
2018 * Routine: ipc_port_adjust_special_reply_port_locked
2019 * Purpose:
2020 * If the special port has a turnstile, update its inheritor.
2021 * Condition:
2022 * Special reply port locked on entry.
2023 * Special reply port unlocked on return.
2024 * The passed in port is a special reply port.
2025 * Returns:
2026 * None.
2027 */
void
ipc_port_adjust_special_reply_port_locked(
	ipc_port_t special_reply_port,
	struct knote *kn,
	uint8_t flags,
	boolean_t get_turnstile)
{
	ipc_port_t dest_port = IPC_PORT_NULL;
	int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct turnstile *ts = TURNSTILE_NULL;
	struct turnstile *port_stashed_turnstile = TURNSTILE_NULL;

	ip_mq_lock_held(special_reply_port); // ip_sync_link_state is touched

	if (!special_reply_port->ip_specialreply) {
		// only mach_msg_receive_results_complete() calls this with any port
		assert(get_turnstile);
		goto not_special;
	}

	if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
		ipc_special_reply_port_msg_sent_reset(special_reply_port);
	}

	if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
		special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
	}

	if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
		special_reply_port->ip_sync_bootstrap_checkin = 0;
	}

	/* Check if the special reply port is marked non-special */
	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
not_special:
		/* generic/non-special path: just return the donated turnstile */
		if (get_turnstile) {
			turnstile_complete((uintptr_t)special_reply_port,
			    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
		}
		ip_mq_unlock(special_reply_port);
		if (get_turnstile) {
			/* must run after dropping the port lock */
			turnstile_cleanup();
		}
		return;
	}

	if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
		if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
			/* may set sync_link_state to a workloop linkage */
			inheritor = filt_machport_stash_port(kn, special_reply_port,
			    &sync_link_state);
		}
	} else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	/* Check if need to break linkage */
	if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
	    special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
		/* nothing to change and no turnstile to return */
		ip_mq_unlock(special_reply_port);
		return;
	}

	/* tear down the previous linkage, saving what must be released later */
	switch (special_reply_port->ip_sync_link_state) {
	case PORT_SYNC_LINK_PORT:
		dest_port = special_reply_port->ip_sync_inheritor_port;
		special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		port_stashed_turnstile = special_reply_port->ip_sync_inheritor_ts;
		special_reply_port->ip_sync_inheritor_ts = NULL;
		break;
	}

	/*
	 * Stash (or unstash) the server's PID in the ip_sorights field of the
	 * special reply port, so that stackshot can later retrieve who the client
	 * is blocked on.
	 */
	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
	    sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
		ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
	} else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
	    sync_link_state == PORT_SYNC_LINK_ANY) {
		/* If we are resetting the special reply port, remove the stashed pid. */
		ipc_special_reply_stash_pid_locked(special_reply_port, 0);
	}

	special_reply_port->ip_sync_link_state = sync_link_state;

	/* establish the new linkage */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = kn;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* keep a ref on the stashed turnstile for the linkage */
		turnstile_reference(inheritor);
		special_reply_port->ip_sync_inheritor_ts = inheritor;
		break;
	case PORT_SYNC_LINK_NO_LINKAGE:
		if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
			ipc_special_reply_port_lost_link(special_reply_port);
		}
		break;
	}

	/* Get thread's turnstile donated to special reply port */
	if (get_turnstile) {
		turnstile_complete((uintptr_t)special_reply_port,
		    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
	} else {
		ts = ipc_port_rcv_turnstile(special_reply_port);
		if (ts) {
			turnstile_reference(ts);
			ipc_port_recv_update_inheritor(special_reply_port, ts,
			    TURNSTILE_IMMEDIATE_UPDATE);
		}
	}

	ip_mq_unlock(special_reply_port);

	if (get_turnstile) {
		turnstile_cleanup();
	} else if (ts) {
		/* Call turnstile cleanup after dropping the interlock */
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate(ts);
	}

	if (port_stashed_turnstile) {
		/* drop the ref held by the old WORKLOOP_STASH linkage */
		turnstile_deallocate(port_stashed_turnstile);
	}

	/* Release the ref on the dest port and its turnstile */
	if (dest_port) {
		ipc_port_send_turnstile_complete(dest_port);
		/* release the reference on the dest port, space lock might be held */
		ip_release_safe(dest_port);
	}
}
2170
2171 /*
2172 * Routine: ipc_port_adjust_special_reply_port
2173 * Purpose:
2174 * If the special port has a turnstile, update its inheritor.
2175 * Condition:
2176 * Nothing locked.
2177 * Returns:
2178 * None.
2179 */
2180 void
ipc_port_adjust_special_reply_port(ipc_port_t port,uint8_t flags)2181 ipc_port_adjust_special_reply_port(
2182 ipc_port_t port,
2183 uint8_t flags)
2184 {
2185 if (port->ip_specialreply) {
2186 ip_mq_lock(port);
2187 ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2188 }
2189 }
2190
2191 /*
2192 * Routine: ipc_port_adjust_sync_link_state_locked
2193 * Purpose:
2194 * Update the sync link state of the port and the
2195 * turnstile inheritor.
2196 * Condition:
2197 * Port locked on entry.
2198 * Port locked on return.
2199 * Returns:
2200 * None.
2201 */
void
ipc_port_adjust_sync_link_state_locked(
	ipc_port_t port,
	int sync_link_state,
	turnstile_inheritor_t inheritor)
{
	/*
	 * Drop the reference that was held on behalf of the outgoing
	 * link state, before installing the new inheritor below.
	 */
	switch (port->ip_sync_link_state) {
	case PORT_SYNC_LINK_RCV_THREAD:
		/* deallocate the thread reference for the inheritor */
		thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* deallocate the turnstile reference for the inheritor */
		turnstile_deallocate(port->ip_messages.imq_inheritor_turnstile);
		break;
	}

	/*
	 * NOTE(review): ip_klist appears to overlay the imq_inheritor_*
	 * fields (it is reinitialized both here and in the default case
	 * below) — confirm against the ipc_port structure definition.
	 */
	klist_init(&port->ip_klist);

	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		port->ip_messages.imq_inheritor_knote = inheritor;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* knote can be deleted by userspace, take a reference on turnstile */
		turnstile_reference(inheritor);
		port->ip_messages.imq_inheritor_turnstile = inheritor;
		break;
	case PORT_SYNC_LINK_RCV_THREAD:
		/* The thread could exit without clearing port state, take a thread ref */
		thread_reference((thread_t)inheritor);
		port->ip_messages.imq_inheritor_thread_ref = inheritor;
		break;
	default:
		/* any unrecognized state collapses to PORT_SYNC_LINK_ANY */
		klist_init(&port->ip_klist);
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	port->ip_sync_link_state = sync_link_state;
}
2242
2243
2244 /*
2245 * Routine: ipc_port_adjust_port_locked
2246 * Purpose:
2247 * If the port has a turnstile, update its inheritor.
2248 * Condition:
2249 * Port locked on entry.
2250 * Port unlocked on return.
2251 * Returns:
2252 * None.
2253 */
void
ipc_port_adjust_port_locked(
	ipc_port_t port,
	struct knote *kn,
	boolean_t sync_bootstrap_checkin)
{
	int sync_link_state = PORT_SYNC_LINK_ANY;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

	ip_mq_lock_held(port); // ip_sync_link_state is touched
	/* special reply ports are adjusted via the special-reply path instead */
	assert(!port->ip_specialreply);

	if (kn) {
		/* let the machport knote filter choose inheritor and link state */
		inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
		if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
			/* in this state the knote itself is the inheritor */
			inheritor = kn;
		}
	} else if (sync_bootstrap_checkin) {
		/* push on the calling thread until the bootstrap checkin completes */
		inheritor = current_thread();
		sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
	}

	ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
	port->ip_sync_bootstrap_checkin = 0;

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2282
2283 /*
2284 * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
2285 * Purpose:
2286 * If the port is pushing on rcv thread, clear it.
2287 * Condition:
2288 * Port locked on entry
2289 * Port unlocked on return.
2290 * Returns:
2291 * None.
2292 */
void
ipc_port_clear_sync_rcv_thread_boost_locked(
	ipc_port_t port)
{
	ip_mq_lock_held(port); // ip_sync_link_state is touched

	/* nothing to do unless the port is currently pushing on a receive thread */
	if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
		ip_mq_unlock(port);
		return;
	}

	/* drops the thread reference held for the RCV_THREAD inheritor */
	ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2309
2310 /*
2311 * Routine: ipc_port_has_prdrequest
2312 * Purpose:
2313 * Returns whether a port has a port-destroyed request armed
2314 * Condition:
2315 * Port is locked.
2316 */
2317 bool
ipc_port_has_prdrequest(ipc_port_t port)2318 ipc_port_has_prdrequest(
2319 ipc_port_t port)
2320 {
2321 if (port->ip_specialreply) {
2322 return false;
2323 }
2324 if (port->ip_has_watchport) {
2325 return port->ip_twe->twe_pdrequest != IP_NULL;
2326 }
2327 return port->ip_pdrequest != IP_NULL;
2328 }
2329
2330 /*
2331 * Routine: ipc_port_add_watchport_elem_locked
2332 * Purpose:
2333 * Transfer the turnstile boost of watchport to task calling exec.
2334 * Condition:
2335 * Port locked on entry.
2336 * Port unlocked on return.
2337 * Returns:
 * KERN_SUCCESS on success.
2339 * KERN_FAILURE otherwise.
2340 */
kern_return_t
ipc_port_add_watchport_elem_locked(
	ipc_port_t port,
	struct task_watchport_elem *watchport_elem,
	struct task_watchport_elem **old_elem)
{
	ip_mq_lock_held(port);

	/* Watchport boost only works for non-special active ports mapped in an ipc space */
	if (!ip_active(port) || port->ip_specialreply || !ip_in_a_space(port)) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
		/* Sever the linkage if the port was pushing on knote */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
	}

	/* hand the displaced element back so the caller can dispose of it */
	*old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2366
2367 /*
2368 * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
2369 * Purpose:
2370 * Remove the turnstile boost of watchport and recompute the push.
2371 * Condition:
2372 * Port locked on entry.
2373 * Port unlocked on return.
2374 * Returns:
 * KERN_SUCCESS on success.
2376 * KERN_FAILURE otherwise.
2377 */
kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(
	ipc_port_t port,
	struct task_watchport_elem *watchport_elem)
{
	ip_mq_lock_held(port);

	/* only clear if the port still carries the element the caller expects */
	if (ipc_port_watchport_elem(port) != watchport_elem) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	ipc_port_clear_watchport_elem_internal(port);
	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2395
2396 /*
2397 * Routine: ipc_port_replace_watchport_elem_conditional_locked
2398 * Purpose:
2399 * Replace the turnstile boost of watchport and recompute the push.
2400 * Condition:
2401 * Port locked on entry.
2402 * Port unlocked on return.
2403 * Returns:
 * KERN_SUCCESS on success.
2405 * KERN_FAILURE otherwise.
2406 */
kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(
	ipc_port_t port,
	struct task_watchport_elem *old_watchport_elem,
	struct task_watchport_elem *new_watchport_elem)
{
	ip_mq_lock_held(port);

	/* bail if this is a special reply port or the element changed under us */
	if (port->ip_specialreply ||
	    ipc_port_watchport_elem(port) != old_watchport_elem) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	ipc_port_update_watchport_elem(port, new_watchport_elem);
	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2426
2427 /*
2428 * Routine: ipc_port_clear_watchport_elem_internal
2429 * Purpose:
2430 * Remove the turnstile boost of watchport.
2431 * Condition:
2432 * Port locked on entry.
2433 * Port locked on return.
2434 * Returns:
2435 * Old task_watchport_elem returned.
2436 */
2437 struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(ipc_port_t port)2438 ipc_port_clear_watchport_elem_internal(
2439 ipc_port_t port)
2440 {
2441 ip_mq_lock_held(port);
2442
2443 if (!port->ip_has_watchport) {
2444 return NULL;
2445 }
2446
2447 return ipc_port_update_watchport_elem(port, NULL);
2448 }
2449
2450 /*
2451 * Routine: ipc_port_send_turnstile_recompute_push_locked
2452 * Purpose:
2453 * Update send turnstile inheritor of port and recompute the push.
2454 * Condition:
2455 * Port locked on entry.
2456 * Port unlocked on return.
2457 * Returns:
2458 * None.
2459 */
static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t port)
{
	struct turnstile *send_turnstile = port_send_turnstile(port);
	if (send_turnstile) {
		/* keep the turnstile alive across the unlock below */
		turnstile_reference(send_turnstile);
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
	}
	ip_mq_unlock(port);

	if (send_turnstile) {
		/* finish the inheritor propagation after dropping the port lock */
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate(send_turnstile);
	}
}
2478
2479 /*
2480 * Routine: ipc_port_get_watchport_inheritor
2481 * Purpose:
2482 * Returns inheritor for watchport.
2483 *
2484 * Conditions:
2485 * mqueue locked.
2486 * Returns:
2487 * watchport inheritor.
2488 */
2489 static thread_t
ipc_port_get_watchport_inheritor(ipc_port_t port)2490 ipc_port_get_watchport_inheritor(
2491 ipc_port_t port)
2492 {
2493 ip_mq_lock_held(port);
2494 return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2495 }
2496
2497 /*
 * Routine: ipc_port_get_receiver_task_locked
2499 * Purpose:
2500 * Returns receiver task pointer and its pid (if any) for port.
2501 *
2502 * Conditions:
2503 * Assumes the port is locked.
2504 */
2505 pid_t
ipc_port_get_receiver_task_locked(ipc_port_t port,uintptr_t * task)2506 ipc_port_get_receiver_task_locked(ipc_port_t port, uintptr_t *task)
2507 {
2508 task_t receiver = TASK_NULL;
2509 pid_t pid = -1;
2510
2511 if (!port) {
2512 goto out;
2513 }
2514
2515 if (ip_in_a_space(port) &&
2516 !ip_in_space(port, ipc_space_kernel) &&
2517 !ip_in_space(port, ipc_space_reply)) {
2518 receiver = port->ip_receiver->is_task;
2519 pid = task_pid(receiver);
2520 }
2521
2522 out:
2523 if (task) {
2524 *task = (uintptr_t)receiver;
2525 }
2526 return pid;
2527 }
2528
2529 /*
2530 * Routine: ipc_port_get_receiver_task
2531 * Purpose:
2532 * Returns receiver task pointer and its pid (if any) for port.
2533 *
2534 * Conditions:
2535 * Nothing locked. The routine takes port lock.
2536 */
2537 pid_t
ipc_port_get_receiver_task(ipc_port_t port,uintptr_t * task)2538 ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
2539 {
2540 pid_t pid = -1;
2541
2542 if (!port) {
2543 if (task) {
2544 *task = (uintptr_t)TASK_NULL;
2545 }
2546 return pid;
2547 }
2548
2549 ip_mq_lock(port);
2550 pid = ipc_port_get_receiver_task_locked(port, task);
2551 ip_mq_unlock(port);
2552
2553 return pid;
2554 }
2555
2556 /*
2557 * Routine: ipc_port_impcount_delta
2558 * Purpose:
2559 * Adjust only the importance count associated with a port.
2560 * If there are any adjustments to be made to receiver task,
2561 * those are handled elsewhere.
2562 *
2563 * For now, be defensive during deductions to make sure the
2564 * impcount for the port doesn't underflow zero. This will
2565 * go away when the port boost addition is made atomic (see
2566 * note in ipc_port_importance_delta()).
2567 * Conditions:
2568 * The port is referenced and locked.
2569 * Nothing else is locked.
2570 */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t port,
	mach_port_delta_t delta,
	ipc_port_t __unused base)
{
	mach_port_delta_t absdelta;

	/* inactive ports carry no importance; nothing to adjust */
	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	/* deduction: apply in full if it does not underflow the count */
	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	/*
	 * Over-release: log the receiver of this port (or of the base of
	 * its destination chain) to help track down the culprit.
	 */
	if (ip_in_a_space(port)) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    ip_get_receiver_name(port),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		/* port is in transit; base is the end of its destination chain */
		assert(ip_in_a_space(base));
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    ip_get_receiver_name(base),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif

	/* clamp at zero and report only what was actually dropped */
	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
2642
2643 /*
2644 * Routine: ipc_port_importance_delta_internal
2645 * Purpose:
2646 * Adjust the importance count through the given port.
2647 * If the port is in transit, apply the delta throughout
2648 * the chain. Determine if the there is a task at the
2649 * base of the chain that wants/needs to be adjusted,
2650 * and if so, apply the delta.
2651 * Conditions:
2652 * The port is referenced and locked on entry.
2653 * Importance may be locked.
2654 * Nothing else is locked.
2655 * The lock may be dropped on exit.
2656 * Returns TRUE if lock was dropped.
2657 */
2658 #if IMPORTANCE_INHERITANCE
2659
boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t *deltap,
	ipc_importance_task_t *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	*imp_task = IIT_NULL;

	/* a zero delta never requires dropping the lock */
	if (*deltap == 0) {
		return FALSE;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_in_transit(port)) {
		dropped = true;


		ip_mq_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		assert(ip_in_transit(port));
		next = ip_get_destination(port);
		ip_mq_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				*imp_task = ip_get_imp_task(base);
			}
			/* otherwise don't boost */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked. If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_mq_unlock(base);
		if (took_base_ref) {
			/* importance lock might be held */
			ip_release_safe(base);
		}
	}

	return dropped;
}
2768 #endif /* IMPORTANCE_INHERITANCE */
2769
2770 /*
2771 * Routine: ipc_port_importance_delta
2772 * Purpose:
2773 * Adjust the importance count through the given port.
2774 * If the port is in transit, apply the delta throughout
2775 * the chain.
2776 *
2777 * If there is a task at the base of the chain that wants/needs
2778 * to be adjusted, apply the delta.
2779 * Conditions:
2780 * The port is referenced and locked on entry.
2781 * Nothing else is locked.
2782 * The lock may be dropped on exit.
2783 * Returns TRUE if lock was dropped.
2784 */
2785 #if IMPORTANCE_INHERITANCE
2786
boolean_t
ipc_port_importance_delta(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	/* no task to boost, or nothing left to apply: just clean up the ref */
	if (IIT_NULL == imp_task || delta == 0) {
		if (imp_task) {
			ipc_importance_task_release(imp_task);
		}
		return dropped;
	}

	/* the assertion calls below cannot be made with the port locked */
	if (!dropped) {
		ip_mq_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	ipc_importance_task_release(imp_task);
	/* every path that reaches here has dropped the port lock */
	return TRUE;
}
2820 #endif /* IMPORTANCE_INHERITANCE */
2821
ipc_port_t
ipc_port_make_send_any_locked(
	ipc_port_t port)
{
	require_ip_active(port);
	/* a make-send bumps the make-send count as well as the send rights */
	port->ip_mscount++;
	ip_srights_inc(port);
	/* the right handed back to the caller holds its own port reference */
	ip_reference(port);
	return port;
}
2832
2833 ipc_port_t
ipc_port_make_send_any(ipc_port_t port)2834 ipc_port_make_send_any(
2835 ipc_port_t port)
2836 {
2837 ipc_port_t sright = port;
2838
2839 if (IP_VALID(port)) {
2840 ip_mq_lock(port);
2841 if (ip_active(port)) {
2842 ipc_port_make_send_any_locked(port);
2843 } else {
2844 sright = IP_DEAD;
2845 }
2846 ip_mq_unlock(port);
2847 }
2848
2849 return sright;
2850 }
2851
2852 ipc_port_t
ipc_port_make_send_mqueue(ipc_port_t port)2853 ipc_port_make_send_mqueue(
2854 ipc_port_t port)
2855 {
2856 ipc_port_t sright = port;
2857
2858 if (IP_VALID(port)) {
2859 ip_mq_lock(port);
2860 if (__improbable(!ip_active(port))) {
2861 sright = IP_DEAD;
2862 } else if (ip_kotype(port) == IKOT_NONE) {
2863 ipc_port_make_send_any_locked(port);
2864 } else {
2865 sright = IP_NULL;
2866 }
2867 ip_mq_unlock(port);
2868 }
2869
2870 return sright;
2871 }
2872
void
ipc_port_copy_send_any_locked(
	ipc_port_t port)
{
	/* copying requires an existing send right; mscount is not bumped */
	assert(port->ip_srights > 0);
	ip_srights_inc(port);
	/* the copied right holds its own port reference */
	ip_reference(port);
}
2881
2882 ipc_port_t
ipc_port_copy_send_any(ipc_port_t port)2883 ipc_port_copy_send_any(
2884 ipc_port_t port)
2885 {
2886 ipc_port_t sright = port;
2887
2888 if (IP_VALID(port)) {
2889 ip_mq_lock(port);
2890 if (ip_active(port)) {
2891 ipc_port_copy_send_any_locked(port);
2892 } else {
2893 sright = IP_DEAD;
2894 }
2895 ip_mq_unlock(port);
2896 }
2897
2898 return sright;
2899 }
2900
2901 ipc_port_t
ipc_port_copy_send_mqueue(ipc_port_t port)2902 ipc_port_copy_send_mqueue(
2903 ipc_port_t port)
2904 {
2905 ipc_port_t sright = port;
2906
2907 if (IP_VALID(port)) {
2908 ip_mq_lock(port);
2909 if (__improbable(!ip_active(port))) {
2910 sright = IP_DEAD;
2911 } else if (ip_kotype(port) == IKOT_NONE) {
2912 ipc_port_copy_send_any_locked(port);
2913 } else {
2914 sright = IP_NULL;
2915 }
2916 ip_mq_unlock(port);
2917 }
2918
2919 return sright;
2920 }
2921
2922 /*
2923 * Routine: ipc_port_copyout_send
2924 * Purpose:
2925 * Copyout a naked send right (possibly null/dead),
2926 * or if that fails, destroy the right.
2927 * Conditions:
2928 * Nothing locked.
2929 */
2930
2931 static mach_port_name_t
ipc_port_copyout_send_internal(ipc_port_t sright,ipc_space_t space,ipc_object_copyout_flags_t flags)2932 ipc_port_copyout_send_internal(
2933 ipc_port_t sright,
2934 ipc_space_t space,
2935 ipc_object_copyout_flags_t flags)
2936 {
2937 mach_port_name_t name;
2938
2939 if (IP_VALID(sright)) {
2940 kern_return_t kr;
2941
2942 kr = ipc_object_copyout(space, sright, MACH_MSG_TYPE_PORT_SEND,
2943 flags, NULL, &name);
2944 if (kr != KERN_SUCCESS) {
2945 if (kr == KERN_INVALID_CAPABILITY) {
2946 name = MACH_PORT_DEAD;
2947 } else {
2948 name = MACH_PORT_NULL;
2949 }
2950 }
2951 } else {
2952 name = CAST_MACH_PORT_TO_NAME(sright);
2953 }
2954
2955 return name;
2956 }
2957
mach_port_name_t
ipc_port_copyout_send(
	ipc_port_t sright, /* can be invalid */
	ipc_space_t space)
{
	/* plain copyout: no pinning semantics requested */
	return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
}
2965
2966 /* Used by pthread kext to copyout thread port only */
2967 mach_port_name_t
ipc_port_copyout_send_pinned(ipc_port_t sright,ipc_space_t space)2968 ipc_port_copyout_send_pinned(
2969 ipc_port_t sright, /* can be invalid */
2970 ipc_space_t space)
2971 {
2972 assert(space->is_task != TASK_NULL);
2973
2974 if (IP_VALID(sright)) {
2975 assert(ip_kotype(sright) == IKOT_THREAD_CONTROL);
2976 }
2977
2978 if (task_is_pinned(space->is_task)) {
2979 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
2980 } else {
2981 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2982 }
2983 }
2984
2985 /*
2986 * Routine: ipc_port_release_send_and_unlock
2987 * Purpose:
2988 * Release a naked send right.
2989 * Consumes a ref for the port.
2990 * Conditions:
2991 * Port is valid and locked on entry
2992 * Port is unlocked on exit.
2993 */
void
ipc_port_release_send_and_unlock(
	ipc_port_t port)
{
	ipc_notify_nsenders_t nsrequest = { };

	ip_srights_dec(port);

	/* dropping the last send right on an active port may arm no-senders */
	if (ip_active(port) && port->ip_srights == 0) {
		nsrequest = ipc_notify_no_senders_prepare(port);
	}

	ip_mq_unlock(port);
	ip_release(port);

	/* deliver the notification outside the port lock */
	ipc_notify_no_senders_emit(nsrequest);
}
3011
3012 /*
3013 * Routine: ipc_port_release_send
3014 * Purpose:
3015 * Release a naked send right.
3016 * Consumes a ref for the port.
3017 * Conditions:
3018 * Nothing locked.
3019 */
3020
__attribute__((flatten, noinline))
void
ipc_port_release_send(
	ipc_port_t port)
{
	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* consumes the caller's ref and unlocks the port */
		ipc_port_release_send_and_unlock(port);
	}
}
3031
3032 /*
3033 * Routine: ipc_port_make_sonce_locked
3034 * Purpose:
3035 * Make a naked send-once right from a receive right.
3036 * Conditions:
3037 * The port is locked and active.
3038 */
3039
ipc_port_t
ipc_port_make_sonce_locked(
	ipc_port_t port)
{
	require_ip_active(port);
	ip_sorights_inc(port);
	/* the send-once right holds its own port reference */
	ip_reference(port);
	return port;
}
3049
3050 /*
3051 * Routine: ipc_port_make_sonce
3052 * Purpose:
3053 * Make a naked send-once right from a receive right.
3054 * Conditions:
3055 * The port is not locked.
3056 */
3057
3058 ipc_port_t
ipc_port_make_sonce(ipc_port_t port)3059 ipc_port_make_sonce(
3060 ipc_port_t port)
3061 {
3062 if (!IP_VALID(port)) {
3063 return port;
3064 }
3065
3066 ip_mq_lock(port);
3067 if (ip_active(port)) {
3068 ipc_port_make_sonce_locked(port);
3069 ip_mq_unlock(port);
3070 return port;
3071 }
3072 ip_mq_unlock(port);
3073 return IP_DEAD;
3074 }
3075
3076 /*
3077 * Routine: ipc_port_release_sonce
3078 * Purpose:
3079 * Release a naked send-once right.
3080 * Consumes a ref for the port.
3081 *
3082 * In normal situations, this is never used.
3083 * Send-once rights are only consumed when
3084 * a message (possibly a send-once notification)
3085 * is sent to them.
3086 * Conditions:
3087 * The port is locked, possibly a space too.
3088 */
void
ipc_port_release_sonce_and_unlock(
	ipc_port_t port)
{
	ip_mq_lock_held(port);

	ip_sorights_dec(port);

	if (port->ip_specialreply) {
		/* resets the special reply linkage; unlocks the port */
		ipc_port_adjust_special_reply_port_locked(port, NULL,
		    IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN, FALSE);
	} else {
		ip_mq_unlock(port);
	}

	/* consume the caller's port reference */
	ip_release(port);
}
3106
3107 /*
3108 * Routine: ipc_port_release_sonce
3109 * Purpose:
3110 * Release a naked send-once right.
3111 * Consumes a ref for the port.
3112 *
3113 * In normal situations, this is never used.
3114 * Send-once rights are only consumed when
3115 * a message (possibly a send-once notification)
3116 * is sent to them.
3117 * Conditions:
3118 * Nothing locked except possibly a space.
3119 */
void
ipc_port_release_sonce(
	ipc_port_t port)
{
	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* consumes the caller's ref and unlocks the port */
		ipc_port_release_sonce_and_unlock(port);
	}
}
3129
3130 /*
3131 * Routine: ipc_port_release_receive
3132 * Purpose:
3133 * Release a naked (in limbo or in transit) receive right.
3134 * Consumes a ref for the port; destroys the port.
3135 * Conditions:
3136 * Nothing locked.
3137 */
3138
void
ipc_port_release_receive(
	ipc_port_t port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_mq_lock(port);
	require_ip_active(port);
	/* a naked receive right is in limbo or in transit, never in a space */
	assert(!ip_in_a_space(port));
	dest = ip_get_destination(port);

	ipc_port_destroy(port); /* consumes ref, unlocks */

	/* if the port was in transit, unwind the push on its destination */
	if (dest != IP_NULL) {
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
3161
3162 /*
3163 * Routine: ipc_port_alloc_special
3164 * Purpose:
3165 * Allocate a port in a special space.
3166 * The new port is returned with one ref.
3167 * If unsuccessful, IP_NULL is returned.
3168 * Conditions:
3169 * Nothing locked.
3170 */
3171
ipc_port_t
ipc_port_alloc_special(
	ipc_space_t space,
	ipc_port_init_flags_t flags)
{
	ipc_port_t port;

	kern_return_t kr = ipc_port_init_validate_flags(flags);
	if (kr != KERN_SUCCESS) {
		return IP_NULL;
	}

	port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
	if (port == IP_NULL) {
		return IP_NULL;
	}

	/* one reference: the one handed back to the caller */
	os_atomic_init(&port->ip_object.io_bits, io_makebits(IOT_PORT));
	os_atomic_init(&port->ip_object.io_references, 1);

	ipc_port_init(port, space, flags, MACH_PORT_SPECIAL_DEFAULT);
	return port;
}
3195
3196 /*
3197 * Routine: ipc_port_dealloc_special_and_unlock
3198 * Purpose:
3199 * Deallocate a port in a special space.
3200 * Consumes one ref for the port.
3201 * Conditions:
3202 * Port is locked.
3203 */
3204
void
ipc_port_dealloc_special_and_unlock(
	ipc_port_t port,
	__assert_only ipc_space_t space)
{
	require_ip_active(port);
	// assert(port->ip_receiver_name != MACH_PORT_NULL);
	assert(ip_in_space(port, space));

	/*
	 * We clear ip_receiver_name and ip_receiver to simplify
	 * the ipc_space_kernel check in ipc_mqueue_send.
	 */

	/* port transitions to IN-LIMBO state */
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;

	/* relevant part of ipc_port_clear_receiver */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;

	/* consumes the caller's ref and unlocks the port */
	ipc_port_destroy(port);
}
3229
3230 /*
3231 * Routine: ipc_port_dealloc_special
3232 * Purpose:
3233 * Deallocate a port in a special space.
3234 * Consumes one ref for the port.
3235 * Conditions:
3236 * Nothing locked.
3237 */
3238
void
ipc_port_dealloc_special(
	ipc_port_t port,
	ipc_space_t space)
{
	ip_mq_lock(port);
	/* consumes the caller's ref and unlocks the port */
	ipc_port_dealloc_special_and_unlock(port, space);
}
3247
3248 /*
3249 * Routine: ipc_port_finalize
3250 * Purpose:
3251 * Called on last reference deallocate to
3252 * free any remaining data associated with the
3253 * port.
3254 * Conditions:
3255 * Nothing locked.
3256 */
void
ipc_port_finalize(
	ipc_port_t port)
{
	/* snapshot any outstanding port-request (notification) table */
	ipc_port_request_table_t requests = port->ip_requests;

	/* the send turnstile must already have been torn down */
	assert(port_send_turnstile(port) == TURNSTILE_NULL);

	if (waitq_type(&port->ip_waitq) == WQT_PORT) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	/* last reference dropped: an active port here means a refcount bug */
	if (ip_active(port)) {
		panic("Trying to free an active port. port %p", port);
	}

	if (requests) {
		port->ip_requests = NULL;
		ipc_port_request_table_free_noclear(requests);
	}

	waitq_deinit(&port->ip_waitq);
#if MACH_ASSERT
	if (port->ip_made_bt) {
		/* release the allocation backtrace captured in ipc_port_init_debug */
		btref_put(port->ip_made_bt);
	}
#endif
}
3285
3286 /*
3287 * Routine: kdp_mqueue_send_find_owner
3288 * Purpose:
3289 * Discover the owner of the ipc object that contains the input
3290 * waitq object. The thread blocked on the waitq should be
3291 * waiting for an IPC_MQUEUE_FULL event.
3292 * Conditions:
3293 * The 'waitinfo->wait_type' value should already be set to
3294 * kThreadWaitPortSend.
3295 * Note:
3296 * If we find out that the containing port is actually in
3297 * transit, we reset the wait_type field to reflect this.
3298 */
3299 void
kdp_mqueue_send_find_owner(struct waitq * waitq,__assert_only event64_t event,thread_waitinfo_v2_t * waitinfo,struct ipc_service_port_label ** isplp)3300 kdp_mqueue_send_find_owner(
3301 struct waitq *waitq,
3302 __assert_only event64_t event,
3303 thread_waitinfo_v2_t *waitinfo,
3304 struct ipc_service_port_label **isplp)
3305 {
3306 struct turnstile *turnstile;
3307 assert(waitinfo->wait_type == kThreadWaitPortSend);
3308 assert(event == IPC_MQUEUE_FULL);
3309 assert(waitq_type(waitq) == WQT_TURNSTILE);
3310
3311 turnstile = waitq_to_turnstile(waitq);
3312 ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
3313
3314 zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3315
3316 waitinfo->owner = 0;
3317 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
3318 if (ip_mq_lock_held_kdp(port)) {
3319 /*
3320 * someone has the port locked: it may be in an
3321 * inconsistent state: bail
3322 */
3323 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3324 return;
3325 }
3326
3327 /* now we are the only one accessing the port */
3328 if (ip_active(port)) {
3329 /*
3330 * In kdp context, port must be left unlocked throughout.
3331 * Therefore can't use union field accessor helpers, manually strip PAC
3332 * and compare raw pointer.
3333 */
3334 void *raw_ptr = ip_get_receiver_ptr_noauth(port);
3335
3336 if (port->ip_tempowner) {
3337 ipc_importance_task_t imp_task = ip_get_imp_task(port);
3338 if (imp_task != IIT_NULL && imp_task->iit_task != NULL) {
3339 /* port is held by a tempowner */
3340 waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3341 } else {
3342 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3343 }
3344 } else if (ip_in_a_space(port)) { /* no port lock needed */
3345 if ((ipc_space_t)raw_ptr == ipc_space_kernel) { /* access union field as ip_receiver */
3346 /*
3347 * The kernel pid is 0, make this
3348 * distinguishable from no-owner and
3349 * inconsistent port state.
3350 */
3351 waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
3352 } else {
3353 waitinfo->owner = pid_from_task(((ipc_space_t)raw_ptr)->is_task);
3354 }
3355 } else if ((ipc_port_t)raw_ptr != IP_NULL) { /* access union field as ip_destination */
3356 waitinfo->wait_type = kThreadWaitPortSendInTransit;
3357 waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM((ipc_port_t)raw_ptr);
3358 }
3359 if (port->ip_service_port && port->ip_splabel != NULL) {
3360 *isplp = (struct ipc_service_port_label *)port->ip_splabel;
3361 }
3362 }
3363 }
3364
3365 /*
3366 * Routine: kdp_mqueue_recv_find_owner
3367 * Purpose:
3368 * Discover the "owner" of the ipc object that contains the input
3369 * waitq object. The thread blocked on the waitq is trying to
3370 * receive on the mqueue.
3371 * Conditions:
3372 * The 'waitinfo->wait_type' value should already be set to
3373 * kThreadWaitPortReceive.
3374 * Note:
 *	If we find that we are actually waiting on a port set, we reset
3376 * the wait_type field to reflect this.
3377 */
void
kdp_mqueue_recv_find_owner(
	struct waitq                  *waitq,
	__assert_only event64_t        event,
	thread_waitinfo_v2_t          *waitinfo,
	struct ipc_service_port_label **isplp)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	waitinfo->owner = 0;

	if (waitq_type(waitq) == WQT_PORT_SET) {
		ipc_pset_t set = ips_from_waitq(waitq);

		/* validate the waitq really belongs to an ipc_pset before dereferencing */
		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_mq_lock_held_kdp(set)) {
			/* someone holds the pset lock: state may be inconsistent */
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else if (waitq_type(waitq) == WQT_PORT) {
		ipc_port_t port = ip_from_waitq(waitq);

		/* validate the waitq really belongs to an ipc_port before dereferencing */
		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_mq_lock_held_kdp(port)) {
			/* someone holds the port lock: state may be inconsistent, bail */
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (ip_in_a_space(port)) { /* no port lock needed */
				waitinfo->owner = ip_get_receiver_name(port);
			} else {
				/* active but not in a space: report as in-transit */
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
			if (port->ip_specialreply) {
				waitinfo->wait_flags |= STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY;
			}
			if (port->ip_splabel != NULL) {
				/* expose the service port label to the stackshot consumer */
				*isplp = (struct ipc_service_port_label *)port->ip_splabel;
			}
		}
	}
}
3428
/*
 *	Routine:	ipc_port_set_label
 *	Purpose:
 *		Attach a kobject label to a port and mark the port
 *		as labeled (IO_BITS_KOLABEL).
 *	Conditions:
 *		The port must not already carry a kobject label.
 */
void
ipc_port_set_label(
	ipc_port_t port,
	ipc_label_t label)
{
	ipc_kobject_label_t labelp;

	/* a port may only be labeled once */
	assert(!ip_is_kolabeled(port));

	/* Z_NOFAIL: allocation cannot fail, no NULL check needed */
	labelp = zalloc_flags(ipc_kobject_label_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	labelp->ikol_label = label;

	port->ip_kolabel = labelp;
	io_bits_or(ip_to_object(port), IO_BITS_KOLABEL);
}
3444
3445 kern_return_t
ipc_port_reset_thread_attr(ipc_port_t port)3446 ipc_port_reset_thread_attr(
3447 ipc_port_t port)
3448 {
3449 uint8_t iotier = THROTTLE_LEVEL_END;
3450 uint8_t qos = THREAD_QOS_UNSPECIFIED;
3451
3452 return ipc_port_update_qos_n_iotier(port, qos, iotier);
3453 }
3454
3455 kern_return_t
ipc_port_propagate_thread_attr(ipc_port_t port,struct thread_attr_for_ipc_propagation attr)3456 ipc_port_propagate_thread_attr(
3457 ipc_port_t port,
3458 struct thread_attr_for_ipc_propagation attr)
3459 {
3460 uint8_t iotier = attr.tafip_iotier;
3461 uint8_t qos = attr.tafip_qos;
3462
3463 return ipc_port_update_qos_n_iotier(port, qos, iotier);
3464 }
3465
3466 static kern_return_t
ipc_port_update_qos_n_iotier(ipc_port_t port,uint8_t qos,uint8_t iotier)3467 ipc_port_update_qos_n_iotier(
3468 ipc_port_t port,
3469 uint8_t qos,
3470 uint8_t iotier)
3471 {
3472 if (port == IPC_PORT_NULL) {
3473 return KERN_INVALID_ARGUMENT;
3474 }
3475
3476 ip_mq_lock(port);
3477
3478 if (!ip_active(port)) {
3479 ip_mq_unlock(port);
3480 return KERN_TERMINATED;
3481 }
3482
3483 if (port->ip_specialreply) {
3484 ip_mq_unlock(port);
3485 return KERN_INVALID_ARGUMENT;
3486 }
3487
3488 port->ip_kernel_iotier_override = iotier;
3489 port->ip_kernel_qos_override = qos;
3490
3491 if (ip_in_a_space(port) &&
3492 is_active(ip_get_receiver(port)) &&
3493 ipc_port_has_klist(port)) {
3494 KNOTE(&port->ip_klist, 0);
3495 }
3496
3497 ip_mq_unlock(port);
3498 return KERN_SUCCESS;
3499 }
3500
3501 /* Returns true if a rigid reply port violation should be enforced (by killing the process) */
static bool
__ip_rigid_reply_port_semantics_violation(
	ipc_port_t reply_port,
	ipc_policy_violation_id_t *reply_port_semantics_violation)
{
	bool hardened_runtime = csproc_hardened_runtime(current_proc());

	/*
	 * Exempt environments that are not held to the rigid rules:
	 * simulator processes, Rosetta-translated tasks, and tasks
	 * that opted out of mach hardening on macOS.
	 */
	if (proc_is_simulated(current_proc())
#if CONFIG_ROSETTA
	    || task_is_translated(current_task())
#endif
#if XNU_TARGET_OS_OSX
	    || task_opted_out_mach_hardening(current_task())
#endif /* XNU_TARGET_OS_OSX */
	    ) {
		return FALSE;
	}

	/* hardened binaries have the violation enforced (process killed) */
	if (task_is_hardened_binary(current_task())) {
		return TRUE;
	}
	if (!ip_is_provisional_reply_port(reply_port)) {
		/* record telemetry for when third party fails to use a provisional reply port */
		*reply_port_semantics_violation = hardened_runtime ? IPCPV_RIGID_REPLY_PORT_HARDENED_RUNTIME : IPCPV_RIGID_REPLY_PORT_3P;
	}
	/* third-party processes are reported (above) but never killed */
	return FALSE;
}
3529
/*
 * Returns TRUE when dest_port requires reply port semantics and
 * reply_port satisfies neither the reply-port nor the provisional
 * reply-port requirement. May also record a telemetry-only violation
 * id (service port opt-out case) while still returning FALSE.
 */
bool
ip_violates_reply_port_semantics(
	ipc_port_t dest_port,
	ipc_port_t reply_port,
	ipc_policy_violation_id_t *reply_port_semantics_violation)
{
	/*
	 * dest_port lock must be held to avoid race condition
	 * when accessing ip_splabel rdar://139066947
	 */
	ip_mq_lock_held(dest_port); /* NOTE(review): presumably a lock-held assertion — verify macro */

	if (ip_require_reply_port_semantics(dest_port)
	    && !ip_is_reply_port(reply_port)
	    && !ip_is_provisional_reply_port(reply_port)) {
		/* hard violation: destination demands a (provisional) reply port */
		*reply_port_semantics_violation = IPCPV_REPLY_PORT_SEMANTICS;
		return TRUE;
	}

	if (dest_port->ip_service_port) {
		ipc_service_port_label_t label = dest_port->ip_splabel;
		if (!ipc_service_port_label_is_bootstrap_port(label)
		    && !ip_is_reply_port(reply_port)
		    && !ip_is_provisional_reply_port(reply_port)) {
			/*
			 * Telemetry only: record the opt-out violation id but
			 * deliberately fall through to return FALSE below.
			 */
			*reply_port_semantics_violation = IPCPV_REPLY_PORT_SEMANTICS_OPTOUT;
		}
	}

	return FALSE;
}
3560
3561 /* Rigid reply port semantics don't allow for provisional reply ports */
3562 bool
ip_violates_rigid_reply_port_semantics(ipc_port_t dest_port,ipc_port_t reply_port,ipc_policy_violation_id_t * violates_3p)3563 ip_violates_rigid_reply_port_semantics(
3564 ipc_port_t dest_port,
3565 ipc_port_t reply_port,
3566 ipc_policy_violation_id_t *violates_3p)
3567 {
3568 return ip_require_rigid_reply_port_semantics(dest_port)
3569 && !ip_is_reply_port(reply_port)
3570 && __ip_rigid_reply_port_semantics_violation(reply_port, violates_3p);
3571 }
3572
3573 #if MACH_ASSERT
3574 #include <kern/machine.h>
3575
/* debug accounting: number of ports seen by the debug port walk */
unsigned long port_count = 0;
/* threshold above which the port walk flags an unusually high count */
unsigned long port_count_warning = 20000;
/* monotonic stamp handed out to each new port (stored in ip_timetrack) */
unsigned long port_timestamp = 0;

/* kernel-debugger helper entry points */
void db_port_stack_trace(
	ipc_port_t port);
void db_ref(
	int refs);
int db_port_walk(
	unsigned int verbose,
	unsigned int display,
	unsigned int ref_search,
	unsigned int ref_target);

#ifdef MACH_BSD
extern int proc_pid(struct proc*);
#endif /* MACH_BSD */
3593
3594 /*
3595 * Initialize all of the debugging state in a port.
3596 * Insert the port into a global list of all allocated ports.
3597 */
void
ipc_port_init_debug(ipc_port_t port, void *fp)
{
	/* stamp the port with a monotonically increasing creation counter */
	port->ip_timetrack = port_timestamp++;

	if (ipc_portbt) {
		/* capture the allocation backtrace (released in ipc_port_finalize) */
		port->ip_made_bt = btref_get(fp, 0);
	}

#ifdef MACH_BSD
	/* remember the pid of the allocating process, when one exists */
	task_t task = current_task_early();
	if (task != TASK_NULL) {
		struct proc *proc = get_bsdtask_info(task);
		if (proc) {
			port->ip_made_pid = proc_pid(proc);
		}
	}
#endif /* MACH_BSD */
}
3617
3618 #endif /* MACH_ASSERT */
3619