1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <mach/boolean.h>
73 #include <mach_assert.h>
74
75 #include <mach/port.h>
76 #include <mach/kern_return.h>
77 #include <kern/backtrace.h>
78 #include <kern/debug.h>
79 #include <kern/ipc_kobject.h>
80 #include <kern/kcdata.h>
81 #include <kern/misc_protos.h>
82 #include <kern/policy_internal.h>
83 #include <kern/thread.h>
84 #include <kern/waitq.h>
85 #include <kern/host_notify.h>
86 #include <ipc/ipc_entry.h>
87 #include <ipc/ipc_space.h>
88 #include <ipc/ipc_object.h>
89 #include <ipc/ipc_right.h>
90 #include <ipc/ipc_policy.h>
91 #include <ipc/ipc_pset.h>
92 #include <ipc/ipc_kmsg.h>
93 #include <ipc/ipc_mqueue.h>
94 #include <ipc/ipc_notify.h>
95 #include <ipc/ipc_importance.h>
96 #include <ipc/ipc_policy.h>
97 #include <machine/limits.h>
98 #include <kern/turnstile.h>
99 #include <kern/machine.h>
100
101 #include <security/mac_mach_internal.h>
102 #include <ipc/ipc_service_port.h>
103
104 #include <string.h>
105
/* resolved elsewhere in the kernel (BSD layer, presumably — not visible here) */
extern bool proc_is_simulated(struct proc *);
extern struct proc *current_proc(void);
extern int csproc_hardened_runtime(struct proc* p);

/* tunable: launch prioritization, enabled by default */
static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
/* writable tunable: record backtraces for port allocations (off by default) */
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

/* spin lock, presumably serializing multi-port locking — TODO confirm users */
LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
/* global counter backing ipc_port_timestamp() */
static ipc_port_timestamp_t ipc_port_timestamp_data;

/* kalloc array type for per-port dead-name/send-possible request tables */
KALLOC_ARRAY_TYPE_DEFINE(ipc_port_request_table,
    struct ipc_port_request, KT_DEFAULT);

#if MACH_ASSERT
static void ipc_port_init_debug(ipc_port_t, void *fp);
#endif /* MACH_ASSERT */
122
/*
 * Out-of-line panic helper: called when an inactive (dead) port is used
 * where an active one is required.
 */
void __abortlike
__ipc_port_inactive_panic(ipc_port_t port)
{
	panic("Using inactive port %p", port);
}
128
/*
 * Out-of-line panic helper: a receive right was found in `space` for a
 * port whose recorded receiver is a different space — evidence of
 * tampering or corruption (see ipc_port_translate_receive).
 */
static __abortlike void
__ipc_port_translate_receive_panic(ipc_space_t space, ipc_port_t port)
{
	panic("found receive right in space %p for port %p owned by space %p",
	    space, port, ip_get_receiver(port));
}
135
136 __abortlike void
__ipc_right_delta_overflow_panic(ipc_port_t port,natural_t * field,int delta)137 __ipc_right_delta_overflow_panic(ipc_port_t port, natural_t *field, int delta)
138 {
139 const char *what;
140 if (field == &port->ip_srights) {
141 what = "send right";
142 } else {
143 what = "send-once right";
144 }
145 panic("port %p %s count overflow (delta: %d)", port, what, delta);
146 }
147
/*
 * Forward declarations for static helpers defined later in this file.
 */

static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t              port);

static thread_t
ipc_port_get_watchport_inheritor(
	ipc_port_t              port);

static kern_return_t
ipc_port_update_qos_n_iotier(
	ipc_port_t              port,
	uint8_t                 qos,
	uint8_t                 iotier);
161
/*
 * Lock a port: sanity-check the pointer (ip_validate), then take the
 * embedded waitq interlock, which serves as the port lock.
 */
void
ipc_port_lock(ipc_port_t port)
{
	ip_validate(port);
	waitq_lock(&port->ip_waitq);
}
168
/*
 * Lock a port and return a snapshot of its label, taken under the lock.
 */
ipc_object_label_t
ipc_port_lock_label_get(ipc_port_t port)
{
	ip_validate(port);
	waitq_lock(&port->ip_waitq);
	return ip_label_get(port);
}
176
/*
 * Like ipc_port_lock_label_get(), but with a stricter validation:
 * the pointer must be a properly aligned element of the IPC port zone.
 */
ipc_object_label_t
ipc_port_lock_check_aligned(ipc_port_t port)
{
	zone_id_require_aligned(ZONE_ID_IPC_PORT, port);
	waitq_lock(&port->ip_waitq);
	return ip_label_get(port);
}
184
/*
 * Non-blocking lock attempt; returns true if the port lock was acquired.
 */
bool
ipc_port_lock_try(ipc_port_t port)
{
	ip_validate(port);
	return waitq_lock_try(&port->ip_waitq);
}
191
/*
 * Drop one reference on the port.
 */
void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}
197
/*
 * Take one reference on the port, validating the pointer first.
 */
void
ipc_port_reference(ipc_port_t port)
{
	ip_validate(port);
	ip_reference(port);
}
204
/*
 * Routine:	ipc_port_timestamp
 * Purpose:
 *	Retrieve a timestamp value.
 *
 *	Returns the pre-increment value of a global relaxed atomic
 *	counter; the value is stamped into ip_timestamp when a port
 *	dies (see ipc_port_mark_inactive).
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	return os_atomic_inc_orig(&ipc_port_timestamp_data, relaxed);
}
216
217
218 /*
219 * Routine: ipc_port_translate_send
220 * Purpose:
221 * Look up a send right in a space.
222 * Conditions:
223 * Nothing locked before. If successful, the object
224 * is returned active and locked. The caller doesn't get a ref.
225 * Returns:
226 * KERN_SUCCESS Object returned locked.
227 * KERN_INVALID_TASK The space is dead.
228 * KERN_INVALID_NAME The name doesn't denote a right
229 * KERN_INVALID_RIGHT Name doesn't denote the correct right
230 */
231 kern_return_t
ipc_port_translate_send(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)232 ipc_port_translate_send(
233 ipc_space_t space,
234 mach_port_name_t name,
235 ipc_port_t *portp)
236 {
237 ipc_port_t port = IP_NULL;
238 ipc_object_t object;
239 kern_return_t kr;
240
241 kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
242 if (kr == KERN_SUCCESS) {
243 port = ip_object_to_port(object);
244 }
245 *portp = port;
246 return kr;
247 }
248
249
250 /*
251 * Routine: ipc_port_translate_receive
252 * Purpose:
253 * Look up a receive right in a space.
254 * Performs some minimal security checks against tampering.
255 * Conditions:
256 * Nothing locked before. If successful, the object
257 * is returned active and locked. The caller doesn't get a ref.
258 * Returns:
259 * KERN_SUCCESS Object returned locked.
260 * KERN_INVALID_TASK The space is dead.
261 * KERN_INVALID_NAME The name doesn't denote a right
262 * KERN_INVALID_RIGHT Name doesn't denote the correct right
263 */
264 kern_return_t
ipc_port_translate_receive(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)265 ipc_port_translate_receive(
266 ipc_space_t space,
267 mach_port_name_t name,
268 ipc_port_t *portp)
269 {
270 ipc_port_t port = IP_NULL;
271 ipc_object_t object;
272 kern_return_t kr;
273
274 kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_RECEIVE, &object);
275 if (kr == KERN_SUCCESS) {
276 /* object is locked */
277 port = ip_object_to_port(object);
278 if (!ip_in_space(port, space)) {
279 __ipc_port_translate_receive_panic(space, port);
280 }
281 }
282 *portp = port;
283 return kr;
284 }
285
286
/*
 * Routine:	ipc_port_request_alloc
 * Purpose:
 *	Try to allocate a request slot.
 *	If successful, returns the request index.
 *	Otherwise returns zero.
 * Conditions:
 *	The port is locked and active.
 * Returns:
 *	KERN_SUCCESS		A request index was found.
 *	KERN_NO_SPACE		No index allocated.
 */

kern_return_t
ipc_port_request_alloc(
	ipc_port_t              port,
	mach_port_name_t        name,
	ipc_port_t              soright,
	ipc_port_request_opts_t options,
	ipc_port_request_index_t *indexp)
{
	ipc_port_request_table_t table;
	ipc_port_request_index_t index;
	ipc_port_request_t ipr, base;

	require_ip_active(port);
	assert(name != MACH_PORT_NULL);
	assert(soright != IP_NULL);

	/* no table yet: caller can grow one via ipc_port_request_grow() */
	table = port->ip_requests;
	if (table == NULL) {
		return KERN_NO_SPACE;
	}

	/* slot 0 is the free-list head; ipr_next == 0 means the list is empty */
	base = ipc_port_request_table_base(table);
	index = base->ipr_next;
	if (index == 0) {
		return KERN_NO_SPACE;
	}

	/* pop the slot off the free list and fill it in */
	ipr = ipc_port_request_table_get(table, index);
	assert(ipr->ipr_soright == IP_NULL);

	base->ipr_next = ipr->ipr_next;
	ipr->ipr_name = name;
	/* pack the send-once right together with the request option bits */
	ipr->ipr_soright = IPR_SOR_MAKE(soright, options);

	/* a pre-armed send-possible request flags the port accordingly */
	if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
	    port->ip_sprequests == 0) {
		port->ip_sprequests = 1;
	}

	*indexp = index;

	return KERN_SUCCESS;
}
343
344
/*
 * Routine:	ipc_port_request_hnotify_alloc
 * Purpose:
 *	Try to allocate a request slot.
 *	If successful, returns the request index.
 *	Otherwise returns zero.
 * Conditions:
 *	The port is locked and active.
 * Returns:
 *	KERN_SUCCESS		A request index was found.
 *	KERN_NO_SPACE		No index allocated.
 *	KERN_INVALID_CAPABILITY	A host notify registration already
 *				existed
 */

kern_return_t
ipc_port_request_hnotify_alloc(
	ipc_port_t              port,
	struct host_notify_entry *hnotify,
	ipc_port_request_index_t *indexp)
{
	ipc_port_request_table_t table;
	ipc_port_request_index_t index;
	ipc_port_request_t ipr, base;

	require_ip_active(port);

	table = port->ip_requests;
	if (table == NULL) {
		return KERN_NO_SPACE;
	}

	base = ipc_port_request_table_base(table);
	/* at most one host-notify registration per port (sentinel in slot 0) */
	if (base->ipr_hn_slot) {
		return KERN_INVALID_CAPABILITY;
	}
	/* slot 0 is the free-list head; ipr_next == 0 means the list is empty */
	index = base->ipr_next;
	if (index == 0) {
		return KERN_NO_SPACE;
	}

	/* pop the slot off the free list */
	ipr = ipc_port_request_table_get(table, index);
	assert(ipr->ipr_soright == IP_NULL);

	base->ipr_hn_slot = ipr;
	base->ipr_next = ipr->ipr_next;
	ipr->ipr_hnotify = hnotify;
	/* the reserved name marks this slot as a host-notify registration */
	ipr->ipr_name = IPR_HOST_NOTIFY;

	*indexp = index;

	return KERN_SUCCESS;
}
398
399 /*
400 * Routine: ipc_port_request_grow
401 * Purpose:
402 * Grow a port's table of requests.
403 * Conditions:
404 * The port must be locked and active.
405 * Nothing else locked; will allocate memory.
406 * Upon return the port is unlocked.
407 * Returns:
408 * KERN_SUCCESS Grew the table.
409 * KERN_SUCCESS Somebody else grew the table.
410 * KERN_SUCCESS The port died.
411 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
412 * KERN_NO_SPACE Couldn't grow to desired size
413 */
414
415 kern_return_t
ipc_port_request_grow(ipc_port_t port)416 ipc_port_request_grow(
417 ipc_port_t port)
418 {
419 ipc_port_request_table_t otable, ntable;
420 uint32_t osize, nsize;
421 uint32_t ocount, ncount;
422
423 require_ip_active(port);
424
425 otable = port->ip_requests;
426 if (otable) {
427 osize = ipc_port_request_table_size(otable);
428 } else {
429 osize = 0;
430 }
431 nsize = ipc_port_request_table_next_size(2, osize, 16);
432 if (nsize > CONFIG_IPC_TABLE_REQUEST_SIZE_MAX) {
433 nsize = CONFIG_IPC_TABLE_REQUEST_SIZE_MAX;
434 }
435 if (nsize == osize) {
436 return KERN_RESOURCE_SHORTAGE;
437 }
438
439 ip_reference(port);
440 ip_mq_unlock(port);
441
442 ntable = ipc_port_request_table_alloc_by_size(nsize, Z_WAITOK | Z_ZERO);
443 if (ntable == NULL) {
444 ip_release(port);
445 return KERN_RESOURCE_SHORTAGE;
446 }
447
448 ip_mq_lock(port);
449
450 /*
451 * Check that port is still active and that nobody else
452 * has slipped in and grown the table on us. Note that
453 * just checking if the current table pointer == otable
454 * isn't sufficient; must check ipr_size.
455 */
456
457 ocount = ipc_port_request_table_size_to_count(osize);
458 ncount = ipc_port_request_table_size_to_count(nsize);
459
460 if (ip_active(port) && port->ip_requests == otable) {
461 ipc_port_request_index_t free, i;
462
463 /* copy old table to new table */
464
465 if (otable != NULL) {
466 ipc_port_request_t obase, nbase, ohn, nhn;
467
468 obase = ipc_port_request_table_base(otable);
469 nbase = ipc_port_request_table_base(ntable);
470 memcpy(nbase, obase, osize);
471
472 /*
473 * if there is a host-notify registration,
474 * fixup dPAC for the registration's ipr_hnotify field,
475 * and the ipr_hn_slot sentinel.
476 */
477 ohn = obase->ipr_hn_slot;
478 if (ohn) {
479 nhn = nbase + (ohn - obase);
480 nhn->ipr_hnotify = ohn->ipr_hnotify;
481 nbase->ipr_hn_slot = nhn;
482 }
483 } else {
484 ocount = 1;
485 free = 0;
486 }
487
488 /* add new elements to the new table's free list */
489
490 for (i = ocount; i < ncount; i++) {
491 ipc_port_request_table_get_nocheck(ntable, i)->ipr_next = free;
492 free = i;
493 }
494
495 ipc_port_request_table_base(ntable)->ipr_next = free;
496 port->ip_requests = ntable;
497 ip_mq_unlock(port);
498 ip_release(port);
499
500 if (otable != NULL) {
501 ipc_port_request_table_free(&otable);
502 }
503 } else {
504 ip_mq_unlock(port);
505 ip_release(port);
506 ipc_port_request_table_free(&ntable);
507 }
508
509 return KERN_SUCCESS;
510 }
511
/*
 * Routine:	ipc_port_request_sparm
 * Purpose:
 *	Arm delayed send-possible request.
 * Conditions:
 *	The port must be locked and active.
 *
 *	Returns TRUE if the request was armed with importance.
 */

bool
ipc_port_request_sparm(
	ipc_port_t              port,
	__assert_only mach_port_name_t name,
	ipc_port_request_index_t index,
	mach_msg_option64_t     option,
	mach_msg_priority_t     priority)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_table_t table;
		ipc_port_request_t ipr;

		require_ip_active(port);

		table = port->ip_requests;
		assert(table != NULL);

		ipr = ipc_port_request_table_get(table, index);
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			/* set the armed bit on the stored send-once right */
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				mach_msg_qos_t qos_ovr;
				/* the priority may encode either a pthread priority or a QoS */
				if (mach_msg_priority_is_pthread_priority(priority)) {
					qos_ovr = _pthread_priority_thread_qos(priority);
				} else {
					qos_ovr = mach_msg_priority_overide_qos(priority);
				}
				if (qos_ovr) {
					ipc_mqueue_override_send_locked(&port->ip_messages, qos_ovr);
				}
			}

#if IMPORTANCE_INHERITANCE
			/*
			 * Report importance-armed only when donation applies:
			 * not suppressed via MACH_SEND_NOIMPORTANCE, the port
			 * accepts donation, no send-possible importance is
			 * already pending, and the sender donates (explicitly
			 * or because the task is an importance donor).
			 */
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			    (task_is_importance_donor(current_task())))) {
				return true;
			}
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return false;
}
573
574 /*
575 * Routine: ipc_port_request_type
576 * Purpose:
577 * Determine the type(s) of port requests enabled for a name.
578 * Conditions:
579 * The port must be locked or inactive (to avoid table growth).
580 * The index must not be IE_REQ_NONE and for the name in question.
581 */
582 mach_port_type_t
ipc_port_request_type(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)583 ipc_port_request_type(
584 ipc_port_t port,
585 __assert_only mach_port_name_t name,
586 ipc_port_request_index_t index)
587 {
588 ipc_port_request_table_t table;
589 ipc_port_request_t ipr;
590 mach_port_type_t type = 0;
591
592 table = port->ip_requests;
593 assert(table != NULL);
594
595 assert(index != IE_REQ_NONE);
596 ipr = ipc_port_request_table_get(table, index);
597 assert(ipr->ipr_name == name);
598
599 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
600 type |= MACH_PORT_TYPE_DNREQUEST;
601
602 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
603 type |= MACH_PORT_TYPE_SPREQUEST;
604
605 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
606 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
607 }
608 }
609 }
610 return type;
611 }
612
613 /*
614 * Routine: ipc_port_request_cancel
615 * Purpose:
616 * Cancel a dead-name/send-possible request and return the send-once right.
617 * Conditions:
618 * The port must be locked and active.
619 * The index must not be IPR_REQ_NONE and must correspond with name.
620 */
621
622 ipc_port_t
ipc_port_request_cancel(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)623 ipc_port_request_cancel(
624 ipc_port_t port,
625 __assert_only mach_port_name_t name,
626 ipc_port_request_index_t index)
627 {
628 ipc_port_request_table_t table;
629 ipc_port_request_t base, ipr;
630 ipc_port_t request = IP_NULL;
631
632 require_ip_active(port);
633 table = port->ip_requests;
634 base = ipc_port_request_table_base(table);
635 assert(table != NULL);
636
637 assert(index != IE_REQ_NONE);
638 ipr = ipc_port_request_table_get(table, index);
639 assert(ipr->ipr_name == name);
640 request = IPR_SOR_PORT(ipr->ipr_soright);
641
642 /* return ipr to the free list inside the table */
643 ipr->ipr_next = base->ipr_next;
644 ipr->ipr_soright = IP_NULL;
645 if (base->ipr_hn_slot == ipr) {
646 base->ipr_hn_slot = NULL;
647 }
648 base->ipr_next = index;
649
650 return request;
651 }
652
653
/*
 * Routine:	ipc_port_prepare_move
 * Purpose:
 *	Prepares a receive right for transmission/destruction,
 *	by unlinking it from any port sets, redirecting waiters,
 *	and clearing the per-receiver state.
 * Conditions:
 *	The port is locked and active.
 */
__attribute__((always_inline))
static void
ipc_port_prepare_move(
	ipc_port_t              port,
	ipc_object_label_t      *label,
	waitq_link_list_t       *free_l)
{
	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the write space lock or the receive entry has
	 * been deleted, so even though this acquires and releases
	 * the port lock, we know we won't be added to any other sets.
	 */
	if (ip_in_pset(port)) {
		waitq_unlink_all_locked(&port->ip_waitq, NULL, free_l);
		assert(!ip_in_pset(port));
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount, seqno, guard bits
	 */
	if (io_state_in_space(label->io_state)) {
		ipc_mqueue_changed(ip_get_receiver(port), &port->ip_waitq);
	} else {
		ipc_mqueue_changed(NULL, &port->ip_waitq);
	}

	/* reset receiver-specific state; the next owner starts fresh */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
}
694
/*
 * Routine:	ipc_port_mark_in_space
 * Purpose:
 *	Move a receive right into `space` under `name`, deriving the
 *	resulting label state from the port's move policy (or from
 *	`force_state` when it denotes an in-space state).
 * Conditions:
 *	The port is locked; `label` comes from a label get and is
 *	committed via io_label_set_and_put().
 * Returns:
 *	The previous ip_destination, which carries the reference taken
 *	by ipc_port_mark_in_transit() — presumably released by the
 *	caller (TODO confirm against callers).
 */
__attribute__((always_inline))
ipc_port_t
ipc_port_mark_in_space(
	ipc_port_t              port,
	ipc_object_label_t      *label,
	ipc_space_t             space,
	mach_port_name_t        name,
	ipc_object_state_t      force_state)
{
	ipc_move_policy_t pol = ipc_policy(label)->pol_movability;
	ipc_port_t dest;

	/*
	 * Unfortunately, IO_STATE_IN_LIMBO has to be allowed because
	 * of _kernelrpc_mach_port_insert_right_trap(MACH_MSG_TYPE_MOVE_RECEIVE)
	 * which will copyin a naked receive right and copy it back out,
	 * without it ever being in a message.
	 */
	ipc_release_assert(pol != IPC_MOVE_POLICY_NEVER &&
	    (io_state_in_transit(label->io_state) ||
	    label->io_state == IO_STATE_IN_LIMBO));

	dest = port->ip_destination;
	port->ip_receiver_name = name;
	port->ip_receiver = space;

	if (io_state_in_space(force_state)) {
		/* the caller dictates the final in-space state */
		label->io_state = force_state;
	} else if (pol == IPC_MOVE_POLICY_ONCE) {
		/* the right could move once; having moved, it no longer can */
		label->io_state = IO_STATE_IN_SPACE_IMMOVABLE;
	} else if (pol == IPC_MOVE_POLICY_ONCE_OR_AFTER_PD &&
	    label->io_state != IO_STATE_IN_TRANSIT_PD) {
		label->io_state = IO_STATE_IN_SPACE_IMMOVABLE;
	} else {
		label->io_state = IO_STATE_IN_SPACE;
	}

	io_label_set_and_put(&port->ip_object, label);

	return dest;
}
736
/*
 * Routine:	ipc_port_mark_in_limbo
 * Purpose:
 *	Take a receive right out of its space: the right is no longer
 *	owned by any space and is not (yet) in transit in a message.
 * Conditions:
 *	The port is locked; `label` comes from a label get;
 *	`free_l` collects waitq links for the caller to free.
 */
__attribute__((always_inline))
void
ipc_port_mark_in_limbo(
	ipc_port_t              port,
	ipc_object_label_t      *label,
	waitq_link_list_t       *free_l)
{
	/* only an in-space right may be moved into limbo */
	ipc_release_assert(io_state_in_space(label->io_state));

	ipc_port_prepare_move(port, label, free_l);

	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;

	label->io_state = IO_STATE_IN_LIMBO;
	io_label_set_and_put(&port->ip_object, label);
}
754
/*
 * Routine:	ipc_port_mark_in_limbo_pd
 * Purpose:
 *	Like ipc_port_mark_in_limbo(), but enters IO_STATE_IN_LIMBO_PD
 *	("PD" presumably: port-destroyed notification path — TODO
 *	confirm), reachable from in-space, limbo, or in-transit states.
 * Conditions:
 *	The port is locked; `label` comes from a label get;
 *	`free_l` collects waitq links for the caller to free.
 */
__attribute__((always_inline))
static void
ipc_port_mark_in_limbo_pd(
	ipc_port_t              port,
	ipc_object_label_t      *label,
	waitq_link_list_t       *free_l)
{
	/* immovable rights can never take this transition */
	ipc_release_assert(ipc_policy(label)->pol_movability != IPC_MOVE_POLICY_NEVER &&
	    (io_state_in_space(label->io_state) ||
	    label->io_state == IO_STATE_IN_LIMBO ||
	    label->io_state == IO_STATE_IN_TRANSIT));

	ipc_port_prepare_move(port, label, free_l);

	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;

	label->io_state = IO_STATE_IN_LIMBO_PD;
	io_label_set_and_put(&port->ip_object, label);
}
775
/*
 * Routine:	ipc_port_mark_in_transit
 * Purpose:
 *	Mark a limbo receive right as in transit inside a message whose
 *	destination port is `dest`; takes a reference on `dest` and
 *	stores it in ip_destination.
 * Conditions:
 *	The port is locked.
 */
void
ipc_port_mark_in_transit(ipc_port_t port, ipc_port_t dest)
{
	ipc_object_label_t label = ip_label_get(port);

	/* only limbo rights (plain or PD) may enter transit */
	ipc_release_assert(io_state_in_limbo(label.io_state));

	ip_reference(dest);
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_destination = dest;

	/* preserve the PD flavor across the limbo -> transit transition */
	if (label.io_state == IO_STATE_IN_LIMBO) {
		label.io_state = IO_STATE_IN_TRANSIT;
	} else {
		assert(label.io_state == IO_STATE_IN_LIMBO_PD);
		label.io_state = IO_STATE_IN_TRANSIT_PD;
	}

	io_label_set_and_put(&port->ip_object, &label);
}
796
/*
 * Routine:	ipc_port_mark_inactive
 * Purpose:
 *	Transition an active port into IO_STATE_INACTIVE, stamping the
 *	death timestamp and destroying the message queue.
 * Conditions:
 *	The port is locked; `free_l` collects waitq links to free.
 * Returns:
 *	The result of ipc_mqueue_destroy_locked() — TODO confirm its
 *	exact contract against that routine's definition.
 */
__attribute__((always_inline))
static bool
ipc_port_mark_inactive(
	ipc_port_t              port,
	ipc_object_label_t      *label,
	waitq_link_list_t       *free_l)
{
	ipc_release_assert(io_state_active(label->io_state));

	ipc_port_prepare_move(port, label, free_l);

	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;
	/* record the time of death (see ipc_port_timestamp) */
	port->ip_timestamp = ipc_port_timestamp();

	/*
	 * It's important for this to be done under the same lock hold
	 * as the ipc_mqueue_changed call that ipc_port_prepare_move()
	 * did to avoid additional threads blocking on an mqueue that's
	 * being destroyed.
	 */
	label->io_state = IO_STATE_INACTIVE;
	label->iol_pointer = NULL; /* the caller will free it */
	io_label_set_and_put(&port->ip_object, label);

	return ipc_mqueue_destroy_locked(&port->ip_messages, free_l);
}
824
/*
 * Routine:	ipc_port_init
 * Purpose:
 *	Initializes a newly-allocated port.
 *
 *	The memory is expected to be zero initialized (allocated with Z_ZERO).
 * Conditions:
 *	Returns with the port locked (the waitq is initialized with
 *	SYNC_POLICY_INIT_LOCKED).
 */
static void
ipc_port_init(
	ipc_port_t              port,
	ipc_space_t             space,
	ipc_object_label_t      label,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name)
{
	/* the port has been 0 initialized when called */

	assert(label.io_type != IOT_PORT_SET && label.io_type < IOT_UNKNOWN);

	/* must be done first, many ip_* bits live inside the waitq */
	os_ref_init_raw(&port->ip_object.io_references, NULL);
	waitq_init(&port->ip_waitq, WQT_PORT, SYNC_POLICY_INIT_LOCKED);


	/* ensure default policies are enforced */

	if (ipc_policy(label)->pol_movability == IPC_MOVE_POLICY_NEVER) {
		label.io_state = IO_STATE_IN_SPACE_IMMOVABLE;
	}

	/* initialize the other fields */

	/* "no override" defaults for the kernel QoS and IO-tier overrides */
	port->ip_kernel_qos_override = THREAD_QOS_UNSPECIFIED;
	port->ip_kernel_iotier_override = THROTTLE_LEVEL_END;

	ipc_mqueue_init(&port->ip_messages);
#if MACH_ASSERT
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif /* MACH_ASSERT */

	/* ports are born "in-space" */
	port->ip_receiver_name = name;
	port->ip_receiver = space;


	assert(io_state_in_space(label.io_state));
	io_label_init(&port->ip_object, label);

	/* optionally born with one send right and a make-send count of one */
	if (flags & IP_INIT_MAKE_SEND_RIGHT) {
		port->ip_srights = 1;
		port->ip_mscount = 1;
	}
}
878
/*
 * Routine:	ipc_port_alloc
 * Purpose:
 *	Allocate a port.
 * Conditions:
 *	Nothing locked.  If successful, the port is returned
 *	locked.  (The caller doesn't have a reference.)
 *	On failure, port and label will be freed.
 * Returns:
 *	KERN_SUCCESS		The port is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NO_SPACE		No room for an entry in the space.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc(
	ipc_space_t             space,
	ipc_object_label_t      label,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        *namep,
	ipc_port_t              *portp)
{
	mach_port_name_t name;
	kern_return_t kr;
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;
	ipc_entry_t entry;
	ipc_port_t port;
	ipc_object_t object;

	/* optionally mint a send right alongside the receive right */
	if (flags & IP_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}

	port = ip_alloc();
	object = ip_to_object(port);
	kr = ipc_object_alloc_entry(space, object, &name, &entry);
	if (kr != KERN_SUCCESS) {
		/* per the contract above: consume both the label and the port */
		ipc_port_label_free(label);
		ip_free(port);
		return kr;
	}

	/* space is locked */
	ipc_port_init(port, space, label, flags, name);
	/* port is locked */
	ipc_entry_init(space, object, type, entry, urefs, name);

	is_write_unlock(space);

	*namep = name;
	*portp = port;

	return KERN_SUCCESS;
}
936
/*
 * Routine:	ipc_port_alloc_name
 * Purpose:
 *	Allocate a port, with a specific name.
 * Conditions:
 *	Nothing locked.  If successful, the port is returned
 *	locked.  (The caller doesn't have a reference.)
 * Returns:
 *	KERN_SUCCESS		The port is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NAME_EXISTS	The name already denotes a right.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc_name(
	ipc_space_t             space,
	ipc_object_label_t      label,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	kern_return_t kr;
	ipc_entry_t entry;
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;
	ipc_port_t port;
	ipc_object_t object;

	/* optionally mint a send right alongside the receive right */
	if (flags & IP_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}

	port = ip_alloc();
	object = ip_to_object(port);
	kr = ipc_object_alloc_entry_with_name(space, name, &entry);
	if (kr != KERN_SUCCESS) {
		/* on failure, consume both the label and the port */
		ipc_port_label_free(label);
		ip_free(port);
		return kr;
	}

	/* space is locked */
	ipc_port_init(port, space, label, flags, name);
	/* port is locked */
	ipc_entry_init(space, object, type, entry, urefs, name);

	is_write_unlock(space);

	*portp = port;
	return kr;
}
990
/*
 * Routine:	ipc_port_spnotify
 * Purpose:
 *	Generate send-possible port notifications.
 * Conditions:
 *	Nothing locked, reference held on port.
 *
 *	The port lock is dropped around each notification delivery,
 *	hence the rescan (goto revalidate) after every send.
 */
void
ipc_port_spnotify(
	ipc_port_t      port)
{
	ipc_port_request_index_t index = 0;
	ipc_table_elems_t size = 0;

	/*
	 * If the port has no send-possible request
	 * armed, don't bother to lock the port.
	 */
	if (port->ip_sprequests == 0) {
		return;
	}

	ip_mq_lock(port);

#if IMPORTANCE_INHERITANCE
	if (port->ip_spimportant != 0) {
		port->ip_spimportant = 0;
		/* a TRUE return means the port lock was dropped — retake it */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
			ip_mq_lock(port);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* recheck under the lock; someone may have consumed the requests */
	if (port->ip_sprequests == 0) {
		ip_mq_unlock(port);
		return;
	}
	port->ip_sprequests = 0;

revalidate:
	if (ip_active(port)) {
		ipc_port_request_table_t requests;

		/* table may change each time port unlocked (reload) */
		requests = port->ip_requests;
		assert(requests != NULL);

		/*
		 * no need to go beyond table size when first
		 * we entered - those are future notifications.
		 */
		if (size == 0) {
			size = ipc_port_request_table_count(requests);
		}

		/* no need to backtrack either */
		while (++index < size) {
			ipc_port_request_t ipr = ipc_port_request_table_get_nocheck(requests, index);
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
			boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

			if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
				/* claim send-once right - slot still inuse */
				assert(name != IPR_HOST_NOTIFY);
				ipr->ipr_soright = IP_NULL;
				ip_mq_unlock(port);

				ipc_notify_send_possible(soright, name);

				ip_mq_lock(port);
				goto revalidate;
			}
		}
	}
	ip_mq_unlock(port);
	return;
}
1069
/*
 * Routine:	ipc_port_dnnotify
 * Purpose:
 *	Generate dead name notifications for
 *	all outstanding dead-name and send-
 *	possible requests.
 * Conditions:
 *	Nothing locked.
 *	Port must be inactive.
 *	Reference held on port.
 */
void
ipc_port_dnnotify(
	ipc_port_t      port)
{
	ipc_port_request_table_t requests = port->ip_requests;

	assert(!ip_active(port));
	if (requests != NULL) {
		ipc_port_request_t ipr, base;

		base = ipr = ipc_port_request_table_base(requests);

		/* walk every slot past the base (free-list head) slot */
		while ((ipr = ipc_port_request_table_next_elem(requests, ipr))) {
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright;

			switch (name) {
			case MACH_PORT_DEAD:
			case MACH_PORT_NULL:
				/* no live registration in this slot */
				break;
			case IPR_HOST_NOTIFY:
				/* host-notify registrations are canceled, not notified */
				assert(base->ipr_hn_slot == ipr);
				host_notify_cancel(ipr->ipr_hnotify);
				break;
			default:
				/* strip option bits; only notify valid rights */
				soright = IPR_SOR_PORT(ipr->ipr_soright);
				if (IP_VALID(soright)) {
					ipc_notify_dead_name(soright, name);
				}
				break;
			}
		}
	}
}
1115
1116 /*
1117 * Routine: ipc_port_destroy
1118 * Purpose:
1119 * Destroys a port. Cleans up queued messages.
1120 *
1121 * If the port has a backup, it doesn't get destroyed,
1122 * but is sent in a port-destroyed notification to the backup.
1123 * Conditions:
1124 * The port is locked and alive; nothing else locked.
1125 * The caller has a reference, which is consumed.
1126 * Afterwards, the port is unlocked and dead.
1127 */
1128
void
ipc_port_destroy(ipc_port_t port)
{
	ipc_port_t pdrequest = IP_NULL;
	struct task_watchport_elem *twe = NULL;
	waitq_link_list_t free_l = { };

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	/* "top" means this thread isn't already accounting importance assertions */
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	/* snapshot the label; destruction decisions key off its type and state */
	ipc_object_label_t label = ip_label_get(port);
	ipc_release_assert(io_state_active(label.io_state));

	/*
	 * permanent ports cannot be destroyed.
	 *
	 * It's safe to check this on entry of port destruction,
	 * since kobjects cannot register to port-destroyed notifications.
	 */
	if (ipc_policy(label)->pol_kobject_permanent) {
		panic("trying to destroy a permanent port %p with kobject type: %d",
		    port, ip_type(port));
	}

	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* clear any reply-port context */
	port->ip_reply_context = 0;

	/* must be done before we access ip_pdrequest (unioned with ip_twe) */
	twe = ipc_port_clear_watchport_elem_internal(port);
	assert(!port->ip_has_watchport);

	if (!ip_is_special_reply_port_type(label.io_type)) {
		/* we assume the ref for pdrequest */
		pdrequest = port->ip_pdrequest;
		port->ip_pdrequest = IP_NULL;
	} else if (port->ip_tempowner) {
		panic("ipc_port_destroy: invalid state");
	}

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = ip_get_imp_task(port);
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL) {
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
		}
	}

	if (top) {
		self->ith_assertions = assertcnt;
	}
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * Handle port-destroyed notification
	 */
	if (pdrequest != IP_NULL && ip_active(pdrequest)) {
		/* the port survives: it is sent to the backup instead of dying */
		ipc_port_mark_in_limbo_pd(port, &label, &free_l);

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
	} else {
		ipc_notify_nsenders_t nsrequest;
		ipc_object_label_t label_unsafe_copy = label;
		bool reap_msgs;

		/*
		 * Mark the port and mqueue invalid,
		 * preventing further send/receive operations from succeeding.
		 */
		reap_msgs = ipc_port_mark_inactive(port, &label, &free_l);

		nsrequest = ipc_notify_no_senders_prepare(port);

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* unlink the kmsg from special reply port */
		if (ip_is_special_reply_port_type(label.io_type)) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
		}

		/*
		 * If the port-destroyed notification port didn't look active,
		 * we destroyed the port right away but still need to consume
		 * a send-once right to it.
		 *
		 * This is a racy check, which is ok because it is really an
		 * optimization. See ipc_notify_should_send().
		 */
		if (pdrequest) {
			ipc_port_release_sonce(pdrequest);
		}

		/*
		 * We violate the rules around labels here by making a copy
		 * because we know that ipc_port_mark_inactive() will nil out
		 * the iol_pointer value to the port and we must free it.
		 */
		ipc_port_label_free(label_unsafe_copy);

		if (reap_msgs) {
			/* destroy any messages that were queued on the port */
			ipc_kmsg_reap_delayed();
		}

		if (nsrequest.ns_notify) {
			/*
			 * ipc_notify_no_senders_prepare will never set
			 * ns_notify for a dead kobject port.
			 */
			assert(!nsrequest.ns_is_kobject);
			ip_mq_lock(nsrequest.ns_notify);
			ipc_notify_send_once_and_unlock(nsrequest.ns_notify); /* consumes ref */
		}

		/* generate dead-name notifications */
		ipc_port_dnnotify(port);

		ip_release(port); /* consume caller's ref */
	}

	if (twe) {
		task_watchport_elem_deallocate(twe);
		twe = NULL;
	}

	/* free any port-set links collected while tearing the port down */
	waitq_link_free_list(WQT_PORT_SET, &free_l);

#if IMPORTANCE_INHERITANCE
	if (release_imp_task != IIT_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);
	} else if (assertcnt > 0) {
		if (top) {
			/* no tempowner task: drop the assertions from the current task */
			self->ith_assertions = 0;
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
1297
1298 /*
1299 * Routine: ipc_port_destination_chain_lock
1300 * Purpose:
1301 * Search for the end of the chain (a port not in transit),
1302 * acquiring locks along the way, and return it in `base`.
1303 *
1304 * Returns true if a reference was taken on `base`
1305 *
1306 * Conditions:
1307 * No ports locked.
1308 * ipc_port_multiple_lock held.
1309 */
boolean_t
ipc_port_destination_chain_lock(
	ipc_port_t port,
	ipc_port_t *base)
{
	/* walk ip_destination links, locking each port, until a non-transit port */
	for (;;) {
		ip_mq_lock(port);

		if (!ip_active(port)) {
			/*
			 * Active ports that are ip_mq_lock()ed cannot go away.
			 *
			 * But inactive ports at the end of walking
			 * an ip_destination chain are only protected
			 * from space termination cleanup while the entire
			 * chain of ports leading to them is held.
			 *
			 * Callers of this code tend to unlock the chain
			 * in the same order as this walk, which doesn't
			 * protect `base` properly when it's inactive.
			 *
			 * In that case, take a reference that the caller
			 * is responsible for releasing.
			 */
			ip_reference(port);
			*base = port;
			return true;
		}

		/* port is active */
		if (!ip_in_transit(port)) {
			/* found the end of the chain; no extra reference taken */
			*base = port;
			return false;
		}

		port = ip_get_destination(port);
	}
}
1348
1349
1350 /*
1351 * Routine: ipc_port_check_circularity
1352 * Purpose:
1353 * Check if queueing "port" in a message for "dest"
1354 * would create a circular group of ports and messages.
1355 *
1356 * If no circularity (FALSE returned), then "port"
1357 * is changed from "in limbo" to "in transit".
1358 *
1359 * That is, we want to set port->ip_destination == dest,
1360 * but guaranteeing that this doesn't create a circle
1361 * port->ip_destination->ip_destination->... == port
1362 *
1363 * Conditions:
1364 * No ports locked. References held for "port" and "dest".
1365 */
1366
boolean_t
ipc_port_check_circularity(
	ipc_port_t port,
	ipc_port_t dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* sending a port in a message to itself is trivially circular */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);
	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 * Search for the end of the chain (a port not in transit),
	 * acquiring locks along the way.
	 */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */
		ipc_release_assert(ip_in_limbo(port));
		assert(!took_base_ref);

		/* unwind: unlock the entire chain we just locked */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			ipc_release_assert(ip_is_moving(base));
			next = ip_get_destination(base);

			ip_mq_unlock(base);
			base = next;
		}

		/* drop the turnstile ref acquired at the top */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee: lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	/* Clear the watchport boost */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (ip_is_special_reply_port(dest) && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	/* transition port from "in limbo" to "in transit" toward dest */
	ipc_port_mark_in_transit(port, dest);

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/*
		 * What ipc_port_adjust_port_locked would do,
		 * but we need to also drop even more locks before
		 * calling turnstile_update_inheritor_complete().
		 */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	/* unlock every port from dest down to (but not including) base */
	for (;;) {
		ipc_port_t next;

		if (dest == base) {
			break;
		}

		ipc_release_assert(ip_in_transit(dest));
		next = ip_get_destination(dest);

		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not IN-TRANSIT */
	assert(!ip_in_transit(base));

	ip_mq_unlock(base);
	if (took_base_ref) {
		/* ref was taken by ipc_port_destination_chain_lock on an inactive base */
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
1525
1526 /*
1527 * Routine: ipc_port_watchport_elem
1528 * Purpose:
1529 * Get the port's watchport elem field
1530 *
1531 * Conditions:
1532 * port locked
1533 */
1534 static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)1535 ipc_port_watchport_elem(ipc_port_t port)
1536 {
1537 if (port->ip_has_watchport) {
1538 assert(!ip_is_special_reply_port(port));
1539 return port->ip_twe;
1540 }
1541 return NULL;
1542 }
1543
1544 /*
1545 * Routine: ipc_port_update_watchport_elem
1546 * Purpose:
1547 * Set the port's watchport elem field
1548 *
1549 * Conditions:
1550 * port locked and is not a special reply port.
1551 */
1552 static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port,struct task_watchport_elem * we)1553 ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
1554 {
1555 struct task_watchport_elem *old_we;
1556 ipc_port_t pdrequest;
1557
1558 assert(!ip_is_special_reply_port(port));
1559
1560 /*
1561 * Note: ip_pdrequest and ip_twe are unioned.
1562 * and ip_has_watchport controls the union "type"
1563 */
1564 if (port->ip_has_watchport) {
1565 old_we = port->ip_twe;
1566 pdrequest = old_we->twe_pdrequest;
1567 old_we->twe_pdrequest = IP_NULL;
1568 } else {
1569 old_we = NULL;
1570 pdrequest = port->ip_pdrequest;
1571 }
1572
1573 if (we) {
1574 port->ip_has_watchport = true;
1575 we->twe_pdrequest = pdrequest;
1576 port->ip_twe = we;
1577 } else {
1578 port->ip_has_watchport = false;
1579 port->ip_pdrequest = pdrequest;
1580 }
1581
1582 return old_we;
1583 }
1584
1585 /*
1586 * Routine: ipc_special_reply_stash_pid_locked
1587 * Purpose:
1588 * Set the pid of process that copied out send once right to special reply port.
1589 *
1590 * Conditions:
1591 * port locked
1592 */
static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
{
	/* only special reply ports carry a stashed pid */
	assert(ip_is_special_reply_port(port));
	port->ip_pid = pid;
}
1599
1600 /*
1601 * Routine: ipc_special_reply_get_pid_locked
1602 * Purpose:
1603 * Get the pid of process that copied out send once right to special reply port.
1604 *
1605 * Conditions:
1606 * port locked
1607 */
int
ipc_special_reply_get_pid_locked(ipc_port_t port)
{
	/* only special reply ports carry a stashed pid */
	assert(ip_is_special_reply_port(port));
	return port->ip_pid;
}
1614
1615 /*
1616 * Update the recv turnstile inheritor for a port.
1617 *
1618 * Sync IPC through the port receive turnstile only happens for the special
1619 * reply port case. It has three sub-cases:
1620 *
1621 * 1. a send-once right is in transit, and pushes on the send turnstile of its
1622 * destination mqueue.
1623 *
1624 * 2. a send-once right has been stashed on a knote it was copied out "through",
1625 * as the first such copied out port.
1626 *
1627 * 3. a send-once right has been stashed on a knote it was copied out "through",
1628 * as the second or more copied out port.
1629 */
1630 void
ipc_port_recv_update_inheritor(ipc_port_t port,struct turnstile * rcv_turnstile,turnstile_update_flags_t flags)1631 ipc_port_recv_update_inheritor(
1632 ipc_port_t port,
1633 struct turnstile *rcv_turnstile,
1634 turnstile_update_flags_t flags)
1635 {
1636 struct turnstile *inheritor = TURNSTILE_NULL;
1637 struct knote *kn;
1638
1639 if (ip_active(port) && ip_is_special_reply_port(port)) {
1640 ip_mq_lock_held(port);
1641
1642 switch (port->ip_sync_link_state) {
1643 case PORT_SYNC_LINK_PORT:
1644 if (port->ip_sync_inheritor_port != NULL) {
1645 inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1646 }
1647 break;
1648
1649 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1650 kn = port->ip_sync_inheritor_knote;
1651 inheritor = filt_ipc_kqueue_turnstile(kn);
1652 break;
1653
1654 case PORT_SYNC_LINK_WORKLOOP_STASH:
1655 inheritor = port->ip_sync_inheritor_ts;
1656 break;
1657 }
1658 }
1659
1660 turnstile_update_inheritor(rcv_turnstile, inheritor,
1661 flags | TURNSTILE_INHERITOR_TURNSTILE);
1662 }
1663
1664 /*
1665 * Update the send turnstile inheritor for a port.
1666 *
1667 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
1668 *
1669 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
1670 * to push on thread doing the sync ipc.
1671 *
1672 * 2. a receive right is in transit, and pushes on the send turnstile of its
1673 * destination mqueue.
1674 *
1675 * 3. port was passed as an exec watchport and port is pushing on main thread
1676 * of the task.
1677 *
1678 * 4. a receive right has been stashed on a knote it was copied out "through",
1679 * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
1680 * for the special reply port)
1681 *
1682 * 5. a receive right has been stashed on a knote it was copied out "through",
1683 * as the second or more copied out port (same as
1684 * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
1685 *
1686 * 6. a receive right has been copied out as a part of sync bootstrap checkin
1687 * and needs to push on thread doing the sync bootstrap checkin.
1688 *
1689 * 7. the receive right is monitored by a knote, and pushes on any that is
1690 * registered on a workloop. filt_machport makes sure that if such a knote
1691 * exists, it is kept as the first item in the knote list, so we never need
1692 * to walk.
1693 */
void
ipc_port_send_update_inheritor(
	ipc_port_t port,
	struct turnstile *send_turnstile,
	turnstile_update_flags_t flags)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct knote *kn;
	turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

	ip_mq_lock_held(port);

	/* cases below correspond to the numbered list in the comment above */
	if (!ip_active(port)) {
		/* this port is no longer active, it should not push anywhere */
	} else if (ip_is_special_reply_port(port)) {
		/* Case 1. */
		if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
			inheritor = port->ip_messages.imq_srp_owner_thread;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (ip_in_transit(port)) {
		/* Case 2. */
		inheritor = port_send_turnstile(ip_get_destination(port));
	} else if (port->ip_has_watchport) {
		/* Case 3. */
		if (prioritize_launch) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = ipc_port_get_watchport_inheritor(port);
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
		/* Case 4. */
		inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
		/* Case 5. */
		inheritor = mqueue->imq_inheritor_turnstile;
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
		/* Case 6. */
		if (prioritize_launch) {
			inheritor = port->ip_messages.imq_inheritor_thread_ref;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if ((kn = SLIST_FIRST(&port->ip_klist))) {
		/* Case 7. Push on a workloop that is interested */
		if (filt_machport_kqueue_has_turnstile(kn)) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = filt_ipc_kqueue_turnstile(kn);
		}
	}

	turnstile_update_inheritor(send_turnstile, inheritor,
	    flags | inheritor_flags);
}
1748
1749 /*
1750 * Routine: ipc_port_send_turnstile_prepare
1751 * Purpose:
1752 * Get a reference on port's send turnstile, if
1753 * port does not have a send turnstile then allocate one.
1754 *
1755 * Conditions:
1756 * Nothing is locked.
1757 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;
	struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
	ip_mq_lock(port);

	if (port_send_turnstile(port) == NULL ||
	    port_send_turnstile(port)->ts_prim_count == 0) {
		if (turnstile == TURNSTILE_NULL) {
			/*
			 * Allocation can't happen under the port lock:
			 * drop it, allocate, and retry from the top since
			 * the port state may have changed while unlocked.
			 */
			ip_mq_unlock(port);
			turnstile = turnstile_alloc();
			goto retry_alloc;
		}

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    turnstile, TURNSTILE_SYNC_IPC);
		/* turnstile_prepare consumed our allocation */
		turnstile = TURNSTILE_NULL;

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);

		/* turnstile complete will be called in ipc_port_send_turnstile_complete */
	}

	/* Increment turnstile counter */
	port_send_turnstile(port)->ts_prim_count++;
	ip_mq_unlock(port);

	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
	}
	if (turnstile != TURNSTILE_NULL) {
		/* raced with another preparer: free the unused allocation */
		turnstile_deallocate(turnstile);
	}
}
1798
1799
1800 /*
1801 * Routine: ipc_port_send_turnstile_complete
1802 * Purpose:
1803 * Drop a ref on the port's send turnstile, if the
1804 * ref becomes zero, deallocate the turnstile.
1805 *
1806 * Conditions:
1807 * The space might be locked
1808 */
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;

	/* Drop turnstile count on dest port */
	ip_mq_lock(port);

	port_send_turnstile(port)->ts_prim_count--;
	if (port_send_turnstile(port)->ts_prim_count == 0) {
		/* last user: detach the turnstile from the port */
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
		    &turnstile, TURNSTILE_SYNC_IPC);
		assert(turnstile != TURNSTILE_NULL);
	}
	ip_mq_unlock(port);
	turnstile_cleanup();

	if (turnstile != TURNSTILE_NULL) {
		/* deallocate outside the port lock */
		turnstile_deallocate(turnstile);
		turnstile = TURNSTILE_NULL;
	}
}
1831
1832 /*
1833 * Routine: ipc_port_rcv_turnstile
1834 * Purpose:
1835 * Get the port's receive turnstile
1836 *
1837 * Conditions:
1838 * mqueue locked or thread waiting on turnstile is locked.
1839 */
1840 static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)1841 ipc_port_rcv_turnstile(ipc_port_t port)
1842 {
1843 return *port_rcv_turnstile_address(port);
1844 }
1845
1846
1847 /*
1848 * Routine: ipc_port_link_special_reply_port
1849 * Purpose:
1850 * Link the special reply port with the destination port.
1851 * Allocates turnstile to dest port.
1852 *
1853 * Conditions:
1854 * Nothing is locked.
1855 */
void
ipc_port_link_special_reply_port(
	ipc_port_t special_reply_port,
	ipc_port_t dest_port,
	boolean_t sync_bootstrap_checkin)
{
	boolean_t drop_turnstile_ref = FALSE;
	boolean_t special_reply = FALSE;

	/* Check if dest_port needs a turnstile */
	ipc_port_send_turnstile_prepare(dest_port);

	/* Lock the special reply port and establish the linkage */
	ip_mq_lock(special_reply_port);

	special_reply = ip_is_special_reply_port(special_reply_port);

	if (sync_bootstrap_checkin && special_reply) {
		special_reply_port->ip_sync_bootstrap_checkin = 1;
	}

	/* Check if we need to drop the acquired turnstile ref on dest port */
	if (!special_reply ||
	    special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
	    special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
		/* not a special reply port, or a linkage already exists */
		drop_turnstile_ref = TRUE;
	} else {
		/* take a reference on dest_port */
		ip_reference(dest_port);
		special_reply_port->ip_sync_inheritor_port = dest_port;
		special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
	}

	ip_mq_unlock(special_reply_port);

	if (special_reply) {
		/*
		 * For special reply ports, if the destination port is
		 * marked with the thread group blocked tracking flag,
		 * callout to the performance controller.
		 */
		ipc_port_thread_group_blocked(dest_port);
	}

	if (drop_turnstile_ref) {
		ipc_port_send_turnstile_complete(dest_port);
	}

	return;
}
1906
1907 /*
1908 * Routine: ipc_port_thread_group_blocked
1909 * Purpose:
1910 * Call thread_group_blocked callout if the port
1911 * has ip_tg_block_tracking bit set and the thread
1912 * has not made this callout already.
1913 *
1914 * Conditions:
1915 * Nothing is locked.
1916 */
1917 void
ipc_port_thread_group_blocked(ipc_port_t port __unused)1918 ipc_port_thread_group_blocked(ipc_port_t port __unused)
1919 {
1920 #if CONFIG_THREAD_GROUPS
1921 bool port_tg_block_tracking = false;
1922 thread_t self = current_thread();
1923
1924 if (self->thread_group == NULL ||
1925 (self->options & TH_OPT_IPC_TG_BLOCKED)) {
1926 return;
1927 }
1928
1929 port_tg_block_tracking = port->ip_tg_block_tracking;
1930 if (!port_tg_block_tracking) {
1931 return;
1932 }
1933
1934 machine_thread_group_blocked(self->thread_group, NULL,
1935 PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1936
1937 self->options |= TH_OPT_IPC_TG_BLOCKED;
1938 #endif
1939 }
1940
1941 /*
1942 * Routine: ipc_port_thread_group_unblocked
1943 * Purpose:
1944 * Call thread_group_unblocked callout if the
1945 * thread had previously made a thread_group_blocked
1946 * callout before (indicated by TH_OPT_IPC_TG_BLOCKED
1947 * flag on the thread).
1948 *
1949 * Conditions:
1950 * Nothing is locked.
1951 */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
	thread_t self = current_thread();

	/* Only pair up with a previously issued blocked callout. */
	if (self->options & TH_OPT_IPC_TG_BLOCKED) {
		machine_thread_group_unblocked(self->thread_group, NULL,
		    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

		self->options &= ~TH_OPT_IPC_TG_BLOCKED;
	}
#endif
}
1968
1969 #if DEVELOPMENT || DEBUG
1970 inline void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)1971 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1972 {
1973 special_reply_port->ip_srp_lost_link = 0;
1974 special_reply_port->ip_srp_msg_sent = 0;
1975 }
1976
1977 static inline void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)1978 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1979 {
1980 if (ip_is_special_reply_port(special_reply_port)) {
1981 special_reply_port->ip_srp_msg_sent = 0;
1982 }
1983 }
1984
1985 inline void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)1986 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1987 {
1988 if (ip_is_special_reply_port(special_reply_port)) {
1989 special_reply_port->ip_srp_msg_sent = 1;
1990 }
1991 }
1992
1993 static inline void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)1994 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1995 {
1996 if (ip_is_special_reply_port(special_reply_port) && special_reply_port->ip_srp_msg_sent == 0) {
1997 special_reply_port->ip_srp_lost_link = 1;
1998 }
1999 }
2000
2001 #else /* DEVELOPMENT || DEBUG */
2002 inline void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)2003 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
2004 {
2005 return;
2006 }
2007
2008 static inline void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)2009 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
2010 {
2011 return;
2012 }
2013
2014 inline void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)2015 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
2016 {
2017 return;
2018 }
2019
2020 static inline void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)2021 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
2022 {
2023 return;
2024 }
2025 #endif /* DEVELOPMENT || DEBUG */
2026
2027 /*
2028 * Routine: ipc_port_adjust_special_reply_port_locked
2029 * Purpose:
2030 * If the special port has a turnstile, update its inheritor.
2031 * Condition:
2032 * Special reply port locked on entry.
2033 * Special reply port unlocked on return.
2034 * The passed in port is a special reply port.
2035 * Returns:
2036 * None.
2037 */
2038 void
ipc_port_adjust_special_reply_port_locked(ipc_port_t special_reply_port,struct knote * kn,uint8_t flags,boolean_t get_turnstile)2039 ipc_port_adjust_special_reply_port_locked(
2040 ipc_port_t special_reply_port,
2041 struct knote *kn,
2042 uint8_t flags,
2043 boolean_t get_turnstile)
2044 {
2045 ipc_port_t dest_port = IPC_PORT_NULL;
2046 int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
2047 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
2048 struct turnstile *ts = TURNSTILE_NULL;
2049 struct turnstile *port_stashed_turnstile = TURNSTILE_NULL;
2050
2051 ip_mq_lock_held(special_reply_port); // ip_sync_link_state is touched
2052
2053 if (!ip_is_special_reply_port(special_reply_port)) {
2054 // only mach_msg_receive_results_complete() calls this with any port
2055 assert(get_turnstile);
2056 goto not_special;
2057 }
2058
2059 if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
2060 ipc_special_reply_port_msg_sent_reset(special_reply_port);
2061 }
2062
2063 if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
2064 special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
2065 }
2066
2067 if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
2068 special_reply_port->ip_sync_bootstrap_checkin = 0;
2069 }
2070
2071 /* Check if the special reply port is marked non-special */
2072 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
2073 not_special:
2074 if (get_turnstile) {
2075 turnstile_complete((uintptr_t)special_reply_port,
2076 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
2077 }
2078 ip_mq_unlock(special_reply_port);
2079 if (get_turnstile) {
2080 turnstile_cleanup();
2081 }
2082 return;
2083 }
2084
2085 if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
2086 if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
2087 inheritor = filt_machport_stash_port(kn, special_reply_port,
2088 &sync_link_state);
2089 }
2090 } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
2091 sync_link_state = PORT_SYNC_LINK_ANY;
2092 }
2093
2094 /* Check if need to break linkage */
2095 if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
2096 special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
2097 ip_mq_unlock(special_reply_port);
2098 return;
2099 }
2100
2101 switch (special_reply_port->ip_sync_link_state) {
2102 case PORT_SYNC_LINK_PORT:
2103 dest_port = special_reply_port->ip_sync_inheritor_port;
2104 special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
2105 break;
2106 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
2107 special_reply_port->ip_sync_inheritor_knote = NULL;
2108 break;
2109 case PORT_SYNC_LINK_WORKLOOP_STASH:
2110 port_stashed_turnstile = special_reply_port->ip_sync_inheritor_ts;
2111 special_reply_port->ip_sync_inheritor_ts = NULL;
2112 break;
2113 }
2114
2115 /*
2116 * Stash (or unstash) the server's PID in the ip_sorights field of the
2117 * special reply port, so that stackshot can later retrieve who the client
2118 * is blocked on.
2119 */
2120 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
2121 sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
2122 ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
2123 } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
2124 sync_link_state == PORT_SYNC_LINK_ANY) {
2125 /* If we are resetting the special reply port, remove the stashed pid. */
2126 ipc_special_reply_stash_pid_locked(special_reply_port, 0);
2127 }
2128
2129 special_reply_port->ip_sync_link_state = sync_link_state;
2130
2131 switch (sync_link_state) {
2132 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
2133 special_reply_port->ip_sync_inheritor_knote = kn;
2134 break;
2135 case PORT_SYNC_LINK_WORKLOOP_STASH:
2136 turnstile_reference(inheritor);
2137 special_reply_port->ip_sync_inheritor_ts = inheritor;
2138 break;
2139 case PORT_SYNC_LINK_NO_LINKAGE:
2140 if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
2141 ipc_special_reply_port_lost_link(special_reply_port);
2142 }
2143 break;
2144 }
2145
2146 /* Get thread's turnstile donated to special reply port */
2147 if (get_turnstile) {
2148 turnstile_complete((uintptr_t)special_reply_port,
2149 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
2150 } else {
2151 ts = ipc_port_rcv_turnstile(special_reply_port);
2152 if (ts) {
2153 turnstile_reference(ts);
2154 ipc_port_recv_update_inheritor(special_reply_port, ts,
2155 TURNSTILE_IMMEDIATE_UPDATE);
2156 }
2157 }
2158
2159 ip_mq_unlock(special_reply_port);
2160
2161 if (get_turnstile) {
2162 turnstile_cleanup();
2163 } else if (ts) {
2164 /* Call turnstile cleanup after dropping the interlock */
2165 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2166 turnstile_deallocate(ts);
2167 }
2168
2169 if (port_stashed_turnstile) {
2170 turnstile_deallocate(port_stashed_turnstile);
2171 }
2172
2173 /* Release the ref on the dest port and its turnstile */
2174 if (dest_port) {
2175 ipc_port_send_turnstile_complete(dest_port);
2176 /* release the reference on the dest port, space lock might be held */
2177 ip_release_safe(dest_port);
2178 }
2179 }
2180
2181 /*
2182 * Routine: ipc_port_adjust_special_reply_port
2183 * Purpose:
2184 * If the special port has a turnstile, update its inheritor.
2185 * Condition:
2186 * Nothing locked.
2187 * Returns:
2188 * None.
2189 */
2190 void
ipc_port_adjust_special_reply_port(ipc_port_t port,uint8_t flags)2191 ipc_port_adjust_special_reply_port(
2192 ipc_port_t port,
2193 uint8_t flags)
2194 {
2195 if (ip_is_special_reply_port(port)) {
2196 ip_mq_lock(port);
2197 ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2198 }
2199 }
2200
2201 /*
2202 * Routine: ipc_port_adjust_sync_link_state_locked
2203 * Purpose:
2204 * Update the sync link state of the port and the
2205 * turnstile inheritor.
2206 * Condition:
2207 * Port locked on entry.
2208 * Port locked on return.
2209 * Returns:
2210 * None.
2211 */
void
ipc_port_adjust_sync_link_state_locked(
	ipc_port_t port,
	int sync_link_state,
	turnstile_inheritor_t inheritor)
{
	/*
	 * Drop the reference held for the outgoing linkage when the old
	 * state pinned a thread or a turnstile as the inheritor.
	 */
	switch (port->ip_sync_link_state) {
	case PORT_SYNC_LINK_RCV_THREAD:
		/* deallocate the thread reference for the inheritor */
		thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* deallocate the turnstile reference for the inheritor */
		turnstile_deallocate(port->ip_messages.imq_inheritor_turnstile);
		break;
	}

	/* reset the knote list before installing the new linkage */
	klist_init(&port->ip_klist);

	/* install the new inheritor, taking a reference matching its type */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		port->ip_messages.imq_inheritor_knote = inheritor;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* knote can be deleted by userspace, take a reference on turnstile */
		turnstile_reference(inheritor);
		port->ip_messages.imq_inheritor_turnstile = inheritor;
		break;
	case PORT_SYNC_LINK_RCV_THREAD:
		/* The thread could exit without clearing port state, take a thread ref */
		thread_reference((thread_t)inheritor);
		port->ip_messages.imq_inheritor_thread_ref = inheritor;
		break;
	default:
		/* any other requested state degrades to "no linkage" */
		klist_init(&port->ip_klist);
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	port->ip_sync_link_state = sync_link_state;
}
2252
2253
2254 /*
2255 * Routine: ipc_port_adjust_port_locked
2256 * Purpose:
2257 * If the port has a turnstile, update its inheritor.
2258 * Condition:
2259 * Port locked on entry.
2260 * Port unlocked on return.
2261 * Returns:
2262 * None.
2263 */
void
ipc_port_adjust_port_locked(
	ipc_port_t port,
	struct knote *kn,
	boolean_t sync_bootstrap_checkin)
{
	int sync_link_state = PORT_SYNC_LINK_ANY;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

	ip_mq_lock_held(port); // ip_sync_link_state is touched
	assert(!ip_is_special_reply_port(port));

	if (kn) {
		/* stash the port on the knote; it selects the new link state */
		inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
		if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
			inheritor = kn;
		}
	} else if (sync_bootstrap_checkin) {
		/* push on the thread performing the bootstrap check-in */
		inheritor = current_thread();
		sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
	}

	ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
	port->ip_sync_bootstrap_checkin = 0;

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2292
2293 /*
2294 * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
2295 * Purpose:
2296 * If the port is pushing on rcv thread, clear it.
2297 * Condition:
2298 * Port locked on entry
2299 * Port unlocked on return.
2300 * Returns:
2301 * None.
2302 */
2303 void
ipc_port_clear_sync_rcv_thread_boost_locked(ipc_port_t port)2304 ipc_port_clear_sync_rcv_thread_boost_locked(
2305 ipc_port_t port)
2306 {
2307 ip_mq_lock_held(port); // ip_sync_link_state is touched
2308
2309 if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
2310 ip_mq_unlock(port);
2311 return;
2312 }
2313
2314 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
2315
2316 ipc_port_send_turnstile_recompute_push_locked(port);
2317 /* port unlocked */
2318 }
2319
2320 /*
2321 * Routine: ipc_port_has_prdrequest
2322 * Purpose:
2323 * Returns whether a port has a port-destroyed request armed
2324 * Condition:
2325 * Port is locked.
2326 */
2327 bool
ipc_port_has_prdrequest(ipc_port_t port)2328 ipc_port_has_prdrequest(
2329 ipc_port_t port)
2330 {
2331 if (ip_is_special_reply_port(port)) {
2332 return false;
2333 }
2334 if (port->ip_has_watchport) {
2335 return port->ip_twe->twe_pdrequest != IP_NULL;
2336 }
2337 return port->ip_pdrequest != IP_NULL;
2338 }
2339
2340 /*
2341 * Routine: ipc_port_add_watchport_elem_locked
2342 * Purpose:
2343 * Transfer the turnstile boost of watchport to task calling exec.
2344 * Condition:
2345 * Port locked on entry.
2346 * Port unlocked on return.
2347 * Returns:
 *		KERN_SUCCESS on success.
2349 * KERN_FAILURE otherwise.
2350 */
kern_return_t
ipc_port_add_watchport_elem_locked(
	ipc_port_t port,
	struct task_watchport_elem *watchport_elem,
	struct task_watchport_elem **old_elem)
{
	ip_mq_lock_held(port);

	/* Watchport boost only works for non-special active ports mapped in an ipc space */
	if (!ip_active(port) || ip_is_special_reply_port(port) || !ip_in_a_space(port)) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
		/* Sever the linkage if the port was pushing on knote */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
	}

	/* swap in the new element; previous element is handed back to the caller */
	*old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2376
2377 /*
2378 * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
2379 * Purpose:
2380 * Remove the turnstile boost of watchport and recompute the push.
2381 * Condition:
2382 * Port locked on entry.
2383 * Port unlocked on return.
2384 * Returns:
 *		KERN_SUCCESS on success.
2386 * KERN_FAILURE otherwise.
2387 */
2388 kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(ipc_port_t port,struct task_watchport_elem * watchport_elem)2389 ipc_port_clear_watchport_elem_internal_conditional_locked(
2390 ipc_port_t port,
2391 struct task_watchport_elem *watchport_elem)
2392 {
2393 ip_mq_lock_held(port);
2394
2395 if (ipc_port_watchport_elem(port) != watchport_elem) {
2396 ip_mq_unlock(port);
2397 return KERN_FAILURE;
2398 }
2399
2400 ipc_port_clear_watchport_elem_internal(port);
2401 ipc_port_send_turnstile_recompute_push_locked(port);
2402 /* port unlocked */
2403 return KERN_SUCCESS;
2404 }
2405
2406 /*
2407 * Routine: ipc_port_replace_watchport_elem_conditional_locked
2408 * Purpose:
2409 * Replace the turnstile boost of watchport and recompute the push.
2410 * Condition:
2411 * Port locked on entry.
2412 * Port unlocked on return.
2413 * Returns:
 *		KERN_SUCCESS on success.
2415 * KERN_FAILURE otherwise.
2416 */
2417 kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(ipc_port_t port,struct task_watchport_elem * old_watchport_elem,struct task_watchport_elem * new_watchport_elem)2418 ipc_port_replace_watchport_elem_conditional_locked(
2419 ipc_port_t port,
2420 struct task_watchport_elem *old_watchport_elem,
2421 struct task_watchport_elem *new_watchport_elem)
2422 {
2423 ip_mq_lock_held(port);
2424
2425 if (ip_is_special_reply_port(port) ||
2426 ipc_port_watchport_elem(port) != old_watchport_elem) {
2427 ip_mq_unlock(port);
2428 return KERN_FAILURE;
2429 }
2430
2431 ipc_port_update_watchport_elem(port, new_watchport_elem);
2432 ipc_port_send_turnstile_recompute_push_locked(port);
2433 /* port unlocked */
2434 return KERN_SUCCESS;
2435 }
2436
2437 /*
2438 * Routine: ipc_port_clear_watchport_elem_internal
2439 * Purpose:
2440 * Remove the turnstile boost of watchport.
2441 * Condition:
2442 * Port locked on entry.
2443 * Port locked on return.
2444 * Returns:
2445 * Old task_watchport_elem returned.
2446 */
2447 struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(ipc_port_t port)2448 ipc_port_clear_watchport_elem_internal(
2449 ipc_port_t port)
2450 {
2451 ip_mq_lock_held(port);
2452
2453 if (!port->ip_has_watchport) {
2454 return NULL;
2455 }
2456
2457 return ipc_port_update_watchport_elem(port, NULL);
2458 }
2459
2460 /*
2461 * Routine: ipc_port_send_turnstile_recompute_push_locked
2462 * Purpose:
2463 * Update send turnstile inheritor of port and recompute the push.
2464 * Condition:
2465 * Port locked on entry.
2466 * Port unlocked on return.
2467 * Returns:
2468 * None.
2469 */
static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t port)
{
	struct turnstile *send_turnstile = port_send_turnstile(port);
	if (send_turnstile) {
		/* hold a ref so the turnstile survives dropping the port lock */
		turnstile_reference(send_turnstile);
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
	}
	ip_mq_unlock(port);

	if (send_turnstile) {
		/* finish the inheritor propagation without the port interlock held */
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate(send_turnstile);
	}
}
2488
2489 /*
2490 * Routine: ipc_port_get_watchport_inheritor
2491 * Purpose:
2492 * Returns inheritor for watchport.
2493 *
2494 * Conditions:
2495 * mqueue locked.
2496 * Returns:
2497 * watchport inheritor.
2498 */
2499 static thread_t
ipc_port_get_watchport_inheritor(ipc_port_t port)2500 ipc_port_get_watchport_inheritor(
2501 ipc_port_t port)
2502 {
2503 ip_mq_lock_held(port);
2504 return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2505 }
2506
2507 /*
 *	Routine:	ipc_port_get_receiver_task_locked
2509 * Purpose:
2510 * Returns receiver task pointer and its pid (if any) for port.
2511 *
2512 * Conditions:
2513 * Assumes the port is locked.
2514 */
2515 pid_t
ipc_port_get_receiver_task_locked(ipc_port_t port,task_t * task)2516 ipc_port_get_receiver_task_locked(ipc_port_t port, task_t *task)
2517 {
2518 task_t receiver = TASK_NULL;
2519 pid_t pid = -1;
2520
2521 if (!port) {
2522 goto out;
2523 }
2524
2525 if (ip_in_a_space(port) &&
2526 !ip_in_space(port, ipc_space_kernel) &&
2527 !ip_in_space(port, ipc_space_reply)) {
2528 receiver = port->ip_receiver->is_task;
2529 pid = task_pid(receiver);
2530 }
2531
2532 out:
2533 if (task) {
2534 *task = receiver;
2535 }
2536 return pid;
2537 }
2538
2539 /*
2540 * Routine: ipc_port_get_receiver_task
2541 * Purpose:
2542 * Returns receiver task pointer and its pid (if any) for port.
2543 *
2544 * Conditions:
2545 * Nothing locked. The routine takes port lock.
2546 */
2547 pid_t
ipc_port_get_receiver_task(ipc_port_t port,task_t * task)2548 ipc_port_get_receiver_task(ipc_port_t port, task_t *task)
2549 {
2550 pid_t pid = -1;
2551
2552 if (!port) {
2553 if (task) {
2554 *task = TASK_NULL;
2555 }
2556 return pid;
2557 }
2558
2559 ip_mq_lock(port);
2560 pid = ipc_port_get_receiver_task_locked(port, task);
2561 ip_mq_unlock(port);
2562
2563 return pid;
2564 }
2565
2566 /*
2567 * Routine: ipc_port_impcount_delta
2568 * Purpose:
2569 * Adjust only the importance count associated with a port.
2570 * If there are any adjustments to be made to receiver task,
2571 * those are handled elsewhere.
2572 *
2573 * For now, be defensive during deductions to make sure the
2574 * impcount for the port doesn't underflow zero. This will
2575 * go away when the port boost addition is made atomic (see
2576 * note in ipc_port_importance_delta()).
2577 * Conditions:
2578 * The port is referenced and locked.
2579 * Nothing else is locked.
2580 */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t port,
	mach_port_delta_t delta,
	ipc_port_t __unused base)
{
	mach_port_delta_t absdelta;

	/* inactive ports carry no importance; nothing to adjust */
	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	/* deducting: clamp so ip_impcount never underflows zero */
	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	/*
	 * Over-release detected: log the receiver (or the receiver of the
	 * base of the transit chain) to help find the misbehaving client.
	 */
	if (ip_in_a_space(port)) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    ip_get_receiver_name(port),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		assert(ip_in_a_space(base));
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    ip_get_receiver_name(base),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif

	/* report only what was actually dropped (see routine header note) */
	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
2652
2653 /*
2654 * Routine: ipc_port_importance_delta_internal
2655 * Purpose:
2656 * Adjust the importance count through the given port.
2657 * If the port is in transit, apply the delta throughout
2658 * the chain. Determine if the there is a task at the
2659 * base of the chain that wants/needs to be adjusted,
2660 * and if so, apply the delta.
2661 * Conditions:
2662 * The port is referenced and locked on entry.
2663 * Importance may be locked.
2664 * Nothing else is locked.
2665 * The lock may be dropped on exit.
2666 * Returns TRUE if lock was dropped.
2667 */
2668 #if IMPORTANCE_INHERITANCE
2669
boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t *deltap,
	ipc_importance_task_t *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	/* default: no task to adjust at the base of the chain */
	*imp_task = IIT_NULL;

	if (*deltap == 0) {
		return FALSE;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_in_transit(port)) {
		dropped = true;

		ip_mq_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		assert(ip_in_transit(port));
		next = ip_get_destination(port);
		ip_mq_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				*imp_task = ip_get_imp_task(base);
			}
			/* otherwise don't boost */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked. If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_mq_unlock(base);
		if (took_base_ref) {
			/* importance lock might be held */
			ip_release_safe(base);
		}
	}

	return dropped;
}
2777 #endif /* IMPORTANCE_INHERITANCE */
2778
2779 /*
2780 * Routine: ipc_port_importance_delta
2781 * Purpose:
2782 * Adjust the importance count through the given port.
2783 * If the port is in transit, apply the delta throughout
2784 * the chain.
2785 *
2786 * If there is a task at the base of the chain that wants/needs
2787 * to be adjusted, apply the delta.
2788 * Conditions:
2789 * The port is referenced and locked on entry.
2790 * Nothing else is locked.
2791 * The lock may be dropped on exit.
2792 * Returns TRUE if lock was dropped.
2793 */
2794 #if IMPORTANCE_INHERITANCE
2795
boolean_t
ipc_port_importance_delta(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	/* no task to adjust or nothing left to apply: balance the ref and leave */
	if (IIT_NULL == imp_task || delta == 0) {
		if (imp_task) {
			ipc_importance_task_release(imp_task);
		}
		return dropped;
	}

	/* drop the port lock before adjusting task assertions */
	if (!dropped) {
		ip_mq_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	ipc_importance_task_release(imp_task);
	return TRUE;
}
2829 #endif /* IMPORTANCE_INHERITANCE */
2830
ipc_port_t
ipc_port_make_send_any_locked(
	ipc_port_t port)
{
	require_ip_active(port);
	/* minting a send right from the receive right bumps the make-send count */
	port->ip_mscount++;
	ip_srights_inc(port);
	/* the new naked right holds its own port reference */
	ip_reference(port);
	return port;
}
2841
2842 ipc_port_t
ipc_port_make_send_any(ipc_port_t port)2843 ipc_port_make_send_any(
2844 ipc_port_t port)
2845 {
2846 ipc_port_t sright = port;
2847
2848 if (IP_VALID(port)) {
2849 ip_mq_lock(port);
2850 if (ip_active(port)) {
2851 ipc_port_make_send_any_locked(port);
2852 } else {
2853 sright = IP_DEAD;
2854 }
2855 ip_mq_unlock(port);
2856 }
2857
2858 return sright;
2859 }
2860
2861 ipc_port_t
ipc_port_make_send_mqueue(ipc_port_t port)2862 ipc_port_make_send_mqueue(
2863 ipc_port_t port)
2864 {
2865 ipc_port_t sright = port;
2866
2867 if (IP_VALID(port)) {
2868 ip_mq_lock(port);
2869 if (__improbable(!ip_active(port))) {
2870 sright = IP_DEAD;
2871 } else if (__improbable(ip_is_kobject(port))) {
2872 sright = IP_NULL;
2873 } else {
2874 ipc_port_make_send_any_locked(port);
2875 }
2876 ip_mq_unlock(port);
2877 }
2878
2879 return sright;
2880 }
2881
void
ipc_port_copy_send_any_locked(
	ipc_port_t port)
{
	/* duplicating an existing send right: at least one must already exist */
	assert(port->ip_srights > 0);
	ip_srights_inc(port);
	/* the new naked right holds its own port reference */
	ip_reference(port);
}
2890
2891 ipc_port_t
ipc_port_copy_send_any(ipc_port_t port)2892 ipc_port_copy_send_any(
2893 ipc_port_t port)
2894 {
2895 ipc_port_t sright = port;
2896
2897 if (IP_VALID(port)) {
2898 ip_mq_lock(port);
2899 if (ip_active(port)) {
2900 ipc_port_copy_send_any_locked(port);
2901 } else {
2902 sright = IP_DEAD;
2903 }
2904 ip_mq_unlock(port);
2905 }
2906
2907 return sright;
2908 }
2909
2910 ipc_port_t
ipc_port_copy_send_mqueue(ipc_port_t port)2911 ipc_port_copy_send_mqueue(
2912 ipc_port_t port)
2913 {
2914 ipc_port_t sright = port;
2915
2916 if (IP_VALID(port)) {
2917 ip_mq_lock(port);
2918 if (__improbable(!ip_active(port))) {
2919 sright = IP_DEAD;
2920 } else if (__improbable(ip_is_kobject(port))) {
2921 sright = IP_NULL;
2922 } else {
2923 ipc_port_copy_send_any_locked(port);
2924 }
2925 ip_mq_unlock(port);
2926 }
2927
2928 return sright;
2929 }
2930
2931 /*
2932 * Routine: ipc_port_copyout_send
2933 * Purpose:
2934 * Copyout a naked send right (possibly null/dead),
2935 * or if that fails, destroy the right.
2936 * Conditions:
2937 * Nothing locked.
2938 */
2939
2940 static mach_port_name_t
ipc_port_copyout_send_internal(ipc_port_t sright,ipc_space_t space,ipc_object_copyout_flags_t flags)2941 ipc_port_copyout_send_internal(
2942 ipc_port_t sright,
2943 ipc_space_t space,
2944 ipc_object_copyout_flags_t flags)
2945 {
2946 mach_port_name_t name;
2947
2948 if (IP_VALID(sright)) {
2949 kern_return_t kr;
2950
2951 kr = ipc_object_copyout(space, sright, MACH_MSG_TYPE_PORT_SEND,
2952 flags, NULL, &name);
2953 if (kr != KERN_SUCCESS) {
2954 if (kr == KERN_INVALID_CAPABILITY) {
2955 name = MACH_PORT_DEAD;
2956 } else {
2957 name = MACH_PORT_NULL;
2958 }
2959 }
2960 } else {
2961 name = CAST_MACH_PORT_TO_NAME(sright);
2962 }
2963
2964 return name;
2965 }
2966
mach_port_name_t
ipc_port_copyout_send(
	ipc_port_t sright, /* can be invalid */
	ipc_space_t space)
{
	/* plain copyout: no pinning semantics requested */
	return ipc_port_copyout_send_internal(sright, space,
	    IPC_OBJECT_COPYOUT_FLAGS_NONE);
}
2975
2976 /* Used by pthread kext to copyout thread port only */
mach_port_name_t
ipc_port_copyout_send_pinned(
	ipc_port_t sright, /* can be invalid */
	ipc_space_t space)
{
	/* copyout with the pinned flag set (pthread kext thread ports) */
	return ipc_port_copyout_send_internal(sright, space,
	    IPC_OBJECT_COPYOUT_FLAGS_PINNED);
}
2985
2986 /*
2987 * Routine: ipc_port_release_send_and_unlock
2988 * Purpose:
2989 * Release a naked send right.
2990 * Consumes a ref for the port.
2991 * Conditions:
2992 * Port is valid and locked on entry
2993 * Port is unlocked on exit.
2994 */
void
ipc_port_release_send_and_unlock(
	ipc_port_t port)
{
	ipc_notify_nsenders_t nsrequest = { };

	ip_srights_dec(port);

	/* last send right gone: prepare the no-senders notification (if armed) */
	if (ip_active(port) && port->ip_srights == 0) {
		nsrequest = ipc_notify_no_senders_prepare(port);
	}

	ip_mq_unlock(port);
	ip_release(port);

	/* emit the notification only after the port lock is dropped */
	ipc_notify_no_senders_emit(nsrequest);
}
3012
3013 /*
3014 * Routine: ipc_port_release_send
3015 * Purpose:
3016 * Release a naked send right.
3017 * Consumes a ref for the port.
3018 * Conditions:
3019 * Nothing locked.
3020 */
3021
3022 __attribute__((flatten, noinline))
3023 void
ipc_port_release_send(ipc_port_t port)3024 ipc_port_release_send(
3025 ipc_port_t port)
3026 {
3027 if (IP_VALID(port)) {
3028 ip_mq_lock(port);
3029 ipc_port_release_send_and_unlock(port);
3030 }
3031 }
3032
3033 /*
3034 * Routine: ipc_port_make_sonce_locked
3035 * Purpose:
3036 * Make a naked send-once right from a receive right.
3037 * Conditions:
3038 * The port is locked and active.
3039 */
3040
ipc_port_t
ipc_port_make_sonce_locked(
	ipc_port_t port)
{
	require_ip_active(port);
	ip_sorights_inc(port);
	/* the new naked send-once right holds its own port reference */
	ip_reference(port);
	return port;
}
3050
3051 /*
3052 * Routine: ipc_port_make_sonce
3053 * Purpose:
3054 * Make a naked send-once right from a receive right.
3055 * Conditions:
3056 * The port is not locked.
3057 */
3058
3059 ipc_port_t
ipc_port_make_sonce(ipc_port_t port)3060 ipc_port_make_sonce(
3061 ipc_port_t port)
3062 {
3063 if (!IP_VALID(port)) {
3064 return port;
3065 }
3066
3067 ip_mq_lock(port);
3068 if (ip_active(port)) {
3069 ipc_port_make_sonce_locked(port);
3070 ip_mq_unlock(port);
3071 return port;
3072 }
3073 ip_mq_unlock(port);
3074 return IP_DEAD;
3075 }
3076
3077 /*
3078 * Routine: ipc_port_release_sonce
3079 * Purpose:
3080 * Release a naked send-once right.
3081 * Consumes a ref for the port.
3082 *
3083 * In normal situations, this is never used.
3084 * Send-once rights are only consumed when
3085 * a message (possibly a send-once notification)
3086 * is sent to them.
3087 * Conditions:
3088 * The port is locked, possibly a space too.
3089 */
void
ipc_port_release_sonce_and_unlock(
	ipc_port_t port)
{
	ip_mq_lock_held(port);

	ip_sorights_dec(port);

	if (ip_is_special_reply_port(port)) {
		/* consumes the port lock */
		ipc_port_adjust_special_reply_port_locked(port, NULL,
		    IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN, FALSE);
	} else {
		ip_mq_unlock(port);
	}

	/* drop the reference the send-once right was holding */
	ip_release(port);
}
3107
3108 /*
3109 * Routine: ipc_port_release_sonce
3110 * Purpose:
3111 * Release a naked send-once right.
3112 * Consumes a ref for the port.
3113 *
3114 * In normal situations, this is never used.
3115 * Send-once rights are only consumed when
3116 * a message (possibly a send-once notification)
3117 * is sent to them.
3118 * Conditions:
3119 * Nothing locked except possibly a space.
3120 */
3121 void
ipc_port_release_sonce(ipc_port_t port)3122 ipc_port_release_sonce(
3123 ipc_port_t port)
3124 {
3125 if (IP_VALID(port)) {
3126 ip_mq_lock(port);
3127 ipc_port_release_sonce_and_unlock(port);
3128 }
3129 }
3130
3131 /*
3132 * Routine: ipc_port_release_receive
3133 * Purpose:
3134 * Release a naked (in limbo or in transit) receive right.
3135 * Consumes a ref for the port; destroys the port.
3136 * Conditions:
3137 * Nothing locked.
3138 */
3139
void
ipc_port_release_receive(
	ipc_port_t port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_mq_lock(port);

	/* a naked receive right must be in limbo or in transit */
	ipc_release_assert(ip_is_moving(port));
	dest = ip_get_destination(port);

	ipc_port_destroy(port); /* consumes ref, unlocks */

	/* sever the in-transit linkage to the destination port */
	if (dest != IP_NULL) {
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
3162
3163 /*
3164 * Routine: ipc_port_alloc_special
3165 * Purpose:
3166 * Allocate a port in a special space.
3167 * The new port is returned with one ref and locked.
3168 * If unsuccessful, IP_NULL is returned.
3169 * Conditions:
3170 * Nothing locked.
3171 */
3172
3173 ipc_port_t
ipc_port_alloc_special(ipc_space_t space,ipc_object_label_t label,ipc_port_init_flags_t flags)3174 ipc_port_alloc_special(
3175 ipc_space_t space,
3176 ipc_object_label_t label,
3177 ipc_port_init_flags_t flags)
3178 {
3179 ipc_port_t port;
3180
3181 port = ip_alloc();
3182 ipc_port_init(port, space, label, flags, MACH_PORT_SPECIAL_DEFAULT);
3183 return port;
3184 }
3185
3186 /*
3187 * Routine: ipc_port_free
3188 * Purpose:
3189 * Called on last reference deallocate to
3190 * free any remaining data associated with the
3191 * port.
3192 * Conditions:
3193 * Nothing locked.
3194 */
void
ipc_port_free(
	ipc_port_t port)
{
	ipc_port_request_table_t requests = port->ip_requests;

	/* turnstiles must already have been torn down */
	assert(port_send_turnstile(port) == TURNSTILE_NULL);

	if (waitq_type(&port->ip_waitq) == WQT_PORT) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	/* only inactive (destroyed) ports may be freed */
	ipc_release_assert(!ip_active(port));

	/* free any remaining port-request (notification) table */
	if (requests) {
		port->ip_requests = NULL;
		ipc_port_request_table_free_noclear(requests);
	}

	waitq_deinit(&port->ip_waitq);
#if MACH_ASSERT
	/* drop the allocation backtrace kept for debugging */
	if (port->ip_made_bt) {
		btref_put(port->ip_made_bt);
	}
#endif
	ip_free(port);
}
3222
/*
 *	Routine:	kdp_mqueue_send_find_owner
 *	Purpose:
 *		Discover the owner of the ipc object that contains the input
 *		waitq object. The thread blocked on the waitq should be
 *		waiting for an IPC_MQUEUE_FULL event.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortSend.
 *		Runs in the kernel debugger (kdp) context: the port is read
 *		without taking its lock, so we bail out if anyone holds it.
 *	Note:
 *		If we find out that the containing port is actually in
 *		transit, we reset the wait_type field to reflect this.
 */
void
kdp_mqueue_send_find_owner(
	struct waitq      *waitq,
	__assert_only event64_t event,
	thread_waitinfo_v2_t *waitinfo,
	struct ipc_service_port_label **isplp)
{
	struct turnstile *turnstile;
	assert(waitinfo->wait_type == kThreadWaitPortSend);
	assert(event == IPC_MQUEUE_FULL);
	assert(waitq_type(waitq) == WQT_TURNSTILE);

	/* The send-blocked thread waits on the port's turnstile waitq. */
	turnstile = waitq_to_turnstile(waitq);
	ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */

	ip_validate(port);

	waitinfo->owner = 0;
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
	if (ip_mq_lock_held_kdp(port)) {
		/*
		 * someone has the port locked: it may be in an
		 * inconsistent state: bail
		 */
		waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
		return;
	}

	/* now we are the only one accessing the port */
	if (ip_active(port)) {
		if (port->ip_tempowner) {
			ipc_importance_task_t imp_task = ip_get_imp_task(port);
			if (imp_task != IIT_NULL && imp_task->iit_task != NULL) {
				/* port is held by a tempowner */
				/*
				 * NOTE(review): port->ip_imp_task should be the same
				 * object as the imp_task local above — consider using
				 * imp_task->iit_task here for consistency; confirm.
				 */
				waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
		} else if (ip_in_a_space(port)) { /* no port lock needed */
			ipc_space_t space = port->ip_receiver;

			if (space == ipc_space_kernel) { /* access union field as ip_receiver */
				/*
				 * The kernel pid is 0, make this
				 * distinguishable from no-owner and
				 * inconsistent port state.
				 */
				waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
			} else {
				waitinfo->owner = pid_from_task(space->is_task);
			}
		} else if (ip_in_transit(port)) { /* access union field as ip_destination */
			/* port is being carried in a message; report its destination port */
			waitinfo->wait_type = kThreadWaitPortSendInTransit;
			waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
		}
		/* expose the service-port label for service/bootstrap ports */
		if (ip_is_any_service_port(port) ||
		    ip_is_bootstrap_port(port)) {
			*isplp = ip_label_peek_kdp(port).iol_service;
		}
	}
}
3297
/*
 *	Routine:	kdp_mqueue_recv_find_owner
 *	Purpose:
 *		Discover the "owner" of the ipc object that contains the input
 *		waitq object. The thread blocked on the waitq is trying to
 *		receive on the mqueue.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortReceive.
 *		Runs in the kernel debugger (kdp) context: objects are read
 *		without taking their locks, so we bail out on held locks.
 *	Note:
 *		If we find that we are actualy waiting on a port set, we reset
 *		the wait_type field to reflect this.
 */
void
kdp_mqueue_recv_find_owner(
	struct waitq      *waitq,
	__assert_only event64_t event,
	thread_waitinfo_v2_t *waitinfo,
	struct ipc_service_port_label **isplp)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	waitinfo->owner = 0;

	if (waitq_type(waitq) == WQT_PORT_SET) {
		ipc_pset_t set = ips_from_waitq(waitq);

		ips_validate(set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_mq_lock_held_kdp(set)) {
			/* pset lock held by someone else: state may be inconsistent */
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else if (waitq_type(waitq) == WQT_PORT) {
		ipc_port_t port = ip_from_waitq(waitq);

		ip_validate(port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_mq_lock_held_kdp(port)) {
			/* port lock held by someone else: bail rather than misreport */
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (ip_in_a_space(port)) { /* no port lock needed */
				/* receiver's name for the port in its own space */
				waitinfo->owner = port->ip_receiver_name;
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
			/* flag special-reply ports; expose label for service/bootstrap ports */
			if (ip_is_special_reply_port(port)) {
				waitinfo->wait_flags |= STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY;
			} else if (ip_is_any_service_port(port) ||
			    ip_is_bootstrap_port(port)) {
				*isplp = ip_label_peek_kdp(port).iol_service;
			}
		}
	}
}
3361
3362 kern_return_t
ipc_port_reset_thread_attr(ipc_port_t port)3363 ipc_port_reset_thread_attr(
3364 ipc_port_t port)
3365 {
3366 uint8_t iotier = THROTTLE_LEVEL_END;
3367 uint8_t qos = THREAD_QOS_UNSPECIFIED;
3368
3369 return ipc_port_update_qos_n_iotier(port, qos, iotier);
3370 }
3371
3372 kern_return_t
ipc_port_propagate_thread_attr(ipc_port_t port,struct thread_attr_for_ipc_propagation attr)3373 ipc_port_propagate_thread_attr(
3374 ipc_port_t port,
3375 struct thread_attr_for_ipc_propagation attr)
3376 {
3377 uint8_t iotier = attr.tafip_iotier;
3378 uint8_t qos = attr.tafip_qos;
3379
3380 return ipc_port_update_qos_n_iotier(port, qos, iotier);
3381 }
3382
3383 static kern_return_t
ipc_port_update_qos_n_iotier(ipc_port_t port,uint8_t qos,uint8_t iotier)3384 ipc_port_update_qos_n_iotier(
3385 ipc_port_t port,
3386 uint8_t qos,
3387 uint8_t iotier)
3388 {
3389 if (port == IPC_PORT_NULL) {
3390 return KERN_INVALID_ARGUMENT;
3391 }
3392
3393 ip_mq_lock(port);
3394
3395 if (!ip_active(port)) {
3396 ip_mq_unlock(port);
3397 return KERN_TERMINATED;
3398 }
3399
3400 if (ip_is_special_reply_port(port)) {
3401 ip_mq_unlock(port);
3402 return KERN_INVALID_ARGUMENT;
3403 }
3404
3405 port->ip_kernel_iotier_override = iotier;
3406 port->ip_kernel_qos_override = qos;
3407
3408 if (ip_in_a_space(port) &&
3409 is_active(ip_get_receiver(port)) &&
3410 ipc_port_has_klist(port)) {
3411 KNOTE(&port->ip_klist, 0);
3412 }
3413
3414 ip_mq_unlock(port);
3415 return KERN_SUCCESS;
3416 }
3417
#if MACH_ASSERT
#include <kern/machine.h>

/*
 * Port-debugging state, compiled in only under MACH_ASSERT.
 * NOTE(review): port_count / port_count_warning are presumably maintained
 * by the db_port_walk machinery declared below — confirm at its definition.
 */
unsigned long port_count = 0;
unsigned long port_count_warning = 20000;
/* Monotonic stamp handed to each new port by ipc_port_init_debug(). */
unsigned long port_timestamp = 0;

void db_port_stack_trace(
	ipc_port_t port);
void db_ref(
	int refs);
int db_port_walk(
	unsigned int verbose,
	unsigned int display,
	unsigned int ref_search,
	unsigned int ref_target);

#ifdef MACH_BSD
extern int proc_pid(struct proc*);
#endif /* MACH_BSD */
3438
3439 /*
3440 * Initialize all of the debugging state in a port.
3441 * Insert the port into a global list of all allocated ports.
3442 */
3443 void
ipc_port_init_debug(ipc_port_t port,void * fp)3444 ipc_port_init_debug(ipc_port_t port, void *fp)
3445 {
3446 port->ip_timetrack = port_timestamp++;
3447
3448 if (ipc_portbt) {
3449 port->ip_made_bt = btref_get(fp, 0);
3450 }
3451
3452 #ifdef MACH_BSD
3453 task_t task = current_task_early();
3454 if (task != TASK_NULL) {
3455 struct proc *proc = get_bsdtask_info(task);
3456 if (proc) {
3457 port->ip_made_pid = proc_pid(proc);
3458 }
3459 }
3460 #endif /* MACH_BSD */
3461 }
3462
3463 #endif /* MACH_ASSERT */
3464