1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <mach_assert.h>
73
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <kern/backtrace.h>
77 #include <kern/debug.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/kcdata.h>
80 #include <kern/misc_protos.h>
81 #include <kern/policy_internal.h>
82 #include <kern/thread.h>
83 #include <kern/waitq.h>
84 #include <kern/host_notify.h>
85 #include <ipc/ipc_entry.h>
86 #include <ipc/ipc_space.h>
87 #include <ipc/ipc_object.h>
88 #include <ipc/ipc_right.h>
89 #include <ipc/ipc_port.h>
90 #include <ipc/ipc_pset.h>
91 #include <ipc/ipc_kmsg.h>
92 #include <ipc/ipc_mqueue.h>
93 #include <ipc/ipc_notify.h>
94 #include <ipc/ipc_importance.h>
95 #include <machine/limits.h>
96 #include <kern/task.h>
97 #include <kern/turnstile.h>
98 #include <kern/machine.h>
99
100 #include <security/mac_mach_internal.h>
101 #include <ipc/ipc_service_port.h>
102
103 #include <string.h>
104
/*
 * Minimal BSD proc-layer interface, declared locally to avoid pulling
 * bsd headers into osfmk (only an opaque pointer is needed here).
 */
typedef struct proc *proc_t;
extern boolean_t proc_is_simulated(const proc_t p);
extern struct proc *current_proc(void);

/* Boot-arg tunables: launch-time boosting and port backtrace collection. */
static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

extern zone_t ipc_kobject_label_zone;

/*
 * NOTE(review): by its name, this spinlock presumably serializes paths
 * that must lock more than one port at a time — confirm against users
 * of ipc_port_multiple_lock().
 */
LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
/* Monotonic counter handed out by ipc_port_timestamp() (see below). */
ipc_port_timestamp_t ipc_port_timestamp_data;

/* Kalloc array type backing each port's dead-name/send-possible request table. */
KALLOC_ARRAY_TYPE_DEFINE(ipc_port_request_table,
    struct ipc_port_request, KT_DEFAULT);

#if MACH_ASSERT
static void ipc_port_init_debug(ipc_port_t, void *fp);
#endif /* MACH_ASSERT */
123
/*
 *	Routine:	__ipc_port_inactive_panic
 *	Purpose:
 *		Cold, out-of-line panic path taken when an inactive (dead)
 *		port is used where an active one is required.  Marked
 *		__abortlike so callers treat it as a no-return cold branch.
 */
void __abortlike
__ipc_port_inactive_panic(ipc_port_t port)
{
	panic("Using inactive port %p", port);
}
129
/*
 *	Routine:	__ipc_port_translate_receive_panic
 *	Purpose:
 *		Cold panic path for ipc_port_translate_receive(): a receive
 *		right was found in `space` for a port whose receiver is a
 *		different space — evidence of entry-table tampering.
 */
static __abortlike void
__ipc_port_translate_receive_panic(ipc_space_t space, ipc_port_t port)
{
	panic("found receive right in space %p for port %p owned by space %p",
	    space, port, ip_get_receiver(port));
}
136
137 __abortlike void
__ipc_right_delta_overflow_panic(ipc_port_t port,natural_t * field,int delta)138 __ipc_right_delta_overflow_panic(ipc_port_t port, natural_t *field, int delta)
139 {
140 const char *what;
141 if (field == &port->ip_srights) {
142 what = "send right";
143 } else {
144 what = "send-once right";
145 }
146 panic("port %p %s count overflow (delta: %d)", port, what, delta);
147 }
148
/* Forward declarations for static helpers defined later in this file. */
static void
ipc_port_send_turnstile_recompute_push_locked(
    ipc_port_t port);

static thread_t
ipc_port_get_watchport_inheritor(
    ipc_port_t port);

static kern_return_t
ipc_port_update_qos_n_iotier(
    ipc_port_t port,
    uint8_t qos,
    uint8_t iotier);
162
/*
 *	Routine:	ipc_port_release
 *	Purpose:
 *		Drop a reference on a port (thin wrapper over ip_release
 *		for callers outside this subsystem).
 */
void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}
168
/*
 *	Routine:	ipc_port_reference
 *	Purpose:
 *		Take a reference on a port, validating the pointer first
 *		(ip_validate) so a corrupted port is caught before the
 *		refcount is touched.
 */
void
ipc_port_reference(ipc_port_t port)
{
	ip_validate(port);
	ip_reference(port);
}
175
176 /*
177 * Routine: ipc_port_timestamp
178 * Purpose:
179 * Retrieve a timestamp value.
180 */
181
ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	/* Atomically bump the global counter; returns the pre-increment value. */
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
187
188
189 /*
190 * Routine: ipc_port_translate_send
191 * Purpose:
192 * Look up a send right in a space.
193 * Conditions:
194 * Nothing locked before. If successful, the object
195 * is returned active and locked. The caller doesn't get a ref.
196 * Returns:
197 * KERN_SUCCESS Object returned locked.
198 * KERN_INVALID_TASK The space is dead.
199 * KERN_INVALID_NAME The name doesn't denote a right
200 * KERN_INVALID_RIGHT Name doesn't denote the correct right
201 */
202 kern_return_t
ipc_port_translate_send(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)203 ipc_port_translate_send(
204 ipc_space_t space,
205 mach_port_name_t name,
206 ipc_port_t *portp)
207 {
208 ipc_port_t port = IP_NULL;
209 ipc_object_t object;
210 kern_return_t kr;
211
212 kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
213 if (kr == KERN_SUCCESS) {
214 port = ip_object_to_port(object);
215 }
216 *portp = port;
217 return kr;
218 }
219
220
221 /*
222 * Routine: ipc_port_translate_receive
223 * Purpose:
224 * Look up a receive right in a space.
225 * Performs some minimal security checks against tampering.
226 * Conditions:
227 * Nothing locked before. If successful, the object
228 * is returned active and locked. The caller doesn't get a ref.
229 * Returns:
230 * KERN_SUCCESS Object returned locked.
231 * KERN_INVALID_TASK The space is dead.
232 * KERN_INVALID_NAME The name doesn't denote a right
233 * KERN_INVALID_RIGHT Name doesn't denote the correct right
234 */
235 kern_return_t
ipc_port_translate_receive(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)236 ipc_port_translate_receive(
237 ipc_space_t space,
238 mach_port_name_t name,
239 ipc_port_t *portp)
240 {
241 ipc_port_t port = IP_NULL;
242 ipc_object_t object;
243 kern_return_t kr;
244
245 kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_RECEIVE, &object);
246 if (kr == KERN_SUCCESS) {
247 /* object is locked */
248 port = ip_object_to_port(object);
249 if (!ip_in_space(port, space)) {
250 __ipc_port_translate_receive_panic(space, port);
251 }
252 }
253 *portp = port;
254 return kr;
255 }
256
257
258 /*
259 * Routine: ipc_port_request_alloc
260 * Purpose:
261 * Try to allocate a request slot.
262 * If successful, returns the request index.
263 * Otherwise returns zero.
264 * Conditions:
265 * The port is locked and active.
266 * Returns:
267 * KERN_SUCCESS A request index was found.
268 * KERN_NO_SPACE No index allocated.
269 */
270
271 kern_return_t
ipc_port_request_alloc(ipc_port_t port,mach_port_name_t name,ipc_port_t soright,ipc_port_request_opts_t options,ipc_port_request_index_t * indexp)272 ipc_port_request_alloc(
273 ipc_port_t port,
274 mach_port_name_t name,
275 ipc_port_t soright,
276 ipc_port_request_opts_t options,
277 ipc_port_request_index_t *indexp)
278 {
279 ipc_port_request_table_t table;
280 ipc_port_request_index_t index;
281 ipc_port_request_t ipr, base;
282
283 require_ip_active(port);
284 assert(name != MACH_PORT_NULL);
285 assert(soright != IP_NULL);
286
287 table = port->ip_requests;
288 if (table == NULL) {
289 return KERN_NO_SPACE;
290 }
291
292 base = ipc_port_request_table_base(table);
293 index = base->ipr_next;
294 if (index == 0) {
295 return KERN_NO_SPACE;
296 }
297
298 ipr = ipc_port_request_table_get(table, index);
299 assert(ipr->ipr_soright == IP_NULL);
300
301 base->ipr_next = ipr->ipr_next;
302 ipr->ipr_name = name;
303 ipr->ipr_soright = IPR_SOR_MAKE(soright, options);
304
305 if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
306 port->ip_sprequests == 0) {
307 port->ip_sprequests = 1;
308 }
309
310 *indexp = index;
311
312 return KERN_SUCCESS;
313 }
314
315
/*
 *	Routine:	ipc_port_request_hnotify_alloc
 *	Purpose:
 *		Try to allocate a request slot for a host-notify
 *		registration.  Unlike ipc_port_request_alloc, the slot
 *		records a host_notify_entry (under the reserved name
 *		IPR_HOST_NOTIFY) instead of a send-once right.
 *		If successful, returns the request index.
 *		Otherwise returns zero.
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		KERN_SUCCESS		A request index was found.
 *		KERN_NO_SPACE		No index allocated.
 */
328
329 kern_return_t
ipc_port_request_hnotify_alloc(ipc_port_t port,struct host_notify_entry * hnotify,ipc_port_request_index_t * indexp)330 ipc_port_request_hnotify_alloc(
331 ipc_port_t port,
332 struct host_notify_entry *hnotify,
333 ipc_port_request_index_t *indexp)
334 {
335 ipc_port_request_table_t table;
336 ipc_port_request_index_t index;
337 ipc_port_request_t ipr, base;
338
339 require_ip_active(port);
340
341 table = port->ip_requests;
342 if (table == NULL) {
343 return KERN_NO_SPACE;
344 }
345
346 base = ipc_port_request_table_base(table);
347 index = base->ipr_next;
348 if (index == 0) {
349 return KERN_NO_SPACE;
350 }
351
352 ipr = ipc_port_request_table_get(table, index);
353 assert(ipr->ipr_soright == IP_NULL);
354
355 base->ipr_next = ipr->ipr_next;
356 ipr->ipr_name = IPR_HOST_NOTIFY;
357 ipr->ipr_hnotify = hnotify;
358
359 *indexp = index;
360
361 return KERN_SUCCESS;
362 }
363
364 /*
365 * Routine: ipc_port_request_grow
366 * Purpose:
367 * Grow a port's table of requests.
368 * Conditions:
369 * The port must be locked and active.
370 * Nothing else locked; will allocate memory.
371 * Upon return the port is unlocked.
372 * Returns:
373 * KERN_SUCCESS Grew the table.
374 * KERN_SUCCESS Somebody else grew the table.
375 * KERN_SUCCESS The port died.
376 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
377 * KERN_NO_SPACE Couldn't grow to desired size
378 */
379
380 kern_return_t
ipc_port_request_grow(ipc_port_t port)381 ipc_port_request_grow(
382 ipc_port_t port)
383 {
384 ipc_port_request_table_t otable, ntable;
385 uint32_t osize, nsize;
386 uint32_t ocount, ncount;
387
388 require_ip_active(port);
389
390 otable = port->ip_requests;
391 if (otable) {
392 osize = ipc_port_request_table_size(otable);
393 } else {
394 osize = 0;
395 }
396 nsize = ipc_port_request_table_next_size(2, osize, 16);
397 if (nsize > CONFIG_IPC_TABLE_REQUEST_SIZE_MAX) {
398 nsize = CONFIG_IPC_TABLE_REQUEST_SIZE_MAX;
399 }
400 if (nsize == osize) {
401 return KERN_RESOURCE_SHORTAGE;
402 }
403
404 ip_reference(port);
405 ip_mq_unlock(port);
406
407 ntable = ipc_port_request_table_alloc_by_size(nsize, Z_WAITOK | Z_ZERO);
408 if (ntable == NULL) {
409 ip_release(port);
410 return KERN_RESOURCE_SHORTAGE;
411 }
412
413 ip_mq_lock(port);
414
415 /*
416 * Check that port is still active and that nobody else
417 * has slipped in and grown the table on us. Note that
418 * just checking if the current table pointer == otable
419 * isn't sufficient; must check ipr_size.
420 */
421
422 ocount = ipc_port_request_table_size_to_count(osize);
423 ncount = ipc_port_request_table_size_to_count(nsize);
424
425 if (ip_active(port) && port->ip_requests == otable) {
426 ipc_port_request_index_t free, i;
427
428 /* copy old table to new table */
429
430 if (otable != NULL) {
431 memcpy(ipc_port_request_table_base(ntable),
432 ipc_port_request_table_base(otable),
433 osize);
434 } else {
435 ocount = 1;
436 free = 0;
437 }
438
439 /* add new elements to the new table's free list */
440
441 for (i = ocount; i < ncount; i++) {
442 ipc_port_request_table_get_nocheck(ntable, i)->ipr_next = free;
443 free = i;
444 }
445
446 ipc_port_request_table_base(ntable)->ipr_next = free;
447 port->ip_requests = ntable;
448 ip_mq_unlock(port);
449 ip_release(port);
450
451 if (otable != NULL) {
452 ipc_port_request_table_free(&otable);
453 }
454 } else {
455 ip_mq_unlock(port);
456 ip_release(port);
457 ipc_port_request_table_free(&ntable);
458 }
459
460 return KERN_SUCCESS;
461 }
462
463 /*
464 * Routine: ipc_port_request_sparm
465 * Purpose:
466 * Arm delayed send-possible request.
467 * Conditions:
468 * The port must be locked and active.
469 *
470 * Returns TRUE if the request was armed
471 * (or armed with importance in that version).
472 */
473
/*
 * Arm the delayed send-possible request stored at `index` (no-op when
 * index is IE_REQ_NONE).  Returns TRUE only when, with importance
 * inheritance compiled in, the caller should donate an importance
 * boost; without IMPORTANCE_INHERITANCE it returns TRUE whenever a
 * send-possible request was armed.  Port must be locked and active.
 */
boolean_t
ipc_port_request_sparm(
	ipc_port_t port,
	__assert_only mach_port_name_t name,
	ipc_port_request_index_t index,
	mach_msg_option_t option,
	mach_msg_priority_t priority)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_table_t table;
		ipc_port_request_t ipr;

		require_ip_active(port);

		table = port->ip_requests;
		assert(table != NULL);

		ipr = ipc_port_request_table_get(table, index);
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			/* tag the stored right as armed and flag the port */
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				mach_msg_qos_t qos_ovr;
				/* priority may be a pthread priority or a raw QoS override */
				if (mach_msg_priority_is_pthread_priority(priority)) {
					qos_ovr = _pthread_priority_thread_qos(priority);
				} else {
					/* note: "overide" spelling is the actual API name */
					qos_ovr = mach_msg_priority_overide_qos(priority);
				}
				if (qos_ovr) {
					ipc_mqueue_override_send_locked(&port->ip_messages, qos_ovr);
				}
			}

#if IMPORTANCE_INHERITANCE
			/*
			 * Donate importance only when the sender didn't opt out,
			 * the port accepts donations, none is pending yet, and
			 * the sender either forces importance or is a donor task.
			 */
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			    (task_is_importance_donor(current_task())))) {
				return TRUE;
			}
#else
			return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return FALSE;
}
527
528 /*
529 * Routine: ipc_port_request_type
530 * Purpose:
531 * Determine the type(s) of port requests enabled for a name.
532 * Conditions:
533 * The port must be locked or inactive (to avoid table growth).
534 * The index must not be IE_REQ_NONE and for the name in question.
535 */
536 mach_port_type_t
ipc_port_request_type(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)537 ipc_port_request_type(
538 ipc_port_t port,
539 __assert_only mach_port_name_t name,
540 ipc_port_request_index_t index)
541 {
542 ipc_port_request_table_t table;
543 ipc_port_request_t ipr;
544 mach_port_type_t type = 0;
545
546 table = port->ip_requests;
547 assert(table != NULL);
548
549 assert(index != IE_REQ_NONE);
550 ipr = ipc_port_request_table_get(table, index);
551 assert(ipr->ipr_name == name);
552
553 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
554 type |= MACH_PORT_TYPE_DNREQUEST;
555
556 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
557 type |= MACH_PORT_TYPE_SPREQUEST;
558
559 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
560 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
561 }
562 }
563 }
564 return type;
565 }
566
/*
 *	Routine:	ipc_port_request_cancel
 *	Purpose:
 *		Cancel a dead-name/send-possible request and return the send-once right.
 *	Conditions:
 *		The port must be locked and active.
 *		The index must not be IE_REQ_NONE and must correspond with name.
 */
575
576 ipc_port_t
ipc_port_request_cancel(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)577 ipc_port_request_cancel(
578 ipc_port_t port,
579 __assert_only mach_port_name_t name,
580 ipc_port_request_index_t index)
581 {
582 ipc_port_request_table_t table;
583 ipc_port_request_t base, ipr;
584 ipc_port_t request = IP_NULL;
585
586 require_ip_active(port);
587 table = port->ip_requests;
588 base = ipc_port_request_table_base(table);
589 assert(table != NULL);
590
591 assert(index != IE_REQ_NONE);
592 ipr = ipc_port_request_table_get(table, index);
593 assert(ipr->ipr_name == name);
594 request = IPR_SOR_PORT(ipr->ipr_soright);
595
596 /* return ipr to the free list inside the table */
597 ipr->ipr_next = base->ipr_next;
598 ipr->ipr_soright = IP_NULL;
599 base->ipr_next = index;
600
601 return request;
602 }
603
604
605 /*
606 * Routine: ipc_port_nsrequest
607 * Purpose:
608 * Make a no-senders request, returning the
609 * previously registered send-once right.
610 * Just cancels the previous request if notify is IP_NULL.
611 * Conditions:
612 * The port is locked and active. It is unlocked.
613 * Consumes a ref for notify (if non-null), and
614 * returns previous with a ref (if non-null).
615 */
616
/*
 * Register (or cancel, when notify is IP_NULL) a no-senders request.
 * If there are already no senders and the sync threshold is satisfied,
 * fires the notification immediately instead of registering.  Entered
 * with the port locked and active; unlocks it.  Consumes the ref for
 * `notify` and hands back the previously registered right in
 * *previousp with its ref.
 */
void
ipc_port_nsrequest(
	ipc_port_t port,
	mach_port_mscount_t sync,
	ipc_port_t notify,
	ipc_port_t *previousp)
{
	ipc_port_t previous;
	mach_port_mscount_t mscount;
	require_ip_active(port);

	/* kernel-space ports use the armed-kobject scheme, not this path */
	assert(!ip_in_space(port, ipc_space_kernel));
	assert(port->ip_nsrequest != IP_KOBJECT_NSREQUEST_ARMED);

	previous = port->ip_nsrequest;
	mscount = port->ip_mscount;

	if ((port->ip_srights == 0) && (sync <= mscount) &&
	    (notify != IP_NULL)) {
		/* no senders right now: deliver the notification immediately */
		port->ip_nsrequest = IP_NULL;
		ip_mq_unlock(port);
		ipc_notify_no_senders(notify, mscount, /* kobject */ false);
	} else {
		/* arm (or clear, when notify is IP_NULL) the request */
		port->ip_nsrequest = notify;
		ip_mq_unlock(port);
	}

	*previousp = previous;
}
646
647
648 /*
649 * Routine: ipc_port_clear_receiver
650 * Purpose:
651 * Prepares a receive right for transmission/destruction,
652 * optionally performs mqueue destruction (with port lock held)
653 *
654 * Conditions:
655 * The port is locked and active.
656 * Returns:
657 * If should_destroy is TRUE, then the return value indicates
658 * whether the caller needs to reap kmsg structures that should
659 * be destroyed (by calling ipc_kmsg_reap_delayed)
660 *
661 * If should_destroy is FALSE, this always returns FALSE
662 */
663
/*
 * Strip the receive-right state from a port that is being moved or
 * destroyed: unlink from port sets, wake blocked receivers, clear
 * mscount/seqno/guard state, and — when should_destroy is TRUE — mark
 * the port inactive and tear down its message queue.  Returns whether
 * the caller must reap deferred kmsgs (always FALSE when
 * should_destroy is FALSE).  Entered and exited with the port locked
 * and (on entry) active.
 */
boolean_t
ipc_port_clear_receiver(
	ipc_port_t port,
	boolean_t should_destroy,
	waitq_link_list_t *free_l)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	boolean_t reap_messages = FALSE;

	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the write space lock or the receive entry has
	 * been deleted, so even though this acquires and releases
	 * the port lock, we know we won't be added to any other sets.
	 */
	if (ip_in_pset(port)) {
		waitq_unlink_all_locked(&port->ip_waitq, NULL, free_l);
		assert(!ip_in_pset(port));
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount, seqno, guard bits
	 */
	if (ip_in_a_space(port)) {
		ipc_mqueue_changed(ip_get_receiver(port), &port->ip_waitq);
	} else {
		ipc_mqueue_changed(NULL, &port->ip_waitq);
	}
	port->ip_mscount = 0;
	mqueue->imq_seqno = 0;
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;

	/*
	 * clear the immovable bit so the port can move back to anyone listening
	 * for the port destroy notification.
	 */
	port->ip_immovable_receive = 0;

	if (should_destroy) {
		/*
		 * Mark the port and mqueue invalid, preventing further send/receive
		 * operations from succeeding. It's important for this to be
		 * done under the same lock hold as the ipc_mqueue_changed
		 * call to avoid additional threads blocking on an mqueue
		 * that's being destroyed.
		 *
		 * The port active bit needs to be guarded under mqueue lock for
		 * turnstiles
		 */

		/* port transitions to INACTIVE state */
		io_bits_andnot(ip_to_object(port), IO_BITS_ACTIVE);
		port->ip_receiver_name = MACH_PORT_NULL;
		/* timestamp lets dead-name detection order death vs. right copies */
		port->ip_timestamp = ipc_port_timestamp();

		reap_messages = ipc_mqueue_destroy_locked(mqueue, free_l);
	} else {
		/* port transitions to IN-LIMBO state */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
	}

	return reap_messages;
}
729
730 /*
731 * Routine: ipc_port_init
732 * Purpose:
733 * Initializes a newly-allocated port.
734 *
735 * The memory is expected to be zero initialized (allocated with Z_ZERO).
736 */
737
/*
 * Initialize a freshly allocated (Z_ZERO'd) port: waitq/mqueue setup,
 * flag-driven reply-port and tracking bits, then transition the port
 * to the IN-SPACE state under (space, name).  When
 * IPC_PORT_INIT_MAKE_SEND_RIGHT is set, the port starts with one send
 * right already made.
 */
void
ipc_port_init(
	ipc_port_t port,
	ipc_space_t space,
	ipc_port_init_flags_t flags,
	mach_port_name_t name)
{
	int policy = SYNC_POLICY_FIFO;
	task_t task = TASK_NULL;

	/* the port has been 0 initialized when called */

	if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
		io_bits_or(ip_to_object(port), IP_BIT_FILTER_MSG);
	}
	if (flags & IPC_PORT_INIT_LOCKED) {
		policy |= SYNC_POLICY_INIT_LOCKED;
	}

	/* must be done first, many ip_* bits live inside the waitq */
	waitq_init(&port->ip_waitq, WQT_PORT, policy);
	if (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) {
		port->ip_tg_block_tracking = true;
	}
	if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
		port->ip_specialreply = true;
	}
	if ((flags & IPC_PORT_INIT_REPLY) || (flags & IPC_PORT_INIT_SPECIAL_REPLY)) {
		task = current_task_early();

		/* Strict enforcement of reply port semantics are disabled for 3p - rdar://97441265. */
		if (task && task_get_platform_binary(task)) {
			/* platform binaries: immovable, fully marked reply port */
			port->ip_immovable_receive = true;
			ip_mark_reply_port(port);
		} else {
			/* third-party (or no task yet): provisional only */
			ip_mark_provisional_reply_port(port);
		}
	}
	if (flags & IPC_PORT_ENFORCE_REPLY_PORT_SEMANTICS) {
		ip_enforce_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_ENFORCE_STRICT_REPLY_PORT_SEMANTICS) {
		ip_enforce_strict_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_INIT_PROVISIONAL_REPLY) {
		ip_mark_provisional_reply_port(port);
	}

	/* "no override" defaults for the kernel QoS / IO-tier overrides */
	port->ip_kernel_qos_override = THREAD_QOS_UNSPECIFIED;
	port->ip_kernel_iotier_override = THROTTLE_LEVEL_END;

	ipc_mqueue_init(&port->ip_messages);
#if MACH_ASSERT
	/* record the allocation site for debugging */
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif /* MACH_ASSERT */

	/* port transitions to IN-SPACE state */
	port->ip_receiver_name = name;
	port->ip_receiver = space;

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		port->ip_srights = 1;
		port->ip_mscount = 1;
	}
}
803
804 /*
805 * Routine: ipc_port_alloc
806 * Purpose:
807 * Allocate a port.
808 * Conditions:
809 * Nothing locked. If successful, the port is returned
810 * locked. (The caller doesn't have a reference.)
811 * Returns:
812 * KERN_SUCCESS The port is allocated.
813 * KERN_INVALID_TASK The space is dead.
814 * KERN_NO_SPACE No room for an entry in the space.
815 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
816 */
817
818 kern_return_t
ipc_port_alloc(ipc_space_t space,ipc_port_init_flags_t flags,mach_port_name_t * namep,ipc_port_t * portp)819 ipc_port_alloc(
820 ipc_space_t space,
821 ipc_port_init_flags_t flags,
822 mach_port_name_t *namep,
823 ipc_port_t *portp)
824 {
825 ipc_port_t port;
826 mach_port_name_t name;
827 kern_return_t kr;
828 mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
829 mach_port_urefs_t urefs = 0;
830
831 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
832 type |= MACH_PORT_TYPE_SEND;
833 urefs = 1;
834 }
835 kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
836 &name, (ipc_object_t *) &port);
837 if (kr != KERN_SUCCESS) {
838 return kr;
839 }
840
841 /* space is locked */
842 ipc_port_init(port, space, flags | IPC_PORT_INIT_LOCKED, name);
843 /* port is locked */
844 #if MACH_ASSERT
845 ipc_port_init_debug(port, __builtin_frame_address(0));
846 #endif /* MACH_ASSERT */
847
848 /* unlock space after init */
849 is_write_unlock(space);
850
851 *namep = name;
852 *portp = port;
853
854 return KERN_SUCCESS;
855 }
856
857 /*
858 * Routine: ipc_port_alloc_name
859 * Purpose:
860 * Allocate a port, with a specific name.
861 * Conditions:
862 * Nothing locked. If successful, the port is returned
863 * locked. (The caller doesn't have a reference.)
864 * Returns:
865 * KERN_SUCCESS The port is allocated.
866 * KERN_INVALID_TASK The space is dead.
867 * KERN_NAME_EXISTS The name already denotes a right.
868 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
869 */
870
871 kern_return_t
ipc_port_alloc_name(ipc_space_t space,ipc_port_init_flags_t flags,mach_port_name_t name,ipc_port_t * portp)872 ipc_port_alloc_name(
873 ipc_space_t space,
874 ipc_port_init_flags_t flags,
875 mach_port_name_t name,
876 ipc_port_t *portp)
877 {
878 mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
879 mach_port_urefs_t urefs = 0;
880
881 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
882 type |= MACH_PORT_TYPE_SEND;
883 urefs = 1;
884 }
885 flags |= IPC_PORT_INIT_LOCKED;
886
887 return ipc_object_alloc_name(space, IOT_PORT, type, urefs,
888 name, (ipc_object_t *)portp, ^(ipc_object_t object){
889 ipc_port_init(ip_object_to_port(object), space, flags, name);
890 });
891 }
892
893 /*
894 * Routine: ipc_port_spnotify
895 * Purpose:
896 * Generate send-possible port notifications.
897 * Conditions:
898 * Nothing locked, reference held on port.
899 */
/*
 * Deliver all armed send-possible notifications for this port.  The
 * port lock is dropped for each notification send, so the scan
 * restarts (goto revalidate) after every delivery; `size` is latched
 * on first entry so slots added later are not visited.  Called with
 * nothing locked and a reference held on the port.
 */
void
ipc_port_spnotify(
	ipc_port_t port)
{
	ipc_port_request_index_t index = 0;
	ipc_table_elems_t size = 0;

	/*
	 * If the port has no send-possible request
	 * armed, don't bother to lock the port.
	 */
	if (port->ip_sprequests == 0) {
		return;
	}

	ip_mq_lock(port);

#if IMPORTANCE_INHERITANCE
	if (port->ip_spimportant != 0) {
		port->ip_spimportant = 0;
		/* importance_delta may drop the port lock; TRUE means it did */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
			ip_mq_lock(port);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* re-check: the flag may have been consumed while unlocked above */
	if (port->ip_sprequests == 0) {
		ip_mq_unlock(port);
		return;
	}
	port->ip_sprequests = 0;

revalidate:
	if (ip_active(port)) {
		ipc_port_request_table_t requests;

		/* table may change each time port unlocked (reload) */
		requests = port->ip_requests;
		assert(requests != NULL);

		/*
		 * no need to go beyond table size when first
		 * we entered - those are future notifications.
		 */
		if (size == 0) {
			size = ipc_port_request_table_count(requests);
		}

		/* no need to backtrack either */
		while (++index < size) {
			ipc_port_request_t ipr = ipc_port_request_table_get_nocheck(requests, index);
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
			boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

			if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
				/* claim send-once right - slot still inuse */
				assert(name != IPR_HOST_NOTIFY);
				ipr->ipr_soright = IP_NULL;
				ip_mq_unlock(port);

				ipc_notify_send_possible(soright, name);

				ip_mq_lock(port);
				/* lock was dropped: re-read table state from the top */
				goto revalidate;
			}
		}
	}
	ip_mq_unlock(port);
	return;
}
971
972 /*
973 * Routine: ipc_port_dnnotify
974 * Purpose:
975 * Generate dead name notifications for
976 * all outstanding dead-name and send-
977 * possible requests.
978 * Conditions:
979 * Nothing locked.
980 * Port must be inactive.
981 * Reference held on port.
982 */
/*
 * Walk a dead port's request table and deliver dead-name
 * notifications for every registered send-once right; host-notify
 * slots are cancelled instead.  Called with nothing locked, the port
 * inactive, and a reference held — no lock is needed because the port
 * is already dead.
 */
void
ipc_port_dnnotify(
	ipc_port_t port)
{
	ipc_port_request_table_t requests = port->ip_requests;

	assert(!ip_active(port));
	if (requests != NULL) {
		ipc_port_request_t ipr = ipc_port_request_table_base(requests);

		while ((ipr = ipc_port_request_table_next_elem(requests, ipr))) {
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright;

			switch (name) {
			case MACH_PORT_DEAD:
			case MACH_PORT_NULL:
				/* free or already-cancelled slot: skip */
				break;
			case IPR_HOST_NOTIFY:
				/* host-notify slots hold an entry, not a right */
				host_notify_cancel(ipr->ipr_hnotify);
				break;
			default:
				soright = IPR_SOR_PORT(ipr->ipr_soright);
				if (IP_VALID(soright)) {
					ipc_notify_dead_name(soright, name);
				}
				break;
			}
		}
	}
}
1014
1015 /*
1016 * Routine: ipc_port_destroy
1017 * Purpose:
1018 * Destroys a port. Cleans up queued messages.
1019 *
1020 * If the port has a backup, it doesn't get destroyed,
1021 * but is sent in a port-destroyed notification to the backup.
1022 * Conditions:
1023 * The port is locked and alive; nothing else locked.
1024 * The caller has a reference, which is consumed.
1025 * Afterwards, the port is unlocked and dead.
1026 */
1027
/*
 * See the routine header above: the caller's reference on `port` is
 * consumed, and the port is inactive and unlocked on return.
 */
void
ipc_port_destroy(ipc_port_t port)
{
	bool special_reply = port->ip_specialreply;
	bool service_port = port->ip_service_port;
	bool reap_msgs;

	/* armed port-destroyed notification, if any; we take over its ref */
	ipc_port_t pdrequest = IP_NULL;
	/* watchport element stolen from the port, deallocated after unlock */
	struct task_watchport_elem *twe = NULL;
	waitq_link_list_t free_l = { };

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	/* `top` is true when we are not nested inside another IPC teardown */
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	require_ip_active(port);
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* clear any reply-port context */
	port->ip_reply_context = 0;

	/* must be done before we access ip_pdrequest */
	twe = ipc_port_clear_watchport_elem_internal(port);
	assert(!port->ip_has_watchport);

	if (!special_reply) {
		/* we assume the ref for pdrequest */
		pdrequest = port->ip_pdrequest;
		port->ip_pdrequest = IP_NULL;
	} else if (port->ip_tempowner) {
		/* special reply ports must never be importance tempowners */
		panic("ipc_port_destroy: invalid state");
	}

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = ip_get_imp_task(port);
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL) {
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
		}
	}

	if (top) {
		/* stash the count on the thread until the drop below */
		self->ith_assertions = assertcnt;
	}
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * If no port-destroyed notification is armed, calling
	 * ipc_port_clear_receiver() will mark the port inactive
	 * and will wakeup any threads which may be blocked receiving on it.
	 */
	reap_msgs = ipc_port_clear_receiver(port, pdrequest == IP_NULL, &free_l);
	assert(!ip_in_pset(port));
	assert(port->ip_mscount == 0);

	/*
	 * Handle port-destroyed notification
	 */
	if (pdrequest != IP_NULL) {
		/* port stays alive: it is sent to the pdrequest holder instead */
		assert(reap_msgs == false);

		if (service_port) {
			assert(port->ip_splabel != NULL);
			if (ipc_service_port_label_is_special_pdrequest((ipc_service_port_label_t)port->ip_splabel)) {
				ipc_service_port_label_set_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
			}
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
	} else {
		ipc_service_port_label_t splabel = NULL;
		ipc_notify_nsenders_t nsrequest;

		/* capture any armed no-senders notification before teardown */
		nsrequest = ipc_notify_no_senders_prepare(port);

		/*
		 * ip_splabel is unioned with the kobject label;
		 * only steal it for non-kobject ports.
		 */
		if (!ip_is_kolabeled(port)) {
			splabel = port->ip_splabel;
			port->ip_splabel = NULL;
			port->ip_service_port = false;
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* unlink the kmsg from special reply port */
		if (special_reply) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
		}

		/* Deallocate the service/connection port label */
		if (splabel) {
			ipc_service_port_label_dealloc(splabel, service_port);
			splabel = NULL;
		}

		if (reap_msgs) {
			ipc_kmsg_reap_delayed();
		}

		if (nsrequest.ns_notify) {
			/*
			 * ipc_notify_no_senders_prepare will consume
			 * the reference for kobjects.
			 */
			assert(!nsrequest.ns_is_kobject);
			ip_mq_lock(nsrequest.ns_notify);
			ipc_notify_send_once_and_unlock(nsrequest.ns_notify); /* consumes ref */
		}

		/* generate dead-name notifications */
		ipc_port_dnnotify(port);

		ipc_kobject_destroy(port);

		ip_release(port); /* consume caller's ref */
	}

	/* deferred cleanups that must run with no port lock held */
	if (twe) {
		task_watchport_elem_deallocate(twe);
		twe = NULL;
	}

	waitq_link_free_list(WQT_PORT_SET, &free_l);

#if IMPORTANCE_INHERITANCE
	if (release_imp_task != IIT_NULL) {
		/* tempowner case: drop assertions on the stashed boost holder */
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);
	} else if (assertcnt > 0) {
		/* otherwise the current task held the assertions */
		if (top) {
			self->ith_assertions = 0;
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
1191
1192 /*
1193 * Routine: ipc_port_destination_chain_lock
1194 * Purpose:
1195 * Search for the end of the chain (a port not in transit),
1196 * acquiring locks along the way, and return it in `base`.
1197 *
1198 * Returns true if a reference was taken on `base`
1199 *
1200 * Conditions:
1201 * No ports locked.
1202 * ipc_port_multiple_lock held.
1203 */
boolean_t
ipc_port_destination_chain_lock(
	ipc_port_t port,
	ipc_port_t *base)
{
	for (;;) {
		ip_mq_lock(port);

		if (!ip_active(port)) {
			/*
			 * Active ports that are ip_mq_lock()ed cannot go away.
			 *
			 * But inactive ports at the end of walking
			 * an ip_destination chain are only protected
			 * from space termination cleanup while the entire
			 * chain of ports leading to them is held.
			 *
			 * Callers of this code tend to unlock the chain
			 * in the same order as this walk which doesn't
			 * protect `base` properly when it's inactive.
			 *
			 * In that case, take a reference that the caller
			 * is responsible for releasing.
			 */
			ip_reference(port);
			*base = port;
			return true;
		}

		/* port is active */
		if (!ip_in_transit(port)) {
			/* found the end of the chain; no extra ref needed */
			*base = port;
			return false;
		}

		/* keep walking, leaving every intermediate port locked */
		port = ip_get_destination(port);
	}
}
1242
1243
1244 /*
1245 * Routine: ipc_port_check_circularity
1246 * Purpose:
1247 * Check if queueing "port" in a message for "dest"
1248 * would create a circular group of ports and messages.
1249 *
1250 * If no circularity (FALSE returned), then "port"
1251 * is changed from "in limbo" to "in transit".
1252 *
1253 * That is, we want to set port->ip_destination == dest,
1254 * but guaranteeing that this doesn't create a circle
1255 * port->ip_destination->ip_destination->... == port
1256 *
1257 * Conditions:
1258 * No ports locked. References held for "port" and "dest".
1259 */
1260
boolean_t
ipc_port_check_circularity(
	ipc_port_t port,
	ipc_port_t dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* trivial self-cycle */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 * First try a quick check that can run in parallel.
	 * No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);
	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			/* fast path: both locks held, jump to the linkage */
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 * Search for the end of the chain (a port not in transit),
	 * acquiring locks along the way.
	 */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */
		require_ip_active(port);
		assert(ip_in_limbo(port));
		/* a limbo port is active, so chain_lock took no ref on it */
		assert(!took_base_ref);

		/* unwind: unlock the whole chain from dest down to base */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */
			require_ip_active(base);
			assert(!ip_in_a_space(base));

			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		/* give back the turnstile ref taken by the prepare above */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 * The guarantee: lock port while the entire chain is locked.
	 * Once port is locked, we can take a reference to dest,
	 * add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	/* on entry here: port and the dest..base chain are locked */
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Clear the watchport boost */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	/* ref held by port->ip_destination below */
	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/*
		 * What ipc_port_adjust_port_locked would do,
		 * but we need to also drop even more locks before
		 * calling turnstile_update_inheritor_complete().
		 */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	/* unlock every port between dest (exclusive end: base) */
	for (;;) {
		ipc_port_t next;

		if (dest == base) {
			break;
		}

		/* port is IN-TRANSIT */
		require_ip_active(dest);
		assert(ip_in_transit(dest));

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not IN-TRANSIT */
	assert(!ip_in_transit(base));

	ip_mq_unlock(base);
	if (took_base_ref) {
		/* ref taken by ipc_port_destination_chain_lock on inactive base */
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
1431
1432 /*
1433 * Routine: ipc_port_watchport_elem
1434 * Purpose:
1435 * Get the port's watchport elem field
1436 *
1437 * Conditions:
1438 * port locked
1439 */
1440 static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)1441 ipc_port_watchport_elem(ipc_port_t port)
1442 {
1443 if (port->ip_has_watchport) {
1444 assert(!port->ip_specialreply);
1445 return port->ip_twe;
1446 }
1447 return NULL;
1448 }
1449
1450 /*
1451 * Routine: ipc_port_update_watchport_elem
1452 * Purpose:
1453 * Set the port's watchport elem field
1454 *
1455 * Conditions:
1456 * port locked and is not a special reply port.
1457 */
static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
{
	struct task_watchport_elem *old_we;
	ipc_port_t pdrequest;

	assert(!port->ip_specialreply);

	/*
	 * Note: ip_pdrequest and ip_twe are unioned.
	 * and ip_has_watchport controls the union "type"
	 */
	if (port->ip_has_watchport) {
		/* the pdrequest is stashed inside the old watchport elem */
		old_we = port->ip_twe;
		pdrequest = old_we->twe_pdrequest;
		old_we->twe_pdrequest = IP_NULL;
	} else {
		old_we = NULL;
		pdrequest = port->ip_pdrequest;
	}

	/* re-store pdrequest according to the union's new "type" */
	if (we) {
		port->ip_has_watchport = true;
		we->twe_pdrequest = pdrequest;
		port->ip_twe = we;
	} else {
		port->ip_has_watchport = false;
		port->ip_pdrequest = pdrequest;
	}

	/* caller owns the returned (replaced) element, if any */
	return old_we;
}
1490
1491 /*
1492 * Routine: ipc_special_reply_stash_pid_locked
1493 * Purpose:
1494 * Set the pid of process that copied out send once right to special reply port.
1495 *
1496 * Conditions:
1497 * port locked
1498 */
1499 static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port,int pid)1500 ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
1501 {
1502 assert(port->ip_specialreply);
1503 port->ip_pid = pid;
1504 }
1505
1506 /*
1507 * Routine: ipc_special_reply_get_pid_locked
1508 * Purpose:
1509 * Get the pid of process that copied out send once right to special reply port.
1510 *
1511 * Conditions:
1512 * port locked
1513 */
1514 int
ipc_special_reply_get_pid_locked(ipc_port_t port)1515 ipc_special_reply_get_pid_locked(ipc_port_t port)
1516 {
1517 assert(port->ip_specialreply);
1518 return port->ip_pid;
1519 }
1520
1521 /*
1522 * Update the recv turnstile inheritor for a port.
1523 *
1524 * Sync IPC through the port receive turnstile only happens for the special
1525 * reply port case. It has three sub-cases:
1526 *
1527 * 1. a send-once right is in transit, and pushes on the send turnstile of its
1528 * destination mqueue.
1529 *
1530 * 2. a send-once right has been stashed on a knote it was copied out "through",
1531 * as the first such copied out port.
1532 *
1533 * 3. a send-once right has been stashed on a knote it was copied out "through",
1534 * as the second or more copied out port.
1535 */
1536 void
ipc_port_recv_update_inheritor(ipc_port_t port,struct turnstile * rcv_turnstile,turnstile_update_flags_t flags)1537 ipc_port_recv_update_inheritor(
1538 ipc_port_t port,
1539 struct turnstile *rcv_turnstile,
1540 turnstile_update_flags_t flags)
1541 {
1542 struct turnstile *inheritor = TURNSTILE_NULL;
1543 struct knote *kn;
1544
1545 if (ip_active(port) && port->ip_specialreply) {
1546 ip_mq_lock_held(port);
1547
1548 switch (port->ip_sync_link_state) {
1549 case PORT_SYNC_LINK_PORT:
1550 if (port->ip_sync_inheritor_port != NULL) {
1551 inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1552 }
1553 break;
1554
1555 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1556 kn = port->ip_sync_inheritor_knote;
1557 inheritor = filt_ipc_kqueue_turnstile(kn);
1558 break;
1559
1560 case PORT_SYNC_LINK_WORKLOOP_STASH:
1561 inheritor = port->ip_sync_inheritor_ts;
1562 break;
1563 }
1564 }
1565
1566 turnstile_update_inheritor(rcv_turnstile, inheritor,
1567 flags | TURNSTILE_INHERITOR_TURNSTILE);
1568 }
1569
1570 /*
1571 * Update the send turnstile inheritor for a port.
1572 *
1573 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
1574 *
1575 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
1576 * to push on thread doing the sync ipc.
1577 *
1578 * 2. a receive right is in transit, and pushes on the send turnstile of its
1579 * destination mqueue.
1580 *
1581 * 3. port was passed as an exec watchport and port is pushing on main thread
1582 * of the task.
1583 *
1584 * 4. a receive right has been stashed on a knote it was copied out "through",
1585 * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
1586 * for the special reply port)
1587 *
1588 * 5. a receive right has been stashed on a knote it was copied out "through",
1589 * as the second or more copied out port (same as
1590 * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
1591 *
1592 * 6. a receive right has been copied out as a part of sync bootstrap checkin
1593 * and needs to push on thread doing the sync bootstrap checkin.
1594 *
1595 * 7. the receive right is monitored by a knote, and pushes on any that is
1596 * registered on a workloop. filt_machport makes sure that if such a knote
1597 * exists, it is kept as the first item in the knote list, so we never need
1598 * to walk.
1599 */
void
ipc_port_send_update_inheritor(
	ipc_port_t port,
	struct turnstile *send_turnstile,
	turnstile_update_flags_t flags)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct knote *kn;
	turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

	ip_mq_lock_held(port);

	/* the order of these tests matches the case numbering above */
	if (!ip_active(port)) {
		/* this port is no longer active, it should not push anywhere */
	} else if (port->ip_specialreply) {
		/* Case 1. */
		if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
			inheritor = port->ip_messages.imq_srp_owner_thread;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (ip_in_transit(port)) {
		/* Case 2. */
		inheritor = port_send_turnstile(ip_get_destination(port));
	} else if (port->ip_has_watchport) {
		/* Case 3. */
		if (prioritize_launch) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = ipc_port_get_watchport_inheritor(port);
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
		/* Case 4. */
		inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
		/* Case 5. */
		inheritor = mqueue->imq_inheritor_turnstile;
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
		/* Case 6. */
		if (prioritize_launch) {
			inheritor = port->ip_messages.imq_inheritor_thread_ref;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if ((kn = SLIST_FIRST(&port->ip_klist))) {
		/* Case 7. Push on a workloop that is interested */
		/* (assignment in the condition above is deliberate) */
		if (filt_machport_kqueue_has_turnstile(kn)) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = filt_ipc_kqueue_turnstile(kn);
		}
	}

	turnstile_update_inheritor(send_turnstile, inheritor,
	    flags | inheritor_flags);
}
1654
1655 /*
1656 * Routine: ipc_port_send_turnstile_prepare
1657 * Purpose:
1658 * Get a reference on port's send turnstile, if
1659 * port does not have a send turnstile then allocate one.
1660 *
1661 * Conditions:
1662 * Nothing is locked.
1663 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;
	struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
	ip_mq_lock(port);

	if (port_send_turnstile(port) == NULL ||
	    port_send_turnstile(port)->ts_prim_count == 0) {
		if (turnstile == TURNSTILE_NULL) {
			/*
			 * Cannot allocate while holding the port lock:
			 * drop it, allocate, and retry from the top (the
			 * state may have changed while unlocked).
			 */
			ip_mq_unlock(port);
			turnstile = turnstile_alloc();
			goto retry_alloc;
		}

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    turnstile, TURNSTILE_SYNC_IPC);
		/* turnstile_prepare consumed our allocation */
		turnstile = TURNSTILE_NULL;

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);

		/* turnstile complete will be called in ipc_port_send_turnstile_complete */
	}

	/* Increment turnstile counter */
	port_send_turnstile(port)->ts_prim_count++;
	ip_mq_unlock(port);

	if (send_turnstile) {
		/* finish the inheritor update now that the lock is dropped */
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
	}
	if (turnstile != TURNSTILE_NULL) {
		/* someone else installed a turnstile first; free our spare */
		turnstile_deallocate(turnstile);
	}
}
1704
1705
1706 /*
1707 * Routine: ipc_port_send_turnstile_complete
1708 * Purpose:
1709 * Drop a ref on the port's send turnstile, if the
1710 * ref becomes zero, deallocate the turnstile.
1711 *
1712 * Conditions:
1713 * The space might be locked, use safe deallocate.
1714 */
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;

	/* Drop turnstile count on dest port */
	ip_mq_lock(port);

	port_send_turnstile(port)->ts_prim_count--;
	if (port_send_turnstile(port)->ts_prim_count == 0) {
		/* last reference: detach the turnstile from the port */
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
		    &turnstile, TURNSTILE_SYNC_IPC);
		assert(turnstile != TURNSTILE_NULL);
	}
	ip_mq_unlock(port);
	turnstile_cleanup();

	if (turnstile != TURNSTILE_NULL) {
		/* safe variant: the space lock might be held by the caller */
		turnstile_deallocate_safe(turnstile);
		turnstile = TURNSTILE_NULL;
	}
}
1737
1738 /*
1739 * Routine: ipc_port_rcv_turnstile
1740 * Purpose:
1741 * Get the port's receive turnstile
1742 *
1743 * Conditions:
1744 * mqueue locked or thread waiting on turnstile is locked.
1745 */
1746 static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)1747 ipc_port_rcv_turnstile(ipc_port_t port)
1748 {
1749 return *port_rcv_turnstile_address(port);
1750 }
1751
1752
1753 /*
1754 * Routine: ipc_port_link_special_reply_port
1755 * Purpose:
1756 * Link the special reply port with the destination port.
1757 * Allocates turnstile to dest port.
1758 *
1759 * Conditions:
1760 * Nothing is locked.
1761 */
void
ipc_port_link_special_reply_port(
	ipc_port_t special_reply_port,
	ipc_port_t dest_port,
	boolean_t sync_bootstrap_checkin)
{
	boolean_t drop_turnstile_ref = FALSE;
	boolean_t special_reply = FALSE;

	/* Check if dest_port needs a turnstile */
	ipc_port_send_turnstile_prepare(dest_port);

	/* Lock the special reply port and establish the linkage */
	ip_mq_lock(special_reply_port);

	special_reply = special_reply_port->ip_specialreply;

	if (sync_bootstrap_checkin && special_reply) {
		special_reply_port->ip_sync_bootstrap_checkin = 1;
	}

	/* Check if we need to drop the acquired turnstile ref on dest port */
	if (!special_reply ||
	    special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
	    special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
		/* not eligible for linkage; undo the prepare below, unlocked */
		drop_turnstile_ref = TRUE;
	} else {
		/* take a reference on dest_port */
		ip_reference(dest_port);
		special_reply_port->ip_sync_inheritor_port = dest_port;
		special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
	}

	ip_mq_unlock(special_reply_port);

	if (special_reply) {
		/*
		 * For special reply ports, if the destination port is
		 * marked with the thread group blocked tracking flag,
		 * callout to the performance controller.
		 */
		ipc_port_thread_group_blocked(dest_port);
	}

	if (drop_turnstile_ref) {
		ipc_port_send_turnstile_complete(dest_port);
	}

	return;
}
1812
1813 /*
1814 * Routine: ipc_port_thread_group_blocked
1815 * Purpose:
1816 * Call thread_group_blocked callout if the port
1817 * has ip_tg_block_tracking bit set and the thread
1818 * has not made this callout already.
1819 *
1820 * Conditions:
1821 * Nothing is locked.
1822 */
void
ipc_port_thread_group_blocked(ipc_port_t port __unused)
{
#if CONFIG_THREAD_GROUPS
	bool port_tg_block_tracking = false;
	thread_t self = current_thread();

	/* skip if the thread has no group or already made the callout */
	if (self->thread_group == NULL ||
	    (self->options & TH_OPT_IPC_TG_BLOCKED)) {
		return;
	}

	/* only ports opted into blocked-tracking trigger the callout */
	port_tg_block_tracking = port->ip_tg_block_tracking;
	if (!port_tg_block_tracking) {
		return;
	}

	machine_thread_group_blocked(self->thread_group, NULL,
	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

	/* remember we called out, so unblocked can undo it exactly once */
	self->options |= TH_OPT_IPC_TG_BLOCKED;
#endif
}
1846
1847 /*
1848 * Routine: ipc_port_thread_group_unblocked
1849 * Purpose:
1850 * Call thread_group_unblocked callout if the
1851 * thread had previously made a thread_group_blocked
1852 * callout before (indicated by TH_OPT_IPC_TG_BLOCKED
1853 * flag on the thread).
1854 *
1855 * Conditions:
1856 * Nothing is locked.
1857 */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
	thread_t self = current_thread();

	/* only undo a blocked callout this thread actually made */
	if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) {
		return;
	}

	machine_thread_group_unblocked(self->thread_group, NULL,
	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

	self->options &= ~TH_OPT_IPC_TG_BLOCKED;
#endif
}
1874
#if DEVELOPMENT || DEBUG
/*
 * Debug-only bookkeeping bits on special reply ports (ip_srp_*):
 * used to record whether a reply message was sent and whether the
 * sync linkage was lost before a reply arrived. On release kernels
 * these helpers compile to no-ops (see the #else branch below).
 */
inline void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
{
	special_reply_port->ip_srp_lost_link = 0;
	special_reply_port->ip_srp_msg_sent = 0;
}

/* clear the "message sent" bit (only meaningful on special reply ports) */
static inline void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
{
	if (special_reply_port->ip_specialreply == 1) {
		special_reply_port->ip_srp_msg_sent = 0;
	}
}

/* mark that a message was sent to the special reply port */
inline void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
{
	if (special_reply_port->ip_specialreply == 1) {
		special_reply_port->ip_srp_msg_sent = 1;
	}
}

/* record a lost link, but only if no message had been sent yet */
static inline void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
{
	if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
		special_reply_port->ip_srp_lost_link = 1;
	}
}

#else /* DEVELOPMENT || DEBUG */
/* release-kernel stubs: the ip_srp_* debug bits are not maintained */
inline void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
{
	return;
}

static inline void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
{
	return;
}

inline void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
{
	return;
}

static inline void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
{
	return;
}
#endif /* DEVELOPMENT || DEBUG */
1932
1933 /*
1934 * Routine: ipc_port_adjust_special_reply_port_locked
1935 * Purpose:
1936 * If the special port has a turnstile, update its inheritor.
1937 * Condition:
1938 * Special reply port locked on entry.
1939 * Special reply port unlocked on return.
1940 * The passed in port is a special reply port.
1941 * Returns:
1942 * None.
1943 */
void
ipc_port_adjust_special_reply_port_locked(
	ipc_port_t special_reply_port,
	struct knote *kn,
	uint8_t flags,
	boolean_t get_turnstile)
{
	ipc_port_t dest_port = IPC_PORT_NULL;
	int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct turnstile *ts = TURNSTILE_NULL;
	struct turnstile *port_stashed_turnstile = TURNSTILE_NULL;

	ip_mq_lock_held(special_reply_port); // ip_sync_link_state is touched

	if (!special_reply_port->ip_specialreply) {
		// only mach_msg_receive_results_complete() calls this with any port
		assert(get_turnstile);
		goto not_special;
	}

	if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
		ipc_special_reply_port_msg_sent_reset(special_reply_port);
	}

	if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
		special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
	}

	if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
		special_reply_port->ip_sync_bootstrap_checkin = 0;
	}

	/* Check if the special reply port is marked non-special */
	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
not_special:
		/* no linkage to adjust: just return the donated turnstile, if asked */
		if (get_turnstile) {
			turnstile_complete((uintptr_t)special_reply_port,
			    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
		}
		ip_mq_unlock(special_reply_port);
		if (get_turnstile) {
			turnstile_cleanup();
		}
		return;
	}

	/* decide the new sync link state from the requested adjustment */
	if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
		if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
			inheritor = filt_machport_stash_port(kn, special_reply_port,
			    &sync_link_state);
		}
	} else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	/* Check if need to break linkage */
	if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
	    special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
		/* already unlinked and no turnstile work requested: nothing to do */
		ip_mq_unlock(special_reply_port);
		return;
	}

	/* tear down the old linkage, remembering what must be released later */
	switch (special_reply_port->ip_sync_link_state) {
	case PORT_SYNC_LINK_PORT:
		dest_port = special_reply_port->ip_sync_inheritor_port;
		special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		port_stashed_turnstile = special_reply_port->ip_sync_inheritor_ts;
		special_reply_port->ip_sync_inheritor_ts = NULL;
		break;
	}

	/*
	 * Stash (or unstash) the server's PID in the ip_sorights field of the
	 * special reply port, so that stackshot can later retrieve who the client
	 * is blocked on.
	 */
	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
	    sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
		ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
	} else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
	    sync_link_state == PORT_SYNC_LINK_ANY) {
		/* If we are resetting the special reply port, remove the stashed pid. */
		ipc_special_reply_stash_pid_locked(special_reply_port, 0);
	}

	special_reply_port->ip_sync_link_state = sync_link_state;

	/* install the new linkage */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = kn;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		turnstile_reference(inheritor);
		special_reply_port->ip_sync_inheritor_ts = inheritor;
		break;
	case PORT_SYNC_LINK_NO_LINKAGE:
		if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
			ipc_special_reply_port_lost_link(special_reply_port);
		}
		break;
	}

	/* Get thread's turnstile donated to special reply port */
	if (get_turnstile) {
		turnstile_complete((uintptr_t)special_reply_port,
		    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
	} else {
		ts = ipc_port_rcv_turnstile(special_reply_port);
		if (ts) {
			turnstile_reference(ts);
			ipc_port_recv_update_inheritor(special_reply_port, ts,
			    TURNSTILE_IMMEDIATE_UPDATE);
		}
	}

	ip_mq_unlock(special_reply_port);

	if (get_turnstile) {
		turnstile_cleanup();
	} else if (ts) {
		/* Call turnstile cleanup after dropping the interlock */
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(ts);
	}

	if (port_stashed_turnstile) {
		/* ref previously held via ip_sync_inheritor_ts */
		turnstile_deallocate_safe(port_stashed_turnstile);
	}

	/* Release the ref on the dest port and its turnstile */
	if (dest_port) {
		ipc_port_send_turnstile_complete(dest_port);
		/* release the reference on the dest port, space lock might be held */
		ip_release_safe(dest_port);
	}
}
2086
2087 /*
2088 * Routine: ipc_port_adjust_special_reply_port
2089 * Purpose:
2090 * If the special port has a turnstile, update its inheritor.
2091 * Condition:
2092 * Nothing locked.
2093 * Returns:
2094 * None.
2095 */
2096 void
ipc_port_adjust_special_reply_port(ipc_port_t port,uint8_t flags)2097 ipc_port_adjust_special_reply_port(
2098 ipc_port_t port,
2099 uint8_t flags)
2100 {
2101 if (port->ip_specialreply) {
2102 ip_mq_lock(port);
2103 ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2104 }
2105 }
2106
2107 /*
2108 * Routine: ipc_port_adjust_sync_link_state_locked
2109 * Purpose:
2110 * Update the sync link state of the port and the
2111 * turnstile inheritor.
2112 * Condition:
2113 * Port locked on entry.
2114 * Port locked on return.
2115 * Returns:
2116 * None.
2117 */
void
ipc_port_adjust_sync_link_state_locked(
	ipc_port_t port,
	int sync_link_state,
	turnstile_inheritor_t inheritor)
{
	/*
	 * Tear down the old linkage first: the RCV_THREAD and
	 * WORKLOOP_STASH states each hold a reference on the stashed
	 * inheritor which must be dropped before it is overwritten.
	 */
	switch (port->ip_sync_link_state) {
	case PORT_SYNC_LINK_RCV_THREAD:
		/* deallocate the thread reference for the inheritor */
		thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* deallocate the turnstile reference for the inheritor */
		turnstile_deallocate_safe(port->ip_messages.imq_inheritor_turnstile);
		break;
	}

	/*
	 * NOTE(review): ip_klist appears to share storage with the
	 * imq_inheritor_* fields, which would explain the re-init here
	 * before a new inheritor is stashed -- confirm against the
	 * struct definition.
	 */
	klist_init(&port->ip_klist);

	/* Stash the new inheritor, taking a reference where needed. */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		port->ip_messages.imq_inheritor_knote = inheritor;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* knote can be deleted by userspace, take a reference on turnstile */
		turnstile_reference(inheritor);
		port->ip_messages.imq_inheritor_turnstile = inheritor;
		break;
	case PORT_SYNC_LINK_RCV_THREAD:
		/* The thread could exit without clearing port state, take a thread ref */
		thread_reference((thread_t)inheritor);
		port->ip_messages.imq_inheritor_thread_ref = inheritor;
		break;
	default:
		/* any unrecognized state collapses to PORT_SYNC_LINK_ANY */
		klist_init(&port->ip_klist);
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	port->ip_sync_link_state = sync_link_state;
}
2158
2159
2160 /*
2161 * Routine: ipc_port_adjust_port_locked
2162 * Purpose:
2163 * If the port has a turnstile, update its inheritor.
2164 * Condition:
2165 * Port locked on entry.
2166 * Port unlocked on return.
2167 * Returns:
2168 * None.
2169 */
void
ipc_port_adjust_port_locked(
	ipc_port_t port,
	struct knote *kn,
	boolean_t sync_bootstrap_checkin)
{
	int sync_link_state = PORT_SYNC_LINK_ANY;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

	ip_mq_lock_held(port); // ip_sync_link_state is touched
	assert(!port->ip_specialreply);

	if (kn) {
		/* stash the port on the knote; it selects the new link state */
		inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
		if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
			/* in the KNOTE state, the knote itself is the inheritor */
			inheritor = kn;
		}
	} else if (sync_bootstrap_checkin) {
		/* no knote: push on the thread doing the bootstrap checkin */
		inheritor = current_thread();
		sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
	}

	ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
	port->ip_sync_bootstrap_checkin = 0;

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2198
2199 /*
2200 * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
2201 * Purpose:
2202 * If the port is pushing on rcv thread, clear it.
2203 * Condition:
2204 * Port locked on entry
2205 * Port unlocked on return.
2206 * Returns:
2207 * None.
2208 */
2209 void
ipc_port_clear_sync_rcv_thread_boost_locked(ipc_port_t port)2210 ipc_port_clear_sync_rcv_thread_boost_locked(
2211 ipc_port_t port)
2212 {
2213 ip_mq_lock_held(port); // ip_sync_link_state is touched
2214
2215 if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
2216 ip_mq_unlock(port);
2217 return;
2218 }
2219
2220 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
2221
2222 ipc_port_send_turnstile_recompute_push_locked(port);
2223 /* port unlocked */
2224 }
2225
2226 /*
2227 * Routine: ipc_port_has_prdrequest
2228 * Purpose:
2229 * Returns whether a port has a port-destroyed request armed
2230 * Condition:
2231 * Port is locked.
2232 */
2233 bool
ipc_port_has_prdrequest(ipc_port_t port)2234 ipc_port_has_prdrequest(
2235 ipc_port_t port)
2236 {
2237 if (port->ip_specialreply) {
2238 return false;
2239 }
2240 if (port->ip_has_watchport) {
2241 return port->ip_twe->twe_pdrequest != IP_NULL;
2242 }
2243 return port->ip_pdrequest != IP_NULL;
2244 }
2245
2246 /*
2247 * Routine: ipc_port_add_watchport_elem_locked
2248 * Purpose:
2249 * Transfer the turnstile boost of watchport to task calling exec.
2250 * Condition:
2251 * Port locked on entry.
2252 * Port unlocked on return.
2253 * Returns:
 *		KERN_SUCCESS on success.
2255 * KERN_FAILURE otherwise.
2256 */
kern_return_t
ipc_port_add_watchport_elem_locked(
	ipc_port_t port,
	struct task_watchport_elem *watchport_elem,
	struct task_watchport_elem **old_elem)
{
	ip_mq_lock_held(port);

	/* Watchport boost only works for non-special active ports mapped in an ipc space */
	if (!ip_active(port) || port->ip_specialreply || !ip_in_a_space(port)) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
		/* Sever the linkage if the port was pushing on knote */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
	}

	/* swap in the new element; caller takes ownership of the old one */
	*old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2282
2283 /*
2284 * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
2285 * Purpose:
2286 * Remove the turnstile boost of watchport and recompute the push.
2287 * Condition:
2288 * Port locked on entry.
2289 * Port unlocked on return.
2290 * Returns:
 *		KERN_SUCCESS on success.
2292 * KERN_FAILURE otherwise.
2293 */
2294 kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(ipc_port_t port,struct task_watchport_elem * watchport_elem)2295 ipc_port_clear_watchport_elem_internal_conditional_locked(
2296 ipc_port_t port,
2297 struct task_watchport_elem *watchport_elem)
2298 {
2299 ip_mq_lock_held(port);
2300
2301 if (ipc_port_watchport_elem(port) != watchport_elem) {
2302 ip_mq_unlock(port);
2303 return KERN_FAILURE;
2304 }
2305
2306 ipc_port_clear_watchport_elem_internal(port);
2307 ipc_port_send_turnstile_recompute_push_locked(port);
2308 /* port unlocked */
2309 return KERN_SUCCESS;
2310 }
2311
2312 /*
2313 * Routine: ipc_port_replace_watchport_elem_conditional_locked
2314 * Purpose:
2315 * Replace the turnstile boost of watchport and recompute the push.
2316 * Condition:
2317 * Port locked on entry.
2318 * Port unlocked on return.
2319 * Returns:
 *		KERN_SUCCESS on success.
2321 * KERN_FAILURE otherwise.
2322 */
2323 kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(ipc_port_t port,struct task_watchport_elem * old_watchport_elem,struct task_watchport_elem * new_watchport_elem)2324 ipc_port_replace_watchport_elem_conditional_locked(
2325 ipc_port_t port,
2326 struct task_watchport_elem *old_watchport_elem,
2327 struct task_watchport_elem *new_watchport_elem)
2328 {
2329 ip_mq_lock_held(port);
2330
2331 if (port->ip_specialreply ||
2332 ipc_port_watchport_elem(port) != old_watchport_elem) {
2333 ip_mq_unlock(port);
2334 return KERN_FAILURE;
2335 }
2336
2337 ipc_port_update_watchport_elem(port, new_watchport_elem);
2338 ipc_port_send_turnstile_recompute_push_locked(port);
2339 /* port unlocked */
2340 return KERN_SUCCESS;
2341 }
2342
2343 /*
2344 * Routine: ipc_port_clear_watchport_elem_internal
2345 * Purpose:
2346 * Remove the turnstile boost of watchport.
2347 * Condition:
2348 * Port locked on entry.
2349 * Port locked on return.
2350 * Returns:
2351 * Old task_watchport_elem returned.
2352 */
2353 struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(ipc_port_t port)2354 ipc_port_clear_watchport_elem_internal(
2355 ipc_port_t port)
2356 {
2357 ip_mq_lock_held(port);
2358
2359 if (!port->ip_has_watchport) {
2360 return NULL;
2361 }
2362
2363 return ipc_port_update_watchport_elem(port, NULL);
2364 }
2365
2366 /*
2367 * Routine: ipc_port_send_turnstile_recompute_push_locked
2368 * Purpose:
2369 * Update send turnstile inheritor of port and recompute the push.
2370 * Condition:
2371 * Port locked on entry.
2372 * Port unlocked on return.
2373 * Returns:
2374 * None.
2375 */
static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t port)
{
	struct turnstile *send_turnstile = port_send_turnstile(port);

	if (send_turnstile) {
		/* hold a ref so the turnstile survives the unlock below */
		turnstile_reference(send_turnstile);
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
	}
	ip_mq_unlock(port);

	if (send_turnstile) {
		/* finish the propagation now that the port lock is dropped */
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(send_turnstile);
	}
}
2394
2395 /*
2396 * Routine: ipc_port_get_watchport_inheritor
2397 * Purpose:
2398 * Returns inheritor for watchport.
2399 *
2400 * Conditions:
2401 * mqueue locked.
2402 * Returns:
2403 * watchport inheritor.
2404 */
2405 static thread_t
ipc_port_get_watchport_inheritor(ipc_port_t port)2406 ipc_port_get_watchport_inheritor(
2407 ipc_port_t port)
2408 {
2409 ip_mq_lock_held(port);
2410 return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2411 }
2412
2413 /*
 *	Routine:	ipc_port_get_receiver_task_locked
2415 * Purpose:
2416 * Returns receiver task pointer and its pid (if any) for port.
2417 *
2418 * Conditions:
2419 * Assumes the port is locked.
2420 */
2421 pid_t
ipc_port_get_receiver_task_locked(ipc_port_t port,uintptr_t * task)2422 ipc_port_get_receiver_task_locked(ipc_port_t port, uintptr_t *task)
2423 {
2424 task_t receiver = TASK_NULL;
2425 pid_t pid = -1;
2426
2427 if (!port) {
2428 goto out;
2429 }
2430
2431 if (ip_in_a_space(port) &&
2432 !ip_in_space(port, ipc_space_kernel) &&
2433 !ip_in_space(port, ipc_space_reply)) {
2434 receiver = port->ip_receiver->is_task;
2435 pid = task_pid(receiver);
2436 }
2437
2438 out:
2439 if (task) {
2440 *task = (uintptr_t)receiver;
2441 }
2442 return pid;
2443 }
2444
2445 /*
2446 * Routine: ipc_port_get_receiver_task
2447 * Purpose:
2448 * Returns receiver task pointer and its pid (if any) for port.
2449 *
2450 * Conditions:
2451 * Nothing locked. The routine takes port lock.
2452 */
2453 pid_t
ipc_port_get_receiver_task(ipc_port_t port,uintptr_t * task)2454 ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
2455 {
2456 pid_t pid = -1;
2457
2458 if (!port) {
2459 if (task) {
2460 *task = (uintptr_t)TASK_NULL;
2461 }
2462 return pid;
2463 }
2464
2465 ip_mq_lock(port);
2466 pid = ipc_port_get_receiver_task_locked(port, task);
2467 ip_mq_unlock(port);
2468
2469 return pid;
2470 }
2471
2472 /*
2473 * Routine: ipc_port_impcount_delta
2474 * Purpose:
2475 * Adjust only the importance count associated with a port.
2476 * If there are any adjustments to be made to receiver task,
2477 * those are handled elsewhere.
2478 *
2479 * For now, be defensive during deductions to make sure the
2480 * impcount for the port doesn't underflow zero. This will
2481 * go away when the port boost addition is made atomic (see
2482 * note in ipc_port_importance_delta()).
2483 * Conditions:
2484 * The port is referenced and locked.
2485 * Nothing else is locked.
2486 */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t port,
	mach_port_delta_t delta,
	ipc_port_t __unused base)
{
	mach_port_delta_t absdelta;

	/* inactive ports hold no importance assertions */
	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	/* deduction: take it in full only when it cannot underflow zero */
	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	/* over-release: log the receiver (or the base's receiver) for diagnosis */
	if (ip_in_a_space(port)) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    ip_get_receiver_name(port),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		/* port is in transit; identify it via the chain's base port */
		assert(ip_in_a_space(base));
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    ip_get_receiver_name(base),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif

	/* clamp the count at zero and report what was actually dropped */
	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
2558
2559 /*
2560 * Routine: ipc_port_importance_delta_internal
2561 * Purpose:
2562 * Adjust the importance count through the given port.
2563 * If the port is in transit, apply the delta throughout
2564 * the chain. Determine if the there is a task at the
2565 * base of the chain that wants/needs to be adjusted,
2566 * and if so, apply the delta.
2567 * Conditions:
2568 * The port is referenced and locked on entry.
2569 * Importance may be locked.
2570 * Nothing else is locked.
2571 * The lock may be dropped on exit.
2572 * Returns TRUE if lock was dropped.
2573 */
2574 #if IMPORTANCE_INHERITANCE
2575
boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t *deltap,
	ipc_importance_task_t *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	*imp_task = IIT_NULL;

	/* a zero delta is a no-op */
	if (*deltap == 0) {
		return FALSE;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_in_transit(port)) {
		dropped = true;

		ip_mq_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		assert(ip_in_transit(port));
		next = ip_get_destination(port);
		ip_mq_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				/* a tempowner base donates to its stashed task */
				*imp_task = ip_get_imp_task(base);
			}
			/* otherwise don't boost */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked. If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_mq_unlock(base);
		if (took_base_ref) {
			/* importance lock might be held */
			ip_release_safe(base);
		}
	}

	return dropped;
}
2684 #endif /* IMPORTANCE_INHERITANCE */
2685
2686 /*
2687 * Routine: ipc_port_importance_delta
2688 * Purpose:
2689 * Adjust the importance count through the given port.
2690 * If the port is in transit, apply the delta throughout
2691 * the chain.
2692 *
2693 * If there is a task at the base of the chain that wants/needs
2694 * to be adjusted, apply the delta.
2695 * Conditions:
2696 * The port is referenced and locked on entry.
2697 * Nothing else is locked.
2698 * The lock may be dropped on exit.
2699 * Returns TRUE if lock was dropped.
2700 */
2701 #if IMPORTANCE_INHERITANCE
2702
boolean_t
ipc_port_importance_delta(
	ipc_port_t port,
	natural_t options,
	mach_port_delta_t delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	/* no task to boost, or the whole delta was absorbed by the ports */
	if (IIT_NULL == imp_task || delta == 0) {
		return dropped;
	}

	/* the internal call leaves the port locked when it didn't drop it */
	if (!dropped) {
		ip_mq_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	/* apply the residual delta to the task at the base of the chain */
	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	/* drop the ref taken for us by the internal call */
	ipc_importance_task_release(imp_task);
	return TRUE;
}
2733 #endif /* IMPORTANCE_INHERITANCE */
2734
2735 ipc_port_t
ipc_port_make_send_any_locked(ipc_port_t port)2736 ipc_port_make_send_any_locked(
2737 ipc_port_t port)
2738 {
2739 require_ip_active(port);
2740 port->ip_mscount++;
2741 ip_srights_inc(port);
2742 ip_reference(port);
2743 return port;
2744 }
2745
2746 ipc_port_t
ipc_port_make_send_any(ipc_port_t port)2747 ipc_port_make_send_any(
2748 ipc_port_t port)
2749 {
2750 ipc_port_t sright = port;
2751
2752 if (IP_VALID(port)) {
2753 ip_mq_lock(port);
2754 if (ip_active(port)) {
2755 ipc_port_make_send_any_locked(port);
2756 } else {
2757 sright = IP_DEAD;
2758 }
2759 ip_mq_unlock(port);
2760 }
2761
2762 return sright;
2763 }
2764
2765 ipc_port_t
ipc_port_make_send_mqueue(ipc_port_t port)2766 ipc_port_make_send_mqueue(
2767 ipc_port_t port)
2768 {
2769 ipc_port_t sright = port;
2770 ipc_kobject_type_t kotype;
2771
2772 if (IP_VALID(port)) {
2773 kotype = ip_kotype(port);
2774
2775 ip_mq_lock(port);
2776 if (__improbable(!ip_active(port))) {
2777 sright = IP_DEAD;
2778 } else if (kotype == IKOT_NONE) {
2779 ipc_port_make_send_any_locked(port);
2780 } else if (kotype == IKOT_TIMER) {
2781 ipc_kobject_mktimer_require_locked(port);
2782 ipc_port_make_send_any_locked(port);
2783 } else {
2784 sright = IP_NULL;
2785 }
2786 ip_mq_unlock(port);
2787 }
2788
2789 return sright;
2790 }
2791
2792 void
ipc_port_copy_send_any_locked(ipc_port_t port)2793 ipc_port_copy_send_any_locked(
2794 ipc_port_t port)
2795 {
2796 assert(port->ip_srights > 0);
2797 ip_srights_inc(port);
2798 ip_reference(port);
2799 }
2800
2801 ipc_port_t
ipc_port_copy_send_any(ipc_port_t port)2802 ipc_port_copy_send_any(
2803 ipc_port_t port)
2804 {
2805 ipc_port_t sright = port;
2806
2807 if (IP_VALID(port)) {
2808 ip_mq_lock(port);
2809 if (ip_active(port)) {
2810 ipc_port_copy_send_any_locked(port);
2811 } else {
2812 sright = IP_DEAD;
2813 }
2814 ip_mq_unlock(port);
2815 }
2816
2817 return sright;
2818 }
2819
2820 ipc_port_t
ipc_port_copy_send_mqueue(ipc_port_t port)2821 ipc_port_copy_send_mqueue(
2822 ipc_port_t port)
2823 {
2824 ipc_port_t sright = port;
2825 ipc_kobject_type_t kotype;
2826
2827 if (IP_VALID(port)) {
2828 kotype = ip_kotype(port);
2829
2830 ip_mq_lock(port);
2831 if (__improbable(!ip_active(port))) {
2832 sright = IP_DEAD;
2833 } else if (kotype == IKOT_NONE) {
2834 ipc_port_copy_send_any_locked(port);
2835 } else if (kotype == IKOT_TIMER) {
2836 ipc_kobject_mktimer_require_locked(port);
2837 ipc_port_copy_send_any_locked(port);
2838 } else {
2839 sright = IP_NULL;
2840 }
2841 ip_mq_unlock(port);
2842 }
2843
2844 return sright;
2845 }
2846
2847 /*
2848 * Routine: ipc_port_copyout_send
2849 * Purpose:
2850 * Copyout a naked send right (possibly null/dead),
2851 * or if that fails, destroy the right.
2852 * Conditions:
2853 * Nothing locked.
2854 */
2855
2856 static mach_port_name_t
ipc_port_copyout_send_internal(ipc_port_t sright,ipc_space_t space,ipc_object_copyout_flags_t flags)2857 ipc_port_copyout_send_internal(
2858 ipc_port_t sright,
2859 ipc_space_t space,
2860 ipc_object_copyout_flags_t flags)
2861 {
2862 mach_port_name_t name;
2863
2864 if (IP_VALID(sright)) {
2865 kern_return_t kr;
2866
2867 kr = ipc_object_copyout(space, ip_to_object(sright),
2868 MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, &name);
2869 if (kr != KERN_SUCCESS) {
2870 if (kr == KERN_INVALID_CAPABILITY) {
2871 name = MACH_PORT_DEAD;
2872 } else {
2873 name = MACH_PORT_NULL;
2874 }
2875 }
2876 } else {
2877 name = CAST_MACH_PORT_TO_NAME(sright);
2878 }
2879
2880 return name;
2881 }
2882
2883 mach_port_name_t
ipc_port_copyout_send(ipc_port_t sright,ipc_space_t space)2884 ipc_port_copyout_send(
2885 ipc_port_t sright, /* can be invalid */
2886 ipc_space_t space)
2887 {
2888 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2889 }
2890
2891 /* Used by pthread kext to copyout thread port only */
2892 mach_port_name_t
ipc_port_copyout_send_pinned(ipc_port_t sright,ipc_space_t space)2893 ipc_port_copyout_send_pinned(
2894 ipc_port_t sright, /* can be invalid */
2895 ipc_space_t space)
2896 {
2897 assert(space->is_task != TASK_NULL);
2898
2899 if (IP_VALID(sright)) {
2900 assert(ip_kotype(sright) == IKOT_THREAD_CONTROL);
2901 }
2902
2903 if (task_is_pinned(space->is_task)) {
2904 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
2905 } else {
2906 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2907 }
2908 }
2909
2910 /*
2911 * Routine: ipc_port_release_send_and_unlock
2912 * Purpose:
2913 * Release a naked send right.
2914 * Consumes a ref for the port.
2915 * Conditions:
2916 * Port is valid and locked on entry
2917 * Port is unlocked on exit.
2918 */
void
ipc_port_release_send_and_unlock(
	ipc_port_t port)
{
	ipc_notify_nsenders_t nsrequest = { };

	ip_srights_dec(port);

	/*
	 * If that was the last send right on an active port, prepare the
	 * no-senders notification while still holding the port lock.
	 */
	if (ip_active(port) && port->ip_srights == 0) {
		nsrequest = ipc_notify_no_senders_prepare(port);
	}

	ip_mq_unlock(port);
	ip_release(port);

	/* emit (possibly a no-op) after the lock and ref are dropped */
	ipc_notify_no_senders_emit(nsrequest);
}
2936
2937 /*
2938 * Routine: ipc_port_release_send
2939 * Purpose:
2940 * Release a naked send right.
2941 * Consumes a ref for the port.
2942 * Conditions:
2943 * Nothing locked.
2944 */
2945
2946 __attribute__((flatten, noinline))
2947 void
ipc_port_release_send(ipc_port_t port)2948 ipc_port_release_send(
2949 ipc_port_t port)
2950 {
2951 if (IP_VALID(port)) {
2952 ip_mq_lock(port);
2953 ipc_port_release_send_and_unlock(port);
2954 }
2955 }
2956
2957 /*
2958 * Routine: ipc_port_make_sonce_locked
2959 * Purpose:
2960 * Make a naked send-once right from a receive right.
2961 * Conditions:
2962 * The port is locked and active.
2963 */
2964
2965 ipc_port_t
ipc_port_make_sonce_locked(ipc_port_t port)2966 ipc_port_make_sonce_locked(
2967 ipc_port_t port)
2968 {
2969 require_ip_active(port);
2970 ip_sorights_inc(port);
2971 ip_reference(port);
2972 return port;
2973 }
2974
2975 /*
2976 * Routine: ipc_port_make_sonce
2977 * Purpose:
2978 * Make a naked send-once right from a receive right.
2979 * Conditions:
2980 * The port is not locked.
2981 */
2982
2983 ipc_port_t
ipc_port_make_sonce(ipc_port_t port)2984 ipc_port_make_sonce(
2985 ipc_port_t port)
2986 {
2987 if (!IP_VALID(port)) {
2988 return port;
2989 }
2990
2991 ip_mq_lock(port);
2992 if (ip_active(port)) {
2993 ipc_port_make_sonce_locked(port);
2994 ip_mq_unlock(port);
2995 return port;
2996 }
2997 ip_mq_unlock(port);
2998 return IP_DEAD;
2999 }
3000
3001 /*
 *	Routine:	ipc_port_release_sonce_and_unlock
3003 * Purpose:
3004 * Release a naked send-once right.
3005 * Consumes a ref for the port.
3006 *
3007 * In normal situations, this is never used.
3008 * Send-once rights are only consumed when
3009 * a message (possibly a send-once notification)
3010 * is sent to them.
3011 * Conditions:
3012 * The port is locked, possibly a space too.
3013 */
void
ipc_port_release_sonce_and_unlock(
	ipc_port_t port)
{
	ip_mq_lock_held(port);

	ip_sorights_dec(port);

	if (port->ip_specialreply) {
		/*
		 * Dropping a send-once right on a special reply port resets
		 * its bootstrap-checkin linkage; the call below consumes the
		 * port lock.
		 */
		ipc_port_adjust_special_reply_port_locked(port, NULL,
		    IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN, FALSE);
	} else {
		ip_mq_unlock(port);
	}

	ip_release(port);
}
3031
3032 /*
3033 * Routine: ipc_port_release_sonce
3034 * Purpose:
3035 * Release a naked send-once right.
3036 * Consumes a ref for the port.
3037 *
3038 * In normal situations, this is never used.
3039 * Send-once rights are only consumed when
3040 * a message (possibly a send-once notification)
3041 * is sent to them.
3042 * Conditions:
3043 * Nothing locked except possibly a space.
3044 */
3045 void
ipc_port_release_sonce(ipc_port_t port)3046 ipc_port_release_sonce(
3047 ipc_port_t port)
3048 {
3049 if (IP_VALID(port)) {
3050 ip_mq_lock(port);
3051 ipc_port_release_sonce_and_unlock(port);
3052 }
3053 }
3054
3055 /*
3056 * Routine: ipc_port_release_receive
3057 * Purpose:
3058 * Release a naked (in limbo or in transit) receive right.
3059 * Consumes a ref for the port; destroys the port.
3060 * Conditions:
3061 * Nothing locked.
3062 */
3063
void
ipc_port_release_receive(
	ipc_port_t port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_mq_lock(port);
	require_ip_active(port);
	/* a naked (limbo/in-transit) receive right is never mapped in a space */
	assert(!ip_in_a_space(port));
	/* snapshot the destination before the port is torn down */
	dest = ip_get_destination(port);

	ipc_port_destroy(port); /* consumes ref, unlocks */

	if (dest != IP_NULL) {
		/* sever the send-turnstile link and drop the dest's ref */
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
3086
3087 /*
3088 * Routine: ipc_port_alloc_special
3089 * Purpose:
3090 * Allocate a port in a special space.
3091 * The new port is returned with one ref.
3092 * If unsuccessful, IP_NULL is returned.
3093 * Conditions:
3094 * Nothing locked.
3095 */
3096
ipc_port_t
ipc_port_alloc_special(
	ipc_space_t space,
	ipc_port_init_flags_t flags)
{
	ipc_port_t port;

	port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
	if (port == IP_NULL) {
		return IP_NULL;
	}

	/* the object starts life with a single reference, owned by the caller */
	os_atomic_init(&port->ip_object.io_bits, io_makebits(IOT_PORT));
	os_atomic_init(&port->ip_object.io_references, 1);

	ipc_port_init(port, space, flags, MACH_PORT_SPECIAL_DEFAULT);
	return port;
}
3115
3116 /*
3117 * Routine: ipc_port_dealloc_special_and_unlock
3118 * Purpose:
3119 * Deallocate a port in a special space.
3120 * Consumes one ref for the port.
3121 * Conditions:
3122 * Port is locked.
3123 */
3124
void
ipc_port_dealloc_special_and_unlock(
	ipc_port_t port,
	__assert_only ipc_space_t space)
{
	require_ip_active(port);
	// assert(port->ip_receiver_name != MACH_PORT_NULL);
	assert(ip_in_space(port, space));

	/*
	 * We clear ip_receiver_name and ip_receiver to simplify
	 * the ipc_space_kernel check in ipc_mqueue_send.
	 */

	/* port transitions to IN-LIMBO state */
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;

	/* relevant part of ipc_port_clear_receiver */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;

	/* consumes the caller's ref and unlocks the port */
	ipc_port_destroy(port);
}
3149
3150 /*
3151 * Routine: ipc_port_dealloc_special
3152 * Purpose:
3153 * Deallocate a port in a special space.
3154 * Consumes one ref for the port.
3155 * Conditions:
3156 * Nothing locked.
3157 */
3158
3159 void
ipc_port_dealloc_special(ipc_port_t port,ipc_space_t space)3160 ipc_port_dealloc_special(
3161 ipc_port_t port,
3162 ipc_space_t space)
3163 {
3164 ip_mq_lock(port);
3165 ipc_port_dealloc_special_and_unlock(port, space);
3166 }
3167
3168 /*
3169 * Routine: ipc_port_finalize
3170 * Purpose:
3171 * Called on last reference deallocate to
3172 * free any remaining data associated with the
3173 * port.
3174 * Conditions:
3175 * Nothing locked.
3176 */
void
ipc_port_finalize(
	ipc_port_t port)
{
	ipc_port_request_table_t requests = port->ip_requests;

	/* all turnstile linkage must be gone by the time the last ref drops */
	assert(port_send_turnstile(port) == TURNSTILE_NULL);

	if (waitq_type(&port->ip_waitq) == WQT_PORT) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	if (ip_active(port)) {
		panic("Trying to free an active port. port %p", port);
	}

	/* free the notification-request table, if one was ever allocated */
	if (requests) {
		port->ip_requests = NULL;
		ipc_port_request_table_free_noclear(requests);
	}

	/*
	 * (81997111) now it is safe to deallocate the prealloc message.
	 * Keep the IP_BIT_PREALLOC bit, it has to be sticky as the turnstile
	 * code looks at it without holding locks.
	 */
	if (IP_PREALLOC(port)) {
		ipc_kmsg_t kmsg = port->ip_premsg;

		if (kmsg == IKM_NULL || ikm_prealloc_inuse_port(kmsg)) {
			panic("port(%p, %p): prealloc message in an invalid state",
			    port, kmsg);
		}

		port->ip_premsg = IKM_NULL;
		ipc_kmsg_free(kmsg);
	}

	waitq_deinit(&port->ip_waitq);
#if MACH_ASSERT
	/* drop the make-send backtrace reference kept for debugging */
	if (port->ip_made_bt) {
		btref_put(port->ip_made_bt);
	}
#endif
}
3222
3223 /*
3224 * Routine: kdp_mqueue_send_find_owner
3225 * Purpose:
3226 * Discover the owner of the ipc object that contains the input
3227 * waitq object. The thread blocked on the waitq should be
3228 * waiting for an IPC_MQUEUE_FULL event.
3229 * Conditions:
3230 * The 'waitinfo->wait_type' value should already be set to
3231 * kThreadWaitPortSend.
3232 * Note:
3233 * If we find out that the containing port is actually in
3234 * transit, we reset the wait_type field to reflect this.
3235 */
3236 void
kdp_mqueue_send_find_owner(struct waitq * waitq,__assert_only event64_t event,thread_waitinfo_v2_t * waitinfo,struct ipc_service_port_label ** isplp)3237 kdp_mqueue_send_find_owner(
3238 struct waitq *waitq,
3239 __assert_only event64_t event,
3240 thread_waitinfo_v2_t *waitinfo,
3241 struct ipc_service_port_label **isplp)
3242 {
3243 struct turnstile *turnstile;
3244 assert(waitinfo->wait_type == kThreadWaitPortSend);
3245 assert(event == IPC_MQUEUE_FULL);
3246 assert(waitq_type(waitq) == WQT_TURNSTILE);
3247
3248 turnstile = waitq_to_turnstile(waitq);
3249 ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
3250
3251 zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3252
3253 waitinfo->owner = 0;
3254 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
3255 if (ip_mq_lock_held_kdp(port)) {
3256 /*
3257 * someone has the port locked: it may be in an
3258 * inconsistent state: bail
3259 */
3260 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3261 return;
3262 }
3263
3264 /* now we are the only one accessing the port */
3265 if (ip_active(port)) {
3266 /*
3267 * In kdp context, port must be left unlocked throughout.
3268 * Therefore can't use union field accessor helpers, manually strip PAC
3269 * and compare raw pointer.
3270 */
3271 void *raw_ptr = ip_get_receiver_ptr_noauth(port);
3272
3273 if (port->ip_tempowner) {
3274 ipc_importance_task_t imp_task = ip_get_imp_task(port);
3275 if (imp_task != IIT_NULL && imp_task->iit_task != NULL) {
3276 /* port is held by a tempowner */
3277 waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3278 } else {
3279 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3280 }
3281 } else if (ip_in_a_space(port)) { /* no port lock needed */
3282 if ((ipc_space_t)raw_ptr == ipc_space_kernel) { /* access union field as ip_receiver */
3283 /*
3284 * The kernel pid is 0, make this
3285 * distinguishable from no-owner and
3286 * inconsistent port state.
3287 */
3288 waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
3289 } else {
3290 waitinfo->owner = pid_from_task(((ipc_space_t)raw_ptr)->is_task);
3291 }
3292 } else if ((ipc_port_t)raw_ptr != IP_NULL) { /* access union field as ip_destination */
3293 waitinfo->wait_type = kThreadWaitPortSendInTransit;
3294 waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM((ipc_port_t)raw_ptr);
3295 }
3296 if (port->ip_service_port && port->ip_splabel != NULL) {
3297 *isplp = (struct ipc_service_port_label *)port->ip_splabel;
3298 }
3299 }
3300 }
3301
3302 /*
3303 * Routine: kdp_mqueue_recv_find_owner
3304 * Purpose:
3305 * Discover the "owner" of the ipc object that contains the input
3306 * waitq object. The thread blocked on the waitq is trying to
3307 * receive on the mqueue.
3308 * Conditions:
3309 * The 'waitinfo->wait_type' value should already be set to
3310 * kThreadWaitPortReceive.
3311 * Note:
 * If we find that we are actually waiting on a port set, we reset
3313 * the wait_type field to reflect this.
3314 */
void
kdp_mqueue_recv_find_owner(
	struct waitq            *waitq,
	__assert_only event64_t event,
	thread_waitinfo_v2_t    *waitinfo,
	struct ipc_service_port_label **isplp)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	waitinfo->owner = 0;

	if (waitq_type(waitq) == WQT_PORT_SET) {
		ipc_pset_t set = ips_from_waitq(waitq);

		/* sanity-check that the pointer really came from the pset zone */
		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_mq_lock_held_kdp(set)) {
			/* pset is locked by someone: state may be inconsistent */
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else if (waitq_type(waitq) == WQT_PORT) {
		ipc_port_t port = ip_from_waitq(waitq);

		/* sanity-check that the pointer really came from the port zone */
		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_mq_lock_held_kdp(port)) {
			/* port is locked: bail rather than report possibly stale state */
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (ip_in_a_space(port)) { /* no port lock needed */
				waitinfo->owner = ip_get_receiver_name(port);
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
			if (port->ip_specialreply) {
				waitinfo->wait_flags |= STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY;
			}
			if (port->ip_splabel != NULL) {
				*isplp = (struct ipc_service_port_label *)port->ip_splabel;
			}
		}
	}
}
3365
3366 void
ipc_port_set_label(ipc_port_t port,ipc_label_t label)3367 ipc_port_set_label(
3368 ipc_port_t port,
3369 ipc_label_t label)
3370 {
3371 ipc_kobject_label_t labelp;
3372
3373 assert(!ip_is_kolabeled(port));
3374
3375 labelp = zalloc_flags(ipc_kobject_label_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3376 labelp->ikol_label = label;
3377
3378 port->ip_kolabel = labelp;
3379 io_bits_or(ip_to_object(port), IO_BITS_KOLABEL);
3380 }
3381
3382 kern_return_t
ipc_port_reset_thread_attr(ipc_port_t port)3383 ipc_port_reset_thread_attr(
3384 ipc_port_t port)
3385 {
3386 uint8_t iotier = THROTTLE_LEVEL_END;
3387 uint8_t qos = THREAD_QOS_UNSPECIFIED;
3388
3389 return ipc_port_update_qos_n_iotier(port, qos, iotier);
3390 }
3391
3392 kern_return_t
ipc_port_propagate_thread_attr(ipc_port_t port,struct thread_attr_for_ipc_propagation attr)3393 ipc_port_propagate_thread_attr(
3394 ipc_port_t port,
3395 struct thread_attr_for_ipc_propagation attr)
3396 {
3397 uint8_t iotier = attr.tafip_iotier;
3398 uint8_t qos = attr.tafip_qos;
3399
3400 return ipc_port_update_qos_n_iotier(port, qos, iotier);
3401 }
3402
/*
 * Store the kernel QoS / IO-tier overrides on the port and, when a
 * knote is registered on it, ping the klist so listeners re-evaluate.
 * Returns KERN_INVALID_ARGUMENT for a null or special-reply port,
 * KERN_TERMINATED for an inactive one.
 */
static kern_return_t
ipc_port_update_qos_n_iotier(
	ipc_port_t port,
	uint8_t    qos,
	uint8_t    iotier)
{
	if (port == IPC_PORT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	ip_mq_lock(port);

	if (!ip_active(port)) {
		/* port has already been destroyed */
		ip_mq_unlock(port);
		return KERN_TERMINATED;
	}

	if (port->ip_specialreply) {
		/* special reply ports are rejected */
		ip_mq_unlock(port);
		return KERN_INVALID_ARGUMENT;
	}

	port->ip_kernel_iotier_override = iotier;
	port->ip_kernel_qos_override = qos;

	/* notify only if the receiving space is active and a klist exists */
	if (ip_in_a_space(port) &&
	    is_active(ip_get_receiver(port)) &&
	    ipc_port_has_klist(port)) {
		KNOTE(&port->ip_klist, 0);
	}

	ip_mq_unlock(port);
	return KERN_SUCCESS;
}
3437
/*
 * TRUE when the current task should be held to strict reply-port
 * semantics: it is a platform binary, not Rosetta-translated (when
 * CONFIG_ROSETTA is built in), and not running under a simulator.
 */
boolean_t
__ip_strict_reply_port_semantics_violation(void)
{
	return task_get_platform_binary(current_task())
#if CONFIG_ROSETTA
	       && !task_is_translated(current_task()) /* ignore rosetta violators */
#endif
	       && !proc_is_simulated(current_proc());
}
3447
#if MACH_ASSERT
#include <kern/machine.h>

/* Debug-only port accounting state (see ipc_port_init_debug below). */
unsigned long   port_count = 0;
unsigned long   port_count_warning = 20000;
unsigned long   port_timestamp = 0;     /* monotonically increasing allocation stamp */

/* Debugger helpers for walking/inspecting ports. */
void db_port_stack_trace(
	ipc_port_t port);
void db_ref(
	int refs);
int db_port_walk(
	unsigned int verbose,
	unsigned int display,
	unsigned int ref_search,
	unsigned int ref_target);

#ifdef MACH_BSD
extern int proc_pid(struct proc*);
#endif /* MACH_BSD */
3468
3469 /*
3470 * Initialize all of the debugging state in a port.
3471 * Insert the port into a global list of all allocated ports.
3472 */
void
ipc_port_init_debug(ipc_port_t port, void *fp)
{
	/* stamp the port with its allocation order */
	port->ip_timetrack = port_timestamp++;

	if (ipc_portbt) {
		/* capture an allocation backtrace anchored at caller frame fp */
		port->ip_made_bt = btref_get(fp, 0);
	}

#ifdef MACH_BSD
	/* record the pid of the allocating process, when one exists */
	task_t task = current_task_early();
	if (task != TASK_NULL) {
		struct proc *proc = get_bsdtask_info(task);
		if (proc) {
			port->ip_made_pid = proc_pid(proc);
		}
	}
#endif /* MACH_BSD */
}
3492
3493 #endif /* MACH_ASSERT */
3494