xref: /xnu-8020.121.3/osfmk/ipc/ipc_port.c (revision fdd8201d7b966f0c3ea610489d29bd841d358941)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 /*
65  *	File:	ipc/ipc_port.c
66  *	Author:	Rich Draves
67  *	Date:	1989
68  *
69  *	Functions to manipulate IPC ports.
70  */
71 
72 #include <mach_assert.h>
73 
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <kern/backtrace.h>
77 #include <kern/debug.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/kcdata.h>
80 #include <kern/misc_protos.h>
81 #include <kern/policy_internal.h>
82 #include <kern/thread.h>
83 #include <kern/waitq.h>
84 #include <ipc/ipc_entry.h>
85 #include <ipc/ipc_space.h>
86 #include <ipc/ipc_object.h>
87 #include <ipc/ipc_right.h>
88 #include <ipc/ipc_port.h>
89 #include <ipc/ipc_pset.h>
90 #include <ipc/ipc_kmsg.h>
91 #include <ipc/ipc_mqueue.h>
92 #include <ipc/ipc_notify.h>
93 #include <ipc/ipc_table.h>
94 #include <ipc/ipc_importance.h>
95 #include <machine/limits.h>
96 #include <kern/turnstile.h>
97 #include <kern/machine.h>
98 
99 #include <security/mac_mach_internal.h>
100 #include <ipc/ipc_service_port.h>
101 
102 #include <string.h>
103 
/* Boot-arg tunable; presumably gates launch-time priority boosting — confirm against turnstile push code. */
static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
/* Writable boot-arg; name suggests it enables port-allocation backtraces under MACH_ASSERT — TODO confirm. */
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

extern zone_t ipc_kobject_label_zone;

/* Spin lock used when multiple port locks must be held at once. */
LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
/* Global monotonic counter backing ipc_port_timestamp(). */
ipc_port_timestamp_t ipc_port_timestamp_data;

#if     MACH_ASSERT
static void ipc_port_init_debug(ipc_port_t, void *fp);
#endif  /* MACH_ASSERT */
115 
/*
 *	Routine:	__ipc_port_inactive_panic
 *	Purpose:
 *		Out-of-line, non-returning panic helper used when a code
 *		path encounters a port that is no longer active.
 *		__abortlike keeps the cold path out of callers' hot code.
 */
void __abortlike
__ipc_port_inactive_panic(ipc_port_t port)
{
	panic("Using inactive port %p", port);
}
121 
/*
 *	Routine:	__ipc_port_translate_receive_panic
 *	Purpose:
 *		Non-returning panic helper for ipc_port_translate_receive():
 *		a receive right was found in a space that is not the port's
 *		owning space, which indicates entry tampering or corruption.
 */
static __abortlike void
__ipc_port_translate_receive_panic(ipc_space_t space, ipc_port_t port)
{
	panic("found receive right in space %p for port %p owned by space %p",
	    space, port, ip_get_receiver(port));
}
128 
129 static void
130 ipc_port_send_turnstile_recompute_push_locked(
131 	ipc_port_t port);
132 
133 static thread_t
134 ipc_port_get_watchport_inheritor(
135 	ipc_port_t port);
136 
137 static kern_return_t
138 ipc_port_update_qos_n_iotier(
139 	ipc_port_t port,
140 	uint8_t    qos,
141 	uint8_t    iotier);
142 
/*
 *	Routine:	ipc_port_release
 *	Purpose:
 *		Out-of-line wrapper that drops one reference on a port,
 *		for callers that cannot use the ip_release() macro directly.
 */
void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}
148 
/*
 *	Routine:	ipc_port_reference
 *	Purpose:
 *		Out-of-line wrapper that takes one reference on a port,
 *		for callers that cannot use the ip_reference() macro directly.
 */
void
ipc_port_reference(ipc_port_t port)
{
	ip_reference(port);
}
154 
/*
 *	Routine:	ipc_port_timestamp
 *	Purpose:
 *		Retrieve a timestamp value.
 *	Returns:
 *		The previous value of the global timestamp counter;
 *		each call atomically advances the counter by one, so
 *		successive calls return strictly increasing values.
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
166 
167 
168 /*
169  *	Routine:	ipc_port_translate_send
170  *	Purpose:
171  *		Look up a send right in a space.
172  *	Conditions:
173  *		Nothing locked before.  If successful, the object
174  *		is returned active and locked.  The caller doesn't get a ref.
175  *	Returns:
176  *		KERN_SUCCESS		Object returned locked.
177  *		KERN_INVALID_TASK	The space is dead.
178  *		KERN_INVALID_NAME	The name doesn't denote a right
179  *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
180  */
181 kern_return_t
ipc_port_translate_send(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)182 ipc_port_translate_send(
183 	ipc_space_t                     space,
184 	mach_port_name_t                name,
185 	ipc_port_t                     *portp)
186 {
187 	ipc_port_t port = IP_NULL;
188 	ipc_object_t object;
189 	kern_return_t kr;
190 
191 	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
192 	if (kr == KERN_SUCCESS) {
193 		port = ip_object_to_port(object);
194 	}
195 	*portp = port;
196 	return kr;
197 }
198 
199 
200 /*
201  *	Routine:	ipc_port_translate_receive
202  *	Purpose:
203  *		Look up a receive right in a space.
204  *		Performs some minimal security checks against tampering.
205  *	Conditions:
206  *		Nothing locked before.  If successful, the object
207  *		is returned active and locked.  The caller doesn't get a ref.
208  *	Returns:
209  *		KERN_SUCCESS		Object returned locked.
210  *		KERN_INVALID_TASK	The space is dead.
211  *		KERN_INVALID_NAME	The name doesn't denote a right
212  *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
213  */
214 kern_return_t
ipc_port_translate_receive(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)215 ipc_port_translate_receive(
216 	ipc_space_t                     space,
217 	mach_port_name_t                name,
218 	ipc_port_t                     *portp)
219 {
220 	ipc_port_t port = IP_NULL;
221 	ipc_object_t object;
222 	kern_return_t kr;
223 
224 	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_RECEIVE, &object);
225 	if (kr == KERN_SUCCESS) {
226 		/* object is locked */
227 		port = ip_object_to_port(object);
228 		if (!ip_in_space(port, space)) {
229 			__ipc_port_translate_receive_panic(space, port);
230 		}
231 	}
232 	*portp = port;
233 	return kr;
234 }
235 
236 
237 /*
238  *	Routine:	ipc_port_request_alloc
239  *	Purpose:
240  *		Try to allocate a request slot.
241  *		If successful, returns the request index.
242  *		Otherwise returns zero.
243  *	Conditions:
244  *		The port is locked and active.
245  *	Returns:
246  *		KERN_SUCCESS		A request index was found.
247  *		KERN_NO_SPACE		No index allocated.
248  */
249 
#if IMPORTANCE_INHERITANCE
kern_return_t
ipc_port_request_alloc(
	ipc_port_t                      port,
	mach_port_name_t                name,
	ipc_port_t                      soright,
	boolean_t                       send_possible,
	boolean_t                       immediate,
	ipc_port_request_index_t        *indexp,
	boolean_t                       *importantp)
#else
kern_return_t
ipc_port_request_alloc(
	ipc_port_t                      port,
	mach_port_name_t                name,
	ipc_port_t                      soright,
	boolean_t                       send_possible,
	boolean_t                       immediate,
	ipc_port_request_index_t        *indexp)
#endif /* IMPORTANCE_INHERITANCE */
{
	ipc_port_request_t ipr, table;
	ipc_port_request_index_t index;
	uintptr_t mask = 0;

#if IMPORTANCE_INHERITANCE
	*importantp = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

	require_ip_active(port);
	assert(name != MACH_PORT_NULL);
	assert(soright != IP_NULL);

	table = port->ip_requests;

	/* No request table allocated yet: caller must grow it first. */
	if (table == IPR_NULL) {
		return KERN_NO_SPACE;
	}

	/* Slot 0 heads the free list; a next index of 0 means it's empty. */
	index = table->ipr_next;
	if (index == 0) {
		return KERN_NO_SPACE;
	}

	ipr = &table[index];
	assert(ipr->ipr_name == MACH_PORT_NULL);

	/* Pop the slot off the free list and claim it for this name. */
	table->ipr_next = ipr->ipr_next;
	ipr->ipr_name = name;

	if (send_possible) {
		mask |= IPR_SOR_SPREQ_MASK;
		if (immediate) {
			/* Armed immediately: notification fires on the next send-possible event. */
			mask |= IPR_SOR_SPARM_MASK;
			if (port->ip_sprequests == 0) {
				port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
				/* TODO: Live importance support in send-possible */
				if (port->ip_impdonation != 0 &&
				    port->ip_spimportant == 0 &&
				    (task_is_importance_donor(current_task()))) {
					*importantp = TRUE;
				}
#endif /* IMPORTANCE_INHERITANCE */
			}
		}
	}
	/* Pack the send-once right pointer together with the request-type bits. */
	ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);

	*indexp = index;

	return KERN_SUCCESS;
}
323 
324 /*
325  *	Routine:	ipc_port_request_grow
326  *	Purpose:
327  *		Grow a port's table of requests.
328  *	Conditions:
329  *		The port must be locked and active.
330  *		Nothing else locked; will allocate memory.
331  *		Upon return the port is unlocked.
332  *	Returns:
333  *		KERN_SUCCESS		Grew the table.
334  *		KERN_SUCCESS		Somebody else grew the table.
335  *		KERN_SUCCESS		The port died.
336  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate new table.
337  *		KERN_NO_SPACE		Couldn't grow to desired size
338  */
339 
kern_return_t
ipc_port_request_grow(
	ipc_port_t              port,
	ipc_table_elems_t       target_size)
{
	ipc_table_size_t its;
	ipc_port_request_t otable, ntable;
	require_ip_active(port);

	/* Pick the next size class: first class if no table yet, else one up. */
	otable = port->ip_requests;
	if (otable == IPR_NULL) {
		its = &ipc_table_requests[0];
	} else {
		its = otable->ipr_size + 1;
	}

	if (target_size != ITS_SIZE_NONE) {
		/* Already big enough: nothing to do. */
		if ((otable != IPR_NULL) &&
		    (target_size <= otable->ipr_size->its_size)) {
			ip_mq_unlock(port);
			return KERN_SUCCESS;
		}
		/* Walk the size classes until one covers target_size. */
		while ((its->its_size) && (its->its_size < target_size)) {
			its++;
		}
		/* A zero-sized class terminates the table: target too large. */
		if (its->its_size == 0) {
			ip_mq_unlock(port);
			return KERN_NO_SPACE;
		}
	}

	/*
	 * Drop the port lock to allocate; hold a ref so the port
	 * can't be freed underneath us while unlocked.
	 */
	ip_reference(port);
	ip_mq_unlock(port);

	if ((its->its_size == 0) ||
	    ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
		ip_release(port);
		return KERN_RESOURCE_SHORTAGE;
	}

	ip_mq_lock(port);

	/*
	 *	Check that port is still active and that nobody else
	 *	has slipped in and grown the table on us.  Note that
	 *	just checking if the current table pointer == otable
	 *	isn't sufficient; must check ipr_size.
	 */

	if (ip_active(port) && (port->ip_requests == otable) &&
	    ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
		ipc_table_size_t oits;
		ipc_table_elems_t osize, nsize;
		ipc_port_request_index_t free, i;

		/* copy old table to new table */

		if (otable != IPR_NULL) {
			oits = otable->ipr_size;
			osize = oits->its_size;
			free = otable->ipr_next;

			/* Slot 0 is the header; copy only the entries after it. */
			(void) memcpy((void *)(ntable + 1),
			    (const void *)(otable + 1),
			    (osize - 1) * sizeof(struct ipc_port_request));
		} else {
			osize = 1;
			oits = 0;
			free = 0;
		}

		nsize = its->its_size;
		assert(nsize > osize);

		/* add new elements to the new table's free list */

		for (i = osize; i < nsize; i++) {
			ipc_port_request_t ipr = &ntable[i];

			ipr->ipr_name = MACH_PORT_NULL;
			ipr->ipr_next = free;
			free = i;
		}

		ntable->ipr_next = free;
		ntable->ipr_size = its;
		port->ip_requests = ntable;
		ip_mq_unlock(port);
		ip_release(port);

		/* Free the old table outside the port lock. */
		if (otable != IPR_NULL) {
			it_requests_free(oits, otable);
		}
	} else {
		/* Lost the race (or port died): discard our new table. */
		ip_mq_unlock(port);
		ip_release(port);
		it_requests_free(its, ntable);
	}

	return KERN_SUCCESS;
}
441 
442 /*
443  *	Routine:	ipc_port_request_sparm
444  *	Purpose:
445  *		Arm delayed send-possible request.
446  *	Conditions:
447  *		The port must be locked and active.
448  *
449  *		Returns TRUE if the request was armed
450  *		(or armed with importance in that version).
451  */
452 
boolean_t
ipc_port_request_sparm(
	ipc_port_t                      port,
	__assert_only mach_port_name_t  name,
	ipc_port_request_index_t        index,
	mach_msg_option_t               option,
	mach_msg_priority_t             priority)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_t ipr, table;

		require_ip_active(port);

		table = port->ip_requests;
		assert(table != IPR_NULL);

		ipr = &table[index];
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			/* Arm the send-possible request and flag the port. */
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				mach_msg_qos_t qos_ovr;
				if (mach_msg_priority_is_pthread_priority(priority)) {
					qos_ovr = _pthread_priority_thread_qos(priority);
				} else {
					/* NB: "overide" is the actual (misspelled) API name. */
					qos_ovr = mach_msg_priority_overide_qos(priority);
				}
				if (qos_ovr) {
					ipc_mqueue_override_send_locked(&port->ip_messages, qos_ovr);
				}
			}

#if IMPORTANCE_INHERITANCE
			/*
			 * TRUE (armed with importance) only when the sender
			 * donates importance and the port accepts donation.
			 */
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			    (task_is_importance_donor(current_task())))) {
				return TRUE;
			}
#else
			return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return FALSE;
}
505 
506 /*
507  *	Routine:	ipc_port_request_type
508  *	Purpose:
509  *		Determine the type(s) of port requests enabled for a name.
510  *	Conditions:
511  *		The port must be locked or inactive (to avoid table growth).
512  *		The index must not be IE_REQ_NONE and for the name in question.
513  */
514 mach_port_type_t
ipc_port_request_type(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)515 ipc_port_request_type(
516 	ipc_port_t                      port,
517 	__assert_only mach_port_name_t  name,
518 	ipc_port_request_index_t        index)
519 {
520 	ipc_port_request_t ipr, table;
521 	mach_port_type_t type = 0;
522 
523 	table = port->ip_requests;
524 	assert(table != IPR_NULL);
525 
526 	assert(index != IE_REQ_NONE);
527 	ipr = &table[index];
528 	assert(ipr->ipr_name == name);
529 
530 	if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
531 		type |= MACH_PORT_TYPE_DNREQUEST;
532 
533 		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
534 			type |= MACH_PORT_TYPE_SPREQUEST;
535 
536 			if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
537 				type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
538 			}
539 		}
540 	}
541 	return type;
542 }
543 
544 /*
545  *	Routine:	ipc_port_request_cancel
546  *	Purpose:
547  *		Cancel a dead-name/send-possible request and return the send-once right.
548  *	Conditions:
549  *		The port must be locked and active.
550  *		The index must not be IPR_REQ_NONE and must correspond with name.
551  */
552 
553 ipc_port_t
ipc_port_request_cancel(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)554 ipc_port_request_cancel(
555 	ipc_port_t                      port,
556 	__assert_only mach_port_name_t  name,
557 	ipc_port_request_index_t        index)
558 {
559 	ipc_port_request_t ipr, table;
560 	ipc_port_t request = IP_NULL;
561 
562 	require_ip_active(port);
563 	table = port->ip_requests;
564 	assert(table != IPR_NULL);
565 
566 	assert(index != IE_REQ_NONE);
567 	ipr = &table[index];
568 	assert(ipr->ipr_name == name);
569 	request = IPR_SOR_PORT(ipr->ipr_soright);
570 
571 	/* return ipr to the free list inside the table */
572 	ipr->ipr_name = MACH_PORT_NULL;
573 	ipr->ipr_next = table->ipr_next;
574 	table->ipr_next = index;
575 
576 	return request;
577 }
578 
579 
580 /*
581  *	Routine:	ipc_port_nsrequest
582  *	Purpose:
583  *		Make a no-senders request, returning the
584  *		previously registered send-once right.
585  *		Just cancels the previous request if notify is IP_NULL.
586  *	Conditions:
587  *		The port is locked and active.  It is unlocked.
588  *		Consumes a ref for notify (if non-null), and
589  *		returns previous with a ref (if non-null).
590  */
591 
void
ipc_port_nsrequest(
	ipc_port_t              port,
	mach_port_mscount_t     sync,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous;
	mach_port_mscount_t mscount;
	require_ip_active(port);

	/* Kernel-owned ports use the kobject no-senders mechanism instead. */
	assert(!ip_in_space(port, ipc_space_kernel));
	assert(port->ip_nsrequest != IP_KOBJECT_NSREQUEST_ARMED);

	previous = port->ip_nsrequest;
	mscount = port->ip_mscount;

	/*
	 * If there are no senders and the caller's sync point has been
	 * reached, deliver the notification right away (after dropping
	 * the port lock); otherwise just install the new request.
	 */
	if ((port->ip_srights == 0) && (sync <= mscount) &&
	    (notify != IP_NULL)) {
		port->ip_nsrequest = IP_NULL;
		ip_mq_unlock(port);
		ipc_notify_no_senders(notify, mscount, /* kobject */ false);
	} else {
		port->ip_nsrequest = notify;
		ip_mq_unlock(port);
	}

	/* Hand the previously registered right (if any) back to the caller. */
	*previousp = previous;
}
621 
622 
623 /*
624  *	Routine:	ipc_port_clear_receiver
625  *	Purpose:
626  *		Prepares a receive right for transmission/destruction,
627  *		optionally performs mqueue destruction (with port lock held)
628  *
629  *	Conditions:
630  *		The port is locked and active.
631  *	Returns:
632  *		If should_destroy is TRUE, then the return value indicates
633  *		whether the caller needs to reap kmsg structures that should
634  *		be destroyed (by calling ipc_kmsg_reap_delayed)
635  *
636  *              If should_destroy is FALSE, this always returns FALSE
637  */
638 
boolean_t
ipc_port_clear_receiver(
	ipc_port_t          port,
	boolean_t           should_destroy,
	waitq_link_list_t  *free_l)
{
	ipc_mqueue_t    mqueue = &port->ip_messages;
	boolean_t       reap_messages = FALSE;

	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the write space lock or the receive entry has
	 * been deleted, so even though this acquires and releases
	 * the port lock, we know we won't be added to any other sets.
	 */
	if (ip_in_pset(port)) {
		waitq_unlink_all_locked(&port->ip_waitq, NULL, free_l);
		assert(!ip_in_pset(port));
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount, seqno, guard bits
	 */
	if (ip_in_a_space(port)) {
		ipc_mqueue_changed(ip_get_receiver(port), &port->ip_waitq);
	} else {
		ipc_mqueue_changed(NULL, &port->ip_waitq);
	}
	port->ip_mscount = 0;
	mqueue->imq_seqno = 0;
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
	/*
	 * clear the immovable bit so the port can move back to anyone listening
	 * for the port destroy notification
	 */
	port->ip_immovable_receive = 0;

	if (should_destroy) {
		/*
		 * Mark the port and mqueue invalid, preventing further send/receive
		 * operations from succeeding. It's important for this to be
		 * done under the same lock hold as the ipc_mqueue_changed
		 * call to avoid additional threads blocking on an mqueue
		 * that's being destroyed.
		 *
		 * The port active bit needs to be guarded under mqueue lock for
		 * turnstiles
		 */

		/* port transitions to INACTIVE state */
		io_bits_andnot(ip_to_object(port), IO_BITS_ACTIVE);
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_timestamp = ipc_port_timestamp();

		/* TRUE here tells the caller to run ipc_kmsg_reap_delayed() later */
		reap_messages = ipc_mqueue_destroy_locked(mqueue, free_l);
	} else {
		/* port transtions to IN-LIMBO state */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
	}

	return reap_messages;
}
703 
704 /*
705  *	Routine:	ipc_port_init
706  *	Purpose:
707  *		Initializes a newly-allocated port.
708  *
709  *		The memory is expected to be zero initialized (allocated with Z_ZERO).
710  */
711 
void
ipc_port_init(
	ipc_port_t              port,
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name)
{
	int policy = SYNC_POLICY_FIFO;

	/* the port has been 0 initialized when called */

	if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
		io_bits_or(ip_to_object(port), IP_BIT_FILTER_MSG);
	}
	if (flags & IPC_PORT_INIT_LOCKED) {
		/* caller wants the port returned locked */
		policy |= SYNC_POLICY_INIT_LOCKED;
	}

	/* must be done first, many ip_* bits live inside the waitq */
	waitq_init(&port->ip_waitq, WQT_PORT, policy);
	if (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) {
		port->ip_tg_block_tracking = true;
	}
	if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
		/* special reply ports are pinned: the receive right cannot move */
		port->ip_specialreply = true;
		port->ip_immovable_receive = true;
	}

	/* "no override" sentinels for the kernel QoS / IO-tier overrides */
	port->ip_kernel_qos_override = THREAD_QOS_UNSPECIFIED;
	port->ip_kernel_iotier_override = THROTTLE_LEVEL_END;

	ipc_mqueue_init(&port->ip_messages);
#if MACH_ASSERT
	/* record allocation provenance for debugging */
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif  /* MACH_ASSERT */

	/* port transitions to IN-SPACE state */
	port->ip_receiver_name = name;
	port->ip_receiver = space;

	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		/* pre-made send right: one sright, one make-send counted */
		port->ip_srights = 1;
		port->ip_mscount = 1;
	}
}
757 
758 /*
759  *	Routine:	ipc_port_alloc
760  *	Purpose:
761  *		Allocate a port.
762  *	Conditions:
763  *		Nothing locked.  If successful, the port is returned
764  *		locked.  (The caller doesn't have a reference.)
765  *	Returns:
766  *		KERN_SUCCESS		The port is allocated.
767  *		KERN_INVALID_TASK	The space is dead.
768  *		KERN_NO_SPACE		No room for an entry in the space.
769  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
770  */
771 
kern_return_t
ipc_port_alloc(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        *namep,
	ipc_port_t              *portp)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	/* optionally mint a send right alongside the receive right */
	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
	    &name, (ipc_object_t *) &port);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* space is locked */
	ipc_port_init(port, space, flags | IPC_PORT_INIT_LOCKED, name);
	/* port is locked */
#if MACH_ASSERT
	/*
	 * NOTE(review): ipc_port_init() above already calls
	 * ipc_port_init_debug() under MACH_ASSERT, so this appears to
	 * register the port's debug state twice — confirm whether the
	 * second call is intentional or leftover.
	 */
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif  /* MACH_ASSERT */

	/* unlock space after init */
	is_write_unlock(space);

	*namep = name;
	*portp = port;

	return KERN_SUCCESS;
}
810 
811 /*
812  *	Routine:	ipc_port_alloc_name
813  *	Purpose:
814  *		Allocate a port, with a specific name.
815  *	Conditions:
816  *		Nothing locked.  If successful, the port is returned
817  *		locked.  (The caller doesn't have a reference.)
818  *	Returns:
819  *		KERN_SUCCESS		The port is allocated.
820  *		KERN_INVALID_TASK	The space is dead.
821  *		KERN_NAME_EXISTS	The name already denotes a right.
822  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
823  */
824 
kern_return_t
ipc_port_alloc_name(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	/* optionally mint a send right alongside the receive right */
	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	flags |= IPC_PORT_INIT_LOCKED;

	/* the block runs with the new entry installed, initializing the port */
	return ipc_object_alloc_name(space, IOT_PORT, type, urefs,
	           name, (ipc_object_t *)portp, ^(ipc_object_t object){
		ipc_port_init(ip_object_to_port(object), space, flags, name);
	});
}
846 
847 /*
848  *      Routine:	ipc_port_spnotify
849  *	Purpose:
850  *		Generate send-possible port notifications.
851  *	Conditions:
852  *		Nothing locked, reference held on port.
853  */
void
ipc_port_spnotify(
	ipc_port_t      port)
{
	ipc_port_request_index_t index = 0;
	ipc_table_elems_t size = 0;

	/*
	 * If the port has no send-possible request
	 * armed, don't bother to lock the port.
	 */
	if (port->ip_sprequests == 0) {
		return;
	}

	ip_mq_lock(port);

#if IMPORTANCE_INHERITANCE
	if (port->ip_spimportant != 0) {
		port->ip_spimportant = 0;
		/* delta call may drop the port lock; TRUE means we must retake it */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
			ip_mq_lock(port);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* re-check: another thread may have delivered the notifications */
	if (port->ip_sprequests == 0) {
		ip_mq_unlock(port);
		return;
	}
	port->ip_sprequests = 0;

revalidate:
	if (ip_active(port)) {
		ipc_port_request_t requests;

		/* table may change each time port unlocked (reload) */
		requests = port->ip_requests;
		assert(requests != IPR_NULL);

		/*
		 * no need to go beyond table size when first
		 * we entered - those are future notifications.
		 */
		if (size == 0) {
			size = requests->ipr_size->its_size;
		}

		/* no need to backtrack either */
		while (++index < size) {
			ipc_port_request_t ipr = &requests[index];
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
			boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

			if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
				/* claim send-once right - slot still inuse */
				ipr->ipr_soright = IP_NULL;
				ip_mq_unlock(port);

				/* deliver unlocked, then revalidate the (possibly new) table */
				ipc_notify_send_possible(soright, name);

				ip_mq_lock(port);
				goto revalidate;
			}
		}
	}
	ip_mq_unlock(port);
	return;
}
924 
925 /*
926  *      Routine:	ipc_port_dnnotify
927  *	Purpose:
928  *		Generate dead name notifications for
929  *		all outstanding dead-name and send-
930  *		possible requests.
931  *	Conditions:
932  *		Nothing locked.
933  *		Port must be inactive.
934  *		Reference held on port.
935  */
936 void
ipc_port_dnnotify(ipc_port_t port)937 ipc_port_dnnotify(
938 	ipc_port_t      port)
939 {
940 	ipc_port_request_t requests = port->ip_requests;
941 
942 	assert(!ip_active(port));
943 	if (requests != IPR_NULL) {
944 		ipc_table_size_t its = requests->ipr_size;
945 		ipc_table_elems_t size = its->its_size;
946 		ipc_port_request_index_t index;
947 		for (index = 1; index < size; index++) {
948 			ipc_port_request_t ipr = &requests[index];
949 			mach_port_name_t name = ipr->ipr_name;
950 			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
951 
952 			if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
953 				ipc_notify_dead_name(soright, name);
954 			}
955 		}
956 	}
957 }
958 
959 /*
960  *	Routine:	ipc_port_destroy
961  *	Purpose:
962  *		Destroys a port.  Cleans up queued messages.
963  *
964  *		If the port has a backup, it doesn't get destroyed,
965  *		but is sent in a port-destroyed notification to the backup.
966  *	Conditions:
967  *		The port is locked and alive; nothing else locked.
968  *		The caller has a reference, which is consumed.
969  *		Afterwards, the port is unlocked and dead.
970  */
971 
void
ipc_port_destroy(ipc_port_t port)
{
	bool special_reply = port->ip_specialreply;
	bool service_port  = port->ip_service_port;
	bool reap_msgs;

	/* holds the armed port-destroyed notification right, if any */
	ipc_port_t pdrequest = IP_NULL;
	struct task_watchport_elem *twe = NULL;
	/* waitq links unlinked from port-sets; freed below after unlocking */
	waitq_link_list_t free_l = { };

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	require_ip_active(port);
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* clear any reply-port context */
	port->ip_reply_context = 0;

	/* must be done before we access ip_pdrequest (unioned with ip_twe) */
	twe = ipc_port_clear_watchport_elem_internal(port);
	assert(!port->ip_has_watchport);

	if (!special_reply) {
		/* we assume the ref for pdrequest */
		pdrequest = port->ip_pdrequest;
		port->ip_pdrequest = IP_NULL;
	} else if (port->ip_tempowner) {
		/* a special reply port must never be a tempowner */
		panic("ipc_port_destroy: invalid state");
	}

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = ip_get_imp_task(port);
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL) {
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
		}
	}

	if (top) {
		self->ith_assertions = assertcnt;
	}
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * If no port-destroyed notification is armed, calling
	 * ipc_port_clear_receiver() will mark the port inactive
	 * and will wakeup any threads which may be blocked receiving on it.
	 */
	reap_msgs = ipc_port_clear_receiver(port, pdrequest == IP_NULL, &free_l);
	assert(!ip_in_pset(port));
	assert(port->ip_mscount == 0);

	/*
	 * Handle port-destroyed notification: the port survives (stays
	 * active) and is sent to the backup instead of dying.
	 */
	if (pdrequest != IP_NULL) {
		assert(reap_msgs == false);

		if (service_port) {
			assert(port->ip_splabel != NULL);
			if (ipc_service_port_label_is_special_pdrequest((ipc_service_port_label_t)port->ip_splabel)) {
				ipc_service_port_label_set_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
			}
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
	} else {
		ipc_service_port_label_t splabel = NULL;
		ipc_notify_nsenders_t nsrequest;

		nsrequest = ipc_notify_no_senders_prepare(port);

		/* detach the service/connection port label (kobject labels stay) */
		if (!ip_is_kolabeled(port)) {
			splabel = port->ip_splabel;
			port->ip_splabel = NULL;
			port->ip_service_port = false;
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* unlink the kmsg from special reply port */
		if (special_reply) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
		}

		/* Deallocate the service/connection port label */
		if (splabel) {
			ipc_service_port_label_dealloc(splabel, service_port);
			splabel = NULL;
		}

		if (reap_msgs) {
			ipc_kmsg_reap_delayed();
		}

		if (nsrequest.ns_notify) {
			/*
			 * ipc_notify_no_senders_prepare will consume
			 * the reference for kobjects.
			 */
			assert(!nsrequest.ns_is_kobject);
			ip_mq_lock(nsrequest.ns_notify);
			ipc_notify_send_once_and_unlock(nsrequest.ns_notify); /* consumes ref */
		}

		/* generate dead-name notifications */
		ipc_port_dnnotify(port);

		ipc_kobject_destroy(port);

		ip_release(port); /* consume caller's ref */
	}

	if (twe) {
		task_watchport_elem_deallocate(twe);
		twe = NULL;
	}

	waitq_link_free_list(WQT_PORT_SET, &free_l);

#if IMPORTANCE_INHERITANCE
	if (release_imp_task != IIT_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);
	} else if (assertcnt > 0) {
		if (top) {
			self->ith_assertions = 0;
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
1135 
1136 /*
1137  *	Routine:	ipc_port_destination_chain_lock
1138  *	Purpose:
1139  *		Search for the end of the chain (a port not in transit),
1140  *		acquiring locks along the way, and return it in `base`.
1141  *
1142  *		Returns true if a reference was taken on `base`
1143  *
1144  *	Conditions:
1145  *		No ports locked.
1146  *		ipc_port_multiple_lock held.
1147  */
1148 boolean_t
ipc_port_destination_chain_lock(ipc_port_t port,ipc_port_t * base)1149 ipc_port_destination_chain_lock(
1150 	ipc_port_t port,
1151 	ipc_port_t *base)
1152 {
1153 	for (;;) {
1154 		ip_mq_lock(port);
1155 
1156 		if (!ip_active(port)) {
1157 			/*
1158 			 * Active ports that are ip_mq_lock()ed cannot go away.
1159 			 *
1160 			 * But inactive ports at the end of walking
1161 			 * an ip_destination chain are only protected
1162 			 * from space termination cleanup while the entire
1163 			 * chain of ports leading to them is held.
1164 			 *
1165 			 * Callers of this code tend to unlock the chain
1166 			 * in the same order than this walk which doesn't
1167 			 * protect `base` properly when it's inactive.
1168 			 *
1169 			 * In that case, take a reference that the caller
1170 			 * is responsible for releasing.
1171 			 */
1172 			ip_reference(port);
1173 			*base = port;
1174 			return true;
1175 		}
1176 
1177 		/* port is active */
1178 		if (!ip_in_transit(port)) {
1179 			*base = port;
1180 			return false;
1181 		}
1182 
1183 		port = ip_get_destination(port);
1184 	}
1185 }
1186 
1187 
1188 /*
1189  *	Routine:	ipc_port_check_circularity
1190  *	Purpose:
1191  *		Check if queueing "port" in a message for "dest"
1192  *		would create a circular group of ports and messages.
1193  *
1194  *		If no circularity (FALSE returned), then "port"
1195  *		is changed from "in limbo" to "in transit".
1196  *
1197  *		That is, we want to set port->ip_destination == dest,
1198  *		but guaranteeing that this doesn't create a circle
1199  *		port->ip_destination->ip_destination->... == port
1200  *
1201  *	Conditions:
1202  *		No ports locked.  References held for "port" and "dest".
1203  */
1204 
boolean_t
ipc_port_check_circularity(
	ipc_port_t      port,
	ipc_port_t      dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* trivial self-cycle */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 *	First try a quick check that can run in parallel.
	 *	No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);
	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 *	Search for the end of the chain (a port not in transit),
	 *	acquiring locks along the way.
	 */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */
		require_ip_active(port);
		assert(ip_in_limbo(port));
		/* an in-limbo base is active, so no ref was taken by the walk */
		assert(!took_base_ref);

		/* unwind: unlock every port in the chain from dest onward */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */
			require_ip_active(base);
			assert(!ip_in_a_space(base));

			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		/* drop the send-turnstile ref taken at the top */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 *	The guarantee:  lock port while the entire chain is locked.
	 *	Once port is locked, we can take a reference to dest,
	 *	add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	/* on entry here: port and dest are locked (fast path) or whole chain is */
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Clear the watchport boost */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/*
		 * What ipc_port_adjust_port_locked would do,
		 * but we need to also drop even more locks before
		 * calling turnstile_update_inheritor_complete().
		 */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	/* unlock every port from dest up to (but excluding) base */
	for (;;) {
		ipc_port_t next;

		if (dest == base) {
			break;
		}

		/* port is IN-TRANSIT */
		require_ip_active(dest);
		assert(ip_in_transit(dest));

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not IN-TRANSIT */
	assert(!ip_in_transit(base));

	ip_mq_unlock(base);
	if (took_base_ref) {
		/* ref taken by ipc_port_destination_chain_lock for inactive base */
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
1375 
1376 /*
1377  *	Routine:	ipc_port_watchport_elem
1378  *	Purpose:
1379  *		Get the port's watchport elem field
1380  *
1381  *	Conditions:
1382  *		port locked
1383  */
1384 static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)1385 ipc_port_watchport_elem(ipc_port_t port)
1386 {
1387 	if (port->ip_has_watchport) {
1388 		assert(!port->ip_specialreply);
1389 		return port->ip_twe;
1390 	}
1391 	return NULL;
1392 }
1393 
1394 /*
1395  *	Routine:	ipc_port_update_watchport_elem
1396  *	Purpose:
1397  *		Set the port's watchport elem field
1398  *
1399  *	Conditions:
1400  *		port locked and is not a special reply port.
1401  */
1402 static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port,struct task_watchport_elem * we)1403 ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
1404 {
1405 	struct task_watchport_elem *old_we;
1406 	ipc_port_t pdrequest;
1407 
1408 	assert(!port->ip_specialreply);
1409 
1410 	/*
1411 	 * Note: ip_pdrequest and ip_twe are unioned.
1412 	 *       and ip_has_watchport controls the union "type"
1413 	 */
1414 	if (port->ip_has_watchport) {
1415 		old_we = port->ip_twe;
1416 		pdrequest = old_we->twe_pdrequest;
1417 		old_we->twe_pdrequest = IP_NULL;
1418 	} else {
1419 		old_we = NULL;
1420 		pdrequest = port->ip_pdrequest;
1421 	}
1422 
1423 	if (we) {
1424 		port->ip_has_watchport = true;
1425 		we->twe_pdrequest = pdrequest;
1426 		port->ip_twe = we;
1427 	} else {
1428 		port->ip_has_watchport = false;
1429 		port->ip_pdrequest = pdrequest;
1430 	}
1431 
1432 	return old_we;
1433 }
1434 
1435 /*
1436  *	Routine:	ipc_special_reply_stash_pid_locked
1437  *	Purpose:
1438  *		Set the pid of process that copied out send once right to special reply port.
1439  *
1440  *	Conditions:
1441  *		port locked
1442  */
1443 static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port,int pid)1444 ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
1445 {
1446 	assert(port->ip_specialreply);
1447 	port->ip_pid = pid;
1448 }
1449 
1450 /*
1451  *	Routine:	ipc_special_reply_get_pid_locked
1452  *	Purpose:
1453  *		Get the pid of process that copied out send once right to special reply port.
1454  *
1455  *	Conditions:
1456  *		port locked
1457  */
1458 int
ipc_special_reply_get_pid_locked(ipc_port_t port)1459 ipc_special_reply_get_pid_locked(ipc_port_t port)
1460 {
1461 	assert(port->ip_specialreply);
1462 	return port->ip_pid;
1463 }
1464 
1465 /*
1466  * Update the recv turnstile inheritor for a port.
1467  *
1468  * Sync IPC through the port receive turnstile only happens for the special
1469  * reply port case. It has three sub-cases:
1470  *
1471  * 1. a send-once right is in transit, and pushes on the send turnstile of its
1472  *    destination mqueue.
1473  *
1474  * 2. a send-once right has been stashed on a knote it was copied out "through",
1475  *    as the first such copied out port.
1476  *
1477  * 3. a send-once right has been stashed on a knote it was copied out "through",
1478  *    as the second or more copied out port.
1479  */
1480 void
ipc_port_recv_update_inheritor(ipc_port_t port,struct turnstile * rcv_turnstile,turnstile_update_flags_t flags)1481 ipc_port_recv_update_inheritor(
1482 	ipc_port_t port,
1483 	struct turnstile *rcv_turnstile,
1484 	turnstile_update_flags_t flags)
1485 {
1486 	struct turnstile *inheritor = TURNSTILE_NULL;
1487 	struct knote *kn;
1488 
1489 	if (ip_active(port) && port->ip_specialreply) {
1490 		ip_mq_lock_held(port);
1491 
1492 		switch (port->ip_sync_link_state) {
1493 		case PORT_SYNC_LINK_PORT:
1494 			if (port->ip_sync_inheritor_port != NULL) {
1495 				inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1496 			}
1497 			break;
1498 
1499 		case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1500 			kn = port->ip_sync_inheritor_knote;
1501 			inheritor = filt_ipc_kqueue_turnstile(kn);
1502 			break;
1503 
1504 		case PORT_SYNC_LINK_WORKLOOP_STASH:
1505 			inheritor = port->ip_sync_inheritor_ts;
1506 			break;
1507 		}
1508 	}
1509 
1510 	turnstile_update_inheritor(rcv_turnstile, inheritor,
1511 	    flags | TURNSTILE_INHERITOR_TURNSTILE);
1512 }
1513 
1514 /*
1515  * Update the send turnstile inheritor for a port.
1516  *
1517  * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
1518  *
1519  * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
1520  *    to push on thread doing the sync ipc.
1521  *
1522  * 2. a receive right is in transit, and pushes on the send turnstile of its
1523  *    destination mqueue.
1524  *
1525  * 3. port was passed as an exec watchport and port is pushing on main thread
1526  *    of the task.
1527  *
1528  * 4. a receive right has been stashed on a knote it was copied out "through",
1529  *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
1530  *    for the special reply port)
1531  *
1532  * 5. a receive right has been stashed on a knote it was copied out "through",
1533  *    as the second or more copied out port (same as
1534  *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
1535  *
1536  * 6. a receive right has been copied out as a part of sync bootstrap checkin
1537  *    and needs to push on thread doing the sync bootstrap checkin.
1538  *
1539  * 7. the receive right is monitored by a knote, and pushes on any that is
1540  *    registered on a workloop. filt_machport makes sure that if such a knote
1541  *    exists, it is kept as the first item in the knote list, so we never need
1542  *    to walk.
1543  */
void
ipc_port_send_update_inheritor(
	ipc_port_t port,
	struct turnstile *send_turnstile,
	turnstile_update_flags_t flags)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct knote *kn;
	turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

	ip_mq_lock_held(port);

	/*
	 * The branches below are checked in strict priority order and the
	 * case numbers match the 7 reasons listed in the comment above.
	 */
	if (!ip_active(port)) {
		/* this port is no longer active, it should not push anywhere */
	} else if (port->ip_specialreply) {
		/* Case 1. */
		if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
			/* push on the thread doing the sync bootstrap checkin */
			inheritor = port->ip_messages.imq_srp_owner_thread;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (ip_in_transit(port)) {
		/* Case 2. */
		inheritor = port_send_turnstile(ip_get_destination(port));
	} else if (port->ip_has_watchport) {
		/* Case 3. */
		if (prioritize_launch) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = ipc_port_get_watchport_inheritor(port);
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
		/* Case 4. */
		inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
		/* Case 5. */
		inheritor = mqueue->imq_inheritor_turnstile;
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
		/* Case 6. */
		if (prioritize_launch) {
			inheritor = port->ip_messages.imq_inheritor_thread_ref;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if ((kn = SLIST_FIRST(&port->ip_klist))) {
		/* Case 7. Push on a workloop that is interested */
		if (filt_machport_kqueue_has_turnstile(kn)) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = filt_ipc_kqueue_turnstile(kn);
		}
	}

	turnstile_update_inheritor(send_turnstile, inheritor,
	    flags | inheritor_flags);
}
1598 
1599 /*
1600  *	Routine:	ipc_port_send_turnstile_prepare
1601  *	Purpose:
1602  *		Get a reference on port's send turnstile, if
1603  *		port does not have a send turnstile then allocate one.
1604  *
1605  *	Conditions:
1606  *		Nothing is locked.
1607  */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;
	struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
	ip_mq_lock(port);

	/*
	 * Install a turnstile if the port has none, or if the installed
	 * one has drained all its port refs (it is about to be torn down
	 * by ipc_port_send_turnstile_complete()).
	 */
	if (port_send_turnstile(port) == NULL ||
	    port_send_turnstile(port)->ts_port_ref == 0) {
		if (turnstile == TURNSTILE_NULL) {
			/*
			 * Allocation may block: drop the port lock, allocate,
			 * then retry the check with the turnstile in hand.
			 */
			ip_mq_unlock(port);
			turnstile = turnstile_alloc();
			goto retry_alloc;
		}

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    turnstile, TURNSTILE_SYNC_IPC);
		turnstile = TURNSTILE_NULL; /* consumed by turnstile_prepare */

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);

		/* turnstile complete will be called in ipc_port_send_turnstile_complete */
	}

	/* Increment turnstile counter */
	port_send_turnstile(port)->ts_port_ref++;
	ip_mq_unlock(port);

	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
	}
	if (turnstile != TURNSTILE_NULL) {
		/* someone else installed one while we allocated: free ours */
		turnstile_deallocate(turnstile);
	}
}
1648 
1649 
1650 /*
1651  *	Routine:	ipc_port_send_turnstile_complete
1652  *	Purpose:
1653  *		Drop a ref on the port's send turnstile, if the
1654  *		ref becomes zero, deallocate the turnstile.
1655  *
1656  *	Conditions:
1657  *		The space might be locked, use safe deallocate.
1658  */
1659 void
ipc_port_send_turnstile_complete(ipc_port_t port)1660 ipc_port_send_turnstile_complete(ipc_port_t port)
1661 {
1662 	struct turnstile *turnstile = TURNSTILE_NULL;
1663 
1664 	/* Drop turnstile count on dest port */
1665 	ip_mq_lock(port);
1666 
1667 	port_send_turnstile(port)->ts_port_ref--;
1668 	if (port_send_turnstile(port)->ts_port_ref == 0) {
1669 		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
1670 		    &turnstile, TURNSTILE_SYNC_IPC);
1671 		assert(turnstile != TURNSTILE_NULL);
1672 	}
1673 	ip_mq_unlock(port);
1674 	turnstile_cleanup();
1675 
1676 	if (turnstile != TURNSTILE_NULL) {
1677 		turnstile_deallocate_safe(turnstile);
1678 		turnstile = TURNSTILE_NULL;
1679 	}
1680 }
1681 
1682 /*
1683  *	Routine:	ipc_port_rcv_turnstile
1684  *	Purpose:
1685  *		Get the port's receive turnstile
1686  *
1687  *	Conditions:
1688  *		mqueue locked or thread waiting on turnstile is locked.
1689  */
1690 static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)1691 ipc_port_rcv_turnstile(ipc_port_t port)
1692 {
1693 	return *port_rcv_turnstile_address(port);
1694 }
1695 
1696 
1697 /*
1698  *	Routine:	ipc_port_link_special_reply_port
1699  *	Purpose:
1700  *		Link the special reply port with the destination port.
1701  *              Allocates turnstile to dest port.
1702  *
1703  *	Conditions:
1704  *		Nothing is locked.
1705  */
void
ipc_port_link_special_reply_port(
	ipc_port_t special_reply_port,
	ipc_port_t dest_port,
	boolean_t sync_bootstrap_checkin)
{
	boolean_t drop_turnstile_ref = FALSE;
	boolean_t special_reply = FALSE;

	/* Check if dest_port needs a turnstile (takes a turnstile ref) */
	ipc_port_send_turnstile_prepare(dest_port);

	/* Lock the special reply port and establish the linkage */
	ip_mq_lock(special_reply_port);

	special_reply = special_reply_port->ip_specialreply;

	if (sync_bootstrap_checkin && special_reply) {
		special_reply_port->ip_sync_bootstrap_checkin = 1;
	}

	/*
	 * Check if we need to drop the acquired turnstile ref on dest port:
	 * the linkage is only established for a genuine special reply port
	 * that is not already linked to anything.
	 */
	if (!special_reply ||
	    special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
	    special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
		drop_turnstile_ref = TRUE;
	} else {
		/* take a reference on dest_port; the turnstile ref is kept too */
		ip_reference(dest_port);
		special_reply_port->ip_sync_inheritor_port = dest_port;
		special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
	}

	ip_mq_unlock(special_reply_port);

	if (special_reply) {
		/*
		 * For special reply ports, if the destination port is
		 * marked with the thread group blocked tracking flag,
		 * callout to the performance controller.
		 */
		ipc_port_thread_group_blocked(dest_port);
	}

	if (drop_turnstile_ref) {
		ipc_port_send_turnstile_complete(dest_port);
	}

	return;
}
1756 
1757 /*
1758  *	Routine:	ipc_port_thread_group_blocked
1759  *	Purpose:
1760  *		Call thread_group_blocked callout if the port
1761  *	        has ip_tg_block_tracking bit set and the thread
1762  *	        has not made this callout already.
1763  *
1764  *	Conditions:
1765  *		Nothing is locked.
1766  */
1767 void
ipc_port_thread_group_blocked(ipc_port_t port __unused)1768 ipc_port_thread_group_blocked(ipc_port_t port __unused)
1769 {
1770 #if CONFIG_THREAD_GROUPS
1771 	bool port_tg_block_tracking = false;
1772 	thread_t self = current_thread();
1773 
1774 	if (self->thread_group == NULL ||
1775 	    (self->options & TH_OPT_IPC_TG_BLOCKED)) {
1776 		return;
1777 	}
1778 
1779 	port_tg_block_tracking = port->ip_tg_block_tracking;
1780 	if (!port_tg_block_tracking) {
1781 		return;
1782 	}
1783 
1784 	machine_thread_group_blocked(self->thread_group, NULL,
1785 	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1786 
1787 	self->options |= TH_OPT_IPC_TG_BLOCKED;
1788 #endif
1789 }
1790 
1791 /*
1792  *	Routine:	ipc_port_thread_group_unblocked
1793  *	Purpose:
1794  *		Call thread_group_unblocked callout if the
1795  *		thread had previously made a thread_group_blocked
1796  *		callout before (indicated by TH_OPT_IPC_TG_BLOCKED
1797  *		flag on the thread).
1798  *
1799  *	Conditions:
1800  *		Nothing is locked.
1801  */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
	thread_t self = current_thread();

	/* only undo a blocked callout this thread actually made */
	if (self->options & TH_OPT_IPC_TG_BLOCKED) {
		machine_thread_group_unblocked(self->thread_group, NULL,
		    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

		self->options &= ~TH_OPT_IPC_TG_BLOCKED;
	}
#endif
}
1818 
1819 #if DEVELOPMENT || DEBUG
1820 inline void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)1821 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1822 {
1823 	special_reply_port->ip_srp_lost_link = 0;
1824 	special_reply_port->ip_srp_msg_sent = 0;
1825 }
1826 
1827 static inline void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)1828 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1829 {
1830 	if (special_reply_port->ip_specialreply == 1) {
1831 		special_reply_port->ip_srp_msg_sent = 0;
1832 	}
1833 }
1834 
1835 inline void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)1836 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1837 {
1838 	if (special_reply_port->ip_specialreply == 1) {
1839 		special_reply_port->ip_srp_msg_sent = 1;
1840 	}
1841 }
1842 
1843 static inline void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)1844 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1845 {
1846 	if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
1847 		special_reply_port->ip_srp_lost_link = 1;
1848 	}
1849 }
1850 
1851 #else /* DEVELOPMENT || DEBUG */
1852 inline void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)1853 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
1854 {
1855 	return;
1856 }
1857 
1858 static inline void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)1859 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
1860 {
1861 	return;
1862 }
1863 
1864 inline void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)1865 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
1866 {
1867 	return;
1868 }
1869 
1870 static inline void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)1871 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
1872 {
1873 	return;
1874 }
1875 #endif /* DEVELOPMENT || DEBUG */
1876 
1877 /*
1878  *	Routine:	ipc_port_adjust_special_reply_port_locked
1879  *	Purpose:
1880  *		If the special port has a turnstile, update its inheritor.
1881  *	Condition:
1882  *		Special reply port locked on entry.
1883  *		Special reply port unlocked on return.
1884  *		The passed in port is a special reply port.
1885  *	Returns:
1886  *		None.
1887  */
void
ipc_port_adjust_special_reply_port_locked(
	ipc_port_t special_reply_port,
	struct knote *kn,
	uint8_t flags,
	boolean_t get_turnstile)
{
	ipc_port_t dest_port = IPC_PORT_NULL;
	int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct turnstile *ts = TURNSTILE_NULL;
	struct turnstile *port_stashed_turnstile = TURNSTILE_NULL;

	ip_mq_lock_held(special_reply_port); // ip_sync_link_state is touched

	if (!special_reply_port->ip_specialreply) {
		// only mach_msg_receive_results_complete() calls this with any port
		assert(get_turnstile);
		/* jumps into the "marked non-special" early-exit path below */
		goto not_special;
	}

	if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
		ipc_special_reply_port_msg_sent_reset(special_reply_port);
	}

	if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
		special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
	}

	if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
		special_reply_port->ip_sync_bootstrap_checkin = 0;
	}

	/* Check if the special reply port is marked non-special */
	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
not_special:
		if (get_turnstile) {
			/* return the thread-donated turnstile before unlocking */
			turnstile_complete((uintptr_t)special_reply_port,
			    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
		}
		ip_mq_unlock(special_reply_port);
		if (get_turnstile) {
			turnstile_cleanup();
		}
		return;
	}

	if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
		if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
			/* stash the port on the knote; may upgrade sync_link_state */
			inheritor = filt_machport_stash_port(kn, special_reply_port,
			    &sync_link_state);
		}
	} else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	/* Check if need to break linkage */
	if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
	    special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
		ip_mq_unlock(special_reply_port);
		return;
	}

	/* tear down the old linkage, remembering what to release after unlock */
	switch (special_reply_port->ip_sync_link_state) {
	case PORT_SYNC_LINK_PORT:
		dest_port = special_reply_port->ip_sync_inheritor_port;
		special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = NULL;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		port_stashed_turnstile = special_reply_port->ip_sync_inheritor_ts;
		special_reply_port->ip_sync_inheritor_ts = NULL;
		break;
	}

	/*
	 * Stash (or unstash) the server's PID in the ip_pid field of the
	 * special reply port, so that stackshot can later retrieve who the client
	 * is blocked on.
	 */
	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
	    sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
		ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
	} else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
	    sync_link_state == PORT_SYNC_LINK_ANY) {
		/* If we are resetting the special reply port, remove the stashed pid. */
		ipc_special_reply_stash_pid_locked(special_reply_port, 0);
	}

	special_reply_port->ip_sync_link_state = sync_link_state;

	/* establish the new linkage */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		special_reply_port->ip_sync_inheritor_knote = kn;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		turnstile_reference(inheritor);
		special_reply_port->ip_sync_inheritor_ts = inheritor;
		break;
	case PORT_SYNC_LINK_NO_LINKAGE:
		if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
			ipc_special_reply_port_lost_link(special_reply_port);
		}
		break;
	}

	/* Get thread's turnstile donated to special reply port */
	if (get_turnstile) {
		turnstile_complete((uintptr_t)special_reply_port,
		    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
	} else {
		ts = ipc_port_rcv_turnstile(special_reply_port);
		if (ts) {
			turnstile_reference(ts);
			ipc_port_recv_update_inheritor(special_reply_port, ts,
			    TURNSTILE_IMMEDIATE_UPDATE);
		}
	}

	ip_mq_unlock(special_reply_port);

	if (get_turnstile) {
		turnstile_cleanup();
	} else if (ts) {
		/* Call turnstile cleanup after dropping the interlock */
		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(ts);
	}

	if (port_stashed_turnstile) {
		turnstile_deallocate_safe(port_stashed_turnstile);
	}

	/* Release the ref on the dest port and its turnstile */
	if (dest_port) {
		ipc_port_send_turnstile_complete(dest_port);
		/* release the reference on the dest port, space lock might be held */
		ip_release_safe(dest_port);
	}
}
2030 
2031 /*
2032  *	Routine:	ipc_port_adjust_special_reply_port
2033  *	Purpose:
2034  *		If the special port has a turnstile, update its inheritor.
2035  *	Condition:
2036  *		Nothing locked.
2037  *	Returns:
2038  *		None.
2039  */
2040 void
ipc_port_adjust_special_reply_port(ipc_port_t port,uint8_t flags)2041 ipc_port_adjust_special_reply_port(
2042 	ipc_port_t port,
2043 	uint8_t flags)
2044 {
2045 	if (port->ip_specialreply) {
2046 		ip_mq_lock(port);
2047 		ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2048 	}
2049 }
2050 
2051 /*
2052  *	Routine:	ipc_port_adjust_sync_link_state_locked
2053  *	Purpose:
2054  *		Update the sync link state of the port and the
2055  *		turnstile inheritor.
2056  *	Condition:
2057  *		Port locked on entry.
2058  *		Port locked on return.
2059  *	Returns:
2060  *              None.
2061  */
void
ipc_port_adjust_sync_link_state_locked(
	ipc_port_t port,
	int sync_link_state,
	turnstile_inheritor_t inheritor)
{
	/* Drop the reference pinned by the previous linkage, if it held one. */
	switch (port->ip_sync_link_state) {
	case PORT_SYNC_LINK_RCV_THREAD:
		/* deallocate the thread reference for the inheritor */
		thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* deallocate the turnstile reference for the inheritor */
		turnstile_deallocate_safe(port->ip_messages.imq_inheritor_turnstile);
		break;
	}

	/*
	 * NOTE(review): ip_klist appears to overlay the imq_inheritor_* fields,
	 * so re-initializing the klist also clears the stale inheritor value —
	 * confirm against the struct definition.
	 */
	klist_init(&port->ip_klist);

	/* Install the new linkage, taking whatever reference it requires. */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		/* knote lifetime is managed by its workloop: no extra ref taken */
		port->ip_messages.imq_inheritor_knote = inheritor;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* knote can be deleted by userspace, take a reference on turnstile */
		turnstile_reference(inheritor);
		port->ip_messages.imq_inheritor_turnstile = inheritor;
		break;
	case PORT_SYNC_LINK_RCV_THREAD:
		/* The thread could exit without clearing port state, take a thread ref */
		thread_reference((thread_t)inheritor);
		port->ip_messages.imq_inheritor_thread_ref = inheritor;
		break;
	default:
		/* Anything else degrades to no linkage at all. */
		klist_init(&port->ip_klist);
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	port->ip_sync_link_state = sync_link_state;
}
2102 
2103 
2104 /*
2105  *	Routine:	ipc_port_adjust_port_locked
2106  *	Purpose:
2107  *		If the port has a turnstile, update its inheritor.
2108  *	Condition:
2109  *		Port locked on entry.
2110  *		Port unlocked on return.
2111  *	Returns:
2112  *		None.
2113  */
void
ipc_port_adjust_port_locked(
	ipc_port_t port,
	struct knote *kn,
	boolean_t sync_bootstrap_checkin)
{
	int sync_link_state = PORT_SYNC_LINK_ANY;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

	ip_mq_lock_held(port); // ip_sync_link_state is touched
	assert(!port->ip_specialreply);

	if (kn) {
		/* Stash the port on the knote; the filter chooses the link state. */
		inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
		if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
			/* push directly on the knote rather than a stashed turnstile */
			inheritor = kn;
		}
	} else if (sync_bootstrap_checkin) {
		/* No knote: boost the thread performing the bootstrap check-in. */
		inheritor = current_thread();
		sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
	}

	ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
	port->ip_sync_bootstrap_checkin = 0;

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2142 
2143 /*
2144  *	Routine:	ipc_port_clear_sync_rcv_thread_boost_locked
2145  *	Purpose:
2146  *		If the port is pushing on rcv thread, clear it.
2147  *	Condition:
2148  *		Port locked on entry
2149  *		Port unlocked on return.
2150  *	Returns:
2151  *		None.
2152  */
void
ipc_port_clear_sync_rcv_thread_boost_locked(
	ipc_port_t port)
{
	ip_mq_lock_held(port); // ip_sync_link_state is touched

	if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
		/* Not pushing on a receive thread: nothing to clear. */
		ip_mq_unlock(port);
		return;
	}

	/* Drops the thread reference held by the RCV_THREAD linkage. */
	ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2169 
2170 /*
2171  *	Routine:	ipc_port_has_prdrequest
2172  *	Purpose:
2173  *		Returns whether a port has a port-destroyed request armed
2174  *	Condition:
2175  *		Port is locked.
2176  */
2177 bool
ipc_port_has_prdrequest(ipc_port_t port)2178 ipc_port_has_prdrequest(
2179 	ipc_port_t port)
2180 {
2181 	if (port->ip_specialreply) {
2182 		return false;
2183 	}
2184 	if (port->ip_has_watchport) {
2185 		return port->ip_twe->twe_pdrequest != IP_NULL;
2186 	}
2187 	return port->ip_pdrequest != IP_NULL;
2188 }
2189 
2190 /*
2191  *	Routine:	ipc_port_add_watchport_elem_locked
2192  *	Purpose:
2193  *		Transfer the turnstile boost of watchport to task calling exec.
2194  *	Condition:
2195  *		Port locked on entry.
2196  *		Port unlocked on return.
2197  *	Returns:
2198  *		KERN_SUCESS on success.
2199  *		KERN_FAILURE otherwise.
2200  */
kern_return_t
ipc_port_add_watchport_elem_locked(
	ipc_port_t                 port,
	struct task_watchport_elem *watchport_elem,
	struct task_watchport_elem **old_elem)
{
	ip_mq_lock_held(port);

	/* Watchport boost only works for non-special active ports mapped in an ipc space */
	if (!ip_active(port) || port->ip_specialreply || !ip_in_a_space(port)) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
		/* Sever the linkage if the port was pushing on knote */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
	}

	/* Install the new element; the displaced one is handed back to the caller. */
	*old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2226 
2227 /*
2228  *	Routine:	ipc_port_clear_watchport_elem_internal_conditional_locked
2229  *	Purpose:
2230  *		Remove the turnstile boost of watchport and recompute the push.
2231  *	Condition:
2232  *		Port locked on entry.
2233  *		Port unlocked on return.
2234  *	Returns:
2235  *		KERN_SUCESS on success.
2236  *		KERN_FAILURE otherwise.
2237  */
kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(
	ipc_port_t                 port,
	struct task_watchport_elem *watchport_elem)
{
	ip_mq_lock_held(port);

	/* Only clear if the port still holds the element the caller expects. */
	if (ipc_port_watchport_elem(port) != watchport_elem) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	ipc_port_clear_watchport_elem_internal(port);
	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2255 
2256 /*
2257  *	Routine:	ipc_port_replace_watchport_elem_conditional_locked
2258  *	Purpose:
2259  *		Replace the turnstile boost of watchport and recompute the push.
2260  *	Condition:
2261  *		Port locked on entry.
2262  *		Port unlocked on return.
2263  *	Returns:
2264  *		KERN_SUCESS on success.
2265  *		KERN_FAILURE otherwise.
2266  */
kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(
	ipc_port_t                 port,
	struct task_watchport_elem *old_watchport_elem,
	struct task_watchport_elem *new_watchport_elem)
{
	ip_mq_lock_held(port);

	/*
	 * Fail if this is a special reply port, or if another thread already
	 * swapped out the element the caller believes is installed.
	 */
	if (port->ip_specialreply ||
	    ipc_port_watchport_elem(port) != old_watchport_elem) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	ipc_port_update_watchport_elem(port, new_watchport_elem);
	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2286 
2287 /*
2288  *	Routine:	ipc_port_clear_watchport_elem_internal
2289  *	Purpose:
2290  *		Remove the turnstile boost of watchport.
2291  *	Condition:
2292  *		Port locked on entry.
2293  *		Port locked on return.
2294  *	Returns:
2295  *		Old task_watchport_elem returned.
2296  */
2297 struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(ipc_port_t port)2298 ipc_port_clear_watchport_elem_internal(
2299 	ipc_port_t                 port)
2300 {
2301 	ip_mq_lock_held(port);
2302 
2303 	if (!port->ip_has_watchport) {
2304 		return NULL;
2305 	}
2306 
2307 	return ipc_port_update_watchport_elem(port, NULL);
2308 }
2309 
2310 /*
2311  *	Routine:	ipc_port_send_turnstile_recompute_push_locked
2312  *	Purpose:
2313  *		Update send turnstile inheritor of port and recompute the push.
2314  *	Condition:
2315  *		Port locked on entry.
2316  *		Port unlocked on return.
2317  *	Returns:
2318  *		None.
2319  */
static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t port)
{
	struct turnstile *send_turnstile = port_send_turnstile(port);
	if (send_turnstile) {
		/* Keep the turnstile alive across the unlock below. */
		turnstile_reference(send_turnstile);
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
	}
	ip_mq_unlock(port);

	if (send_turnstile) {
		/* Finish propagating the push now that the port lock is dropped. */
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(send_turnstile);
	}
}
2338 
2339 /*
2340  *	Routine:	ipc_port_get_watchport_inheritor
2341  *	Purpose:
2342  *		Returns inheritor for watchport.
2343  *
2344  *	Conditions:
2345  *		mqueue locked.
2346  *	Returns:
2347  *		watchport inheritor.
2348  */
static thread_t
ipc_port_get_watchport_inheritor(
	ipc_port_t port)
{
	ip_mq_lock_held(port);
	/*
	 * NOTE(review): dereferences the watchport element unconditionally —
	 * callers presumably only invoke this when port->ip_has_watchport is
	 * set; confirm at call sites.
	 */
	return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
}
2356 
2357 /*
2358  *	Routine:	ipc_port_get_receiver_task
2359  *	Purpose:
2360  *		Returns receiver task pointer and its pid (if any) for port.
2361  *
2362  *	Conditions:
2363  *		Nothing locked. The routine takes port lock.
2364  */
2365 pid_t
ipc_port_get_receiver_task(ipc_port_t port,uintptr_t * task)2366 ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
2367 {
2368 	task_t receiver = TASK_NULL;
2369 	pid_t pid = -1;
2370 
2371 	if (!port) {
2372 		goto out;
2373 	}
2374 
2375 	ip_mq_lock(port);
2376 	if (ip_in_a_space(port) &&
2377 	    !ip_in_space(port, ipc_space_kernel) &&
2378 	    !ip_in_space(port, ipc_space_reply)) {
2379 		receiver = port->ip_receiver->is_task;
2380 		pid = task_pid(receiver);
2381 	}
2382 	ip_mq_unlock(port);
2383 
2384 out:
2385 	if (task) {
2386 		*task = (uintptr_t)receiver;
2387 	}
2388 	return pid;
2389 }
2390 
2391 /*
2392  *	Routine:	ipc_port_impcount_delta
2393  *	Purpose:
2394  *		Adjust only the importance count associated with a port.
2395  *		If there are any adjustments to be made to receiver task,
2396  *		those are handled elsewhere.
2397  *
2398  *		For now, be defensive during deductions to make sure the
2399  *		impcount for the port doesn't underflow zero.  This will
2400  *		go away when the port boost addition is made atomic (see
2401  *		note in ipc_port_importance_delta()).
2402  *	Conditions:
2403  *		The port is referenced and locked.
2404  *		Nothing else is locked.
2405  */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t        port,
	mach_port_delta_t delta,
	ipc_port_t        __unused base)
{
	mach_port_delta_t absdelta;

	/* Inactive ports carry no assertions; report zero applied. */
	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	/* Deduction: clamp at zero rather than letting the count underflow. */
	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	/* Over-release: log which receiving task is affected, if known. */
	if (ip_in_a_space(port)) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    ip_get_receiver_name(port),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		/* Port is in transit; report against the base of the chain instead. */
		assert(ip_in_a_space(base));
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    ip_get_receiver_name(base),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif

	/* Report only what could actually be dropped (a clamped, smaller delta). */
	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
2477 
2478 /*
2479  *	Routine:	ipc_port_importance_delta_internal
2480  *	Purpose:
2481  *		Adjust the importance count through the given port.
2482  *		If the port is in transit, apply the delta throughout
2483  *		the chain. Determine if the there is a task at the
2484  *		base of the chain that wants/needs to be adjusted,
2485  *		and if so, apply the delta.
2486  *	Conditions:
2487  *		The port is referenced and locked on entry.
2488  *		Importance may be locked.
2489  *		Nothing else is locked.
2490  *		The lock may be dropped on exit.
2491  *		Returns TRUE if lock was dropped.
2492  */
2493 #if IMPORTANCE_INHERITANCE
2494 
boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       *deltap,
	ipc_importance_task_t   *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	*imp_task = IIT_NULL;

	/* A zero delta is a no-op; the lock is never dropped in that case. */
	if (*deltap == 0) {
		return FALSE;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_in_transit(port)) {
		dropped = true;


		ip_mq_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		/* Re-locks port and every port down to (and including) base. */
		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		/* delta may shrink if a port's count clamps at zero */
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		assert(ip_in_transit(port));
		next = ip_get_destination(port);
		ip_mq_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				*imp_task = ip_get_imp_task(base);
			}
			/* otherwise don't boost */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked.  If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_mq_unlock(base);
		if (took_base_ref) {
			/* importance lock might be held */
			ip_release_safe(base);
		}
	}

	return dropped;
}
2603 #endif /* IMPORTANCE_INHERITANCE */
2604 
2605 /*
2606  *	Routine:	ipc_port_importance_delta
2607  *	Purpose:
2608  *		Adjust the importance count through the given port.
2609  *		If the port is in transit, apply the delta throughout
2610  *		the chain.
2611  *
2612  *		If there is a task at the base of the chain that wants/needs
2613  *		to be adjusted, apply the delta.
2614  *	Conditions:
2615  *		The port is referenced and locked on entry.
2616  *		Nothing else is locked.
2617  *		The lock may be dropped on exit.
2618  *		Returns TRUE if lock was dropped.
2619  */
2620 #if IMPORTANCE_INHERITANCE
2621 
boolean_t
ipc_port_importance_delta(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	/* Apply the delta along the chain; may drop (and report) the port lock. */
	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	/* Done if there is no task to adjust or the whole delta was absorbed. */
	if (IIT_NULL == imp_task || delta == 0) {
		return dropped;
	}

	/* Task assertions cannot be adjusted while holding the port lock. */
	if (!dropped) {
		ip_mq_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	/* Drop the reference taken for us by the internal routine. */
	ipc_importance_task_release(imp_task);
	return TRUE;
}
2652 #endif /* IMPORTANCE_INHERITANCE */
2653 
2654 /*
2655  *	Routine:	ipc_port_make_send_locked
2656  *	Purpose:
2657  *		Make a naked send right from a receive right.
2658  *
2659  *		See ipc_port_make_send for more extensive documentation.
2660  *
2661  *	Conditions:
2662  *		port locked and active.
2663  */
2664 ipc_port_t
ipc_port_make_send_locked(ipc_port_t port)2665 ipc_port_make_send_locked(
2666 	ipc_port_t      port)
2667 {
2668 	require_ip_active(port);
2669 	port->ip_mscount++;
2670 	port->ip_srights++;
2671 	ip_reference(port);
2672 	return port;
2673 }
2674 
2675 /*
2676  *	Routine:	ipc_port_make_send
2677  *	Purpose:
2678  *		Make a naked send right from a receive right.
2679  *
2680  *		ipc_port_make_send should not be used in any generic IPC
2681  *		plumbing, as this is an operation that subsystem
2682  *		owners need to be able to synchronize against
2683  *		with the make-send-count and no-senders notifications.
2684  *
2685  *		It is especially important for kobject types,
2686  *		and in general MIG upcalls or replies from the kernel
2687  *		should never use MAKE_SEND dispositions, and prefer
2688  *		COPY_SEND or MOVE_SEND, so that subsystems can control
2689  *		where that send right comes from.
2690  */
2691 ipc_port_t
ipc_port_make_send(ipc_port_t port)2692 ipc_port_make_send(
2693 	ipc_port_t      port)
2694 {
2695 	if (!IP_VALID(port)) {
2696 		return port;
2697 	}
2698 
2699 	ip_mq_lock(port);
2700 	if (ip_active(port)) {
2701 		ipc_port_make_send_locked(port);
2702 		ip_mq_unlock(port);
2703 		return port;
2704 	}
2705 	ip_mq_unlock(port);
2706 	return IP_DEAD;
2707 }
2708 
2709 /*
2710  *	Routine:	ipc_port_copy_send_locked
2711  *	Purpose:
2712  *		Make a naked send right from another naked send right.
2713  *	Conditions:
2714  *		port locked.
2715  */
2716 void
ipc_port_copy_send_locked(ipc_port_t port)2717 ipc_port_copy_send_locked(
2718 	ipc_port_t      port)
2719 {
2720 	assert(port->ip_srights > 0);
2721 	port->ip_srights++;
2722 	ip_reference(port);
2723 }
2724 
2725 /*
2726  *	Routine:	ipc_port_copy_send
2727  *	Purpose:
2728  *		Make a naked send right from another naked send right.
2729  *			IP_NULL		-> IP_NULL
2730  *			IP_DEAD		-> IP_DEAD
2731  *			dead port	-> IP_DEAD
2732  *			live port	-> port + ref
2733  *	Conditions:
2734  *		Nothing locked except possibly a space.
2735  */
2736 
2737 ipc_port_t
ipc_port_copy_send(ipc_port_t port)2738 ipc_port_copy_send(
2739 	ipc_port_t      port)
2740 {
2741 	ipc_port_t sright;
2742 
2743 	if (!IP_VALID(port)) {
2744 		return port;
2745 	}
2746 
2747 	ip_mq_lock(port);
2748 	if (ip_active(port)) {
2749 		ipc_port_copy_send_locked(port);
2750 		sright = port;
2751 	} else {
2752 		sright = IP_DEAD;
2753 	}
2754 	ip_mq_unlock(port);
2755 
2756 	return sright;
2757 }
2758 
2759 /*
2760  *	Routine:	ipc_port_copyout_send
2761  *	Purpose:
2762  *		Copyout a naked send right (possibly null/dead),
2763  *		or if that fails, destroy the right.
2764  *	Conditions:
2765  *		Nothing locked.
2766  */
2767 
2768 static mach_port_name_t
ipc_port_copyout_send_internal(ipc_port_t sright,ipc_space_t space,ipc_object_copyout_flags_t flags)2769 ipc_port_copyout_send_internal(
2770 	ipc_port_t      sright,
2771 	ipc_space_t     space,
2772 	ipc_object_copyout_flags_t flags)
2773 {
2774 	mach_port_name_t name;
2775 
2776 	if (IP_VALID(sright)) {
2777 		kern_return_t kr;
2778 
2779 		kr = ipc_object_copyout(space, ip_to_object(sright),
2780 		    MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, &name);
2781 		if (kr != KERN_SUCCESS) {
2782 			if (kr == KERN_INVALID_CAPABILITY) {
2783 				name = MACH_PORT_DEAD;
2784 			} else {
2785 				name = MACH_PORT_NULL;
2786 			}
2787 		}
2788 	} else {
2789 		name = CAST_MACH_PORT_TO_NAME(sright);
2790 	}
2791 
2792 	return name;
2793 }
2794 
2795 mach_port_name_t
ipc_port_copyout_send(ipc_port_t sright,ipc_space_t space)2796 ipc_port_copyout_send(
2797 	ipc_port_t      sright, /* can be invalid */
2798 	ipc_space_t     space)
2799 {
2800 	return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2801 }
2802 
2803 /* Used by pthread kext to copyout thread port only */
2804 mach_port_name_t
ipc_port_copyout_send_pinned(ipc_port_t sright,ipc_space_t space)2805 ipc_port_copyout_send_pinned(
2806 	ipc_port_t      sright, /* can be invalid */
2807 	ipc_space_t     space)
2808 {
2809 	assert(space->is_task != TASK_NULL);
2810 
2811 	if (IP_VALID(sright)) {
2812 		assert(ip_kotype(sright) == IKOT_THREAD_CONTROL);
2813 	}
2814 
2815 	if (task_is_pinned(space->is_task)) {
2816 		return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
2817 	} else {
2818 		return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2819 	}
2820 }
2821 
2822 /*
2823  *	Routine:	ipc_port_release_send_and_unlock
2824  *	Purpose:
2825  *		Release a naked send right.
2826  *		Consumes a ref for the port.
2827  *	Conditions:
2828  *		Port is valid and locked on entry
2829  *		Port is unlocked on exit.
2830  */
void
ipc_port_release_send_and_unlock(
	ipc_port_t      port)
{
	ipc_notify_nsenders_t nsrequest = { };

	if (port->ip_srights == 0) {
		panic("Over-release of port %p send right!", port);
	}
	port->ip_srights--;

	/* Dropping the last send right may arm a no-senders notification. */
	if (ip_active(port) && port->ip_srights == 0) {
		nsrequest = ipc_notify_no_senders_prepare(port);
	}

	ip_mq_unlock(port);
	ip_release(port);

	/* Emit the notification (if any) only after the port lock is dropped. */
	ipc_notify_no_senders_emit(nsrequest);
}
2851 
2852 /*
2853  *	Routine:	ipc_port_release_send
2854  *	Purpose:
2855  *		Release a naked send right.
2856  *		Consumes a ref for the port.
2857  *	Conditions:
2858  *		Nothing locked.
2859  */
2860 
2861 void
ipc_port_release_send(ipc_port_t port)2862 ipc_port_release_send(
2863 	ipc_port_t      port)
2864 {
2865 	if (IP_VALID(port)) {
2866 		ip_mq_lock(port);
2867 		ipc_port_release_send_and_unlock(port);
2868 	}
2869 }
2870 
2871 /*
2872  *	Routine:	ipc_port_make_sonce_locked
2873  *	Purpose:
2874  *		Make a naked send-once right from a receive right.
2875  *	Conditions:
2876  *		The port is locked and active.
2877  */
2878 
2879 ipc_port_t
ipc_port_make_sonce_locked(ipc_port_t port)2880 ipc_port_make_sonce_locked(
2881 	ipc_port_t      port)
2882 {
2883 	require_ip_active(port);
2884 	port->ip_sorights++;
2885 	ip_reference(port);
2886 	return port;
2887 }
2888 
2889 /*
2890  *	Routine:	ipc_port_make_sonce
2891  *	Purpose:
2892  *		Make a naked send-once right from a receive right.
2893  *	Conditions:
2894  *		The port is not locked.
2895  */
2896 
2897 ipc_port_t
ipc_port_make_sonce(ipc_port_t port)2898 ipc_port_make_sonce(
2899 	ipc_port_t      port)
2900 {
2901 	if (!IP_VALID(port)) {
2902 		return port;
2903 	}
2904 
2905 	ip_mq_lock(port);
2906 	if (ip_active(port)) {
2907 		ipc_port_make_sonce_locked(port);
2908 		ip_mq_unlock(port);
2909 		return port;
2910 	}
2911 	ip_mq_unlock(port);
2912 	return IP_DEAD;
2913 }
2914 
2915 /*
2916  *	Routine:	ipc_port_release_sonce
2917  *	Purpose:
2918  *		Release a naked send-once right.
2919  *		Consumes a ref for the port.
2920  *
2921  *		In normal situations, this is never used.
2922  *		Send-once rights are only consumed when
2923  *		a message (possibly a send-once notification)
2924  *		is sent to them.
2925  *	Conditions:
2926  *		The port is locked, possibly a space too.
2927  */
void
ipc_port_release_sonce_and_unlock(
	ipc_port_t      port)
{
	ip_mq_lock_held(port);

	if (port->ip_sorights == 0) {
		panic("Over-release of port %p send-once right!", port);
	}

	port->ip_sorights--;

	if (port->ip_specialreply) {
		/* Unhook any sync-IPC linkage; this also unlocks the port. */
		ipc_port_adjust_special_reply_port_locked(port, NULL,
		    IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN, FALSE);
	} else {
		ip_mq_unlock(port);
	}

	/* Drop the reference carried by the send-once right. */
	ip_release(port);
}
2949 
2950 /*
2951  *	Routine:	ipc_port_release_sonce
2952  *	Purpose:
2953  *		Release a naked send-once right.
2954  *		Consumes a ref for the port.
2955  *
2956  *		In normal situations, this is never used.
2957  *		Send-once rights are only consumed when
2958  *		a message (possibly a send-once notification)
2959  *		is sent to them.
2960  *	Conditions:
2961  *		Nothing locked except possibly a space.
2962  */
2963 void
ipc_port_release_sonce(ipc_port_t port)2964 ipc_port_release_sonce(
2965 	ipc_port_t      port)
2966 {
2967 	if (IP_VALID(port)) {
2968 		ip_mq_lock(port);
2969 		ipc_port_release_sonce_and_unlock(port);
2970 	}
2971 }
2972 
2973 /*
2974  *	Routine:	ipc_port_release_receive
2975  *	Purpose:
2976  *		Release a naked (in limbo or in transit) receive right.
2977  *		Consumes a ref for the port; destroys the port.
2978  *	Conditions:
2979  *		Nothing locked.
2980  */
2981 
void
ipc_port_release_receive(
	ipc_port_t      port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_mq_lock(port);
	require_ip_active(port);
	/* must be a naked right: in limbo or in transit, not mapped in a space */
	assert(!ip_in_a_space(port));
	dest = ip_get_destination(port);

	ipc_port_destroy(port); /* consumes ref, unlocks */

	if (dest != IP_NULL) {
		/* sever the send-turnstile linkage to, and ref on, the old destination */
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
3004 
3005 /*
3006  *	Routine:	ipc_port_alloc_special
3007  *	Purpose:
3008  *		Allocate a port in a special space.
3009  *		The new port is returned with one ref.
3010  *		If unsuccessful, IP_NULL is returned.
3011  *	Conditions:
3012  *		Nothing locked.
3013  */
3014 
ipc_port_t
ipc_port_alloc_special(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags)
{
	ipc_port_t port;

	port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
	if (port == IP_NULL) {
		return IP_NULL;
	}

	/* Mark active, of port type, with a single reference for the caller. */
	os_atomic_init(&port->ip_object.io_bits, io_makebits(TRUE, IOT_PORT, 0));
	os_atomic_init(&port->ip_object.io_references, 1);

	ipc_port_init(port, space, flags, MACH_PORT_SPECIAL_DEFAULT);
	return port;
}
3033 
3034 /*
3035  *	Routine:	ipc_port_dealloc_special_and_unlock
3036  *	Purpose:
3037  *		Deallocate a port in a special space.
3038  *		Consumes one ref for the port.
3039  *	Conditions:
3040  *		Port is locked.
3041  */
3042 
void
ipc_port_dealloc_special_and_unlock(
	ipc_port_t                      port,
	__assert_only ipc_space_t       space)
{
	require_ip_active(port);
//	assert(port->ip_receiver_name != MACH_PORT_NULL);
	assert(ip_in_space(port, space));

	/*
	 *	We clear ip_receiver_name and ip_receiver to simplify
	 *	the ipc_space_kernel check in ipc_mqueue_send.
	 */

	/* port transtions to IN-LIMBO state */
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;

	/* relevant part of ipc_port_clear_receiver */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;

	/* consumes the caller's reference and unlocks */
	ipc_port_destroy(port);
}
3067 
3068 /*
3069  *	Routine:	ipc_port_dealloc_special
3070  *	Purpose:
3071  *		Deallocate a port in a special space.
3072  *		Consumes one ref for the port.
3073  *	Conditions:
3074  *		Nothing locked.
3075  */
3076 
void
ipc_port_dealloc_special(
	ipc_port_t        port,
	ipc_space_t       space)
{
	/* take the port lock, then defer to the _and_unlock variant */
	ip_mq_lock(port);
	ipc_port_dealloc_special_and_unlock(port, space);
}
3085 
3086 /*
3087  *	Routine:	ipc_port_finalize
3088  *	Purpose:
3089  *		Called on last reference deallocate to
3090  *		free any remaining data associated with the
3091  *		port.
3092  *	Conditions:
3093  *		Nothing locked.
3094  */
void
ipc_port_finalize(
	ipc_port_t              port)
{
	ipc_port_request_t requests = port->ip_requests;

	/* all turnstiles must already be gone by last-reference time */
	assert(port_send_turnstile(port) == TURNSTILE_NULL);
	if (waitq_type(&port->ip_waitq) == WQT_PORT) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	/* an active port must never lose its last reference */
	if (ip_active(port)) {
		panic("Trying to free an active port. port %p", port);
	}

	/* free any remaining port-request (notification) table */
	if (requests != IPR_NULL) {
		ipc_table_size_t its = requests->ipr_size;
		it_requests_free(its, requests);
		port->ip_requests = IPR_NULL;
	}

	/*
	 * (81997111) now it is safe to deallocate the prealloc message.
	 * Keep the IP_BIT_PREALLOC bit, it has to be sticky as the turnstile
	 * code looks at it without holding locks.
	 */
	if (IP_PREALLOC(port)) {
		ipc_kmsg_t kmsg = port->ip_premsg;

		/* the prealloc kmsg must exist, be idle, and own no turnstile */
		if (kmsg == IKM_NULL || ikm_prealloc_inuse_port(kmsg) ||
		    kmsg->ikm_turnstile != TURNSTILE_NULL) {
			panic("port(%p, %p): prealloc message in an invalid state",
			    port, kmsg);
		}

		port->ip_premsg = IKM_NULL;
		ipc_kmsg_free(kmsg);
	}

	waitq_deinit(&port->ip_waitq);
#if MACH_ASSERT
	/* release the creation-backtrace reference taken in ipc_port_init_debug */
	if (port->ip_made_bt) {
		btref_put(port->ip_made_bt);
	}
#endif
}
3141 
3142 /*
3143  *	Routine:	kdp_mqueue_send_find_owner
3144  *	Purpose:
3145  *		Discover the owner of the ipc object that contains the input
3146  *		waitq object. The thread blocked on the waitq should be
3147  *		waiting for an IPC_MQUEUE_FULL event.
3148  *	Conditions:
3149  *		The 'waitinfo->wait_type' value should already be set to
3150  *		kThreadWaitPortSend.
3151  *	Note:
3152  *		If we find out that the containing port is actually in
3153  *		transit, we reset the wait_type field to reflect this.
3154  */
3155 void
kdp_mqueue_send_find_owner(struct waitq * waitq,__assert_only event64_t event,thread_waitinfo_v2_t * waitinfo,struct ipc_service_port_label ** isplp)3156 kdp_mqueue_send_find_owner(
3157 	struct waitq                   *waitq,
3158 	__assert_only event64_t         event,
3159 	thread_waitinfo_v2_t           *waitinfo,
3160 	struct ipc_service_port_label **isplp)
3161 {
3162 	struct turnstile *turnstile;
3163 	assert(waitinfo->wait_type == kThreadWaitPortSend);
3164 	assert(event == IPC_MQUEUE_FULL);
3165 	assert(waitq_type(waitq) == WQT_TURNSTILE);
3166 
3167 	turnstile = waitq_to_turnstile(waitq);
3168 	ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
3169 
3170 	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3171 
3172 	waitinfo->owner = 0;
3173 	waitinfo->context  = VM_KERNEL_UNSLIDE_OR_PERM(port);
3174 	if (ip_mq_lock_held_kdp(port)) {
3175 		/*
3176 		 * someone has the port locked: it may be in an
3177 		 * inconsistent state: bail
3178 		 */
3179 		waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3180 		return;
3181 	}
3182 
3183 	/* now we are the only one accessing the port */
3184 	if (ip_active(port)) {
3185 		/*
3186 		 * In kdp context, port must be left unlocked throughout.
3187 		 * Therefore can't use union field accessor helpers, manually strip PAC
3188 		 * and compare raw pointer.
3189 		 */
3190 		void *raw_ptr = ip_get_receiver_ptr_noauth(port);
3191 
3192 		if (port->ip_tempowner) {
3193 			ipc_importance_task_t imp_task = ip_get_imp_task(port);
3194 			if (imp_task != IIT_NULL && imp_task->iit_task != NULL) {
3195 				/* port is held by a tempowner */
3196 				waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3197 			} else {
3198 				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3199 			}
3200 		} else if (ip_in_a_space(port)) { /* no port lock needed */
3201 			if ((ipc_space_t)raw_ptr == ipc_space_kernel) { /* access union field as ip_receiver */
3202 				/*
3203 				 * The kernel pid is 0, make this
3204 				 * distinguishable from no-owner and
3205 				 * inconsistent port state.
3206 				 */
3207 				waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
3208 			} else {
3209 				waitinfo->owner = pid_from_task(((ipc_space_t)raw_ptr)->is_task);
3210 			}
3211 		} else if ((ipc_port_t)raw_ptr != IP_NULL) { /* access union field as ip_destination */
3212 			waitinfo->wait_type = kThreadWaitPortSendInTransit;
3213 			waitinfo->owner     = VM_KERNEL_UNSLIDE_OR_PERM((ipc_port_t)raw_ptr);
3214 		}
3215 		if (port->ip_service_port && port->ip_splabel != NULL) {
3216 			*isplp = (struct ipc_service_port_label *)port->ip_splabel;
3217 		}
3218 	}
3219 }
3220 
3221 /*
3222  *	Routine:	kdp_mqueue_recv_find_owner
3223  *	Purpose:
3224  *		Discover the "owner" of the ipc object that contains the input
3225  *		waitq object. The thread blocked on the waitq is trying to
3226  *		receive on the mqueue.
3227  *	Conditions:
3228  *		The 'waitinfo->wait_type' value should already be set to
3229  *		kThreadWaitPortReceive.
3230  *	Note:
 *		If we find that we are actually waiting on a port set, we reset
3232  *		the wait_type field to reflect this.
3233  */
void
kdp_mqueue_recv_find_owner(
	struct waitq                   *waitq,
	__assert_only event64_t         event,
	thread_waitinfo_v2_t           *waitinfo,
	struct ipc_service_port_label **isplp)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	waitinfo->owner = 0;

	if (waitq_type(waitq) == WQT_PORT_SET) {
		ipc_pset_t set = ips_from_waitq(waitq);

		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context   = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_mq_lock_held_kdp(set)) {
			/* pset is locked: state may be inconsistent, report only that */
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else if (waitq_type(waitq) == WQT_PORT) {
		ipc_port_t port = ip_from_waitq(waitq);

		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_mq_lock_held_kdp(port)) {
			/* port is locked: bail rather than read inconsistent state */
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (ip_in_a_space(port)) { /* no port lock needed */
				/* report the receive right's name in the owning space */
				waitinfo->owner = ip_get_receiver_name(port);
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
			if (port->ip_specialreply) {
				waitinfo->wait_flags |= STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY;
			}
			if (port->ip_splabel != NULL) {
				*isplp = (struct ipc_service_port_label *)port->ip_splabel;
			}
		}
	}
}
3284 
3285 void
ipc_port_set_label(ipc_port_t port,ipc_label_t label)3286 ipc_port_set_label(
3287 	ipc_port_t              port,
3288 	ipc_label_t             label)
3289 {
3290 	ipc_kobject_label_t labelp;
3291 
3292 	assert(!ip_is_kolabeled(port));
3293 
3294 	labelp = zalloc_flags(ipc_kobject_label_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
3295 	labelp->ikol_label = label;
3296 
3297 	port->ip_kolabel = labelp;
3298 	io_bits_or(ip_to_object(port), IO_BITS_KOLABEL);
3299 }
3300 
3301 kern_return_t
ipc_port_reset_thread_attr(ipc_port_t port)3302 ipc_port_reset_thread_attr(
3303 	ipc_port_t port)
3304 {
3305 	uint8_t iotier = THROTTLE_LEVEL_END;
3306 	uint8_t qos = THREAD_QOS_UNSPECIFIED;
3307 
3308 	return ipc_port_update_qos_n_iotier(port, qos, iotier);
3309 }
3310 
3311 kern_return_t
ipc_port_propagate_thread_attr(ipc_port_t port,struct thread_attr_for_ipc_propagation attr)3312 ipc_port_propagate_thread_attr(
3313 	ipc_port_t port,
3314 	struct thread_attr_for_ipc_propagation attr)
3315 {
3316 	uint8_t iotier = attr.tafip_iotier;
3317 	uint8_t qos = attr.tafip_qos;
3318 
3319 	return ipc_port_update_qos_n_iotier(port, qos, iotier);
3320 }
3321 
/*
 *	Store new kernel QoS / IO tier override values on a port and
 *	notify any registered knotes. Returns KERN_INVALID_ARGUMENT for
 *	null or special-reply ports, KERN_TERMINATED for inactive ports.
 */
static kern_return_t
ipc_port_update_qos_n_iotier(
	ipc_port_t port,
	uint8_t    qos,
	uint8_t    iotier)
{
	if (port == IPC_PORT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	ip_mq_lock(port);

	if (!ip_active(port)) {
		ip_mq_unlock(port);
		return KERN_TERMINATED;
	}

	/* special reply ports are not eligible for these overrides */
	if (port->ip_specialreply) {
		ip_mq_unlock(port);
		return KERN_INVALID_ARGUMENT;
	}

	port->ip_kernel_iotier_override = iotier;
	port->ip_kernel_qos_override = qos;

	/* ping knote watchers in the (active) receiving space about the change */
	if (ip_in_a_space(port) &&
	    is_active(ip_get_receiver(port)) &&
	    ipc_port_has_klist(port)) {
		KNOTE(&port->ip_klist, 0);
	}

	ip_mq_unlock(port);
	return KERN_SUCCESS;
}
3356 
3357 #if MACH_ASSERT
3358 #include <kern/machine.h>
3359 
3360 unsigned long   port_count = 0;
3361 unsigned long   port_count_warning = 20000;
3362 unsigned long   port_timestamp = 0;
3363 
3364 void            db_port_stack_trace(
3365 	ipc_port_t      port);
3366 void            db_ref(
3367 	int             refs);
3368 int             db_port_walk(
3369 	unsigned int    verbose,
3370 	unsigned int    display,
3371 	unsigned int    ref_search,
3372 	unsigned int    ref_target);
3373 
3374 #ifdef MACH_BSD
3375 extern int proc_pid(struct proc*);
3376 #endif /* MACH_BSD */
3377 
3378 /*
3379  *	Initialize all of the debugging state in a port.
3380  *	Insert the port into a global list of all allocated ports.
3381  */
void
ipc_port_init_debug(ipc_port_t port, void *fp)
{
	/* stamp each port with a global creation-order counter */
	port->ip_timetrack = port_timestamp++;

	/* record the creator's backtrace when ipc_portbt is enabled */
	if (ipc_portbt) {
		port->ip_made_bt = btref_get(fp, 0);
	}

#ifdef MACH_BSD
	/* remember the creating BSD process's pid, when one exists */
	task_t task = current_task_early();
	if (task != TASK_NULL) {
		struct proc *proc = get_bsdtask_info(task);
		if (proc) {
			port->ip_made_pid = proc_pid(proc);
		}
	}
#endif /* MACH_BSD */
}
3401 
3402 #endif  /* MACH_ASSERT */
3403