xref: /xnu-8796.121.2/osfmk/ipc/ipc_port.c (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_FREE_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  */
62 /*
63  */
64 /*
65  *	File:	ipc/ipc_port.c
66  *	Author:	Rich Draves
67  *	Date:	1989
68  *
69  *	Functions to manipulate IPC ports.
70  */
71 
72 #include <mach_assert.h>
73 
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <kern/backtrace.h>
77 #include <kern/debug.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/kcdata.h>
80 #include <kern/misc_protos.h>
81 #include <kern/policy_internal.h>
82 #include <kern/thread.h>
83 #include <kern/waitq.h>
84 #include <kern/host_notify.h>
85 #include <ipc/ipc_entry.h>
86 #include <ipc/ipc_space.h>
87 #include <ipc/ipc_object.h>
88 #include <ipc/ipc_right.h>
89 #include <ipc/ipc_port.h>
90 #include <ipc/ipc_pset.h>
91 #include <ipc/ipc_kmsg.h>
92 #include <ipc/ipc_mqueue.h>
93 #include <ipc/ipc_notify.h>
94 #include <ipc/ipc_importance.h>
95 #include <machine/limits.h>
96 #include <kern/turnstile.h>
97 #include <kern/machine.h>
98 
99 #include <security/mac_mach_internal.h>
100 #include <ipc/ipc_service_port.h>
101 
102 #include <string.h>
103 
/* Boot-arg tunable (default true); presumably gates priority boosting of
 * launch-critical IPC — confirm against uses elsewhere in this file. */
static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
/* Boot-arg tunable; consulted by the MACH_ASSERT port-debug machinery. */
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

extern zone_t ipc_kobject_label_zone;

/* Spin lock used when multiple ports must be locked at once. */
LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
/* Global counter backing ipc_port_timestamp(); incremented atomically. */
ipc_port_timestamp_t ipc_port_timestamp_data;

/* kalloc array type for per-port dead-name/send-possible request tables. */
KALLOC_ARRAY_TYPE_DEFINE(ipc_port_request_table,
    struct ipc_port_request, KT_DEFAULT);

#if     MACH_ASSERT
static void ipc_port_init_debug(ipc_port_t, void *fp);
#endif  /* MACH_ASSERT */
118 
/*
 *	Routine:	__ipc_port_inactive_panic
 *	Purpose:
 *		Out-of-line panic helper invoked when an inactive (dead)
 *		port is used where an active one is required.  Never returns.
 */
void __abortlike
__ipc_port_inactive_panic(ipc_port_t port)
{
	panic("Using inactive port %p", port);
}
124 
/*
 *	Routine:	__ipc_port_translate_receive_panic
 *	Purpose:
 *		Panic helper for ipc_port_translate_receive() when a receive
 *		right is found in a space that does not own the port — a sign
 *		of table tampering or corruption.  Never returns.
 */
static __abortlike void
__ipc_port_translate_receive_panic(ipc_space_t space, ipc_port_t port)
{
	panic("found receive right in space %p for port %p owned by space %p",
	    space, port, ip_get_receiver(port));
}
131 
132 __abortlike void
__ipc_right_delta_overflow_panic(ipc_port_t port,natural_t * field,int delta)133 __ipc_right_delta_overflow_panic(ipc_port_t port, natural_t *field, int delta)
134 {
135 	const char *what;
136 	if (field == &port->ip_srights) {
137 		what = "send right";
138 	} else {
139 		what = "send-once right";
140 	}
141 	panic("port %p %s count overflow (delta: %d)", port, what, delta);
142 }
143 
/* Forward declarations for file-local helpers defined later in this file. */
static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t port);

static thread_t
ipc_port_get_watchport_inheritor(
	ipc_port_t port);

static kern_return_t
ipc_port_update_qos_n_iotier(
	ipc_port_t port,
	uint8_t    qos,
	uint8_t    iotier);
157 
/*
 *	Routine:	ipc_port_release
 *	Purpose:
 *		Drop a reference on a port.  Out-of-line wrapper around
 *		the ip_release() macro for callers that need a function.
 */
void
ipc_port_release(ipc_port_t port)
{
	ip_release(port);
}
163 
/*
 *	Routine:	ipc_port_reference
 *	Purpose:
 *		Take a reference on a port, validating the pointer first.
 *		Out-of-line wrapper around ip_validate()/ip_reference().
 */
void
ipc_port_reference(ipc_port_t port)
{
	ip_validate(port);
	ip_reference(port);
}
170 
171 /*
172  *	Routine:	ipc_port_timestamp
173  *	Purpose:
174  *		Retrieve a timestamp value.
175  */
176 
ipc_port_timestamp_t
ipc_port_timestamp(void)
{
	/* atomic post-increment: each caller gets a distinct, ordered value */
	return OSIncrementAtomic(&ipc_port_timestamp_data);
}
182 
183 
184 /*
185  *	Routine:	ipc_port_translate_send
186  *	Purpose:
187  *		Look up a send right in a space.
188  *	Conditions:
189  *		Nothing locked before.  If successful, the object
190  *		is returned active and locked.  The caller doesn't get a ref.
191  *	Returns:
192  *		KERN_SUCCESS		Object returned locked.
193  *		KERN_INVALID_TASK	The space is dead.
194  *		KERN_INVALID_NAME	The name doesn't denote a right
195  *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
196  */
197 kern_return_t
ipc_port_translate_send(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)198 ipc_port_translate_send(
199 	ipc_space_t                     space,
200 	mach_port_name_t                name,
201 	ipc_port_t                     *portp)
202 {
203 	ipc_port_t port = IP_NULL;
204 	ipc_object_t object;
205 	kern_return_t kr;
206 
207 	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_SEND, &object);
208 	if (kr == KERN_SUCCESS) {
209 		port = ip_object_to_port(object);
210 	}
211 	*portp = port;
212 	return kr;
213 }
214 
215 
216 /*
217  *	Routine:	ipc_port_translate_receive
218  *	Purpose:
219  *		Look up a receive right in a space.
220  *		Performs some minimal security checks against tampering.
221  *	Conditions:
222  *		Nothing locked before.  If successful, the object
223  *		is returned active and locked.  The caller doesn't get a ref.
224  *	Returns:
225  *		KERN_SUCCESS		Object returned locked.
226  *		KERN_INVALID_TASK	The space is dead.
227  *		KERN_INVALID_NAME	The name doesn't denote a right
228  *		KERN_INVALID_RIGHT	Name doesn't denote the correct right
229  */
230 kern_return_t
ipc_port_translate_receive(ipc_space_t space,mach_port_name_t name,ipc_port_t * portp)231 ipc_port_translate_receive(
232 	ipc_space_t                     space,
233 	mach_port_name_t                name,
234 	ipc_port_t                     *portp)
235 {
236 	ipc_port_t port = IP_NULL;
237 	ipc_object_t object;
238 	kern_return_t kr;
239 
240 	kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_RECEIVE, &object);
241 	if (kr == KERN_SUCCESS) {
242 		/* object is locked */
243 		port = ip_object_to_port(object);
244 		if (!ip_in_space(port, space)) {
245 			__ipc_port_translate_receive_panic(space, port);
246 		}
247 	}
248 	*portp = port;
249 	return kr;
250 }
251 
252 
253 /*
254  *	Routine:	ipc_port_request_alloc
255  *	Purpose:
256  *		Try to allocate a request slot.
257  *		If successful, returns the request index.
258  *		Otherwise returns zero.
259  *	Conditions:
260  *		The port is locked and active.
261  *	Returns:
262  *		KERN_SUCCESS		A request index was found.
263  *		KERN_NO_SPACE		No index allocated.
264  */
265 
266 kern_return_t
ipc_port_request_alloc(ipc_port_t port,mach_port_name_t name,ipc_port_t soright,ipc_port_request_opts_t options,ipc_port_request_index_t * indexp)267 ipc_port_request_alloc(
268 	ipc_port_t                      port,
269 	mach_port_name_t                name,
270 	ipc_port_t                      soright,
271 	ipc_port_request_opts_t         options,
272 	ipc_port_request_index_t        *indexp)
273 {
274 	ipc_port_request_table_t table;
275 	ipc_port_request_index_t index;
276 	ipc_port_request_t ipr, base;
277 
278 	require_ip_active(port);
279 	assert(name != MACH_PORT_NULL);
280 	assert(soright != IP_NULL);
281 
282 	table = port->ip_requests;
283 	if (table == NULL) {
284 		return KERN_NO_SPACE;
285 	}
286 
287 	base  = ipc_port_request_table_base(table);
288 	index = base->ipr_next;
289 	if (index == 0) {
290 		return KERN_NO_SPACE;
291 	}
292 
293 	ipr = ipc_port_request_table_get(table, index);
294 	assert(ipr->ipr_soright == IP_NULL);
295 
296 	base->ipr_next = ipr->ipr_next;
297 	ipr->ipr_name = name;
298 	ipr->ipr_soright = IPR_SOR_MAKE(soright, options);
299 
300 	if (options == (IPR_SOR_SPARM_MASK | IPR_SOR_SPREQ_MASK) &&
301 	    port->ip_sprequests == 0) {
302 		port->ip_sprequests = 1;
303 	}
304 
305 	*indexp = index;
306 
307 	return KERN_SUCCESS;
308 }
309 
310 
311 /*
312  *	Routine:	ipc_port_request_hnotify_alloc
313  *	Purpose:
314  *		Try to allocate a request slot.
315  *		If successful, returns the request index.
316  *		Otherwise returns zero.
317  *	Conditions:
318  *		The port is locked and active.
319  *	Returns:
320  *		KERN_SUCCESS		A request index was found.
321  *		KERN_NO_SPACE		No index allocated.
322  */
323 
324 kern_return_t
ipc_port_request_hnotify_alloc(ipc_port_t port,struct host_notify_entry * hnotify,ipc_port_request_index_t * indexp)325 ipc_port_request_hnotify_alloc(
326 	ipc_port_t                      port,
327 	struct host_notify_entry       *hnotify,
328 	ipc_port_request_index_t       *indexp)
329 {
330 	ipc_port_request_table_t table;
331 	ipc_port_request_index_t index;
332 	ipc_port_request_t ipr, base;
333 
334 	require_ip_active(port);
335 
336 	table = port->ip_requests;
337 	if (table == NULL) {
338 		return KERN_NO_SPACE;
339 	}
340 
341 	base  = ipc_port_request_table_base(table);
342 	index = base->ipr_next;
343 	if (index == 0) {
344 		return KERN_NO_SPACE;
345 	}
346 
347 	ipr = ipc_port_request_table_get(table, index);
348 	assert(ipr->ipr_soright == IP_NULL);
349 
350 	base->ipr_next = ipr->ipr_next;
351 	ipr->ipr_name = IPR_HOST_NOTIFY;
352 	ipr->ipr_hnotify = hnotify;
353 
354 	*indexp = index;
355 
356 	return KERN_SUCCESS;
357 }
358 
359 /*
360  *	Routine:	ipc_port_request_grow
361  *	Purpose:
362  *		Grow a port's table of requests.
363  *	Conditions:
364  *		The port must be locked and active.
365  *		Nothing else locked; will allocate memory.
366  *		Upon return the port is unlocked.
367  *	Returns:
368  *		KERN_SUCCESS		Grew the table.
369  *		KERN_SUCCESS		Somebody else grew the table.
370  *		KERN_SUCCESS		The port died.
371  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate new table.
372  *		KERN_NO_SPACE		Couldn't grow to desired size
373  */
374 
375 kern_return_t
ipc_port_request_grow(ipc_port_t port)376 ipc_port_request_grow(
377 	ipc_port_t              port)
378 {
379 	ipc_port_request_table_t otable, ntable;
380 	uint32_t osize, nsize;
381 	uint32_t ocount, ncount;
382 
383 	require_ip_active(port);
384 
385 	otable = port->ip_requests;
386 	if (otable) {
387 		osize = ipc_port_request_table_size(otable);
388 	} else {
389 		osize = 0;
390 	}
391 	nsize = ipc_port_request_table_next_size(2, osize, 16);
392 	if (nsize > CONFIG_IPC_TABLE_REQUEST_SIZE_MAX) {
393 		nsize = CONFIG_IPC_TABLE_REQUEST_SIZE_MAX;
394 	}
395 	if (nsize == osize) {
396 		return KERN_RESOURCE_SHORTAGE;
397 	}
398 
399 	ip_reference(port);
400 	ip_mq_unlock(port);
401 
402 	ntable = ipc_port_request_table_alloc_by_size(nsize, Z_WAITOK | Z_ZERO);
403 	if (ntable == NULL) {
404 		ip_release(port);
405 		return KERN_RESOURCE_SHORTAGE;
406 	}
407 
408 	ip_mq_lock(port);
409 
410 	/*
411 	 *	Check that port is still active and that nobody else
412 	 *	has slipped in and grown the table on us.  Note that
413 	 *	just checking if the current table pointer == otable
414 	 *	isn't sufficient; must check ipr_size.
415 	 */
416 
417 	ocount = ipc_port_request_table_size_to_count(osize);
418 	ncount = ipc_port_request_table_size_to_count(nsize);
419 
420 	if (ip_active(port) && port->ip_requests == otable) {
421 		ipc_port_request_index_t free, i;
422 
423 		/* copy old table to new table */
424 
425 		if (otable != NULL) {
426 			memcpy(ipc_port_request_table_base(ntable),
427 			    ipc_port_request_table_base(otable),
428 			    osize);
429 		} else {
430 			ocount = 1;
431 			free   = 0;
432 		}
433 
434 		/* add new elements to the new table's free list */
435 
436 		for (i = ocount; i < ncount; i++) {
437 			ipc_port_request_table_get_nocheck(ntable, i)->ipr_next = free;
438 			free = i;
439 		}
440 
441 		ipc_port_request_table_base(ntable)->ipr_next = free;
442 		port->ip_requests = ntable;
443 		ip_mq_unlock(port);
444 		ip_release(port);
445 
446 		if (otable != NULL) {
447 			ipc_port_request_table_free(&otable);
448 		}
449 	} else {
450 		ip_mq_unlock(port);
451 		ip_release(port);
452 		ipc_port_request_table_free(&ntable);
453 	}
454 
455 	return KERN_SUCCESS;
456 }
457 
458 /*
459  *	Routine:	ipc_port_request_sparm
460  *	Purpose:
461  *		Arm delayed send-possible request.
462  *	Conditions:
463  *		The port must be locked and active.
464  *
465  *		Returns TRUE if the request was armed
466  *		(or armed with importance in that version).
467  */
468 
boolean_t
ipc_port_request_sparm(
	ipc_port_t                      port,
	__assert_only mach_port_name_t  name,
	ipc_port_request_index_t        index,
	mach_msg_option_t               option,
	mach_msg_priority_t             priority)
{
	if (index != IE_REQ_NONE) {
		ipc_port_request_table_t table;
		ipc_port_request_t ipr;

		require_ip_active(port);

		table = port->ip_requests;
		assert(table != NULL);

		ipr = ipc_port_request_table_get(table, index);
		assert(ipr->ipr_name == name);

		/* Is there a valid destination? */
		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
			/* arm the request and note it at the port level */
			ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
			port->ip_sprequests = 1;

			if (option & MACH_SEND_OVERRIDE) {
				/* apply override to message queue */
				mach_msg_qos_t qos_ovr;
				if (mach_msg_priority_is_pthread_priority(priority)) {
					qos_ovr = _pthread_priority_thread_qos(priority);
				} else {
					qos_ovr = mach_msg_priority_overide_qos(priority);
				}
				if (qos_ovr) {
					ipc_mqueue_override_send_locked(&port->ip_messages, qos_ovr);
				}
			}

#if IMPORTANCE_INHERITANCE
			/*
			 * TRUE ("armed with importance") only when the sender
			 * may donate importance, the port accepts donation,
			 * and no send-possible boost is outstanding yet.
			 */
			if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
			    (port->ip_impdonation != 0) &&
			    (port->ip_spimportant == 0) &&
			    (((option & MACH_SEND_IMPORTANCE) != 0) ||
			    (task_is_importance_donor(current_task())))) {
				return TRUE;
			}
#else
			return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
		}
	}
	return FALSE;
}
522 
523 /*
524  *	Routine:	ipc_port_request_type
525  *	Purpose:
526  *		Determine the type(s) of port requests enabled for a name.
527  *	Conditions:
528  *		The port must be locked or inactive (to avoid table growth).
529  *		The index must not be IE_REQ_NONE and for the name in question.
530  */
531 mach_port_type_t
ipc_port_request_type(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)532 ipc_port_request_type(
533 	ipc_port_t                      port,
534 	__assert_only mach_port_name_t  name,
535 	ipc_port_request_index_t        index)
536 {
537 	ipc_port_request_table_t table;
538 	ipc_port_request_t ipr;
539 	mach_port_type_t type = 0;
540 
541 	table = port->ip_requests;
542 	assert(table != NULL);
543 
544 	assert(index != IE_REQ_NONE);
545 	ipr = ipc_port_request_table_get(table, index);
546 	assert(ipr->ipr_name == name);
547 
548 	if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
549 		type |= MACH_PORT_TYPE_DNREQUEST;
550 
551 		if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
552 			type |= MACH_PORT_TYPE_SPREQUEST;
553 
554 			if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
555 				type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
556 			}
557 		}
558 	}
559 	return type;
560 }
561 
562 /*
563  *	Routine:	ipc_port_request_cancel
564  *	Purpose:
565  *		Cancel a dead-name/send-possible request and return the send-once right.
566  *	Conditions:
567  *		The port must be locked and active.
568  *		The index must not be IPR_REQ_NONE and must correspond with name.
569  */
570 
571 ipc_port_t
ipc_port_request_cancel(ipc_port_t port,__assert_only mach_port_name_t name,ipc_port_request_index_t index)572 ipc_port_request_cancel(
573 	ipc_port_t                      port,
574 	__assert_only mach_port_name_t  name,
575 	ipc_port_request_index_t        index)
576 {
577 	ipc_port_request_table_t table;
578 	ipc_port_request_t base, ipr;
579 	ipc_port_t request = IP_NULL;
580 
581 	require_ip_active(port);
582 	table = port->ip_requests;
583 	base  = ipc_port_request_table_base(table);
584 	assert(table != NULL);
585 
586 	assert(index != IE_REQ_NONE);
587 	ipr = ipc_port_request_table_get(table, index);
588 	assert(ipr->ipr_name == name);
589 	request = IPR_SOR_PORT(ipr->ipr_soright);
590 
591 	/* return ipr to the free list inside the table */
592 	ipr->ipr_next = base->ipr_next;
593 	ipr->ipr_soright = IP_NULL;
594 	base->ipr_next = index;
595 
596 	return request;
597 }
598 
599 
600 /*
601  *	Routine:	ipc_port_nsrequest
602  *	Purpose:
603  *		Make a no-senders request, returning the
604  *		previously registered send-once right.
605  *		Just cancels the previous request if notify is IP_NULL.
606  *	Conditions:
607  *		The port is locked and active.  It is unlocked.
608  *		Consumes a ref for notify (if non-null), and
609  *		returns previous with a ref (if non-null).
610  */
611 
void
ipc_port_nsrequest(
	ipc_port_t              port,
	mach_port_mscount_t     sync,
	ipc_port_t              notify,
	ipc_port_t              *previousp)
{
	ipc_port_t previous;
	mach_port_mscount_t mscount;
	require_ip_active(port);

	/* kernel-space ports use the kobject no-senders path instead */
	assert(!ip_in_space(port, ipc_space_kernel));
	assert(port->ip_nsrequest != IP_KOBJECT_NSREQUEST_ARMED);

	previous = port->ip_nsrequest;
	mscount = port->ip_mscount;

	/*
	 * If there are already no senders and the caller's sync value is
	 * not ahead of the current make-send count, fire the notification
	 * immediately instead of arming the request.
	 */
	if ((port->ip_srights == 0) && (sync <= mscount) &&
	    (notify != IP_NULL)) {
		port->ip_nsrequest = IP_NULL;
		ip_mq_unlock(port);
		ipc_notify_no_senders(notify, mscount, /* kobject */ false);
	} else {
		/* arm (or, if notify is IP_NULL, just cancel) the request */
		port->ip_nsrequest = notify;
		ip_mq_unlock(port);
	}

	/* hand the previously registered right (and its ref) back */
	*previousp = previous;
}
641 
642 
643 /*
644  *	Routine:	ipc_port_clear_receiver
645  *	Purpose:
646  *		Prepares a receive right for transmission/destruction,
647  *		optionally performs mqueue destruction (with port lock held)
648  *
649  *	Conditions:
650  *		The port is locked and active.
651  *	Returns:
652  *		If should_destroy is TRUE, then the return value indicates
653  *		whether the caller needs to reap kmsg structures that should
654  *		be destroyed (by calling ipc_kmsg_reap_delayed)
655  *
656  *              If should_destroy is FALSE, this always returns FALSE
657  */
658 
boolean_t
ipc_port_clear_receiver(
	ipc_port_t          port,
	boolean_t           should_destroy,
	waitq_link_list_t  *free_l)
{
	ipc_mqueue_t    mqueue = &port->ip_messages;
	boolean_t       reap_messages = FALSE;

	/*
	 * Pull ourselves out of any sets to which we belong.
	 * We hold the write space lock or the receive entry has
	 * been deleted, so even though this acquires and releases
	 * the port lock, we know we won't be added to any other sets.
	 */
	if (ip_in_pset(port)) {
		waitq_unlink_all_locked(&port->ip_waitq, NULL, free_l);
		assert(!ip_in_pset(port));
	}

	/*
	 * Send anyone waiting on the port's queue directly away.
	 * Also clear the mscount, seqno, guard bits
	 */
	if (ip_in_a_space(port)) {
		ipc_mqueue_changed(ip_get_receiver(port), &port->ip_waitq);
	} else {
		ipc_mqueue_changed(NULL, &port->ip_waitq);
	}
	/* reset make-send count, sequence number, and guard/context state */
	port->ip_mscount = 0;
	mqueue->imq_seqno = 0;
	port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;

	/*
	 * clear the immovable bit so the port can move back to anyone listening
	 * for the port destroy notification.
	 */
	port->ip_immovable_receive = 0;

	if (should_destroy) {
		/*
		 * Mark the port and mqueue invalid, preventing further send/receive
		 * operations from succeeding. It's important for this to be
		 * done under the same lock hold as the ipc_mqueue_changed
		 * call to avoid additional threads blocking on an mqueue
		 * that's being destroyed.
		 *
		 * The port active bit needs to be guarded under mqueue lock for
		 * turnstiles
		 */

		/* port transitions to INACTIVE state */
		io_bits_andnot(ip_to_object(port), IO_BITS_ACTIVE);
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_timestamp = ipc_port_timestamp();

		/* TRUE here tells the caller to run ipc_kmsg_reap_delayed() */
		reap_messages = ipc_mqueue_destroy_locked(mqueue, free_l);
	} else {
		/* port transtions to IN-LIMBO state */
		port->ip_receiver_name = MACH_PORT_NULL;
		port->ip_destination = IP_NULL;
	}

	return reap_messages;
}
724 
725 /*
726  *	Routine:	ipc_port_init
727  *	Purpose:
728  *		Initializes a newly-allocated port.
729  *
730  *		The memory is expected to be zero initialized (allocated with Z_ZERO).
731  */
732 
void
ipc_port_init(
	ipc_port_t              port,
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name)
{
	int policy = SYNC_POLICY_FIFO;
	task_t task = TASK_NULL;

	/* the port has been 0 initialized when called */

	if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
		io_bits_or(ip_to_object(port), IP_BIT_FILTER_MSG);
	}
	if (flags & IPC_PORT_INIT_LOCKED) {
		policy |= SYNC_POLICY_INIT_LOCKED;
	}

	/* must be done first, many ip_* bits live inside the waitq */
	waitq_init(&port->ip_waitq, WQT_PORT, policy);
	if (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) {
		port->ip_tg_block_tracking = true;
	}
	if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
		port->ip_specialreply = true;
	}
	if ((flags & IPC_PORT_INIT_REPLY) || (flags & IPC_PORT_INIT_SPECIAL_REPLY)) {
		/* may be called before a current task exists (early boot) */
		task = current_task_early();

		/* Strict enforcement of reply port semantics are disabled for 3p - rdar://97441265. */
		if (task && task_get_platform_binary(task)) {
			port->ip_immovable_receive = true;
			ip_mark_reply_port(port);
		} else {
			ip_mark_provisional_reply_port(port);
		}
	}
	if (flags & IPC_PORT_ENFORCE_REPLY_PORT_SEMANTICS) {
		ip_enforce_reply_port_semantics(port);
	}
	if (flags & IPC_PORT_INIT_PROVISIONAL_REPLY) {
		ip_mark_provisional_reply_port(port);
	}

	/* no kernel overrides in effect yet */
	port->ip_kernel_qos_override = THREAD_QOS_UNSPECIFIED;
	port->ip_kernel_iotier_override = THROTTLE_LEVEL_END;

	ipc_mqueue_init(&port->ip_messages);
#if MACH_ASSERT
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif  /* MACH_ASSERT */

	/* port transitions to IN-SPACE state */
	port->ip_receiver_name = name;
	port->ip_receiver = space;

	/* mint the first send right along with the receive right */
	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		port->ip_srights = 1;
		port->ip_mscount = 1;
	}
}
795 
796 /*
797  *	Routine:	ipc_port_alloc
798  *	Purpose:
799  *		Allocate a port.
800  *	Conditions:
801  *		Nothing locked.  If successful, the port is returned
802  *		locked.  (The caller doesn't have a reference.)
803  *	Returns:
804  *		KERN_SUCCESS		The port is allocated.
805  *		KERN_INVALID_TASK	The space is dead.
806  *		KERN_NO_SPACE		No room for an entry in the space.
807  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
808  */
809 
kern_return_t
ipc_port_alloc(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        *namep,
	ipc_port_t              *portp)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	/* optionally allocate a send right together with the receive right */
	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
	    &name, (ipc_object_t *) &port);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* space is locked */
	ipc_port_init(port, space, flags | IPC_PORT_INIT_LOCKED, name);
	/* port is locked */
#if MACH_ASSERT
	/*
	 * NOTE(review): ipc_port_init() also calls ipc_port_init_debug();
	 * this second call appears to overwrite that record with this
	 * caller's frame — confirm whether both calls are intentional.
	 */
	ipc_port_init_debug(port, __builtin_frame_address(0));
#endif  /* MACH_ASSERT */

	/* unlock space after init */
	is_write_unlock(space);

	*namep = name;
	*portp = port;

	return KERN_SUCCESS;
}
848 
849 /*
850  *	Routine:	ipc_port_alloc_name
851  *	Purpose:
852  *		Allocate a port, with a specific name.
853  *	Conditions:
854  *		Nothing locked.  If successful, the port is returned
855  *		locked.  (The caller doesn't have a reference.)
856  *	Returns:
857  *		KERN_SUCCESS		The port is allocated.
858  *		KERN_INVALID_TASK	The space is dead.
859  *		KERN_NAME_EXISTS	The name already denotes a right.
860  *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
861  */
862 
kern_return_t
ipc_port_alloc_name(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags,
	mach_port_name_t        name,
	ipc_port_t              *portp)
{
	mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
	mach_port_urefs_t urefs = 0;

	/* optionally allocate a send right together with the receive right */
	if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
		type |= MACH_PORT_TYPE_SEND;
		urefs = 1;
	}
	/* caller gets the port back locked, matching ipc_port_alloc() */
	flags |= IPC_PORT_INIT_LOCKED;

	/* the block initializes the port while ipc_object_alloc_name()
	 * is installing it under the requested name */
	return ipc_object_alloc_name(space, IOT_PORT, type, urefs,
	           name, (ipc_object_t *)portp, ^(ipc_object_t object){
		ipc_port_init(ip_object_to_port(object), space, flags, name);
	});
}
884 
885 /*
886  *      Routine:	ipc_port_spnotify
887  *	Purpose:
888  *		Generate send-possible port notifications.
889  *	Conditions:
890  *		Nothing locked, reference held on port.
891  */
void
ipc_port_spnotify(
	ipc_port_t      port)
{
	ipc_port_request_index_t index = 0;
	ipc_table_elems_t size = 0;

	/*
	 * If the port has no send-possible request
	 * armed, don't bother to lock the port.
	 */
	if (port->ip_sprequests == 0) {
		return;
	}

	ip_mq_lock(port);

#if IMPORTANCE_INHERITANCE
	/* drop any outstanding send-possible importance boost */
	if (port->ip_spimportant != 0) {
		port->ip_spimportant = 0;
		/* a TRUE return means the call dropped the port lock */
		if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
			ip_mq_lock(port);
		}
	}
#endif /* IMPORTANCE_INHERITANCE */

	/* recheck: the armed state may have changed while unlocked */
	if (port->ip_sprequests == 0) {
		ip_mq_unlock(port);
		return;
	}
	port->ip_sprequests = 0;

revalidate:
	if (ip_active(port)) {
		ipc_port_request_table_t requests;

		/* table may change each time port unlocked (reload) */
		requests = port->ip_requests;
		assert(requests != NULL);

		/*
		 * no need to go beyond table size when first
		 * we entered - those are future notifications.
		 */
		if (size == 0) {
			size = ipc_port_request_table_count(requests);
		}

		/* no need to backtrack either */
		while (++index < size) {
			ipc_port_request_t ipr = ipc_port_request_table_get_nocheck(requests, index);
			mach_port_name_t name = ipr->ipr_name;
			ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
			boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

			if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
				/* claim send-once right - slot still inuse */
				assert(name != IPR_HOST_NOTIFY);
				ipr->ipr_soright = IP_NULL;
				ip_mq_unlock(port);

				/* deliver with the lock dropped, then resume scan */
				ipc_notify_send_possible(soright, name);

				ip_mq_lock(port);
				goto revalidate;
			}
		}
	}
	ip_mq_unlock(port);
	return;
}
963 
964 /*
965  *      Routine:	ipc_port_dnnotify
966  *	Purpose:
967  *		Generate dead name notifications for
968  *		all outstanding dead-name and send-
969  *		possible requests.
970  *	Conditions:
971  *		Nothing locked.
972  *		Port must be inactive.
973  *		Reference held on port.
974  */
975 void
ipc_port_dnnotify(ipc_port_t port)976 ipc_port_dnnotify(
977 	ipc_port_t      port)
978 {
979 	ipc_port_request_table_t requests = port->ip_requests;
980 
981 	assert(!ip_active(port));
982 	if (requests != NULL) {
983 		ipc_port_request_t ipr = ipc_port_request_table_base(requests);
984 
985 		while ((ipr = ipc_port_request_table_next_elem(requests, ipr))) {
986 			mach_port_name_t name = ipr->ipr_name;
987 			ipc_port_t soright;
988 
989 			switch (name) {
990 			case MACH_PORT_DEAD:
991 			case MACH_PORT_NULL:
992 				break;
993 			case IPR_HOST_NOTIFY:
994 				host_notify_cancel(ipr->ipr_hnotify);
995 				break;
996 			default:
997 				soright = IPR_SOR_PORT(ipr->ipr_soright);
998 				if (IP_VALID(soright)) {
999 					ipc_notify_dead_name(soright, name);
1000 				}
1001 				break;
1002 			}
1003 		}
1004 	}
1005 }
1006 
/*
 *	Routine:	ipc_port_destroy
 *	Purpose:
 *		Destroys a port.  Cleans up queued messages.
 *
 *		If the port has a backup, it doesn't get destroyed,
 *		but is sent in a port-destroyed notification to the backup.
 *	Conditions:
 *		The port is locked and alive; nothing else locked.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port is unlocked and dead.
 */

void
ipc_port_destroy(ipc_port_t port)
{
	/* snapshot flags before the port mutates below */
	bool special_reply = port->ip_specialreply;
	bool service_port  = port->ip_service_port;
	bool reap_msgs;

	ipc_port_t pdrequest = IP_NULL;
	struct task_watchport_elem *twe = NULL;
	waitq_link_list_t free_l = { };

#if IMPORTANCE_INHERITANCE
	ipc_importance_task_t release_imp_task = IIT_NULL;
	thread_t self = current_thread();
	/* "top" == this thread is not already unwinding importance assertions */
	boolean_t top = (self->ith_assertions == 0);
	natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

	require_ip_active(port);
	/* port->ip_receiver_name is garbage */
	/* port->ip_receiver/port->ip_destination is garbage */

	/* clear any reply-port context */
	port->ip_reply_context = 0;

	/* must be done before we access ip_pdrequest */
	twe = ipc_port_clear_watchport_elem_internal(port);
	assert(!port->ip_has_watchport);

	if (!special_reply) {
		/* we assume the ref for pdrequest */
		pdrequest = port->ip_pdrequest;
		port->ip_pdrequest = IP_NULL;
	} else if (port->ip_tempowner) {
		/* special reply ports must never be importance tempowners */
		panic("ipc_port_destroy: invalid state");
	}

#if IMPORTANCE_INHERITANCE
	/* determine how many assertions to drop and from whom */
	if (port->ip_tempowner != 0) {
		assert(top);
		release_imp_task = ip_get_imp_task(port);
		if (IIT_NULL != release_imp_task) {
			port->ip_imp_task = IIT_NULL;
			assertcnt = port->ip_impcount;
		}
		/* Otherwise, nothing to drop */
	} else {
		assertcnt = port->ip_impcount;
		if (pdrequest != IP_NULL) {
			/* mark in limbo for the journey */
			port->ip_tempowner = 1;
		}
	}

	if (top) {
		/* stash the count on the thread until all locks are dropped */
		self->ith_assertions = assertcnt;
	}
#endif /* IMPORTANCE_INHERITANCE */

	/*
	 * If no port-destroyed notification is armed, calling
	 * ipc_port_clear_receiver() will mark the port inactive
	 * and will wakeup any threads which may be blocked receiving on it.
	 */
	reap_msgs = ipc_port_clear_receiver(port, pdrequest == IP_NULL, &free_l);
	assert(!ip_in_pset(port));
	assert(port->ip_mscount == 0);

	/*
	 * Handle port-destroyed notification
	 */
	if (pdrequest != IP_NULL) {
		/* port stays alive; it travels to the backup instead of dying */
		assert(reap_msgs == false);

		if (service_port) {
			assert(port->ip_splabel != NULL);
			if (ipc_service_port_label_is_special_pdrequest((ipc_service_port_label_t)port->ip_splabel)) {
				ipc_service_port_label_set_flag(port->ip_splabel, ISPL_FLAGS_SEND_PD_NOTIFICATION);
			}
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* consumes our refs for port and pdrequest */
		ipc_notify_port_destroyed(pdrequest, port);
	} else {
		ipc_service_port_label_t splabel = NULL;
		ipc_notify_nsenders_t nsrequest;

		/* capture any armed no-senders notification before teardown */
		nsrequest = ipc_notify_no_senders_prepare(port);

		if (!ip_is_kolabeled(port)) {
			splabel = port->ip_splabel;
			port->ip_splabel = NULL;
			port->ip_service_port = false;
		}

		ipc_port_send_turnstile_recompute_push_locked(port);
		/* port unlocked */

		/* unlink the kmsg from special reply port */
		if (special_reply) {
			ipc_port_adjust_special_reply_port(port,
			    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
		}

		/* Deallocate the service/connection port label */
		if (splabel) {
			ipc_service_port_label_dealloc(splabel, service_port);
			splabel = NULL;
		}

		if (reap_msgs) {
			/* messages queued at clear-receiver time are freed now */
			ipc_kmsg_reap_delayed();
		}

		if (nsrequest.ns_notify) {
			/*
			 * ipc_notify_no_senders_prepare will consume
			 * the reference for kobjects.
			 */
			assert(!nsrequest.ns_is_kobject);
			ip_mq_lock(nsrequest.ns_notify);
			ipc_notify_send_once_and_unlock(nsrequest.ns_notify); /* consumes ref */
		}

		/* generate dead-name notifications */
		ipc_port_dnnotify(port);

		ipc_kobject_destroy(port);

		ip_release(port); /* consume caller's ref */
	}

	if (twe) {
		task_watchport_elem_deallocate(twe);
		twe = NULL;
	}

	/* free pset linkage records accumulated by ipc_port_clear_receiver() */
	waitq_link_free_list(WQT_PORT_SET, &free_l);

#if IMPORTANCE_INHERITANCE
	/* drop importance assertions now that every lock is released */
	if (release_imp_task != IIT_NULL) {
		if (assertcnt > 0) {
			assert(top);
			self->ith_assertions = 0;
			assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
			ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
		}
		ipc_importance_task_release(release_imp_task);
	} else if (assertcnt > 0) {
		if (top) {
			self->ith_assertions = 0;
			release_imp_task = current_task()->task_imp_base;
			if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
				ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
			}
		}
	}
#endif /* IMPORTANCE_INHERITANCE */
}
1183 
/*
 *	Routine:	ipc_port_destination_chain_lock
 *	Purpose:
 *		Search for the end of the chain (a port not in transit),
 *		acquiring locks along the way, and return it in `base`.
 *
 *		Returns true if a reference was taken on `base`
 *
 *	Conditions:
 *		No ports locked.
 *		ipc_port_multiple_lock held.
 */
boolean_t
ipc_port_destination_chain_lock(
	ipc_port_t port,
	ipc_port_t *base)
{
	for (;;) {
		ip_mq_lock(port);

		if (!ip_active(port)) {
			/*
			 * Active ports that are ip_mq_lock()ed cannot go away.
			 *
			 * But inactive ports at the end of walking
			 * an ip_destination chain are only protected
			 * from space termination cleanup while the entire
			 * chain of ports leading to them is held.
			 *
			 * Callers of this code tend to unlock the chain
			 * in the same order than this walk which doesn't
			 * protect `base` properly when it's inactive.
			 *
			 * In that case, take a reference that the caller
			 * is responsible for releasing.
			 */
			ip_reference(port);
			*base = port;
			return true;
		}

		/* port is active */
		if (!ip_in_transit(port)) {
			/* found the end of the chain; no extra ref needed */
			*base = port;
			return false;
		}

		/* still in transit: keep this lock and walk one hop further */
		port = ip_get_destination(port);
	}
}
1234 
1235 
/*
 *	Routine:	ipc_port_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_port_check_circularity(
	ipc_port_t      port,
	ipc_port_t      dest)
{
#if IMPORTANCE_INHERITANCE
	/* adjust importance counts at the same time */
	return ipc_importance_check_circularity(port, dest);
#else
	ipc_port_t base;
	struct task_watchport_elem *watchport_elem = NULL;
	bool took_base_ref = false;

	assert(port != IP_NULL);
	assert(dest != IP_NULL);

	/* trivial self-cycle */
	if (port == dest) {
		return TRUE;
	}
	base = dest;

	/* Check if destination needs a turnstile */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 *	First try a quick check that can run in parallel.
	 *	No circularity if dest is not in transit.
	 */
	ip_mq_lock(port);
	if (ip_mq_lock_try(dest)) {
		if (!ip_in_transit(dest)) {
			/* fast path: both locks held, jump to the linkage */
			goto not_circular;
		}

		/* dest is in transit; further checking necessary */

		ip_mq_unlock(dest);
	}
	ip_mq_unlock(port);

	ipc_port_multiple_lock(); /* massive serialization */

	/*
	 *	Search for the end of the chain (a port not in transit),
	 *	acquiring locks along the way.
	 */

	took_base_ref = ipc_port_destination_chain_lock(dest, &base);
	/* all ports in chain from dest to base, inclusive, are locked */

	if (port == base) {
		/* circularity detected! */

		ipc_port_multiple_unlock();

		/* port (== base) is in limbo */
		require_ip_active(port);
		assert(ip_in_limbo(port));
		assert(!took_base_ref);

		/* unwind the chain locks taken above, dest towards base */
		base = dest;
		while (base != IP_NULL) {
			ipc_port_t next;

			/* dest is in transit or in limbo */
			require_ip_active(base);
			assert(!ip_in_a_space(base));

			next = ip_get_destination(base);
			ip_mq_unlock(base);
			base = next;
		}

		/* undo the turnstile ref taken by the prepare above */
		ipc_port_send_turnstile_complete(dest);
		return TRUE;
	}

	/*
	 *	The guarantee:  lock port while the entire chain is locked.
	 *	Once port is locked, we can take a reference to dest,
	 *	add port to the chain, and unlock everything.
	 */

	ip_mq_lock(port);
	ipc_port_multiple_unlock();

not_circular:
	require_ip_active(port);
	assert(ip_in_limbo(port));

	/* Clear the watchport boost */
	watchport_elem = ipc_port_clear_watchport_elem_internal(port);

	/* Check if the port is being enqueued as a part of sync bootstrap checkin */
	if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
		port->ip_sync_bootstrap_checkin = 1;
	}

	/* ip_destination holds a reference on dest */
	ip_reference(dest);

	/* port transitions to IN-TRANSIT state */
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	port->ip_destination = dest;

	/* Setup linkage for source port if it has sync ipc push */
	struct turnstile *send_turnstile = TURNSTILE_NULL;
	if (port_send_turnstile(port)) {
		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		/*
		 * What ipc_port_adjust_port_locked would do,
		 * but we need to also drop even more locks before
		 * calling turnstile_update_inheritor_complete().
		 */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

		turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
		    (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

		/* update complete and turnstile complete called after dropping all locks */
	}
	/* now unlock chain */

	ip_mq_unlock(port);

	/* release each chain lock, walking dest towards base */
	for (;;) {
		ipc_port_t next;

		if (dest == base) {
			break;
		}

		/* port is IN-TRANSIT */
		require_ip_active(dest);
		assert(ip_in_transit(dest));

		next = ip_get_destination(dest);
		ip_mq_unlock(dest);
		dest = next;
	}

	/* base is not IN-TRANSIT */
	assert(!ip_in_transit(base));

	ip_mq_unlock(base);
	if (took_base_ref) {
		/* ref taken by ipc_port_destination_chain_lock() on inactive base */
		ip_release(base);
	}

	/* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

		/* Take the mq lock to call turnstile complete */
		ip_mq_lock(port);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		send_turnstile = TURNSTILE_NULL;
		ip_mq_unlock(port);
		turnstile_cleanup();
	}

	if (watchport_elem) {
		task_watchport_elem_deallocate(watchport_elem);
	}

	return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
1423 
1424 /*
1425  *	Routine:	ipc_port_watchport_elem
1426  *	Purpose:
1427  *		Get the port's watchport elem field
1428  *
1429  *	Conditions:
1430  *		port locked
1431  */
1432 static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)1433 ipc_port_watchport_elem(ipc_port_t port)
1434 {
1435 	if (port->ip_has_watchport) {
1436 		assert(!port->ip_specialreply);
1437 		return port->ip_twe;
1438 	}
1439 	return NULL;
1440 }
1441 
1442 /*
1443  *	Routine:	ipc_port_update_watchport_elem
1444  *	Purpose:
1445  *		Set the port's watchport elem field
1446  *
1447  *	Conditions:
1448  *		port locked and is not a special reply port.
1449  */
1450 static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port,struct task_watchport_elem * we)1451 ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
1452 {
1453 	struct task_watchport_elem *old_we;
1454 	ipc_port_t pdrequest;
1455 
1456 	assert(!port->ip_specialreply);
1457 
1458 	/*
1459 	 * Note: ip_pdrequest and ip_twe are unioned.
1460 	 *       and ip_has_watchport controls the union "type"
1461 	 */
1462 	if (port->ip_has_watchport) {
1463 		old_we = port->ip_twe;
1464 		pdrequest = old_we->twe_pdrequest;
1465 		old_we->twe_pdrequest = IP_NULL;
1466 	} else {
1467 		old_we = NULL;
1468 		pdrequest = port->ip_pdrequest;
1469 	}
1470 
1471 	if (we) {
1472 		port->ip_has_watchport = true;
1473 		we->twe_pdrequest = pdrequest;
1474 		port->ip_twe = we;
1475 	} else {
1476 		port->ip_has_watchport = false;
1477 		port->ip_pdrequest = pdrequest;
1478 	}
1479 
1480 	return old_we;
1481 }
1482 
/*
 *	Routine:	ipc_special_reply_stash_pid_locked
 *	Purpose:
 *		Set the pid of process that copied out send once right to special reply port.
 *
 *	Conditions:
 *		port locked
 */
static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
{
	/* ip_pid is only meaningful for special reply ports */
	assert(port->ip_specialreply);
	port->ip_pid = pid;
}
1497 
/*
 *	Routine:	ipc_special_reply_get_pid_locked
 *	Purpose:
 *		Get the pid of process that copied out send once right to special reply port.
 *
 *	Conditions:
 *		port locked
 */
int
ipc_special_reply_get_pid_locked(ipc_port_t port)
{
	/* ip_pid is only meaningful for special reply ports */
	assert(port->ip_specialreply);
	return port->ip_pid;
}
1512 
1513 /*
1514  * Update the recv turnstile inheritor for a port.
1515  *
1516  * Sync IPC through the port receive turnstile only happens for the special
1517  * reply port case. It has three sub-cases:
1518  *
1519  * 1. a send-once right is in transit, and pushes on the send turnstile of its
1520  *    destination mqueue.
1521  *
1522  * 2. a send-once right has been stashed on a knote it was copied out "through",
1523  *    as the first such copied out port.
1524  *
1525  * 3. a send-once right has been stashed on a knote it was copied out "through",
1526  *    as the second or more copied out port.
1527  */
1528 void
ipc_port_recv_update_inheritor(ipc_port_t port,struct turnstile * rcv_turnstile,turnstile_update_flags_t flags)1529 ipc_port_recv_update_inheritor(
1530 	ipc_port_t port,
1531 	struct turnstile *rcv_turnstile,
1532 	turnstile_update_flags_t flags)
1533 {
1534 	struct turnstile *inheritor = TURNSTILE_NULL;
1535 	struct knote *kn;
1536 
1537 	if (ip_active(port) && port->ip_specialreply) {
1538 		ip_mq_lock_held(port);
1539 
1540 		switch (port->ip_sync_link_state) {
1541 		case PORT_SYNC_LINK_PORT:
1542 			if (port->ip_sync_inheritor_port != NULL) {
1543 				inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1544 			}
1545 			break;
1546 
1547 		case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1548 			kn = port->ip_sync_inheritor_knote;
1549 			inheritor = filt_ipc_kqueue_turnstile(kn);
1550 			break;
1551 
1552 		case PORT_SYNC_LINK_WORKLOOP_STASH:
1553 			inheritor = port->ip_sync_inheritor_ts;
1554 			break;
1555 		}
1556 	}
1557 
1558 	turnstile_update_inheritor(rcv_turnstile, inheritor,
1559 	    flags | TURNSTILE_INHERITOR_TURNSTILE);
1560 }
1561 
/*
 * Update the send turnstile inheritor for a port.
 *
 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
 *
 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
 *    to push on thread doing the sync ipc.
 *
 * 2. a receive right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 3. port was passed as an exec watchport and port is pushing on main thread
 *    of the task.
 *
 * 4. a receive right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
 *    for the special reply port)
 *
 * 5. a receive right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port (same as
 *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
 *
 * 6. a receive right has been copied out as a part of sync bootstrap checkin
 *    and needs to push on thread doing the sync bootstrap checkin.
 *
 * 7. the receive right is monitored by a knote, and pushes on any that is
 *    registered on a workloop. filt_machport makes sure that if such a knote
 *    exists, it is kept as the first item in the knote list, so we never need
 *    to walk.
 */
void
ipc_port_send_update_inheritor(
	ipc_port_t port,
	struct turnstile *send_turnstile,
	turnstile_update_flags_t flags)
{
	ipc_mqueue_t mqueue = &port->ip_messages;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
	struct knote *kn;
	turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

	ip_mq_lock_held(port);

	/*
	 * NOTE(review): prioritize_launch appears to be a global policy flag
	 * gating thread-targeted pushes (cases 1, 3 and 6) — defined elsewhere
	 * in the file; confirm its semantics there.
	 */
	if (!ip_active(port)) {
		/* this port is no longer active, it should not push anywhere */
	} else if (port->ip_specialreply) {
		/* Case 1. */
		if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
			inheritor = port->ip_messages.imq_srp_owner_thread;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (ip_in_transit(port)) {
		/* Case 2. */
		inheritor = port_send_turnstile(ip_get_destination(port));
	} else if (port->ip_has_watchport) {
		/* Case 3. */
		if (prioritize_launch) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = ipc_port_get_watchport_inheritor(port);
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
		/* Case 4. */
		inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
		/* Case 5. */
		inheritor = mqueue->imq_inheritor_turnstile;
	} else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
		/* Case 6. */
		if (prioritize_launch) {
			inheritor = port->ip_messages.imq_inheritor_thread_ref;
			inheritor_flags = TURNSTILE_INHERITOR_THREAD;
		}
	} else if ((kn = SLIST_FIRST(&port->ip_klist))) {
		/* Case 7. Push on a workloop that is interested */
		if (filt_machport_kqueue_has_turnstile(kn)) {
			assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
			inheritor = filt_ipc_kqueue_turnstile(kn);
		}
	}

	turnstile_update_inheritor(send_turnstile, inheritor,
	    flags | inheritor_flags);
}
1646 
/*
 *	Routine:	ipc_port_send_turnstile_prepare
 *	Purpose:
 *		Get a reference on port's send turnstile, if
 *		port does not have a send turnstile then allocate one.
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;
	struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
	ip_mq_lock(port);

	if (port_send_turnstile(port) == NULL ||
	    port_send_turnstile(port)->ts_prim_count == 0) {
		if (turnstile == TURNSTILE_NULL) {
			/*
			 * Cannot allocate while holding the port lock:
			 * drop it, allocate, and re-check from the top
			 * (another thread may have installed one meanwhile).
			 */
			ip_mq_unlock(port);
			turnstile = turnstile_alloc();
			goto retry_alloc;
		}

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    turnstile, TURNSTILE_SYNC_IPC);
		turnstile = TURNSTILE_NULL; /* consumed by turnstile_prepare */

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);

		/* turnstile complete will be called in ipc_port_send_turnstile_complete */
	}

	/* Increment turnstile counter */
	port_send_turnstile(port)->ts_prim_count++;
	ip_mq_unlock(port);

	if (send_turnstile) {
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
	}
	if (turnstile != TURNSTILE_NULL) {
		/* lost the race: another thread installed a turnstile first */
		turnstile_deallocate(turnstile);
	}
}
1696 
1697 
/*
 *	Routine:	ipc_port_send_turnstile_complete
 *	Purpose:
 *		Drop a ref on the port's send turnstile, if the
 *		ref becomes zero, deallocate the turnstile.
 *
 *	Conditions:
 *		The space might be locked, use safe deallocate.
 */
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
	struct turnstile *turnstile = TURNSTILE_NULL;

	/* Drop turnstile count on dest port */
	ip_mq_lock(port);

	port_send_turnstile(port)->ts_prim_count--;
	if (port_send_turnstile(port)->ts_prim_count == 0) {
		/* last ref: detach the turnstile from the port */
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
		    &turnstile, TURNSTILE_SYNC_IPC);
		assert(turnstile != TURNSTILE_NULL);
	}
	ip_mq_unlock(port);
	turnstile_cleanup();

	if (turnstile != TURNSTILE_NULL) {
		/* _safe variant: caller may hold the space lock */
		turnstile_deallocate_safe(turnstile);
		turnstile = TURNSTILE_NULL;
	}
}
1729 
1730 /*
1731  *	Routine:	ipc_port_rcv_turnstile
1732  *	Purpose:
1733  *		Get the port's receive turnstile
1734  *
1735  *	Conditions:
1736  *		mqueue locked or thread waiting on turnstile is locked.
1737  */
1738 static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)1739 ipc_port_rcv_turnstile(ipc_port_t port)
1740 {
1741 	return *port_rcv_turnstile_address(port);
1742 }
1743 
1744 
/*
 *	Routine:	ipc_port_link_special_reply_port
 *	Purpose:
 *		Link the special reply port with the destination port.
 *              Allocates turnstile to dest port.
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_link_special_reply_port(
	ipc_port_t special_reply_port,
	ipc_port_t dest_port,
	boolean_t sync_bootstrap_checkin)
{
	boolean_t drop_turnstile_ref = FALSE;
	boolean_t special_reply = FALSE;

	/* Check if dest_port needs a turnstile */
	ipc_port_send_turnstile_prepare(dest_port);

	/* Lock the special reply port and establish the linkage */
	ip_mq_lock(special_reply_port);

	special_reply = special_reply_port->ip_specialreply;

	if (sync_bootstrap_checkin && special_reply) {
		special_reply_port->ip_sync_bootstrap_checkin = 1;
	}

	/* Check if we need to drop the acquired turnstile ref on dest port */
	if (!special_reply ||
	    special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
	    special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
		/* not linkable: not a special reply port, or already linked */
		drop_turnstile_ref = TRUE;
	} else {
		/* take a reference on dest_port */
		ip_reference(dest_port);
		special_reply_port->ip_sync_inheritor_port = dest_port;
		special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
	}

	ip_mq_unlock(special_reply_port);

	if (special_reply) {
		/*
		 * For special reply ports, if the destination port is
		 * marked with the thread group blocked tracking flag,
		 * callout to the performance controller.
		 */
		ipc_port_thread_group_blocked(dest_port);
	}

	if (drop_turnstile_ref) {
		/* undo the ref taken by ipc_port_send_turnstile_prepare() */
		ipc_port_send_turnstile_complete(dest_port);
	}

	return;
}
1804 
1805 /*
1806  *	Routine:	ipc_port_thread_group_blocked
1807  *	Purpose:
1808  *		Call thread_group_blocked callout if the port
1809  *	        has ip_tg_block_tracking bit set and the thread
1810  *	        has not made this callout already.
1811  *
1812  *	Conditions:
1813  *		Nothing is locked.
1814  */
1815 void
ipc_port_thread_group_blocked(ipc_port_t port __unused)1816 ipc_port_thread_group_blocked(ipc_port_t port __unused)
1817 {
1818 #if CONFIG_THREAD_GROUPS
1819 	bool port_tg_block_tracking = false;
1820 	thread_t self = current_thread();
1821 
1822 	if (self->thread_group == NULL ||
1823 	    (self->options & TH_OPT_IPC_TG_BLOCKED)) {
1824 		return;
1825 	}
1826 
1827 	port_tg_block_tracking = port->ip_tg_block_tracking;
1828 	if (!port_tg_block_tracking) {
1829 		return;
1830 	}
1831 
1832 	machine_thread_group_blocked(self->thread_group, NULL,
1833 	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1834 
1835 	self->options |= TH_OPT_IPC_TG_BLOCKED;
1836 #endif
1837 }
1838 
/*
 *	Routine:	ipc_port_thread_group_unblocked
 *	Purpose:
 *		Call thread_group_unblocked callout if the
 *		thread had previously made a thread_group_blocked
 *		callout before (indicated by TH_OPT_IPC_TG_BLOCKED
 *		flag on the thread).
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
	thread_t self = current_thread();

	/* only balance a previously-made blocked callout */
	if ((self->options & TH_OPT_IPC_TG_BLOCKED) == 0) {
		return;
	}

	machine_thread_group_unblocked(self->thread_group, NULL,
	    PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

	self->options &= ~TH_OPT_IPC_TG_BLOCKED;
#endif
}
1866 
1867 #if DEVELOPMENT || DEBUG
1868 inline void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)1869 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1870 {
1871 	special_reply_port->ip_srp_lost_link = 0;
1872 	special_reply_port->ip_srp_msg_sent = 0;
1873 }
1874 
1875 static inline void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)1876 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1877 {
1878 	if (special_reply_port->ip_specialreply == 1) {
1879 		special_reply_port->ip_srp_msg_sent = 0;
1880 	}
1881 }
1882 
1883 inline void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)1884 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1885 {
1886 	if (special_reply_port->ip_specialreply == 1) {
1887 		special_reply_port->ip_srp_msg_sent = 1;
1888 	}
1889 }
1890 
1891 static inline void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)1892 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1893 {
1894 	if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
1895 		special_reply_port->ip_srp_lost_link = 1;
1896 	}
1897 }
1898 
1899 #else /* DEVELOPMENT || DEBUG */
1900 inline void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)1901 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
1902 {
1903 	return;
1904 }
1905 
1906 static inline void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)1907 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
1908 {
1909 	return;
1910 }
1911 
1912 inline void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)1913 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
1914 {
1915 	return;
1916 }
1917 
1918 static inline void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)1919 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
1920 {
1921 	return;
1922 }
1923 #endif /* DEVELOPMENT || DEBUG */
1924 
1925 /*
1926  *	Routine:	ipc_port_adjust_special_reply_port_locked
1927  *	Purpose:
1928  *		If the special port has a turnstile, update its inheritor.
1929  *	Condition:
1930  *		Special reply port locked on entry.
1931  *		Special reply port unlocked on return.
1932  *		The passed in port is a special reply port.
1933  *	Returns:
1934  *		None.
1935  */
1936 void
ipc_port_adjust_special_reply_port_locked(ipc_port_t special_reply_port,struct knote * kn,uint8_t flags,boolean_t get_turnstile)1937 ipc_port_adjust_special_reply_port_locked(
1938 	ipc_port_t special_reply_port,
1939 	struct knote *kn,
1940 	uint8_t flags,
1941 	boolean_t get_turnstile)
1942 {
1943 	ipc_port_t dest_port = IPC_PORT_NULL;
1944 	int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
1945 	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1946 	struct turnstile *ts = TURNSTILE_NULL;
1947 	struct turnstile *port_stashed_turnstile = TURNSTILE_NULL;
1948 
1949 	ip_mq_lock_held(special_reply_port); // ip_sync_link_state is touched
1950 
1951 	if (!special_reply_port->ip_specialreply) {
1952 		// only mach_msg_receive_results_complete() calls this with any port
1953 		assert(get_turnstile);
1954 		goto not_special;
1955 	}
1956 
1957 	if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
1958 		ipc_special_reply_port_msg_sent_reset(special_reply_port);
1959 	}
1960 
1961 	if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
1962 		special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
1963 	}
1964 
1965 	if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
1966 		special_reply_port->ip_sync_bootstrap_checkin = 0;
1967 	}
1968 
1969 	/* Check if the special reply port is marked non-special */
1970 	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
1971 not_special:
1972 		if (get_turnstile) {
1973 			turnstile_complete((uintptr_t)special_reply_port,
1974 			    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1975 		}
1976 		ip_mq_unlock(special_reply_port);
1977 		if (get_turnstile) {
1978 			turnstile_cleanup();
1979 		}
1980 		return;
1981 	}
1982 
1983 	if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
1984 		if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
1985 			inheritor = filt_machport_stash_port(kn, special_reply_port,
1986 			    &sync_link_state);
1987 		}
1988 	} else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
1989 		sync_link_state = PORT_SYNC_LINK_ANY;
1990 	}
1991 
1992 	/* Check if need to break linkage */
1993 	if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
1994 	    special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
1995 		ip_mq_unlock(special_reply_port);
1996 		return;
1997 	}
1998 
1999 	switch (special_reply_port->ip_sync_link_state) {
2000 	case PORT_SYNC_LINK_PORT:
2001 		dest_port = special_reply_port->ip_sync_inheritor_port;
2002 		special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
2003 		break;
2004 	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
2005 		special_reply_port->ip_sync_inheritor_knote = NULL;
2006 		break;
2007 	case PORT_SYNC_LINK_WORKLOOP_STASH:
2008 		port_stashed_turnstile = special_reply_port->ip_sync_inheritor_ts;
2009 		special_reply_port->ip_sync_inheritor_ts = NULL;
2010 		break;
2011 	}
2012 
2013 	/*
2014 	 * Stash (or unstash) the server's PID in the ip_sorights field of the
2015 	 * special reply port, so that stackshot can later retrieve who the client
2016 	 * is blocked on.
2017 	 */
2018 	if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
2019 	    sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
2020 		ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
2021 	} else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
2022 	    sync_link_state == PORT_SYNC_LINK_ANY) {
2023 		/* If we are resetting the special reply port, remove the stashed pid. */
2024 		ipc_special_reply_stash_pid_locked(special_reply_port, 0);
2025 	}
2026 
2027 	special_reply_port->ip_sync_link_state = sync_link_state;
2028 
2029 	switch (sync_link_state) {
2030 	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
2031 		special_reply_port->ip_sync_inheritor_knote = kn;
2032 		break;
2033 	case PORT_SYNC_LINK_WORKLOOP_STASH:
2034 		turnstile_reference(inheritor);
2035 		special_reply_port->ip_sync_inheritor_ts = inheritor;
2036 		break;
2037 	case PORT_SYNC_LINK_NO_LINKAGE:
2038 		if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
2039 			ipc_special_reply_port_lost_link(special_reply_port);
2040 		}
2041 		break;
2042 	}
2043 
2044 	/* Get thread's turnstile donated to special reply port */
2045 	if (get_turnstile) {
2046 		turnstile_complete((uintptr_t)special_reply_port,
2047 		    port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
2048 	} else {
2049 		ts = ipc_port_rcv_turnstile(special_reply_port);
2050 		if (ts) {
2051 			turnstile_reference(ts);
2052 			ipc_port_recv_update_inheritor(special_reply_port, ts,
2053 			    TURNSTILE_IMMEDIATE_UPDATE);
2054 		}
2055 	}
2056 
2057 	ip_mq_unlock(special_reply_port);
2058 
2059 	if (get_turnstile) {
2060 		turnstile_cleanup();
2061 	} else if (ts) {
2062 		/* Call turnstile cleanup after dropping the interlock */
2063 		turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
2064 		turnstile_deallocate_safe(ts);
2065 	}
2066 
2067 	if (port_stashed_turnstile) {
2068 		turnstile_deallocate_safe(port_stashed_turnstile);
2069 	}
2070 
2071 	/* Release the ref on the dest port and its turnstile */
2072 	if (dest_port) {
2073 		ipc_port_send_turnstile_complete(dest_port);
2074 		/* release the reference on the dest port, space lock might be held */
2075 		ip_release_safe(dest_port);
2076 	}
2077 }
2078 
2079 /*
2080  *	Routine:	ipc_port_adjust_special_reply_port
2081  *	Purpose:
2082  *		If the special port has a turnstile, update its inheritor.
2083  *	Condition:
2084  *		Nothing locked.
2085  *	Returns:
2086  *		None.
2087  */
2088 void
ipc_port_adjust_special_reply_port(ipc_port_t port,uint8_t flags)2089 ipc_port_adjust_special_reply_port(
2090 	ipc_port_t port,
2091 	uint8_t flags)
2092 {
2093 	if (port->ip_specialreply) {
2094 		ip_mq_lock(port);
2095 		ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2096 	}
2097 }
2098 
2099 /*
2100  *	Routine:	ipc_port_adjust_sync_link_state_locked
2101  *	Purpose:
2102  *		Update the sync link state of the port and the
2103  *		turnstile inheritor.
2104  *	Condition:
2105  *		Port locked on entry.
2106  *		Port locked on return.
2107  *	Returns:
2108  *              None.
2109  */
void
ipc_port_adjust_sync_link_state_locked(
	ipc_port_t port,
	int sync_link_state,
	turnstile_inheritor_t inheritor)
{
	/*
	 * Drop the reference held for the outgoing linkage, if any.
	 * Only the RCV_THREAD and WORKLOOP_STASH states hold a reference
	 * on their inheritor (taken in the second switch below).
	 */
	switch (port->ip_sync_link_state) {
	case PORT_SYNC_LINK_RCV_THREAD:
		/* deallocate the thread reference for the inheritor */
		thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* deallocate the turnstile reference for the inheritor */
		turnstile_deallocate_safe(port->ip_messages.imq_inheritor_turnstile);
		break;
	}

	/*
	 * NOTE(review): ip_klist appears to overlay the imq_inheritor_*
	 * fields, so reinitializing the klist also clears the previous
	 * inheritor value — confirm against the ipc_port/ipc_mqueue layout.
	 */
	klist_init(&port->ip_klist);

	/* Install the new inheritor, taking a reference where required. */
	switch (sync_link_state) {
	case PORT_SYNC_LINK_WORKLOOP_KNOTE:
		port->ip_messages.imq_inheritor_knote = inheritor;
		break;
	case PORT_SYNC_LINK_WORKLOOP_STASH:
		/* knote can be deleted by userspace, take a reference on turnstile */
		turnstile_reference(inheritor);
		port->ip_messages.imq_inheritor_turnstile = inheritor;
		break;
	case PORT_SYNC_LINK_RCV_THREAD:
		/* The thread could exit without clearing port state, take a thread ref */
		thread_reference((thread_t)inheritor);
		port->ip_messages.imq_inheritor_thread_ref = inheritor;
		break;
	default:
		/* Any other state carries no inheritor; normalize to ANY. */
		klist_init(&port->ip_klist);
		sync_link_state = PORT_SYNC_LINK_ANY;
	}

	port->ip_sync_link_state = sync_link_state;
}
2150 
2151 
2152 /*
2153  *	Routine:	ipc_port_adjust_port_locked
2154  *	Purpose:
2155  *		If the port has a turnstile, update its inheritor.
2156  *	Condition:
2157  *		Port locked on entry.
2158  *		Port unlocked on return.
2159  *	Returns:
2160  *		None.
2161  */
void
ipc_port_adjust_port_locked(
	ipc_port_t port,
	struct knote *kn,
	boolean_t sync_bootstrap_checkin)
{
	int sync_link_state = PORT_SYNC_LINK_ANY;
	turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

	ip_mq_lock_held(port); // ip_sync_link_state is touched
	assert(!port->ip_specialreply);

	if (kn) {
		/* Let the machport filter stash the port and pick the link state. */
		inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
		if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
			inheritor = kn;
		}
	} else if (sync_bootstrap_checkin) {
		/* Boost the thread performing a sync bootstrap checkin. */
		inheritor = current_thread();
		sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
	}

	ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
	port->ip_sync_bootstrap_checkin = 0;

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2190 
2191 /*
2192  *	Routine:	ipc_port_clear_sync_rcv_thread_boost_locked
2193  *	Purpose:
2194  *		If the port is pushing on rcv thread, clear it.
2195  *	Condition:
2196  *		Port locked on entry
2197  *		Port unlocked on return.
2198  *	Returns:
2199  *		None.
2200  */
void
ipc_port_clear_sync_rcv_thread_boost_locked(
	ipc_port_t port)
{
	ip_mq_lock_held(port); // ip_sync_link_state is touched

	/* Only relevant when the port is currently boosting its rcv thread. */
	if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
		ip_mq_unlock(port);
		return;
	}

	/* Drops the thread reference held by the RCV_THREAD linkage. */
	ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
}
2217 
2218 /*
2219  *	Routine:	ipc_port_has_prdrequest
2220  *	Purpose:
2221  *		Returns whether a port has a port-destroyed request armed
2222  *	Condition:
2223  *		Port is locked.
2224  */
2225 bool
ipc_port_has_prdrequest(ipc_port_t port)2226 ipc_port_has_prdrequest(
2227 	ipc_port_t port)
2228 {
2229 	if (port->ip_specialreply) {
2230 		return false;
2231 	}
2232 	if (port->ip_has_watchport) {
2233 		return port->ip_twe->twe_pdrequest != IP_NULL;
2234 	}
2235 	return port->ip_pdrequest != IP_NULL;
2236 }
2237 
2238 /*
2239  *	Routine:	ipc_port_add_watchport_elem_locked
2240  *	Purpose:
2241  *		Transfer the turnstile boost of watchport to task calling exec.
2242  *	Condition:
2243  *		Port locked on entry.
2244  *		Port unlocked on return.
2245  *	Returns:
 *		KERN_SUCCESS on success.
2247  *		KERN_FAILURE otherwise.
2248  */
kern_return_t
ipc_port_add_watchport_elem_locked(
	ipc_port_t                 port,
	struct task_watchport_elem *watchport_elem,
	struct task_watchport_elem **old_elem)
{
	ip_mq_lock_held(port);

	/* Watchport boost only works for non-special active ports mapped in an ipc space */
	if (!ip_active(port) || port->ip_specialreply || !ip_in_a_space(port)) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
		/* Sever the linkage if the port was pushing on knote */
		ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
	}

	/* Install the new element; hand the displaced one back to the caller. */
	*old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2274 
2275 /*
2276  *	Routine:	ipc_port_clear_watchport_elem_internal_conditional_locked
2277  *	Purpose:
2278  *		Remove the turnstile boost of watchport and recompute the push.
2279  *	Condition:
2280  *		Port locked on entry.
2281  *		Port unlocked on return.
2282  *	Returns:
 *		KERN_SUCCESS on success.
2284  *		KERN_FAILURE otherwise.
2285  */
kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(
	ipc_port_t                 port,
	struct task_watchport_elem *watchport_elem)
{
	ip_mq_lock_held(port);

	/* Bail if a different element was installed in the meantime. */
	if (ipc_port_watchport_elem(port) != watchport_elem) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	ipc_port_clear_watchport_elem_internal(port);
	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2303 
2304 /*
2305  *	Routine:	ipc_port_replace_watchport_elem_conditional_locked
2306  *	Purpose:
2307  *		Replace the turnstile boost of watchport and recompute the push.
2308  *	Condition:
2309  *		Port locked on entry.
2310  *		Port unlocked on return.
2311  *	Returns:
 *		KERN_SUCCESS on success.
2313  *		KERN_FAILURE otherwise.
2314  */
kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(
	ipc_port_t                 port,
	struct task_watchport_elem *old_watchport_elem,
	struct task_watchport_elem *new_watchport_elem)
{
	ip_mq_lock_held(port);

	/* Only replace if the expected old element is still installed. */
	if (port->ip_specialreply ||
	    ipc_port_watchport_elem(port) != old_watchport_elem) {
		ip_mq_unlock(port);
		return KERN_FAILURE;
	}

	ipc_port_update_watchport_elem(port, new_watchport_elem);
	ipc_port_send_turnstile_recompute_push_locked(port);
	/* port unlocked */
	return KERN_SUCCESS;
}
2334 
2335 /*
2336  *	Routine:	ipc_port_clear_watchport_elem_internal
2337  *	Purpose:
2338  *		Remove the turnstile boost of watchport.
2339  *	Condition:
2340  *		Port locked on entry.
2341  *		Port locked on return.
2342  *	Returns:
2343  *		Old task_watchport_elem returned.
2344  */
2345 struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(ipc_port_t port)2346 ipc_port_clear_watchport_elem_internal(
2347 	ipc_port_t                 port)
2348 {
2349 	ip_mq_lock_held(port);
2350 
2351 	if (!port->ip_has_watchport) {
2352 		return NULL;
2353 	}
2354 
2355 	return ipc_port_update_watchport_elem(port, NULL);
2356 }
2357 
2358 /*
2359  *	Routine:	ipc_port_send_turnstile_recompute_push_locked
2360  *	Purpose:
2361  *		Update send turnstile inheritor of port and recompute the push.
2362  *	Condition:
2363  *		Port locked on entry.
2364  *		Port unlocked on return.
2365  *	Returns:
2366  *		None.
2367  */
static void
ipc_port_send_turnstile_recompute_push_locked(
	ipc_port_t port)
{
	struct turnstile *send_turnstile = port_send_turnstile(port);
	if (send_turnstile) {
		/* Keep the turnstile alive across the unlock below. */
		turnstile_reference(send_turnstile);
		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_IMMEDIATE_UPDATE);
	}
	ip_mq_unlock(port);

	if (send_turnstile) {
		/* Finish propagating the push without the port lock held. */
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);
		turnstile_deallocate_safe(send_turnstile);
	}
}
2386 
2387 /*
2388  *	Routine:	ipc_port_get_watchport_inheritor
2389  *	Purpose:
2390  *		Returns inheritor for watchport.
2391  *
2392  *	Conditions:
2393  *		mqueue locked.
2394  *	Returns:
2395  *		watchport inheritor.
2396  */
2397 static thread_t
ipc_port_get_watchport_inheritor(ipc_port_t port)2398 ipc_port_get_watchport_inheritor(
2399 	ipc_port_t port)
2400 {
2401 	ip_mq_lock_held(port);
2402 	return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2403 }
2404 
2405 /*
 *	Routine:	ipc_port_get_receiver_task_locked
2407  *	Purpose:
2408  *		Returns receiver task pointer and its pid (if any) for port.
2409  *
2410  *	Conditions:
2411  *		Assumes the port is locked.
2412  */
2413 pid_t
ipc_port_get_receiver_task_locked(ipc_port_t port,uintptr_t * task)2414 ipc_port_get_receiver_task_locked(ipc_port_t port, uintptr_t *task)
2415 {
2416 	task_t receiver = TASK_NULL;
2417 	pid_t pid = -1;
2418 
2419 	if (!port) {
2420 		goto out;
2421 	}
2422 
2423 	if (ip_in_a_space(port) &&
2424 	    !ip_in_space(port, ipc_space_kernel) &&
2425 	    !ip_in_space(port, ipc_space_reply)) {
2426 		receiver = port->ip_receiver->is_task;
2427 		pid = task_pid(receiver);
2428 	}
2429 
2430 out:
2431 	if (task) {
2432 		*task = (uintptr_t)receiver;
2433 	}
2434 	return pid;
2435 }
2436 
2437 /*
2438  *	Routine:	ipc_port_get_receiver_task
2439  *	Purpose:
2440  *		Returns receiver task pointer and its pid (if any) for port.
2441  *
2442  *	Conditions:
2443  *		Nothing locked. The routine takes port lock.
2444  */
2445 pid_t
ipc_port_get_receiver_task(ipc_port_t port,uintptr_t * task)2446 ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
2447 {
2448 	pid_t pid = -1;
2449 
2450 	if (!port) {
2451 		if (task) {
2452 			*task = (uintptr_t)TASK_NULL;
2453 		}
2454 		return pid;
2455 	}
2456 
2457 	ip_mq_lock(port);
2458 	pid = ipc_port_get_receiver_task_locked(port, task);
2459 	ip_mq_unlock(port);
2460 
2461 	return pid;
2462 }
2463 
2464 /*
2465  *	Routine:	ipc_port_impcount_delta
2466  *	Purpose:
2467  *		Adjust only the importance count associated with a port.
2468  *		If there are any adjustments to be made to receiver task,
2469  *		those are handled elsewhere.
2470  *
2471  *		For now, be defensive during deductions to make sure the
2472  *		impcount for the port doesn't underflow zero.  This will
2473  *		go away when the port boost addition is made atomic (see
2474  *		note in ipc_port_importance_delta()).
2475  *	Conditions:
2476  *		The port is referenced and locked.
2477  *		Nothing else is locked.
2478  */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t        port,
	mach_port_delta_t delta,
	ipc_port_t        __unused base)
{
	mach_port_delta_t absdelta;

	/* Inactive ports carry no importance; nothing to adjust. */
	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	/*
	 * Underflow would occur: log who over-released importance
	 * assertions.  Identify the receiver via the port's own space
	 * if it is mapped, else via the base of the transit chain.
	 */
	if (ip_in_a_space(port)) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    ip_get_receiver_name(port),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		assert(ip_in_a_space(base));
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    ip_get_receiver_name(base),
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif

	/* Clamp at zero and report how much was actually dropped. */
	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
2550 
2551 /*
2552  *	Routine:	ipc_port_importance_delta_internal
2553  *	Purpose:
2554  *		Adjust the importance count through the given port.
2555  *		If the port is in transit, apply the delta throughout
2556  *		the chain. Determine if the there is a task at the
2557  *		base of the chain that wants/needs to be adjusted,
2558  *		and if so, apply the delta.
2559  *	Conditions:
2560  *		The port is referenced and locked on entry.
2561  *		Importance may be locked.
2562  *		Nothing else is locked.
2563  *		The lock may be dropped on exit.
2564  *		Returns TRUE if lock was dropped.
2565  */
2566 #if IMPORTANCE_INHERITANCE
2567 
boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       *deltap,
	ipc_importance_task_t   *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	*imp_task = IIT_NULL;

	/* A zero delta is a no-op; the port lock is never dropped. */
	if (*deltap == 0) {
		return FALSE;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_in_transit(port)) {
		dropped = true;

		/* Must take the multiple lock before re-walking the chain. */
		ip_mq_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		assert(ip_in_transit(port));
		next = ip_get_destination(port);
		ip_mq_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != ip_get_imp_task(base)) {
				*imp_task = ip_get_imp_task(base);
			}
			/* otherwise don't boost */
		} else if (ip_in_a_space(base)) {
			ipc_space_t space = ip_get_receiver(base);

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked.  If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_mq_unlock(base);
		if (took_base_ref) {
			/* importance lock might be held */
			ip_release_safe(base);
		}
	}

	return dropped;
}
2676 #endif /* IMPORTANCE_INHERITANCE */
2677 
2678 /*
2679  *	Routine:	ipc_port_importance_delta
2680  *	Purpose:
2681  *		Adjust the importance count through the given port.
2682  *		If the port is in transit, apply the delta throughout
2683  *		the chain.
2684  *
2685  *		If there is a task at the base of the chain that wants/needs
2686  *		to be adjusted, apply the delta.
2687  *	Conditions:
2688  *		The port is referenced and locked on entry.
2689  *		Nothing else is locked.
2690  *		The lock may be dropped on exit.
2691  *		Returns TRUE if lock was dropped.
2692  */
2693 #if IMPORTANCE_INHERITANCE
2694 
boolean_t
ipc_port_importance_delta(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	/* No boosted task found, or nothing left to apply: done. */
	if (IIT_NULL == imp_task || delta == 0) {
		return dropped;
	}

	/* Must not hold the port lock while adjusting task importance. */
	if (!dropped) {
		ip_mq_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	/* Drop the reference taken by the _internal call above. */
	ipc_importance_task_release(imp_task);
	return TRUE;
}
2725 #endif /* IMPORTANCE_INHERITANCE */
2726 
ipc_port_t
ipc_port_make_send_any_locked(
	ipc_port_t      port)
{
	require_ip_active(port);
	port->ip_mscount++;     /* bump make-send count for no-senders tracking */
	ip_srights_inc(port);
	ip_reference(port);     /* the new send right holds a port reference */
	return port;
}
2737 
2738 ipc_port_t
ipc_port_make_send_any(ipc_port_t port)2739 ipc_port_make_send_any(
2740 	ipc_port_t      port)
2741 {
2742 	ipc_port_t sright = port;
2743 
2744 	if (IP_VALID(port)) {
2745 		ip_mq_lock(port);
2746 		if (ip_active(port)) {
2747 			ipc_port_make_send_any_locked(port);
2748 		} else {
2749 			sright = IP_DEAD;
2750 		}
2751 		ip_mq_unlock(port);
2752 	}
2753 
2754 	return sright;
2755 }
2756 
2757 ipc_port_t
ipc_port_make_send_mqueue(ipc_port_t port)2758 ipc_port_make_send_mqueue(
2759 	ipc_port_t      port)
2760 {
2761 	ipc_port_t sright = port;
2762 	ipc_kobject_type_t kotype;
2763 
2764 	if (IP_VALID(port)) {
2765 		kotype = ip_kotype(port);
2766 
2767 		ip_mq_lock(port);
2768 		if (__improbable(!ip_active(port))) {
2769 			sright = IP_DEAD;
2770 		} else if (kotype == IKOT_NONE) {
2771 			ipc_port_make_send_any_locked(port);
2772 		} else if (kotype == IKOT_TIMER) {
2773 			ipc_kobject_mktimer_require_locked(port);
2774 			ipc_port_make_send_any_locked(port);
2775 		} else {
2776 			sright = IP_NULL;
2777 		}
2778 		ip_mq_unlock(port);
2779 	}
2780 
2781 	return sright;
2782 }
2783 
void
ipc_port_copy_send_any_locked(
	ipc_port_t      port)
{
	/* Copying requires an existing send right; no mscount bump. */
	assert(port->ip_srights > 0);
	ip_srights_inc(port);
	ip_reference(port);     /* the new send right holds a port reference */
}
2792 
2793 ipc_port_t
ipc_port_copy_send_any(ipc_port_t port)2794 ipc_port_copy_send_any(
2795 	ipc_port_t      port)
2796 {
2797 	ipc_port_t sright = port;
2798 
2799 	if (IP_VALID(port)) {
2800 		ip_mq_lock(port);
2801 		if (ip_active(port)) {
2802 			ipc_port_copy_send_any_locked(port);
2803 		} else {
2804 			sright = IP_DEAD;
2805 		}
2806 		ip_mq_unlock(port);
2807 	}
2808 
2809 	return sright;
2810 }
2811 
2812 ipc_port_t
ipc_port_copy_send_mqueue(ipc_port_t port)2813 ipc_port_copy_send_mqueue(
2814 	ipc_port_t      port)
2815 {
2816 	ipc_port_t sright = port;
2817 	ipc_kobject_type_t kotype;
2818 
2819 	if (IP_VALID(port)) {
2820 		kotype = ip_kotype(port);
2821 
2822 		ip_mq_lock(port);
2823 		if (__improbable(!ip_active(port))) {
2824 			sright = IP_DEAD;
2825 		} else if (kotype == IKOT_NONE) {
2826 			ipc_port_copy_send_any_locked(port);
2827 		} else if (kotype == IKOT_TIMER) {
2828 			ipc_kobject_mktimer_require_locked(port);
2829 			ipc_port_copy_send_any_locked(port);
2830 		} else {
2831 			sright = IP_NULL;
2832 		}
2833 		ip_mq_unlock(port);
2834 	}
2835 
2836 	return sright;
2837 }
2838 
2839 /*
2840  *	Routine:	ipc_port_copyout_send
2841  *	Purpose:
2842  *		Copyout a naked send right (possibly null/dead),
2843  *		or if that fails, destroy the right.
2844  *	Conditions:
2845  *		Nothing locked.
2846  */
2847 
2848 static mach_port_name_t
ipc_port_copyout_send_internal(ipc_port_t sright,ipc_space_t space,ipc_object_copyout_flags_t flags)2849 ipc_port_copyout_send_internal(
2850 	ipc_port_t      sright,
2851 	ipc_space_t     space,
2852 	ipc_object_copyout_flags_t flags)
2853 {
2854 	mach_port_name_t name;
2855 
2856 	if (IP_VALID(sright)) {
2857 		kern_return_t kr;
2858 
2859 		kr = ipc_object_copyout(space, ip_to_object(sright),
2860 		    MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, &name);
2861 		if (kr != KERN_SUCCESS) {
2862 			if (kr == KERN_INVALID_CAPABILITY) {
2863 				name = MACH_PORT_DEAD;
2864 			} else {
2865 				name = MACH_PORT_NULL;
2866 			}
2867 		}
2868 	} else {
2869 		name = CAST_MACH_PORT_TO_NAME(sright);
2870 	}
2871 
2872 	return name;
2873 }
2874 
mach_port_name_t
ipc_port_copyout_send(
	ipc_port_t      sright, /* can be invalid */
	ipc_space_t     space)
{
	/* Plain copyout: no pinning semantics requested. */
	return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
}
2882 
2883 /* Used by pthread kext to copyout thread port only */
2884 mach_port_name_t
ipc_port_copyout_send_pinned(ipc_port_t sright,ipc_space_t space)2885 ipc_port_copyout_send_pinned(
2886 	ipc_port_t      sright, /* can be invalid */
2887 	ipc_space_t     space)
2888 {
2889 	assert(space->is_task != TASK_NULL);
2890 
2891 	if (IP_VALID(sright)) {
2892 		assert(ip_kotype(sright) == IKOT_THREAD_CONTROL);
2893 	}
2894 
2895 	if (task_is_pinned(space->is_task)) {
2896 		return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
2897 	} else {
2898 		return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2899 	}
2900 }
2901 
2902 /*
2903  *	Routine:	ipc_port_release_send_and_unlock
2904  *	Purpose:
2905  *		Release a naked send right.
2906  *		Consumes a ref for the port.
2907  *	Conditions:
2908  *		Port is valid and locked on entry
2909  *		Port is unlocked on exit.
2910  */
void
ipc_port_release_send_and_unlock(
	ipc_port_t      port)
{
	ipc_notify_nsenders_t nsrequest = { };

	ip_srights_dec(port);

	/* Last send right gone: prepare a no-senders notification if armed. */
	if (ip_active(port) && port->ip_srights == 0) {
		nsrequest = ipc_notify_no_senders_prepare(port);
	}

	ip_mq_unlock(port);
	ip_release(port);

	/* Emit the notification (if any) with no locks held. */
	ipc_notify_no_senders_emit(nsrequest);
}
2928 
2929 /*
2930  *	Routine:	ipc_port_release_send
2931  *	Purpose:
2932  *		Release a naked send right.
2933  *		Consumes a ref for the port.
2934  *	Conditions:
2935  *		Nothing locked.
2936  */
2937 
__attribute__((flatten, noinline))
void
ipc_port_release_send(
	ipc_port_t      port)
{
	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* Consumes the lock and the send right's port reference. */
		ipc_port_release_send_and_unlock(port);
	}
}
2948 
2949 /*
2950  *	Routine:	ipc_port_make_sonce_locked
2951  *	Purpose:
2952  *		Make a naked send-once right from a receive right.
2953  *	Conditions:
2954  *		The port is locked and active.
2955  */
2956 
ipc_port_t
ipc_port_make_sonce_locked(
	ipc_port_t      port)
{
	require_ip_active(port);
	ip_sorights_inc(port);
	ip_reference(port);     /* the send-once right holds a port reference */
	return port;
}
2966 
2967 /*
2968  *	Routine:	ipc_port_make_sonce
2969  *	Purpose:
2970  *		Make a naked send-once right from a receive right.
2971  *	Conditions:
2972  *		The port is not locked.
2973  */
2974 
2975 ipc_port_t
ipc_port_make_sonce(ipc_port_t port)2976 ipc_port_make_sonce(
2977 	ipc_port_t      port)
2978 {
2979 	if (!IP_VALID(port)) {
2980 		return port;
2981 	}
2982 
2983 	ip_mq_lock(port);
2984 	if (ip_active(port)) {
2985 		ipc_port_make_sonce_locked(port);
2986 		ip_mq_unlock(port);
2987 		return port;
2988 	}
2989 	ip_mq_unlock(port);
2990 	return IP_DEAD;
2991 }
2992 
2993 /*
2994  *	Routine:	ipc_port_release_sonce
2995  *	Purpose:
2996  *		Release a naked send-once right.
2997  *		Consumes a ref for the port.
2998  *
2999  *		In normal situations, this is never used.
3000  *		Send-once rights are only consumed when
3001  *		a message (possibly a send-once notification)
3002  *		is sent to them.
3003  *	Conditions:
3004  *		The port is locked, possibly a space too.
3005  */
void
ipc_port_release_sonce_and_unlock(
	ipc_port_t      port)
{
	ip_mq_lock_held(port);

	ip_sorights_dec(port);

	if (port->ip_specialreply) {
		/*
		 * Dropping a send-once right on a special reply port also
		 * resets its sync-IPC state; this call unlocks the port.
		 */
		ipc_port_adjust_special_reply_port_locked(port, NULL,
		    IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN, FALSE);
	} else {
		ip_mq_unlock(port);
	}

	/* Drop the reference held by the send-once right. */
	ip_release(port);
}
3023 
3024 /*
3025  *	Routine:	ipc_port_release_sonce
3026  *	Purpose:
3027  *		Release a naked send-once right.
3028  *		Consumes a ref for the port.
3029  *
3030  *		In normal situations, this is never used.
3031  *		Send-once rights are only consumed when
3032  *		a message (possibly a send-once notification)
3033  *		is sent to them.
3034  *	Conditions:
3035  *		Nothing locked except possibly a space.
3036  */
void
ipc_port_release_sonce(
	ipc_port_t      port)
{
	if (IP_VALID(port)) {
		ip_mq_lock(port);
		/* Consumes the lock and the send-once right's reference. */
		ipc_port_release_sonce_and_unlock(port);
	}
}
3046 
3047 /*
3048  *	Routine:	ipc_port_release_receive
3049  *	Purpose:
3050  *		Release a naked (in limbo or in transit) receive right.
3051  *		Consumes a ref for the port; destroys the port.
3052  *	Conditions:
3053  *		Nothing locked.
3054  */
3055 
void
ipc_port_release_receive(
	ipc_port_t      port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_mq_lock(port);
	require_ip_active(port);
	/* A naked receive right is in limbo or in transit, never in a space. */
	assert(!ip_in_a_space(port));
	dest = ip_get_destination(port);

	ipc_port_destroy(port); /* consumes ref, unlocks */

	if (dest != IP_NULL) {
		/* Tear down the send turnstile linkage to the destination. */
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
3078 
/*
 *	Routine:	ipc_port_alloc_special
 *	Purpose:
 *		Allocate a port in a special space.
 *		The new port is returned with one ref.
 *		If unsuccessful, IP_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
ipc_port_alloc_special(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags)
{
	ipc_port_t port;

	port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
	if (port == IP_NULL) {
		return IP_NULL;
	}

	/* the object starts life with IOT_PORT bits and a single reference */
	os_atomic_init(&port->ip_object.io_bits, io_makebits(IOT_PORT));
	os_atomic_init(&port->ip_object.io_references, 1);

	ipc_port_init(port, space, flags, MACH_PORT_SPECIAL_DEFAULT);
	return port;
}
3107 
/*
 *	Routine:	ipc_port_dealloc_special_and_unlock
 *	Purpose:
 *		Deallocate a port in a special space.
 *		Consumes one ref for the port.
 *	Conditions:
 *		Port is locked; the lock is consumed by this routine.
 */

void
ipc_port_dealloc_special_and_unlock(
	ipc_port_t                      port,
	__assert_only ipc_space_t       space)
{
	require_ip_active(port);
//	assert(port->ip_receiver_name != MACH_PORT_NULL);
	assert(ip_in_space(port, space));

	/*
	 *	We clear ip_receiver_name and ip_receiver to simplify
	 *	the ipc_space_kernel check in ipc_mqueue_send.
	 */

	/* port transitions to IN-LIMBO state */
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;

	/* relevant part of ipc_port_clear_receiver */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;

	/* consumes the ref and unlocks the port */
	ipc_port_destroy(port);
}
3141 
/*
 *	Routine:	ipc_port_dealloc_special
 *	Purpose:
 *		Deallocate a port in a special space.
 *		Consumes one ref for the port.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_port_dealloc_special(
	ipc_port_t        port,
	ipc_space_t       space)
{
	ip_mq_lock(port);
	/* the locked variant consumes both the lock and the ref */
	ipc_port_dealloc_special_and_unlock(port, space);
}
3159 
/*
 *	Routine:	ipc_port_finalize
 *	Purpose:
 *		Called on last reference deallocate to
 *		free any remaining data associated with the
 *		port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_finalize(
	ipc_port_t              port)
{
	ipc_port_request_table_t requests = port->ip_requests;

	/* no thread may still be pushing on this port via a send turnstile */
	assert(port_send_turnstile(port) == TURNSTILE_NULL);

	if (waitq_type(&port->ip_waitq) == WQT_PORT) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	/* finalizing an active port is a refcounting bug */
	if (ip_active(port)) {
		panic("Trying to free an active port. port %p", port);
	}

	if (requests) {
		port->ip_requests = NULL;
		/* free only the table storage; entries were torn down earlier */
		ipc_port_request_table_free_noclear(requests);
	}

	/*
	 * (81997111) now it is safe to deallocate the prealloc message.
	 * Keep the IP_BIT_PREALLOC bit, it has to be sticky as the turnstile
	 * code looks at it without holding locks.
	 */
	if (IP_PREALLOC(port)) {
		ipc_kmsg_t kmsg = port->ip_premsg;

		/* a missing or still-in-use prealloc kmsg means corrupted state */
		if (kmsg == IKM_NULL || ikm_prealloc_inuse_port(kmsg)) {
			panic("port(%p, %p): prealloc message in an invalid state",
			    port, kmsg);
		}

		port->ip_premsg = IKM_NULL;
		ipc_kmsg_free(kmsg);
	}

	waitq_deinit(&port->ip_waitq);
#if MACH_ASSERT
	/* release the allocation backtrace ref taken in ipc_port_init_debug */
	if (port->ip_made_bt) {
		btref_put(port->ip_made_bt);
	}
#endif
}
3214 
3215 /*
3216  *	Routine:	kdp_mqueue_send_find_owner
3217  *	Purpose:
3218  *		Discover the owner of the ipc object that contains the input
3219  *		waitq object. The thread blocked on the waitq should be
3220  *		waiting for an IPC_MQUEUE_FULL event.
3221  *	Conditions:
3222  *		The 'waitinfo->wait_type' value should already be set to
3223  *		kThreadWaitPortSend.
3224  *	Note:
3225  *		If we find out that the containing port is actually in
3226  *		transit, we reset the wait_type field to reflect this.
3227  */
3228 void
kdp_mqueue_send_find_owner(struct waitq * waitq,__assert_only event64_t event,thread_waitinfo_v2_t * waitinfo,struct ipc_service_port_label ** isplp)3229 kdp_mqueue_send_find_owner(
3230 	struct waitq                   *waitq,
3231 	__assert_only event64_t         event,
3232 	thread_waitinfo_v2_t           *waitinfo,
3233 	struct ipc_service_port_label **isplp)
3234 {
3235 	struct turnstile *turnstile;
3236 	assert(waitinfo->wait_type == kThreadWaitPortSend);
3237 	assert(event == IPC_MQUEUE_FULL);
3238 	assert(waitq_type(waitq) == WQT_TURNSTILE);
3239 
3240 	turnstile = waitq_to_turnstile(waitq);
3241 	ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
3242 
3243 	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3244 
3245 	waitinfo->owner = 0;
3246 	waitinfo->context  = VM_KERNEL_UNSLIDE_OR_PERM(port);
3247 	if (ip_mq_lock_held_kdp(port)) {
3248 		/*
3249 		 * someone has the port locked: it may be in an
3250 		 * inconsistent state: bail
3251 		 */
3252 		waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3253 		return;
3254 	}
3255 
3256 	/* now we are the only one accessing the port */
3257 	if (ip_active(port)) {
3258 		/*
3259 		 * In kdp context, port must be left unlocked throughout.
3260 		 * Therefore can't use union field accessor helpers, manually strip PAC
3261 		 * and compare raw pointer.
3262 		 */
3263 		void *raw_ptr = ip_get_receiver_ptr_noauth(port);
3264 
3265 		if (port->ip_tempowner) {
3266 			ipc_importance_task_t imp_task = ip_get_imp_task(port);
3267 			if (imp_task != IIT_NULL && imp_task->iit_task != NULL) {
3268 				/* port is held by a tempowner */
3269 				waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3270 			} else {
3271 				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3272 			}
3273 		} else if (ip_in_a_space(port)) { /* no port lock needed */
3274 			if ((ipc_space_t)raw_ptr == ipc_space_kernel) { /* access union field as ip_receiver */
3275 				/*
3276 				 * The kernel pid is 0, make this
3277 				 * distinguishable from no-owner and
3278 				 * inconsistent port state.
3279 				 */
3280 				waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
3281 			} else {
3282 				waitinfo->owner = pid_from_task(((ipc_space_t)raw_ptr)->is_task);
3283 			}
3284 		} else if ((ipc_port_t)raw_ptr != IP_NULL) { /* access union field as ip_destination */
3285 			waitinfo->wait_type = kThreadWaitPortSendInTransit;
3286 			waitinfo->owner     = VM_KERNEL_UNSLIDE_OR_PERM((ipc_port_t)raw_ptr);
3287 		}
3288 		if (port->ip_service_port && port->ip_splabel != NULL) {
3289 			*isplp = (struct ipc_service_port_label *)port->ip_splabel;
3290 		}
3291 	}
3292 }
3293 
/*
 *	Routine:	kdp_mqueue_recv_find_owner
 *	Purpose:
 *		Discover the "owner" of the ipc object that contains the input
 *		waitq object. The thread blocked on the waitq is trying to
 *		receive on the mqueue.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortReceive.
 *	Note:
 *		If we find that we are actually waiting on a port set, we reset
 *		the wait_type field to reflect this.
 */
void
kdp_mqueue_recv_find_owner(
	struct waitq                   *waitq,
	__assert_only event64_t         event,
	thread_waitinfo_v2_t           *waitinfo,
	struct ipc_service_port_label **isplp)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	waitinfo->owner = 0;

	if (waitq_type(waitq) == WQT_PORT_SET) {
		ipc_pset_t set = ips_from_waitq(waitq);

		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context   = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_mq_lock_held_kdp(set)) {
			/* the set is locked: state may be inconsistent */
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else if (waitq_type(waitq) == WQT_PORT) {
		ipc_port_t port = ip_from_waitq(waitq);

		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_mq_lock_held_kdp(port)) {
			/* the port is locked: bail rather than report bad data */
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (ip_in_a_space(port)) { /* no port lock needed */
				waitinfo->owner = ip_get_receiver_name(port);
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
			if (port->ip_specialreply) {
				waitinfo->wait_flags |= STACKSHOT_WAITINFO_FLAGS_SPECIALREPLY;
			}
			if (port->ip_splabel != NULL) {
				*isplp = (struct ipc_service_port_label *)port->ip_splabel;
			}
		}
	}
}
3357 
/*
 *	Routine:	ipc_port_set_label
 *	Purpose:
 *		Attach a kobject label to a previously unlabeled port.
 *	Conditions:
 *		The port must not already carry a kobject label.
 */
void
ipc_port_set_label(
	ipc_port_t              port,
	ipc_label_t             label)
{
	ipc_kobject_label_t labelp;

	assert(!ip_is_kolabeled(port));

	/* Z_NOFAIL: this allocation cannot return NULL */
	labelp = zalloc_flags(ipc_kobject_label_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	labelp->ikol_label = label;

	port->ip_kolabel = labelp;
	/* publish the label by setting the KOLABEL bit on the object */
	io_bits_or(ip_to_object(port), IO_BITS_KOLABEL);
}
3373 
3374 kern_return_t
ipc_port_reset_thread_attr(ipc_port_t port)3375 ipc_port_reset_thread_attr(
3376 	ipc_port_t port)
3377 {
3378 	uint8_t iotier = THROTTLE_LEVEL_END;
3379 	uint8_t qos = THREAD_QOS_UNSPECIFIED;
3380 
3381 	return ipc_port_update_qos_n_iotier(port, qos, iotier);
3382 }
3383 
3384 kern_return_t
ipc_port_propagate_thread_attr(ipc_port_t port,struct thread_attr_for_ipc_propagation attr)3385 ipc_port_propagate_thread_attr(
3386 	ipc_port_t port,
3387 	struct thread_attr_for_ipc_propagation attr)
3388 {
3389 	uint8_t iotier = attr.tafip_iotier;
3390 	uint8_t qos = attr.tafip_qos;
3391 
3392 	return ipc_port_update_qos_n_iotier(port, qos, iotier);
3393 }
3394 
/*
 *	Routine:	ipc_port_update_qos_n_iotier
 *	Purpose:
 *		Set the kernel QoS and IO tier overrides on a port,
 *		and kick any registered knotes so watchers can
 *		re-evaluate.
 *	Conditions:
 *		Nothing locked; takes and drops the port lock.
 *	Returns:
 *		KERN_INVALID_ARGUMENT	null port or special reply port
 *		KERN_TERMINATED		port is no longer active
 *		KERN_SUCCESS		overrides updated
 */
static kern_return_t
ipc_port_update_qos_n_iotier(
	ipc_port_t port,
	uint8_t    qos,
	uint8_t    iotier)
{
	if (port == IPC_PORT_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	ip_mq_lock(port);

	if (!ip_active(port)) {
		ip_mq_unlock(port);
		return KERN_TERMINATED;
	}

	/* special reply ports do not take these overrides */
	if (port->ip_specialreply) {
		ip_mq_unlock(port);
		return KERN_INVALID_ARGUMENT;
	}

	port->ip_kernel_iotier_override = iotier;
	port->ip_kernel_qos_override = qos;

	/* only notify if the receiving space is still active and knotes exist */
	if (ip_in_a_space(port) &&
	    is_active(ip_get_receiver(port)) &&
	    ipc_port_has_klist(port)) {
		KNOTE(&port->ip_klist, 0);
	}

	ip_mq_unlock(port);
	return KERN_SUCCESS;
}
3429 
3430 #if MACH_ASSERT
3431 #include <kern/machine.h>
3432 
/* Debug counters for MACH_ASSERT port tracking. */
unsigned long   port_count = 0;
unsigned long   port_count_warning = 20000;
/* monotonic creation stamp handed out by ipc_port_init_debug */
unsigned long   port_timestamp = 0;

/* debugger helper prototypes */
void            db_port_stack_trace(
	ipc_port_t      port);
void            db_ref(
	int             refs);
int             db_port_walk(
	unsigned int    verbose,
	unsigned int    display,
	unsigned int    ref_search,
	unsigned int    ref_target);

#ifdef MACH_BSD
extern int proc_pid(struct proc*);
#endif /* MACH_BSD */
3450 
/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 *	NOTE(review): no global-list insertion is visible in this body
 *	anymore — the second sentence above may be stale; confirm.
 */
void
ipc_port_init_debug(ipc_port_t port, void *fp)
{
	/* stamp the port with its creation order for debugging */
	port->ip_timetrack = port_timestamp++;

	if (ipc_portbt) {
		/* record the allocation backtrace when port backtracing is enabled */
		port->ip_made_bt = btref_get(fp, 0);
	}

#ifdef MACH_BSD
	/* remember which BSD process (if any) created the port */
	task_t task = current_task_early();
	if (task != TASK_NULL) {
		struct proc *proc = get_bsdtask_info(task);
		if (proc) {
			port->ip_made_pid = proc_pid(proc);
		}
	}
#endif /* MACH_BSD */
}
3474 
3475 #endif  /* MACH_ASSERT */
3476