xref: /xnu-11417.121.6/osfmk/kern/ipc_kobject.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58  * support for mandatory and extensible security protections.  This notice
59  * is included in support of clause 2.2 (b) of the Apple Public License,
60  * Version 2.0.
61  * Copyright (c) 2005 SPARTA, Inc.
62  */
63 /*
64  */
65 /*
66  *	File:	kern/ipc_kobject.c
67  *	Author:	Rich Draves
68  *	Date:	1989
69  *
70  *	Functions for letting a port represent a kernel object.
71  */
72 
73 #include <mach/mig.h>
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <mach/message.h>
77 #include <mach/mig_errors.h>
78 #include <mach/mach_notify.h>
79 #include <mach/ndr.h>
80 #include <mach/vm_param.h>
81 
82 #include <mach/mach_vm_server.h>
83 #include <mach/mach_port_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/clock_server.h>
87 #include <mach/memory_entry_server.h>
88 #include <mach/processor_server.h>
89 #include <mach/processor_set_server.h>
90 #include <mach/task_server.h>
91 #include <mach/mach_voucher_server.h>
92 #ifdef VM32_SUPPORT
93 #include <mach/vm32_map_server.h>
94 #endif
95 #include <mach/thread_act_server.h>
96 #include <mach/restartable_server.h>
97 
98 #include <mach/exc_server.h>
99 #include <mach/mach_exc_server.h>
100 #include <mach/mach_eventlink_server.h>
101 
102 #include <device/device_types.h>
103 #include <device/device_server.h>
104 
105 #if     CONFIG_USER_NOTIFICATION
106 #include <UserNotification/UNDReplyServer.h>
107 #endif
108 
109 #if     CONFIG_ARCADE
110 #include <mach/arcade_register_server.h>
111 #endif
112 
113 #if     CONFIG_AUDIT
114 #include <kern/audit_sessionport.h>
115 #endif
116 
117 #include <kern/counter.h>
118 #include <kern/ipc_tt.h>
119 #include <kern/ipc_mig.h>
120 #include <kern/ipc_misc.h>
121 #include <kern/ipc_kobject.h>
122 #include <kern/host_notify.h>
123 #include <kern/misc_protos.h>
124 
125 #if CONFIG_ARCADE
126 #include <kern/arcade.h>
127 #endif /* CONFIG_ARCADE */
128 
129 #include <ipc/ipc_kmsg.h>
130 #include <ipc/ipc_policy.h>
131 #include <ipc/ipc_port.h>
132 #include <ipc/ipc_voucher.h>
133 #include <kern/sync_sema.h>
134 #include <kern/work_interval.h>
135 #include <kern/task_ident.h>
136 
137 #if HYPERVISOR
138 #include <kern/hv_support.h>
139 #endif
140 
141 #include <vm/vm_protos.h>
142 
143 #include <security/mac_mach_internal.h>
144 
/* Resolve owning process name/pid for diagnostics (implemented in BSD side). */
extern char *proc_name_address(void *p);
struct proc;
extern int proc_pid(struct proc *p);

/* One slot of the kernel MIG dispatch hash table (see mig_init()). */
typedef struct {
	mach_msg_id_t num;              /* request msgh_id; 0 marks an empty slot */
	int kobjidx;                    /* filter index, set via ipc_kobject_set_kobjidx() */
	mig_kern_routine_t kroutine;    /* Kernel server routine */
	unsigned int kreply_size;       /* Size of kernel reply msg */
	unsigned int kreply_desc_cnt;   /* Number of descs in kernel reply msg */
} mig_hash_t;

static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT);   /* vestigial, no real instance */
IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
    .iko_op_no_senders = ipc_kobject_subst_once_no_senders);

/* Open-addressed table; 1031 is prime so linear probing cycles the whole table. */
#define MAX_MIG_ENTRIES 1031
#define MIG_HASH(x) (x)

/* kobjidx value meaning "no filter index registered for this routine". */
#define KOBJ_IDX_NOT_SET (-1)

static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */

ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
    struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
174 
/* Every kernel MIG subsystem served by the kobject dispatcher below. */
__startup_const
static struct mig_kern_subsystem *mig_e[] = {
	(const struct mig_kern_subsystem *)&mach_vm_subsystem,
	(const struct mig_kern_subsystem *)&mach_port_subsystem,
	(const struct mig_kern_subsystem *)&mach_host_subsystem,
	(const struct mig_kern_subsystem *)&host_priv_subsystem,
	(const struct mig_kern_subsystem *)&clock_subsystem,
	(const struct mig_kern_subsystem *)&processor_subsystem,
	(const struct mig_kern_subsystem *)&processor_set_subsystem,
	(const struct mig_kern_subsystem *)&is_iokit_subsystem,
	(const struct mig_kern_subsystem *)&task_subsystem,
	(const struct mig_kern_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_kern_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_kern_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_kern_subsystem *)&mach_voucher_subsystem,
	(const struct mig_kern_subsystem *)&memory_entry_subsystem,
	(const struct mig_kern_subsystem *)&task_restartable_subsystem,
	(const struct mig_kern_subsystem *)&catch_exc_subsystem,
	(const struct mig_kern_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_kern_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_kern_subsystem *)&mach_eventlink_subsystem,
};

/* Per-IKOT operation vectors; filled during startup, read-only afterwards. */
static struct ipc_kobject_ops __security_const_late
    ipc_kobject_ops_array[IKOT_MAX_TYPE];
206 
207 __startup_func
208 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)209 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
210 {
211 	if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
212 		panic("trying to register kobject(%d) twice", ops->iko_op_type);
213 	}
214 	ipc_kobject_ops_array[ops->iko_op_type] = *ops;
215 }
216 
217 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)218 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
219 {
220 	if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
221 		panic("invalid kobject type %d", ikot);
222 	}
223 	return &ipc_kobject_ops_array[ikot];
224 }
225 
/*
 *	Routine:	mig_init
 *	Purpose:
 *		Build the kernel MIG dispatch hash table from mig_e[].
 *		Uses open addressing with linear probing; records the maximum
 *		probe displacement so lookups can bound their iteration.
 */
__startup_func
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_kern_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/* reject subsystems whose replies could not be safely kalloc'ed */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->kroutine[j].kstub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/* linear probe until a free bucket is found */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].kroutine = mig_e[i]->kroutine[j].kstub_routine;
				if (mig_e[i]->kroutine[j].max_reply_msg) {
					mig_buckets[pos].kreply_size = mig_e[i]->kroutine[j].max_reply_msg;
					mig_buckets[pos].kreply_desc_cnt = mig_e[i]->kroutine[j].reply_descr_count;
					assert3u(mig_e[i]->kroutine[j].descr_count,
					    <=, IPC_KOBJECT_DESC_MAX);
					assert3u(mig_e[i]->kroutine[j].reply_descr_count,
					    <=, IPC_KOBJECT_RDESC_MAX);
				} else {
					/*
					 * Allocating a larger-than-needed kmsg creates hole for
					 * inlined kmsgs (IKM_TYPE_ALL_INLINED) during copyout.
					 * Disallow that.
					 */
					panic("kroutine must have precise size %d %d", mig_e[i]->start, j);
				}

				/* filter index is assigned later by Sandbox via ipc_kobject_set_kobjidx() */
				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				/* track the worst-case displacement for bounded lookups */
				if (mig_table_max_displ < howmany) {
					mig_table_max_displ = howmany;
				}
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
297 
298 /*
299  * Do a hash table lookup for given msgh_id. Return 0
300  * if not found.
301  */
302 static mig_hash_t *
find_mig_hash_entry(int msgh_id)303 find_mig_hash_entry(int msgh_id)
304 {
305 	unsigned int i = (unsigned int)MIG_HASH(msgh_id);
306 	int max_iter = mig_table_max_displ;
307 	mig_hash_t *ptr;
308 
309 	do {
310 		ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
311 	} while (msgh_id != ptr->num && ptr->num && --max_iter);
312 
313 	if (!ptr->kroutine || msgh_id != ptr->num) {
314 		ptr = (mig_hash_t *)0;
315 	}
316 
317 	return ptr;
318 }
319 
320 /*
321  * Routine: ipc_kobject_reply_status
322  *
323  * Returns the error/success status from a given kobject call reply message.
324  *
325  * Contract for KernelServer MIG routines is as follows:
326  *
327  * (1) If reply header has complex bit set, kernel server implementation routine
328  *     must have implicitly returned KERN_SUCCESS.
329  *
330  * (2) Otherwise we can always read RetCode from after the header. This is not
331  *     obvious to see, and is discussed below by case.
332  *
333  * MIG can return three types of replies from KernelServer routines.
334  *
335  * (A) Complex Reply (i.e. with Descriptors)
336  *
337  *     E.g.: thread_get_exception_ports()
338  *
339  *       If complex bit is set, we can deduce the call is successful since the bit
340  *     is set at the very end.
341  *       If complex bit is not set, we must have returned from MIG_RETURN_ERROR.
342  *     MIG writes RetCode to immediately after the header, and we know this is
343  *     safe to do for all kmsg layouts. (See discussion in ipc_kmsg_server_internal()).
344  *
345  *  (B) Simple Reply with Out Params
346  *
347  *      E.g.: thread_get_states()
348  *
349  *        If the call failed, we return from MIG_RETURN_ERROR, which writes RetCode
350  *      to immediately after the header.
351  *        If the call succeeded, MIG writes RetCode as KERN_SUCCESS to USER DATA
352  *      buffer. *BUT* since the region after header is always initialized with
353  *      KERN_SUCCESS, reading from there gives us the same result. We rely on
354  *      this behavior to not make a special case.
355  *
356  *  (C) Simple Reply without Out Params
357  *
358  *      E.g.: thread_set_states()
359  *
360  *        For this type of MIG routines we always allocate a mig_reply_error_t
361  *      as reply kmsg, which fits inline in kmsg. RetCode can be found after
362  *      header, and can be KERN_SUCCESS or otherwise a failure code.
363  */
364 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t reply)365 ipc_kobject_reply_status(ipc_kmsg_t reply)
366 {
367 	mach_msg_header_t *hdr = ikm_header(reply);
368 
369 	if (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
370 		return KERN_SUCCESS;
371 	}
372 
373 	return ((mig_reply_error_t *)hdr)->RetCode;
374 }
375 
376 static void
ipc_kobject_set_reply_error_status(ipc_kmsg_t reply,kern_return_t kr)377 ipc_kobject_set_reply_error_status(
378 	ipc_kmsg_t    reply,
379 	kern_return_t kr)
380 {
381 	mig_reply_error_t *error = (mig_reply_error_t *)ikm_header(reply);
382 
383 	assert(!(error->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX));
384 	error->RetCode = kr;
385 }
386 
387 /*
388  *      Routine:	ipc_kobject_set_kobjidx
389  *      Purpose:
390  *              Set the index for the kobject filter
391  *              mask for a given message ID.
392  */
393 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)394 ipc_kobject_set_kobjidx(
395 	int       msgh_id,
396 	int       index)
397 {
398 	mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
399 
400 	if (ptr == (mig_hash_t *)0) {
401 		return KERN_INVALID_ARGUMENT;
402 	}
403 
404 	assert(index < mach_kobj_count);
405 	ptr->kobjidx = index;
406 
407 	return KERN_SUCCESS;
408 }
409 
410 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)411 ipc_kobject_init_reply(
412 	ipc_kmsg_t          reply,
413 	const ipc_kmsg_t    request,
414 	kern_return_t       kr)
415 {
416 	mach_msg_header_t *req_hdr   = ikm_header(request);
417 	mach_msg_header_t *reply_hdr = ikm_header(reply);
418 
419 #define InP     ((mach_msg_header_t *) req_hdr)
420 #define OutP    ((mig_reply_error_t *) reply_hdr)
421 
422 	OutP->Head.msgh_size = sizeof(mig_reply_error_t);
423 	OutP->Head.msgh_bits =
424 	    MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
425 	OutP->Head.msgh_remote_port = InP->msgh_local_port;
426 	OutP->Head.msgh_local_port = MACH_PORT_NULL;
427 	OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
428 	OutP->Head.msgh_id = InP->msgh_id + 100;
429 
430 	OutP->NDR = NDR_record;
431 	OutP->RetCode = kr;
432 
433 #undef  InP
434 #undef  OutP
435 }
436 
437 static void
ipc_kobject_init_new_reply(ipc_kmsg_t new_reply,const ipc_kmsg_t old_reply,kern_return_t kr)438 ipc_kobject_init_new_reply(
439 	ipc_kmsg_t          new_reply,
440 	const ipc_kmsg_t    old_reply,
441 	kern_return_t       kr)
442 {
443 	mach_msg_header_t *new_hdr = ikm_header(new_reply);
444 	mach_msg_header_t *old_hdr = ikm_header(old_reply);
445 
446 #define InP     ((mig_reply_error_t *) old_hdr)
447 #define OutP    ((mig_reply_error_t *) new_hdr)
448 
449 	OutP->Head.msgh_size = sizeof(mig_reply_error_t);
450 	OutP->Head.msgh_bits = InP->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
451 	OutP->Head.msgh_remote_port = InP->Head.msgh_remote_port;
452 	OutP->Head.msgh_local_port = MACH_PORT_NULL;
453 	OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
454 	OutP->Head.msgh_id = InP->Head.msgh_id;
455 
456 	OutP->NDR = InP->NDR;
457 	OutP->RetCode = kr;
458 
459 #undef  InP
460 #undef  OutP
461 }
462 
463 static ipc_kmsg_t
ipc_kobject_alloc_mig_error(void)464 ipc_kobject_alloc_mig_error(void)
465 {
466 	ipc_kmsg_alloc_flags_t flags = IPC_KMSG_ALLOC_KERNEL |
467 	    IPC_KMSG_ALLOC_ZERO |
468 	    IPC_KMSG_ALLOC_ALL_INLINE |
469 	    IPC_KMSG_ALLOC_NOFAIL;
470 
471 	return ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0, 0, flags);
472 }
473 
474 /*
475  *	Routine:	ipc_kobject_server_internal
476  *	Purpose:
477  *		Handle a message sent to the kernel.
478  *		Generates a reply message.
479  *		Version for Untyped IPC.
480  *	Conditions:
481  *		Nothing locked.
482  */
static kern_return_t
ipc_kobject_server_internal(
	__unused ipc_port_t port,
	ipc_kmsg_t          request,
	ipc_kmsg_t          *replyp)
{
	int request_msgh_id;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_size_t reply_size, reply_desc_cnt;
	mig_hash_t *ptr;
	mach_msg_header_t *req_hdr, *reply_hdr;
	void *req_data, *reply_data;
	mach_msg_max_trailer_t *req_trailer;

	/* caller's task/proc, consulted by the MACF filter check below */
	thread_ro_t tro = current_thread_ro();
	task_t curtask = tro->tro_task;
	struct proc *curproc = tro->tro_proc;

	req_hdr = ikm_header(request);
	req_data = ikm_udata_from_header(request);
	req_trailer = ipc_kmsg_get_trailer(request);
	request_msgh_id = req_hdr->msgh_id;

	/* Find corresponding mig_hash entry, if any */
	ptr = find_mig_hash_entry(request_msgh_id);

	/* Get the reply_size. */
	if (ptr == (mig_hash_t *)0) {
		/* unknown id: allocate just enough for a MIG error reply */
		reply_size = sizeof(mig_reply_error_t);
		reply_desc_cnt = 0;
	} else {
		reply_size = ptr->kreply_size;
		reply_desc_cnt = ptr->kreply_desc_cnt;
	}

	assert(reply_size >= sizeof(mig_reply_error_t));

	/*
	 * MIG should really assure no data leakage -
	 * but until it does, pessimistically zero the
	 * whole reply buffer.
	 */
	reply = ipc_kmsg_alloc(reply_size, 0, reply_desc_cnt, IPC_KMSG_ALLOC_KERNEL |
	    IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
	/* reply can be non-linear */

	if (ptr == (mig_hash_t *)0) {
#if DEVELOPMENT || DEBUG
		printf("ipc_kobject_server: bogus kernel message, id=%d\n",
		    req_hdr->msgh_id);
#endif  /* DEVELOPMENT || DEBUG */
		_MIG_MSGID_INVALID(req_hdr->msgh_id);

		/* reply with MIG_BAD_ID; rights cleanup happens in the caller */
		ipc_kobject_init_reply(reply, request, MIG_BAD_ID);

		*replyp = reply;
		return KERN_SUCCESS;
	}

	/*
	 * We found the routine to call. Call it to perform the kernel function.
	 */
	assert(ptr != (mig_hash_t *)0);

	reply_hdr = ikm_header(reply);
	/* reply is allocated by kernel. non-zero desc count means complex msg */
	reply_data = ikm_udata(reply, reply_desc_cnt, (reply_desc_cnt > 0));

	/*
	 * Reply can be of layout IKM_TYPE_ALL_INLINED, IKM_TYPE_UDATA_OOL,
	 * or IKM_TYPE_ALL_OOL, each of which guarantees kernel/user data segregation.
	 *
	 * Here is the trick: In each case, there _must_ be enough space in
	 * the kdata (header) buffer in `reply` to hold a mig_reply_error_t.
	 */
	assert(reply->ikm_type != IKM_TYPE_KDATA_OOL);
	assert((vm_offset_t)reply_hdr + sizeof(mig_reply_error_t) <= ikm_kdata_end(reply));

	/*
	 * Discussion by case:
	 *
	 * (1) IKM_TYPE_ALL_INLINED
	 *     - IKM_BIG_MSG_SIZE is large enough for mig_reply_error_t
	 * (2) IKM_TYPE_UDATA_OOL
	 *     - IKM_SMALL_MSG_SIZE is large enough for mig_reply_error_t
	 * (3) IKM_TYPE_ALL_OOL
	 *     - This layout is only possible if kdata (header + descs) doesn't fit
	 *       in IKM_SMALL_MSG_SIZE. So we must have at least one descriptor
	 *       following the header, which is enough to fit mig_reply_error_t.
	 */
	static_assert(sizeof(mig_reply_error_t) < IKM_BIG_MSG_SIZE);
	static_assert(sizeof(mig_reply_error_t) < sizeof(mach_msg_base_t) +
	    1 * sizeof(mach_msg_kdescriptor_t));

	/*
	 * Therefore, we can temporarily treat `reply` as a *simple* message that
	 * contains NDR Record + RetCode immediately after the header (which overlaps
	 * with descriptors, if the reply msg is supposed to be complex).
	 *
	 * In doing so we save having a separate allocation specifically for errors.
	 */
	ipc_kobject_init_reply(reply, request, KERN_SUCCESS);

	/* Check if the kobject call should be filtered */
#if CONFIG_MACF
	int idx = ptr->kobjidx;
	uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);

	/* Check kobject mig filter mask, if exists. */
	if (filter_mask != NULL &&
	    (idx == KOBJ_IDX_NOT_SET || !bitstr_test(filter_mask, idx)) &&
	    mac_task_kobj_msg_evaluate != NULL) {
		/* No index registered by Sandbox, or not in filter mask: evaluate policy. */
		kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
		    request_msgh_id, idx);
		if (kr != KERN_SUCCESS) {
			/* denied by policy: reply carries the error, routine is not run */
			ipc_kobject_set_reply_error_status(reply, kr);
			goto skip_kobjcall;
		}
	}
#endif /* CONFIG_MACF */

	/*
	 * NOTE(review): `idx` is only declared under CONFIG_MACF; the trace
	 * macros presumably ignore their argument otherwise — confirm.
	 */
	__BeforeKobjectServerTrace(idx);
	/* See contract in header doc for ipc_kobject_reply_status() */
	(*ptr->kroutine)(req_hdr, req_data, req_trailer, reply_hdr, reply_data);
	__AfterKobjectServerTrace(idx);

#if CONFIG_MACF
skip_kobjcall:
#endif
	counter_inc(&kernel_task->messages_received);

	kern_return_t reply_status = ipc_kobject_reply_status(reply);

	if (reply_status == MIG_NO_REPLY) {
		/*
		 *	The server function will send a reply message
		 *	using the reply port right, which it has saved.
		 */
		ipc_kmsg_free(reply);
		reply = IKM_NULL;
	} else if (reply_status != KERN_SUCCESS && reply_size > sizeof(mig_reply_error_t)) {
		assert(ikm_header(reply)->msgh_size == sizeof(mig_reply_error_t));
		/*
		 * MIG returned an error, and the original kmsg we allocated for reply
		 * is oversized. Deallocate it and allocate a smaller, proper kmsg
		 * that fits mig_reply_error_t snugly.
		 *
		 * We must do so because we used the trick mentioned above which (depending
		 * on the kmsg layout) may cause payload in mig_reply_error_t to overlap
		 * with kdata buffer meant for descriptors.
		 *
		 * This will mess with ikm_kdata_size() calculation down the line so
		 * reallocate a new buffer immediately here.
		 */
		ipc_kmsg_t new_reply = ipc_kobject_alloc_mig_error();
		ipc_kobject_init_new_reply(new_reply, reply, reply_status);

		/* MIG contract: If status is not KERN_SUCCESS, reply must be simple. */
		assert(!(ikm_header(reply)->msgh_bits & MACH_MSGH_BITS_COMPLEX));
		assert(ikm_header(reply)->msgh_local_port == MACH_PORT_NULL);
		assert(ikm_header(reply)->msgh_voucher_port == MACH_PORT_NULL);
		/* So we can simply free the original reply message. */
		ipc_kmsg_free(reply);
		reply = new_reply;
	}

	*replyp = reply;
	return KERN_SUCCESS;
}
653 
654 
655 /*
656  *	Routine:	ipc_kobject_server
657  *	Purpose:
658  *		Handle a message sent to the kernel.
659  *		Generates a reply message.
660  *		Version for Untyped IPC.
661  *
662  *		Ownership of the incoming rights (from the request)
 *		are transferred on success (whether a reply is made or not).
664  *
665  *	Conditions:
666  *		Nothing locked.
667  */
ipc_kmsg_t
ipc_kobject_server(
	ipc_port_t          port,
	ipc_kmsg_t          request,
	mach_msg_option64_t option __unused)
{
	mach_msg_header_t *req_hdr = ikm_header(request);
#if DEVELOPMENT || DEBUG
	const int request_msgh_id = req_hdr->msgh_id;
#endif
	ipc_port_t request_voucher_port;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_header_t *reply_hdr;
	kern_return_t kr;

	ipc_kmsg_trace_send(request, option);

	/* UEXT objects have their own server; everything else goes through MIG */
	if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
		kr = uext_server(port, request, &reply);
	} else {
		kr = ipc_kobject_server_internal(port, request, &reply);
		assert(kr == KERN_SUCCESS);
	}

	/* only the uext_server() path can reach here with an error */
	if (kr != KERN_SUCCESS) {
		assert(kr != MACH_SEND_TIMED_OUT &&
		    kr != MACH_SEND_INTERRUPTED &&
		    kr != MACH_SEND_INVALID_DEST);
		assert(reply == IKM_NULL);

		/* convert the server error into a MIG error */
		reply = ipc_kobject_alloc_mig_error();
		ipc_kobject_init_reply(reply, request, kr);
	}

	counter_inc(&kernel_task->messages_sent);
	/*
	 *	Destroy destination. The following code differs from
	 *	ipc_object_destroy in that we release the send-once
	 *	right instead of generating a send-once notification
	 *	(which would bring us here again, creating a loop).
	 *	It also differs in that we only expect send or
	 *	send-once rights, never receive rights.
	 */
	switch (MACH_MSGH_BITS_REMOTE(req_hdr->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(req_hdr->msgh_remote_port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ipc_port_release_sonce(req_hdr->msgh_remote_port);
		break;

	default:
		panic("ipc_kobject_server: strange destination rights");
	}

	/*
	 *	Destroy voucher.  The kernel MIG servers never take ownership
	 *	of vouchers sent in messages.  Swallow any such rights here.
	 */
	request_voucher_port = ipc_kmsg_get_voucher_port(request);
	if (IP_VALID(request_voucher_port)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		    MACH_MSGH_BITS_VOUCHER(req_hdr->msgh_bits));
		ipc_port_release_send(request_voucher_port);
		ipc_kmsg_clear_voucher_port(request);
	}

	if (reply == IKM_NULL ||
	    ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 *	The server function is responsible for the contents
		 *	of the message.  The reply port right is moved
		 *	to the reply message, and we have deallocated
		 *	the destination port right, so we just need
		 *	to free the kmsg.
		 */
		ipc_kmsg_free(request);
	} else {
		/*
		 *	The message contents of the request are intact.
		 *  Remote port has been released above. Do not destroy
		 *  the reply port right either, which is needed in the reply message.
		 */
		ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
	}

	if (reply != IKM_NULL) {
		reply_hdr = ikm_header(reply);
		ipc_port_t reply_port = reply_hdr->msgh_remote_port;

		if (!IP_VALID(reply_port)) {
			/*
			 *	Can't queue the reply message if the destination
			 *	(the reply port) isn't valid.
			 */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		} else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
			/* do not lock reply port, use raw pointer comparison */

			/*
			 *	Don't send replies to kobject kernel ports.
			 */
#if DEVELOPMENT || DEBUG
			printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
			    __func__, ip_kotype(reply_port), request_msgh_id);
#endif  /* DEVELOPMENT || DEBUG */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		}
	}

	/* IKM_NULL here means "no reply to send"; caller just returns */
	return reply;
}
784 
/*
 *	Routine:	ipc_kobject_set_raw
 *	Purpose:
 *		Store the kobject pointer on the port, without validation.
 *		With pointer authentication enabled, the pointer is signed
 *		with a discriminator derived from the storage address, the
 *		kobject type, and the port's immovable bits, so the value
 *		can only be authenticated back by ipc_kobject_get_raw()
 *		with matching inputs.
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	/* fold the immovable bits into the discriminator */
	type |= port->ip_immovable_receive << 14;
	type |= port->ip_immovable_send << 15;
	type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
	kobject = ptrauth_sign_unauthenticated(kobject,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator(store, type));
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
806 
/*
 *	Routine:	ipc_kobject_set_internal
 *	Purpose:
 *		Associate a kobject pointer and type with a port:
 *		merges the type into the object bits, then stores
 *		the (possibly ptrauth-signed) pointer.
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type);
	ipc_kobject_set_raw(port, kobject, type);
}
817 
818 /*
819  *	Routine:	ipc_kobject_get_raw
820  *	Purpose:
821  *		Returns the kobject pointer of a specified port.
822  *
823  *		This returns the current value of the kobject pointer,
824  *		without any validation (the caller is expected to do
825  *		the validation it needs).
826  *
827  *	Conditions:
828  *		The port is a kobject of the proper type.
829  */
__header_always_inline ipc_kobject_t
ipc_kobject_get_raw(
	ipc_port_t                  port,
	ipc_kobject_type_t          type)
{
	uintptr_t *store = &port->ip_kobject;
	ipc_kobject_t kobject = (ipc_kobject_t)*store;

#if __has_feature(ptrauth_calls)
	/*
	 * Rebuild the exact discriminator ipc_kobject_set_raw() signed with;
	 * authentication traps if type/immovable bits/address don't match.
	 */
	type |= port->ip_immovable_receive << 14;
	type |= port->ip_immovable_send << 15;
	type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
	kobject = ptrauth_auth_data(kobject,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator(store, type));
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	return kobject;
}
851 
852 __abortlike
853 static void
ipc_kobject_require_panic(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)854 ipc_kobject_require_panic(
855 	ipc_port_t                  port,
856 	ipc_kobject_t               kobject,
857 	ipc_kobject_type_t          kotype)
858 {
859 	if (ip_kotype(port) != kotype) {
860 		panic("port %p: invalid kobject type, got %d wanted %d",
861 		    port, ip_kotype(port), kotype);
862 	}
863 	panic("port %p: invalid kobject, got %p wanted %p",
864 	    port, ipc_kobject_get_raw(port, kotype), kobject);
865 }
866 
867 __header_always_inline void
ipc_kobject_require(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)868 ipc_kobject_require(
869 	ipc_port_t                  port,
870 	ipc_kobject_t               kobject,
871 	ipc_kobject_type_t          kotype)
872 {
873 	ipc_kobject_t cur;
874 
875 	if (__improbable(ip_kotype(port) != kotype)) {
876 		ipc_kobject_require_panic(port, kobject, kotype);
877 	}
878 	cur = ipc_kobject_get_raw(port, kotype);
879 	if (cur && cur != kobject) {
880 		ipc_kobject_require_panic(port, kobject, kotype);
881 	}
882 }
883 
884 /*
885  *	Routine:	ipc_kobject_get_locked
886  *	Purpose:
887  *		Returns the kobject pointer of a specified port,
888  *		for an expected type.
889  *
890  *		Returns IKO_NULL if the port isn't active.
891  *
892  *		This function may be used when:
893  *		- the port lock is held
894  *		- the kobject association stays while there
895  *		  are any outstanding rights.
896  *
897  *	Conditions:
898  *		The port is a kobject of the proper type.
899  */
900 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)901 ipc_kobject_get_locked(
902 	ipc_port_t                  port,
903 	ipc_kobject_type_t          type)
904 {
905 	ipc_kobject_t kobject = IKO_NULL;
906 
907 	if (ip_active(port) && type == ip_kotype(port)) {
908 		kobject = ipc_kobject_get_raw(port, type);
909 	}
910 
911 	return kobject;
912 }
913 
914 /*
915  *	Routine:	ipc_kobject_get_stable
916  *	Purpose:
917  *		Returns the kobject pointer of a specified port,
918  *		for an expected type, for types where the port/kobject
919  *		association is permanent.
920  *
921  *		Returns IKO_NULL if the port isn't active.
922  *
923  *	Conditions:
924  *		The port is a kobject of the proper type.
925  */
926 ipc_kobject_t
ipc_kobject_get_stable(ipc_port_t port,ipc_kobject_type_t type)927 ipc_kobject_get_stable(
928 	ipc_port_t                  port,
929 	ipc_kobject_type_t          type)
930 {
931 	assert(ipc_kobject_ops_get(type)->iko_op_stable);
932 	return ipc_kobject_get_locked(port, type);
933 }
934 
935 /*
936  *	Routine:	ipc_kobject_init_port
937  *	Purpose:
938  *		Initialize a kobject port with the given types and options.
939  *
940  *		This function never fails.
941  */
942 static inline void
ipc_kobject_init_port(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)943 ipc_kobject_init_port(
944 	ipc_port_t port,
945 	ipc_kobject_t kobject,
946 	ipc_kobject_type_t type,
947 	ipc_kobject_alloc_options_t options)
948 {
949 	if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
950 		ipc_port_make_send_any_locked(port);
951 	}
952 	if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
953 		port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
954 		ip_reference(port);
955 	}
956 	if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
957 		port->ip_no_grant = 1;
958 	}
959 	if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
960 		port->ip_immovable_send = 1;
961 	}
962 	if (options & IPC_KOBJECT_ALLOC_PINNED) {
963 		port->ip_pinned = 1;
964 	}
965 
966 	ipc_kobject_set_internal(port, kobject, type);
967 }
968 
969 /*
970  *	Routine:	ipc_kobject_alloc_port
971  *	Purpose:
972  *		Allocate a kobject port in the kernel space of the specified type.
973  *
974  *		This function never fails.
975  *
976  *	Conditions:
977  *		No locks held (memory is allocated)
978  */
979 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)980 ipc_kobject_alloc_port(
981 	ipc_kobject_t           kobject,
982 	ipc_kobject_type_t      type,
983 	ipc_kobject_alloc_options_t     options)
984 {
985 	ipc_port_t port;
986 	port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS);
987 
988 	if (port == IP_NULL) {
989 		panic("ipc_kobject_alloc_port(): failed to allocate port");
990 	}
991 
992 	ipc_kobject_init_port(port, kobject, type, options);
993 	return port;
994 }
995 
996 /*
997  *	Routine:	ipc_kobject_alloc_labeled_port
998  *	Purpose:
999  *		Allocate a kobject port and associated mandatory access label
1000  *		in the kernel space of the specified type.
1001  *
1002  *		This function never fails.
1003  *
1004  *	Conditions:
1005  *		No locks held (memory is allocated)
1006  */
1007 
1008 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)1009 ipc_kobject_alloc_labeled_port(
1010 	ipc_kobject_t           kobject,
1011 	ipc_kobject_type_t      type,
1012 	ipc_label_t             label,
1013 	ipc_kobject_alloc_options_t     options)
1014 {
1015 	ipc_port_t port;
1016 
1017 	port = ipc_kobject_alloc_port(kobject, type, options);
1018 
1019 	ipc_port_set_label(port, label);
1020 
1021 	return port;
1022 }
1023 
1024 static void
ipc_kobject_subst_once_no_senders(ipc_port_t port,mach_port_mscount_t mscount)1025 ipc_kobject_subst_once_no_senders(
1026 	ipc_port_t          port,
1027 	mach_port_mscount_t mscount)
1028 {
1029 	ipc_port_t ko_port;
1030 
1031 	ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
1032 
1033 	if (ko_port) {
1034 		/*
1035 		 * Clean up the right if the wrapper wasn't hollowed out
1036 		 * by ipc_kobject_alloc_subst_once().
1037 		 */
1038 		ipc_port_release_send(ko_port);
1039 	}
1040 }
1041 
1042 /*
1043  *	Routine:	ipc_kobject_alloc_subst_once
1044  *	Purpose:
1045  *		Make a port that will be substituted by the kolabel
1046  *		rules once, preventing the next substitution (of its target)
1047  *		to happen if any.
1048  *
1049  *	Returns:
1050  *		A port with a send right, that will substitute to its "kobject".
1051  *
1052  *	Conditions:
1053  *		No locks held (memory is allocated).
1054  *
1055  *		`target` holds a send-right donated to this function,
1056  *		consumed in ipc_kobject_subst_once_no_senders().
1057  */
1058 ipc_port_t
ipc_kobject_alloc_subst_once(ipc_port_t target)1059 ipc_kobject_alloc_subst_once(
1060 	ipc_port_t          target)
1061 {
1062 	if (!IP_VALID(target)) {
1063 		return target;
1064 	}
1065 	return ipc_kobject_alloc_labeled_port(target,
1066 	           IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
1067 	           IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1068 }
1069 
/*
 *	Routine:	ipc_kobject_make_send_lazy_alloc_port
 *	Purpose:
 *		Make a send right for a kobject port, lazily allocating
 *		the port on first use.
 *
 *		A location owning this port is passed in port_store.
 *		If no port exists, a port is made lazily.
 *
 *		A send right is made for the port, and if this is the first one
 *		(possibly not for the first time), then the no-more-senders
 *		notification is rearmed.
 *
 *		When a notification is armed, the kobject must donate
 *		one of its references to the port. It is expected
 *		the no-more-senders notification will consume this reference.
 *
 *	Returns:
 *		TRUE if a notification was armed
 *		FALSE else
 *
 *	Conditions:
 *		Nothing is locked, memory can be allocated.
 *		The caller must be able to donate a kobject reference to the port.
 */
bool
ipc_kobject_make_send_lazy_alloc_port(
	ipc_port_t              *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t alloc_opts)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	/* a fresh port always gets a send right and an armed no-senders */
	alloc_opts |= IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST;
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_port(kobject, type, alloc_opts);

		/* publish the port; release pairs with the dependency load */
		if (os_atomic_cmpxchgv(port_store,
		    IP_NULL, port, &previous, release)) {
			return TRUE;
		}

		/*
		 * Lost the publication race: another thread installed
		 * a port first, so tear ours down again.
		 *
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		port = previous;
	}

	/* make the send right on the (pre-existing or winning) port */
	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1133 
/*
 *	Routine:	ipc_kobject_make_send_lazy_alloc_labeled_port
 *	Purpose:
 *		Make a send right for a labeled kobject port, lazily
 *		allocating the port on first use.
 *
 *		A location owning this port is passed in port_store.
 *		If no port exists, a port is made lazily.
 *
 *		A send right is made for the port, and if this is the first one
 *		(possibly not for the first time), then the no-more-senders
 *		notification is rearmed.
 *
 *		When a notification is armed, the kobject must donate
 *		one of its references to the port. It is expected
 *		the no-more-senders notification will consume this reference.
 *
 *	Returns:
 *		TRUE if a notification was armed
 *		FALSE else
 *
 *	Conditions:
 *		Nothing is locked, memory can be allocated.
 *		The caller must be able to donate a kobject reference to the port.
 */
boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(
	ipc_port_t              *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_label_t             label)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_labeled_port(kobject, type, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		/* publish the port; release pairs with the dependency load */
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
			return TRUE;
		}

		/*
		 * Lost the publication race: another thread installed
		 * a port first, so tear ours down again.
		 *
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		port = previous;
		/* the winning port must carry a label as well */
		assert(ip_is_kolabeled(port));
	}

	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1196 
1197 /*
1198  *	Routine:	ipc_kobject_nsrequest_locked
1199  *	Purpose:
1200  *		Arm the no-senders notification for the given kobject
1201  *		if it doesn't have one armed yet.
1202  *
1203  *	Conditions:
1204  *		Port is locked and active.
1205  *
1206  *	Returns:
1207  *		KERN_SUCCESS:           the notification was armed
1208  *		KERN_ALREADY_WAITING:   the notification was already armed
1209  *		KERN_FAILURE:           the notification would fire immediately
1210  */
1211 static inline kern_return_t
ipc_kobject_nsrequest_locked(ipc_port_t port,mach_port_mscount_t sync)1212 ipc_kobject_nsrequest_locked(
1213 	ipc_port_t                  port,
1214 	mach_port_mscount_t         sync)
1215 {
1216 	if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
1217 		return KERN_ALREADY_WAITING;
1218 	}
1219 
1220 	if (port->ip_srights == 0 && sync <= port->ip_mscount) {
1221 		return KERN_FAILURE;
1222 	}
1223 
1224 	port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
1225 	ip_reference(port);
1226 	return KERN_SUCCESS;
1227 }
1228 
1229 
1230 /*
1231  *	Routine:	ipc_kobject_nsrequest
1232  *	Purpose:
1233  *		Arm the no-senders notification for the given kobject
1234  *		if it doesn't have one armed yet.
1235  *
1236  *	Returns:
1237  *		KERN_SUCCESS:           the notification was armed
1238  *		KERN_ALREADY_WAITING:   the notification was already armed
1239  *		KERN_FAILURE:           the notification would fire immediately
1240  *		KERN_INVALID_RIGHT:     the port is dead
1241  */
1242 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1243 ipc_kobject_nsrequest(
1244 	ipc_port_t              port,
1245 	mach_port_mscount_t     sync,
1246 	mach_port_mscount_t    *mscount)
1247 {
1248 	kern_return_t kr = KERN_INVALID_RIGHT;
1249 
1250 	if (IP_VALID(port)) {
1251 		ip_mq_lock(port);
1252 
1253 		if (mscount) {
1254 			*mscount = port->ip_mscount;
1255 		}
1256 		if (ip_active(port)) {
1257 			kr = ipc_kobject_nsrequest_locked(port, sync);
1258 		}
1259 
1260 		ip_mq_unlock(port);
1261 	} else if (mscount) {
1262 		*mscount = 0;
1263 	}
1264 
1265 	return kr;
1266 }
1267 
1268 kern_return_t
ipc_typed_port_copyin_send(ipc_space_t space,mach_port_name_t name,ipc_kobject_type_t kotype,ipc_port_t * portp)1269 ipc_typed_port_copyin_send(
1270 	ipc_space_t             space,
1271 	mach_port_name_t        name,
1272 	ipc_kobject_type_t      kotype,
1273 	ipc_port_t             *portp)
1274 {
1275 	kern_return_t kr;
1276 
1277 	kr = ipc_object_copyin(space, name, MACH_MSG_TYPE_COPY_SEND,
1278 	    IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND, NULL, portp);
1279 	if (kr != KERN_SUCCESS) {
1280 		*portp = IP_NULL;
1281 		return kr;
1282 	}
1283 
1284 	if (kotype != IKOT_UNKNOWN &&
1285 	    IP_VALID(*portp) &&
1286 	    ip_kotype(*portp) != kotype) {
1287 		ipc_port_release_send(*portp);
1288 		*portp = IP_NULL;
1289 		return KERN_INVALID_CAPABILITY;
1290 	}
1291 
1292 	return KERN_SUCCESS;
1293 }
1294 
1295 ipc_port_t
ipc_kobject_copy_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1296 ipc_kobject_copy_send(
1297 	ipc_port_t              port,
1298 	ipc_kobject_t           kobject,
1299 	ipc_kobject_type_t      kotype)
1300 {
1301 	ipc_port_t sright = port;
1302 
1303 	if (IP_VALID(port)) {
1304 		ip_mq_lock(port);
1305 		if (ip_active(port)) {
1306 			ipc_kobject_require(port, kobject, kotype);
1307 			ipc_port_copy_send_any_locked(port);
1308 		} else {
1309 			sright = IP_DEAD;
1310 		}
1311 		ip_mq_unlock(port);
1312 	}
1313 
1314 	return sright;
1315 }
1316 
1317 ipc_port_t
ipc_kobject_make_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1318 ipc_kobject_make_send(
1319 	ipc_port_t              port,
1320 	ipc_kobject_t           kobject,
1321 	ipc_kobject_type_t      kotype)
1322 {
1323 	ipc_port_t sright = port;
1324 
1325 	if (IP_VALID(port)) {
1326 		ip_mq_lock(port);
1327 		if (ip_active(port)) {
1328 			ipc_kobject_require(port, kobject, kotype);
1329 			ipc_port_make_send_any_locked(port);
1330 		} else {
1331 			sright = IP_DEAD;
1332 		}
1333 		ip_mq_unlock(port);
1334 	}
1335 
1336 	return sright;
1337 }
1338 
1339 void
ipc_typed_port_release_send(ipc_port_t port,ipc_kobject_type_t kotype)1340 ipc_typed_port_release_send(
1341 	ipc_port_t              port,
1342 	ipc_kobject_type_t      kotype)
1343 {
1344 	if (kotype != IKOT_UNKNOWN &&
1345 	    IP_VALID(port) &&
1346 	    ip_kotype(port) != kotype) {
1347 		ipc_kobject_require_panic(port, IKO_NULL, kotype);
1348 	}
1349 	ipc_port_release_send(port);
1350 }
1351 
1352 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1353 ipc_kobject_make_send_nsrequest(
1354 	ipc_port_t              port,
1355 	ipc_kobject_t           kobject,
1356 	ipc_kobject_type_t      kotype)
1357 {
1358 	kern_return_t kr = KERN_INVALID_RIGHT;
1359 
1360 	if (IP_VALID(port)) {
1361 		ip_mq_lock(port);
1362 		if (ip_active(port)) {
1363 			ipc_kobject_require(port, kobject, kotype);
1364 			ipc_port_make_send_any_locked(port);
1365 			kr = ipc_kobject_nsrequest_locked(port, 0);
1366 			assert(kr != KERN_FAILURE);
1367 		}
1368 		ip_mq_unlock(port);
1369 	}
1370 
1371 	return kr;
1372 }
1373 
1374 kern_return_t
ipc_kobject_make_send_nsrequest_locked(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1375 ipc_kobject_make_send_nsrequest_locked(
1376 	ipc_port_t              port,
1377 	ipc_kobject_t           kobject,
1378 	ipc_kobject_type_t      kotype)
1379 {
1380 	kern_return_t kr = KERN_INVALID_RIGHT;
1381 
1382 	if (ip_active(port)) {
1383 		ipc_kobject_require(port, kobject, kotype);
1384 		ipc_port_make_send_any_locked(port);
1385 		kr = ipc_kobject_nsrequest_locked(port, 0);
1386 		assert(kr != KERN_FAILURE);
1387 	}
1388 
1389 	return kr;
1390 }
1391 
1392 static inline ipc_kobject_t
ipc_kobject_disable_internal(ipc_port_t port,ipc_kobject_type_t type)1393 ipc_kobject_disable_internal(
1394 	ipc_port_t              port,
1395 	ipc_kobject_type_t      type)
1396 {
1397 	ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1398 
1399 	ipc_kobject_set_raw(port, IKO_NULL, type);
1400 	if (ip_is_kolabeled(port)) {
1401 		port->ip_kolabel->ikol_alt_port = IP_NULL;
1402 	}
1403 
1404 	return kobject;
1405 }
1406 
1407 /*
1408  *	Routine:	ipc_kobject_dealloc_port_and_unlock
1409  *	Purpose:
1410  *		Destroys a port allocated with any of the ipc_kobject_alloc*
1411  *		functions.
1412  *
1413  *		This will atomically:
1414  *		- make the port inactive,
1415  *		- optionally check the make send count
1416  *		- disable (nil-out) the kobject pointer for kobjects without
1417  *		  a destroy callback.
1418  *
1419  *		The port will retain its kobject-ness and kobject type.
1420  *
1421  *
1422  *	Returns:
1423  *		The kobject pointer that was set prior to this call
1424  *		(possibly NULL if the kobject was already disabled).
1425  *
1426  *	Conditions:
1427  *		The port is active and locked.
1428  *		On return the port is inactive and unlocked.
1429  */
1430 __abortlike
1431 static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port,ipc_kobject_type_t type)1432 __ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
1433 {
1434 	panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
1435 }
1436 
1437 __abortlike
1438 static void
__ipc_kobject_dealloc_bad_mscount_panic(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1439 __ipc_kobject_dealloc_bad_mscount_panic(
1440 	ipc_port_t                  port,
1441 	mach_port_mscount_t         mscount,
1442 	ipc_kobject_type_t          type)
1443 {
1444 	panic("unexpected make-send count: %p[%d], %d, %d",
1445 	    port, type, port->ip_mscount, mscount);
1446 }
1447 
1448 __abortlike
1449 static void
__ipc_kobject_dealloc_bad_srights_panic(ipc_port_t port,ipc_kobject_type_t type)1450 __ipc_kobject_dealloc_bad_srights_panic(
1451 	ipc_port_t                  port,
1452 	ipc_kobject_type_t          type)
1453 {
1454 	panic("unexpected send right count: %p[%d], %d",
1455 	    port, type, port->ip_srights);
1456 }
1457 
/*
 * See the block comment above the panic helpers: destroys a port
 * allocated with the ipc_kobject_alloc* functions, returning the
 * kobject pointer it held (NULL if already disabled or if the type
 * has a destroy callback that owns cleanup).
 */
ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(
	ipc_port_t                  port,
	mach_port_mscount_t         mscount,
	ipc_kobject_type_t          type)
{
	ipc_kobject_t kobject = IKO_NULL;
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);

	require_ip_active(port);

	if (ip_kotype(port) != type) {
		__ipc_kobject_dealloc_bad_type_panic(port, type);
	}

	/* mscount == 0 means "don't check the make-send count" */
	if (mscount && port->ip_mscount != mscount) {
		__ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
	}
	/*
	 * When the make-send count is checked, or the kobject/port
	 * association is stable, no send right may remain here.
	 */
	if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
		__ipc_kobject_dealloc_bad_srights_panic(port, type);
	}

	/*
	 * Types with a destroy callback clean up there instead;
	 * for the others, nil-out the pointer and hand it back.
	 */
	if (!ops->iko_op_destroy) {
		kobject = ipc_kobject_disable_internal(port, type);
	}

	ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);

	return kobject;
}
1488 
1489 /*
1490  *	Routine:	ipc_kobject_dealloc_port
1491  *	Purpose:
1492  *		Destroys a port allocated with any of the ipc_kobject_alloc*
1493  *		functions.
1494  *
1495  *		This will atomically:
1496  *		- make the port inactive,
1497  *		- optionally check the make send count
1498  *		- disable (nil-out) the kobject pointer for kobjects without
1499  *		  a destroy callback.
1500  *
1501  *		The port will retain its kobject-ness and kobject type.
1502  *
1503  *
1504  *	Returns:
1505  *		The kobject pointer that was set prior to this call
1506  *		(possibly NULL if the kobject was already disabled).
1507  *
1508  *	Conditions:
1509  *		Nothing is locked.
1510  *		The port is active.
1511  *		On return the port is inactive.
1512  */
1513 ipc_kobject_t
ipc_kobject_dealloc_port(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1514 ipc_kobject_dealloc_port(
1515 	ipc_port_t                  port,
1516 	mach_port_mscount_t         mscount,
1517 	ipc_kobject_type_t          type)
1518 {
1519 	ip_mq_lock(port);
1520 	return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
1521 }
1522 
1523 /*
1524  *	Routine:	ipc_kobject_enable
1525  *	Purpose:
1526  *		Make a port represent a kernel object of the given type.
1527  *		The caller is responsible for handling refs for the
1528  *		kernel object, if necessary.
1529  *	Conditions:
1530  *		Nothing locked.
1531  *		The port must be active.
1532  */
1533 void
ipc_kobject_enable(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1534 ipc_kobject_enable(
1535 	ipc_port_t              port,
1536 	ipc_kobject_t           kobject,
1537 	ipc_kobject_type_t      type)
1538 {
1539 	assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1540 
1541 	ip_mq_lock(port);
1542 	require_ip_active(port);
1543 
1544 	if (type != ip_kotype(port)) {
1545 		panic("%s: unexpected kotype of port %p: want %d, got %d",
1546 		    __func__, port, type, ip_kotype(port));
1547 	}
1548 
1549 	ipc_kobject_set_raw(port, kobject, type);
1550 
1551 	ip_mq_unlock(port);
1552 }
1553 
/*
 *	Routine:	ipc_kobject_disable_locked
 *	Purpose:
 *		Clear the kobject pointer for a port.
 *	Conditions:
 *		The port is locked.
 *		Returns the current kobject pointer.
 */
ipc_kobject_t
ipc_kobject_disable_locked(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	/* disabling an active port only makes sense for unstable types */
	if (ip_active(port)) {
		assert(!ipc_kobject_ops_get(type)->iko_op_stable);
	}

	if (ip_kotype(port) != type) {
		panic("port %p of type %d, expecting %d",
		    port, ip_kotype(port), type);
	}

	return ipc_kobject_disable_internal(port, type);
}
1578 
1579 /*
1580  *	Routine:	ipc_kobject_disable
1581  *	Purpose:
1582  *		Clear the kobject pointer for a port.
1583  *	Conditions:
1584  *		Nothing locked.
1585  *		Returns the current kobject pointer.
1586  */
1587 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1588 ipc_kobject_disable(
1589 	ipc_port_t              port,
1590 	ipc_kobject_type_t      type)
1591 {
1592 	ipc_kobject_t kobject;
1593 
1594 	ip_mq_lock(port);
1595 	kobject = ipc_kobject_disable_locked(port, type);
1596 	ip_mq_unlock(port);
1597 
1598 	return kobject;
1599 }
1600 
1601 /*
1602  *	Routine:	ipc_kobject_upgrade_mktimer_locked
1603  *	Purpose:
1604  *		Upgrades a port to mktimer kobject status
1605  *
1606  *		This pattern is rather bad as it leads to various
1607  *		confusions that need to be special cased with kobject-ness
1608  *		of ports. No new port with dual kobject/message-queue
1609  *		semantics should be made ever.
1610  *
1611  *	Conditions:
1612  *		Port is locked
1613  */
1614 void
ipc_kobject_upgrade_mktimer_locked(ipc_port_t port,ipc_kobject_t kobject)1615 ipc_kobject_upgrade_mktimer_locked(
1616 	ipc_port_t                  port,
1617 	ipc_kobject_t               kobject)
1618 {
1619 	ipc_kobject_set_internal(port, kobject, IKOT_TIMER);
1620 }
1621 
1622 /*
1623  *	Routine:	ipc_kobject_notify_no_senders
1624  *	Purpose:
1625  *		Handles a no-senders notification
1626  *		sent to a kobject.
1627  *
1628  *		A port reference is consumed.
1629  *
1630  *	Conditions:
1631  *		Nothing locked.
1632  */
1633 void
ipc_kobject_notify_no_senders(ipc_port_t port,mach_port_mscount_t mscount)1634 ipc_kobject_notify_no_senders(
1635 	ipc_port_t              port,
1636 	mach_port_mscount_t     mscount)
1637 {
1638 	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1639 
1640 	assert(ops->iko_op_no_senders);
1641 	ops->iko_op_no_senders(port, mscount);
1642 
1643 	/* consume the ref ipc_notify_no_senders_prepare left */
1644 	ip_release(port);
1645 }
1646 
/*
 *	Routine:	ipc_kobject_notify_send_once_and_unlock
 *	Purpose:
 *		Handles a send once notification
 *		sent to a kobject.
 *
 *		A send-once port reference is consumed.
 *
 *	Conditions:
 *		Port is locked.
 */
void
ipc_kobject_notify_send_once_and_unlock(
	ipc_port_t              port)
{
	/*
	 * drop the send once right while we hold the port lock.
	 * we will keep a port reference while we run the possible
	 * callouts to kobjects.
	 *
	 * This a simplified version of ipc_port_release_sonce()
	 * since kobjects can't be special reply ports.
	 */
	assert(!port->ip_specialreply);

	ip_sorights_dec(port);
	ip_mq_unlock(port);

	/*
	 * because there's very few consumers,
	 * the code here isn't generic as it's really not worth it.
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_RESUME:
		task_suspension_send_once(port);
		break;
	default:
		break;
	}

	ip_release(port);
}
1689 
1690 
/*
 *	Routine:	ipc_kobject_destroy
 *	Purpose:
 *		Release any kernel object resources associated
 *		with the port, which is being destroyed.
 *
 *		This path to free object resources should only be
 *		needed when resources are associated with a user's port.
 *		In the normal case, when the kernel is the receiver,
 *		the code calling ipc_kobject_dealloc_port() should clean
 *		up the object resources.
 *
 *		Cleans up any kobject label that might be present.
 *	Conditions:
 *		The port is not locked, but it is dead.
 */
void
ipc_kobject_destroy(
	ipc_port_t              port)
{
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));

	if (ops->iko_op_permanent) {
		panic("trying to destroy a permanent port %p with kobject type: %d", port, ip_kotype(port));
	}
	/* let the kobject type release its resources, if it has a callback */
	if (ops->iko_op_destroy) {
		ops->iko_op_destroy(port);
	}

	if (ip_is_kolabeled(port)) {
		ipc_kobject_label_t labelp = port->ip_kolabel;

		assert(labelp != NULL);
		/* the alternate port must have been cleared by now */
		assert(labelp->ikol_alt_port == IP_NULL);
		assert(ip_is_kobject(port));
		/* detach the label from the port before freeing it */
		port->ip_kolabel = NULL;
		io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
		zfree(ipc_kobject_label_zone, labelp);
	}
}
1731 
1732 /*
1733  *	Routine:	ipc_kobject_label_substitute_task
1734  *	Purpose:
1735  *		Substitute a task control port for its immovable
1736  *		equivalent when the receiver is that task.
1737  *	Conditions:
1738  *		Space is write locked and active.
1739  *		Port is locked and active.
1740  *	Returns:
1741  *		- IP_NULL port if no substitution is to be done
1742  *		- a valid port if a substitution needs to happen
1743  */
1744 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1745 ipc_kobject_label_substitute_task(
1746 	ipc_space_t             space,
1747 	ipc_kobject_label_t     kolabel,
1748 	ipc_port_t              port)
1749 {
1750 	ipc_port_t subst = IP_NULL;
1751 	task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1752 
1753 	if (task != TASK_NULL && task == space->is_task) {
1754 		if ((subst = kolabel->ikol_alt_port)) {
1755 			return subst;
1756 		}
1757 	}
1758 
1759 	return IP_NULL;
1760 }
1761 
1762 /*
1763  *	Routine:	ipc_kobject_label_substitute_task_read
1764  *	Purpose:
1765  *		Substitute a task read port for its immovable
1766  *		control equivalent when the receiver is that task.
1767  *	Conditions:
1768  *		Space is write locked and active.
1769  *		Port is locked and active.
1770  *	Returns:
1771  *		- IP_NULL port if no substitution is to be done
1772  *		- a valid port if a substitution needs to happen
1773  */
1774 static ipc_port_t
ipc_kobject_label_substitute_task_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1775 ipc_kobject_label_substitute_task_read(
1776 	ipc_space_t             space,
1777 	ipc_kobject_label_t     kolabel,
1778 	ipc_port_t              port)
1779 {
1780 	ipc_port_t subst = IP_NULL;
1781 	task_t task = ipc_kobject_get_raw(port, IKOT_TASK_READ);
1782 
1783 	if (task != TASK_NULL && task == space->is_task) {
1784 		if ((subst = kolabel->ikol_alt_port)) {
1785 			return subst;
1786 		}
1787 	}
1788 
1789 	return IP_NULL;
1790 }
1791 
1792 /*
1793  *	Routine:	ipc_kobject_label_substitute_thread
1794  *	Purpose:
1795  *		Substitute a thread control port for its immovable
1796  *		equivalent when it belongs to the receiver task.
1797  *	Conditions:
1798  *		Space is write locked and active.
1799  *		Port is locked and active.
1800  *	Returns:
1801  *		- IP_NULL port if no substitution is to be done
1802  *		- a valid port if a substitution needs to happen
1803  */
1804 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1805 ipc_kobject_label_substitute_thread(
1806 	ipc_space_t             space,
1807 	ipc_kobject_label_t     kolabel,
1808 	ipc_port_t              port)
1809 {
1810 	ipc_port_t subst = IP_NULL;
1811 	thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1812 
1813 	if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1814 		if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1815 			return subst;
1816 		}
1817 	}
1818 
1819 	return IP_NULL;
1820 }
1821 
1822 /*
1823  *	Routine:	ipc_kobject_label_substitute_thread_read
1824  *	Purpose:
1825  *		Substitute a thread read port for its immovable
1826  *		control equivalent when it belongs to the receiver task.
1827  *	Conditions:
1828  *		Space is write locked and active.
1829  *		Port is locked and active.
1830  *	Returns:
1831  *		- IP_NULL port if no substitution is to be done
1832  *		- a valid port if a substitution needs to happen
1833  */
1834 static ipc_port_t
ipc_kobject_label_substitute_thread_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1835 ipc_kobject_label_substitute_thread_read(
1836 	ipc_space_t             space,
1837 	ipc_kobject_label_t     kolabel,
1838 	ipc_port_t              port)
1839 {
1840 	ipc_port_t subst = IP_NULL;
1841 	thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_READ);
1842 
1843 	if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1844 		if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1845 			return subst;
1846 		}
1847 	}
1848 
1849 	return IP_NULL;
1850 }
1851 
1852 /*
1853  *	Routine:	ipc_kobject_label_check
1854  *	Purpose:
1855  *		Check to see if the space is allowed to possess
1856  *		a right for the given port. In order to qualify,
1857  *		the space label must contain all the privileges
1858  *		listed in the port/kobject label.
1859  *
1860  *	Conditions:
1861  *		Space is write locked and active.
1862  *		Port is locked and active.
1863  *
1864  *	Returns:
1865  *		Whether the copyout is authorized.
1866  *
1867  *		If a port substitution is requested, the space is unlocked,
1868  *		the port is unlocked and its "right" consumed.
1869  *
1870  *		As of now, substituted ports only happen for send rights.
1871  */
1872 bool
ipc_kobject_label_check(ipc_space_t space,ipc_port_t port,mach_msg_type_name_t msgt_name,ipc_object_copyout_flags_t * flags,ipc_port_t * subst_portp)1873 ipc_kobject_label_check(
1874 	ipc_space_t                     space,
1875 	ipc_port_t                      port,
1876 	mach_msg_type_name_t            msgt_name,
1877 	ipc_object_copyout_flags_t     *flags,
1878 	ipc_port_t                     *subst_portp)
1879 {
1880 	ipc_kobject_label_t kolabel;
1881 	ipc_label_t label;
1882 
1883 	assert(is_active(space));
1884 	assert(ip_active(port));
1885 
1886 	*subst_portp = IP_NULL;
1887 
1888 	/* Unlabeled ports/kobjects are always allowed */
1889 	if (!ip_is_kolabeled(port)) {
1890 		return true;
1891 	}
1892 
1893 	/* Never OK to copyout the receive right for a labeled kobject */
1894 	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
1895 		panic("ipc_kobject_label_check: attempted receive right "
1896 		    "copyout for labeled kobject");
1897 	}
1898 
1899 	kolabel = port->ip_kolabel;
1900 	label = kolabel->ikol_label;
1901 
1902 	if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
1903 	    (label & IPC_LABEL_SUBST_MASK)) {
1904 		ipc_port_t subst = IP_NULL;
1905 
1906 		if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
1907 			return false;
1908 		}
1909 
1910 		if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
1911 			/*
1912 			 * The next check will _not_ substitute.
1913 			 * hollow out our one-time wrapper,
1914 			 * and steal its send right.
1915 			 */
1916 			*flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
1917 			subst = ipc_kobject_disable_locked(port,
1918 			    IKOT_PORT_SUBST_ONCE);
1919 			is_write_unlock(space);
1920 			ipc_port_release_send_and_unlock(port);
1921 			if (subst == IP_NULL) {
1922 				panic("subst-once port %p was consumed twice", port);
1923 			}
1924 			*subst_portp = subst;
1925 			return true;
1926 		}
1927 
1928 		switch (label & IPC_LABEL_SUBST_MASK) {
1929 		case IPC_LABEL_SUBST_TASK:
1930 			subst = ipc_kobject_label_substitute_task(space,
1931 			    kolabel, port);
1932 			break;
1933 		case IPC_LABEL_SUBST_TASK_READ:
1934 			subst = ipc_kobject_label_substitute_task_read(space,
1935 			    kolabel, port);
1936 			break;
1937 		case IPC_LABEL_SUBST_THREAD:
1938 			subst = ipc_kobject_label_substitute_thread(space,
1939 			    kolabel, port);
1940 			break;
1941 		case IPC_LABEL_SUBST_THREAD_READ:
1942 			subst = ipc_kobject_label_substitute_thread_read(space,
1943 			    kolabel, port);
1944 			break;
1945 		default:
1946 			panic("unexpected label: %llx", label);
1947 		}
1948 
1949 		if (subst != IP_NULL) {
1950 			ip_reference(subst);
1951 			is_write_unlock(space);
1952 
1953 			/*
1954 			 * We do not hold a proper send right on `subst`,
1955 			 * only a reference.
1956 			 *
1957 			 * Because of how thread/task termination works,
1958 			 * there is no guarantee copy_send() would work,
1959 			 * so we need to make_send().
1960 			 *
1961 			 * We can do that because ports tagged with
1962 			 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
1963 			 * the no-senders notification.
1964 			 */
1965 
1966 			ipc_port_release_send_and_unlock(port);
1967 			/* no check: dPAC integrity */
1968 			port = ipc_port_make_send_any(subst);
1969 			ip_release(subst);
1970 			*subst_portp = port;
1971 			return true;
1972 		}
1973 	}
1974 
1975 	return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
1976 	       (label & IPC_LABEL_SPACE_MASK);
1977 }
1978