1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73 #include <mach/mig.h>
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <mach/message.h>
77 #include <mach/mig_errors.h>
78 #include <mach/mach_notify.h>
79 #include <mach/ndr.h>
80 #include <mach/vm_param.h>
81
82 #include <mach/mach_vm_server.h>
83 #include <mach/mach_port_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/clock_server.h>
87 #include <mach/memory_entry_server.h>
88 #include <mach/processor_server.h>
89 #include <mach/processor_set_server.h>
90 #include <mach/task_server.h>
91 #include <mach/mach_voucher_server.h>
92 #ifdef VM32_SUPPORT
93 #include <mach/vm32_map_server.h>
94 #endif
95 #include <mach/thread_act_server.h>
96 #include <mach/restartable_server.h>
97
98 #include <mach/exc_server.h>
99 #include <mach/mach_exc_server.h>
100 #include <mach/mach_eventlink_server.h>
101
102 #include <device/device_types.h>
103 #include <device/device_server.h>
104
105 #if CONFIG_USER_NOTIFICATION
106 #include <UserNotification/UNDReplyServer.h>
107 #endif
108
109 #if CONFIG_ARCADE
110 #include <mach/arcade_register_server.h>
111 #endif
112
113 #if CONFIG_AUDIT
114 #include <kern/audit_sessionport.h>
115 #endif
116
117 #include <kern/counter.h>
118 #include <kern/ipc_tt.h>
119 #include <kern/ipc_mig.h>
120 #include <kern/ipc_misc.h>
121 #include <kern/ipc_kobject.h>
122 #include <kern/host_notify.h>
123 #include <kern/misc_protos.h>
124
125 #if CONFIG_ARCADE
126 #include <kern/arcade.h>
127 #endif /* CONFIG_ARCADE */
128
129 #include <ipc/ipc_kmsg.h>
130 #include <ipc/ipc_policy.h>
131 #include <ipc/ipc_port.h>
132 #include <ipc/ipc_voucher.h>
133 #include <kern/sync_sema.h>
134 #include <kern/work_interval.h>
135 #include <kern/task_ident.h>
136
137 #if HYPERVISOR
138 #include <kern/hv_support.h>
139 #endif
140
141 #if CONFIG_CSR
142 #include <sys/csr.h>
143 #endif
144
145 #include <vm/vm_protos.h>
146
147 #include <security/mac_mach_internal.h>
148
149 extern char *proc_name_address(void *p);
150 struct proc;
151 extern int proc_pid(struct proc *p);
152
/*
 * One slot of the kernel MIG dispatch hash table (mig_buckets[]),
 * mapping a request msgh_id to its kernel server routine plus the
 * metadata needed to size its reply kmsg.
 */
typedef struct {
	mach_msg_id_t num;             /* request msgh_id; 0 marks an empty slot */
	int kobjidx;                   /* kobject filter index, or KOBJ_IDX_NOT_SET */
	mig_kern_routine_t kroutine;   /* Kernel server routine */
	unsigned int kreply_size;      /* Size of kernel reply msg */
	unsigned int kreply_desc_cnt;  /* Number of descs in kernel reply msg */
} mig_hash_t;
160
161 static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);
162
163 IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT); /* vestigial, no real instance */
164 IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
165 .iko_op_no_senders = ipc_kobject_subst_once_no_senders);
166
167 #define MAX_MIG_ENTRIES 1031
168 #define MIG_HASH(x) (x)
169
170 #define KOBJ_IDX_NOT_SET (-1)
171
172 static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
173 static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
174 SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */
175
176 ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
177 struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
178
/*
 * The kernel MIG subsystems served in-kernel. mig_init() walks this
 * table at startup and hashes every routine of every subsystem listed
 * here into mig_buckets[] for dispatch by ipc_kobject_server_internal().
 */
__startup_const
static struct mig_kern_subsystem *mig_e[] = {
	(const struct mig_kern_subsystem *)&mach_vm_subsystem,
	(const struct mig_kern_subsystem *)&mach_port_subsystem,
	(const struct mig_kern_subsystem *)&mach_host_subsystem,
	(const struct mig_kern_subsystem *)&host_priv_subsystem,
	(const struct mig_kern_subsystem *)&clock_subsystem,
	(const struct mig_kern_subsystem *)&processor_subsystem,
	(const struct mig_kern_subsystem *)&processor_set_subsystem,
	(const struct mig_kern_subsystem *)&is_iokit_subsystem,
	(const struct mig_kern_subsystem *)&task_subsystem,
	(const struct mig_kern_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_kern_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_kern_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_kern_subsystem *)&mach_voucher_subsystem,
	(const struct mig_kern_subsystem *)&memory_entry_subsystem,
	(const struct mig_kern_subsystem *)&task_restartable_subsystem,
	(const struct mig_kern_subsystem *)&catch_exc_subsystem,
	(const struct mig_kern_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_kern_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_kern_subsystem *)&mach_eventlink_subsystem,
};
207
208 static struct ipc_kobject_ops __security_const_late
209 ipc_kobject_ops_array[IKOT_MAX_TYPE];
210
211 __startup_func
212 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)213 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
214 {
215 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
216 panic("trying to register kobject(%d) twice", ops->iko_op_type);
217 }
218 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
219 }
220
221 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)222 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
223 {
224 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
225 panic("invalid kobject type %d", ikot);
226 }
227 return &ipc_kobject_ops_array[ikot];
228 }
229
/*
 * Routine: mig_init
 * Purpose:
 *	Startup-time construction of the kernel MIG dispatch table.
 *	Every kernel server routine of every subsystem in mig_e[] is
 *	inserted into mig_buckets[] using open addressing with linear
 *	probing, and mach_kobj_count / mig_table_max_displ are computed
 *	as a side effect.
 */
__startup_func
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_kern_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		/* range is the number of msgh_id slots this subsystem spans */
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/*
		 * Reply kmsgs are kalloc'ed at dispatch time; a subsystem whose
		 * maximum reply (plus trailer) exceeds the safe kalloc size
		 * could never be served.
		 */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->kroutine[j].kstub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/*
				 * Linear probe from the hash slot until a free bucket
				 * (num == 0) is found; howmany tracks the probe length.
				 */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].kroutine = mig_e[i]->kroutine[j].kstub_routine;
				if (mig_e[i]->kroutine[j].max_reply_msg) {
					mig_buckets[pos].kreply_size = mig_e[i]->kroutine[j].max_reply_msg;
					mig_buckets[pos].kreply_desc_cnt = mig_e[i]->kroutine[j].reply_descr_count;
					assert3u(mig_e[i]->kroutine[j].descr_count,
					    <=, IPC_KOBJECT_DESC_MAX);
					assert3u(mig_e[i]->kroutine[j].reply_descr_count,
					    <=, IPC_KOBJECT_RDESC_MAX);
				} else {
					/*
					 * Allocating a larger-than-needed kmsg creates hole for
					 * inlined kmsgs (IKM_TYPE_ALL_INLINED) during copyout.
					 * Disallow that.
					 */
					panic("kroutine must have precise size %d %d", mig_e[i]->start, j);
				}

				/* filter index is assigned later via ipc_kobject_set_kobjidx() */
				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				/* remember the longest probe chain; bounds later lookups */
				if (mig_table_max_displ < howmany) {
					mig_table_max_displ = howmany;
				}
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
301
302 /*
303 * Do a hash table lookup for given msgh_id. Return 0
304 * if not found.
305 */
306 static mig_hash_t *
find_mig_hash_entry(int msgh_id)307 find_mig_hash_entry(int msgh_id)
308 {
309 unsigned int i = (unsigned int)MIG_HASH(msgh_id);
310 int max_iter = mig_table_max_displ;
311 mig_hash_t *ptr;
312
313 do {
314 ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
315 } while (msgh_id != ptr->num && ptr->num && --max_iter);
316
317 if (!ptr->kroutine || msgh_id != ptr->num) {
318 ptr = (mig_hash_t *)0;
319 }
320
321 return ptr;
322 }
323
324 /*
325 * Routine: ipc_kobject_reply_status
326 *
327 * Returns the error/success status from a given kobject call reply message.
328 *
329 * Contract for KernelServer MIG routines is as follows:
330 *
331 * (1) If reply header has complex bit set, kernel server implementation routine
332 * must have implicitly returned KERN_SUCCESS.
333 *
334 * (2) Otherwise we can always read RetCode from after the header. This is not
335 * obvious to see, and is discussed below by case.
336 *
337 * MIG can return three types of replies from KernelServer routines.
338 *
339 * (A) Complex Reply (i.e. with Descriptors)
340 *
341 * E.g.: thread_get_exception_ports()
342 *
343 * If complex bit is set, we can deduce the call is successful since the bit
344 * is set at the very end.
345 * If complex bit is not set, we must have returned from MIG_RETURN_ERROR.
346 * MIG writes RetCode to immediately after the header, and we know this is
347 * safe to do for all kmsg layouts. (See discussion in ipc_kmsg_server_internal()).
348 *
349 * (B) Simple Reply with Out Params
350 *
351 * E.g.: thread_get_states()
352 *
353 * If the call failed, we return from MIG_RETURN_ERROR, which writes RetCode
354 * to immediately after the header.
355 * If the call succeeded, MIG writes RetCode as KERN_SUCCESS to USER DATA
356 * buffer. *BUT* since the region after header is always initialized with
357 * KERN_SUCCESS, reading from there gives us the same result. We rely on
358 * this behavior to not make a special case.
359 *
360 * (C) Simple Reply without Out Params
361 *
362 * E.g.: thread_set_states()
363 *
364 * For this type of MIG routines we always allocate a mig_reply_error_t
365 * as reply kmsg, which fits inline in kmsg. RetCode can be found after
366 * header, and can be KERN_SUCCESS or otherwise a failure code.
367 */
368 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t reply)369 ipc_kobject_reply_status(ipc_kmsg_t reply)
370 {
371 mach_msg_header_t *hdr = ikm_header(reply);
372
373 if (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
374 return KERN_SUCCESS;
375 }
376
377 return ((mig_reply_error_t *)hdr)->RetCode;
378 }
379
380 static void
ipc_kobject_set_reply_error_status(ipc_kmsg_t reply,kern_return_t kr)381 ipc_kobject_set_reply_error_status(
382 ipc_kmsg_t reply,
383 kern_return_t kr)
384 {
385 mig_reply_error_t *error = (mig_reply_error_t *)ikm_header(reply);
386
387 assert(!(error->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX));
388 error->RetCode = kr;
389 }
390
391 /*
392 * Routine: ipc_kobject_set_kobjidx
393 * Purpose:
394 * Set the index for the kobject filter
395 * mask for a given message ID.
396 */
397 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)398 ipc_kobject_set_kobjidx(
399 int msgh_id,
400 int index)
401 {
402 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
403
404 if (ptr == (mig_hash_t *)0) {
405 return KERN_INVALID_ARGUMENT;
406 }
407
408 assert(index < mach_kobj_count);
409 ptr->kobjidx = index;
410
411 return KERN_SUCCESS;
412 }
413
414 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)415 ipc_kobject_init_reply(
416 ipc_kmsg_t reply,
417 const ipc_kmsg_t request,
418 kern_return_t kr)
419 {
420 mach_msg_header_t *req_hdr = ikm_header(request);
421 mach_msg_header_t *reply_hdr = ikm_header(reply);
422
423 #define InP ((mach_msg_header_t *) req_hdr)
424 #define OutP ((mig_reply_error_t *) reply_hdr)
425
426 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
427 OutP->Head.msgh_bits =
428 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
429 OutP->Head.msgh_remote_port = InP->msgh_local_port;
430 OutP->Head.msgh_local_port = MACH_PORT_NULL;
431 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
432 OutP->Head.msgh_id = InP->msgh_id + 100;
433
434 OutP->NDR = NDR_record;
435 OutP->RetCode = kr;
436
437 #undef InP
438 #undef OutP
439 }
440
441 static void
ipc_kobject_init_new_reply(ipc_kmsg_t new_reply,const ipc_kmsg_t old_reply,kern_return_t kr)442 ipc_kobject_init_new_reply(
443 ipc_kmsg_t new_reply,
444 const ipc_kmsg_t old_reply,
445 kern_return_t kr)
446 {
447 mach_msg_header_t *new_hdr = ikm_header(new_reply);
448 mach_msg_header_t *old_hdr = ikm_header(old_reply);
449
450 #define InP ((mig_reply_error_t *) old_hdr)
451 #define OutP ((mig_reply_error_t *) new_hdr)
452
453 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
454 OutP->Head.msgh_bits = InP->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
455 OutP->Head.msgh_remote_port = InP->Head.msgh_remote_port;
456 OutP->Head.msgh_local_port = MACH_PORT_NULL;
457 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
458 OutP->Head.msgh_id = InP->Head.msgh_id;
459
460 OutP->NDR = InP->NDR;
461 OutP->RetCode = kr;
462
463 #undef InP
464 #undef OutP
465 }
466
467 static ipc_kmsg_t
ipc_kobject_alloc_mig_error(void)468 ipc_kobject_alloc_mig_error(void)
469 {
470 ipc_kmsg_alloc_flags_t flags = IPC_KMSG_ALLOC_KERNEL |
471 IPC_KMSG_ALLOC_ZERO |
472 IPC_KMSG_ALLOC_ALL_INLINE |
473 IPC_KMSG_ALLOC_NOFAIL;
474
475 return ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0, 0, flags);
476 }
477
478 /*
479 * Routine: ipc_kobject_server_internal
480 * Purpose:
481 * Handle a message sent to the kernel.
482 * Generates a reply message.
483 * Version for Untyped IPC.
484 * Conditions:
485 * Nothing locked.
486 */
487 static kern_return_t
ipc_kobject_server_internal(__unused ipc_port_t port,ipc_kmsg_t request,ipc_kmsg_t * replyp)488 ipc_kobject_server_internal(
489 __unused ipc_port_t port,
490 ipc_kmsg_t request,
491 ipc_kmsg_t *replyp)
492 {
493 int request_msgh_id;
494 ipc_kmsg_t reply = IKM_NULL;
495 mach_msg_size_t reply_size, reply_desc_cnt;
496 mig_hash_t *ptr;
497 mach_msg_header_t *req_hdr, *reply_hdr;
498 void *req_data, *reply_data;
499 mach_msg_max_trailer_t *req_trailer;
500
501 thread_ro_t tro = current_thread_ro();
502 task_t curtask = tro->tro_task;
503 struct proc *curproc = tro->tro_proc;
504
505 req_hdr = ikm_header(request);
506 req_data = ikm_udata_from_header(request);
507 req_trailer = ipc_kmsg_get_trailer(request);
508 request_msgh_id = req_hdr->msgh_id;
509
510 /* Find corresponding mig_hash entry, if any */
511 ptr = find_mig_hash_entry(request_msgh_id);
512
513 /* Get the reply_size. */
514 if (ptr == (mig_hash_t *)0) {
515 reply_size = sizeof(mig_reply_error_t);
516 reply_desc_cnt = 0;
517 } else {
518 reply_size = ptr->kreply_size;
519 reply_desc_cnt = ptr->kreply_desc_cnt;
520 }
521
522 assert(reply_size >= sizeof(mig_reply_error_t));
523
524 /*
525 * MIG should really assure no data leakage -
526 * but until it does, pessimistically zero the
527 * whole reply buffer.
528 */
529 reply = ipc_kmsg_alloc(reply_size, 0, reply_desc_cnt, IPC_KMSG_ALLOC_KERNEL |
530 IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
531 /* reply can be non-linear */
532
533 if (ptr == (mig_hash_t *)0) {
534 #if DEVELOPMENT || DEBUG
535 printf("ipc_kobject_server: bogus kernel message, id=%d\n",
536 req_hdr->msgh_id);
537 #endif /* DEVELOPMENT || DEBUG */
538 _MIG_MSGID_INVALID(req_hdr->msgh_id);
539
540 ipc_kobject_init_reply(reply, request, MIG_BAD_ID);
541
542 *replyp = reply;
543 return KERN_SUCCESS;
544 }
545
546 /*
547 * We found the routine to call. Call it to perform the kernel function.
548 */
549 assert(ptr != (mig_hash_t *)0);
550
551 reply_hdr = ikm_header(reply);
552 /* reply is allocated by kernel. non-zero desc count means complex msg */
553 reply_data = ikm_udata(reply, reply_desc_cnt, (reply_desc_cnt > 0));
554
555 /*
556 * Reply can be of layout IKM_TYPE_ALL_INLINED, IKM_TYPE_UDATA_OOL,
557 * or IKM_TYPE_ALL_OOL, each of which guarantees kernel/user data segregation.
558 *
559 * Here is the trick: In each case, there _must_ be enough space in
560 * the kdata (header) buffer in `reply` to hold a mig_reply_error_t.
561 */
562 assert(reply->ikm_type != IKM_TYPE_KDATA_OOL);
563 assert((vm_offset_t)reply_hdr + sizeof(mig_reply_error_t) <= ikm_kdata_end(reply));
564
565 /*
566 * Discussion by case:
567 *
568 * (1) IKM_TYPE_ALL_INLINED
569 * - IKM_BIG_MSG_SIZE is large enough for mig_reply_error_t
570 * (2) IKM_TYPE_UDATA_OOL
571 * - IKM_SMALL_MSG_SIZE is large enough for mig_reply_error_t
572 * (3) IKM_TYPE_ALL_OOL
573 * - This layout is only possible if kdata (header + descs) doesn't fit
574 * in IKM_SMALL_MSG_SIZE. So we must have at least one descriptor
575 * following the header, which is enough to fit mig_reply_error_t.
576 */
577 static_assert(sizeof(mig_reply_error_t) < IKM_BIG_MSG_SIZE);
578 static_assert(sizeof(mig_reply_error_t) < sizeof(mach_msg_base_t) +
579 1 * sizeof(mach_msg_kdescriptor_t));
580
581 /*
582 * Therefore, we can temporarily treat `reply` as a *simple* message that
583 * contains NDR Record + RetCode immediately after the header (which overlaps
584 * with descriptors, if the reply msg is supposed to be complex).
585 *
586 * In doing so we save having a separate allocation specifically for errors.
587 */
588 ipc_kobject_init_reply(reply, request, KERN_SUCCESS);
589
590 /* Check if the kobject call should be filtered */
591 #if CONFIG_MACF
592 int idx = ptr->kobjidx;
593 uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);
594
595 /* Check kobject mig filter mask, if exists. */
596 if (filter_mask != NULL &&
597 idx != KOBJ_IDX_NOT_SET &&
598 !bitstr_test(filter_mask, idx) &&
599 mac_task_kobj_msg_evaluate != NULL) {
600 /* Not in filter mask, evaluate policy. */
601 kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
602 request_msgh_id, idx);
603 if (kr != KERN_SUCCESS) {
604 ipc_kobject_set_reply_error_status(reply, kr);
605 goto skip_kobjcall;
606 }
607 }
608 #endif /* CONFIG_MACF */
609
610 __BeforeKobjectServerTrace(idx);
611 /* See contract in header doc for ipc_kobject_reply_status() */
612 (*ptr->kroutine)(req_hdr, req_data, req_trailer, reply_hdr, reply_data);
613 __AfterKobjectServerTrace(idx);
614
615 #if CONFIG_MACF
616 skip_kobjcall:
617 #endif
618 counter_inc(&kernel_task->messages_received);
619
620 kern_return_t reply_status = ipc_kobject_reply_status(reply);
621
622 if (reply_status == MIG_NO_REPLY) {
623 /*
624 * The server function will send a reply message
625 * using the reply port right, which it has saved.
626 */
627 ipc_kmsg_free(reply);
628 reply = IKM_NULL;
629 } else if (reply_status != KERN_SUCCESS && reply_size > sizeof(mig_reply_error_t)) {
630 assert(ikm_header(reply)->msgh_size == sizeof(mig_reply_error_t));
631 /*
632 * MIG returned an error, and the original kmsg we allocated for reply
633 * is oversized. Deallocate it and allocate a smaller, proper kmsg
634 * that fits mig_reply_error_t snuggly.
635 *
636 * We must do so because we used the trick mentioned above which (depending
637 * on the kmsg layout) may cause payload in mig_reply_error_t to overlap
638 * with kdata buffer meant for descriptors.
639 *
640 * This will mess with ikm_kdata_size() calculation down the line so
641 * reallocate a new buffer immediately here.
642 */
643 ipc_kmsg_t new_reply = ipc_kobject_alloc_mig_error();
644 ipc_kobject_init_new_reply(new_reply, reply, reply_status);
645
646 /* MIG contract: If status is not KERN_SUCCESS, reply must be simple. */
647 assert(!(ikm_header(reply)->msgh_bits & MACH_MSGH_BITS_COMPLEX));
648 assert(ikm_header(reply)->msgh_local_port == MACH_PORT_NULL);
649 assert(ikm_header(reply)->msgh_voucher_port == MACH_PORT_NULL);
650 /* So we can simply free the original reply message. */
651 ipc_kmsg_free(reply);
652 reply = new_reply;
653 }
654
655 *replyp = reply;
656 return KERN_SUCCESS;
657 }
658
659
660 /*
661 * Routine: ipc_kobject_server
662 * Purpose:
663 * Handle a message sent to the kernel.
664 * Generates a reply message.
665 * Version for Untyped IPC.
666 *
667 * Ownership of the incoming rights (from the request)
668 * are transferred on success (wether a reply is made or not).
669 *
670 * Conditions:
671 * Nothing locked.
672 */
673 ipc_kmsg_t
ipc_kobject_server(ipc_port_t port,ipc_kmsg_t request,mach_msg_option64_t option __unused)674 ipc_kobject_server(
675 ipc_port_t port,
676 ipc_kmsg_t request,
677 mach_msg_option64_t option __unused)
678 {
679 mach_msg_header_t *req_hdr = ikm_header(request);
680 #if DEVELOPMENT || DEBUG
681 const int request_msgh_id = req_hdr->msgh_id;
682 #endif
683 ipc_port_t request_voucher_port;
684 ipc_kmsg_t reply = IKM_NULL;
685 mach_msg_header_t *reply_hdr;
686 kern_return_t kr;
687
688 ipc_kmsg_trace_send(request, option);
689
690 if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
691 kr = uext_server(port, request, &reply);
692 } else {
693 kr = ipc_kobject_server_internal(port, request, &reply);
694 assert(kr == KERN_SUCCESS);
695 }
696
697 if (kr != KERN_SUCCESS) {
698 assert(kr != MACH_SEND_TIMED_OUT &&
699 kr != MACH_SEND_INTERRUPTED &&
700 kr != MACH_SEND_INVALID_DEST);
701 assert(reply == IKM_NULL);
702
703 /* convert the server error into a MIG error */
704 reply = ipc_kobject_alloc_mig_error();
705 ipc_kobject_init_reply(reply, request, kr);
706 }
707
708 counter_inc(&kernel_task->messages_sent);
709 /*
710 * Destroy destination. The following code differs from
711 * ipc_object_destroy in that we release the send-once
712 * right instead of generating a send-once notification
713 * (which would bring us here again, creating a loop).
714 * It also differs in that we only expect send or
715 * send-once rights, never receive rights.
716 */
717 switch (MACH_MSGH_BITS_REMOTE(req_hdr->msgh_bits)) {
718 case MACH_MSG_TYPE_PORT_SEND:
719 ipc_port_release_send(req_hdr->msgh_remote_port);
720 break;
721
722 case MACH_MSG_TYPE_PORT_SEND_ONCE:
723 ipc_port_release_sonce(req_hdr->msgh_remote_port);
724 break;
725
726 default:
727 panic("ipc_kobject_server: strange destination rights");
728 }
729
730 /*
731 * Destroy voucher. The kernel MIG servers never take ownership
732 * of vouchers sent in messages. Swallow any such rights here.
733 */
734 request_voucher_port = ipc_kmsg_get_voucher_port(request);
735 if (IP_VALID(request_voucher_port)) {
736 assert(MACH_MSG_TYPE_PORT_SEND ==
737 MACH_MSGH_BITS_VOUCHER(req_hdr->msgh_bits));
738 ipc_port_release_send(request_voucher_port);
739 ipc_kmsg_clear_voucher_port(request);
740 }
741
742 if (reply == IKM_NULL ||
743 ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
744 /*
745 * The server function is responsible for the contents
746 * of the message. The reply port right is moved
747 * to the reply message, and we have deallocated
748 * the destination port right, so we just need
749 * to free the kmsg.
750 */
751 ipc_kmsg_free(request);
752 } else {
753 /*
754 * The message contents of the request are intact.
755 * Remote port has been released above. Do not destroy
756 * the reply port right either, which is needed in the reply message.
757 */
758 ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
759 }
760
761 if (reply != IKM_NULL) {
762 reply_hdr = ikm_header(reply);
763 ipc_port_t reply_port = reply_hdr->msgh_remote_port;
764
765 if (!IP_VALID(reply_port)) {
766 /*
767 * Can't queue the reply message if the destination
768 * (the reply port) isn't valid.
769 */
770 ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
771 reply = IKM_NULL;
772 } else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
773 /* do not lock reply port, use raw pointer comparison */
774
775 /*
776 * Don't send replies to kobject kernel ports.
777 */
778 #if DEVELOPMENT || DEBUG
779 printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
780 __func__, ip_kotype(reply_port), request_msgh_id);
781 #endif /* DEVELOPMENT || DEBUG */
782 ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
783 reply = IKM_NULL;
784 }
785 }
786
787 return reply;
788 }
789
/*
 * Routine: ipc_kobject_set_raw
 * Purpose:
 *	Store the kobject pointer into the port, without touching the
 *	port's io_bits type field. On ptrauth targets the pointer is
 *	signed with a discriminator blended from the storage address,
 *	the kobject type, and the port's immovable-receive/send bits,
 *	so it can only be authenticated by the matching
 *	ipc_kobject_get_raw() on the same port with the same state.
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	/* fold the immovable bits into the discriminator (must mirror get_raw) */
	type |= port->ip_immovable_receive << 14;
	type |= port->ip_immovable_send << 15;
	type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
	kobject = ptrauth_sign_unauthenticated(kobject,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator(store, type));
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
811
/*
 * Routine: ipc_kobject_set_internal
 * Purpose:
 *	Bind a kobject pointer and type to a port: sets the kotype bits
 *	in the port's io_bits, then stores the (possibly ptrauth-signed)
 *	pointer via ipc_kobject_set_raw().
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type);
	ipc_kobject_set_raw(port, kobject, type);
}
822
823 /*
824 * Routine: ipc_kobject_get_raw
825 * Purpose:
826 * Returns the kobject pointer of a specified port.
827 *
828 * This returns the current value of the kobject pointer,
829 * without any validation (the caller is expected to do
830 * the validation it needs).
831 *
832 * Conditions:
833 * The port is a kobject of the proper type.
834 */
835 __header_always_inline ipc_kobject_t
ipc_kobject_get_raw(ipc_port_t port,ipc_kobject_type_t type)836 ipc_kobject_get_raw(
837 ipc_port_t port,
838 ipc_kobject_type_t type)
839 {
840 uintptr_t *store = &port->ip_kobject;
841 ipc_kobject_t kobject = (ipc_kobject_t)*store;
842
843 #if __has_feature(ptrauth_calls)
844 type |= port->ip_immovable_receive << 14;
845 type |= port->ip_immovable_send << 15;
846 type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
847 kobject = ptrauth_auth_data(kobject,
848 ptrauth_key_process_independent_data,
849 ptrauth_blend_discriminator(store, type));
850 #else
851 (void)type;
852 #endif // __has_feature(ptrauth_calls)
853
854 return kobject;
855 }
856
857 __abortlike
858 static void
ipc_kobject_require_panic(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)859 ipc_kobject_require_panic(
860 ipc_port_t port,
861 ipc_kobject_t kobject,
862 ipc_kobject_type_t kotype)
863 {
864 if (ip_kotype(port) != kotype) {
865 panic("port %p: invalid kobject type, got %d wanted %d",
866 port, ip_kotype(port), kotype);
867 }
868 panic("port %p: invalid kobject, got %p wanted %p",
869 port, ipc_kobject_get_raw(port, kotype), kobject);
870 }
871
872 __header_always_inline void
ipc_kobject_require(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)873 ipc_kobject_require(
874 ipc_port_t port,
875 ipc_kobject_t kobject,
876 ipc_kobject_type_t kotype)
877 {
878 ipc_kobject_t cur;
879
880 if (__improbable(ip_kotype(port) != kotype)) {
881 ipc_kobject_require_panic(port, kobject, kotype);
882 }
883 cur = ipc_kobject_get_raw(port, kotype);
884 if (cur && cur != kobject) {
885 ipc_kobject_require_panic(port, kobject, kotype);
886 }
887 }
888
889 /*
890 * Routine: ipc_kobject_get_locked
891 * Purpose:
892 * Returns the kobject pointer of a specified port,
893 * for an expected type.
894 *
895 * Returns IKO_NULL if the port isn't active.
896 *
897 * This function may be used when:
898 * - the port lock is held
899 * - the kobject association stays while there
900 * are any outstanding rights.
901 *
902 * Conditions:
903 * The port is a kobject of the proper type.
904 */
905 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)906 ipc_kobject_get_locked(
907 ipc_port_t port,
908 ipc_kobject_type_t type)
909 {
910 ipc_kobject_t kobject = IKO_NULL;
911
912 if (ip_active(port) && type == ip_kotype(port)) {
913 kobject = ipc_kobject_get_raw(port, type);
914 }
915
916 return kobject;
917 }
918
919 /*
920 * Routine: ipc_kobject_get_stable
921 * Purpose:
922 * Returns the kobject pointer of a specified port,
923 * for an expected type, for types where the port/kobject
924 * association is permanent.
925 *
926 * Returns IKO_NULL if the port isn't active.
927 *
928 * Conditions:
929 * The port is a kobject of the proper type.
930 */
931 ipc_kobject_t
ipc_kobject_get_stable(ipc_port_t port,ipc_kobject_type_t type)932 ipc_kobject_get_stable(
933 ipc_port_t port,
934 ipc_kobject_type_t type)
935 {
936 assert(ipc_kobject_ops_get(type)->iko_op_stable);
937 return ipc_kobject_get_locked(port, type);
938 }
939
940 /*
941 * Routine: ipc_kobject_init_port
942 * Purpose:
943 * Initialize a kobject port with the given types and options.
944 *
945 * This function never fails.
946 */
947 static inline void
ipc_kobject_init_port(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)948 ipc_kobject_init_port(
949 ipc_port_t port,
950 ipc_kobject_t kobject,
951 ipc_kobject_type_t type,
952 ipc_kobject_alloc_options_t options)
953 {
954 if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
955 ipc_port_make_send_any_locked(port);
956 }
957 if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
958 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
959 ip_reference(port);
960 }
961 if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
962 port->ip_no_grant = 1;
963 }
964 if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
965 port->ip_immovable_send = 1;
966 }
967 if (options & IPC_KOBJECT_ALLOC_PINNED) {
968 port->ip_pinned = 1;
969 }
970
971 ipc_kobject_set_internal(port, kobject, type);
972 }
973
974 /*
975 * Routine: ipc_kobject_alloc_port
976 * Purpose:
977 * Allocate a kobject port in the kernel space of the specified type.
978 *
979 * This function never fails.
980 *
981 * Conditions:
982 * No locks held (memory is allocated)
983 */
984 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)985 ipc_kobject_alloc_port(
986 ipc_kobject_t kobject,
987 ipc_kobject_type_t type,
988 ipc_kobject_alloc_options_t options)
989 {
990 ipc_port_t port;
991 port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS);
992
993 if (port == IP_NULL) {
994 panic("ipc_kobject_alloc_port(): failed to allocate port");
995 }
996
997 ipc_kobject_init_port(port, kobject, type, options);
998 return port;
999 }
1000
1001 /*
1002 * Routine: ipc_kobject_alloc_labeled_port
1003 * Purpose:
1004 * Allocate a kobject port and associated mandatory access label
1005 * in the kernel space of the specified type.
1006 *
1007 * This function never fails.
1008 *
1009 * Conditions:
1010 * No locks held (memory is allocated)
1011 */
1012
1013 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)1014 ipc_kobject_alloc_labeled_port(
1015 ipc_kobject_t kobject,
1016 ipc_kobject_type_t type,
1017 ipc_label_t label,
1018 ipc_kobject_alloc_options_t options)
1019 {
1020 ipc_port_t port;
1021
1022 port = ipc_kobject_alloc_port(kobject, type, options);
1023
1024 ipc_port_set_label(port, label);
1025
1026 return port;
1027 }
1028
1029 static void
ipc_kobject_subst_once_no_senders(ipc_port_t port,mach_port_mscount_t mscount)1030 ipc_kobject_subst_once_no_senders(
1031 ipc_port_t port,
1032 mach_port_mscount_t mscount)
1033 {
1034 ipc_port_t ko_port;
1035
1036 ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
1037
1038 if (ko_port) {
1039 /*
1040 * Clean up the right if the wrapper wasn't hollowed out
1041 * by ipc_kobject_alloc_subst_once().
1042 */
1043 ipc_port_release_send(ko_port);
1044 }
1045 }
1046
1047 /*
1048 * Routine: ipc_kobject_alloc_subst_once
1049 * Purpose:
1050 * Make a port that will be substituted by the kolabel
1051 * rules once, preventing the next substitution (of its target)
1052 * to happen if any.
1053 *
1054 * Returns:
1055 * A port with a send right, that will substitute to its "kobject".
1056 *
1057 * Conditions:
1058 * No locks held (memory is allocated).
1059 *
1060 * `target` holds a send-right donated to this function,
1061 * consumed in ipc_kobject_subst_once_no_senders().
1062 */
1063 ipc_port_t
ipc_kobject_alloc_subst_once(ipc_port_t target)1064 ipc_kobject_alloc_subst_once(
1065 ipc_port_t target)
1066 {
1067 if (!IP_VALID(target)) {
1068 return target;
1069 }
1070 return ipc_kobject_alloc_labeled_port(target,
1071 IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
1072 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1073 }
1074
/*
 *	Routine:	ipc_kobject_make_send_lazy_alloc_port
 *	Purpose:
 *		Make a send right for a kobject port.
 *
 *		A location owning this port is passed in port_store.
 *		If no port exists, a port is made lazily.
 *
 *		A send right is made for the port, and if this is the first one
 *		(possibly not for the first time), then the no-more-senders
 *		notification is rearmed.
 *
 *		When a notification is armed, the kobject must donate
 *		one of its references to the port.  It is expected
 *		the no-more-senders notification will consume this reference.
 *
 *	Returns:
 *		TRUE if a notification was armed
 *		FALSE else
 *
 *	Conditions:
 *		Nothing is locked, memory can be allocated.
 *		The caller must be able to donate a kobject reference to the port.
 */
bool
ipc_kobject_make_send_lazy_alloc_port(
	ipc_port_t             *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t alloc_opts)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	/* always make a send right and arm no-senders on a fresh port */
	alloc_opts |= IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST;
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_port(kobject, type, alloc_opts);

		/* publish the new port; winner returns with no-senders armed */
		if (os_atomic_cmpxchgv(port_store,
		    IP_NULL, port, &previous, release)) {
			return TRUE;
		}

		/*
		 * We lost the race: another thread published a port first.
		 *
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		/* fall through using the winner's port */
		port = previous;
	}

	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	/* can't be KERN_FAILURE: we just made a send right */
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1138
/*
 *	Routine:	ipc_kobject_make_send_lazy_alloc_labeled_port
 *	Purpose:
 *		Make a send right for a labeled kobject port.
 *
 *		A location owning this port is passed in port_store.
 *		If no port exists, a port is made lazily.
 *
 *		A send right is made for the port, and if this is the first one
 *		(possibly not for the first time), then the no-more-senders
 *		notification is rearmed.
 *
 *		When a notification is armed, the kobject must donate
 *		one of its references to the port.  It is expected
 *		the no-more-senders notification will consume this reference.
 *
 *	Returns:
 *		TRUE if a notification was armed
 *		FALSE else
 *
 *	Conditions:
 *		Nothing is locked, memory can be allocated.
 *		The caller must be able to donate a kobject reference to the port.
 */
boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(
	ipc_port_t             *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_label_t             label)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_labeled_port(kobject, type, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		/* publish the new port; winner returns with no-senders armed */
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
			return TRUE;
		}

		/*
		 * We lost the race: another thread published a port first.
		 *
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		/* the winner's port must carry the same label */
		port = previous;
		assert(ip_is_kolabeled(port));
	}

	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	/* can't be KERN_FAILURE: we just made a send right */
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1201
/*
 *	Routine:	ipc_kobject_nsrequest_locked
 *	Purpose:
 *		Arm the no-senders notification for the given kobject
 *		if it doesn't have one armed yet.
 *
 *	Conditions:
 *		Port is locked and active.
 *
 *	Returns:
 *		KERN_SUCCESS:		the notification was armed
 *		KERN_ALREADY_WAITING:	the notification was already armed
 *		KERN_FAILURE:		the notification would fire immediately
 */
static inline kern_return_t
ipc_kobject_nsrequest_locked(
	ipc_port_t              port,
	mach_port_mscount_t     sync)
{
	/* already armed: nothing to do */
	if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
		return KERN_ALREADY_WAITING;
	}

	/*
	 * No outstanding send rights and the make-send count already
	 * reached `sync`: arming would fire the notification immediately.
	 */
	if (port->ip_srights == 0 && sync <= port->ip_mscount) {
		return KERN_FAILURE;
	}

	/* arm it; the notification will consume this port reference */
	port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
	ip_reference(port);
	return KERN_SUCCESS;
}
1233
1234
1235 /*
1236 * Routine: ipc_kobject_nsrequest
1237 * Purpose:
1238 * Arm the no-senders notification for the given kobject
1239 * if it doesn't have one armed yet.
1240 *
1241 * Returns:
1242 * KERN_SUCCESS: the notification was armed
1243 * KERN_ALREADY_WAITING: the notification was already armed
1244 * KERN_FAILURE: the notification would fire immediately
1245 * KERN_INVALID_RIGHT: the port is dead
1246 */
1247 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1248 ipc_kobject_nsrequest(
1249 ipc_port_t port,
1250 mach_port_mscount_t sync,
1251 mach_port_mscount_t *mscount)
1252 {
1253 kern_return_t kr = KERN_INVALID_RIGHT;
1254
1255 if (IP_VALID(port)) {
1256 ip_mq_lock(port);
1257
1258 if (mscount) {
1259 *mscount = port->ip_mscount;
1260 }
1261 if (ip_active(port)) {
1262 kr = ipc_kobject_nsrequest_locked(port, sync);
1263 }
1264
1265 ip_mq_unlock(port);
1266 } else if (mscount) {
1267 *mscount = 0;
1268 }
1269
1270 return kr;
1271 }
1272
1273 ipc_port_t
ipc_kobject_copy_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1274 ipc_kobject_copy_send(
1275 ipc_port_t port,
1276 ipc_kobject_t kobject,
1277 ipc_kobject_type_t kotype)
1278 {
1279 ipc_port_t sright = port;
1280
1281 if (IP_VALID(port)) {
1282 ip_mq_lock(port);
1283 if (ip_active(port)) {
1284 ipc_kobject_require(port, kobject, kotype);
1285 ipc_port_copy_send_any_locked(port);
1286 } else {
1287 sright = IP_DEAD;
1288 }
1289 ip_mq_unlock(port);
1290 }
1291
1292 return sright;
1293 }
1294
1295 ipc_port_t
ipc_kobject_make_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1296 ipc_kobject_make_send(
1297 ipc_port_t port,
1298 ipc_kobject_t kobject,
1299 ipc_kobject_type_t kotype)
1300 {
1301 ipc_port_t sright = port;
1302
1303 if (IP_VALID(port)) {
1304 ip_mq_lock(port);
1305 if (ip_active(port)) {
1306 ipc_kobject_require(port, kobject, kotype);
1307 ipc_port_make_send_any_locked(port);
1308 } else {
1309 sright = IP_DEAD;
1310 }
1311 ip_mq_unlock(port);
1312 }
1313
1314 return sright;
1315 }
1316
1317 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1318 ipc_kobject_make_send_nsrequest(
1319 ipc_port_t port,
1320 ipc_kobject_t kobject,
1321 ipc_kobject_type_t kotype)
1322 {
1323 kern_return_t kr = KERN_INVALID_RIGHT;
1324
1325 if (IP_VALID(port)) {
1326 ip_mq_lock(port);
1327 if (ip_active(port)) {
1328 ipc_kobject_require(port, kobject, kotype);
1329 ipc_port_make_send_any_locked(port);
1330 kr = ipc_kobject_nsrequest_locked(port, 0);
1331 assert(kr != KERN_FAILURE);
1332 }
1333 ip_mq_unlock(port);
1334 }
1335
1336 return kr;
1337 }
1338
1339 kern_return_t
ipc_kobject_make_send_nsrequest_locked(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1340 ipc_kobject_make_send_nsrequest_locked(
1341 ipc_port_t port,
1342 ipc_kobject_t kobject,
1343 ipc_kobject_type_t kotype)
1344 {
1345 kern_return_t kr = KERN_INVALID_RIGHT;
1346
1347 if (ip_active(port)) {
1348 ipc_kobject_require(port, kobject, kotype);
1349 ipc_port_make_send_any_locked(port);
1350 kr = ipc_kobject_nsrequest_locked(port, 0);
1351 assert(kr != KERN_FAILURE);
1352 }
1353
1354 return kr;
1355 }
1356
1357 static inline ipc_kobject_t
ipc_kobject_disable_internal(ipc_port_t port,ipc_kobject_type_t type)1358 ipc_kobject_disable_internal(
1359 ipc_port_t port,
1360 ipc_kobject_type_t type)
1361 {
1362 ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1363
1364 ipc_kobject_set_raw(port, IKO_NULL, type);
1365 if (ip_is_kolabeled(port)) {
1366 port->ip_kolabel->ikol_alt_port = IP_NULL;
1367 }
1368
1369 return kobject;
1370 }
1371
/*
 *	Routine:	ipc_kobject_dealloc_port_and_unlock
 *	Purpose:
 *		Destroys a port allocated with any of the ipc_kobject_alloc*
 *		functions.
 *
 *		This will atomically:
 *		- make the port inactive,
 *		- optionally check the make send count
 *		- disable (nil-out) the kobject pointer for kobjects without
 *		  a destroy callback.
 *
 *		The port will retain its kobject-ness and kobject type.
 *
 *	Returns:
 *		The kobject pointer that was set prior to this call
 *		(possibly NULL if the kobject was already disabled).
 *
 *	Conditions:
 *		The port is active and locked.
 *		On return the port is inactive and unlocked.
 */
/* panic helper: the port's kobject type doesn't match the caller's claim */
__abortlike
static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
{
	panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
}
1401
/* panic helper: the port's make-send count doesn't match the caller's claim */
__abortlike
static void
__ipc_kobject_dealloc_bad_mscount_panic(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	panic("unexpected make-send count: %p[%d], %d, %d",
	    port, type, port->ip_mscount, mscount);
}
1412
/* panic helper: outstanding send rights remain on a port being destroyed */
__abortlike
static void
__ipc_kobject_dealloc_bad_srights_panic(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	panic("unexpected send right count: %p[%d], %d",
	    port, type, port->ip_srights);
}
1422
ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	ipc_kobject_t kobject = IKO_NULL;
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);

	require_ip_active(port);

	/* the caller's expected type must match the port's actual type */
	if (ip_kotype(port) != type) {
		__ipc_kobject_dealloc_bad_type_panic(port, type);
	}

	/* when a make-send count is given, it must match exactly ... */
	if (mscount && port->ip_mscount != mscount) {
		__ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
	}
	/* ... and no send right may remain (also required for stable types) */
	if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
		__ipc_kobject_dealloc_bad_srights_panic(port, type);
	}

	/*
	 * Types with a destroy callback keep their kobject pointer so the
	 * callback can reach it; all other types are disabled (nil-ed) here.
	 */
	if (!ops->iko_op_destroy) {
		kobject = ipc_kobject_disable_internal(port, type);
	}

	ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);

	return kobject;
}
1453
1454 /*
1455 * Routine: ipc_kobject_dealloc_port
1456 * Purpose:
1457 * Destroys a port allocated with any of the ipc_kobject_alloc*
1458 * functions.
1459 *
1460 * This will atomically:
1461 * - make the port inactive,
1462 * - optionally check the make send count
1463 * - disable (nil-out) the kobject pointer for kobjects without
1464 * a destroy callback.
1465 *
1466 * The port will retain its kobject-ness and kobject type.
1467 *
1468 *
1469 * Returns:
1470 * The kobject pointer that was set prior to this call
1471 * (possibly NULL if the kobject was already disabled).
1472 *
1473 * Conditions:
1474 * Nothing is locked.
1475 * The port is active.
1476 * On return the port is inactive.
1477 */
1478 ipc_kobject_t
ipc_kobject_dealloc_port(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1479 ipc_kobject_dealloc_port(
1480 ipc_port_t port,
1481 mach_port_mscount_t mscount,
1482 ipc_kobject_type_t type)
1483 {
1484 ip_mq_lock(port);
1485 return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
1486 }
1487
1488 /*
1489 * Routine: ipc_kobject_enable
1490 * Purpose:
1491 * Make a port represent a kernel object of the given type.
1492 * The caller is responsible for handling refs for the
1493 * kernel object, if necessary.
1494 * Conditions:
1495 * Nothing locked.
1496 * The port must be active.
1497 */
1498 void
ipc_kobject_enable(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1499 ipc_kobject_enable(
1500 ipc_port_t port,
1501 ipc_kobject_t kobject,
1502 ipc_kobject_type_t type)
1503 {
1504 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1505
1506 ip_mq_lock(port);
1507 require_ip_active(port);
1508
1509 if (type != ip_kotype(port)) {
1510 panic("%s: unexpected kotype of port %p: want %d, got %d",
1511 __func__, port, type, ip_kotype(port));
1512 }
1513
1514 ipc_kobject_set_raw(port, kobject, type);
1515
1516 ip_mq_unlock(port);
1517 }
1518
1519 /*
1520 * Routine: ipc_kobject_disable_locked
1521 * Purpose:
1522 * Clear the kobject pointer for a port.
1523 * Conditions:
1524 * The port is locked.
1525 * Returns the current kobject pointer.
1526 */
1527 ipc_kobject_t
ipc_kobject_disable_locked(ipc_port_t port,ipc_kobject_type_t type)1528 ipc_kobject_disable_locked(
1529 ipc_port_t port,
1530 ipc_kobject_type_t type)
1531 {
1532 if (ip_active(port)) {
1533 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1534 }
1535
1536 if (ip_kotype(port) != type) {
1537 panic("port %p of type %d, expecting %d",
1538 port, ip_kotype(port), type);
1539 }
1540
1541 return ipc_kobject_disable_internal(port, type);
1542 }
1543
1544 /*
1545 * Routine: ipc_kobject_disable
1546 * Purpose:
1547 * Clear the kobject pointer for a port.
1548 * Conditions:
1549 * Nothing locked.
1550 * Returns the current kobject pointer.
1551 */
1552 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1553 ipc_kobject_disable(
1554 ipc_port_t port,
1555 ipc_kobject_type_t type)
1556 {
1557 ipc_kobject_t kobject;
1558
1559 ip_mq_lock(port);
1560 kobject = ipc_kobject_disable_locked(port, type);
1561 ip_mq_unlock(port);
1562
1563 return kobject;
1564 }
1565
/*
 *	Routine:	ipc_kobject_upgrade_mktimer_locked
 *	Purpose:
 *		Upgrades a port to mktimer kobject status
 *		(binds the kobject pointer with type IKOT_TIMER).
 *
 *		This pattern is rather bad as it leads to various
 *		confusions that need to be special cased with kobject-ness
 *		of ports. No new port with dual kobject/message-queue
 *		semantics should be made ever.
 *
 *	Conditions:
 *		Port is locked
 */
void
ipc_kobject_upgrade_mktimer_locked(
	ipc_port_t              port,
	ipc_kobject_t           kobject)
{
	ipc_kobject_set_internal(port, kobject, IKOT_TIMER);
}
1586
/*
 *	Routine:	ipc_kobject_notify_no_senders
 *	Purpose:
 *		Handles a no-senders notification
 *		sent to a kobject, by dispatching to the kobject
 *		type's registered no-senders callback.
 *
 *		A port reference is consumed.
 *
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kobject_notify_no_senders(
	ipc_port_t              port,
	mach_port_mscount_t     mscount)
{
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));

	/* a type that arms no-senders must implement the callback */
	assert(ops->iko_op_no_senders);
	ops->iko_op_no_senders(port, mscount);

	/* consume the ref ipc_notify_no_senders_prepare left */
	ip_release(port);
}
1611
/*
 *	Routine:	ipc_kobject_notify_send_once_and_unlock
 *	Purpose:
 *		Handles a send-once notification
 *		sent to a kobject.
 *
 *		A send-once port reference is consumed.
 *
 *	Conditions:
 *		Port is locked.
 */
void
ipc_kobject_notify_send_once_and_unlock(
	ipc_port_t              port)
{
	/*
	 * drop the send once right while we hold the port lock.
	 * we will keep a port reference while we run the possible
	 * callouts to kobjects.
	 *
	 * This a simplified version of ipc_port_release_sonce()
	 * since kobjects can't be special reply ports.
	 */
	assert(!port->ip_specialreply);

	ip_sorights_dec(port);
	ip_mq_unlock(port);

	/*
	 * because there's very few consumers,
	 * the code here isn't generic as it's really not worth it.
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_RESUME:
		task_suspension_send_once(port);
		break;
	default:
		/* most kobject types ignore send-once notifications */
		break;
	}

	ip_release(port);
}
1654
1655
1656 /*
1657 * Routine: ipc_kobject_destroy
1658 * Purpose:
1659 * Release any kernel object resources associated
1660 * with the port, which is being destroyed.
1661 *
1662 * This path to free object resources should only be
1663 * needed when resources are associated with a user's port.
1664 * In the normal case, when the kernel is the receiver,
1665 * the code calling ipc_kobject_dealloc_port() should clean
1666 * up the object resources.
1667 *
1668 * Cleans up any kobject label that might be present.
1669 * Conditions:
1670 * The port is not locked, but it is dead.
1671 */
1672 void
ipc_kobject_destroy(ipc_port_t port)1673 ipc_kobject_destroy(
1674 ipc_port_t port)
1675 {
1676 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1677
1678 if (ops->iko_op_permanent) {
1679 panic("trying to destroy an permanent port %p", port);
1680 }
1681 if (ops->iko_op_destroy) {
1682 ops->iko_op_destroy(port);
1683 }
1684
1685 if (ip_is_kolabeled(port)) {
1686 ipc_kobject_label_t labelp = port->ip_kolabel;
1687
1688 assert(labelp != NULL);
1689 assert(labelp->ikol_alt_port == IP_NULL);
1690 assert(ip_is_kobject(port));
1691 port->ip_kolabel = NULL;
1692 io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
1693 zfree(ipc_kobject_label_zone, labelp);
1694 }
1695 }
1696
1697 /*
1698 * Routine: ipc_kobject_label_substitute_task
1699 * Purpose:
1700 * Substitute a task control port for its immovable
1701 * equivalent when the receiver is that task.
1702 * Conditions:
1703 * Space is write locked and active.
1704 * Port is locked and active.
1705 * Returns:
1706 * - IP_NULL port if no substitution is to be done
1707 * - a valid port if a substitution needs to happen
1708 */
1709 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1710 ipc_kobject_label_substitute_task(
1711 ipc_space_t space,
1712 ipc_kobject_label_t kolabel,
1713 ipc_port_t port)
1714 {
1715 ipc_port_t subst = IP_NULL;
1716 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1717
1718 if (task != TASK_NULL && task == space->is_task) {
1719 if ((subst = kolabel->ikol_alt_port)) {
1720 return subst;
1721 }
1722 }
1723
1724 return IP_NULL;
1725 }
1726
1727 /*
1728 * Routine: ipc_kobject_label_substitute_task_read
1729 * Purpose:
1730 * Substitute a task read port for its immovable
1731 * control equivalent when the receiver is that task.
1732 * Conditions:
1733 * Space is write locked and active.
1734 * Port is locked and active.
1735 * Returns:
1736 * - IP_NULL port if no substitution is to be done
1737 * - a valid port if a substitution needs to happen
1738 */
1739 static ipc_port_t
ipc_kobject_label_substitute_task_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1740 ipc_kobject_label_substitute_task_read(
1741 ipc_space_t space,
1742 ipc_kobject_label_t kolabel,
1743 ipc_port_t port)
1744 {
1745 ipc_port_t subst = IP_NULL;
1746 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_READ);
1747
1748 if (task != TASK_NULL && task == space->is_task) {
1749 if ((subst = kolabel->ikol_alt_port)) {
1750 return subst;
1751 }
1752 }
1753
1754 return IP_NULL;
1755 }
1756
1757 /*
1758 * Routine: ipc_kobject_label_substitute_thread
1759 * Purpose:
1760 * Substitute a thread control port for its immovable
1761 * equivalent when it belongs to the receiver task.
1762 * Conditions:
1763 * Space is write locked and active.
1764 * Port is locked and active.
1765 * Returns:
1766 * - IP_NULL port if no substitution is to be done
1767 * - a valid port if a substitution needs to happen
1768 */
1769 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1770 ipc_kobject_label_substitute_thread(
1771 ipc_space_t space,
1772 ipc_kobject_label_t kolabel,
1773 ipc_port_t port)
1774 {
1775 ipc_port_t subst = IP_NULL;
1776 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1777
1778 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1779 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1780 return subst;
1781 }
1782 }
1783
1784 return IP_NULL;
1785 }
1786
1787 /*
1788 * Routine: ipc_kobject_label_substitute_thread_read
1789 * Purpose:
1790 * Substitute a thread read port for its immovable
1791 * control equivalent when it belongs to the receiver task.
1792 * Conditions:
1793 * Space is write locked and active.
1794 * Port is locked and active.
1795 * Returns:
1796 * - IP_NULL port if no substitution is to be done
1797 * - a valid port if a substitution needs to happen
1798 */
1799 static ipc_port_t
ipc_kobject_label_substitute_thread_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1800 ipc_kobject_label_substitute_thread_read(
1801 ipc_space_t space,
1802 ipc_kobject_label_t kolabel,
1803 ipc_port_t port)
1804 {
1805 ipc_port_t subst = IP_NULL;
1806 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_READ);
1807
1808 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1809 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1810 return subst;
1811 }
1812 }
1813
1814 return IP_NULL;
1815 }
1816
/*
 *	Routine:	ipc_kobject_label_check
 *	Purpose:
 *		Check to see if the space is allowed to possess
 *		a right for the given port. In order to qualify,
 *		the space label must contain all the privileges
 *		listed in the port/kobject label.
 *
 *	Conditions:
 *		Space is write locked and active.
 *		Port is locked and active.
 *
 *	Returns:
 *		Whether the copyout is authorized.
 *
 *		If a port substitution is requested, the space is unlocked,
 *		the port is unlocked and its "right" consumed.
 *
 *		As of now, substituted ports only happen for send rights.
 */
bool
ipc_kobject_label_check(
	ipc_space_t                     space,
	ipc_port_t                      port,
	mach_msg_type_name_t            msgt_name,
	ipc_object_copyout_flags_t      *flags,
	ipc_port_t                      *subst_portp)
{
	ipc_kobject_label_t kolabel;
	ipc_label_t label;

	assert(is_active(space));
	assert(ip_active(port));

	*subst_portp = IP_NULL;

	/* Unlabeled ports/kobjects are always allowed */
	if (!ip_is_kolabeled(port)) {
		return true;
	}

	/* Never OK to copyout the receive right for a labeled kobject */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		panic("ipc_kobject_label_check: attempted receive right "
		    "copyout for labeled kobject");
	}

	kolabel = port->ip_kolabel;
	label = kolabel->ikol_label;

	if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
	    (label & IPC_LABEL_SUBST_MASK)) {
		ipc_port_t subst = IP_NULL;

		/* substitutions only ever apply to send rights */
		if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
			return false;
		}

		if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
			/*
			 * The next check will _not_ substitute.
			 * hollow out our one-time wrapper,
			 * and steal its send right.
			 */
			*flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
			subst = ipc_kobject_disable_locked(port,
			    IKOT_PORT_SUBST_ONCE);
			/* both locks are dropped; the wrapper's right is consumed */
			is_write_unlock(space);
			ipc_port_release_send_and_unlock(port);
			if (subst == IP_NULL) {
				panic("subst-once port %p was consumed twice", port);
			}
			*subst_portp = subst;
			return true;
		}

		switch (label & IPC_LABEL_SUBST_MASK) {
		case IPC_LABEL_SUBST_TASK:
			subst = ipc_kobject_label_substitute_task(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_TASK_READ:
			subst = ipc_kobject_label_substitute_task_read(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD:
			subst = ipc_kobject_label_substitute_thread(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD_READ:
			subst = ipc_kobject_label_substitute_thread_read(space,
			    kolabel, port);
			break;
		default:
			panic("unexpected label: %llx", label);
		}

		if (subst != IP_NULL) {
			/* pin `subst` with a ref before dropping any lock */
			ip_reference(subst);
			is_write_unlock(space);

			/*
			 * We do not hold a proper send right on `subst`,
			 * only a reference.
			 *
			 * Because of how thread/task termination works,
			 * there is no guarantee copy_send() would work,
			 * so we need to make_send().
			 *
			 * We can do that because ports tagged with
			 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
			 * the no-senders notification.
			 */

			ipc_port_release_send_and_unlock(port);
			/* no check: dPAC integrity */
			port = ipc_port_make_send_any(subst);
			ip_release(subst);
			*subst_portp = port;
			return true;
		}
	}

	/* plain label check: the space must hold every listed privilege */
	return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
	       (label & IPC_LABEL_SPACE_MASK);
}
1943