1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73 #include <mach/mig.h>
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <mach/message.h>
77 #include <mach/mig_errors.h>
78 #include <mach/mach_notify.h>
79 #include <mach/ndr.h>
80 #include <mach/vm_param.h>
81
82 #include <mach/mach_vm_server.h>
83 #include <mach/mach_port_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/clock_server.h>
87 #include <mach/memory_entry_server.h>
88 #include <mach/processor_server.h>
89 #include <mach/processor_set_server.h>
90 #include <mach/task_server.h>
91 #include <mach/mach_voucher_server.h>
92 #ifdef VM32_SUPPORT
93 #include <mach/vm32_map_server.h>
94 #endif
95 #include <mach/thread_act_server.h>
96 #include <mach/restartable_server.h>
97
98 #include <mach/exc_server.h>
99 #include <mach/mach_exc_server.h>
100 #include <mach/mach_eventlink_server.h>
101
102 #include <device/device_types.h>
103 #include <device/device_server.h>
104
105 #if CONFIG_USER_NOTIFICATION
106 #include <UserNotification/UNDReplyServer.h>
107 #endif
108
109 #if CONFIG_ARCADE
110 #include <mach/arcade_register_server.h>
111 #endif
112
113 #if CONFIG_AUDIT
114 #include <kern/audit_sessionport.h>
115 #endif
116
117 #include <kern/counter.h>
118 #include <kern/ipc_tt.h>
119 #include <kern/ipc_mig.h>
120 #include <kern/ipc_misc.h>
121 #include <kern/ipc_kobject.h>
122 #include <kern/host_notify.h>
123 #include <kern/misc_protos.h>
124
125 #if CONFIG_ARCADE
126 #include <kern/arcade.h>
127 #endif /* CONFIG_ARCADE */
128
129 #include <ipc/ipc_kmsg.h>
130 #include <ipc/ipc_policy.h>
131 #include <ipc/ipc_port.h>
132 #include <ipc/ipc_voucher.h>
133 #include <kern/sync_sema.h>
134 #include <kern/work_interval.h>
135 #include <kern/task_ident.h>
136
137 #if HYPERVISOR
138 #include <kern/hv_support.h>
139 #endif
140
141 #include <vm/vm_protos.h>
142
143 #include <security/mac_mach_internal.h>
144
145 extern char *proc_name_address(void *p);
146 struct proc;
147 extern int proc_pid(struct proc *p);
148
/*
 * One slot of the kernel MIG dispatch hash table (mig_buckets): maps a
 * request msgh_id to its kernel server routine plus the metadata needed
 * to size and validate its reply message.
 */
typedef struct {
	mach_msg_id_t num;              /* request msgh_id; 0 marks an empty slot */
	int kobjidx;                    /* kobject filter-mask index, or KOBJ_IDX_NOT_SET */
	mig_kern_routine_t kroutine;    /* Kernel server routine */
	unsigned int kreply_size;       /* Size of kernel reply msg */
	unsigned int kreply_desc_cnt;   /* Number of descs in kernel reply msg */
} mig_hash_t;
156
static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT); /* vestigial, no real instance */
IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
    .iko_op_no_senders = ipc_kobject_subst_once_no_senders);

/* Size of the MIG dispatch hash table (1031 is prime; open addressing). */
#define MAX_MIG_ENTRIES 1031
/* Identity hash on the msgh_id; collisions are resolved by linear probing. */
#define MIG_HASH(x) (x)

/* Sentinel: this MIG routine has no kobject filter index assigned (yet). */
#define KOBJ_IDX_NOT_SET (-1)

static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
/* Longest probe sequence seen while building the table; bounds lookups. */
static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */

ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
    struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
174
/*
 * All kernel MIG subsystems whose server routines are dispatched through
 * the mig_buckets hash table; walked once at startup by mig_init().
 */
__startup_const
static struct mig_kern_subsystem *mig_e[] = {
	(const struct mig_kern_subsystem *)&mach_vm_subsystem,
	(const struct mig_kern_subsystem *)&mach_port_subsystem,
	(const struct mig_kern_subsystem *)&mach_host_subsystem,
	(const struct mig_kern_subsystem *)&host_priv_subsystem,
	(const struct mig_kern_subsystem *)&clock_subsystem,
	(const struct mig_kern_subsystem *)&processor_subsystem,
	(const struct mig_kern_subsystem *)&processor_set_subsystem,
	(const struct mig_kern_subsystem *)&is_iokit_subsystem,
	(const struct mig_kern_subsystem *)&task_subsystem,
	(const struct mig_kern_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_kern_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_kern_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_kern_subsystem *)&mach_voucher_subsystem,
	(const struct mig_kern_subsystem *)&memory_entry_subsystem,
	(const struct mig_kern_subsystem *)&task_restartable_subsystem,
	(const struct mig_kern_subsystem *)&catch_exc_subsystem,
	(const struct mig_kern_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_kern_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_kern_subsystem *)&mach_eventlink_subsystem,
};

/* Per-kobject-type operations, filled at startup via IPC_KOBJECT_DEFINE. */
static struct ipc_kobject_ops __security_const_late
    ipc_kobject_ops_array[IKOT_MAX_TYPE];
206
207 __startup_func
208 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)209 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
210 {
211 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
212 panic("trying to register kobject(%d) twice", ops->iko_op_type);
213 }
214 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
215 }
216
217 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)218 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
219 {
220 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
221 panic("invalid kobject type %d", ikot);
222 }
223 return &ipc_kobject_ops_array[ikot];
224 }
225
/*
 * Build the MIG dispatch hash table (mig_buckets) from every subsystem
 * listed in mig_e[], using open addressing with linear probing.
 * Also computes mig_table_max_displ (the probe bound used by
 * find_mig_hash_entry()) and mach_kobj_count.
 */
__startup_func
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_kern_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/* Replies must fit in a safe kalloc allocation, trailer included. */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->kroutine[j].kstub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/* Linear probe for a free slot, tracking displacement. */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].kroutine = mig_e[i]->kroutine[j].kstub_routine;
				if (mig_e[i]->kroutine[j].max_reply_msg) {
					mig_buckets[pos].kreply_size = mig_e[i]->kroutine[j].max_reply_msg;
					mig_buckets[pos].kreply_desc_cnt = mig_e[i]->kroutine[j].reply_descr_count;
					assert3u(mig_e[i]->kroutine[j].descr_count,
					    <=, IPC_KOBJECT_DESC_MAX);
					assert3u(mig_e[i]->kroutine[j].reply_descr_count,
					    <=, IPC_KOBJECT_RDESC_MAX);
				} else {
					/*
					 * Allocating a larger-than-needed kmsg creates hole for
					 * inlined kmsgs (IKM_TYPE_ALL_INLINED) during copyout.
					 * Disallow that.
					 */
					panic("kroutine must have precise size %d %d", mig_e[i]->start, j);
				}

				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				/* Remember the worst-case probe length for lookups. */
				if (mig_table_max_displ < howmany) {
					mig_table_max_displ = howmany;
				}
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
297
298 /*
299 * Do a hash table lookup for given msgh_id. Return 0
300 * if not found.
301 */
302 static mig_hash_t *
find_mig_hash_entry(int msgh_id)303 find_mig_hash_entry(int msgh_id)
304 {
305 unsigned int i = (unsigned int)MIG_HASH(msgh_id);
306 int max_iter = mig_table_max_displ;
307 mig_hash_t *ptr;
308
309 do {
310 ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
311 } while (msgh_id != ptr->num && ptr->num && --max_iter);
312
313 if (!ptr->kroutine || msgh_id != ptr->num) {
314 ptr = (mig_hash_t *)0;
315 }
316
317 return ptr;
318 }
319
320 /*
321 * Routine: ipc_kobject_reply_status
322 *
323 * Returns the error/success status from a given kobject call reply message.
324 *
325 * Contract for KernelServer MIG routines is as follows:
326 *
327 * (1) If reply header has complex bit set, kernel server implementation routine
328 * must have implicitly returned KERN_SUCCESS.
329 *
330 * (2) Otherwise we can always read RetCode from after the header. This is not
331 * obvious to see, and is discussed below by case.
332 *
333 * MIG can return three types of replies from KernelServer routines.
334 *
335 * (A) Complex Reply (i.e. with Descriptors)
336 *
337 * E.g.: thread_get_exception_ports()
338 *
339 * If complex bit is set, we can deduce the call is successful since the bit
340 * is set at the very end.
341 * If complex bit is not set, we must have returned from MIG_RETURN_ERROR.
342 * MIG writes RetCode to immediately after the header, and we know this is
343 * safe to do for all kmsg layouts. (See discussion in ipc_kmsg_server_internal()).
344 *
345 * (B) Simple Reply with Out Params
346 *
347 * E.g.: thread_get_states()
348 *
349 * If the call failed, we return from MIG_RETURN_ERROR, which writes RetCode
350 * to immediately after the header.
351 * If the call succeeded, MIG writes RetCode as KERN_SUCCESS to USER DATA
352 * buffer. *BUT* since the region after header is always initialized with
353 * KERN_SUCCESS, reading from there gives us the same result. We rely on
354 * this behavior to not make a special case.
355 *
356 * (C) Simple Reply without Out Params
357 *
358 * E.g.: thread_set_states()
359 *
360 * For this type of MIG routines we always allocate a mig_reply_error_t
361 * as reply kmsg, which fits inline in kmsg. RetCode can be found after
362 * header, and can be KERN_SUCCESS or otherwise a failure code.
363 */
364 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t reply)365 ipc_kobject_reply_status(ipc_kmsg_t reply)
366 {
367 mach_msg_header_t *hdr = ikm_header(reply);
368
369 if (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
370 return KERN_SUCCESS;
371 }
372
373 return ((mig_reply_error_t *)hdr)->RetCode;
374 }
375
376 static void
ipc_kobject_set_reply_error_status(ipc_kmsg_t reply,kern_return_t kr)377 ipc_kobject_set_reply_error_status(
378 ipc_kmsg_t reply,
379 kern_return_t kr)
380 {
381 mig_reply_error_t *error = (mig_reply_error_t *)ikm_header(reply);
382
383 assert(!(error->Head.msgh_bits & MACH_MSGH_BITS_COMPLEX));
384 error->RetCode = kr;
385 }
386
387 /*
388 * Routine: ipc_kobject_set_kobjidx
389 * Purpose:
390 * Set the index for the kobject filter
391 * mask for a given message ID.
392 */
393 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)394 ipc_kobject_set_kobjidx(
395 int msgh_id,
396 int index)
397 {
398 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
399
400 if (ptr == (mig_hash_t *)0) {
401 return KERN_INVALID_ARGUMENT;
402 }
403
404 assert(index < mach_kobj_count);
405 ptr->kobjidx = index;
406
407 return KERN_SUCCESS;
408 }
409
410 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)411 ipc_kobject_init_reply(
412 ipc_kmsg_t reply,
413 const ipc_kmsg_t request,
414 kern_return_t kr)
415 {
416 mach_msg_header_t *req_hdr = ikm_header(request);
417 mach_msg_header_t *reply_hdr = ikm_header(reply);
418
419 #define InP ((mach_msg_header_t *) req_hdr)
420 #define OutP ((mig_reply_error_t *) reply_hdr)
421
422 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
423 OutP->Head.msgh_bits =
424 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
425 OutP->Head.msgh_remote_port = InP->msgh_local_port;
426 OutP->Head.msgh_local_port = MACH_PORT_NULL;
427 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
428 OutP->Head.msgh_id = InP->msgh_id + 100;
429
430 OutP->NDR = NDR_record;
431 OutP->RetCode = kr;
432
433 #undef InP
434 #undef OutP
435 }
436
437 static void
ipc_kobject_init_new_reply(ipc_kmsg_t new_reply,const ipc_kmsg_t old_reply,kern_return_t kr)438 ipc_kobject_init_new_reply(
439 ipc_kmsg_t new_reply,
440 const ipc_kmsg_t old_reply,
441 kern_return_t kr)
442 {
443 mach_msg_header_t *new_hdr = ikm_header(new_reply);
444 mach_msg_header_t *old_hdr = ikm_header(old_reply);
445
446 #define InP ((mig_reply_error_t *) old_hdr)
447 #define OutP ((mig_reply_error_t *) new_hdr)
448
449 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
450 OutP->Head.msgh_bits = InP->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
451 OutP->Head.msgh_remote_port = InP->Head.msgh_remote_port;
452 OutP->Head.msgh_local_port = MACH_PORT_NULL;
453 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
454 OutP->Head.msgh_id = InP->Head.msgh_id;
455
456 OutP->NDR = InP->NDR;
457 OutP->RetCode = kr;
458
459 #undef InP
460 #undef OutP
461 }
462
463 static ipc_kmsg_t
ipc_kobject_alloc_mig_error(void)464 ipc_kobject_alloc_mig_error(void)
465 {
466 ipc_kmsg_alloc_flags_t flags = IPC_KMSG_ALLOC_KERNEL |
467 IPC_KMSG_ALLOC_ZERO |
468 IPC_KMSG_ALLOC_ALL_INLINE |
469 IPC_KMSG_ALLOC_NOFAIL;
470
471 return ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0, 0, flags);
472 }
473
474 /*
475 * Routine: ipc_kobject_server_internal
476 * Purpose:
477 * Handle a message sent to the kernel.
478 * Generates a reply message.
479 * Version for Untyped IPC.
480 * Conditions:
481 * Nothing locked.
482 */
static kern_return_t
ipc_kobject_server_internal(
	__unused ipc_port_t     port,
	ipc_kmsg_t              request,
	ipc_kmsg_t              *replyp)
{
	int request_msgh_id;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_size_t reply_size, reply_desc_cnt;
	mig_hash_t *ptr;
	mach_msg_header_t *req_hdr, *reply_hdr;
	void *req_data, *reply_data;
	mach_msg_max_trailer_t *req_trailer;

	/* Snapshot current task/proc; used for the MACF kobject filter below. */
	thread_ro_t tro = current_thread_ro();
	task_t curtask = tro->tro_task;
	struct proc *curproc = tro->tro_proc;

	req_hdr = ikm_header(request);
	req_data = ikm_udata_from_header(request);
	req_trailer = ipc_kmsg_get_trailer(request);
	request_msgh_id = req_hdr->msgh_id;

	/* Find corresponding mig_hash entry, if any */
	ptr = find_mig_hash_entry(request_msgh_id);

	/* Get the reply_size; unknown ids get a minimal error reply. */
	if (ptr == (mig_hash_t *)0) {
		reply_size = sizeof(mig_reply_error_t);
		reply_desc_cnt = 0;
	} else {
		reply_size = ptr->kreply_size;
		reply_desc_cnt = ptr->kreply_desc_cnt;
	}

	assert(reply_size >= sizeof(mig_reply_error_t));

	/*
	 * MIG should really assure no data leakage -
	 * but until it does, pessimistically zero the
	 * whole reply buffer.
	 */
	reply = ipc_kmsg_alloc(reply_size, 0, reply_desc_cnt, IPC_KMSG_ALLOC_KERNEL |
	    IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
	/* reply can be non-linear */

	if (ptr == (mig_hash_t *)0) {
#if DEVELOPMENT || DEBUG
		printf("ipc_kobject_server: bogus kernel message, id=%d\n",
		    req_hdr->msgh_id);
#endif  /* DEVELOPMENT || DEBUG */
		_MIG_MSGID_INVALID(req_hdr->msgh_id);

		ipc_kobject_init_reply(reply, request, MIG_BAD_ID);

		*replyp = reply;
		return KERN_SUCCESS;
	}

	/*
	 * We found the routine to call. Call it to perform the kernel function.
	 */
	assert(ptr != (mig_hash_t *)0);

	reply_hdr = ikm_header(reply);
	/* reply is allocated by kernel. non-zero desc count means complex msg */
	reply_data = ikm_udata(reply, reply_desc_cnt, (reply_desc_cnt > 0));

	/*
	 * Reply can be of layout IKM_TYPE_ALL_INLINED, IKM_TYPE_UDATA_OOL,
	 * or IKM_TYPE_ALL_OOL, each of which guarantees kernel/user data segregation.
	 *
	 * Here is the trick: In each case, there _must_ be enough space in
	 * the kdata (header) buffer in `reply` to hold a mig_reply_error_t.
	 */
	assert(reply->ikm_type != IKM_TYPE_KDATA_OOL);
	assert((vm_offset_t)reply_hdr + sizeof(mig_reply_error_t) <= ikm_kdata_end(reply));

	/*
	 * Discussion by case:
	 *
	 * (1) IKM_TYPE_ALL_INLINED
	 *     - IKM_BIG_MSG_SIZE is large enough for mig_reply_error_t
	 * (2) IKM_TYPE_UDATA_OOL
	 *     - IKM_SMALL_MSG_SIZE is large enough for mig_reply_error_t
	 * (3) IKM_TYPE_ALL_OOL
	 *     - This layout is only possible if kdata (header + descs) doesn't fit
	 *       in IKM_SMALL_MSG_SIZE. So we must have at least one descriptor
	 *       following the header, which is enough to fit mig_reply_error_t.
	 */
	static_assert(sizeof(mig_reply_error_t) < IKM_BIG_MSG_SIZE);
	static_assert(sizeof(mig_reply_error_t) < sizeof(mach_msg_base_t) +
	    1 * sizeof(mach_msg_kdescriptor_t));

	/*
	 * Therefore, we can temporarily treat `reply` as a *simple* message that
	 * contains NDR Record + RetCode immediately after the header (which overlaps
	 * with descriptors, if the reply msg is supposed to be complex).
	 *
	 * In doing so we save having a separate allocation specifically for errors.
	 */
	ipc_kobject_init_reply(reply, request, KERN_SUCCESS);

	/* Check if the kobject call should be filtered */
#if CONFIG_MACF
	int idx = ptr->kobjidx;
	uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);

	/* Check kobject mig filter mask, if exists. */
	if (filter_mask != NULL &&
	    idx != KOBJ_IDX_NOT_SET &&
	    !bitstr_test(filter_mask, idx) &&
	    mac_task_kobj_msg_evaluate != NULL) {
		/* Not in filter mask, evaluate policy. */
		kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
		    request_msgh_id, idx);
		if (kr != KERN_SUCCESS) {
			/* Denied: return the policy's error without running the routine. */
			ipc_kobject_set_reply_error_status(reply, kr);
			goto skip_kobjcall;
		}
	}
#endif /* CONFIG_MACF */

	__BeforeKobjectServerTrace(idx);
	/* See contract in header doc for ipc_kobject_reply_status() */
	(*ptr->kroutine)(req_hdr, req_data, req_trailer, reply_hdr, reply_data);
	__AfterKobjectServerTrace(idx);

#if CONFIG_MACF
skip_kobjcall:
#endif
	counter_inc(&kernel_task->messages_received);

	kern_return_t reply_status = ipc_kobject_reply_status(reply);

	if (reply_status == MIG_NO_REPLY) {
		/*
		 * The server function will send a reply message
		 * using the reply port right, which it has saved.
		 */
		ipc_kmsg_free(reply);
		reply = IKM_NULL;
	} else if (reply_status != KERN_SUCCESS && reply_size > sizeof(mig_reply_error_t)) {
		assert(ikm_header(reply)->msgh_size == sizeof(mig_reply_error_t));
		/*
		 * MIG returned an error, and the original kmsg we allocated for reply
		 * is oversized. Deallocate it and allocate a smaller, proper kmsg
		 * that fits mig_reply_error_t snugly.
		 *
		 * We must do so because we used the trick mentioned above which (depending
		 * on the kmsg layout) may cause payload in mig_reply_error_t to overlap
		 * with kdata buffer meant for descriptors.
		 *
		 * This will mess with ikm_kdata_size() calculation down the line so
		 * reallocate a new buffer immediately here.
		 */
		ipc_kmsg_t new_reply = ipc_kobject_alloc_mig_error();
		ipc_kobject_init_new_reply(new_reply, reply, reply_status);

		/* MIG contract: If status is not KERN_SUCCESS, reply must be simple. */
		assert(!(ikm_header(reply)->msgh_bits & MACH_MSGH_BITS_COMPLEX));
		assert(ikm_header(reply)->msgh_local_port == MACH_PORT_NULL);
		assert(ikm_header(reply)->msgh_voucher_port == MACH_PORT_NULL);
		/* So we can simply free the original reply message. */
		ipc_kmsg_free(reply);
		reply = new_reply;
	}

	*replyp = reply;
	return KERN_SUCCESS;
}
654
655
656 /*
657 * Routine: ipc_kobject_server
658 * Purpose:
659 * Handle a message sent to the kernel.
660 * Generates a reply message.
661 * Version for Untyped IPC.
662 *
663 * Ownership of the incoming rights (from the request)
 * are transferred on success (whether a reply is made or not).
665 *
666 * Conditions:
667 * Nothing locked.
668 */
ipc_kmsg_t
ipc_kobject_server(
	ipc_port_t              port,
	ipc_kmsg_t              request,
	mach_msg_option64_t     option __unused)
{
	mach_msg_header_t *req_hdr = ikm_header(request);
#if DEVELOPMENT || DEBUG
	const int request_msgh_id = req_hdr->msgh_id;
#endif
	ipc_port_t request_voucher_port;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_header_t *reply_hdr;
	kern_return_t kr;

	ipc_kmsg_trace_send(request, option);

	/* UEXT objects have their own dispatcher; everything else goes via MIG. */
	if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
		kr = uext_server(port, request, &reply);
	} else {
		kr = ipc_kobject_server_internal(port, request, &reply);
		assert(kr == KERN_SUCCESS);
	}

	if (kr != KERN_SUCCESS) {
		/* Only uext_server() can get here; send-side errors are excluded. */
		assert(kr != MACH_SEND_TIMED_OUT &&
		    kr != MACH_SEND_INTERRUPTED &&
		    kr != MACH_SEND_INVALID_DEST);
		assert(reply == IKM_NULL);

		/* convert the server error into a MIG error */
		reply = ipc_kobject_alloc_mig_error();
		ipc_kobject_init_reply(reply, request, kr);
	}

	counter_inc(&kernel_task->messages_sent);
	/*
	 * Destroy destination. The following code differs from
	 * ipc_object_destroy in that we release the send-once
	 * right instead of generating a send-once notification
	 * (which would bring us here again, creating a loop).
	 * It also differs in that we only expect send or
	 * send-once rights, never receive rights.
	 */
	switch (MACH_MSGH_BITS_REMOTE(req_hdr->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(req_hdr->msgh_remote_port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ipc_port_release_sonce(req_hdr->msgh_remote_port);
		break;

	default:
		panic("ipc_kobject_server: strange destination rights");
	}

	/*
	 * Destroy voucher. The kernel MIG servers never take ownership
	 * of vouchers sent in messages. Swallow any such rights here.
	 */
	request_voucher_port = ipc_kmsg_get_voucher_port(request);
	if (IP_VALID(request_voucher_port)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		    MACH_MSGH_BITS_VOUCHER(req_hdr->msgh_bits));
		ipc_port_release_send(request_voucher_port);
		ipc_kmsg_clear_voucher_port(request);
	}

	if (reply == IKM_NULL ||
	    ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * The server function is responsible for the contents
		 * of the message. The reply port right is moved
		 * to the reply message, and we have deallocated
		 * the destination port right, so we just need
		 * to free the kmsg.
		 */
		ipc_kmsg_free(request);
	} else {
		/*
		 * The message contents of the request are intact.
		 * Remote port has been released above. Do not destroy
		 * the reply port right either, which is needed in the reply message.
		 */
		ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
	}

	if (reply != IKM_NULL) {
		reply_hdr = ikm_header(reply);
		ipc_port_t reply_port = reply_hdr->msgh_remote_port;

		if (!IP_VALID(reply_port)) {
			/*
			 * Can't queue the reply message if the destination
			 * (the reply port) isn't valid.
			 */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		} else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
			/* do not lock reply port, use raw pointer comparison */

			/*
			 * Don't send replies to kobject kernel ports.
			 */
#if DEVELOPMENT || DEBUG
			printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
			    __func__, ip_kotype(reply_port), request_msgh_id);
#endif  /* DEVELOPMENT || DEBUG */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		}
	}

	return reply;
}
785
/*
 * Store a kobject pointer into a port. With pointer authentication,
 * the pointer is signed with a discriminator derived from the storage
 * address, the kobject type, and the port's immovability bits, so it
 * can only be authenticated back with the same combination
 * (see ipc_kobject_get_raw()).
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	/* Fold immovability bits and type into the PAC discriminator. */
	type |= port->ip_immovable_receive << 14;
	type |= port->ip_immovable_send << 15;
	type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
	kobject = ptrauth_sign_unauthenticated(kobject,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator(store, type));
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
807
/*
 * Associate a kobject pointer and type with a port: the type is
 * OR-ed into the object bits first, then the (possibly signed)
 * pointer is stored.
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type);
	ipc_kobject_set_raw(port, kobject, type);
}
818
819 /*
820 * Routine: ipc_kobject_get_raw
821 * Purpose:
822 * Returns the kobject pointer of a specified port.
823 *
824 * This returns the current value of the kobject pointer,
825 * without any validation (the caller is expected to do
826 * the validation it needs).
827 *
828 * Conditions:
829 * The port is a kobject of the proper type.
830 */
__header_always_inline ipc_kobject_t
ipc_kobject_get_raw(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;
	ipc_kobject_t kobject = (ipc_kobject_t)*store;

#if __has_feature(ptrauth_calls)
	/*
	 * Rebuild the exact discriminator used by ipc_kobject_set_raw();
	 * authentication fails (trapping) if type or immovability differ.
	 */
	type |= port->ip_immovable_receive << 14;
	type |= port->ip_immovable_send << 15;
	type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
	kobject = ptrauth_auth_data(kobject,
	    ptrauth_key_process_independent_data,
	    ptrauth_blend_discriminator(store, type));
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	return kobject;
}
852
853 __abortlike
854 static void
ipc_kobject_require_panic(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)855 ipc_kobject_require_panic(
856 ipc_port_t port,
857 ipc_kobject_t kobject,
858 ipc_kobject_type_t kotype)
859 {
860 if (ip_kotype(port) != kotype) {
861 panic("port %p: invalid kobject type, got %d wanted %d",
862 port, ip_kotype(port), kotype);
863 }
864 panic("port %p: invalid kobject, got %p wanted %p",
865 port, ipc_kobject_get_raw(port, kotype), kobject);
866 }
867
868 __header_always_inline void
ipc_kobject_require(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)869 ipc_kobject_require(
870 ipc_port_t port,
871 ipc_kobject_t kobject,
872 ipc_kobject_type_t kotype)
873 {
874 ipc_kobject_t cur;
875
876 if (__improbable(ip_kotype(port) != kotype)) {
877 ipc_kobject_require_panic(port, kobject, kotype);
878 }
879 cur = ipc_kobject_get_raw(port, kotype);
880 if (cur && cur != kobject) {
881 ipc_kobject_require_panic(port, kobject, kotype);
882 }
883 }
884
885 /*
886 * Routine: ipc_kobject_get_locked
887 * Purpose:
888 * Returns the kobject pointer of a specified port,
889 * for an expected type.
890 *
891 * Returns IKO_NULL if the port isn't active.
892 *
893 * This function may be used when:
894 * - the port lock is held
895 * - the kobject association stays while there
896 * are any outstanding rights.
897 *
898 * Conditions:
899 * The port is a kobject of the proper type.
900 */
901 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)902 ipc_kobject_get_locked(
903 ipc_port_t port,
904 ipc_kobject_type_t type)
905 {
906 ipc_kobject_t kobject = IKO_NULL;
907
908 if (ip_active(port) && type == ip_kotype(port)) {
909 kobject = ipc_kobject_get_raw(port, type);
910 }
911
912 return kobject;
913 }
914
915 /*
916 * Routine: ipc_kobject_get_stable
917 * Purpose:
918 * Returns the kobject pointer of a specified port,
919 * for an expected type, for types where the port/kobject
920 * association is permanent.
921 *
922 * Returns IKO_NULL if the port isn't active.
923 *
924 * Conditions:
925 * The port is a kobject of the proper type.
926 */
927 ipc_kobject_t
ipc_kobject_get_stable(ipc_port_t port,ipc_kobject_type_t type)928 ipc_kobject_get_stable(
929 ipc_port_t port,
930 ipc_kobject_type_t type)
931 {
932 assert(ipc_kobject_ops_get(type)->iko_op_stable);
933 return ipc_kobject_get_locked(port, type);
934 }
935
936 /*
937 * Routine: ipc_kobject_init_port
938 * Purpose:
939 * Initialize a kobject port with the given types and options.
940 *
941 * This function never fails.
942 */
static inline void
ipc_kobject_init_port(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t options)
{
	if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
		/* hand the caller a send right */
		ipc_port_make_send_any_locked(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
		/* arm a no-senders request, with a port reference backing it */
		port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
		ip_reference(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
		port->ip_no_grant = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
		port->ip_immovable_send = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_PINNED) {
		port->ip_pinned = 1;
	}

	/*
	 * Set the kobject association last: ipc_kobject_set_raw() folds the
	 * immovability bits set above into the PAC discriminator.
	 */
	ipc_kobject_set_internal(port, kobject, type);
}
969
970 /*
971 * Routine: ipc_kobject_alloc_port
972 * Purpose:
973 * Allocate a kobject port in the kernel space of the specified type.
974 *
975 * This function never fails.
976 *
977 * Conditions:
978 * No locks held (memory is allocated)
979 */
980 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)981 ipc_kobject_alloc_port(
982 ipc_kobject_t kobject,
983 ipc_kobject_type_t type,
984 ipc_kobject_alloc_options_t options)
985 {
986 ipc_port_t port;
987 port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_ENFORCE_RIGID_REPLY_PORT_SEMANTICS);
988
989 if (port == IP_NULL) {
990 panic("ipc_kobject_alloc_port(): failed to allocate port");
991 }
992
993 ipc_kobject_init_port(port, kobject, type, options);
994 return port;
995 }
996
997 /*
998 * Routine: ipc_kobject_alloc_labeled_port
999 * Purpose:
1000 * Allocate a kobject port and associated mandatory access label
1001 * in the kernel space of the specified type.
1002 *
1003 * This function never fails.
1004 *
1005 * Conditions:
1006 * No locks held (memory is allocated)
1007 */
1008
1009 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)1010 ipc_kobject_alloc_labeled_port(
1011 ipc_kobject_t kobject,
1012 ipc_kobject_type_t type,
1013 ipc_label_t label,
1014 ipc_kobject_alloc_options_t options)
1015 {
1016 ipc_port_t port;
1017
1018 port = ipc_kobject_alloc_port(kobject, type, options);
1019
1020 ipc_port_set_label(port, label);
1021
1022 return port;
1023 }
1024
/*
 *	Routine:	ipc_kobject_subst_once_no_senders
 *	Purpose:
 *		No-senders handler for IKOT_PORT_SUBST_ONCE wrapper ports:
 *		destroys the wrapper, and releases the target's send right
 *		if the wrapper still holds one.
 */
static void
ipc_kobject_subst_once_no_senders(
	ipc_port_t port,
	mach_port_mscount_t mscount)
{
	ipc_port_t ko_port;

	/* tear the wrapper down; returns the kobject (target port) if any */
	ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);

	if (ko_port) {
		/*
		 * Clean up the right if the wrapper wasn't hollowed out
		 * by ipc_kobject_alloc_subst_once().
		 */
		ipc_port_release_send(ko_port);
	}
}
1042
1043 /*
1044 * Routine: ipc_kobject_alloc_subst_once
1045 * Purpose:
1046 * Make a port that will be substituted by the kolabel
1047 * rules once, preventing the next substitution (of its target)
1048 * to happen if any.
1049 *
1050 * Returns:
1051 * A port with a send right, that will substitute to its "kobject".
1052 *
1053 * Conditions:
1054 * No locks held (memory is allocated).
1055 *
1056 * `target` holds a send-right donated to this function,
1057 * consumed in ipc_kobject_subst_once_no_senders().
1058 */
1059 ipc_port_t
ipc_kobject_alloc_subst_once(ipc_port_t target)1060 ipc_kobject_alloc_subst_once(
1061 ipc_port_t target)
1062 {
1063 if (!IP_VALID(target)) {
1064 return target;
1065 }
1066 return ipc_kobject_alloc_labeled_port(target,
1067 IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
1068 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1069 }
1070
1071 /*
1072 * Routine: ipc_kobject_make_send_lazy_alloc_port
1073 * Purpose:
1074 * Make a send once for a kobject port.
1075 *
1076 * A location owning this port is passed in port_store.
1077 * If no port exists, a port is made lazily.
1078 *
1079 * A send right is made for the port, and if this is the first one
1080 * (possibly not for the first time), then the no-more-senders
1081 * notification is rearmed.
1082 *
1083 * When a notification is armed, the kobject must donate
1084 * one of its references to the port. It is expected
1085 * the no-more-senders notification will consume this reference.
1086 *
1087 * Returns:
1088 * TRUE if a notification was armed
1089 * FALSE else
1090 *
1091 * Conditions:
1092 * Nothing is locked, memory can be allocated.
1093 * The caller must be able to donate a kobject reference to the port.
1094 */
bool
ipc_kobject_make_send_lazy_alloc_port(
	ipc_port_t *port_store,
	ipc_kobject_t kobject,
	ipc_kobject_type_t type,
	ipc_kobject_alloc_options_t alloc_opts)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	/* the lazily-made port always gets a send right and an armed
	 * no-senders notification */
	alloc_opts |= IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST;
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_port(kobject, type, alloc_opts);

		/* publish with release order, pairing with the
		 * dependency-ordered load above */
		if (os_atomic_cmpxchgv(port_store,
		    IP_NULL, port, &previous, release)) {
			/* we won: the notification was armed at alloc time */
			return TRUE;
		}

		/*
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		/* lost the race: use the port another thread published */
		port = previous;
	}

	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	/* KERN_SUCCESS means this call is the one that armed it */
	return kr == KERN_SUCCESS;
}
1134
1135 /*
1136 * Routine: ipc_kobject_make_send_lazy_alloc_labeled_port
1137 * Purpose:
1138 * Make a send once for a kobject port.
1139 *
1140 * A location owning this port is passed in port_store.
1141 * If no port exists, a port is made lazily.
1142 *
1143 * A send right is made for the port, and if this is the first one
1144 * (possibly not for the first time), then the no-more-senders
1145 * notification is rearmed.
1146 *
1147 * When a notification is armed, the kobject must donate
1148 * one of its references to the port. It is expected
1149 * the no-more-senders notification will consume this reference.
1150 *
1151 * Returns:
1152 * TRUE if a notification was armed
1153 * FALSE else
1154 *
1155 * Conditions:
1156 * Nothing is locked, memory can be allocated.
1157 * The caller must be able to donate a kobject reference to the port.
1158 */
boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(
	ipc_port_t *port_store,
	ipc_kobject_t kobject,
	ipc_kobject_type_t type,
	ipc_label_t label)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		/* make the labeled port with a send right and an armed
		 * no-senders notification */
		port = ipc_kobject_alloc_labeled_port(kobject, type, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		/* publish with release order, pairing with the
		 * dependency-ordered load above */
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
			/* we won: the notification was armed at alloc time */
			return TRUE;
		}

		/*
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		/* lost the race: the published port must carry a label too */
		port = previous;
		assert(ip_is_kolabeled(port));
	}

	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	/* KERN_SUCCESS means this call is the one that armed it */
	return kr == KERN_SUCCESS;
}
1197
1198 /*
1199 * Routine: ipc_kobject_nsrequest_locked
1200 * Purpose:
1201 * Arm the no-senders notification for the given kobject
1202 * if it doesn't have one armed yet.
1203 *
1204 * Conditions:
1205 * Port is locked and active.
1206 *
1207 * Returns:
1208 * KERN_SUCCESS: the notification was armed
1209 * KERN_ALREADY_WAITING: the notification was already armed
1210 * KERN_FAILURE: the notification would fire immediately
1211 */
static inline kern_return_t
ipc_kobject_nsrequest_locked(
	ipc_port_t port,
	mach_port_mscount_t sync)
{
	if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
		/* a notification is already armed, nothing to do */
		return KERN_ALREADY_WAITING;
	}

	if (port->ip_srights == 0 && sync <= port->ip_mscount) {
		/*
		 * No outstanding send rights and the make-send count has
		 * already reached `sync`: the notification would fire
		 * immediately, so refuse to arm it.
		 */
		return KERN_FAILURE;
	}

	/* arm it; the armed state holds a port reference that the
	 * eventual notification consumes */
	port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
	ip_reference(port);
	return KERN_SUCCESS;
}
1229
1230
1231 /*
1232 * Routine: ipc_kobject_nsrequest
1233 * Purpose:
1234 * Arm the no-senders notification for the given kobject
1235 * if it doesn't have one armed yet.
1236 *
1237 * Returns:
1238 * KERN_SUCCESS: the notification was armed
1239 * KERN_ALREADY_WAITING: the notification was already armed
1240 * KERN_FAILURE: the notification would fire immediately
1241 * KERN_INVALID_RIGHT: the port is dead
1242 */
1243 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1244 ipc_kobject_nsrequest(
1245 ipc_port_t port,
1246 mach_port_mscount_t sync,
1247 mach_port_mscount_t *mscount)
1248 {
1249 kern_return_t kr = KERN_INVALID_RIGHT;
1250
1251 if (IP_VALID(port)) {
1252 ip_mq_lock(port);
1253
1254 if (mscount) {
1255 *mscount = port->ip_mscount;
1256 }
1257 if (ip_active(port)) {
1258 kr = ipc_kobject_nsrequest_locked(port, sync);
1259 }
1260
1261 ip_mq_unlock(port);
1262 } else if (mscount) {
1263 *mscount = 0;
1264 }
1265
1266 return kr;
1267 }
1268
1269 ipc_port_t
ipc_kobject_copy_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1270 ipc_kobject_copy_send(
1271 ipc_port_t port,
1272 ipc_kobject_t kobject,
1273 ipc_kobject_type_t kotype)
1274 {
1275 ipc_port_t sright = port;
1276
1277 if (IP_VALID(port)) {
1278 ip_mq_lock(port);
1279 if (ip_active(port)) {
1280 ipc_kobject_require(port, kobject, kotype);
1281 ipc_port_copy_send_any_locked(port);
1282 } else {
1283 sright = IP_DEAD;
1284 }
1285 ip_mq_unlock(port);
1286 }
1287
1288 return sright;
1289 }
1290
1291 ipc_port_t
ipc_kobject_make_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1292 ipc_kobject_make_send(
1293 ipc_port_t port,
1294 ipc_kobject_t kobject,
1295 ipc_kobject_type_t kotype)
1296 {
1297 ipc_port_t sright = port;
1298
1299 if (IP_VALID(port)) {
1300 ip_mq_lock(port);
1301 if (ip_active(port)) {
1302 ipc_kobject_require(port, kobject, kotype);
1303 ipc_port_make_send_any_locked(port);
1304 } else {
1305 sright = IP_DEAD;
1306 }
1307 ip_mq_unlock(port);
1308 }
1309
1310 return sright;
1311 }
1312
1313 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1314 ipc_kobject_make_send_nsrequest(
1315 ipc_port_t port,
1316 ipc_kobject_t kobject,
1317 ipc_kobject_type_t kotype)
1318 {
1319 kern_return_t kr = KERN_INVALID_RIGHT;
1320
1321 if (IP_VALID(port)) {
1322 ip_mq_lock(port);
1323 if (ip_active(port)) {
1324 ipc_kobject_require(port, kobject, kotype);
1325 ipc_port_make_send_any_locked(port);
1326 kr = ipc_kobject_nsrequest_locked(port, 0);
1327 assert(kr != KERN_FAILURE);
1328 }
1329 ip_mq_unlock(port);
1330 }
1331
1332 return kr;
1333 }
1334
1335 kern_return_t
ipc_kobject_make_send_nsrequest_locked(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1336 ipc_kobject_make_send_nsrequest_locked(
1337 ipc_port_t port,
1338 ipc_kobject_t kobject,
1339 ipc_kobject_type_t kotype)
1340 {
1341 kern_return_t kr = KERN_INVALID_RIGHT;
1342
1343 if (ip_active(port)) {
1344 ipc_kobject_require(port, kobject, kotype);
1345 ipc_port_make_send_any_locked(port);
1346 kr = ipc_kobject_nsrequest_locked(port, 0);
1347 assert(kr != KERN_FAILURE);
1348 }
1349
1350 return kr;
1351 }
1352
/*
 *	Routine:	ipc_kobject_disable_internal
 *	Purpose:
 *		Clear the port's kobject pointer (keeping its kotype)
 *		and, for labeled ports, break the label's alternate-port
 *		linkage.  Returns the previous kobject pointer.
 */
static inline ipc_kobject_t
ipc_kobject_disable_internal(
	ipc_port_t port,
	ipc_kobject_type_t type)
{
	ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);

	ipc_kobject_set_raw(port, IKO_NULL, type);
	if (ip_is_kolabeled(port)) {
		port->ip_kolabel->ikol_alt_port = IP_NULL;
	}

	return kobject;
}
1367
1368 /*
1369 * Routine: ipc_kobject_dealloc_port_and_unlock
1370 * Purpose:
1371 * Destroys a port allocated with any of the ipc_kobject_alloc*
1372 * functions.
1373 *
1374 * This will atomically:
1375 * - make the port inactive,
1376 * - optionally check the make send count
1377 * - disable (nil-out) the kobject pointer for kobjects without
1378 * a destroy callback.
1379 *
1380 * The port will retain its kobject-ness and kobject type.
1381 *
1382 *
1383 * Returns:
1384 * The kobject pointer that was set prior to this call
1385 * (possibly NULL if the kobject was already disabled).
1386 *
1387 * Conditions:
1388 * The port is active and locked.
1389 * On return the port is inactive and unlocked.
1390 */
/* Out-of-line panic: the port's kotype doesn't match the expected type. */
__abortlike
static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
{
	panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
}
1397
/* Out-of-line panic: the port's make-send count doesn't match `mscount`. */
__abortlike
static void
__ipc_kobject_dealloc_bad_mscount_panic(
	ipc_port_t port,
	mach_port_mscount_t mscount,
	ipc_kobject_type_t type)
{
	panic("unexpected make-send count: %p[%d], %d, %d",
	    port, type, port->ip_mscount, mscount);
}
1408
/* Out-of-line panic: send rights are still outstanding on the port. */
__abortlike
static void
__ipc_kobject_dealloc_bad_srights_panic(
	ipc_port_t port,
	ipc_kobject_type_t type)
{
	panic("unexpected send right count: %p[%d], %d",
	    port, type, port->ip_srights);
}
1418
ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(
	ipc_port_t port,
	mach_port_mscount_t mscount,
	ipc_kobject_type_t type)
{
	ipc_kobject_t kobject = IKO_NULL;
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);

	require_ip_active(port);

	if (ip_kotype(port) != type) {
		__ipc_kobject_dealloc_bad_type_panic(port, type);
	}

	/* mscount == 0 means "don't check the make-send count" */
	if (mscount && port->ip_mscount != mscount) {
		__ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
	}
	/*
	 * When the make-send count is checked, or the port/kobject
	 * association is stable, no send right may be left outstanding.
	 */
	if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
		__ipc_kobject_dealloc_bad_srights_panic(port, type);
	}

	if (!ops->iko_op_destroy) {
		/* no destroy callback: nil-out the kobject pointer now */
		kobject = ipc_kobject_disable_internal(port, type);
	}

	ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);

	return kobject;
}
1449
1450 /*
1451 * Routine: ipc_kobject_dealloc_port
1452 * Purpose:
1453 * Destroys a port allocated with any of the ipc_kobject_alloc*
1454 * functions.
1455 *
1456 * This will atomically:
1457 * - make the port inactive,
1458 * - optionally check the make send count
1459 * - disable (nil-out) the kobject pointer for kobjects without
1460 * a destroy callback.
1461 *
1462 * The port will retain its kobject-ness and kobject type.
1463 *
1464 *
1465 * Returns:
1466 * The kobject pointer that was set prior to this call
1467 * (possibly NULL if the kobject was already disabled).
1468 *
1469 * Conditions:
1470 * Nothing is locked.
1471 * The port is active.
1472 * On return the port is inactive.
1473 */
ipc_kobject_t
ipc_kobject_dealloc_port(
	ipc_port_t port,
	mach_port_mscount_t mscount,
	ipc_kobject_type_t type)
{
	/* take the port lock; the _and_unlock variant drops it */
	ip_mq_lock(port);
	return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
}
1483
1484 /*
1485 * Routine: ipc_kobject_enable
1486 * Purpose:
1487 * Make a port represent a kernel object of the given type.
1488 * The caller is responsible for handling refs for the
1489 * kernel object, if necessary.
1490 * Conditions:
1491 * Nothing locked.
1492 * The port must be active.
1493 */
void
ipc_kobject_enable(
	ipc_port_t port,
	ipc_kobject_t kobject,
	ipc_kobject_type_t type)
{
	/* stable port/kobject associations may never be re-set */
	assert(!ipc_kobject_ops_get(type)->iko_op_stable);

	ip_mq_lock(port);
	require_ip_active(port);

	/* the port must already carry the expected kotype */
	if (type != ip_kotype(port)) {
		panic("%s: unexpected kotype of port %p: want %d, got %d",
		    __func__, port, type, ip_kotype(port));
	}

	ipc_kobject_set_raw(port, kobject, type);

	ip_mq_unlock(port);
}
1514
1515 /*
1516 * Routine: ipc_kobject_disable_locked
1517 * Purpose:
1518 * Clear the kobject pointer for a port.
1519 * Conditions:
1520 * The port is locked.
1521 * Returns the current kobject pointer.
1522 */
ipc_kobject_t
ipc_kobject_disable_locked(
	ipc_port_t port,
	ipc_kobject_type_t type)
{
	if (ip_active(port)) {
		/* an active port may only be disabled for unstable types */
		assert(!ipc_kobject_ops_get(type)->iko_op_stable);
	}

	if (ip_kotype(port) != type) {
		panic("port %p of type %d, expecting %d",
		    port, ip_kotype(port), type);
	}

	return ipc_kobject_disable_internal(port, type);
}
1539
1540 /*
1541 * Routine: ipc_kobject_disable
1542 * Purpose:
1543 * Clear the kobject pointer for a port.
1544 * Conditions:
1545 * Nothing locked.
1546 * Returns the current kobject pointer.
1547 */
1548 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1549 ipc_kobject_disable(
1550 ipc_port_t port,
1551 ipc_kobject_type_t type)
1552 {
1553 ipc_kobject_t kobject;
1554
1555 ip_mq_lock(port);
1556 kobject = ipc_kobject_disable_locked(port, type);
1557 ip_mq_unlock(port);
1558
1559 return kobject;
1560 }
1561
1562 /*
1563 * Routine: ipc_kobject_upgrade_mktimer_locked
1564 * Purpose:
1565 * Upgrades a port to mktimer kobject status
1566 *
1567 * This pattern is rather bad as it leads to various
1568 * confusions that need to be special cased with kobject-ness
1569 * of ports. No new port with dual kobject/message-queue
1570 * semantics should be made ever.
1571 *
1572 * Conditions:
1573 * Port is locked
1574 */
void
ipc_kobject_upgrade_mktimer_locked(
	ipc_port_t port,
	ipc_kobject_t kobject)
{
	/* tag the existing port as an IKOT_TIMER kobject */
	ipc_kobject_set_internal(port, kobject, IKOT_TIMER);
}
1582
1583 /*
1584 * Routine: ipc_kobject_notify_no_senders
1585 * Purpose:
1586 * Handles a no-senders notification
1587 * sent to a kobject.
1588 *
1589 * A port reference is consumed.
1590 *
1591 * Conditions:
1592 * Nothing locked.
1593 */
void
ipc_kobject_notify_no_senders(
	ipc_port_t port,
	mach_port_mscount_t mscount)
{
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));

	/* every kobject type reaching here must provide a handler */
	assert(ops->iko_op_no_senders);
	ops->iko_op_no_senders(port, mscount);

	/* consume the ref ipc_notify_no_senders_prepare left */
	ip_release(port);
}
1607
/*
 *	Routine:	ipc_kobject_notify_send_once_and_unlock
 *	Purpose:
 *		Handles a send-once notification
 *		sent to a kobject.
 *
 *		A send-once port reference is consumed.
 *
 *	Conditions:
 *		Port is locked.
 */
void
ipc_kobject_notify_send_once_and_unlock(
	ipc_port_t port)
{
	/*
	 * drop the send once right while we hold the port lock.
	 * we will keep a port reference while we run the possible
	 * callouts to kobjects.
	 *
	 * This a simplified version of ipc_port_release_sonce()
	 * since kobjects can't be special reply ports.
	 */
	assert(!port->ip_specialreply);

	ip_sorights_dec(port);
	ip_mq_unlock(port);

	/*
	 * because there's very few consumers,
	 * the code here isn't generic as it's really not worth it.
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_RESUME:
		task_suspension_send_once(port);
		break;
	default:
		break;
	}

	ip_release(port);
}
1650
1651
1652 /*
1653 * Routine: ipc_kobject_destroy
1654 * Purpose:
1655 * Release any kernel object resources associated
1656 * with the port, which is being destroyed.
1657 *
1658 * This path to free object resources should only be
1659 * needed when resources are associated with a user's port.
1660 * In the normal case, when the kernel is the receiver,
1661 * the code calling ipc_kobject_dealloc_port() should clean
1662 * up the object resources.
1663 *
1664 * Cleans up any kobject label that might be present.
1665 * Conditions:
1666 * The port is not locked, but it is dead.
1667 */
void
ipc_kobject_destroy(
	ipc_port_t port)
{
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));

	if (ops->iko_op_permanent) {
		panic("trying to destroy a permanent port %p with kobject type: %d", port, ip_kotype(port));
	}
	if (ops->iko_op_destroy) {
		/* let the kobject type release its own resources */
		ops->iko_op_destroy(port);
	}

	if (ip_is_kolabeled(port)) {
		/* detach and free the kobject label, clearing the label bit */
		ipc_kobject_label_t labelp = port->ip_kolabel;

		assert(labelp != NULL);
		assert(labelp->ikol_alt_port == IP_NULL);
		assert(ip_is_kobject(port));
		port->ip_kolabel = NULL;
		io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
		zfree(ipc_kobject_label_zone, labelp);
	}
}
1692
1693 /*
1694 * Routine: ipc_kobject_label_substitute_task
1695 * Purpose:
1696 * Substitute a task control port for its immovable
1697 * equivalent when the receiver is that task.
1698 * Conditions:
1699 * Space is write locked and active.
1700 * Port is locked and active.
1701 * Returns:
1702 * - IP_NULL port if no substitution is to be done
1703 * - a valid port if a substitution needs to happen
1704 */
1705 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1706 ipc_kobject_label_substitute_task(
1707 ipc_space_t space,
1708 ipc_kobject_label_t kolabel,
1709 ipc_port_t port)
1710 {
1711 ipc_port_t subst = IP_NULL;
1712 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1713
1714 if (task != TASK_NULL && task == space->is_task) {
1715 if ((subst = kolabel->ikol_alt_port)) {
1716 return subst;
1717 }
1718 }
1719
1720 return IP_NULL;
1721 }
1722
1723 /*
1724 * Routine: ipc_kobject_label_substitute_task_read
1725 * Purpose:
1726 * Substitute a task read port for its immovable
1727 * control equivalent when the receiver is that task.
1728 * Conditions:
1729 * Space is write locked and active.
1730 * Port is locked and active.
1731 * Returns:
1732 * - IP_NULL port if no substitution is to be done
1733 * - a valid port if a substitution needs to happen
1734 */
1735 static ipc_port_t
ipc_kobject_label_substitute_task_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1736 ipc_kobject_label_substitute_task_read(
1737 ipc_space_t space,
1738 ipc_kobject_label_t kolabel,
1739 ipc_port_t port)
1740 {
1741 ipc_port_t subst = IP_NULL;
1742 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_READ);
1743
1744 if (task != TASK_NULL && task == space->is_task) {
1745 if ((subst = kolabel->ikol_alt_port)) {
1746 return subst;
1747 }
1748 }
1749
1750 return IP_NULL;
1751 }
1752
1753 /*
1754 * Routine: ipc_kobject_label_substitute_thread
1755 * Purpose:
1756 * Substitute a thread control port for its immovable
1757 * equivalent when it belongs to the receiver task.
1758 * Conditions:
1759 * Space is write locked and active.
1760 * Port is locked and active.
1761 * Returns:
1762 * - IP_NULL port if no substitution is to be done
1763 * - a valid port if a substitution needs to happen
1764 */
1765 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1766 ipc_kobject_label_substitute_thread(
1767 ipc_space_t space,
1768 ipc_kobject_label_t kolabel,
1769 ipc_port_t port)
1770 {
1771 ipc_port_t subst = IP_NULL;
1772 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1773
1774 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1775 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1776 return subst;
1777 }
1778 }
1779
1780 return IP_NULL;
1781 }
1782
1783 /*
1784 * Routine: ipc_kobject_label_substitute_thread_read
1785 * Purpose:
1786 * Substitute a thread read port for its immovable
1787 * control equivalent when it belongs to the receiver task.
1788 * Conditions:
1789 * Space is write locked and active.
1790 * Port is locked and active.
1791 * Returns:
1792 * - IP_NULL port if no substitution is to be done
1793 * - a valid port if a substitution needs to happen
1794 */
1795 static ipc_port_t
ipc_kobject_label_substitute_thread_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1796 ipc_kobject_label_substitute_thread_read(
1797 ipc_space_t space,
1798 ipc_kobject_label_t kolabel,
1799 ipc_port_t port)
1800 {
1801 ipc_port_t subst = IP_NULL;
1802 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_READ);
1803
1804 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1805 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1806 return subst;
1807 }
1808 }
1809
1810 return IP_NULL;
1811 }
1812
1813 /*
1814 * Routine: ipc_kobject_label_check
1815 * Purpose:
1816 * Check to see if the space is allowed to possess
1817 * a right for the given port. In order to qualify,
1818 * the space label must contain all the privileges
1819 * listed in the port/kobject label.
1820 *
1821 * Conditions:
1822 * Space is write locked and active.
1823 * Port is locked and active.
1824 *
1825 * Returns:
1826 * Whether the copyout is authorized.
1827 *
1828 * If a port substitution is requested, the space is unlocked,
1829 * the port is unlocked and its "right" consumed.
1830 *
1831 * As of now, substituted ports only happen for send rights.
1832 */
bool
ipc_kobject_label_check(
	ipc_space_t space,
	ipc_port_t port,
	mach_msg_type_name_t msgt_name,
	ipc_object_copyout_flags_t *flags,
	ipc_port_t *subst_portp)
{
	ipc_kobject_label_t kolabel;
	ipc_label_t label;

	assert(is_active(space));
	assert(ip_active(port));

	*subst_portp = IP_NULL;

	/* Unlabeled ports/kobjects are always allowed */
	if (!ip_is_kolabeled(port)) {
		return true;
	}

	/* Never OK to copyout the receive right for a labeled kobject */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		panic("ipc_kobject_label_check: attempted receive right "
		    "copyout for labeled kobject");
	}

	kolabel = port->ip_kolabel;
	label = kolabel->ikol_label;

	if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
	    (label & IPC_LABEL_SUBST_MASK)) {
		ipc_port_t subst = IP_NULL;

		/* substitutions only happen for send rights */
		if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
			return false;
		}

		if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
			/*
			 * The next check will _not_ substitute.
			 * hollow out our one-time wrapper,
			 * and steal its send right.
			 */
			*flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
			subst = ipc_kobject_disable_locked(port,
			    IKOT_PORT_SUBST_ONCE);
			is_write_unlock(space);
			ipc_port_release_send_and_unlock(port);
			if (subst == IP_NULL) {
				panic("subst-once port %p was consumed twice", port);
			}
			*subst_portp = subst;
			return true;
		}

		/* dispatch on the substitution kind encoded in the label */
		switch (label & IPC_LABEL_SUBST_MASK) {
		case IPC_LABEL_SUBST_TASK:
			subst = ipc_kobject_label_substitute_task(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_TASK_READ:
			subst = ipc_kobject_label_substitute_task_read(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD:
			subst = ipc_kobject_label_substitute_thread(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD_READ:
			subst = ipc_kobject_label_substitute_thread_read(space,
			    kolabel, port);
			break;
		default:
			panic("unexpected label: %llx", label);
		}

		if (subst != IP_NULL) {
			ip_reference(subst);
			is_write_unlock(space);

			/*
			 * We do not hold a proper send right on `subst`,
			 * only a reference.
			 *
			 * Because of how thread/task termination works,
			 * there is no guarantee copy_send() would work,
			 * so we need to make_send().
			 *
			 * We can do that because ports tagged with
			 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
			 * the no-senders notification.
			 */

			ipc_port_release_send_and_unlock(port);
			/* no check: dPAC integrity */
			port = ipc_port_make_send_any(subst);
			ip_release(subst);
			*subst_portp = port;
			return true;
		}
	}

	/* allowed iff the space label covers all privileges the port requires */
	return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
	       (label & IPC_LABEL_SPACE_MASK);
}
1939