1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73 #include <mach/mig.h>
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <mach/message.h>
77 #include <mach/mig_errors.h>
78 #include <mach/mach_notify.h>
79 #include <mach/ndr.h>
80 #include <mach/vm_param.h>
81
82 #include <mach/mach_vm_server.h>
83 #include <mach/mach_port_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/clock_server.h>
87 #include <mach/memory_entry_server.h>
88 #include <mach/processor_server.h>
89 #include <mach/processor_set_server.h>
90 #include <mach/task_server.h>
91 #include <mach/mach_voucher_server.h>
92 #ifdef VM32_SUPPORT
93 #include <mach/vm32_map_server.h>
94 #endif
95 #include <mach/thread_act_server.h>
96 #include <mach/restartable_server.h>
97
98 #include <mach/exc_server.h>
99 #include <mach/mach_exc_server.h>
100 #include <mach/mach_eventlink_server.h>
101
102 #include <device/device_types.h>
103 #include <device/device_server.h>
104
105 #if CONFIG_USER_NOTIFICATION
106 #include <UserNotification/UNDReplyServer.h>
107 #endif
108
109 #if CONFIG_ARCADE
110 #include <mach/arcade_register_server.h>
111 #endif
112
113 #if CONFIG_AUDIT
114 #include <kern/audit_sessionport.h>
115 #endif
116
117 #include <kern/counter.h>
118 #include <kern/ipc_tt.h>
119 #include <kern/ipc_mig.h>
120 #include <kern/ipc_misc.h>
121 #include <kern/ipc_kobject.h>
122 #include <kern/host_notify.h>
123 #include <kern/misc_protos.h>
124
125 #if CONFIG_ARCADE
126 #include <kern/arcade.h>
127 #endif /* CONFIG_ARCADE */
128
129 #include <ipc/ipc_kmsg.h>
130 #include <ipc/ipc_port.h>
131 #include <ipc/ipc_voucher.h>
132 #include <kern/sync_sema.h>
133 #include <kern/work_interval.h>
134 #include <kern/task_ident.h>
135
136 #if HYPERVISOR
137 #include <kern/hv_support.h>
138 #endif
139
140 #if CONFIG_CSR
141 #include <sys/csr.h>
142 #endif
143
144 #include <vm/vm_protos.h>
145
146 #include <security/mac_mach_internal.h>
147
148 extern char *proc_name_address(void *p);
149 struct proc;
150 extern int proc_pid(struct proc *p);
151
/*
 * One slot of the MIG dispatch hash table (mig_buckets[]):
 * maps a request msgh_id to the kernel server stub that handles it.
 */
typedef struct {
	mach_msg_id_t num;      /* request msgh_id served by this slot (0 == empty) */
	mig_routine_t routine;  /* MIG server stub to invoke for this message */
	int size;               /* max reply message size, used to size the reply kmsg */
	int kobjidx;            /* index into the kobject filter bitmap, or KOBJ_IDX_NOT_SET */
} mig_hash_t;
158
static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT);         /* vestigial, no real instance */
IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
    .iko_op_no_senders = ipc_kobject_subst_once_no_senders);

/* Size of the MIG dispatch hash table (1031 is prime, spreads the identity hash). */
#define MAX_MIG_ENTRIES 1031
/* Identity hash: the msgh_id itself (mod MAX_MIG_ENTRIES) picks the bucket. */
#define MIG_HASH(x) (x)

/* Sentinel: no kobject-filter bitmap index assigned to this routine yet. */
#define KOBJ_IDX_NOT_SET (-1)

static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
/* Worst-case linear-probe displacement seen while building the table. */
static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
SECURITY_READ_ONLY_LATE(int) mach_kobj_count;   /* count of total number of kobjects */

ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
    struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
176
/*
 * All MIG subsystems served by the in-kernel dispatch loop.
 * mig_init() folds every routine of every subsystem listed here
 * into the mig_buckets[] hash table at startup; the table is
 * discarded after startup (__startup_data).
 */
__startup_data
static const struct mig_subsystem *mig_e[] = {
	(const struct mig_subsystem *)&mach_vm_subsystem,
	(const struct mig_subsystem *)&mach_port_subsystem,
	(const struct mig_subsystem *)&mach_host_subsystem,
	(const struct mig_subsystem *)&host_priv_subsystem,
	(const struct mig_subsystem *)&clock_subsystem,
	(const struct mig_subsystem *)&processor_subsystem,
	(const struct mig_subsystem *)&processor_set_subsystem,
	(const struct mig_subsystem *)&is_iokit_subsystem,
	(const struct mig_subsystem *)&task_subsystem,
	(const struct mig_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_subsystem *)&mach_voucher_subsystem,
	(const struct mig_subsystem *)&memory_entry_subsystem,
	(const struct mig_subsystem *)&task_restartable_subsystem,
	(const struct mig_subsystem *)&catch_exc_subsystem,
	(const struct mig_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_subsystem *)&mach_eventlink_subsystem,
};
205
/* Per-IKOT_* registered operations; written only during startup, then read-only. */
static struct ipc_kobject_ops __security_const_late
    ipc_kobject_ops_array[IKOT_MAX_TYPE];
208
209 __startup_func
210 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)211 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
212 {
213 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
214 panic("trying to register kobject(%d) twice", ops->iko_op_type);
215 }
216 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
217 }
218
219 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)220 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
221 {
222 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
223 panic("invalid kobject type %d", ikot);
224 }
225 return &ipc_kobject_ops_array[ikot];
226 }
227
/*
 * Build the MIG dispatch hash table from mig_e[] at startup.
 *
 * Every stubbed routine of every subsystem is inserted into
 * mig_buckets[] using open addressing with linear probing;
 * mig_table_max_displ records the worst probe distance so that
 * lookups (find_mig_hash_entry) know when to give up.
 * Panics on invalid subsystem ranges, oversized replies,
 * duplicate msgh_ids, or table overflow.
 */
__startup_func
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/* replies must fit in a safe kalloc allocation plus trailer */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->routine[j].stub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/* linear probe until an empty bucket is found */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine;
				/* per-routine reply size when known, else subsystem max */
				if (mig_e[i]->routine[j].max_reply_msg) {
					mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg;
				} else {
					mig_buckets[pos].size = mig_e[i]->maxsize;
				}

				/* filter index is assigned later via ipc_kobject_set_kobjidx() */
				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				if (mig_table_max_displ < howmany) {
					mig_table_max_displ = howmany;
				}
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
289
/*
 * Do a hash table lookup for the given msgh_id. Returns NULL
 * if not found.
 *
 * Probes at most mig_table_max_displ buckets (the worst-case
 * displacement recorded by mig_init()), so a miss terminates
 * quickly even in a mostly-full table.
 */
static mig_hash_t *
find_mig_hash_entry(int msgh_id)
{
	unsigned int i = (unsigned int)MIG_HASH(msgh_id);
	int max_iter = mig_table_max_displ;
	mig_hash_t *ptr;

	/* stop on a match, on an empty bucket, or after max_iter probes */
	do {
		ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
	} while (msgh_id != ptr->num && ptr->num && --max_iter);

	/* the loop can also stop on an empty/expired slot: re-verify the hit */
	if (!ptr->routine || msgh_id != ptr->num) {
		ptr = (mig_hash_t *)0;
	}

	return ptr;
}
311
312 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t kmsg)313 ipc_kobject_reply_status(ipc_kmsg_t kmsg)
314 {
315 mach_msg_header_t *hdr = ikm_header(kmsg);
316
317 if (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
318 return KERN_SUCCESS;
319 }
320
321 return ((mig_reply_error_t *)hdr)->RetCode;
322 }
323
324 /*
325 * Routine: ipc_kobject_set_kobjidx
326 * Purpose:
327 * Set the index for the kobject filter
328 * mask for a given message ID.
329 */
330 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)331 ipc_kobject_set_kobjidx(
332 int msgh_id,
333 int index)
334 {
335 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
336
337 if (ptr == (mig_hash_t *)0) {
338 return KERN_INVALID_ARGUMENT;
339 }
340
341 assert(index < mach_kobj_count);
342 ptr->kobjidx = index;
343
344 return KERN_SUCCESS;
345 }
346
347 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)348 ipc_kobject_init_reply(
349 ipc_kmsg_t reply,
350 const ipc_kmsg_t request,
351 kern_return_t kr)
352 {
353 mach_msg_header_t *req_hdr = ikm_header(request);
354 mach_msg_header_t *reply_hdr = ikm_header(reply);
355
356 #define InP ((mach_msg_header_t *) req_hdr)
357 #define OutP ((mig_reply_error_t *) reply_hdr)
358
359 OutP->NDR = NDR_record;
360 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
361
362 OutP->Head.msgh_bits =
363 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
364 OutP->Head.msgh_remote_port = InP->msgh_local_port;
365 OutP->Head.msgh_local_port = MACH_PORT_NULL;
366 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
367 OutP->Head.msgh_id = InP->msgh_id + 100;
368
369 OutP->RetCode = kr;
370 #undef InP
371 #undef OutP
372 }
373
/*
 * Routine: ipc_kobject_server_internal
 * Purpose:
 *	Handle a message sent to the kernel.
 *	Generates a reply message.
 *	Version for Untyped IPC.
 *
 *	Looks up the MIG routine for the request's msgh_id, optionally
 *	checks the caller's kobject filter policy (CONFIG_MACF), invokes
 *	the routine, and post-processes the reply:
 *	- if the target task's exec token changed during the call, the
 *	  reply is replaced with a KERN_INVALID_TASK error reply;
 *	- if the routine returned MIG_NO_REPLY, no reply is produced
 *	  (the routine keeps the reply right and answers later).
 * Conditions:
 *	Nothing locked.
 */
static kern_return_t
ipc_kobject_server_internal(
	ipc_port_t          port,
	ipc_kmsg_t          request,
	ipc_kmsg_t          *replyp)
{
	int request_msgh_id;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_size_t reply_size;
	bool exec_token_changed = false;
	mig_hash_t *ptr;
	mach_msg_header_t *req_hdr, *reply_hdr, *new_reply_hdr;

	req_hdr = ikm_header(request);
	request_msgh_id = req_hdr->msgh_id;

	/* Find corresponding mig_hash entry, if any */
	ptr = find_mig_hash_entry(request_msgh_id);

	/*
	 * Get the reply_size. Unknown message ids only ever get a
	 * bare mig_reply_error_t (MIG_BAD_ID) back.
	 */
	if (ptr == (mig_hash_t *)0) {
		reply_size = sizeof(mig_reply_error_t);
	} else {
		reply_size = ptr->size;
	}

	/*
	 * MIG should really assure no data leakage -
	 * but until it does, pessimistically zero the
	 * whole reply buffer.
	 */
	reply = ipc_kmsg_alloc(reply_size, 0, 0,
	    IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
	ipc_kobject_init_reply(reply, request, KERN_SUCCESS);
	reply_hdr = ikm_header(reply);

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	if (ptr) {
		thread_ro_t tro = current_thread_ro();
		task_t curtask = tro->tro_task;
		struct proc *curproc = tro->tro_proc;
		task_t task = TASK_NULL;
		uint32_t exec_token;

		/*
		 * Check if the port is a task port, if its a task port then
		 * snapshot the task exec token before the mig routine call.
		 * (The caller's own task port is exempt from the check.)
		 */
		if (ip_kotype(port) == IKOT_TASK_CONTROL && port != curtask->itk_self) {
			task = convert_port_to_task_with_exec_token(port, &exec_token);
		}

#if CONFIG_MACF
		int idx = ptr->kobjidx;
		uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);

		/* Check kobject mig filter mask, if exists. */
		if (filter_mask != NULL &&
		    idx != KOBJ_IDX_NOT_SET &&
		    !bitstr_test(filter_mask, idx) &&
		    mac_task_kobj_msg_evaluate != NULL) {
			/* Not in filter mask, evaluate policy. */
			kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
			    request_msgh_id, idx);
			if (kr != KERN_SUCCESS) {
				/* policy denied the call: return its error, skip the routine */
				((mig_reply_error_t *)reply_hdr)->RetCode = kr;
				goto skip_kobjcall;
			}
		}
#endif /* CONFIG_MACF */

		/*
		 * NOTE(review): `idx` is only declared under CONFIG_MACF; the
		 * trace macros below presumably expand to nothing when kobject
		 * server tracing is disabled — confirm against their definition.
		 */
		__BeforeKobjectServerTrace(idx);
		(*ptr->routine)(req_hdr, reply_hdr);
		__AfterKobjectServerTrace(idx);

#if CONFIG_MACF
skip_kobjcall:
#endif

		/* Check if the exec token changed during the mig routine */
		if (task != TASK_NULL) {
			if (exec_token != task->exec_token) {
				exec_token_changed = true;
			}
			task_deallocate(task);
		}

		counter_inc(&kernel_task->messages_received);
	} else {
#if DEVELOPMENT || DEBUG
		printf("ipc_kobject_server: bogus kernel message, id=%d\n",
		    req_hdr->msgh_id);
#endif  /* DEVELOPMENT || DEBUG */
		_MIG_MSGID_INVALID(req_hdr->msgh_id);

		((mig_reply_error_t *)reply_hdr)->RetCode = MIG_BAD_ID;
	}

	/* Fail the MIG call if the task exec token changed during the call */
	if (exec_token_changed && ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * Create a new reply msg with error and destroy the old reply msg.
		 */
		ipc_kmsg_t new_reply = ipc_kmsg_alloc(sizeof(mig_reply_error_t),
		    0, 0, IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO |
		    IPC_KMSG_ALLOC_NOFAIL);
		new_reply_hdr = ikm_header(new_reply);

		/*
		 * Initialize the new reply message, copying the header fields
		 * (including the reply-port destination) from the old reply.
		 */
		{
#define OutP_new        ((mig_reply_error_t *) new_reply_hdr)
#define OutP_old        ((mig_reply_error_t *) reply_hdr)

			OutP_new->NDR = OutP_old->NDR;
			OutP_new->Head.msgh_size = sizeof(mig_reply_error_t);
			OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
			OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port;
			OutP_new->Head.msgh_local_port = MACH_PORT_NULL;
			OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL;
			OutP_new->Head.msgh_id = OutP_old->Head.msgh_id;

			/* Set the error as KERN_INVALID_TASK */
			OutP_new->RetCode = KERN_INVALID_TASK;

#undef OutP_new
#undef OutP_old
		}

		/*
		 * Destroy everything in reply except the reply port right,
		 * which is needed in the new reply message.
		 */
		ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_SKIP_REMOTE | IPC_KMSG_DESTROY_NOT_SIGNED);
		reply = new_reply;
	} else if (ipc_kobject_reply_status(reply) == MIG_NO_REPLY) {
		/*
		 * The server function will send a reply message
		 * using the reply port right, which it has saved.
		 */
		ipc_kmsg_free(reply);
		reply = IKM_NULL;
	}

	*replyp = reply;
	return KERN_SUCCESS;
}
534
535
/*
 * Routine: ipc_kobject_server
 * Purpose:
 *	Handle a message sent to the kernel.
 *	Generates a reply message.
 *	Version for Untyped IPC.
 *
 *	Ownership of the incoming rights (from the request)
 *	is transferred on success (whether a reply is made or not).
 *
 * Conditions:
 *	Nothing locked.
 */
ipc_kmsg_t
ipc_kobject_server(
	ipc_port_t          port,
	ipc_kmsg_t          request,
	mach_msg_option_t   option __unused)
{
	mach_msg_header_t *req_hdr = ikm_header(request);
#if DEVELOPMENT || DEBUG
	const int request_msgh_id = req_hdr->msgh_id;
#endif
	ipc_port_t request_voucher_port;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_header_t *reply_hdr;
	kern_return_t kr;

	ipc_kmsg_trace_send(request, option);

	/* user-extension objects have their own dispatcher */
	if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
		kr = uext_server(port, request, &reply);
	} else {
		kr = ipc_kobject_server_internal(port, request, &reply);
	}

	if (kr != KERN_SUCCESS) {
		assert(kr != MACH_SEND_TIMED_OUT &&
		    kr != MACH_SEND_INTERRUPTED &&
		    kr != MACH_SEND_INVALID_DEST);
		assert(reply == IKM_NULL);

		/* convert the server error into a MIG error */
		reply = ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0, 0,
		    IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
		ipc_kobject_init_reply(reply, request, kr);
	}

	counter_inc(&kernel_task->messages_sent);
	/*
	 * Destroy destination. The following code differs from
	 * ipc_object_destroy in that we release the send-once
	 * right instead of generating a send-once notification
	 * (which would bring us here again, creating a loop).
	 * It also differs in that we only expect send or
	 * send-once rights, never receive rights.
	 */
	switch (MACH_MSGH_BITS_REMOTE(req_hdr->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(req_hdr->msgh_remote_port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ipc_port_release_sonce(req_hdr->msgh_remote_port);
		break;

	default:
		panic("ipc_kobject_server: strange destination rights");
	}

	/*
	 * Destroy voucher. The kernel MIG servers never take ownership
	 * of vouchers sent in messages. Swallow any such rights here.
	 */
	request_voucher_port = ipc_kmsg_get_voucher_port(request);
	if (IP_VALID(request_voucher_port)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		    MACH_MSGH_BITS_VOUCHER(req_hdr->msgh_bits));
		ipc_port_release_send(request_voucher_port);
		ipc_kmsg_clear_voucher_port(request);
	}

	if (reply == IKM_NULL ||
	    ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * The server function is responsible for the contents
		 * of the message. The reply port right is moved
		 * to the reply message, and we have deallocated
		 * the destination port right, so we just need
		 * to free the kmsg.
		 */
		ipc_kmsg_free(request);
	} else {
		/*
		 * The message contents of the request are intact.
		 * Remote port has been released above. Do not destroy
		 * the reply port right either, which is needed in the reply message.
		 */
		ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
	}

	if (reply != IKM_NULL) {
		reply_hdr = ikm_header(reply);
		ipc_port_t reply_port = reply_hdr->msgh_remote_port;

		if (!IP_VALID(reply_port)) {
			/*
			 * Can't queue the reply message if the destination
			 * (the reply port) isn't valid.
			 */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		} else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
			/* do not lock reply port, use raw pointer comparison */

			/*
			 * Don't send replies to kobject kernel ports.
			 */
#if DEVELOPMENT || DEBUG
			printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
			    __func__, ip_kotype(reply_port), request_msgh_id);
#endif  /* DEVELOPMENT || DEBUG */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		}
	}

	return reply;
}
665
/*
 * Store a kobject pointer into a port, signing it with pointer
 * authentication (when available) using a discriminator blended
 * from the storage address and the kobject type, so the pointer
 * cannot be replayed into another port or as another type.
 * No validation is performed; callers hold whatever locks the
 * port state requires.
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	/* NULL is stored unsigned so that a zero check stays cheap */
	if (kobject) {
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_sign_unauthenticated(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
687
/*
 * Associate (kobject, type) with a port: the type is OR-ed into the
 * object bits and the (signed) kobject pointer is stored.
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type);
	ipc_kobject_set_raw(port, kobject, type);
}
698
/*
 * Routine: ipc_kobject_get_raw
 * Purpose:
 *	Returns the kobject pointer of a specified port.
 *
 *	This returns the current value of the kobject pointer,
 *	without any validation (the caller is expected to do
 *	the validation it needs).
 *
 *	With pointer authentication, the stored pointer is
 *	authenticated against the same (address, type) blended
 *	discriminator used by ipc_kobject_set_raw(); a forged or
 *	mismatched pointer traps here.
 *
 * Conditions:
 *	The port is a kobject of the proper type.
 */
__header_always_inline ipc_kobject_t
ipc_kobject_get_raw(
	ipc_port_t          port,
	ipc_kobject_type_t  type)
{
	uintptr_t *store = &port->ip_kobject;
	ipc_kobject_t kobject = (ipc_kobject_t)*store;

#if __has_feature(ptrauth_calls)
	/* NULL was stored unsigned; only authenticate real pointers */
	if (kobject) {
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_auth_data(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	return kobject;
}
732
733 __abortlike
734 static void
ipc_kobject_require_panic(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)735 ipc_kobject_require_panic(
736 ipc_port_t port,
737 ipc_kobject_t kobject,
738 ipc_kobject_type_t kotype)
739 {
740 if (ip_kotype(port) != kotype) {
741 panic("port %p: invalid kobject type, got %d wanted %d",
742 port, ip_kotype(port), kotype);
743 }
744 panic("port %p: invalid kobject, got %p wanted %p",
745 port, ipc_kobject_get_raw(port, kotype), kobject);
746 }
747
748 __header_always_inline void
ipc_kobject_require(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)749 ipc_kobject_require(
750 ipc_port_t port,
751 ipc_kobject_t kobject,
752 ipc_kobject_type_t kotype)
753 {
754 ipc_kobject_t cur;
755
756 if (__improbable(ip_kotype(port) != kotype)) {
757 ipc_kobject_require_panic(port, kobject, kotype);
758 }
759 cur = ipc_kobject_get_raw(port, kotype);
760 if (cur && cur != kobject) {
761 ipc_kobject_require_panic(port, kobject, kotype);
762 }
763 }
764
765 /*
766 * Routine: ipc_kobject_get_locked
767 * Purpose:
768 * Returns the kobject pointer of a specified port,
769 * for an expected type.
770 *
771 * Returns IKO_NULL if the port isn't active.
772 *
773 * This function may be used when:
774 * - the port lock is held
775 * - the kobject association stays while there
776 * are any outstanding rights.
777 *
778 * Conditions:
779 * The port is a kobject of the proper type.
780 */
781 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)782 ipc_kobject_get_locked(
783 ipc_port_t port,
784 ipc_kobject_type_t type)
785 {
786 ipc_kobject_t kobject = IKO_NULL;
787
788 if (ip_active(port) && type == ip_kotype(port)) {
789 kobject = ipc_kobject_get_raw(port, type);
790 }
791
792 return kobject;
793 }
794
/*
 * Routine: ipc_kobject_get_stable
 * Purpose:
 *	Returns the kobject pointer of a specified port,
 *	for an expected type, for types where the port/kobject
 *	association is permanent (iko_op_stable), so no port
 *	lock is needed.
 *
 *	Returns IKO_NULL if the port isn't active.
 *
 * Conditions:
 *	The port is a kobject of the proper type.
 */
ipc_kobject_t
ipc_kobject_get_stable(
	ipc_port_t          port,
	ipc_kobject_type_t  type)
{
	assert(ipc_kobject_ops_get(type)->iko_op_stable);
	return ipc_kobject_get_locked(port, type);
}
815
/*
 * Routine: ipc_kobject_init_port
 * Purpose:
 *	Initialize a kobject port with the given types and options.
 *
 *	This function never fails.
 */
static inline void
ipc_kobject_init_port(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type,
	ipc_kobject_alloc_options_t options)
{
	ipc_kobject_set_internal(port, kobject, type);

	if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
		ipc_port_make_send_any_locked(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
		/*
		 * Arm the no-senders notification; the port reference taken
		 * here is consumed when the notification is processed
		 * (see ipc_kobject_dealloc_port usage below).
		 */
		port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
		ip_reference(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
		port->ip_no_grant = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
		port->ip_immovable_send = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_PINNED) {
		port->ip_pinned = 1;
	}
}
849
850 /*
851 * Routine: ipc_kobject_alloc_port
852 * Purpose:
853 * Allocate a kobject port in the kernel space of the specified type.
854 *
855 * This function never fails.
856 *
857 * Conditions:
858 * No locks held (memory is allocated)
859 */
860 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)861 ipc_kobject_alloc_port(
862 ipc_kobject_t kobject,
863 ipc_kobject_type_t type,
864 ipc_kobject_alloc_options_t options)
865 {
866 ipc_port_t port;
867
868 port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
869 if (port == IP_NULL) {
870 panic("ipc_kobject_alloc_port(): failed to allocate port");
871 }
872
873 ipc_kobject_init_port(port, kobject, type, options);
874 return port;
875 }
876
877 /*
878 * Routine: ipc_kobject_alloc_labeled_port
879 * Purpose:
880 * Allocate a kobject port and associated mandatory access label
881 * in the kernel space of the specified type.
882 *
883 * This function never fails.
884 *
885 * Conditions:
886 * No locks held (memory is allocated)
887 */
888
889 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)890 ipc_kobject_alloc_labeled_port(
891 ipc_kobject_t kobject,
892 ipc_kobject_type_t type,
893 ipc_label_t label,
894 ipc_kobject_alloc_options_t options)
895 {
896 ipc_port_t port;
897
898 port = ipc_kobject_alloc_port(kobject, type, options);
899
900 ipc_port_set_label(port, label);
901
902 return port;
903 }
904
905 static void
ipc_kobject_subst_once_no_senders(ipc_port_t port,mach_port_mscount_t mscount)906 ipc_kobject_subst_once_no_senders(
907 ipc_port_t port,
908 mach_port_mscount_t mscount)
909 {
910 ipc_port_t ko_port;
911
912 ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
913
914 if (ko_port) {
915 /*
916 * Clean up the right if the wrapper wasn't hollowed out
917 * by ipc_kobject_alloc_subst_once().
918 */
919 ipc_port_release_send(ko_port);
920 }
921 }
922
923 /*
924 * Routine: ipc_kobject_alloc_subst_once
925 * Purpose:
926 * Make a port that will be substituted by the kolabel
927 * rules once, preventing the next substitution (of its target)
928 * to happen if any.
929 *
930 * Returns:
931 * A port with a send right, that will substitute to its "kobject".
932 *
933 * Conditions:
934 * No locks held (memory is allocated).
935 *
936 * `target` holds a send-right donated to this function,
937 * consumed in ipc_kobject_subst_once_no_senders().
938 */
939 ipc_port_t
ipc_kobject_alloc_subst_once(ipc_port_t target)940 ipc_kobject_alloc_subst_once(
941 ipc_port_t target)
942 {
943 if (!IP_VALID(target)) {
944 return target;
945 }
946 return ipc_kobject_alloc_labeled_port(target,
947 IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
948 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
949 }
950
/*
 * Routine: ipc_kobject_make_send_lazy_alloc_port
 * Purpose:
 *	Make a send right for a kobject port.
 *
 *	A location owning this port is passed in port_store.
 *	If no port exists, a port is made lazily.
 *
 *	A send right is made for the port, and if this is the first one
 *	(possibly not for the first time), then the no-more-senders
 *	notification is rearmed.
 *
 *	When a notification is armed, the kobject must donate
 *	one of its references to the port. It is expected
 *	the no-more-senders notification will consume this reference.
 *
 * Returns:
 *	TRUE if a notification was armed
 *	FALSE else
 *
 * Conditions:
 *	Nothing is locked, memory can be allocated.
 *	The caller must be able to donate a kobject reference to the port.
 */
bool
ipc_kobject_make_send_lazy_alloc_port(
	ipc_port_t          *port_store,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type,
	ipc_kobject_alloc_options_t  alloc_opts)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	alloc_opts |= IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST;
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		/* speculatively allocate, then try to publish atomically */
		port = ipc_kobject_alloc_port(kobject, type, alloc_opts);

		if (os_atomic_cmpxchgv(port_store,
		    IP_NULL, port, &previous, release)) {
			/* we won the race: port published, notification armed */
			return TRUE;
		}

		/*
		 * Lost the race: another thread published a port first.
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		port = previous;
	}

	/* make the send right on the (pre-)existing port, rearming if needed */
	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1014
/*
 * Routine: ipc_kobject_make_send_lazy_alloc_labeled_port
 * Purpose:
 *	Make a send right for a labeled kobject port.
 *	(Labeled variant of ipc_kobject_make_send_lazy_alloc_port.)
 *
 *	A location owning this port is passed in port_store.
 *	If no port exists, a port is made lazily.
 *
 *	A send right is made for the port, and if this is the first one
 *	(possibly not for the first time), then the no-more-senders
 *	notification is rearmed.
 *
 *	When a notification is armed, the kobject must donate
 *	one of its references to the port. It is expected
 *	the no-more-senders notification will consume this reference.
 *
 * Returns:
 *	TRUE if a notification was armed
 *	FALSE else
 *
 * Conditions:
 *	Nothing is locked, memory can be allocated.
 *	The caller must be able to donate a kobject reference to the port.
 */
boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(
	ipc_port_t           *port_store,
	ipc_kobject_t        kobject,
	ipc_kobject_type_t   type,
	ipc_label_t          label)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		/* speculatively allocate, then try to publish atomically */
		port = ipc_kobject_alloc_labeled_port(kobject, type, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
			/* we won the race: port published, notification armed */
			return TRUE;
		}

		/*
		 * Lost the race: another thread published a port first.
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		port = previous;
		/* the winner must also have published a labeled port */
		assert(ip_is_kolabeled(port));
	}

	/* make the send right on the (pre-)existing port, rearming if needed */
	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1077
1078 /*
1079 * Routine: ipc_kobject_nsrequest_locked
1080 * Purpose:
1081 * Arm the no-senders notification for the given kobject
1082 * if it doesn't have one armed yet.
1083 *
1084 * Conditions:
1085 * Port is locked and active.
1086 *
1087 * Returns:
1088 * KERN_SUCCESS: the notification was armed
1089 * KERN_ALREADY_WAITING: the notification was already armed
1090 * KERN_FAILURE: the notification would fire immediately
1091 */
1092 static inline kern_return_t
ipc_kobject_nsrequest_locked(ipc_port_t port,mach_port_mscount_t sync)1093 ipc_kobject_nsrequest_locked(
1094 ipc_port_t port,
1095 mach_port_mscount_t sync)
1096 {
1097 if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
1098 return KERN_ALREADY_WAITING;
1099 }
1100
1101 if (port->ip_srights == 0 && sync <= port->ip_mscount) {
1102 return KERN_FAILURE;
1103 }
1104
1105 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
1106 ip_reference(port);
1107 return KERN_SUCCESS;
1108 }
1109
1110
1111 /*
1112 * Routine: ipc_kobject_nsrequest
1113 * Purpose:
1114 * Arm the no-senders notification for the given kobject
1115 * if it doesn't have one armed yet.
1116 *
1117 * Returns:
1118 * KERN_SUCCESS: the notification was armed
1119 * KERN_ALREADY_WAITING: the notification was already armed
1120 * KERN_FAILURE: the notification would fire immediately
1121 * KERN_INVALID_RIGHT: the port is dead
1122 */
1123 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1124 ipc_kobject_nsrequest(
1125 ipc_port_t port,
1126 mach_port_mscount_t sync,
1127 mach_port_mscount_t *mscount)
1128 {
1129 kern_return_t kr = KERN_INVALID_RIGHT;
1130
1131 if (IP_VALID(port)) {
1132 ip_mq_lock(port);
1133
1134 if (mscount) {
1135 *mscount = port->ip_mscount;
1136 }
1137 if (ip_active(port)) {
1138 kr = ipc_kobject_nsrequest_locked(port, sync);
1139 }
1140
1141 ip_mq_unlock(port);
1142 } else if (mscount) {
1143 *mscount = 0;
1144 }
1145
1146 return kr;
1147 }
1148
1149 ipc_port_t
ipc_kobject_copy_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1150 ipc_kobject_copy_send(
1151 ipc_port_t port,
1152 ipc_kobject_t kobject,
1153 ipc_kobject_type_t kotype)
1154 {
1155 ipc_port_t sright = port;
1156
1157 if (IP_VALID(port)) {
1158 ip_mq_lock(port);
1159 if (ip_active(port)) {
1160 ipc_kobject_require(port, kobject, kotype);
1161 ipc_port_copy_send_any_locked(port);
1162 } else {
1163 sright = IP_DEAD;
1164 }
1165 ip_mq_unlock(port);
1166 }
1167
1168 return sright;
1169 }
1170
1171 ipc_port_t
ipc_kobject_make_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1172 ipc_kobject_make_send(
1173 ipc_port_t port,
1174 ipc_kobject_t kobject,
1175 ipc_kobject_type_t kotype)
1176 {
1177 ipc_port_t sright = port;
1178
1179 if (IP_VALID(port)) {
1180 ip_mq_lock(port);
1181 if (ip_active(port)) {
1182 ipc_kobject_require(port, kobject, kotype);
1183 ipc_port_make_send_any_locked(port);
1184 } else {
1185 sright = IP_DEAD;
1186 }
1187 ip_mq_unlock(port);
1188 }
1189
1190 return sright;
1191 }
1192
1193 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1194 ipc_kobject_make_send_nsrequest(
1195 ipc_port_t port,
1196 ipc_kobject_t kobject,
1197 ipc_kobject_type_t kotype)
1198 {
1199 kern_return_t kr = KERN_INVALID_RIGHT;
1200
1201 if (IP_VALID(port)) {
1202 ip_mq_lock(port);
1203 if (ip_active(port)) {
1204 ipc_kobject_require(port, kobject, kotype);
1205 ipc_port_make_send_any_locked(port);
1206 kr = ipc_kobject_nsrequest_locked(port, 0);
1207 assert(kr != KERN_FAILURE);
1208 }
1209 ip_mq_unlock(port);
1210 }
1211
1212 return kr;
1213 }
1214
1215 static inline ipc_kobject_t
ipc_kobject_disable_internal(ipc_port_t port,ipc_kobject_type_t type)1216 ipc_kobject_disable_internal(
1217 ipc_port_t port,
1218 ipc_kobject_type_t type)
1219 {
1220 ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1221
1222 port->ip_kobject = 0;
1223 if (ip_is_kolabeled(port)) {
1224 port->ip_kolabel->ikol_alt_port = IP_NULL;
1225 }
1226
1227 return kobject;
1228 }
1229
1230 /*
1231 * Routine: ipc_kobject_dealloc_port_and_unlock
1232 * Purpose:
1233 * Destroys a port allocated with any of the ipc_kobject_alloc*
1234 * functions.
1235 *
1236 * This will atomically:
1237 * - make the port inactive,
1238 * - optionally check the make send count
1239 * - disable (nil-out) the kobject pointer for kobjects without
1240 * a destroy callback.
1241 *
1242 * The port will retain its kobject-ness and kobject type.
1243 *
1244 *
1245 * Returns:
1246 * The kobject pointer that was set prior to this call
1247 * (possibly NULL if the kobject was already disabled).
1248 *
1249 * Conditions:
1250 * The port is active and locked.
1251 * On return the port is inactive and unlocked.
1252 */
/* Out-of-line panic: port being deallocated has a mismatched kobject type. */
__abortlike
static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
{
	panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
}
1259
/* Out-of-line panic: the port's make-send count doesn't match the caller's. */
__abortlike
static void
__ipc_kobject_dealloc_bad_mscount_panic(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	panic("unexpected make-send count: %p[%d], %d, %d",
	    port, type, port->ip_mscount, mscount);
}
1270
/* Out-of-line panic: send rights still exist when none were expected. */
__abortlike
static void
__ipc_kobject_dealloc_bad_srights_panic(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	panic("unexpected send right count: %p[%d], %d",
	    port, type, port->ip_srights);
}
1280
1281 ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1282 ipc_kobject_dealloc_port_and_unlock(
1283 ipc_port_t port,
1284 mach_port_mscount_t mscount,
1285 ipc_kobject_type_t type)
1286 {
1287 ipc_kobject_t kobject = IKO_NULL;
1288 ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);
1289
1290 require_ip_active(port);
1291
1292 if (ip_kotype(port) != type) {
1293 __ipc_kobject_dealloc_bad_type_panic(port, type);
1294 }
1295
1296 if (mscount && port->ip_mscount != mscount) {
1297 __ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
1298 }
1299 if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
1300 __ipc_kobject_dealloc_bad_srights_panic(port, type);
1301 }
1302
1303 if (!ops->iko_op_destroy) {
1304 kobject = ipc_kobject_disable_internal(port, type);
1305 }
1306
1307 ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);
1308
1309 return kobject;
1310 }
1311
1312 /*
1313 * Routine: ipc_kobject_dealloc_port
1314 * Purpose:
1315 * Destroys a port allocated with any of the ipc_kobject_alloc*
1316 * functions.
1317 *
1318 * This will atomically:
1319 * - make the port inactive,
1320 * - optionally check the make send count
1321 * - disable (nil-out) the kobject pointer for kobjects without
1322 * a destroy callback.
1323 *
1324 * The port will retain its kobject-ness and kobject type.
1325 *
1326 *
1327 * Returns:
1328 * The kobject pointer that was set prior to this call
1329 * (possibly NULL if the kobject was already disabled).
1330 *
1331 * Conditions:
1332 * Nothing is locked.
1333 * The port is active.
1334 * On return the port is inactive.
1335 */
1336 ipc_kobject_t
ipc_kobject_dealloc_port(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1337 ipc_kobject_dealloc_port(
1338 ipc_port_t port,
1339 mach_port_mscount_t mscount,
1340 ipc_kobject_type_t type)
1341 {
1342 ip_mq_lock(port);
1343 return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
1344 }
1345
1346 /*
1347 * Routine: ipc_kobject_enable
1348 * Purpose:
1349 * Make a port represent a kernel object of the given type.
1350 * The caller is responsible for handling refs for the
1351 * kernel object, if necessary.
1352 * Conditions:
1353 * Nothing locked.
1354 * The port must be active.
1355 */
1356 void
ipc_kobject_enable(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1357 ipc_kobject_enable(
1358 ipc_port_t port,
1359 ipc_kobject_t kobject,
1360 ipc_kobject_type_t type)
1361 {
1362 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1363
1364 ip_mq_lock(port);
1365 require_ip_active(port);
1366
1367 if (type != ip_kotype(port)) {
1368 panic("%s: unexpected kotype of port %p: want %d, got %d",
1369 __func__, port, type, ip_kotype(port));
1370 }
1371
1372 ipc_kobject_set_raw(port, kobject, type);
1373
1374 ip_mq_unlock(port);
1375 }
1376
1377 /*
1378 * Routine: ipc_kobject_disable_locked
1379 * Purpose:
1380 * Clear the kobject pointer for a port.
1381 * Conditions:
1382 * The port is locked.
1383 * Returns the current kobject pointer.
1384 */
ipc_kobject_t
ipc_kobject_disable_locked(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	/* an active port may only be disabled for non-"stable" types */
	if (ip_active(port)) {
		assert(!ipc_kobject_ops_get(type)->iko_op_stable);
	}

	/* guard against type confusion before clearing the pointer */
	if (ip_kotype(port) != type) {
		panic("port %p of type %d, expecting %d",
		    port, ip_kotype(port), type);
	}

	return ipc_kobject_disable_internal(port, type);
}
1401
1402 /*
1403 * Routine: ipc_kobject_disable
1404 * Purpose:
1405 * Clear the kobject pointer for a port.
1406 * Conditions:
1407 * Nothing locked.
1408 * Returns the current kobject pointer.
1409 */
1410 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1411 ipc_kobject_disable(
1412 ipc_port_t port,
1413 ipc_kobject_type_t type)
1414 {
1415 ipc_kobject_t kobject;
1416
1417 ip_mq_lock(port);
1418 kobject = ipc_kobject_disable_locked(port, type);
1419 ip_mq_unlock(port);
1420
1421 return kobject;
1422 }
1423
1424 /*
1425 * Routine: ipc_kobject_upgrade_mktimer_locked
1426 * Purpose:
1427 * Upgrades a port to mktimer kobject status
1428 *
1429 * This pattern is rather bad as it leads to various
1430 * confusions that need to be special cased with kobject-ness
1431 * of ports. No new port with dual kobject/message-queue
1432 * semantics should be made ever.
1433 *
1434 * Conditions:
1435 * Port is locked
1436 */
void
ipc_kobject_upgrade_mktimer_locked(
	ipc_port_t              port,
	ipc_kobject_t           kobject)
{
	/* tag the existing port as an IKOT_TIMER kobject in place;
	 * see the block comment above about dual-semantics ports */
	ipc_kobject_set_internal(port, kobject, IKOT_TIMER);
}
1444
1445 /*
1446 * Routine: ipc_kobject_notify_no_senders
1447 * Purpose:
1448 * Handles a no-senders notification
1449 * sent to a kobject.
1450 *
1451 * A port reference is consumed.
1452 *
1453 * Conditions:
1454 * Nothing locked.
1455 */
void
ipc_kobject_notify_no_senders(
	ipc_port_t              port,
	mach_port_mscount_t     mscount)
{
	/* dispatch to the per-type callback; any kobject type that arms
	 * no-senders notifications must provide one */
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));

	assert(ops->iko_op_no_senders);
	ops->iko_op_no_senders(port, mscount);

	/* consume the ref ipc_notify_no_senders_prepare left */
	ip_release(port);
}
1469
/*
 *	Routine:	ipc_kobject_notify_send_once_and_unlock
 *	Purpose:
 *		Handles a send-once notification
 *		sent to a kobject.
 *
 *		A send-once port reference is consumed.
 *
 *	Conditions:
 *		Port is locked.
 */
1481 void
ipc_kobject_notify_send_once_and_unlock(ipc_port_t port)1482 ipc_kobject_notify_send_once_and_unlock(
1483 ipc_port_t port)
1484 {
1485 /*
1486 * drop the send once right while we hold the port lock.
1487 * we will keep a port reference while we run the possible
1488 * callouts to kobjects.
1489 *
1490 * This a simplified version of ipc_port_release_sonce()
1491 * since kobjects can't be special reply ports.
1492 */
1493 assert(!port->ip_specialreply);
1494
1495 if (port->ip_sorights == 0) {
1496 panic("Over-release of port %p send-once right!", port);
1497 }
1498
1499 port->ip_sorights--;
1500 ip_mq_unlock(port);
1501
1502 /*
1503 * because there's very few consumers,
1504 * the code here isn't generic as it's really not worth it.
1505 */
1506 switch (ip_kotype(port)) {
1507 case IKOT_TASK_RESUME:
1508 task_suspension_send_once(port);
1509 break;
1510 default:
1511 break;
1512 }
1513
1514 ip_release(port);
1515 }
1516
1517
1518 /*
1519 * Routine: ipc_kobject_destroy
1520 * Purpose:
1521 * Release any kernel object resources associated
1522 * with the port, which is being destroyed.
1523 *
1524 * This path to free object resources should only be
1525 * needed when resources are associated with a user's port.
1526 * In the normal case, when the kernel is the receiver,
1527 * the code calling ipc_kobject_dealloc_port() should clean
1528 * up the object resources.
1529 *
1530 * Cleans up any kobject label that might be present.
1531 * Conditions:
1532 * The port is not locked, but it is dead.
1533 */
1534 void
ipc_kobject_destroy(ipc_port_t port)1535 ipc_kobject_destroy(
1536 ipc_port_t port)
1537 {
1538 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1539
1540 if (ops->iko_op_permanent) {
1541 panic("trying to destroy an permanent port %p", port);
1542 }
1543 if (ops->iko_op_destroy) {
1544 ops->iko_op_destroy(port);
1545 }
1546
1547 if (ip_is_kolabeled(port)) {
1548 ipc_kobject_label_t labelp = port->ip_kolabel;
1549
1550 assert(labelp != NULL);
1551 assert(labelp->ikol_alt_port == IP_NULL);
1552 assert(ip_is_kobject(port));
1553 port->ip_kolabel = NULL;
1554 io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
1555 zfree(ipc_kobject_label_zone, labelp);
1556 }
1557 }
1558
1559 /*
1560 * Routine: ipc_kobject_label_substitute_task
1561 * Purpose:
1562 * Substitute a task control port for its immovable
1563 * equivalent when the receiver is that task.
1564 * Conditions:
1565 * Space is write locked and active.
1566 * Port is locked and active.
1567 * Returns:
1568 * - IP_NULL port if no substitution is to be done
1569 * - a valid port if a substitution needs to happen
1570 */
1571 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1572 ipc_kobject_label_substitute_task(
1573 ipc_space_t space,
1574 ipc_kobject_label_t kolabel,
1575 ipc_port_t port)
1576 {
1577 ipc_port_t subst = IP_NULL;
1578 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1579
1580 if (task != TASK_NULL && task == space->is_task) {
1581 if ((subst = kolabel->ikol_alt_port)) {
1582 return subst;
1583 }
1584 }
1585
1586 return IP_NULL;
1587 }
1588
1589 /*
1590 * Routine: ipc_kobject_label_substitute_task_read
1591 * Purpose:
1592 * Substitute a task read port for its immovable
1593 * control equivalent when the receiver is that task.
1594 * Conditions:
1595 * Space is write locked and active.
1596 * Port is locked and active.
1597 * Returns:
1598 * - IP_NULL port if no substitution is to be done
1599 * - a valid port if a substitution needs to happen
1600 */
1601 static ipc_port_t
ipc_kobject_label_substitute_task_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1602 ipc_kobject_label_substitute_task_read(
1603 ipc_space_t space,
1604 ipc_kobject_label_t kolabel,
1605 ipc_port_t port)
1606 {
1607 ipc_port_t subst = IP_NULL;
1608 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_READ);
1609
1610 if (task != TASK_NULL && task == space->is_task) {
1611 if ((subst = kolabel->ikol_alt_port)) {
1612 return subst;
1613 }
1614 }
1615
1616 return IP_NULL;
1617 }
1618
1619 /*
1620 * Routine: ipc_kobject_label_substitute_thread
1621 * Purpose:
1622 * Substitute a thread control port for its immovable
1623 * equivalent when it belongs to the receiver task.
1624 * Conditions:
1625 * Space is write locked and active.
1626 * Port is locked and active.
1627 * Returns:
1628 * - IP_NULL port if no substitution is to be done
1629 * - a valid port if a substitution needs to happen
1630 */
1631 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1632 ipc_kobject_label_substitute_thread(
1633 ipc_space_t space,
1634 ipc_kobject_label_t kolabel,
1635 ipc_port_t port)
1636 {
1637 ipc_port_t subst = IP_NULL;
1638 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1639
1640 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1641 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1642 return subst;
1643 }
1644 }
1645
1646 return IP_NULL;
1647 }
1648
1649 /*
1650 * Routine: ipc_kobject_label_substitute_thread_read
1651 * Purpose:
1652 * Substitute a thread read port for its immovable
1653 * control equivalent when it belongs to the receiver task.
1654 * Conditions:
1655 * Space is write locked and active.
1656 * Port is locked and active.
1657 * Returns:
1658 * - IP_NULL port if no substitution is to be done
1659 * - a valid port if a substitution needs to happen
1660 */
1661 static ipc_port_t
ipc_kobject_label_substitute_thread_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1662 ipc_kobject_label_substitute_thread_read(
1663 ipc_space_t space,
1664 ipc_kobject_label_t kolabel,
1665 ipc_port_t port)
1666 {
1667 ipc_port_t subst = IP_NULL;
1668 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_READ);
1669
1670 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1671 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1672 return subst;
1673 }
1674 }
1675
1676 return IP_NULL;
1677 }
1678
1679 /*
1680 * Routine: ipc_kobject_label_check
1681 * Purpose:
1682 * Check to see if the space is allowed to possess
1683 * a right for the given port. In order to qualify,
1684 * the space label must contain all the privileges
1685 * listed in the port/kobject label.
1686 *
1687 * Conditions:
1688 * Space is write locked and active.
1689 * Port is locked and active.
1690 *
1691 * Returns:
1692 * Whether the copyout is authorized.
1693 *
1694 * If a port substitution is requested, the space is unlocked,
1695 * the port is unlocked and its "right" consumed.
1696 *
1697 * As of now, substituted ports only happen for send rights.
1698 */
bool
ipc_kobject_label_check(
	ipc_space_t                   space,
	ipc_port_t                    port,
	mach_msg_type_name_t          msgt_name,
	ipc_object_copyout_flags_t   *flags,
	ipc_port_t                   *subst_portp)
{
	ipc_kobject_label_t kolabel;
	ipc_label_t label;

	assert(is_active(space));
	assert(ip_active(port));

	*subst_portp = IP_NULL;

	/* Unlabeled ports/kobjects are always allowed */
	if (!ip_is_kolabeled(port)) {
		return true;
	}

	/* Never OK to copyout the receive right for a labeled kobject */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		panic("ipc_kobject_label_check: attempted receive right "
		    "copyout for labeled kobject");
	}

	kolabel = port->ip_kolabel;
	label = kolabel->ikol_label;

	/* substitution path: only taken for labels carrying a SUBST tag,
	 * and only once per copyout (NO_LABEL_CHECK short-circuits it) */
	if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
	    (label & IPC_LABEL_SUBST_MASK)) {
		ipc_port_t subst = IP_NULL;

		/* substituted ports only happen for send rights */
		if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
			return false;
		}

		if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
			/*
			 * The next check will _not_ substitute.
			 * hollow out our one-time wrapper,
			 * and steal its send right.
			 */
			*flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
			subst = ipc_kobject_disable_locked(port,
			    IKOT_PORT_SUBST_ONCE);
			/* space first, then port: consumes the port's right */
			is_write_unlock(space);
			ipc_port_release_send_and_unlock(port);
			if (subst == IP_NULL) {
				panic("subst-once port %p was consumed twice", port);
			}
			*subst_portp = subst;
			return true;
		}

		/* per-label-type substitution; helpers return IP_NULL
		 * when the receiver doesn't qualify */
		switch (label & IPC_LABEL_SUBST_MASK) {
		case IPC_LABEL_SUBST_TASK:
			subst = ipc_kobject_label_substitute_task(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_TASK_READ:
			subst = ipc_kobject_label_substitute_task_read(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD:
			subst = ipc_kobject_label_substitute_thread(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD_READ:
			subst = ipc_kobject_label_substitute_thread_read(space,
			    kolabel, port);
			break;
		default:
			panic("unexpected label: %llx", label);
		}

		if (subst != IP_NULL) {
			/* hold a bare reference across the unlocks below */
			ip_reference(subst);
			is_write_unlock(space);

			/*
			 * We do not hold a proper send right on `subst`,
			 * only a reference.
			 *
			 * Because of how thread/task termination works,
			 * there is no guarantee copy_send() would work,
			 * so we need to make_send().
			 *
			 * We can do that because ports tagged with
			 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
			 * the no-senders notification.
			 */

			ipc_port_release_send_and_unlock(port);
			/* no check: dPAC integrity */
			port = ipc_port_make_send_any(subst);
			ip_release(subst);
			*subst_portp = port;
			return true;
		}
	}

	/* plain label check: the space must hold every privilege bit
	 * listed in the port's label */
	return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
	       (label & IPC_LABEL_SPACE_MASK);
}
1805