1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73 #include <mach/mig.h>
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <mach/message.h>
77 #include <mach/mig_errors.h>
78 #include <mach/mach_notify.h>
79 #include <mach/ndr.h>
80 #include <mach/vm_param.h>
81
82 #include <mach/mach_vm_server.h>
83 #include <mach/mach_port_server.h>
84 #include <mach/mach_host_server.h>
85 #include <mach/host_priv_server.h>
86 #include <mach/clock_server.h>
87 #include <mach/memory_entry_server.h>
88 #include <mach/processor_server.h>
89 #include <mach/processor_set_server.h>
90 #include <mach/task_server.h>
91 #include <mach/mach_voucher_server.h>
92 #ifdef VM32_SUPPORT
93 #include <mach/vm32_map_server.h>
94 #endif
95 #include <mach/thread_act_server.h>
96 #include <mach/restartable_server.h>
97
98 #include <mach/exc_server.h>
99 #include <mach/mach_exc_server.h>
100 #include <mach/mach_eventlink_server.h>
101
102 #include <device/device_types.h>
103 #include <device/device_server.h>
104
105 #if CONFIG_USER_NOTIFICATION
106 #include <UserNotification/UNDReplyServer.h>
107 #endif
108
109 #if CONFIG_ARCADE
110 #include <mach/arcade_register_server.h>
111 #endif
112
113 #if CONFIG_AUDIT
114 #include <kern/audit_sessionport.h>
115 #endif
116
117 #include <kern/counter.h>
118 #include <kern/ipc_tt.h>
119 #include <kern/ipc_mig.h>
120 #include <kern/ipc_misc.h>
121 #include <kern/ipc_kobject.h>
122 #include <kern/host_notify.h>
123 #include <kern/misc_protos.h>
124
125 #if CONFIG_ARCADE
126 #include <kern/arcade.h>
127 #endif /* CONFIG_ARCADE */
128
129 #include <ipc/ipc_kmsg.h>
130 #include <ipc/ipc_port.h>
131 #include <ipc/ipc_voucher.h>
132 #include <kern/sync_sema.h>
133 #include <kern/work_interval.h>
134 #include <kern/task_ident.h>
135
136 #if HYPERVISOR
137 #include <kern/hv_support.h>
138 #endif
139
140 #if CONFIG_CSR
141 #include <sys/csr.h>
142 #endif
143
144 #include <vm/vm_protos.h>
145
146 #include <security/mac_mach_internal.h>
147
148 extern char *proc_name_address(void *p);
149 struct proc;
150 extern int proc_pid(struct proc *p);
151
/*
 * One slot of the kernel MIG dispatch hash table (see mig_buckets below).
 * Slots are filled at startup by mig_init() using linear probing.
 */
typedef struct {
	mach_msg_id_t num;      /* request msgh_id this entry dispatches (0 == empty slot) */
	mig_routine_t routine;  /* kernel MIG stub that services the request */
	int size;               /* maximum reply message size for this routine */
	int kobjidx;            /* kobject filter-mask index, or KOBJ_IDX_NOT_SET */
} mig_hash_t;
158
/* no-senders handler for IKOT_PORT_SUBST_ONCE wrapper ports (defined below) */
static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT); /* vestigial, no real instance */
IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
    .iko_op_no_senders = ipc_kobject_subst_once_no_senders);

/* capacity of the MIG dispatch hash table (1031 is prime) */
#define MAX_MIG_ENTRIES 1031
#define MIG_HASH(x) (x)

/* sentinel: routine has not been assigned a kobject filter index */
#define KOBJ_IDX_NOT_SET (-1)

static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
/* worst-case linear-probe displacement observed while building the table */
static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */

ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
    struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
176
/*
 * All MIG subsystems whose routines the kernel serves directly.
 * mig_init() folds every routine of every subsystem listed here into
 * the mig_buckets dispatch table; the array is only needed at startup.
 */
__startup_data
static const struct mig_subsystem *mig_e[] = {
	(const struct mig_subsystem *)&mach_vm_subsystem,
	(const struct mig_subsystem *)&mach_port_subsystem,
	(const struct mig_subsystem *)&mach_host_subsystem,
	(const struct mig_subsystem *)&host_priv_subsystem,
	(const struct mig_subsystem *)&clock_subsystem,
	(const struct mig_subsystem *)&processor_subsystem,
	(const struct mig_subsystem *)&processor_set_subsystem,
	(const struct mig_subsystem *)&is_iokit_subsystem,
	(const struct mig_subsystem *)&task_subsystem,
	(const struct mig_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_subsystem *)&mach_voucher_subsystem,
	(const struct mig_subsystem *)&memory_entry_subsystem,
	(const struct mig_subsystem *)&task_restartable_subsystem,
	(const struct mig_subsystem *)&catch_exc_subsystem,
	(const struct mig_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_subsystem *)&mach_eventlink_subsystem,
};
205
/*
 * Per-type kobject operations, indexed by ipc_kobject_type_t.
 * Populated during startup via ipc_kobject_register_startup() and
 * read-only afterwards (__security_const_late).
 */
static struct ipc_kobject_ops __security_const_late
    ipc_kobject_ops_array[IKOT_MAX_TYPE];
208
209 __startup_func
210 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)211 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
212 {
213 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
214 panic("trying to register kobject(%d) twice", ops->iko_op_type);
215 }
216 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
217 }
218
219 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)220 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
221 {
222 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
223 panic("invalid kobject type %d", ikot);
224 }
225 return &ipc_kobject_ops_array[ikot];
226 }
227
/*
 * Build the MIG dispatch hash table from the mig_e[] subsystem list.
 *
 * Each routine's msgh_id is hashed into mig_buckets with linear probing;
 * mig_table_max_displ records the longest probe sequence so lookups know
 * when to give up, and mach_kobj_count tallies the total number of
 * registered kernel routines (used to size the kobject filter bitmask).
 *
 * Runs once at STARTUP(MACH_IPC) rank-first; the tables are
 * SECURITY_READ_ONLY_LATE and must not change afterwards.
 */
__startup_func
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/* replies must fit in a safe kalloc allocation plus trailer */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->routine[j].stub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/* linear probe for a free slot, counting displacement */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine;
				/* prefer the per-routine reply bound; fall back to subsystem max */
				if (mig_e[i]->routine[j].max_reply_msg) {
					mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg;
				} else {
					mig_buckets[pos].size = mig_e[i]->maxsize;
				}

				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				if (mig_table_max_displ < howmany) {
					mig_table_max_displ = howmany;
				}
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
289
290 /*
291 * Do a hash table lookup for given msgh_id. Return 0
292 * if not found.
293 */
294 static mig_hash_t *
find_mig_hash_entry(int msgh_id)295 find_mig_hash_entry(int msgh_id)
296 {
297 unsigned int i = (unsigned int)MIG_HASH(msgh_id);
298 int max_iter = mig_table_max_displ;
299 mig_hash_t *ptr;
300
301 do {
302 ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
303 } while (msgh_id != ptr->num && ptr->num && --max_iter);
304
305 if (!ptr->routine || msgh_id != ptr->num) {
306 ptr = (mig_hash_t *)0;
307 }
308
309 return ptr;
310 }
311
312 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t kmsg)313 ipc_kobject_reply_status(ipc_kmsg_t kmsg)
314 {
315 mach_msg_header_t *hdr = ikm_header(kmsg);
316
317 if (hdr->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
318 return KERN_SUCCESS;
319 }
320
321 return ((mig_reply_error_t *)hdr)->RetCode;
322 }
323
324 /*
325 * Routine: ipc_kobject_set_kobjidx
326 * Purpose:
327 * Set the index for the kobject filter
328 * mask for a given message ID.
329 */
330 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)331 ipc_kobject_set_kobjidx(
332 int msgh_id,
333 int index)
334 {
335 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
336
337 if (ptr == (mig_hash_t *)0) {
338 return KERN_INVALID_ARGUMENT;
339 }
340
341 assert(index < mach_kobj_count);
342 ptr->kobjidx = index;
343
344 return KERN_SUCCESS;
345 }
346
347 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)348 ipc_kobject_init_reply(
349 ipc_kmsg_t reply,
350 const ipc_kmsg_t request,
351 kern_return_t kr)
352 {
353 mach_msg_header_t *req_hdr = ikm_header(request);
354 mach_msg_header_t *reply_hdr = ikm_header(reply);
355
356 #define InP ((mach_msg_header_t *) req_hdr)
357 #define OutP ((mig_reply_error_t *) reply_hdr)
358
359 OutP->NDR = NDR_record;
360 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
361
362 OutP->Head.msgh_bits =
363 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
364 OutP->Head.msgh_remote_port = InP->msgh_local_port;
365 OutP->Head.msgh_local_port = MACH_PORT_NULL;
366 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
367 OutP->Head.msgh_id = InP->msgh_id + 100;
368
369 OutP->RetCode = kr;
370 #undef InP
371 #undef OutP
372 }
373
374 /*
375 * Routine: ipc_kobject_server_internal
376 * Purpose:
377 * Handle a message sent to the kernel.
378 * Generates a reply message.
379 * Version for Untyped IPC.
380 * Conditions:
381 * Nothing locked.
382 */
static kern_return_t
ipc_kobject_server_internal(
	ipc_port_t              port,
	ipc_kmsg_t              request,
	ipc_kmsg_t              *replyp)
{
	int request_msgh_id;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_size_t reply_size;
	bool exec_token_changed = false;
	mig_hash_t *ptr;
	mach_msg_header_t *req_hdr, *reply_hdr, *new_reply_hdr;

	req_hdr = ikm_header(request);
	request_msgh_id = req_hdr->msgh_id;

	/* Find corresponding mig_hash entry, if any */
	ptr = find_mig_hash_entry(request_msgh_id);

	/* Get the reply_size. */
	if (ptr == (mig_hash_t *)0) {
		/* unknown msgh_id: room for a bare MIG error reply only */
		reply_size = sizeof(mig_reply_error_t);
	} else {
		reply_size = ptr->size;
	}

	/*
	 * MIG should really assure no data leakage -
	 * but until it does, pessimistically zero the
	 * whole reply buffer.
	 */
	reply = ipc_kmsg_alloc(reply_size, 0, 0, IPC_KMSG_ALLOC_KERNEL |
	    IPC_KMSG_ALLOC_LINEAR | IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
	/* pre-initialize as a successful reply; the stub may overwrite it */
	ipc_kobject_init_reply(reply, request, KERN_SUCCESS);
	reply_hdr = ikm_header(reply);

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	if (ptr) {
		thread_ro_t tro = current_thread_ro();
		task_t curtask = tro->tro_task;
		struct proc *curproc = tro->tro_proc;
		task_t task = TASK_NULL;
		uint32_t exec_token;

		/*
		 * Check if the port is a task port, if its a task port then
		 * snapshot the task exec token before the mig routine call.
		 */
		if (ip_kotype(port) == IKOT_TASK_CONTROL && port != curtask->itk_self) {
			task = convert_port_to_task_with_exec_token(port, &exec_token);
		}

#if CONFIG_MACF
		int idx = ptr->kobjidx;
		uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);

		/* Check kobject mig filter mask, if exists. */
		if (filter_mask != NULL &&
		    idx != KOBJ_IDX_NOT_SET &&
		    !bitstr_test(filter_mask, idx) &&
		    mac_task_kobj_msg_evaluate != NULL) {
			/* Not in filter mask, evaluate policy. */
			kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
			    request_msgh_id, idx);
			if (kr != KERN_SUCCESS) {
				/* policy denied: report the error, skip the stub */
				((mig_reply_error_t *)reply_hdr)->RetCode = kr;
				goto skip_kobjcall;
			}
		}
#endif /* CONFIG_MACF */

		__BeforeKobjectServerTrace(idx);
		/* dispatch to the kernel MIG stub; it fills in the reply */
		(*ptr->routine)(req_hdr, reply_hdr);
		__AfterKobjectServerTrace(idx);

#if CONFIG_MACF
skip_kobjcall:
#endif

		/* Check if the exec token changed during the mig routine */
		if (task != TASK_NULL) {
			if (exec_token != task->exec_token) {
				exec_token_changed = true;
			}
			task_deallocate(task);
		}

		counter_inc(&kernel_task->messages_received);
	} else {
#if DEVELOPMENT || DEBUG
		printf("ipc_kobject_server: bogus kernel message, id=%d\n",
		    req_hdr->msgh_id);
#endif /* DEVELOPMENT || DEBUG */
		_MIG_MSGID_INVALID(req_hdr->msgh_id);

		((mig_reply_error_t *)reply_hdr)->RetCode = MIG_BAD_ID;
	}

	/* Fail the MIG call if the task exec token changed during the call */
	if (exec_token_changed && ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * Create a new reply msg with error and destroy the old reply msg.
		 */
		ipc_kmsg_t new_reply = ipc_kmsg_alloc(sizeof(mig_reply_error_t),
		    0, 0, IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_SAVED | IPC_KMSG_ALLOC_ZERO |
		    IPC_KMSG_ALLOC_NOFAIL);
		new_reply_hdr = ikm_header(new_reply);

		/*
		 * Initialize the new reply message.
		 */
		{
#define OutP_new ((mig_reply_error_t *) new_reply_hdr)
#define OutP_old ((mig_reply_error_t *) reply_hdr)

			OutP_new->NDR = OutP_old->NDR;
			OutP_new->Head.msgh_size = sizeof(mig_reply_error_t);
			/* strip COMPLEX: the new reply carries no descriptors */
			OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
			OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port;
			OutP_new->Head.msgh_local_port = MACH_PORT_NULL;
			OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL;
			OutP_new->Head.msgh_id = OutP_old->Head.msgh_id;

			/* Set the error as KERN_INVALID_TASK */
			OutP_new->RetCode = KERN_INVALID_TASK;

#undef OutP_new
#undef OutP_old
		}

		/*
		 * Destroy everything in reply except the reply port right,
		 * which is needed in the new reply message.
		 */
		ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_SKIP_REMOTE | IPC_KMSG_DESTROY_NOT_SIGNED);
		reply = new_reply;
	} else if (ipc_kobject_reply_status(reply) == MIG_NO_REPLY) {
		/*
		 * The server function will send a reply message
		 * using the reply port right, which it has saved.
		 */
		ipc_kmsg_free(reply);
		reply = IKM_NULL;
	}

	*replyp = reply;
	return KERN_SUCCESS;
}
534
535
536 /*
537 * Routine: ipc_kobject_server
538 * Purpose:
539 * Handle a message sent to the kernel.
540 * Generates a reply message.
541 * Version for Untyped IPC.
542 *
543 * Ownership of the incoming rights (from the request)
 * are transferred on success (whether a reply is made or not).
545 *
546 * Conditions:
547 * Nothing locked.
548 */
ipc_kmsg_t
ipc_kobject_server(
	ipc_port_t              port,
	ipc_kmsg_t              request,
	mach_msg_option_t       option __unused)
{
	mach_msg_header_t *req_hdr = ikm_header(request);
#if DEVELOPMENT || DEBUG
	const int request_msgh_id = req_hdr->msgh_id;
#endif
	ipc_port_t request_voucher_port;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_header_t *reply_hdr;
	kern_return_t kr;

	ipc_kmsg_trace_send(request, option);

	/* UEXT objects have their own dispatcher; everything else goes to MIG */
	if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
		kr = uext_server(port, request, &reply);
	} else {
		kr = ipc_kobject_server_internal(port, request, &reply);
	}

	if (kr != KERN_SUCCESS) {
		assert(kr != MACH_SEND_TIMED_OUT &&
		    kr != MACH_SEND_INTERRUPTED &&
		    kr != MACH_SEND_INVALID_DEST);
		assert(reply == IKM_NULL);

		/* convert the server error into a MIG error */
		reply = ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0, 0,
		    IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_SAVED |
		    IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
		static_assert(sizeof(mig_reply_error_t) < IKM_SAVED_MSG_SIZE);

		ipc_kobject_init_reply(reply, request, kr);
	}

	counter_inc(&kernel_task->messages_sent);
	/*
	 * Destroy destination. The following code differs from
	 * ipc_object_destroy in that we release the send-once
	 * right instead of generating a send-once notification
	 * (which would bring us here again, creating a loop).
	 * It also differs in that we only expect send or
	 * send-once rights, never receive rights.
	 */
	switch (MACH_MSGH_BITS_REMOTE(req_hdr->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(req_hdr->msgh_remote_port);
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ipc_port_release_sonce(req_hdr->msgh_remote_port);
		break;

	default:
		panic("ipc_kobject_server: strange destination rights");
	}

	/*
	 * Destroy voucher. The kernel MIG servers never take ownership
	 * of vouchers sent in messages. Swallow any such rights here.
	 */
	request_voucher_port = ipc_kmsg_get_voucher_port(request);
	if (IP_VALID(request_voucher_port)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		    MACH_MSGH_BITS_VOUCHER(req_hdr->msgh_bits));
		ipc_port_release_send(request_voucher_port);
		ipc_kmsg_clear_voucher_port(request);
	}

	if (reply == IKM_NULL ||
	    ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * The server function is responsible for the contents
		 * of the message. The reply port right is moved
		 * to the reply message, and we have deallocated
		 * the destination port right, so we just need
		 * to free the kmsg.
		 */
		ipc_kmsg_free(request);
	} else {
		/*
		 * The message contents of the request are intact.
		 * Remote port has been released above. Do not destroy
		 * the reply port right either, which is needed in the reply message.
		 */
		ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
	}

	if (reply != IKM_NULL) {
		reply_hdr = ikm_header(reply);
		ipc_port_t reply_port = reply_hdr->msgh_remote_port;

		if (!IP_VALID(reply_port)) {
			/*
			 * Can't queue the reply message if the destination
			 * (the reply port) isn't valid.
			 */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		} else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
			/* do not lock reply port, use raw pointer comparison */

			/*
			 * Don't send replies to kobject kernel ports.
			 */
#if DEVELOPMENT || DEBUG
			printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
			    __func__, ip_kotype(reply_port), request_msgh_id);
#endif /* DEVELOPMENT || DEBUG */
			ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
			reply = IKM_NULL;
		}
	}

	return reply;
}
668
/*
 * Store `kobject` into the port's ip_kobject slot.
 *
 * With pointer authentication, the pointer is signed with a
 * discriminator blending the storage address, the kobject type, and
 * the port's immovable-receive/send bits, so a signed pointer cannot
 * be replayed onto a different port, type, or movability setting.
 * ipc_kobject_get_raw() must recompute the identical discriminator.
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	/* NULL is stored unsigned so the load side can test it cheaply */
	if (kobject) {
		type |= port->ip_immovable_receive << 14;
		type |= port->ip_immovable_send << 15;
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_sign_unauthenticated(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
692
/*
 * Associate `kobject` of `type` with `port`: publish the type in the
 * object bits, then store the (possibly ptrauth-signed) pointer.
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type);
	ipc_kobject_set_raw(port, kobject, type);
}
703
704 /*
705 * Routine: ipc_kobject_get_raw
706 * Purpose:
707 * Returns the kobject pointer of a specified port.
708 *
709 * This returns the current value of the kobject pointer,
710 * without any validation (the caller is expected to do
711 * the validation it needs).
712 *
713 * Conditions:
714 * The port is a kobject of the proper type.
715 */
__header_always_inline ipc_kobject_t
ipc_kobject_get_raw(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;
	ipc_kobject_t kobject = (ipc_kobject_t)*store;

#if __has_feature(ptrauth_calls)
	if (kobject) {
		/*
		 * Recompute the exact discriminator used by
		 * ipc_kobject_set_raw(): storage address blended with the
		 * type and the immovable-receive/send bits. A pointer
		 * copied across ports/types fails authentication here.
		 */
		type |= port->ip_immovable_receive << 14;
		type |= port->ip_immovable_send << 15;
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_auth_data(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	return kobject;
}
739
740 __abortlike
741 static void
ipc_kobject_require_panic(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)742 ipc_kobject_require_panic(
743 ipc_port_t port,
744 ipc_kobject_t kobject,
745 ipc_kobject_type_t kotype)
746 {
747 if (ip_kotype(port) != kotype) {
748 panic("port %p: invalid kobject type, got %d wanted %d",
749 port, ip_kotype(port), kotype);
750 }
751 panic("port %p: invalid kobject, got %p wanted %p",
752 port, ipc_kobject_get_raw(port, kotype), kobject);
753 }
754
755 __header_always_inline void
ipc_kobject_require(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)756 ipc_kobject_require(
757 ipc_port_t port,
758 ipc_kobject_t kobject,
759 ipc_kobject_type_t kotype)
760 {
761 ipc_kobject_t cur;
762
763 if (__improbable(ip_kotype(port) != kotype)) {
764 ipc_kobject_require_panic(port, kobject, kotype);
765 }
766 cur = ipc_kobject_get_raw(port, kotype);
767 if (cur && cur != kobject) {
768 ipc_kobject_require_panic(port, kobject, kotype);
769 }
770 }
771
772 /*
773 * Routine: ipc_kobject_get_locked
774 * Purpose:
775 * Returns the kobject pointer of a specified port,
776 * for an expected type.
777 *
778 * Returns IKO_NULL if the port isn't active.
779 *
780 * This function may be used when:
781 * - the port lock is held
782 * - the kobject association stays while there
783 * are any outstanding rights.
784 *
785 * Conditions:
786 * The port is a kobject of the proper type.
787 */
788 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)789 ipc_kobject_get_locked(
790 ipc_port_t port,
791 ipc_kobject_type_t type)
792 {
793 ipc_kobject_t kobject = IKO_NULL;
794
795 if (ip_active(port) && type == ip_kotype(port)) {
796 kobject = ipc_kobject_get_raw(port, type);
797 }
798
799 return kobject;
800 }
801
802 /*
803 * Routine: ipc_kobject_get_stable
804 * Purpose:
805 * Returns the kobject pointer of a specified port,
806 * for an expected type, for types where the port/kobject
807 * association is permanent.
808 *
809 * Returns IKO_NULL if the port isn't active.
810 *
811 * Conditions:
812 * The port is a kobject of the proper type.
813 */
814 ipc_kobject_t
ipc_kobject_get_stable(ipc_port_t port,ipc_kobject_type_t type)815 ipc_kobject_get_stable(
816 ipc_port_t port,
817 ipc_kobject_type_t type)
818 {
819 assert(ipc_kobject_ops_get(type)->iko_op_stable);
820 return ipc_kobject_get_locked(port, type);
821 }
822
823 /*
824 * Routine: ipc_kobject_init_port
825 * Purpose:
826 * Initialize a kobject port with the given types and options.
827 *
828 * This function never fails.
829 */
static inline void
ipc_kobject_init_port(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t options)
{
	/* make-send happens before the movability flags are tightened below */
	if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
		ipc_port_make_send_any_locked(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
		/* arming no-senders donates a port reference to the notification */
		port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
		ip_reference(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
		port->ip_no_grant = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
		port->ip_immovable_send = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_PINNED) {
		port->ip_pinned = 1;
	}

	ipc_kobject_set_internal(port, kobject, type);
}
856
857 /*
858 * Routine: ipc_kobject_alloc_port
859 * Purpose:
860 * Allocate a kobject port in the kernel space of the specified type.
861 *
862 * This function never fails.
863 *
864 * Conditions:
865 * No locks held (memory is allocated)
866 */
867 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)868 ipc_kobject_alloc_port(
869 ipc_kobject_t kobject,
870 ipc_kobject_type_t type,
871 ipc_kobject_alloc_options_t options)
872 {
873 ipc_port_t port;
874
875 port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
876 if (port == IP_NULL) {
877 panic("ipc_kobject_alloc_port(): failed to allocate port");
878 }
879
880 ipc_kobject_init_port(port, kobject, type, options);
881 return port;
882 }
883
884 /*
885 * Routine: ipc_kobject_alloc_labeled_port
886 * Purpose:
887 * Allocate a kobject port and associated mandatory access label
888 * in the kernel space of the specified type.
889 *
890 * This function never fails.
891 *
892 * Conditions:
893 * No locks held (memory is allocated)
894 */
895
896 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)897 ipc_kobject_alloc_labeled_port(
898 ipc_kobject_t kobject,
899 ipc_kobject_type_t type,
900 ipc_label_t label,
901 ipc_kobject_alloc_options_t options)
902 {
903 ipc_port_t port;
904
905 port = ipc_kobject_alloc_port(kobject, type, options);
906
907 ipc_port_set_label(port, label);
908
909 return port;
910 }
911
912 static void
ipc_kobject_subst_once_no_senders(ipc_port_t port,mach_port_mscount_t mscount)913 ipc_kobject_subst_once_no_senders(
914 ipc_port_t port,
915 mach_port_mscount_t mscount)
916 {
917 ipc_port_t ko_port;
918
919 ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
920
921 if (ko_port) {
922 /*
923 * Clean up the right if the wrapper wasn't hollowed out
924 * by ipc_kobject_alloc_subst_once().
925 */
926 ipc_port_release_send(ko_port);
927 }
928 }
929
930 /*
931 * Routine: ipc_kobject_alloc_subst_once
932 * Purpose:
933 * Make a port that will be substituted by the kolabel
934 * rules once, preventing the next substitution (of its target)
935 * to happen if any.
936 *
937 * Returns:
938 * A port with a send right, that will substitute to its "kobject".
939 *
940 * Conditions:
941 * No locks held (memory is allocated).
942 *
943 * `target` holds a send-right donated to this function,
944 * consumed in ipc_kobject_subst_once_no_senders().
945 */
ipc_port_t
ipc_kobject_alloc_subst_once(
	ipc_port_t              target)
{
	/* dead/null targets pass through unchanged */
	if (!IP_VALID(target)) {
		return target;
	}
	/* wrapper stores `target` as its kobject and consumes the donated send right */
	return ipc_kobject_alloc_labeled_port(target,
	    IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
	    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
}
957
958 /*
959 * Routine: ipc_kobject_make_send_lazy_alloc_port
960 * Purpose:
 * Make a send right for a kobject port.
962 *
963 * A location owning this port is passed in port_store.
964 * If no port exists, a port is made lazily.
965 *
966 * A send right is made for the port, and if this is the first one
967 * (possibly not for the first time), then the no-more-senders
968 * notification is rearmed.
969 *
970 * When a notification is armed, the kobject must donate
971 * one of its references to the port. It is expected
972 * the no-more-senders notification will consume this reference.
973 *
974 * Returns:
975 * TRUE if a notification was armed
976 * FALSE else
977 *
978 * Conditions:
979 * Nothing is locked, memory can be allocated.
980 * The caller must be able to donate a kobject reference to the port.
981 */
bool
ipc_kobject_make_send_lazy_alloc_port(
	ipc_port_t              *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t alloc_opts)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	/* every allocation path makes a send right and arms no-senders */
	alloc_opts |= IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST;
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_port(kobject, type, alloc_opts);

		/*
		 * Try to publish the new port; release ordering makes the
		 * fully initialized port visible to other threads.
		 */
		if (os_atomic_cmpxchgv(port_store,
		    IP_NULL, port, &previous, release)) {
			return TRUE;
		}

		/*
		 * Lost the race: another thread published first.
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		port = previous;
	}

	/* make a send right on the winning port, re-arming no-senders if needed */
	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1021
1022 /*
1023 * Routine: ipc_kobject_make_send_lazy_alloc_labeled_port
1024 * Purpose:
 * Make a send right for a kobject port.
1026 *
1027 * A location owning this port is passed in port_store.
1028 * If no port exists, a port is made lazily.
1029 *
1030 * A send right is made for the port, and if this is the first one
1031 * (possibly not for the first time), then the no-more-senders
1032 * notification is rearmed.
1033 *
1034 * When a notification is armed, the kobject must donate
1035 * one of its references to the port. It is expected
1036 * the no-more-senders notification will consume this reference.
1037 *
1038 * Returns:
1039 * TRUE if a notification was armed
1040 * FALSE else
1041 *
1042 * Conditions:
1043 * Nothing is locked, memory can be allocated.
1044 * The caller must be able to donate a kobject reference to the port.
1045 */
boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(
	ipc_port_t             *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_label_t             label)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	/* dependency-ordered load: pairs with the release cmpxchg below */
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		port = ipc_kobject_alloc_labeled_port(kobject, type, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		/* release-publish so other readers see a fully formed port */
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
			return TRUE;
		}

		/*
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		/* we lost the race: the winner's port must be labeled too */
		port = previous;
		assert(ip_is_kolabeled(port));
	}

	/* (re)arm no-senders and make a send right on the shared port */
	kr = ipc_kobject_make_send_nsrequest(port, kobject, type);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1084
1085 /*
1086 * Routine: ipc_kobject_nsrequest_locked
1087 * Purpose:
1088 * Arm the no-senders notification for the given kobject
1089 * if it doesn't have one armed yet.
1090 *
1091 * Conditions:
1092 * Port is locked and active.
1093 *
1094 * Returns:
1095 * KERN_SUCCESS: the notification was armed
1096 * KERN_ALREADY_WAITING: the notification was already armed
1097 * KERN_FAILURE: the notification would fire immediately
1098 */
1099 static inline kern_return_t
ipc_kobject_nsrequest_locked(ipc_port_t port,mach_port_mscount_t sync)1100 ipc_kobject_nsrequest_locked(
1101 ipc_port_t port,
1102 mach_port_mscount_t sync)
1103 {
1104 if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
1105 return KERN_ALREADY_WAITING;
1106 }
1107
1108 if (port->ip_srights == 0 && sync <= port->ip_mscount) {
1109 return KERN_FAILURE;
1110 }
1111
1112 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
1113 ip_reference(port);
1114 return KERN_SUCCESS;
1115 }
1116
1117
1118 /*
1119 * Routine: ipc_kobject_nsrequest
1120 * Purpose:
1121 * Arm the no-senders notification for the given kobject
1122 * if it doesn't have one armed yet.
1123 *
1124 * Returns:
1125 * KERN_SUCCESS: the notification was armed
1126 * KERN_ALREADY_WAITING: the notification was already armed
1127 * KERN_FAILURE: the notification would fire immediately
1128 * KERN_INVALID_RIGHT: the port is dead
1129 */
1130 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1131 ipc_kobject_nsrequest(
1132 ipc_port_t port,
1133 mach_port_mscount_t sync,
1134 mach_port_mscount_t *mscount)
1135 {
1136 kern_return_t kr = KERN_INVALID_RIGHT;
1137
1138 if (IP_VALID(port)) {
1139 ip_mq_lock(port);
1140
1141 if (mscount) {
1142 *mscount = port->ip_mscount;
1143 }
1144 if (ip_active(port)) {
1145 kr = ipc_kobject_nsrequest_locked(port, sync);
1146 }
1147
1148 ip_mq_unlock(port);
1149 } else if (mscount) {
1150 *mscount = 0;
1151 }
1152
1153 return kr;
1154 }
1155
1156 ipc_port_t
ipc_kobject_copy_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1157 ipc_kobject_copy_send(
1158 ipc_port_t port,
1159 ipc_kobject_t kobject,
1160 ipc_kobject_type_t kotype)
1161 {
1162 ipc_port_t sright = port;
1163
1164 if (IP_VALID(port)) {
1165 ip_mq_lock(port);
1166 if (ip_active(port)) {
1167 ipc_kobject_require(port, kobject, kotype);
1168 ipc_port_copy_send_any_locked(port);
1169 } else {
1170 sright = IP_DEAD;
1171 }
1172 ip_mq_unlock(port);
1173 }
1174
1175 return sright;
1176 }
1177
1178 ipc_port_t
ipc_kobject_make_send(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1179 ipc_kobject_make_send(
1180 ipc_port_t port,
1181 ipc_kobject_t kobject,
1182 ipc_kobject_type_t kotype)
1183 {
1184 ipc_port_t sright = port;
1185
1186 if (IP_VALID(port)) {
1187 ip_mq_lock(port);
1188 if (ip_active(port)) {
1189 ipc_kobject_require(port, kobject, kotype);
1190 ipc_port_make_send_any_locked(port);
1191 } else {
1192 sright = IP_DEAD;
1193 }
1194 ip_mq_unlock(port);
1195 }
1196
1197 return sright;
1198 }
1199
1200 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t kotype)1201 ipc_kobject_make_send_nsrequest(
1202 ipc_port_t port,
1203 ipc_kobject_t kobject,
1204 ipc_kobject_type_t kotype)
1205 {
1206 kern_return_t kr = KERN_INVALID_RIGHT;
1207
1208 if (IP_VALID(port)) {
1209 ip_mq_lock(port);
1210 if (ip_active(port)) {
1211 ipc_kobject_require(port, kobject, kotype);
1212 ipc_port_make_send_any_locked(port);
1213 kr = ipc_kobject_nsrequest_locked(port, 0);
1214 assert(kr != KERN_FAILURE);
1215 }
1216 ip_mq_unlock(port);
1217 }
1218
1219 return kr;
1220 }
1221
1222 static inline ipc_kobject_t
ipc_kobject_disable_internal(ipc_port_t port,ipc_kobject_type_t type)1223 ipc_kobject_disable_internal(
1224 ipc_port_t port,
1225 ipc_kobject_type_t type)
1226 {
1227 ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1228
1229 port->ip_kobject = 0;
1230 if (ip_is_kolabeled(port)) {
1231 port->ip_kolabel->ikol_alt_port = IP_NULL;
1232 }
1233
1234 return kobject;
1235 }
1236
1237 /*
1238 * Routine: ipc_kobject_dealloc_port_and_unlock
1239 * Purpose:
1240 * Destroys a port allocated with any of the ipc_kobject_alloc*
1241 * functions.
1242 *
1243 * This will atomically:
1244 * - make the port inactive,
1245 * - optionally check the make send count
1246 * - disable (nil-out) the kobject pointer for kobjects without
1247 * a destroy callback.
1248 *
1249 * The port will retain its kobject-ness and kobject type.
1250 *
1251 *
1252 * Returns:
1253 * The kobject pointer that was set prior to this call
1254 * (possibly NULL if the kobject was already disabled).
1255 *
1256 * Conditions:
1257 * The port is active and locked.
1258 * On return the port is inactive and unlocked.
1259 */
__abortlike
static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
{
	/* out-of-line __abortlike helper keeps the caller's fast path small */
	panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
}
1266
__abortlike
static void
__ipc_kobject_dealloc_bad_mscount_panic(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	/* the caller-supplied expected make-send count didn't match ip_mscount */
	panic("unexpected make-send count: %p[%d], %d, %d",
	    port, type, port->ip_mscount, mscount);
}
1277
__abortlike
static void
__ipc_kobject_dealloc_bad_srights_panic(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	/* send rights were still outstanding on a port being torn down */
	panic("unexpected send right count: %p[%d], %d",
	    port, type, port->ip_srights);
}
1287
ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	ipc_kobject_t kobject = IKO_NULL;
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);

	require_ip_active(port);

	/* the port must still be of the type the caller claims */
	if (ip_kotype(port) != type) {
		__ipc_kobject_dealloc_bad_type_panic(port, type);
	}

	/* make-send count check is optional: mscount == 0 skips it */
	if (mscount && port->ip_mscount != mscount) {
		__ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
	}
	/*
	 * when an mscount was given, or the type is "stable",
	 * no send right may remain at destruction time
	 */
	if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
		__ipc_kobject_dealloc_bad_srights_panic(port, type);
	}

	/* types without a destroy callback get their kobject nil'd here */
	if (!ops->iko_op_destroy) {
		kobject = ipc_kobject_disable_internal(port, type);
	}

	/* makes the port inactive and drops the lock */
	ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);

	return kobject;
}
1318
1319 /*
1320 * Routine: ipc_kobject_dealloc_port
1321 * Purpose:
1322 * Destroys a port allocated with any of the ipc_kobject_alloc*
1323 * functions.
1324 *
1325 * This will atomically:
1326 * - make the port inactive,
1327 * - optionally check the make send count
1328 * - disable (nil-out) the kobject pointer for kobjects without
1329 * a destroy callback.
1330 *
1331 * The port will retain its kobject-ness and kobject type.
1332 *
1333 *
1334 * Returns:
1335 * The kobject pointer that was set prior to this call
1336 * (possibly NULL if the kobject was already disabled).
1337 *
1338 * Conditions:
1339 * Nothing is locked.
1340 * The port is active.
1341 * On return the port is inactive.
1342 */
1343 ipc_kobject_t
ipc_kobject_dealloc_port(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1344 ipc_kobject_dealloc_port(
1345 ipc_port_t port,
1346 mach_port_mscount_t mscount,
1347 ipc_kobject_type_t type)
1348 {
1349 ip_mq_lock(port);
1350 return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
1351 }
1352
1353 /*
1354 * Routine: ipc_kobject_enable
1355 * Purpose:
1356 * Make a port represent a kernel object of the given type.
1357 * The caller is responsible for handling refs for the
1358 * kernel object, if necessary.
1359 * Conditions:
1360 * Nothing locked.
1361 * The port must be active.
1362 */
1363 void
ipc_kobject_enable(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1364 ipc_kobject_enable(
1365 ipc_port_t port,
1366 ipc_kobject_t kobject,
1367 ipc_kobject_type_t type)
1368 {
1369 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1370
1371 ip_mq_lock(port);
1372 require_ip_active(port);
1373
1374 if (type != ip_kotype(port)) {
1375 panic("%s: unexpected kotype of port %p: want %d, got %d",
1376 __func__, port, type, ip_kotype(port));
1377 }
1378
1379 ipc_kobject_set_raw(port, kobject, type);
1380
1381 ip_mq_unlock(port);
1382 }
1383
1384 /*
1385 * Routine: ipc_kobject_disable_locked
1386 * Purpose:
1387 * Clear the kobject pointer for a port.
1388 * Conditions:
1389 * The port is locked.
1390 * Returns the current kobject pointer.
1391 */
1392 ipc_kobject_t
ipc_kobject_disable_locked(ipc_port_t port,ipc_kobject_type_t type)1393 ipc_kobject_disable_locked(
1394 ipc_port_t port,
1395 ipc_kobject_type_t type)
1396 {
1397 if (ip_active(port)) {
1398 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1399 }
1400
1401 if (ip_kotype(port) != type) {
1402 panic("port %p of type %d, expecting %d",
1403 port, ip_kotype(port), type);
1404 }
1405
1406 return ipc_kobject_disable_internal(port, type);
1407 }
1408
1409 /*
1410 * Routine: ipc_kobject_disable
1411 * Purpose:
1412 * Clear the kobject pointer for a port.
1413 * Conditions:
1414 * Nothing locked.
1415 * Returns the current kobject pointer.
1416 */
1417 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1418 ipc_kobject_disable(
1419 ipc_port_t port,
1420 ipc_kobject_type_t type)
1421 {
1422 ipc_kobject_t kobject;
1423
1424 ip_mq_lock(port);
1425 kobject = ipc_kobject_disable_locked(port, type);
1426 ip_mq_unlock(port);
1427
1428 return kobject;
1429 }
1430
1431 /*
1432 * Routine: ipc_kobject_upgrade_mktimer_locked
1433 * Purpose:
1434 * Upgrades a port to mktimer kobject status
1435 *
1436 * This pattern is rather bad as it leads to various
1437 * confusions that need to be special cased with kobject-ness
1438 * of ports. No new port with dual kobject/message-queue
1439 * semantics should be made ever.
1440 *
1441 * Conditions:
1442 * Port is locked
1443 */
void
ipc_kobject_upgrade_mktimer_locked(
	ipc_port_t              port,
	ipc_kobject_t           kobject)
{
	/* tag the already-existing port as an IKOT_TIMER kobject */
	ipc_kobject_set_internal(port, kobject, IKOT_TIMER);
}
1451
1452 /*
1453 * Routine: ipc_kobject_notify_no_senders
1454 * Purpose:
1455 * Handles a no-senders notification
1456 * sent to a kobject.
1457 *
1458 * A port reference is consumed.
1459 *
1460 * Conditions:
1461 * Nothing locked.
1462 */
1463 void
ipc_kobject_notify_no_senders(ipc_port_t port,mach_port_mscount_t mscount)1464 ipc_kobject_notify_no_senders(
1465 ipc_port_t port,
1466 mach_port_mscount_t mscount)
1467 {
1468 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1469
1470 assert(ops->iko_op_no_senders);
1471 ops->iko_op_no_senders(port, mscount);
1472
1473 /* consume the ref ipc_notify_no_senders_prepare left */
1474 ip_release(port);
1475 }
1476
1477 /*
 *	Routine:	ipc_kobject_notify_send_once_and_unlock
1479 * Purpose:
 *		Handles a send-once notification
 *		sent to a kobject.
1482 *
1483 * A send-once port reference is consumed.
1484 *
1485 * Conditions:
1486 * Port is locked.
1487 */
void
ipc_kobject_notify_send_once_and_unlock(
	ipc_port_t              port)
{
	/*
	 * drop the send once right while we hold the port lock.
	 * we will keep a port reference while we run the possible
	 * callouts to kobjects.
	 *
	 * This a simplified version of ipc_port_release_sonce()
	 * since kobjects can't be special reply ports.
	 */
	assert(!port->ip_specialreply);

	if (port->ip_sorights == 0) {
		panic("Over-release of port %p send-once right!", port);
	}

	port->ip_sorights--;
	/* drop the lock before running any callout */
	ip_mq_unlock(port);

	/*
	 * because there's very few consumers,
	 * the code here isn't generic as it's really not worth it.
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_RESUME:
		task_suspension_send_once(port);
		break;
	default:
		/* other kobject types ignore send-once notifications */
		break;
	}

	/* consume the port reference (see routine header) */
	ip_release(port);
}
1523
1524
1525 /*
1526 * Routine: ipc_kobject_destroy
1527 * Purpose:
1528 * Release any kernel object resources associated
1529 * with the port, which is being destroyed.
1530 *
1531 * This path to free object resources should only be
1532 * needed when resources are associated with a user's port.
1533 * In the normal case, when the kernel is the receiver,
1534 * the code calling ipc_kobject_dealloc_port() should clean
1535 * up the object resources.
1536 *
1537 * Cleans up any kobject label that might be present.
1538 * Conditions:
1539 * The port is not locked, but it is dead.
1540 */
1541 void
ipc_kobject_destroy(ipc_port_t port)1542 ipc_kobject_destroy(
1543 ipc_port_t port)
1544 {
1545 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1546
1547 if (ops->iko_op_permanent) {
1548 panic("trying to destroy an permanent port %p", port);
1549 }
1550 if (ops->iko_op_destroy) {
1551 ops->iko_op_destroy(port);
1552 }
1553
1554 if (ip_is_kolabeled(port)) {
1555 ipc_kobject_label_t labelp = port->ip_kolabel;
1556
1557 assert(labelp != NULL);
1558 assert(labelp->ikol_alt_port == IP_NULL);
1559 assert(ip_is_kobject(port));
1560 port->ip_kolabel = NULL;
1561 io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
1562 zfree(ipc_kobject_label_zone, labelp);
1563 }
1564 }
1565
1566 /*
1567 * Routine: ipc_kobject_label_substitute_task
1568 * Purpose:
1569 * Substitute a task control port for its immovable
1570 * equivalent when the receiver is that task.
1571 * Conditions:
1572 * Space is write locked and active.
1573 * Port is locked and active.
1574 * Returns:
1575 * - IP_NULL port if no substitution is to be done
1576 * - a valid port if a substitution needs to happen
1577 */
1578 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1579 ipc_kobject_label_substitute_task(
1580 ipc_space_t space,
1581 ipc_kobject_label_t kolabel,
1582 ipc_port_t port)
1583 {
1584 ipc_port_t subst = IP_NULL;
1585 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1586
1587 if (task != TASK_NULL && task == space->is_task) {
1588 if ((subst = kolabel->ikol_alt_port)) {
1589 return subst;
1590 }
1591 }
1592
1593 return IP_NULL;
1594 }
1595
1596 /*
1597 * Routine: ipc_kobject_label_substitute_task_read
1598 * Purpose:
1599 * Substitute a task read port for its immovable
1600 * control equivalent when the receiver is that task.
1601 * Conditions:
1602 * Space is write locked and active.
1603 * Port is locked and active.
1604 * Returns:
1605 * - IP_NULL port if no substitution is to be done
1606 * - a valid port if a substitution needs to happen
1607 */
1608 static ipc_port_t
ipc_kobject_label_substitute_task_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1609 ipc_kobject_label_substitute_task_read(
1610 ipc_space_t space,
1611 ipc_kobject_label_t kolabel,
1612 ipc_port_t port)
1613 {
1614 ipc_port_t subst = IP_NULL;
1615 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_READ);
1616
1617 if (task != TASK_NULL && task == space->is_task) {
1618 if ((subst = kolabel->ikol_alt_port)) {
1619 return subst;
1620 }
1621 }
1622
1623 return IP_NULL;
1624 }
1625
1626 /*
1627 * Routine: ipc_kobject_label_substitute_thread
1628 * Purpose:
1629 * Substitute a thread control port for its immovable
1630 * equivalent when it belongs to the receiver task.
1631 * Conditions:
1632 * Space is write locked and active.
1633 * Port is locked and active.
1634 * Returns:
1635 * - IP_NULL port if no substitution is to be done
1636 * - a valid port if a substitution needs to happen
1637 */
1638 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1639 ipc_kobject_label_substitute_thread(
1640 ipc_space_t space,
1641 ipc_kobject_label_t kolabel,
1642 ipc_port_t port)
1643 {
1644 ipc_port_t subst = IP_NULL;
1645 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1646
1647 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1648 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1649 return subst;
1650 }
1651 }
1652
1653 return IP_NULL;
1654 }
1655
1656 /*
1657 * Routine: ipc_kobject_label_substitute_thread_read
1658 * Purpose:
1659 * Substitute a thread read port for its immovable
1660 * control equivalent when it belongs to the receiver task.
1661 * Conditions:
1662 * Space is write locked and active.
1663 * Port is locked and active.
1664 * Returns:
1665 * - IP_NULL port if no substitution is to be done
1666 * - a valid port if a substitution needs to happen
1667 */
1668 static ipc_port_t
ipc_kobject_label_substitute_thread_read(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1669 ipc_kobject_label_substitute_thread_read(
1670 ipc_space_t space,
1671 ipc_kobject_label_t kolabel,
1672 ipc_port_t port)
1673 {
1674 ipc_port_t subst = IP_NULL;
1675 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_READ);
1676
1677 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1678 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1679 return subst;
1680 }
1681 }
1682
1683 return IP_NULL;
1684 }
1685
1686 /*
1687 * Routine: ipc_kobject_label_check
1688 * Purpose:
1689 * Check to see if the space is allowed to possess
1690 * a right for the given port. In order to qualify,
1691 * the space label must contain all the privileges
1692 * listed in the port/kobject label.
1693 *
1694 * Conditions:
1695 * Space is write locked and active.
1696 * Port is locked and active.
1697 *
1698 * Returns:
1699 * Whether the copyout is authorized.
1700 *
1701 * If a port substitution is requested, the space is unlocked,
1702 * the port is unlocked and its "right" consumed.
1703 *
1704 * As of now, substituted ports only happen for send rights.
1705 */
bool
ipc_kobject_label_check(
	ipc_space_t                 space,
	ipc_port_t                  port,
	mach_msg_type_name_t        msgt_name,
	ipc_object_copyout_flags_t *flags,
	ipc_port_t                 *subst_portp)
{
	ipc_kobject_label_t kolabel;
	ipc_label_t label;

	assert(is_active(space));
	assert(ip_active(port));

	*subst_portp = IP_NULL;

	/* Unlabeled ports/kobjects are always allowed */
	if (!ip_is_kolabeled(port)) {
		return true;
	}

	/* Never OK to copyout the receive right for a labeled kobject */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		panic("ipc_kobject_label_check: attempted receive right "
		    "copyout for labeled kobject");
	}

	kolabel = port->ip_kolabel;
	label = kolabel->ikol_label;

	if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
	    (label & IPC_LABEL_SUBST_MASK)) {
		ipc_port_t subst = IP_NULL;

		/* substitution is only defined for send rights */
		if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
			return false;
		}

		if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
			/*
			 * The next check will _not_ substitute.
			 * hollow out our one-time wrapper,
			 * and steal its send right.
			 */
			*flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
			subst = ipc_kobject_disable_locked(port,
			    IKOT_PORT_SUBST_ONCE);
			/* drops both locks; consumes the wrapper's right */
			is_write_unlock(space);
			ipc_port_release_send_and_unlock(port);
			if (subst == IP_NULL) {
				panic("subst-once port %p was consumed twice", port);
			}
			*subst_portp = subst;
			return true;
		}

		switch (label & IPC_LABEL_SUBST_MASK) {
		case IPC_LABEL_SUBST_TASK:
			subst = ipc_kobject_label_substitute_task(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_TASK_READ:
			subst = ipc_kobject_label_substitute_task_read(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD:
			subst = ipc_kobject_label_substitute_thread(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD_READ:
			subst = ipc_kobject_label_substitute_thread_read(space,
			    kolabel, port);
			break;
		default:
			panic("unexpected label: %llx", label);
		}

		if (subst != IP_NULL) {
			/* take a ref on subst before dropping any locks */
			ip_reference(subst);
			is_write_unlock(space);

			/*
			 * We do not hold a proper send right on `subst`,
			 * only a reference.
			 *
			 * Because of how thread/task termination works,
			 * there is no guarantee copy_send() would work,
			 * so we need to make_send().
			 *
			 * We can do that because ports tagged with
			 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
			 * the no-senders notification.
			 */

			ipc_port_release_send_and_unlock(port);
			/* no check: dPAC integrity */
			port = ipc_port_make_send_any(subst);
			ip_release(subst);
			*subst_portp = port;
			return true;
		}
	}

	/* space must hold every privilege bit named in the port's label */
	return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
	       (label & IPC_LABEL_SPACE_MASK);
}
1812