1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73 #include <mach_debug.h>
74 #include <mach_ipc_test.h>
75 #include <mach/mig.h>
76 #include <mach/port.h>
77 #include <mach/kern_return.h>
78 #include <mach/message.h>
79 #include <mach/mig_errors.h>
80 #include <mach/mach_notify.h>
81 #include <mach/ndr.h>
82 #include <mach/vm_param.h>
83
84 #include <mach/mach_vm_server.h>
85 #include <mach/mach_port_server.h>
86 #include <mach/mach_host_server.h>
87 #include <mach/host_priv_server.h>
88 #include <mach/clock_server.h>
89 #include <mach/clock_priv_server.h>
90 #include <mach/memory_entry_server.h>
91 #include <mach/memory_object_control_server.h>
92 #include <mach/memory_object_default_server.h>
93 #include <mach/processor_server.h>
94 #include <mach/processor_set_server.h>
95 #include <mach/task_server.h>
96 #include <mach/mach_voucher_server.h>
97 #include <mach/mach_voucher_attr_control_server.h>
98 #ifdef VM32_SUPPORT
99 #include <mach/vm32_map_server.h>
100 #endif
101 #include <mach/thread_act_server.h>
102 #include <mach/restartable_server.h>
103
104 #include <mach/exc_server.h>
105 #include <mach/mach_exc_server.h>
106 #include <mach/mach_eventlink_server.h>
107
108 #include <device/device_types.h>
109 #include <device/device_server.h>
110
111 #if CONFIG_USER_NOTIFICATION
112 #include <UserNotification/UNDReplyServer.h>
113 #endif
114
115 #if CONFIG_ARCADE
116 #include <mach/arcade_register_server.h>
117 #endif
118
119 #if CONFIG_AUDIT
120 #include <kern/audit_sessionport.h>
121 #endif
122
123 #if MACH_MACHINE_ROUTINES
124 #include <machine/machine_routines.h>
125 #endif /* MACH_MACHINE_ROUTINES */
126 #if XK_PROXY
127 #include <uk_xkern/xk_uproxy_server.h>
128 #endif /* XK_PROXY */
129
130 #include <kern/counter.h>
131 #include <kern/ipc_tt.h>
132 #include <kern/ipc_mig.h>
133 #include <kern/ipc_misc.h>
134 #include <kern/ipc_kobject.h>
135 #include <kern/host_notify.h>
136 #include <kern/misc_protos.h>
137
138 #if CONFIG_ARCADE
139 #include <kern/arcade.h>
140 #endif /* CONFIG_ARCADE */
141
142 #include <ipc/ipc_kmsg.h>
143 #include <ipc/ipc_port.h>
144 #include <ipc/ipc_voucher.h>
145 #include <kern/sync_sema.h>
146 #include <kern/work_interval.h>
147 #include <kern/task_ident.h>
148
149 #if HYPERVISOR
150 #include <kern/hv_support.h>
151 #endif
152
153 #include <vm/vm_protos.h>
154
155 #include <security/mac_mach_internal.h>
156
157 extern char *proc_name_address(void *p);
158 struct proc;
159 extern int proc_pid(struct proc *p);
160
/*
 * One bucket of the kernel MIG dispatch hash table (mig_buckets[] below).
 */
typedef struct {
	mach_msg_id_t num;      /* request msgh_id; 0 marks an empty bucket */
	mig_routine_t routine;  /* MIG server stub that handles this msgh_id */
	int size;               /* maximum reply message size, in bytes */
	int kobjidx;            /* kobject filter index, or KOBJ_IDX_NOT_SET */
} mig_hash_t;
167
/* no-senders handler for IKOT_PORT_SUBST_ONCE wrapper ports (defined below) */
static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);

IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT); /* vestigial, no real instance */
IPC_KOBJECT_DEFINE(IKOT_MEM_OBJ_CONTROL); /* vestigial, no real instance */
IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
    .iko_op_no_senders = ipc_kobject_subst_once_no_senders);

/* number of buckets in the MIG dispatch hash table (1031 is prime) */
#define MAX_MIG_ENTRIES 1031
/* identity hash: msgh_ids are spread by subsystem base already */
#define MIG_HASH(x) (x)

/* kobjidx value for routines that never got a filter index assigned */
#define KOBJ_IDX_NOT_SET (-1)

#ifndef max
#define max(a, b) (((a) > (b)) ? (a) : (b))
#endif /* max */

/* open-addressed dispatch table; sealed read-only after mig_init() runs */
static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
/* longest linear-probe displacement observed while building the table */
static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */

ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
    struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
190
/*
 * All MIG subsystems served by the kernel. mig_init() hashes every
 * routine of every subsystem listed here into mig_buckets[] at startup.
 * __startup_data: this array is discarded once boot completes.
 */
__startup_data
static const struct mig_subsystem *mig_e[] = {
	(const struct mig_subsystem *)&mach_vm_subsystem,
	(const struct mig_subsystem *)&mach_port_subsystem,
	(const struct mig_subsystem *)&mach_host_subsystem,
	(const struct mig_subsystem *)&host_priv_subsystem,
	(const struct mig_subsystem *)&clock_subsystem,
	(const struct mig_subsystem *)&clock_priv_subsystem,
	(const struct mig_subsystem *)&processor_subsystem,
	(const struct mig_subsystem *)&processor_set_subsystem,
	(const struct mig_subsystem *)&is_iokit_subsystem,
	(const struct mig_subsystem *)&task_subsystem,
	(const struct mig_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_subsystem *)&mach_voucher_subsystem,
	(const struct mig_subsystem *)&mach_voucher_attr_control_subsystem,
	(const struct mig_subsystem *)&memory_entry_subsystem,
	(const struct mig_subsystem *)&task_restartable_subsystem,

#if XK_PROXY
	(const struct mig_subsystem *)&do_uproxy_xk_uproxy_subsystem,
#endif /* XK_PROXY */
#if MACH_MACHINE_ROUTINES
	(const struct mig_subsystem *)&MACHINE_SUBSYSTEM,
#endif  /* MACH_MACHINE_ROUTINES */
#if     MCMSG && iPSC860
	(const struct mig_subsystem *)&mcmsg_info_subsystem,
#endif  /* MCMSG && iPSC860 */
	(const struct mig_subsystem *)&catch_exc_subsystem,
	(const struct mig_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_subsystem *)&mach_eventlink_subsystem,
};

/*
 * Per-IKOT operations vector, populated at startup through
 * IPC_KOBJECT_DEFINE / ipc_kobject_register_startup().
 */
static struct ipc_kobject_ops __security_const_late
    ipc_kobject_ops_array[IKOT_MAX_TYPE];
234
235 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)236 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
237 {
238 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
239 panic("trying to register kobject(%d) twice", ops->iko_op_type);
240 }
241 if (ops->iko_op_allow_upgrade && ops->iko_op_no_senders) {
242 panic("Cant receive notifications when upgradable");
243 }
244 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
245 }
246
247 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)248 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
249 {
250 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
251 panic("invalid kobject type %d", ikot);
252 }
253 return &ipc_kobject_ops_array[ikot];
254 }
255
/*
 *	Routine:	mig_init
 *	Purpose:
 *		Build the MIG dispatch hash table (mig_buckets[]) from the
 *		subsystems listed in mig_e[], resolving collisions with
 *		linear probing. Also computes mach_kobj_count and the
 *		maximum probe displacement used by find_mig_hash_entry().
 *		Runs once, at MACH_IPC startup (see STARTUP below).
 */
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/* replies must fit in a safe kalloc allocation plus trailer */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->routine[j].stub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/* linear-probe for a free bucket, tracking displacement */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine;
				/* prefer the per-routine reply bound, else the subsystem max */
				if (mig_e[i]->routine[j].max_reply_msg) {
					mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg;
				} else {
					mig_buckets[pos].size = mig_e[i]->maxsize;
				}

				/* filter indices are assigned later via ipc_kobject_set_kobjidx() */
				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				mig_table_max_displ = max(howmany, mig_table_max_displ);
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
314
315 /*
316 * Do a hash table lookup for given msgh_id. Return 0
317 * if not found.
318 */
319 static mig_hash_t *
find_mig_hash_entry(int msgh_id)320 find_mig_hash_entry(int msgh_id)
321 {
322 unsigned int i = (unsigned int)MIG_HASH(msgh_id);
323 int max_iter = mig_table_max_displ;
324 mig_hash_t *ptr;
325
326 do {
327 ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
328 } while (msgh_id != ptr->num && ptr->num && --max_iter);
329
330 if (!ptr->routine || msgh_id != ptr->num) {
331 ptr = (mig_hash_t *)0;
332 }
333
334 return ptr;
335 }
336
337 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t kmsg)338 ipc_kobject_reply_status(ipc_kmsg_t kmsg)
339 {
340 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
341 return KERN_SUCCESS;
342 }
343
344 return ((mig_reply_error_t *)kmsg->ikm_header)->RetCode;
345 }
346
347 /*
348 * Routine: ipc_kobject_set_kobjidx
349 * Purpose:
350 * Set the index for the kobject filter
351 * mask for a given message ID.
352 */
353 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)354 ipc_kobject_set_kobjidx(
355 int msgh_id,
356 int index)
357 {
358 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
359
360 if (ptr == (mig_hash_t *)0) {
361 return KERN_INVALID_ARGUMENT;
362 }
363
364 assert(index < mach_kobj_count);
365 ptr->kobjidx = index;
366
367 return KERN_SUCCESS;
368 }
369
370 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)371 ipc_kobject_init_reply(
372 ipc_kmsg_t reply,
373 const ipc_kmsg_t request,
374 kern_return_t kr)
375 {
376 #define InP ((mach_msg_header_t *) request->ikm_header)
377 #define OutP ((mig_reply_error_t *) reply->ikm_header)
378
379 OutP->NDR = NDR_record;
380 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
381
382 OutP->Head.msgh_bits =
383 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
384 OutP->Head.msgh_remote_port = InP->msgh_local_port;
385 OutP->Head.msgh_local_port = MACH_PORT_NULL;
386 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
387 OutP->Head.msgh_id = InP->msgh_id + 100;
388
389 OutP->RetCode = kr;
390 #undef InP
391 #undef OutP
392 }
393
394 /*
395 * Routine: ipc_kobject_server_internal
396 * Purpose:
397 * Handle a message sent to the kernel.
398 * Generates a reply message.
399 * Version for Untyped IPC.
400 * Conditions:
401 * Nothing locked.
402 */
403 static kern_return_t
ipc_kobject_server_internal(ipc_port_t port,ipc_kmsg_t request,ipc_kmsg_t * replyp)404 ipc_kobject_server_internal(
405 ipc_port_t port,
406 ipc_kmsg_t request,
407 ipc_kmsg_t *replyp)
408 {
409 const int request_msgh_id = request->ikm_header->msgh_id;
410 ipc_kmsg_t reply = IKM_NULL;
411 mach_msg_size_t reply_size;
412 bool exec_token_changed = false;
413 mig_hash_t *ptr;
414
415 /* Find corresponding mig_hash entry, if any */
416 ptr = find_mig_hash_entry(request_msgh_id);
417
418 /* Get the reply_size. */
419 if (ptr == (mig_hash_t *)0) {
420 reply_size = sizeof(mig_reply_error_t);
421 } else {
422 reply_size = ptr->size;
423 }
424
425 /*
426 * MIG should really assure no data leakage -
427 * but until it does, pessimistically zero the
428 * whole reply buffer.
429 */
430 reply = ipc_kmsg_alloc(reply_size, 0,
431 IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);
432
433 ipc_kobject_init_reply(reply, request, KERN_SUCCESS);
434
435 /*
436 * Find the routine to call, and call it
437 * to perform the kernel function
438 */
439 if (ptr) {
440 thread_ro_t tro = current_thread_ro();
441 task_t curtask = tro->tro_task;
442 struct proc *curproc = tro->tro_proc;
443 task_t task = TASK_NULL;
444 uint32_t exec_token;
445
446 /*
447 * Check if the port is a task port, if its a task port then
448 * snapshot the task exec token before the mig routine call.
449 */
450 if (ip_kotype(port) == IKOT_TASK_CONTROL && port != curtask->itk_self) {
451 task = convert_port_to_task_with_exec_token(port, &exec_token);
452 }
453
454 #if CONFIG_MACF
455 int idx = ptr->kobjidx;
456 uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);
457
458 /* Check kobject mig filter mask, if exists. */
459 if (filter_mask != NULL &&
460 idx != KOBJ_IDX_NOT_SET &&
461 !bitstr_test(filter_mask, idx) &&
462 mac_task_kobj_msg_evaluate != NULL) {
463 /* Not in filter mask, evaluate policy. */
464 kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
465 request_msgh_id, idx);
466 if (kr != KERN_SUCCESS) {
467 ((mig_reply_error_t *) reply->ikm_header)->RetCode = kr;
468 goto skip_kobjcall;
469 }
470 }
471 #endif /* CONFIG_MACF */
472
473 (*ptr->routine)(request->ikm_header, reply->ikm_header);
474
475 #if CONFIG_MACF
476 skip_kobjcall:
477 #endif
478
479 /* Check if the exec token changed during the mig routine */
480 if (task != TASK_NULL) {
481 if (exec_token != task->exec_token) {
482 exec_token_changed = true;
483 }
484 task_deallocate(task);
485 }
486
487 counter_inc(&kernel_task->messages_received);
488 } else {
489 #if DEVELOPMENT || DEBUG
490 printf("ipc_kobject_server: bogus kernel message, id=%d\n",
491 request->ikm_header->msgh_id);
492 #endif /* DEVELOPMENT || DEBUG */
493 _MIG_MSGID_INVALID(request->ikm_header->msgh_id);
494
495 ((mig_reply_error_t *)reply->ikm_header)->RetCode = MIG_BAD_ID;
496 }
497
498 /* Fail the MIG call if the task exec token changed during the call */
499 if (exec_token_changed && ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
500 /*
501 * Create a new reply msg with error and destroy the old reply msg.
502 */
503 ipc_kmsg_t new_reply = ipc_kmsg_alloc(reply_size, 0,
504 IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO |
505 IPC_KMSG_ALLOC_NOFAIL);
506
507 /*
508 * Initialize the new reply message.
509 */
510 {
511 #define OutP_new ((mig_reply_error_t *) new_reply->ikm_header)
512 #define OutP_old ((mig_reply_error_t *) reply->ikm_header)
513
514 OutP_new->NDR = OutP_old->NDR;
515 OutP_new->Head.msgh_size = sizeof(mig_reply_error_t);
516 OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
517 OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port;
518 OutP_new->Head.msgh_local_port = MACH_PORT_NULL;
519 OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL;
520 OutP_new->Head.msgh_id = OutP_old->Head.msgh_id;
521
522 /* Set the error as KERN_INVALID_TASK */
523 OutP_new->RetCode = KERN_INVALID_TASK;
524
525 #undef OutP_new
526 #undef OutP_old
527 }
528
529 /*
530 * Destroy everything in reply except the reply port right,
531 * which is needed in the new reply message.
532 */
533 ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_SKIP_REMOTE | IPC_KMSG_DESTROY_NOT_SIGNED);
534 reply = new_reply;
535 } else if (ipc_kobject_reply_status(reply) == MIG_NO_REPLY) {
536 /*
537 * The server function will send a reply message
538 * using the reply port right, which it has saved.
539 */
540 ipc_kmsg_free(reply);
541 reply = IKM_NULL;
542 }
543
544 *replyp = reply;
545 return KERN_SUCCESS;
546 }
547
548
549 /*
550 * Routine: ipc_kobject_server
551 * Purpose:
552 * Handle a message sent to the kernel.
553 * Generates a reply message.
554 * Version for Untyped IPC.
555 *
556 * Ownership of the incoming rights (from the request)
557 * are transferred on success (wether a reply is made or not).
558 *
559 * Conditions:
560 * Nothing locked.
561 */
562 ipc_kmsg_t
ipc_kobject_server(ipc_port_t port,ipc_kmsg_t request,mach_msg_option_t option __unused)563 ipc_kobject_server(
564 ipc_port_t port,
565 ipc_kmsg_t request,
566 mach_msg_option_t option __unused)
567 {
568 #if DEVELOPMENT || DEBUG
569 const int request_msgh_id = request->ikm_header->msgh_id;
570 #endif
571 ipc_port_t request_voucher_port;
572 ipc_kmsg_t reply = IKM_NULL;
573 kern_return_t kr;
574
575 ipc_kmsg_trace_send(request, option);
576
577 if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
578 kr = uext_server(port, request, &reply);
579 } else {
580 kr = ipc_kobject_server_internal(port, request, &reply);
581 }
582
583 if (kr != KERN_SUCCESS) {
584 assert(kr != MACH_SEND_TIMED_OUT &&
585 kr != MACH_SEND_INTERRUPTED &&
586 kr != MACH_SEND_INVALID_DEST);
587 assert(reply == IKM_NULL);
588
589 /* convert the server error into a MIG error */
590 reply = ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0,
591 IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO);
592 ipc_kobject_init_reply(reply, request, kr);
593 }
594
595 counter_inc(&kernel_task->messages_sent);
596 /*
597 * Destroy destination. The following code differs from
598 * ipc_object_destroy in that we release the send-once
599 * right instead of generating a send-once notification
600 * (which would bring us here again, creating a loop).
601 * It also differs in that we only expect send or
602 * send-once rights, never receive rights.
603 */
604 switch (MACH_MSGH_BITS_REMOTE(request->ikm_header->msgh_bits)) {
605 case MACH_MSG_TYPE_PORT_SEND:
606 ipc_port_release_send(request->ikm_header->msgh_remote_port);
607 break;
608
609 case MACH_MSG_TYPE_PORT_SEND_ONCE:
610 ipc_port_release_sonce(request->ikm_header->msgh_remote_port);
611 break;
612
613 default:
614 panic("ipc_kobject_server: strange destination rights");
615 }
616
617 /*
618 * Destroy voucher. The kernel MIG servers never take ownership
619 * of vouchers sent in messages. Swallow any such rights here.
620 */
621 request_voucher_port = ipc_kmsg_get_voucher_port(request);
622 if (IP_VALID(request_voucher_port)) {
623 assert(MACH_MSG_TYPE_PORT_SEND ==
624 MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits));
625 ipc_port_release_send(request_voucher_port);
626 ipc_kmsg_clear_voucher_port(request);
627 }
628
629 if (reply == IKM_NULL ||
630 ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
631 /*
632 * The server function is responsible for the contents
633 * of the message. The reply port right is moved
634 * to the reply message, and we have deallocated
635 * the destination port right, so we just need
636 * to free the kmsg.
637 */
638 ipc_kmsg_free(request);
639 } else {
640 /*
641 * The message contents of the request are intact.
642 * Remote port has been released above. Do not destroy
643 * the reply port right either, which is needed in the reply message.
644 */
645 ipc_kmsg_destroy(request, IPC_KMSG_DESTROY_SKIP_LOCAL | IPC_KMSG_DESTROY_SKIP_REMOTE);
646 }
647
648 if (reply != IKM_NULL) {
649 ipc_port_t reply_port = reply->ikm_header->msgh_remote_port;
650
651 if (!IP_VALID(reply_port)) {
652 /*
653 * Can't queue the reply message if the destination
654 * (the reply port) isn't valid.
655 */
656 ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
657 reply = IKM_NULL;
658 } else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
659 /* do not lock reply port, use raw pointer comparison */
660
661 /*
662 * Don't send replies to kobject kernel ports.
663 */
664 #if DEVELOPMENT || DEBUG
665 printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
666 __func__, ip_kotype(reply_port), request_msgh_id);
667 #endif /* DEVELOPMENT || DEBUG */
668 ipc_kmsg_destroy(reply, IPC_KMSG_DESTROY_NOT_SIGNED);
669 reply = IKM_NULL;
670 }
671 }
672
673 return reply;
674 }
675
/*
 *	Routine:	ipc_kobject_set_raw
 *	Purpose:
 *		Store a kobject pointer into a port's ip_kobject slot.
 *		On ptrauth hardware, non-NULL pointers are signed with a
 *		discriminator blended from the storage address and the
 *		kobject type, so a swapped or retyped pointer fails to
 *		authenticate in ipc_kobject_get_raw().
 *	Conditions:
 *		Raw store: caller is responsible for any needed locking.
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	if (kobject) {
		/* fold the kobject type into the signing discriminator */
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_sign_unauthenticated(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
697
/*
 *	Routine:	ipc_kobject_set_internal
 *	Purpose:
 *		Mark the port as a kobject of the given type in its
 *		io_bits, then store the (possibly signed) kobject pointer.
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t          port,
	ipc_kobject_t       kobject,
	ipc_kobject_type_t  type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type | IO_BITS_KOBJECT);
	ipc_kobject_set_raw(port, kobject, type);
}
708
709 /*
710 * Routine: ipc_kobject_get_raw
711 * Purpose:
712 * Returns the kobject pointer of a specified port.
713 *
714 * This returns the current value of the kobject pointer,
715 * without any validation (the caller is expected to do
716 * the validation it needs).
717 *
718 * Conditions:
719 * The port is a kobject of the proper type.
720 */
721 __header_always_inline ipc_kobject_t
ipc_kobject_get_raw(ipc_port_t port,ipc_kobject_type_t type)722 ipc_kobject_get_raw(
723 ipc_port_t port,
724 ipc_kobject_type_t type)
725 {
726 uintptr_t *store = &port->ip_kobject;
727 ipc_kobject_t kobject = (ipc_kobject_t)*store;
728
729 #if __has_feature(ptrauth_calls)
730 if (kobject) {
731 type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
732 kobject = ptrauth_auth_data(kobject,
733 ptrauth_key_process_independent_data,
734 ptrauth_blend_discriminator(store, type));
735 }
736 #else
737 (void)type;
738 #endif // __has_feature(ptrauth_calls)
739
740 return kobject;
741 }
742
743 /*
744 * Routine: ipc_kobject_get_locked
745 * Purpose:
746 * Returns the kobject pointer of a specified port,
747 * for an expected type.
748 *
749 * Returns IKO_NULL if the port isn't active.
750 *
751 * This function may be used when:
752 * - the port lock is held
753 * - the kobject association stays while there
754 * are any outstanding rights.
755 *
756 * Conditions:
757 * The port is a kobject of the proper type.
758 */
759 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)760 ipc_kobject_get_locked(
761 ipc_port_t port,
762 ipc_kobject_type_t type)
763 {
764 ipc_kobject_t kobject = IKO_NULL;
765
766 if (ip_active(port) && type == ip_kotype(port)) {
767 kobject = ipc_kobject_get_raw(port, type);
768 }
769
770 return kobject;
771 }
772
773 /*
774 * Routine: ipc_kobject_get_stable
775 * Purpose:
776 * Returns the kobject pointer of a specified port,
777 * for an expected type, for types where the port/kobject
778 * association is permanent.
779 *
780 * Returns IKO_NULL if the port isn't active.
781 *
782 * Conditions:
783 * The port is a kobject of the proper type.
784 */
785 ipc_kobject_t
ipc_kobject_get_stable(ipc_port_t port,ipc_kobject_type_t type)786 ipc_kobject_get_stable(
787 ipc_port_t port,
788 ipc_kobject_type_t type)
789 {
790 assert(ipc_kobject_ops_get(type)->iko_op_stable);
791 return ipc_kobject_get_locked(port, type);
792 }
793
794 /*
795 * Routine: ipc_kobject_init_port
796 * Purpose:
797 * Initialize a kobject port with the given types and options.
798 *
799 * This function never fails.
800 */
801 static inline void
ipc_kobject_init_port(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)802 ipc_kobject_init_port(
803 ipc_port_t port,
804 ipc_kobject_t kobject,
805 ipc_kobject_type_t type,
806 ipc_kobject_alloc_options_t options)
807 {
808 ipc_kobject_set_internal(port, kobject, type);
809
810 if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
811 ipc_port_make_send_locked(port);
812 }
813 if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
814 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
815 ip_reference(port);
816 }
817 if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
818 port->ip_no_grant = 1;
819 }
820 if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
821 port->ip_immovable_send = 1;
822 }
823 if (options & IPC_KOBJECT_ALLOC_PINNED) {
824 port->ip_pinned = 1;
825 }
826 }
827
828 /*
829 * Routine: ipc_kobject_alloc_port
830 * Purpose:
831 * Allocate a kobject port in the kernel space of the specified type.
832 *
833 * This function never fails.
834 *
835 * Conditions:
836 * No locks held (memory is allocated)
837 */
838 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)839 ipc_kobject_alloc_port(
840 ipc_kobject_t kobject,
841 ipc_kobject_type_t type,
842 ipc_kobject_alloc_options_t options)
843 {
844 ipc_port_t port;
845
846 port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
847 if (port == IP_NULL) {
848 panic("ipc_kobject_alloc_port(): failed to allocate port");
849 }
850
851 ipc_kobject_init_port(port, kobject, type, options);
852 return port;
853 }
854
855 /*
856 * Routine: ipc_kobject_alloc_labeled_port
857 * Purpose:
858 * Allocate a kobject port and associated mandatory access label
859 * in the kernel space of the specified type.
860 *
861 * This function never fails.
862 *
863 * Conditions:
864 * No locks held (memory is allocated)
865 */
866
867 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)868 ipc_kobject_alloc_labeled_port(
869 ipc_kobject_t kobject,
870 ipc_kobject_type_t type,
871 ipc_label_t label,
872 ipc_kobject_alloc_options_t options)
873 {
874 ipc_port_t port;
875
876 port = ipc_kobject_alloc_port(kobject, type, options);
877
878 ipc_port_set_label(port, label);
879
880 return port;
881 }
882
883 static void
ipc_kobject_subst_once_no_senders(ipc_port_t port,mach_port_mscount_t mscount)884 ipc_kobject_subst_once_no_senders(
885 ipc_port_t port,
886 mach_port_mscount_t mscount)
887 {
888 ipc_port_t ko_port;
889
890 ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
891
892 if (ko_port) {
893 /*
894 * Clean up the right if the wrapper wasn't hollowed out
895 * by ipc_kobject_alloc_subst_once().
896 */
897 ipc_port_release_send(ko_port);
898 }
899 }
900
901 /*
902 * Routine: ipc_kobject_alloc_subst_once
903 * Purpose:
904 * Make a port that will be substituted by the kolabel
905 * rules once, preventing the next substitution (of its target)
906 * to happen if any.
907 *
908 * Returns:
909 * A port with a send right, that will substitute to its "kobject".
910 *
911 * Conditions:
912 * No locks held (memory is allocated).
913 *
914 * `target` holds a send-right donated to this function,
915 * consumed in ipc_kobject_subst_once_no_senders().
916 */
917 ipc_port_t
ipc_kobject_alloc_subst_once(ipc_port_t target)918 ipc_kobject_alloc_subst_once(
919 ipc_port_t target)
920 {
921 if (!IP_VALID(target)) {
922 return target;
923 }
924 return ipc_kobject_alloc_labeled_port(target,
925 IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
926 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
927 }
928
929 /*
930 * Routine: ipc_kobject_make_send_lazy_alloc_port
931 * Purpose:
932 * Make a send once for a kobject port.
933 *
934 * A location owning this port is passed in port_store.
935 * If no port exists, a port is made lazily.
936 *
937 * A send right is made for the port, and if this is the first one
938 * (possibly not for the first time), then the no-more-senders
939 * notification is rearmed.
940 *
941 * When a notification is armed, the kobject must donate
942 * one of its references to the port. It is expected
943 * the no-more-senders notification will consume this reference.
944 *
945 * Returns:
946 * TRUE if a notification was armed
947 * FALSE else
948 *
949 * Conditions:
950 * Nothing is locked, memory can be allocated.
951 * The caller must be able to donate a kobject reference to the port.
952 */
953 boolean_t
ipc_kobject_make_send_lazy_alloc_port(ipc_port_t * port_store,ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t alloc_opts,uint64_t __ptrauth_only ptrauth_discriminator)954 ipc_kobject_make_send_lazy_alloc_port(
955 ipc_port_t *port_store,
956 ipc_kobject_t kobject,
957 ipc_kobject_type_t type,
958 ipc_kobject_alloc_options_t alloc_opts,
959 uint64_t __ptrauth_only ptrauth_discriminator)
960 {
961 ipc_port_t port, previous, __ptrauth_only port_addr;
962 kern_return_t kr;
963
964 port = os_atomic_load(port_store, dependency);
965
966 #if __has_feature(ptrauth_calls)
967 /* If we're on a ptrauth system and this port is signed, authenticate and strip the pointer */
968 if ((alloc_opts & IPC_KOBJECT_PTRAUTH_STORE) && IP_VALID(port)) {
969 port = ptrauth_auth_data(port,
970 ptrauth_key_process_independent_data,
971 ptrauth_blend_discriminator(port_store, ptrauth_discriminator));
972 }
973 #endif // __has_feature(ptrauth_calls)
974
975 if (!IP_VALID(port)) {
976 port = ipc_kobject_alloc_port(kobject, type,
977 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST | alloc_opts);
978
979 #if __has_feature(ptrauth_calls)
980 if (alloc_opts & IPC_KOBJECT_PTRAUTH_STORE) {
981 port_addr = ptrauth_sign_unauthenticated(port,
982 ptrauth_key_process_independent_data,
983 ptrauth_blend_discriminator(port_store, ptrauth_discriminator));
984 } else {
985 port_addr = port;
986 }
987 #else
988 port_addr = port;
989 #endif // __has_feature(ptrauth_calls)
990
991 if (os_atomic_cmpxchgv(port_store, IP_NULL, port_addr, &previous, release)) {
992 return TRUE;
993 }
994
995 /*
996 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
997 * ipc_kobject_dealloc_port will handle
998 * IPC_KOBJECT_ALLOC_NSREQUEST.
999 */
1000 port->ip_mscount = 0;
1001 port->ip_srights = 0;
1002 ip_release_live(port);
1003 ipc_kobject_dealloc_port(port, 0, type);
1004
1005 port = previous;
1006 }
1007
1008 kr = ipc_kobject_make_send_nsrequest(port);
1009 assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
1010
1011 return kr == KERN_SUCCESS;
1012 }
1013
1014 /*
1015 * Routine: ipc_kobject_make_send_lazy_alloc_labeled_port
1016 * Purpose:
1017 * Make a send once for a kobject port.
1018 *
1019 * A location owning this port is passed in port_store.
1020 * If no port exists, a port is made lazily.
1021 *
1022 * A send right is made for the port, and if this is the first one
1023 * (possibly not for the first time), then the no-more-senders
1024 * notification is rearmed.
1025 *
1026 * When a notification is armed, the kobject must donate
1027 * one of its references to the port. It is expected
1028 * the no-more-senders notification will consume this reference.
1029 *
1030 * Returns:
1031 * TRUE if a notification was armed
1032 * FALSE else
1033 *
1034 * Conditions:
1035 * Nothing is locked, memory can be allocated.
1036 * The caller must be able to donate a kobject reference to the port.
1037 */
1038 boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(ipc_port_t * port_store,ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label)1039 ipc_kobject_make_send_lazy_alloc_labeled_port(
1040 ipc_port_t *port_store,
1041 ipc_kobject_t kobject,
1042 ipc_kobject_type_t type,
1043 ipc_label_t label)
1044 {
1045 ipc_port_t port, previous;
1046 kern_return_t kr;
1047
1048 port = os_atomic_load(port_store, dependency);
1049
1050 if (!IP_VALID(port)) {
1051 port = ipc_kobject_alloc_labeled_port(kobject, type, label,
1052 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
1053 if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
1054 return TRUE;
1055 }
1056
1057 /*
1058 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
1059 * ipc_kobject_dealloc_port will handle
1060 * IPC_KOBJECT_ALLOC_NSREQUEST.
1061 */
1062 port->ip_mscount = 0;
1063 port->ip_srights = 0;
1064 ip_release_live(port);
1065 ipc_kobject_dealloc_port(port, 0, type);
1066
1067 port = previous;
1068 assert(ip_is_kolabeled(port));
1069 }
1070
1071 kr = ipc_kobject_make_send_nsrequest(port);
1072 assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
1073
1074 return kr == KERN_SUCCESS;
1075 }
1076
1077 /*
1078 * Routine: ipc_kobject_nsrequest_locked
1079 * Purpose:
1080 * Arm the no-senders notification for the given kobject
1081 * if it doesn't have one armed yet.
1082 *
1083 * Conditions:
1084 * Port is locked and active.
1085 *
1086 * Returns:
1087 * KERN_SUCCESS: the notification was armed
1088 * KERN_ALREADY_WAITING: the notification was already armed
1089 * KERN_FAILURE: the notification would fire immediately
1090 */
1091 static inline kern_return_t
ipc_kobject_nsrequest_locked(ipc_port_t port,mach_port_mscount_t sync)1092 ipc_kobject_nsrequest_locked(
1093 ipc_port_t port,
1094 mach_port_mscount_t sync)
1095 {
1096 if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
1097 return KERN_ALREADY_WAITING;
1098 }
1099
1100 if (port->ip_srights == 0 && sync <= port->ip_mscount) {
1101 return KERN_FAILURE;
1102 }
1103
1104 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
1105 ip_reference(port);
1106 return KERN_SUCCESS;
1107 }
1108
1109
1110 /*
1111 * Routine: ipc_kobject_nsrequest
1112 * Purpose:
1113 * Arm the no-senders notification for the given kobject
1114 * if it doesn't have one armed yet.
1115 *
1116 * Returns:
1117 * KERN_SUCCESS: the notification was armed
1118 * KERN_ALREADY_WAITING: the notification was already armed
1119 * KERN_FAILURE: the notification would fire immediately
1120 * KERN_INVALID_RIGHT: the port is dead
1121 */
1122 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1123 ipc_kobject_nsrequest(
1124 ipc_port_t port,
1125 mach_port_mscount_t sync,
1126 mach_port_mscount_t *mscount)
1127 {
1128 kern_return_t kr = KERN_INVALID_RIGHT;
1129
1130 if (IP_VALID(port)) {
1131 ip_mq_lock(port);
1132
1133 if (mscount) {
1134 *mscount = port->ip_mscount;
1135 }
1136 if (ip_active(port)) {
1137 kr = ipc_kobject_nsrequest_locked(port, sync);
1138 }
1139
1140 ip_mq_unlock(port);
1141 } else if (mscount) {
1142 *mscount = 0;
1143 }
1144
1145 return kr;
1146 }
1147
1148
1149 /*
1150 * Routine: ipc_kobject_make_send_nsrequest
1151 * Purpose:
1152 * Make a send right for a kobject port.
1153 *
1154 * Then the no-more-senders notification is armed
1155 * if it wasn't already.
1156 *
1157 * Conditions:
1158 * Nothing is locked.
1159 *
1160 * Returns:
1161 * KERN_SUCCESS: the notification was armed
1162 * KERN_ALREADY_WAITING: the notification was already armed
1163 * KERN_INVALID_RIGHT: the port is dead
1164 */
1165 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port)1166 ipc_kobject_make_send_nsrequest(
1167 ipc_port_t port)
1168 {
1169 kern_return_t kr = KERN_INVALID_RIGHT;
1170
1171 if (IP_VALID(port)) {
1172 ip_mq_lock(port);
1173 if (ip_active(port)) {
1174 ipc_port_make_send_locked(port);
1175 kr = ipc_kobject_nsrequest_locked(port, 0);
1176 assert(kr != KERN_FAILURE);
1177 }
1178 ip_mq_unlock(port);
1179 }
1180
1181 return kr;
1182 }
1183
1184 static inline ipc_kobject_t
ipc_kobject_disable_internal(ipc_port_t port,ipc_kobject_type_t type)1185 ipc_kobject_disable_internal(
1186 ipc_port_t port,
1187 ipc_kobject_type_t type)
1188 {
1189 ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1190
1191 port->ip_kobject = 0;
1192 if (ip_is_kolabeled(port)) {
1193 port->ip_kolabel->ikol_alt_port = IP_NULL;
1194 }
1195
1196 return kobject;
1197 }
1198
/*
 *	Routine:	ipc_kobject_dealloc_port_and_unlock
 *	Purpose:
 *		Destroys a port allocated with any of the ipc_kobject_alloc*
 *		functions.
 *
 *		This will atomically:
 *		- make the port inactive,
 *		- optionally check the make send count
 *		- disable (nil-out) the kobject pointer for kobjects without
 *		  a destroy callback.
 *
 *		The port will retain its kobject-ness and kobject type.
 *
 *	Returns:
 *		The kobject pointer that was set prior to this call
 *		(possibly NULL if the kobject was already disabled).
 *
 *	Conditions:
 *		The port is active and locked.
 *		On return the port is inactive and unlocked.
 */
1222 __abortlike
1223 static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port,ipc_kobject_type_t type)1224 __ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
1225 {
1226 panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
1227 }
1228
1229 __abortlike
1230 static void
__ipc_kobject_dealloc_bad_mscount_panic(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1231 __ipc_kobject_dealloc_bad_mscount_panic(
1232 ipc_port_t port,
1233 mach_port_mscount_t mscount,
1234 ipc_kobject_type_t type)
1235 {
1236 panic("unexpected make-send count: %p[%d], %d, %d",
1237 port, type, port->ip_mscount, mscount);
1238 }
1239
1240 __abortlike
1241 static void
__ipc_kobject_dealloc_bad_srights_panic(ipc_port_t port,ipc_kobject_type_t type)1242 __ipc_kobject_dealloc_bad_srights_panic(
1243 ipc_port_t port,
1244 ipc_kobject_type_t type)
1245 {
1246 panic("unexpected send right count: %p[%d], %d",
1247 port, type, port->ip_srights);
1248 }
1249
1250 ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1251 ipc_kobject_dealloc_port_and_unlock(
1252 ipc_port_t port,
1253 mach_port_mscount_t mscount,
1254 ipc_kobject_type_t type)
1255 {
1256 ipc_kobject_t kobject = IKO_NULL;
1257 ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);
1258
1259 require_ip_active(port);
1260
1261 if (ip_kotype(port) != type) {
1262 __ipc_kobject_dealloc_bad_type_panic(port, type);
1263 }
1264
1265 if (mscount && port->ip_mscount != mscount) {
1266 __ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
1267 }
1268 if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
1269 __ipc_kobject_dealloc_bad_srights_panic(port, type);
1270 }
1271
1272 if (!ops->iko_op_destroy) {
1273 kobject = ipc_kobject_disable_internal(port, type);
1274 }
1275
1276 ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);
1277
1278 return kobject;
1279 }
1280
1281 /*
1282 * Routine: ipc_kobject_dealloc_port
1283 * Purpose:
1284 * Destroys a port allocated with any of the ipc_kobject_alloc*
1285 * functions.
1286 *
1287 * This will atomically:
1288 * - make the port inactive,
1289 * - optionally check the make send count
1290 * - disable (nil-out) the kobject pointer for kobjects without
1291 * a destroy callback.
1292 *
1293 * The port will retain its kobject-ness and kobject type.
1294 *
1295 *
1296 * Returns:
1297 * The kobject pointer that was set prior to this call
1298 * (possibly NULL if the kobject was already disabled).
1299 *
1300 * Conditions:
1301 * Nothing is locked.
1302 * The port is active.
1303 * On return the port is inactive.
1304 */
1305 ipc_kobject_t
ipc_kobject_dealloc_port(ipc_port_t port,mach_port_mscount_t mscount,ipc_kobject_type_t type)1306 ipc_kobject_dealloc_port(
1307 ipc_port_t port,
1308 mach_port_mscount_t mscount,
1309 ipc_kobject_type_t type)
1310 {
1311 ip_mq_lock(port);
1312 return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
1313 }
1314
1315 /*
1316 * Routine: ipc_kobject_enable
1317 * Purpose:
1318 * Make a port represent a kernel object of the given type.
1319 * The caller is responsible for handling refs for the
1320 * kernel object, if necessary.
1321 * Conditions:
1322 * Nothing locked.
1323 * The port must be active.
1324 */
1325 void
ipc_kobject_enable(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1326 ipc_kobject_enable(
1327 ipc_port_t port,
1328 ipc_kobject_t kobject,
1329 ipc_kobject_type_t type)
1330 {
1331 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1332
1333 ip_mq_lock(port);
1334 require_ip_active(port);
1335
1336 if (type != ip_kotype(port)) {
1337 panic("%s: unexpected kotype of port %p: want %d, got %d",
1338 __func__, port, type, ip_kotype(port));
1339 }
1340
1341 ipc_kobject_set_raw(port, kobject, type);
1342
1343 ip_mq_unlock(port);
1344 }
1345
1346 /*
1347 * Routine: ipc_kobject_disable_locked
1348 * Purpose:
1349 * Clear the kobject pointer for a port.
1350 * Conditions:
1351 * The port is locked.
1352 * Returns the current kobject pointer.
1353 */
1354 ipc_kobject_t
ipc_kobject_disable_locked(ipc_port_t port,ipc_kobject_type_t type)1355 ipc_kobject_disable_locked(
1356 ipc_port_t port,
1357 ipc_kobject_type_t type)
1358 {
1359 if (ip_active(port)) {
1360 assert(!ipc_kobject_ops_get(type)->iko_op_stable);
1361 }
1362
1363 if (ip_kotype(port) != type) {
1364 panic("port %p of type %d, expecting %d",
1365 port, ip_kotype(port), type);
1366 }
1367
1368 return ipc_kobject_disable_internal(port, type);
1369 }
1370
1371 /*
1372 * Routine: ipc_kobject_disable
1373 * Purpose:
1374 * Clear the kobject pointer for a port.
1375 * Conditions:
1376 * Nothing locked.
1377 * Returns the current kobject pointer.
1378 */
1379 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1380 ipc_kobject_disable(
1381 ipc_port_t port,
1382 ipc_kobject_type_t type)
1383 {
1384 ipc_kobject_t kobject;
1385
1386 ip_mq_lock(port);
1387 kobject = ipc_kobject_disable_locked(port, type);
1388 ip_mq_unlock(port);
1389
1390 return kobject;
1391 }
1392
1393 static inline bool
ipc_kobject_may_upgrade(ipc_port_t port)1394 ipc_kobject_may_upgrade(ipc_port_t port)
1395 {
1396 if (!ip_active(port) || ip_kotype(port) != IKOT_NONE) {
1397 /* needs to be active and have no tag */
1398 return false;
1399 }
1400
1401 if (port->ip_tempowner || port->ip_specialreply) {
1402 /* union overlays with ip_kobject */
1403 return false;
1404 }
1405
1406 if (port->ip_has_watchport || ipc_port_has_prdrequest(port)) {
1407 /* outstanding watchport or port-destroyed is also disallowed */
1408 return false;
1409 }
1410
1411 return true;
1412 }
1413
1414 /*
1415 * Routine: ipc_kobject_upgrade_locked
1416 * Purpose:
1417 * Upgrades a port to kobject status
1418 * Only kobjects with iko_op_allow_upgrade can do this.
1419 * Conditions:
1420 * Port is locked
1421 */
1422 void
ipc_kobject_upgrade_locked(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1423 ipc_kobject_upgrade_locked(
1424 ipc_port_t port,
1425 ipc_kobject_t kobject,
1426 ipc_kobject_type_t type)
1427 {
1428 assert(ipc_kobject_may_upgrade(port));
1429 assert(ipc_kobject_ops_get(type)->iko_op_allow_upgrade);
1430 ipc_kobject_set_internal(port, kobject, type);
1431 }
1432
1433 /*
1434 * Routine: ipc_kobject_upgrade
1435 * Purpose:
1436 * Upgrades a port to kobject status
1437 * Only kobjects with iko_op_allow_upgrade can do this.
1438 * Returns:
1439 * KERN_SUCCESS: the upgrade was possible
1440 * KERN_INVALID_CAPABILITY: the upgrade wasn't possible
1441 * Conditions:
1442 * Nothing is locked
1443 */
1444 kern_return_t
ipc_kobject_upgrade(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1445 ipc_kobject_upgrade(
1446 ipc_port_t port,
1447 ipc_kobject_t kobject,
1448 ipc_kobject_type_t type)
1449 {
1450 kern_return_t kr = KERN_INVALID_CAPABILITY;
1451
1452 assert(ipc_kobject_ops_get(type)->iko_op_allow_upgrade);
1453
1454 ip_mq_lock(port);
1455
1456 if (ipc_kobject_may_upgrade(port)) {
1457 ipc_kobject_set_internal(port, kobject, type);
1458 kr = KERN_SUCCESS;
1459 }
1460
1461 ip_mq_unlock(port);
1462
1463 return kr;
1464 }
1465
1466 /*
1467 * Routine: ipc_kobject_downgrade_host_notify
1468 * Purpose:
1469 * Downgrade a kobject port back to receive right status.
1470 * Only IKOT_HOST_NOTIFY should use this facility.
1471 *
1472 * /!\ WARNING /!\
1473 *
1474 * This feature is breaking the kobject abstraction
1475 * and is grandfathered in. Accessing io_kotype() without a lock
1476 * only works because this is the only such kobject doing
1477 * this disgusting dance.
1478 *
1479 * Returns:
1480 * The kobject pointer previously set on the object.
1481 * Conditions:
1482 * Nothing is locked
1483 * The port doesn't need to be active
1484 */
1485 ipc_kobject_t
ipc_kobject_downgrade_host_notify(ipc_port_t port)1486 ipc_kobject_downgrade_host_notify(
1487 ipc_port_t port)
1488 {
1489 ipc_kobject_t kobject = IKO_NULL;
1490
1491 ip_mq_lock(port);
1492
1493 if (ip_kotype(port) == IKOT_HOST_NOTIFY) {
1494 kobject = ipc_kobject_disable_locked(port, IKOT_HOST_NOTIFY);
1495 io_bits_andnot(ip_to_object(port), IO_BITS_KOTYPE);
1496 }
1497
1498 ip_mq_unlock(port);
1499
1500 return kobject;
1501 }
1502
1503 /*
1504 * Routine: ipc_kobject_notify_no_senders
1505 * Purpose:
1506 * Handles a no-senders notification
1507 * sent to a kobject.
1508 *
1509 * A port reference is consumed.
1510 *
1511 * Conditions:
1512 * Nothing locked.
1513 */
1514 void
ipc_kobject_notify_no_senders(ipc_port_t port,mach_port_mscount_t mscount)1515 ipc_kobject_notify_no_senders(
1516 ipc_port_t port,
1517 mach_port_mscount_t mscount)
1518 {
1519 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1520
1521 assert(ops->iko_op_no_senders);
1522 ops->iko_op_no_senders(port, mscount);
1523
1524 /* consume the ref ipc_notify_no_senders_prepare left */
1525 ip_release(port);
1526 }
1527
1528 /*
1529 * Routine: ipc_kobject_notify_no_senders
1530 * Purpose:
1531 * Handles a send once notifications
1532 * sent to a kobject.
1533 *
1534 * A send-once port reference is consumed.
1535 *
1536 * Conditions:
1537 * Port is locked.
1538 */
1539 void
ipc_kobject_notify_send_once_and_unlock(ipc_port_t port)1540 ipc_kobject_notify_send_once_and_unlock(
1541 ipc_port_t port)
1542 {
1543 /*
1544 * drop the send once right while we hold the port lock.
1545 * we will keep a port reference while we run the possible
1546 * callouts to kobjects.
1547 *
1548 * This a simplified version of ipc_port_release_sonce()
1549 * since kobjects can't be special reply ports.
1550 */
1551 assert(!port->ip_specialreply);
1552
1553 if (port->ip_sorights == 0) {
1554 panic("Over-release of port %p send-once right!", port);
1555 }
1556
1557 port->ip_sorights--;
1558 ip_mq_unlock(port);
1559
1560 /*
1561 * because there's very few consumers,
1562 * the code here isn't generic as it's really not worth it.
1563 */
1564 switch (ip_kotype(port)) {
1565 case IKOT_TASK_RESUME:
1566 task_suspension_send_once(port);
1567 break;
1568 default:
1569 break;
1570 }
1571
1572 ip_release(port);
1573 }
1574
1575
1576 /*
1577 * Routine: ipc_kobject_destroy
1578 * Purpose:
1579 * Release any kernel object resources associated
1580 * with the port, which is being destroyed.
1581 *
1582 * This path to free object resources should only be
1583 * needed when resources are associated with a user's port.
1584 * In the normal case, when the kernel is the receiver,
1585 * the code calling ipc_kobject_dealloc_port() should clean
1586 * up the object resources.
1587 *
1588 * Cleans up any kobject label that might be present.
1589 * Conditions:
1590 * The port is not locked, but it is dead.
1591 */
1592 void
ipc_kobject_destroy(ipc_port_t port)1593 ipc_kobject_destroy(
1594 ipc_port_t port)
1595 {
1596 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1597
1598 if (ops->iko_op_permanent) {
1599 panic("trying to destroy an permanent port %p", port);
1600 }
1601 if (ops->iko_op_destroy) {
1602 ops->iko_op_destroy(port);
1603 }
1604
1605 if (ip_is_kolabeled(port)) {
1606 ipc_kobject_label_t labelp = port->ip_kolabel;
1607
1608 assert(labelp != NULL);
1609 assert(labelp->ikol_alt_port == IP_NULL);
1610 assert(ip_is_kobject(port));
1611 port->ip_kolabel = NULL;
1612 io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
1613 zfree(ipc_kobject_label_zone, labelp);
1614 }
1615 }
1616
1617 /*
1618 * Routine: ipc_kobject_label_substitute_task
1619 * Purpose:
1620 * Substitute a task control port for its immovable
1621 * equivalent when the receiver is that task.
1622 * Conditions:
1623 * Space is write locked and active.
1624 * Port is locked and active.
1625 * Returns:
1626 * - IP_NULL port if no substitution is to be done
1627 * - a valid port if a substitution needs to happen
1628 */
1629 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1630 ipc_kobject_label_substitute_task(
1631 ipc_space_t space,
1632 ipc_kobject_label_t kolabel,
1633 ipc_port_t port)
1634 {
1635 ipc_port_t subst = IP_NULL;
1636 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1637
1638 if (task != TASK_NULL && task == space->is_task) {
1639 if ((subst = kolabel->ikol_alt_port)) {
1640 return subst;
1641 }
1642 }
1643
1644 return IP_NULL;
1645 }
1646
1647 /*
1648 * Routine: ipc_kobject_label_substitute_thread
1649 * Purpose:
1650 * Substitute a thread control port for its immovable
1651 * equivalent when it belongs to the receiver task.
1652 * Conditions:
1653 * Space is write locked and active.
1654 * Port is locked and active.
1655 * Returns:
1656 * - IP_NULL port if no substitution is to be done
1657 * - a valid port if a substitution needs to happen
1658 */
1659 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1660 ipc_kobject_label_substitute_thread(
1661 ipc_space_t space,
1662 ipc_kobject_label_t kolabel,
1663 ipc_port_t port)
1664 {
1665 ipc_port_t subst = IP_NULL;
1666 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1667
1668 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1669 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1670 return subst;
1671 }
1672 }
1673
1674 return IP_NULL;
1675 }
1676
1677 /*
1678 * Routine: ipc_kobject_label_check
1679 * Purpose:
1680 * Check to see if the space is allowed to possess
1681 * a right for the given port. In order to qualify,
1682 * the space label must contain all the privileges
1683 * listed in the port/kobject label.
1684 *
1685 * Conditions:
1686 * Space is write locked and active.
1687 * Port is locked and active.
1688 *
1689 * Returns:
1690 * Whether the copyout is authorized.
1691 *
1692 * If a port substitution is requested, the space is unlocked,
1693 * the port is unlocked and its "right" consumed.
1694 *
1695 * As of now, substituted ports only happen for send rights.
1696 */
1697 bool
ipc_kobject_label_check(ipc_space_t space,ipc_port_t port,mach_msg_type_name_t msgt_name,ipc_object_copyout_flags_t * flags,ipc_port_t * subst_portp)1698 ipc_kobject_label_check(
1699 ipc_space_t space,
1700 ipc_port_t port,
1701 mach_msg_type_name_t msgt_name,
1702 ipc_object_copyout_flags_t *flags,
1703 ipc_port_t *subst_portp)
1704 {
1705 ipc_kobject_label_t kolabel;
1706 ipc_label_t label;
1707
1708 assert(is_active(space));
1709 assert(ip_active(port));
1710
1711 *subst_portp = IP_NULL;
1712
1713 /* Unlabled ports/kobjects are always allowed */
1714 if (!ip_is_kolabeled(port)) {
1715 return true;
1716 }
1717
1718 /* Never OK to copyout the receive right for a labeled kobject */
1719 if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
1720 panic("ipc_kobject_label_check: attempted receive right "
1721 "copyout for labeled kobject");
1722 }
1723
1724 kolabel = port->ip_kolabel;
1725 label = kolabel->ikol_label;
1726
1727 if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
1728 (label & IPC_LABEL_SUBST_MASK)) {
1729 ipc_port_t subst = IP_NULL;
1730
1731 if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
1732 return false;
1733 }
1734
1735 if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
1736 /*
1737 * The next check will _not_ substitute.
1738 * hollow out our one-time wrapper,
1739 * and steal its send right.
1740 */
1741 *flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
1742 subst = ipc_kobject_disable_locked(port,
1743 IKOT_PORT_SUBST_ONCE);
1744 is_write_unlock(space);
1745 ipc_port_release_send_and_unlock(port);
1746 if (subst == IP_NULL) {
1747 panic("subst-once port %p was consumed twice", port);
1748 }
1749 *subst_portp = subst;
1750 return true;
1751 }
1752
1753 switch (label & IPC_LABEL_SUBST_MASK) {
1754 case IPC_LABEL_SUBST_TASK:
1755 subst = ipc_kobject_label_substitute_task(space,
1756 kolabel, port);
1757 break;
1758 case IPC_LABEL_SUBST_THREAD:
1759 subst = ipc_kobject_label_substitute_thread(space,
1760 kolabel, port);
1761 break;
1762 default:
1763 panic("unexpected label: %llx", label);
1764 }
1765
1766 if (subst != IP_NULL) {
1767 ip_reference(subst);
1768 is_write_unlock(space);
1769
1770 /*
1771 * We do not hold a proper send right on `subst`,
1772 * only a reference.
1773 *
1774 * Because of how thread/task termination works,
1775 * there is no guarantee copy_send() would work,
1776 * so we need to make_send().
1777 *
1778 * We can do that because ports tagged with
1779 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
1780 * the no-senders notification.
1781 */
1782
1783 ipc_port_release_send_and_unlock(port);
1784 port = ipc_port_make_send(subst);
1785 ip_release(subst);
1786 *subst_portp = port;
1787 return true;
1788 }
1789 }
1790
1791 return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
1792 (label & IPC_LABEL_SPACE_MASK);
1793 }
1794