1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: kern/ipc_kobject.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Functions for letting a port represent a kernel object.
71 */
72
73 #include <mach_debug.h>
74 #include <mach_ipc_test.h>
75 #include <mach/mig.h>
76 #include <mach/port.h>
77 #include <mach/kern_return.h>
78 #include <mach/message.h>
79 #include <mach/mig_errors.h>
80 #include <mach/mach_notify.h>
81 #include <mach/ndr.h>
82 #include <mach/vm_param.h>
83
84 #include <mach/mach_vm_server.h>
85 #include <mach/mach_port_server.h>
86 #include <mach/mach_host_server.h>
87 #include <mach/host_priv_server.h>
88 #include <mach/clock_server.h>
89 #include <mach/clock_priv_server.h>
90 #include <mach/memory_entry_server.h>
91 #include <mach/memory_object_control_server.h>
92 #include <mach/memory_object_default_server.h>
93 #include <mach/processor_server.h>
94 #include <mach/processor_set_server.h>
95 #include <mach/task_server.h>
96 #include <mach/mach_voucher_server.h>
97 #include <mach/mach_voucher_attr_control_server.h>
98 #ifdef VM32_SUPPORT
99 #include <mach/vm32_map_server.h>
100 #endif
101 #include <mach/thread_act_server.h>
102 #include <mach/restartable_server.h>
103
104 #include <mach/exc_server.h>
105 #include <mach/mach_exc_server.h>
106 #include <mach/mach_eventlink_server.h>
107
108 #include <device/device_types.h>
109 #include <device/device_server.h>
110
111 #if CONFIG_USER_NOTIFICATION
112 #include <UserNotification/UNDReplyServer.h>
113 #endif
114
115 #if CONFIG_ARCADE
116 #include <mach/arcade_register_server.h>
117 #endif
118
119 #if CONFIG_AUDIT
120 #include <kern/audit_sessionport.h>
121 #endif
122
123 #if MACH_MACHINE_ROUTINES
124 #include <machine/machine_routines.h>
125 #endif /* MACH_MACHINE_ROUTINES */
126 #if XK_PROXY
127 #include <uk_xkern/xk_uproxy_server.h>
128 #endif /* XK_PROXY */
129
130 #include <kern/counter.h>
131 #include <kern/ipc_tt.h>
132 #include <kern/ipc_mig.h>
133 #include <kern/ipc_misc.h>
134 #include <kern/ipc_kobject.h>
135 #include <kern/host_notify.h>
136 #include <kern/misc_protos.h>
137
138 #if CONFIG_ARCADE
139 #include <kern/arcade.h>
140 #endif /* CONFIG_ARCADE */
141
142 #include <ipc/ipc_kmsg.h>
143 #include <ipc/ipc_port.h>
144 #include <ipc/ipc_voucher.h>
145 #include <kern/sync_sema.h>
146 #include <kern/work_interval.h>
147 #include <kern/task_ident.h>
148
149 #if HYPERVISOR
150 #include <kern/hv_support.h>
151 #endif
152
153 #include <vm/vm_protos.h>
154
155 #include <security/mac_mach_internal.h>
156
157 extern char *proc_name_address(void *p);
158 struct proc;
159 extern int proc_pid(struct proc *p);
160
/*
 * One entry of the kernel MIG dispatch hash table, mapping a request
 * msgh_id to the MIG server stub that handles it (filled by mig_init()).
 */
typedef struct {
	mach_msg_id_t num;      /* request msgh_id; 0 marks an empty bucket */
	mig_routine_t routine;  /* MIG server stub to invoke for this id */
	int size;               /* maximum reply message size for this routine */
	int kobjidx;            /* kobject filter index, or KOBJ_IDX_NOT_SET */
} mig_hash_t;
167
168 static void ipc_kobject_subst_once_no_senders(ipc_port_t, mach_msg_type_number_t);
169
170 IPC_KOBJECT_DEFINE(IKOT_MEMORY_OBJECT); /* vestigial, no real instance */
171 IPC_KOBJECT_DEFINE(IKOT_MEM_OBJ_CONTROL); /* vestigial, no real instance */
172 IPC_KOBJECT_DEFINE(IKOT_PORT_SUBST_ONCE,
173 .iko_op_no_senders = ipc_kobject_subst_once_no_senders);
174
175 #define MAX_MIG_ENTRIES 1031
176 #define MIG_HASH(x) (x)
177
178 #define KOBJ_IDX_NOT_SET (-1)
179
180 #ifndef max
181 #define max(a, b) (((a) > (b)) ? (a) : (b))
182 #endif /* max */
183
184 static SECURITY_READ_ONLY_LATE(mig_hash_t) mig_buckets[MAX_MIG_ENTRIES];
185 static SECURITY_READ_ONLY_LATE(int) mig_table_max_displ;
186 SECURITY_READ_ONLY_LATE(int) mach_kobj_count; /* count of total number of kobjects */
187
188 ZONE_DEFINE_TYPE(ipc_kobject_label_zone, "ipc kobject labels",
189 struct ipc_kobject_label, ZC_ZFREE_CLEARMEM);
190
/*
 * Table of all kernel MIG subsystems whose routines are dispatched
 * through mig_buckets[]; walked once at startup by mig_init().
 */
__startup_data
static const struct mig_subsystem *mig_e[] = {
	(const struct mig_subsystem *)&mach_vm_subsystem,
	(const struct mig_subsystem *)&mach_port_subsystem,
	(const struct mig_subsystem *)&mach_host_subsystem,
	(const struct mig_subsystem *)&host_priv_subsystem,
	(const struct mig_subsystem *)&clock_subsystem,
	(const struct mig_subsystem *)&clock_priv_subsystem,
	(const struct mig_subsystem *)&processor_subsystem,
	(const struct mig_subsystem *)&processor_set_subsystem,
	(const struct mig_subsystem *)&is_iokit_subsystem,
	(const struct mig_subsystem *)&task_subsystem,
	(const struct mig_subsystem *)&thread_act_subsystem,
#ifdef VM32_SUPPORT
	(const struct mig_subsystem *)&vm32_map_subsystem,
#endif
#if CONFIG_USER_NOTIFICATION
	(const struct mig_subsystem *)&UNDReply_subsystem,
#endif
	(const struct mig_subsystem *)&mach_voucher_subsystem,
	(const struct mig_subsystem *)&mach_voucher_attr_control_subsystem,
	(const struct mig_subsystem *)&memory_entry_subsystem,
	(const struct mig_subsystem *)&task_restartable_subsystem,

#if XK_PROXY
	(const struct mig_subsystem *)&do_uproxy_xk_uproxy_subsystem,
#endif /* XK_PROXY */
#if MACH_MACHINE_ROUTINES
	(const struct mig_subsystem *)&MACHINE_SUBSYSTEM,
#endif /* MACH_MACHINE_ROUTINES */
#if MCMSG && iPSC860
	(const struct mig_subsystem *)&mcmsg_info_subsystem,
#endif /* MCMSG && iPSC860 */
	(const struct mig_subsystem *)&catch_exc_subsystem,
	(const struct mig_subsystem *)&catch_mach_exc_subsystem,
#if CONFIG_ARCADE
	(const struct mig_subsystem *)&arcade_register_subsystem,
#endif
	(const struct mig_subsystem *)&mach_eventlink_subsystem,
};
231
232 static struct ipc_kobject_ops __security_const_late
233 ipc_kobject_ops_array[IKOT_MAX_TYPE];
234
235 void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)236 ipc_kobject_register_startup(ipc_kobject_ops_t ops)
237 {
238 if (ipc_kobject_ops_array[ops->iko_op_type].iko_op_type) {
239 panic("trying to register kobject(%d) twice", ops->iko_op_type);
240 }
241 if (ops->iko_op_allow_upgrade && ops->iko_op_no_senders) {
242 panic("Cant receive notifications when upgradable");
243 }
244 ipc_kobject_ops_array[ops->iko_op_type] = *ops;
245 }
246
247 static ipc_kobject_ops_t
ipc_kobject_ops_get(ipc_kobject_type_t ikot)248 ipc_kobject_ops_get(ipc_kobject_type_t ikot)
249 {
250 if (ikot < IKOT_NONE || ikot >= IKOT_MAX_TYPE) {
251 panic("invalid kobject type %d", ikot);
252 }
253 return &ipc_kobject_ops_array[ikot];
254 }
255
/*
 * Build the open-addressed MIG dispatch hash table (mig_buckets[])
 * from every subsystem listed in mig_e[], and record the maximum
 * probe displacement so lookups know when to give up.
 */
static void
mig_init(void)
{
	unsigned int i, n = sizeof(mig_e) / sizeof(const struct mig_subsystem *);
	int howmany;
	mach_msg_id_t j, pos, nentry, range;

	for (i = 0; i < n; i++) {
		range = mig_e[i]->end - mig_e[i]->start;
		if (!mig_e[i]->start || range < 0) {
			panic("the msgh_ids in mig_e[] aren't valid!");
		}

		/* replies must fit in a safe kalloc allocation plus trailer */
		if (mig_e[i]->maxsize > KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE) {
			panic("mig subsystem %d (%p) replies are too large (%d > %d)",
			    mig_e[i]->start, mig_e[i], mig_e[i]->maxsize,
			    KALLOC_SAFE_ALLOC_SIZE - MAX_TRAILER_SIZE);
		}

		for (j = 0; j < range; j++) {
			if (mig_e[i]->routine[j].stub_routine) {
				/* Only put real entries in the table */
				nentry = j + mig_e[i]->start;
				/* linear probe for a free bucket, counting displacement */
				for (pos = MIG_HASH(nentry) % MAX_MIG_ENTRIES, howmany = 1;
				    mig_buckets[pos].num;
				    pos++, pos = pos % MAX_MIG_ENTRIES, howmany++) {
					if (mig_buckets[pos].num == nentry) {
						printf("message id = %d\n", nentry);
						panic("multiple entries with the same msgh_id");
					}
					if (howmany == MAX_MIG_ENTRIES) {
						panic("the mig dispatch table is too small");
					}
				}

				mig_buckets[pos].num = nentry;
				mig_buckets[pos].routine = mig_e[i]->routine[j].stub_routine;
				/* prefer the per-routine reply bound; fall back to subsystem max */
				if (mig_e[i]->routine[j].max_reply_msg) {
					mig_buckets[pos].size = mig_e[i]->routine[j].max_reply_msg;
				} else {
					mig_buckets[pos].size = mig_e[i]->maxsize;
				}

				mig_buckets[pos].kobjidx = KOBJ_IDX_NOT_SET;

				mig_table_max_displ = max(howmany, mig_table_max_displ);
				mach_kobj_count++;
			}
		}
	}

	/* 77417305: pad to allow for MIG routines removals/cleanups */
	mach_kobj_count += 32;

	printf("mig_table_max_displ = %d mach_kobj_count = %d\n",
	    mig_table_max_displ, mach_kobj_count);
}
STARTUP(MACH_IPC, STARTUP_RANK_FIRST, mig_init);
314
315 /*
316 * Do a hash table lookup for given msgh_id. Return 0
317 * if not found.
318 */
319 static mig_hash_t *
find_mig_hash_entry(int msgh_id)320 find_mig_hash_entry(int msgh_id)
321 {
322 unsigned int i = (unsigned int)MIG_HASH(msgh_id);
323 int max_iter = mig_table_max_displ;
324 mig_hash_t *ptr;
325
326 do {
327 ptr = &mig_buckets[i++ % MAX_MIG_ENTRIES];
328 } while (msgh_id != ptr->num && ptr->num && --max_iter);
329
330 if (!ptr->routine || msgh_id != ptr->num) {
331 ptr = (mig_hash_t *)0;
332 }
333
334 return ptr;
335 }
336
337 static kern_return_t
ipc_kobject_reply_status(ipc_kmsg_t kmsg)338 ipc_kobject_reply_status(ipc_kmsg_t kmsg)
339 {
340 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
341 return KERN_SUCCESS;
342 }
343
344 return ((mig_reply_error_t *)kmsg->ikm_header)->RetCode;
345 }
346
347 /*
348 * Routine: ipc_kobject_set_kobjidx
349 * Purpose:
350 * Set the index for the kobject filter
351 * mask for a given message ID.
352 */
353 kern_return_t
ipc_kobject_set_kobjidx(int msgh_id,int index)354 ipc_kobject_set_kobjidx(
355 int msgh_id,
356 int index)
357 {
358 mig_hash_t *ptr = find_mig_hash_entry(msgh_id);
359
360 if (ptr == (mig_hash_t *)0) {
361 return KERN_INVALID_ARGUMENT;
362 }
363
364 assert(index < mach_kobj_count);
365 ptr->kobjidx = index;
366
367 return KERN_SUCCESS;
368 }
369
370 static void
ipc_kobject_init_reply(ipc_kmsg_t reply,const ipc_kmsg_t request,kern_return_t kr)371 ipc_kobject_init_reply(
372 ipc_kmsg_t reply,
373 const ipc_kmsg_t request,
374 kern_return_t kr)
375 {
376 #define InP ((mach_msg_header_t *) request->ikm_header)
377 #define OutP ((mig_reply_error_t *) reply->ikm_header)
378
379 OutP->NDR = NDR_record;
380 OutP->Head.msgh_size = sizeof(mig_reply_error_t);
381
382 OutP->Head.msgh_bits =
383 MACH_MSGH_BITS_SET(MACH_MSGH_BITS_LOCAL(InP->msgh_bits), 0, 0, 0);
384 OutP->Head.msgh_remote_port = InP->msgh_local_port;
385 OutP->Head.msgh_local_port = MACH_PORT_NULL;
386 OutP->Head.msgh_voucher_port = MACH_PORT_NULL;
387 OutP->Head.msgh_id = InP->msgh_id + 100;
388
389 OutP->RetCode = kr;
390 #undef InP
391 #undef OutP
392 }
393
394 /*
395 * Routine: ipc_kobject_server_internal
396 * Purpose:
397 * Handle a message sent to the kernel.
398 * Generates a reply message.
399 * Version for Untyped IPC.
400 * Conditions:
401 * Nothing locked.
402 */
static kern_return_t
ipc_kobject_server_internal(
	ipc_port_t              port,
	ipc_kmsg_t              request,
	ipc_kmsg_t              *replyp)
{
	const int request_msgh_id = request->ikm_header->msgh_id;
	ipc_kmsg_t reply = IKM_NULL;
	mach_msg_size_t reply_size;
	bool exec_token_changed = false;
	mig_hash_t *ptr;

	/* Find corresponding mig_hash entry, if any */
	ptr = find_mig_hash_entry(request_msgh_id);

	/* Get the reply_size. */
	if (ptr == (mig_hash_t *)0) {
		/* unknown msgh_id: a minimal error reply is all we need */
		reply_size = sizeof(mig_reply_error_t);
	} else {
		reply_size = ptr->size;
	}

	/*
	 * MIG should really assure no data leakage -
	 * but until it does, pessimistically zero the
	 * whole reply buffer.
	 */
	reply = ipc_kmsg_alloc(reply_size, 0,
	    IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO | IPC_KMSG_ALLOC_NOFAIL);

	/* pre-format as a successful simple reply; stubs overwrite as needed */
	ipc_kobject_init_reply(reply, request, KERN_SUCCESS);

	/*
	 * Find the routine to call, and call it
	 * to perform the kernel function
	 */
	if (ptr) {
		thread_ro_t tro = current_thread_ro();
		task_t curtask = tro->tro_task;
		struct proc *curproc = tro->tro_proc;
		task_t task = TASK_NULL;
		uint32_t exec_token;

		/*
		 * Check if the port is a task port, if its a task port then
		 * snapshot the task exec token before the mig routine call.
		 */
		if (ip_kotype(port) == IKOT_TASK_CONTROL && port != curtask->itk_self) {
			task = convert_port_to_task_with_exec_token(port, &exec_token);
		}

#if CONFIG_MACF
		int idx = ptr->kobjidx;
		uint8_t *filter_mask = task_get_mach_kobj_filter_mask(curtask);

		/* Check kobject mig filter mask, if exists. */
		if (filter_mask != NULL &&
		    idx != KOBJ_IDX_NOT_SET &&
		    !bitstr_test(filter_mask, idx) &&
		    mac_task_kobj_msg_evaluate != NULL) {
			/* Not in filter mask, evaluate policy. */
			kern_return_t kr = mac_task_kobj_msg_evaluate(curproc,
			    request_msgh_id, idx);
			if (kr != KERN_SUCCESS) {
				/* policy denied: return its error without running the stub */
				((mig_reply_error_t *) reply->ikm_header)->RetCode = kr;
				goto skip_kobjcall;
			}
		}
#endif /* CONFIG_MACF */

		/* dispatch to the MIG server stub */
		(*ptr->routine)(request->ikm_header, reply->ikm_header);

#if CONFIG_MACF
skip_kobjcall:
#endif

		/* Check if the exec token changed during the mig routine */
		if (task != TASK_NULL) {
			if (exec_token != task->exec_token) {
				exec_token_changed = true;
			}
			task_deallocate(task);
		}

		counter_inc(&kernel_task->messages_received);
	} else {
#if DEVELOPMENT || DEBUG
		printf("ipc_kobject_server: bogus kernel message, id=%d\n",
		    request->ikm_header->msgh_id);
#endif /* DEVELOPMENT || DEBUG */
		_MIG_MSGID_INVALID(request->ikm_header->msgh_id);

		((mig_reply_error_t *)reply->ikm_header)->RetCode = MIG_BAD_ID;
	}

	/* Fail the MIG call if the task exec token changed during the call */
	if (exec_token_changed && ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * Create a new reply msg with error and destroy the old reply msg.
		 */
		ipc_kmsg_t new_reply = ipc_kmsg_alloc(reply_size, 0,
		    IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO |
		    IPC_KMSG_ALLOC_NOFAIL);

		/*
		 * Initialize the new reply message.
		 */
		{
#define OutP_new        ((mig_reply_error_t *) new_reply->ikm_header)
#define OutP_old        ((mig_reply_error_t *) reply->ikm_header)

			OutP_new->NDR = OutP_old->NDR;
			OutP_new->Head.msgh_size = sizeof(mig_reply_error_t);
			OutP_new->Head.msgh_bits = OutP_old->Head.msgh_bits & ~MACH_MSGH_BITS_COMPLEX;
			OutP_new->Head.msgh_remote_port = OutP_old->Head.msgh_remote_port;
			OutP_new->Head.msgh_local_port = MACH_PORT_NULL;
			OutP_new->Head.msgh_voucher_port = MACH_PORT_NULL;
			OutP_new->Head.msgh_id = OutP_old->Head.msgh_id;

			/* Set the error as KERN_INVALID_TASK */
			OutP_new->RetCode = KERN_INVALID_TASK;

#undef OutP_new
#undef OutP_old
		}

		/*
		 * Destroy everything in reply except the reply port right,
		 * which is needed in the new reply message.
		 */
		reply->ikm_header->msgh_remote_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(reply);
		reply = new_reply;
	} else if (ipc_kobject_reply_status(reply) == MIG_NO_REPLY) {
		/*
		 * The server function will send a reply message
		 * using the reply port right, which it has saved.
		 */
		ipc_kmsg_free(reply);
		reply = IKM_NULL;
	}

	*replyp = reply;
	return KERN_SUCCESS;
}
548
549
550 /*
551 * Routine: ipc_kobject_server
552 * Purpose:
553 * Handle a message sent to the kernel.
554 * Generates a reply message.
555 * Version for Untyped IPC.
556 *
 * Ownership of the incoming rights (from the request)
 * is transferred on success (whether a reply is made or not).
559 *
560 * Conditions:
561 * Nothing locked.
562 */
ipc_kmsg_t
ipc_kobject_server(
	ipc_port_t              port,
	ipc_kmsg_t              request,
	mach_msg_option_t       option __unused)
{
#if DEVELOPMENT || DEBUG
	const int request_msgh_id = request->ikm_header->msgh_id;
#endif
	ipc_port_t request_voucher_port;
	ipc_kmsg_t reply = IKM_NULL;
	kern_return_t kr;

	ipc_kmsg_trace_send(request, option);

	/* user-extension objects have their own dispatcher */
	if (ip_kotype(port) == IKOT_UEXT_OBJECT) {
		kr = uext_server(port, request, &reply);
	} else {
		kr = ipc_kobject_server_internal(port, request, &reply);
	}

	if (kr != KERN_SUCCESS) {
		assert(kr != MACH_SEND_TIMED_OUT &&
		    kr != MACH_SEND_INTERRUPTED &&
		    kr != MACH_SEND_INVALID_DEST);
		assert(reply == IKM_NULL);

		/* convert the server error into a MIG error */
		/* NOTE(review): no IPC_KMSG_ALLOC_NOFAIL here — presumably this
		 * small allocation cannot fail; verify before relying on it. */
		reply = ipc_kmsg_alloc(sizeof(mig_reply_error_t), 0,
		    IPC_KMSG_ALLOC_KERNEL | IPC_KMSG_ALLOC_ZERO);
		ipc_kobject_init_reply(reply, request, kr);
	}

	counter_inc(&kernel_task->messages_sent);
	/*
	 * Destroy destination. The following code differs from
	 * ipc_object_destroy in that we release the send-once
	 * right instead of generating a send-once notification
	 * (which would bring us here again, creating a loop).
	 * It also differs in that we only expect send or
	 * send-once rights, never receive rights.
	 *
	 * We set msgh_remote_port to IP_NULL so that the kmsg
	 * destroy routines don't try to destroy the port twice.
	 */
	switch (MACH_MSGH_BITS_REMOTE(request->ikm_header->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND:
		ipc_port_release_send(request->ikm_header->msgh_remote_port);
		request->ikm_header->msgh_remote_port = IP_NULL;
		break;

	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		ipc_port_release_sonce(request->ikm_header->msgh_remote_port);
		request->ikm_header->msgh_remote_port = IP_NULL;
		break;

	default:
		panic("ipc_kobject_server: strange destination rights");
	}

	/*
	 * Destroy voucher. The kernel MIG servers never take ownership
	 * of vouchers sent in messages. Swallow any such rights here.
	 */
	request_voucher_port = ipc_kmsg_get_voucher_port(request);
	if (IP_VALID(request_voucher_port)) {
		assert(MACH_MSG_TYPE_PORT_SEND ==
		    MACH_MSGH_BITS_VOUCHER(request->ikm_header->msgh_bits));
		ipc_port_release_send(request_voucher_port);
		ipc_kmsg_clear_voucher_port(request);
	}

	if (reply == IKM_NULL ||
	    ipc_kobject_reply_status(reply) == KERN_SUCCESS) {
		/*
		 * The server function is responsible for the contents
		 * of the message. The reply port right is moved
		 * to the reply message, and we have deallocated
		 * the destination port right, so we just need
		 * to free the kmsg.
		 */
		ipc_kmsg_free(request);
	} else {
		/*
		 * The message contents of the request are intact.
		 * Destroy everything except the reply port right,
		 * which is needed in the reply message.
		 */
		request->ikm_header->msgh_local_port = MACH_PORT_NULL;
		ipc_kmsg_destroy(request);
	}

	if (reply != IKM_NULL) {
		ipc_port_t reply_port = reply->ikm_header->msgh_remote_port;

		if (!IP_VALID(reply_port)) {
			/*
			 * Can't queue the reply message if the destination
			 * (the reply port) isn't valid.
			 */

			ipc_kmsg_destroy(reply);
			reply = IKM_NULL;
		} else if (ip_in_space_noauth(reply_port, ipc_space_kernel)) {
			/* do not lock reply port, use raw pointer comparison */

			/*
			 * Don't send replies to kobject kernel ports.
			 */
#if DEVELOPMENT || DEBUG
			printf("%s: refusing to send reply to kobject %d port (id:%d)\n",
			    __func__, ip_kotype(reply_port), request_msgh_id);
#endif /* DEVELOPMENT || DEBUG */
			ipc_kmsg_destroy(reply);
			reply = IKM_NULL;
		}
	}

	return reply;
}
683
/*
 * Store the kobject pointer into a port's ip_kobject slot.
 * On ptrauth systems the pointer is signed with a discriminator
 * blended from the slot address and the kobject type, so the value
 * only authenticates when read back for the same port and type.
 */
static __header_always_inline void
ipc_kobject_set_raw(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;

#if __has_feature(ptrauth_calls)
	if (kobject) {
		/* fold the type into the discriminator before signing */
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_sign_unauthenticated(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	*store = (uintptr_t)kobject;
}
705
/*
 * Mark the port as a kobject of `type` (object bits) and store the
 * signed kobject pointer. The io_bits update happens before the raw
 * store, keeping the order the original relies on.
 */
static inline void
ipc_kobject_set_internal(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	assert(type != IKOT_NONE);
	io_bits_or(ip_to_object(port), type | IO_BITS_KOBJECT);
	ipc_kobject_set_raw(port, kobject, type);
}
716
717 /*
718 * Routine: ipc_kobject_get_raw
719 * Purpose:
720 * Returns the kobject pointer of a specified port.
721 *
722 * This returns the current value of the kobject pointer,
723 * without any validation (the caller is expected to do
724 * the validation it needs).
725 *
726 * Conditions:
727 * The port is a kobject of the proper type.
728 */
__header_always_inline ipc_kobject_t
ipc_kobject_get_raw(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	uintptr_t *store = &port->ip_kobject;
	ipc_kobject_t kobject = (ipc_kobject_t)*store;

#if __has_feature(ptrauth_calls)
	if (kobject) {
		/*
		 * Authenticate with the same blended discriminator used by
		 * ipc_kobject_set_raw(); a type/slot mismatch faults here.
		 */
		type ^= OS_PTRAUTH_DISCRIMINATOR("ipc_port.ip_kobject");
		kobject = ptrauth_auth_data(kobject,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(store, type));
	}
#else
	(void)type;
#endif // __has_feature(ptrauth_calls)

	return kobject;
}
750
751 /*
752 * Routine: ipc_kobject_get_locked
753 * Purpose:
754 * Returns the kobject pointer of a specified port,
755 * for an expected type.
756 *
757 * Returns IKO_NULL if the port isn't active.
758 *
759 * This function may be used when:
760 * - the port lock is held
761 * - the kobject association stays while there
762 * are any outstanding rights.
763 *
764 * Conditions:
765 * The port is a kobject of the proper type.
766 */
767 ipc_kobject_t
ipc_kobject_get_locked(ipc_port_t port,ipc_kobject_type_t type)768 ipc_kobject_get_locked(
769 ipc_port_t port,
770 ipc_kobject_type_t type)
771 {
772 ipc_kobject_t kobject = IKO_NULL;
773
774 if (ip_active(port) && type == ip_kotype(port)) {
775 kobject = ipc_kobject_get_raw(port, type);
776 }
777
778 return kobject;
779 }
780
781 /*
782 * Routine: ipc_kobject_get_stable
783 * Purpose:
784 * Returns the kobject pointer of a specified port,
785 * for an expected type, for types where the port/kobject
786 * association is permanent.
787 *
788 * Returns IKO_NULL if the port isn't active.
789 *
790 * Conditions:
791 * The port is a kobject of the proper type.
792 */
ipc_kobject_t
ipc_kobject_get_stable(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	/* only valid for types registered with a stable port/kobject binding */
	assert(ipc_kobject_ops_get(type)->iko_op_stable);
	return ipc_kobject_get_locked(port, type);
}
801
802 /*
803 * Routine: ipc_kobject_init_port
804 * Purpose:
805 * Initialize a kobject port with the given types and options.
806 *
807 * This function never fails.
808 */
static inline void
ipc_kobject_init_port(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t options)
{
	ipc_kobject_set_internal(port, kobject, type);

	/* mint an initial send right on the new port */
	if (options & IPC_KOBJECT_ALLOC_MAKE_SEND) {
		ipc_port_make_send_locked(port);
	}
	/* arm no-more-senders; the port holds an extra ref until it fires */
	if (options & IPC_KOBJECT_ALLOC_NSREQUEST) {
		port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
		ip_reference(port);
	}
	if (options & IPC_KOBJECT_ALLOC_NO_GRANT) {
		port->ip_no_grant = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_IMMOVABLE_SEND) {
		port->ip_immovable_send = 1;
	}
	if (options & IPC_KOBJECT_ALLOC_PINNED) {
		port->ip_pinned = 1;
	}
}
835
836 /*
837 * Routine: ipc_kobject_alloc_port
838 * Purpose:
839 * Allocate a kobject port in the kernel space of the specified type.
840 *
841 * This function never fails.
842 *
843 * Conditions:
844 * No locks held (memory is allocated)
845 */
846 ipc_port_t
ipc_kobject_alloc_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_kobject_alloc_options_t options)847 ipc_kobject_alloc_port(
848 ipc_kobject_t kobject,
849 ipc_kobject_type_t type,
850 ipc_kobject_alloc_options_t options)
851 {
852 ipc_port_t port;
853
854 port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
855 if (port == IP_NULL) {
856 panic("ipc_kobject_alloc_port(): failed to allocate port");
857 }
858
859 ipc_kobject_init_port(port, kobject, type, options);
860 return port;
861 }
862
863 /*
864 * Routine: ipc_kobject_alloc_labeled_port
865 * Purpose:
866 * Allocate a kobject port and associated mandatory access label
867 * in the kernel space of the specified type.
868 *
869 * This function never fails.
870 *
871 * Conditions:
872 * No locks held (memory is allocated)
873 */
874
875 ipc_port_t
ipc_kobject_alloc_labeled_port(ipc_kobject_t kobject,ipc_kobject_type_t type,ipc_label_t label,ipc_kobject_alloc_options_t options)876 ipc_kobject_alloc_labeled_port(
877 ipc_kobject_t kobject,
878 ipc_kobject_type_t type,
879 ipc_label_t label,
880 ipc_kobject_alloc_options_t options)
881 {
882 ipc_port_t port;
883
884 port = ipc_kobject_alloc_port(kobject, type, options);
885
886 ipc_port_set_label(port, label);
887
888 return port;
889 }
890
891 static void
ipc_kobject_subst_once_no_senders(ipc_port_t port,mach_port_mscount_t mscount)892 ipc_kobject_subst_once_no_senders(
893 ipc_port_t port,
894 mach_port_mscount_t mscount)
895 {
896 ipc_port_t ko_port;
897
898 ko_port = ipc_kobject_dealloc_port(port, mscount, IKOT_PORT_SUBST_ONCE);
899
900 if (ko_port) {
901 /*
902 * Clean up the right if the wrapper wasn't hollowed out
903 * by ipc_kobject_alloc_subst_once().
904 */
905 ipc_port_release_send(ko_port);
906 }
907 }
908
909 /*
910 * Routine: ipc_kobject_alloc_subst_once
911 * Purpose:
912 * Make a port that will be substituted by the kolabel
913 * rules once, preventing the next substitution (of its target)
914 * to happen if any.
915 *
916 * Returns:
917 * A port with a send right, that will substitute to its "kobject".
918 *
919 * Conditions:
920 * No locks held (memory is allocated).
921 *
922 * `target` holds a send-right donated to this function,
923 * consumed in ipc_kobject_subst_once_no_senders().
924 */
925 ipc_port_t
ipc_kobject_alloc_subst_once(ipc_port_t target)926 ipc_kobject_alloc_subst_once(
927 ipc_port_t target)
928 {
929 if (!IP_VALID(target)) {
930 return target;
931 }
932 return ipc_kobject_alloc_labeled_port(target,
933 IKOT_PORT_SUBST_ONCE, IPC_LABEL_SUBST_ONCE,
934 IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
935 }
936
937 /*
938 * Routine: ipc_kobject_make_send_lazy_alloc_port
939 * Purpose:
940 * Make a send once for a kobject port.
941 *
942 * A location owning this port is passed in port_store.
943 * If no port exists, a port is made lazily.
944 *
945 * A send right is made for the port, and if this is the first one
946 * (possibly not for the first time), then the no-more-senders
947 * notification is rearmed.
948 *
949 * When a notification is armed, the kobject must donate
950 * one of its references to the port. It is expected
951 * the no-more-senders notification will consume this reference.
952 *
953 * Returns:
954 * TRUE if a notification was armed
955 * FALSE else
956 *
957 * Conditions:
958 * Nothing is locked, memory can be allocated.
959 * The caller must be able to donate a kobject reference to the port.
960 */
boolean_t
ipc_kobject_make_send_lazy_alloc_port(
	ipc_port_t              *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_kobject_alloc_options_t alloc_opts,
	uint64_t                __ptrauth_only ptrauth_discriminator)
{
	ipc_port_t port, previous, __ptrauth_only port_addr;
	kern_return_t kr;

	port = os_atomic_load(port_store, dependency);

#if __has_feature(ptrauth_calls)
	/* If we're on a ptrauth system and this port is signed, authenticate and strip the pointer */
	if ((alloc_opts & IPC_KOBJECT_PTRAUTH_STORE) && IP_VALID(port)) {
		port = ptrauth_auth_data(port,
		    ptrauth_key_process_independent_data,
		    ptrauth_blend_discriminator(port_store, ptrauth_discriminator));
	}
#endif // __has_feature(ptrauth_calls)

	if (!IP_VALID(port)) {
		/* no port yet: allocate one with a send right and armed nsrequest */
		port = ipc_kobject_alloc_port(kobject, type,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST | alloc_opts);

#if __has_feature(ptrauth_calls)
		/* sign the pointer we publish when the store is ptrauth-protected */
		if (alloc_opts & IPC_KOBJECT_PTRAUTH_STORE) {
			port_addr = ptrauth_sign_unauthenticated(port,
			    ptrauth_key_process_independent_data,
			    ptrauth_blend_discriminator(port_store, ptrauth_discriminator));
		} else {
			port_addr = port;
		}
#else
		port_addr = port;
#endif // __has_feature(ptrauth_calls)

		/* publish; on success the armed notification is the TRUE result */
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port_addr, &previous, release)) {
			return TRUE;
		}

		/*
		 * Lost the race: another thread published a port first.
		 *
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		port = previous;
	}

	kr = ipc_kobject_make_send_nsrequest(port);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1021
1022 /*
1023 * Routine: ipc_kobject_make_send_lazy_alloc_labeled_port
1024 * Purpose:
 *	Make a send right for a kobject port.
1026 *
1027 * A location owning this port is passed in port_store.
1028 * If no port exists, a port is made lazily.
1029 *
1030 * A send right is made for the port, and if this is the first one
1031 * (possibly not for the first time), then the no-more-senders
1032 * notification is rearmed.
1033 *
1034 * When a notification is armed, the kobject must donate
1035 * one of its references to the port. It is expected
1036 * the no-more-senders notification will consume this reference.
1037 *
1038 * Returns:
1039 * TRUE if a notification was armed
1040 * FALSE else
1041 *
1042 * Conditions:
1043 * Nothing is locked, memory can be allocated.
1044 * The caller must be able to donate a kobject reference to the port.
1045 */
boolean_t
ipc_kobject_make_send_lazy_alloc_labeled_port(
	ipc_port_t             *port_store,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type,
	ipc_label_t             label)
{
	ipc_port_t port, previous;
	kern_return_t kr;

	/* dependency-ordered load pairs with the release cmpxchg below */
	port = os_atomic_load(port_store, dependency);

	if (!IP_VALID(port)) {
		/*
		 * First use: allocate a labeled port that already carries
		 * a send right and an armed no-senders request.
		 */
		port = ipc_kobject_alloc_labeled_port(kobject, type, label,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
		if (os_atomic_cmpxchgv(port_store, IP_NULL, port, &previous, release)) {
			/* we published our port; its notification is already armed */
			return TRUE;
		}

		/*
		 * undo IPC_KOBJECT_ALLOC_MAKE_SEND,
		 * ipc_kobject_dealloc_port will handle
		 * IPC_KOBJECT_ALLOC_NSREQUEST.
		 */
		port->ip_mscount = 0;
		port->ip_srights = 0;
		ip_release_live(port);
		ipc_kobject_dealloc_port(port, 0, type);

		/* we lost the race: use the winner's (labeled) port */
		port = previous;
		assert(ip_is_kolabeled(port));
	}

	kr = ipc_kobject_make_send_nsrequest(port);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);

	return kr == KERN_SUCCESS;
}
1084
1085 /*
1086 * Routine: ipc_kobject_nsrequest_locked
1087 * Purpose:
1088 * Arm the no-senders notification for the given kobject
1089 * if it doesn't have one armed yet.
1090 *
1091 * Conditions:
1092 * Port is locked and active.
1093 *
1094 * Returns:
1095 * KERN_SUCCESS: the notification was armed
1096 * KERN_ALREADY_WAITING: the notification was already armed
1097 * KERN_FAILURE: the notification would fire immediately
1098 */
1099 static inline kern_return_t
ipc_kobject_nsrequest_locked(ipc_port_t port,mach_port_mscount_t sync)1100 ipc_kobject_nsrequest_locked(
1101 ipc_port_t port,
1102 mach_port_mscount_t sync)
1103 {
1104 if (port->ip_nsrequest == IP_KOBJECT_NSREQUEST_ARMED) {
1105 return KERN_ALREADY_WAITING;
1106 }
1107
1108 if (port->ip_srights == 0 && sync <= port->ip_mscount) {
1109 return KERN_FAILURE;
1110 }
1111
1112 port->ip_nsrequest = IP_KOBJECT_NSREQUEST_ARMED;
1113 ip_reference(port);
1114 return KERN_SUCCESS;
1115 }
1116
1117
1118 /*
1119 * Routine: ipc_kobject_nsrequest
1120 * Purpose:
1121 * Arm the no-senders notification for the given kobject
1122 * if it doesn't have one armed yet.
1123 *
1124 * Returns:
1125 * KERN_SUCCESS: the notification was armed
1126 * KERN_ALREADY_WAITING: the notification was already armed
1127 * KERN_FAILURE: the notification would fire immediately
1128 * KERN_INVALID_RIGHT: the port is dead
1129 */
1130 kern_return_t
ipc_kobject_nsrequest(ipc_port_t port,mach_port_mscount_t sync,mach_port_mscount_t * mscount)1131 ipc_kobject_nsrequest(
1132 ipc_port_t port,
1133 mach_port_mscount_t sync,
1134 mach_port_mscount_t *mscount)
1135 {
1136 kern_return_t kr = KERN_INVALID_RIGHT;
1137
1138 if (IP_VALID(port)) {
1139 ip_mq_lock(port);
1140
1141 if (mscount) {
1142 *mscount = port->ip_mscount;
1143 }
1144 if (ip_active(port)) {
1145 kr = ipc_kobject_nsrequest_locked(port, sync);
1146 }
1147
1148 ip_mq_unlock(port);
1149 } else if (mscount) {
1150 *mscount = 0;
1151 }
1152
1153 return kr;
1154 }
1155
1156
1157 /*
1158 * Routine: ipc_kobject_make_send_nsrequest
1159 * Purpose:
1160 * Make a send right for a kobject port.
1161 *
1162 * Then the no-more-senders notification is armed
1163 * if it wasn't already.
1164 *
1165 * Conditions:
1166 * Nothing is locked.
1167 *
1168 * Returns:
1169 * KERN_SUCCESS: the notification was armed
1170 * KERN_ALREADY_WAITING: the notification was already armed
1171 * KERN_INVALID_RIGHT: the port is dead
1172 */
1173 kern_return_t
ipc_kobject_make_send_nsrequest(ipc_port_t port)1174 ipc_kobject_make_send_nsrequest(
1175 ipc_port_t port)
1176 {
1177 kern_return_t kr = KERN_INVALID_RIGHT;
1178
1179 if (IP_VALID(port)) {
1180 ip_mq_lock(port);
1181 if (ip_active(port)) {
1182 ipc_port_make_send_locked(port);
1183 kr = ipc_kobject_nsrequest_locked(port, 0);
1184 assert(kr != KERN_FAILURE);
1185 }
1186 ip_mq_unlock(port);
1187 }
1188
1189 return kr;
1190 }
1191
1192 static inline ipc_kobject_t
ipc_kobject_disable_internal(ipc_port_t port,ipc_kobject_type_t type)1193 ipc_kobject_disable_internal(
1194 ipc_port_t port,
1195 ipc_kobject_type_t type)
1196 {
1197 ipc_kobject_t kobject = ipc_kobject_get_raw(port, type);
1198
1199 port->ip_kobject = 0;
1200 if (ip_is_kolabeled(port)) {
1201 port->ip_kolabel->ikol_alt_port = IP_NULL;
1202 }
1203
1204 return kobject;
1205 }
1206
1207 /*
1208 * Routine: ipc_kobject_dealloc_port_and_unlock
1209 * Purpose:
1210 * Destroys a port allocated with any of the ipc_kobject_alloc*
1211 * functions.
1212 *
1213 * This will atomically:
1214 * - make the port inactive,
1215 * - optionally check the make send count
1216 * - disable (nil-out) the kobject pointer for kobjects without
1217 * a destroy callback.
1218 *
1219 * The port will retain its kobject-ness and kobject type.
1220 *
1221 *
1222 * Returns:
1223 * The kobject pointer that was set prior to this call
1224 * (possibly NULL if the kobject was already disabled).
1225 *
1226 * Conditions:
1227 * The port is active and locked.
1228 * On return the port is inactive and unlocked.
1229 */
/* dealloc was attempted with a type that doesn't match the port's kotype */
__abortlike
static void
__ipc_kobject_dealloc_bad_type_panic(ipc_port_t port, ipc_kobject_type_t type)
{
	panic("port %p of type %d, expecting %d", port, ip_kotype(port), type);
}
1236
/* dealloc was attempted with a make-send count that doesn't match the port's */
__abortlike
static void
__ipc_kobject_dealloc_bad_mscount_panic(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	panic("unexpected make-send count: %p[%d], %d, %d",
	    port, type, port->ip_mscount, mscount);
}
1247
/* dealloc was attempted while the port still has outstanding send rights */
__abortlike
static void
__ipc_kobject_dealloc_bad_srights_panic(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	panic("unexpected send right count: %p[%d], %d",
	    port, type, port->ip_srights);
}
1257
ipc_kobject_t
ipc_kobject_dealloc_port_and_unlock(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	ipc_kobject_t kobject = IKO_NULL;
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(type);

	require_ip_active(port);

	/* the caller's expected type must match the port's kotype */
	if (ip_kotype(port) != type) {
		__ipc_kobject_dealloc_bad_type_panic(port, type);
	}

	/* when a make-send count is supplied, it must match exactly */
	if (mscount && port->ip_mscount != mscount) {
		__ipc_kobject_dealloc_bad_mscount_panic(port, mscount, type);
	}
	/* checked or "stable" kobjects may not die with send rights extant */
	if ((mscount || ops->iko_op_stable) && port->ip_srights != 0) {
		__ipc_kobject_dealloc_bad_srights_panic(port, type);
	}

	/* types without a destroy callback get their pointer nilled here */
	if (!ops->iko_op_destroy) {
		kobject = ipc_kobject_disable_internal(port, type);
	}

	/* destroys the port and drops the port lock */
	ipc_port_dealloc_special_and_unlock(port, ipc_space_kernel);

	return kobject;
}
1288
/*
 *	Routine:	ipc_kobject_dealloc_port
 *	Purpose:
 *		Destroys a port allocated with any of the ipc_kobject_alloc*
 *		functions.
 *
 *		This will atomically:
 *		- make the port inactive,
 *		- optionally check the make send count
 *		- disable (nil-out) the kobject pointer for kobjects without
 *		  a destroy callback.
 *
 *		The port will retain its kobject-ness and kobject type.
 *
 *
 *	Returns:
 *		The kobject pointer that was set prior to this call
 *		(possibly NULL if the kobject was already disabled).
 *
 *	Conditions:
 *		Nothing is locked.
 *		The port is active.
 *		On return the port is inactive.
 */
ipc_kobject_t
ipc_kobject_dealloc_port(
	ipc_port_t              port,
	mach_port_mscount_t     mscount,
	ipc_kobject_type_t      type)
{
	/* take the lock; the _and_unlock variant consumes it */
	ip_mq_lock(port);
	return ipc_kobject_dealloc_port_and_unlock(port, mscount, type);
}
1322
/*
 *	Routine:	ipc_kobject_enable
 *	Purpose:
 *		Make a port represent a kernel object of the given type.
 *		The caller is responsible for handling refs for the
 *		kernel object, if necessary.
 *	Conditions:
 *		Nothing locked.
 *		The port must be active.
 */
void
ipc_kobject_enable(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	/* "stable" kobject pointers may never be set after allocation */
	assert(!ipc_kobject_ops_get(type)->iko_op_stable);

	ip_mq_lock(port);
	require_ip_active(port);

	/* the port must already carry the expected kotype tag */
	if (type != ip_kotype(port)) {
		panic("%s: unexpected kotype of port %p: want %d, got %d",
		    __func__, port, type, ip_kotype(port));
	}

	ipc_kobject_set_raw(port, kobject, type);

	ip_mq_unlock(port);
}
1353
/*
 *	Routine:	ipc_kobject_disable_locked
 *	Purpose:
 *		Clear the kobject pointer for a port.
 *	Conditions:
 *		The port is locked.
 *		Returns the current kobject pointer.
 */
ipc_kobject_t
ipc_kobject_disable_locked(
	ipc_port_t              port,
	ipc_kobject_type_t      type)
{
	if (ip_active(port)) {
		/* "stable" kobjects may only be disabled once the port died */
		assert(!ipc_kobject_ops_get(type)->iko_op_stable);
	}

	/* the caller's expected type must match the port's kotype */
	if (ip_kotype(port) != type) {
		panic("port %p of type %d, expecting %d",
		    port, ip_kotype(port), type);
	}

	return ipc_kobject_disable_internal(port, type);
}
1378
1379 /*
1380 * Routine: ipc_kobject_disable
1381 * Purpose:
1382 * Clear the kobject pointer for a port.
1383 * Conditions:
1384 * Nothing locked.
1385 * Returns the current kobject pointer.
1386 */
1387 ipc_kobject_t
ipc_kobject_disable(ipc_port_t port,ipc_kobject_type_t type)1388 ipc_kobject_disable(
1389 ipc_port_t port,
1390 ipc_kobject_type_t type)
1391 {
1392 ipc_kobject_t kobject;
1393
1394 ip_mq_lock(port);
1395 kobject = ipc_kobject_disable_locked(port, type);
1396 ip_mq_unlock(port);
1397
1398 return kobject;
1399 }
1400
1401 static inline bool
ipc_kobject_may_upgrade(ipc_port_t port)1402 ipc_kobject_may_upgrade(ipc_port_t port)
1403 {
1404 if (!ip_active(port) || ip_kotype(port) != IKOT_NONE) {
1405 /* needs to be active and have no tag */
1406 return false;
1407 }
1408
1409 if (port->ip_tempowner || port->ip_specialreply) {
1410 /* union overlays with ip_kobject */
1411 return false;
1412 }
1413
1414 if (port->ip_has_watchport || ipc_port_has_prdrequest(port)) {
1415 /* outstanding watchport or port-destroyed is also disallowed */
1416 return false;
1417 }
1418
1419 return true;
1420 }
1421
/*
 *	Routine:	ipc_kobject_upgrade_locked
 *	Purpose:
 *		Upgrades a port to kobject status
 *		Only kobjects with iko_op_allow_upgrade can do this.
 *	Conditions:
 *		Port is locked
 */
void
ipc_kobject_upgrade_locked(
	ipc_port_t              port,
	ipc_kobject_t           kobject,
	ipc_kobject_type_t      type)
{
	/* caller must have checked upgradability (see ipc_kobject_may_upgrade) */
	assert(ipc_kobject_may_upgrade(port));
	assert(ipc_kobject_ops_get(type)->iko_op_allow_upgrade);
	ipc_kobject_set_internal(port, kobject, type);
}
1440
1441 /*
1442 * Routine: ipc_kobject_upgrade
1443 * Purpose:
1444 * Upgrades a port to kobject status
1445 * Only kobjects with iko_op_allow_upgrade can do this.
1446 * Returns:
1447 * KERN_SUCCESS: the upgrade was possible
1448 * KERN_INVALID_CAPABILITY: the upgrade wasn't possible
1449 * Conditions:
1450 * Nothing is locked
1451 */
1452 kern_return_t
ipc_kobject_upgrade(ipc_port_t port,ipc_kobject_t kobject,ipc_kobject_type_t type)1453 ipc_kobject_upgrade(
1454 ipc_port_t port,
1455 ipc_kobject_t kobject,
1456 ipc_kobject_type_t type)
1457 {
1458 kern_return_t kr = KERN_INVALID_CAPABILITY;
1459
1460 assert(ipc_kobject_ops_get(type)->iko_op_allow_upgrade);
1461
1462 ip_mq_lock(port);
1463
1464 if (ipc_kobject_may_upgrade(port)) {
1465 ipc_kobject_set_internal(port, kobject, type);
1466 kr = KERN_SUCCESS;
1467 }
1468
1469 ip_mq_unlock(port);
1470
1471 return kr;
1472 }
1473
/*
 *	Routine:	ipc_kobject_downgrade_host_notify
 *	Purpose:
 *		Downgrade a kobject port back to receive right status.
 *		Only IKOT_HOST_NOTIFY should use this facility.
 *
 *		/!\ WARNING /!\
 *
 *		This feature is breaking the kobject abstraction
 *		and is grandfathered in. Accessing io_kotype() without a lock
 *		only works because this is the only such kobject doing
 *		this disgusting dance.
 *
 *	Returns:
 *		The kobject pointer previously set on the object.
 *	Conditions:
 *		Nothing is locked
 *		The port doesn't need to be active
 */
ipc_kobject_t
ipc_kobject_downgrade_host_notify(
	ipc_port_t              port)
{
	ipc_kobject_t kobject = IKO_NULL;

	ip_mq_lock(port);

	if (ip_kotype(port) == IKOT_HOST_NOTIFY) {
		kobject = ipc_kobject_disable_locked(port, IKOT_HOST_NOTIFY);
		/* clear the kotype bits: the port is a plain receive right again */
		io_bits_andnot(ip_to_object(port), IO_BITS_KOTYPE);
	}

	ip_mq_unlock(port);

	return kobject;
}
1510
/*
 *	Routine:	ipc_kobject_notify_no_senders
 *	Purpose:
 *		Handles a no-senders notification
 *		sent to a kobject.
 *
 *		A port reference is consumed.
 *
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kobject_notify_no_senders(
	ipc_port_t              port,
	mach_port_mscount_t     mscount)
{
	ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));

	/* only types that registered a no-senders callback should get here */
	assert(ops->iko_op_no_senders);
	ops->iko_op_no_senders(port, mscount);

	/* consume the ref ipc_notify_no_senders_prepare left */
	ip_release(port);
}
1535
/*
 *	Routine:	ipc_kobject_notify_send_once_and_unlock
 *	Purpose:
 *		Handles a send-once notification
 *		sent to a kobject.
 *
 *		A send-once port reference is consumed.
 *
 *	Conditions:
 *		Port is locked.
 */
void
ipc_kobject_notify_send_once_and_unlock(
	ipc_port_t              port)
{
	/*
	 * drop the send once right while we hold the port lock.
	 * we will keep a port reference while we run the possible
	 * callouts to kobjects.
	 *
	 * This a simplified version of ipc_port_release_sonce()
	 * since kobjects can't be special reply ports.
	 */
	assert(!port->ip_specialreply);

	if (port->ip_sorights == 0) {
		panic("Over-release of port %p send-once right!", port);
	}

	port->ip_sorights--;
	ip_mq_unlock(port);

	/*
	 * because there's very few consumers,
	 * the code here isn't generic as it's really not worth it.
	 */
	switch (ip_kotype(port)) {
	case IKOT_TASK_RESUME:
		task_suspension_send_once(port);
		break;
	default:
		/* other kobject types ignore send-once notifications */
		break;
	}

	/* drop the reference the notification carried */
	ip_release(port);
}
1582
1583
1584 /*
1585 * Routine: ipc_kobject_destroy
1586 * Purpose:
1587 * Release any kernel object resources associated
1588 * with the port, which is being destroyed.
1589 *
1590 * This path to free object resources should only be
1591 * needed when resources are associated with a user's port.
1592 * In the normal case, when the kernel is the receiver,
1593 * the code calling ipc_kobject_dealloc_port() should clean
1594 * up the object resources.
1595 *
1596 * Cleans up any kobject label that might be present.
1597 * Conditions:
1598 * The port is not locked, but it is dead.
1599 */
1600 void
ipc_kobject_destroy(ipc_port_t port)1601 ipc_kobject_destroy(
1602 ipc_port_t port)
1603 {
1604 ipc_kobject_ops_t ops = ipc_kobject_ops_get(ip_kotype(port));
1605
1606 if (ops->iko_op_permanent) {
1607 panic("trying to destroy an permanent port %p", port);
1608 }
1609 if (ops->iko_op_destroy) {
1610 ops->iko_op_destroy(port);
1611 }
1612
1613 if (ip_is_kolabeled(port)) {
1614 ipc_kobject_label_t labelp = port->ip_kolabel;
1615
1616 assert(labelp != NULL);
1617 assert(labelp->ikol_alt_port == IP_NULL);
1618 assert(ip_is_kobject(port));
1619 port->ip_kolabel = NULL;
1620 io_bits_andnot(ip_to_object(port), IO_BITS_KOLABEL);
1621 zfree(ipc_kobject_label_zone, labelp);
1622 }
1623 }
1624
1625 /*
1626 * Routine: ipc_kobject_label_substitute_task
1627 * Purpose:
1628 * Substitute a task control port for its immovable
1629 * equivalent when the receiver is that task.
1630 * Conditions:
1631 * Space is write locked and active.
1632 * Port is locked and active.
1633 * Returns:
1634 * - IP_NULL port if no substitution is to be done
1635 * - a valid port if a substitution needs to happen
1636 */
1637 static ipc_port_t
ipc_kobject_label_substitute_task(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1638 ipc_kobject_label_substitute_task(
1639 ipc_space_t space,
1640 ipc_kobject_label_t kolabel,
1641 ipc_port_t port)
1642 {
1643 ipc_port_t subst = IP_NULL;
1644 task_t task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
1645
1646 if (task != TASK_NULL && task == space->is_task) {
1647 if ((subst = kolabel->ikol_alt_port)) {
1648 return subst;
1649 }
1650 }
1651
1652 return IP_NULL;
1653 }
1654
1655 /*
1656 * Routine: ipc_kobject_label_substitute_thread
1657 * Purpose:
1658 * Substitute a thread control port for its immovable
1659 * equivalent when it belongs to the receiver task.
1660 * Conditions:
1661 * Space is write locked and active.
1662 * Port is locked and active.
1663 * Returns:
1664 * - IP_NULL port if no substitution is to be done
1665 * - a valid port if a substitution needs to happen
1666 */
1667 static ipc_port_t
ipc_kobject_label_substitute_thread(ipc_space_t space,ipc_kobject_label_t kolabel,ipc_port_t port)1668 ipc_kobject_label_substitute_thread(
1669 ipc_space_t space,
1670 ipc_kobject_label_t kolabel,
1671 ipc_port_t port)
1672 {
1673 ipc_port_t subst = IP_NULL;
1674 thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
1675
1676 if (thread != THREAD_NULL && space->is_task == get_threadtask(thread)) {
1677 if ((subst = kolabel->ikol_alt_port) != IP_NULL) {
1678 return subst;
1679 }
1680 }
1681
1682 return IP_NULL;
1683 }
1684
/*
 *	Routine:	ipc_kobject_label_check
 *	Purpose:
 *		Check to see if the space is allowed to possess
 *		a right for the given port. In order to qualify,
 *		the space label must contain all the privileges
 *		listed in the port/kobject label.
 *
 *	Conditions:
 *		Space is write locked and active.
 *		Port is locked and active.
 *
 *	Returns:
 *		Whether the copyout is authorized.
 *
 *		If a port substitution is requested, the space is unlocked,
 *		the port is unlocked and its "right" consumed.
 *
 *		As of now, substituted ports only happen for send rights.
 */
bool
ipc_kobject_label_check(
	ipc_space_t                     space,
	ipc_port_t                      port,
	mach_msg_type_name_t            msgt_name,
	ipc_object_copyout_flags_t     *flags,
	ipc_port_t                     *subst_portp)
{
	ipc_kobject_label_t kolabel;
	ipc_label_t label;

	assert(is_active(space));
	assert(ip_active(port));

	*subst_portp = IP_NULL;

	/* Unlabeled ports/kobjects are always allowed */
	if (!ip_is_kolabeled(port)) {
		return true;
	}

	/* Never OK to copyout the receive right for a labeled kobject */
	if (msgt_name == MACH_MSG_TYPE_PORT_RECEIVE) {
		panic("ipc_kobject_label_check: attempted receive right "
		    "copyout for labeled kobject");
	}

	kolabel = port->ip_kolabel;
	label = kolabel->ikol_label;

	if ((*flags & IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK) == 0 &&
	    (label & IPC_LABEL_SUBST_MASK)) {
		ipc_port_t subst = IP_NULL;

		/* substitution is only supported for send rights */
		if (msgt_name != MACH_MSG_TYPE_PORT_SEND) {
			return false;
		}

		if ((label & IPC_LABEL_SUBST_MASK) == IPC_LABEL_SUBST_ONCE) {
			/*
			 * The next check will _not_ substitute.
			 * hollow out our one-time wrapper,
			 * and steal its send right.
			 */
			*flags |= IPC_OBJECT_COPYOUT_FLAGS_NO_LABEL_CHECK;
			subst = ipc_kobject_disable_locked(port,
			    IKOT_PORT_SUBST_ONCE);
			/* both locks are dropped: the substitution is final */
			is_write_unlock(space);
			ipc_port_release_send_and_unlock(port);
			if (subst == IP_NULL) {
				panic("subst-once port %p was consumed twice", port);
			}
			*subst_portp = subst;
			return true;
		}

		switch (label & IPC_LABEL_SUBST_MASK) {
		case IPC_LABEL_SUBST_TASK:
			subst = ipc_kobject_label_substitute_task(space,
			    kolabel, port);
			break;
		case IPC_LABEL_SUBST_THREAD:
			subst = ipc_kobject_label_substitute_thread(space,
			    kolabel, port);
			break;
		default:
			panic("unexpected label: %llx", label);
		}

		if (subst != IP_NULL) {
			/* keep subst alive across the lock drops below */
			ip_reference(subst);
			is_write_unlock(space);

			/*
			 * We do not hold a proper send right on `subst`,
			 * only a reference.
			 *
			 * Because of how thread/task termination works,
			 * there is no guarantee copy_send() would work,
			 * so we need to make_send().
			 *
			 * We can do that because ports tagged with
			 * IPC_LABEL_SUBST_{THREAD,TASK} do not use
			 * the no-senders notification.
			 */

			ipc_port_release_send_and_unlock(port);
			port = ipc_port_make_send(subst);
			ip_release(subst);
			*subst_portp = port;
			return true;
		}
	}

	/* plain privilege check: space label must cover the port label */
	return (label & space->is_label & IPC_LABEL_SPACE_MASK) ==
	       (label & IPC_LABEL_SPACE_MASK);
}
1802