1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <IOKit/IOBSD.h> // IOTaskHasEntitlement
72
73 #include <ipc/ipc_policy.h>
74 #include <mach/mach_types.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/mach_param.h>
78 #include <mach/task_special_ports.h>
79 #include <mach/thread_special_ports.h>
80 #include <mach/thread_status.h>
81 #include <mach/exception_types.h>
82 #include <mach/memory_object_types.h>
83 #include <mach/mach_traps.h>
84 #include <mach/task_server.h>
85 #include <mach/thread_act_server.h>
86 #include <mach/mach_host_server.h>
87 #include <mach/host_priv_server.h>
88 #include <mach/vm_map_server.h>
89
90 #include <kern/exc_guard.h>
91 #include <kern/kern_types.h>
92 #include <kern/host.h>
93 #include <kern/ipc_kobject.h>
94 #include <kern/ipc_tt.h>
95 #include <kern/kalloc.h>
96 #include <kern/thread.h>
97 #include <kern/ux_handler.h>
98 #include <kern/misc_protos.h>
99 #include <kdp/kdp_dyld.h>
100
101 #include <vm/vm_map_xnu.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_protos.h>
104 #include <mach/vm_types.h>
105 #include <libkern/coreanalytics/coreanalytics.h>
106
107 #include <security/mac_mach_internal.h>
108
109 #if CONFIG_CSR
110 #include <sys/csr.h>
111 #endif
112
113 #include <sys/code_signing.h> /* for developer mode state */
114
115 #if !defined(XNU_TARGET_OS_OSX) && !SECURE_KERNEL
116 extern int cs_relax_platform_task_ports;
117 #endif
118
119 extern boolean_t IOCurrentTaskHasEntitlement(const char *);
120 extern boolean_t proc_is_simulated(const proc_t);
121 extern struct proc* current_proc(void);
122
123 /* bootarg to create lightweight corpse for thread identity lockdown */
124 TUNABLE(bool, thid_should_crash, "thid_should_crash", true);
125
126 /* Allows the process to call `[thread,task]_set_exception_ports */
127 #define SET_EXCEPTION_ENTITLEMENT "com.apple.private.set-exception-port"
128
129 /*
130 * Entitlement to disallow setting the exception port of task/thread unless you
131 * are being debugged or are setting up the hardened task exception handler
132 */
133 #define IPC_ONLY_ONE_EXCEPTION_PORT "com.apple.security.only-one-exception-port"
134
135 CA_EVENT(set_exception,
136 CA_STATIC_STRING(CA_PROCNAME_LEN), current_proc,
137 CA_STATIC_STRING(CA_PROCNAME_LEN), thread_proc,
138 CA_INT, mask,
139 CA_STATIC_STRING(6), level);
140
/*
 * Which kind of bound reply port an operation refers to: the user-space
 * special reply port (ith_special_reply_port), the kernel-internal reply
 * port (ith_kernel_reply_port), or neither.
 */
__options_decl(ipc_reply_port_type_t, uint32_t, {
	IRPT_NONE = 0x00,
	IRPT_USER = 0x01,
	IRPT_KERNEL = 0x02,
});
146
147 /* forward declarations */
148 static kern_return_t special_port_allowed_with_task_flavor(int which, mach_task_flavor_t flavor);
149 static kern_return_t special_port_allowed_with_thread_flavor(int which, mach_thread_flavor_t flavor);
150 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port, ipc_reply_port_type_t reply_type);
151 static void ipc_port_unbind_special_reply_port(thread_t thread, ipc_reply_port_type_t reply_type);
152 extern kern_return_t task_conversion_eval(task_t caller, task_t victim, int flavor);
153 static thread_inspect_t convert_port_to_thread_inspect_no_eval(ipc_port_t port);
154 static ipc_port_t convert_thread_to_port_with_flavor(thread_t, thread_ro_t, mach_thread_flavor_t flavor);
155 ipc_port_t convert_task_to_port_with_flavor(task_t task, mach_task_flavor_t flavor, task_grp_t grp);
156 kern_return_t task_set_special_port(task_t task, int which, ipc_port_t port);
157 kern_return_t task_get_special_port(task_t task, int which, ipc_port_t *portp);
158
159 /*
160 * Routine: ipc_task_init
161 * Purpose:
162 * Initialize a task's IPC state.
163 *
164 * If non-null, some state will be inherited from the parent.
165 * The parent must be appropriately initialized.
166 * Conditions:
167 * Nothing locked.
168 */
169
170 void
ipc_task_init(task_t task,task_t parent)171 ipc_task_init(
172 task_t task,
173 task_t parent)
174 {
175 ipc_space_t space;
176 ipc_port_t kport;
177 ipc_port_t nport;
178 ipc_port_t pport;
179 kern_return_t kr;
180 struct label *temp_label;
181 int i;
182
183
184 kr = ipc_space_create(IPC_LABEL_NONE, &space);
185 if (kr != KERN_SUCCESS) {
186 panic("ipc_task_init");
187 }
188
189 space->is_task = task;
190
191 kport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL,
192 IPC_KOBJECT_ALLOC_NONE);
193 pport = kport;
194
195 nport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_NAME,
196 IPC_KOBJECT_ALLOC_NONE);
197
198 itk_lock_init(task);
199 task->itk_task_ports[TASK_FLAVOR_CONTROL] = kport;
200 task->itk_task_ports[TASK_FLAVOR_NAME] = nport;
201
202 /* Lazily allocated on-demand */
203 task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
204 task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
205 task->itk_dyld_notify = NULL;
206 #if CONFIG_PROC_RESOURCE_LIMITS
207 task->itk_resource_notify = NULL;
208 #endif /* CONFIG_PROC_RESOURCE_LIMITS */
209
210 task->itk_self = pport;
211 task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
212 #if CONFIG_CSR
213 if (task_is_a_corpse_fork(task)) {
214 /*
215 * No sender's notification for corpse would not
216 * work with a naked send right in kernel.
217 */
218 task->itk_settable_self = IP_NULL;
219 } else {
220 /* we just made the port, no need to triple check */
221 task->itk_settable_self = ipc_port_make_send_any(kport);
222 }
223 #endif /* CONFIG_CSR */
224 task->itk_debug_control = IP_NULL;
225 task->itk_space = space;
226
227 #if CONFIG_MACF
228 task->exc_actions[0].label = NULL;
229 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
230 mac_exc_associate_action_label(&task->exc_actions[i],
231 mac_exc_create_label(&task->exc_actions[i]));
232 }
233 #endif
234
235 /* always zero-out the first (unused) array element */
236 bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
237 /* We don't need to inherit this */
238 bzero(&task->hardened_exception_action, sizeof(task->hardened_exception_action));
239
240 if (parent == TASK_NULL) {
241 ipc_port_t port = IP_NULL;
242 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
243 task->exc_actions[i].port = IP_NULL;
244 task->exc_actions[i].flavor = 0;
245 task->exc_actions[i].behavior = 0;
246 task->exc_actions[i].privileged = FALSE;
247 }/* for */
248
249 kr = host_get_host_port(host_priv_self(), &port);
250 assert(kr == KERN_SUCCESS);
251 task->itk_host = port;
252
253 task->itk_bootstrap = IP_NULL;
254 task->itk_task_access = IP_NULL;
255
256 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
257 task->itk_registered[i] = IP_NULL;
258 }
259 } else {
260 itk_lock(parent);
261 assert(parent->itk_task_ports[TASK_FLAVOR_CONTROL] != IP_NULL);
262
263 /* inherit registered ports */
264
265 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
266 task->itk_registered[i] =
267 ipc_port_copy_send_any(parent->itk_registered[i]);
268 }
269
270 /* inherit exception and bootstrap ports */
271
272 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
273 temp_label = task->exc_actions[i].label;
274 task->exc_actions[i] = parent->exc_actions[i];
275 task->exc_actions[i].port =
276 exception_port_copy_send(parent->exc_actions[i].port);
277 task->exc_actions[i].label = temp_label;
278 #if CONFIG_MACF
279 mac_exc_inherit_action_label(parent->exc_actions + i,
280 task->exc_actions + i);
281 #endif
282 }
283
284 task->itk_host = host_port_copy_send(parent->itk_host);
285
286 task->itk_bootstrap =
287 ipc_port_copy_send_mqueue(parent->itk_bootstrap);
288
289 task->itk_task_access =
290 ipc_port_copy_send_mqueue(parent->itk_task_access);
291
292 itk_unlock(parent);
293 }
294 }
295
296 /*
297 * Routine: ipc_task_set_immovable_pinned
298 * Purpose:
299 * Make a task's control port immovable and/or pinned
300 * according to its control port options. If control port
301 * is immovable, allocate an immovable control port for the
302 * task and optionally pin it.
303 * Conditions:
304 * Task's control port is movable and not pinned.
305 */
306 void
ipc_task_set_immovable_pinned(task_t task)307 ipc_task_set_immovable_pinned(
308 task_t task)
309 {
310 ipc_port_t kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
311 ipc_port_t new_pport;
312
313 /* pport is the same as kport at ipc_task_init() time */
314 assert(task->itk_self == task->itk_task_ports[TASK_FLAVOR_CONTROL]);
315 #if CONFIG_CSR
316 assert(task->itk_self == task->itk_settable_self);
317 #endif /* CONFIG_CSR */
318 assert(!task_is_a_corpse(task));
319
320 /* only tasks opt in immovable control port can have pinned control port */
321 if (task_is_immovable(task)) {
322 ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;
323
324 if (task_is_pinned(task)) {
325 options |= IPC_KOBJECT_ALLOC_PINNED;
326 }
327
328 new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_TASK_CONTROL, options);
329
330 assert(kport != IP_NULL);
331 ipc_port_set_label(kport, IPC_LABEL_SUBST_TASK);
332 kport->ip_kolabel->ikol_alt_port = new_pport;
333
334 itk_lock(task);
335 task->itk_self = new_pport;
336 itk_unlock(task);
337
338 /* enable the pinned port */
339 ipc_kobject_enable(new_pport, task, IKOT_TASK_CONTROL);
340 }
341 }
342
343 /*
344 * Routine: ipc_task_enable
345 * Purpose:
346 * Enable a task for IPC access.
347 * Conditions:
348 * Nothing locked.
349 */
350 void
ipc_task_enable(task_t task)351 ipc_task_enable(
352 task_t task)
353 {
354 ipc_port_t kport;
355 ipc_port_t nport;
356 ipc_port_t iport;
357 ipc_port_t rdport;
358 ipc_port_t pport;
359
360 itk_lock(task);
361 if (!task->active) {
362 /*
363 * task has been terminated before we can enable IPC access.
364 * The check is to make sure we don't accidentally re-enable
365 * the task ports _after_ they've been disabled during
366 * task_terminate_internal(), in which case we will hit the
367 * !task->ipc_active assertion in ipc_task_terminate().
368 *
369 * Technically we should grab task lock when checking task
370 * active bit, but since task termination unsets task->active
371 * _before_ calling ipc_task_disable(), we can always see the
372 * truth with just itk_lock() and bail if disable has been called.
373 */
374 itk_unlock(task);
375 return;
376 }
377
378 assert(task_is_a_corpse(task) || task->map->owning_task == task); /* verify vm_map_setup called */
379 assert(!task->ipc_active || task_is_a_corpse(task));
380 task->ipc_active = true;
381
382 kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
383 if (kport != IP_NULL) {
384 ipc_kobject_enable(kport, task, IKOT_TASK_CONTROL);
385 }
386 nport = task->itk_task_ports[TASK_FLAVOR_NAME];
387 if (nport != IP_NULL) {
388 ipc_kobject_enable(nport, task, IKOT_TASK_NAME);
389 }
390 iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
391 if (iport != IP_NULL) {
392 ipc_kobject_enable(iport, task, IKOT_TASK_INSPECT);
393 }
394 rdport = task->itk_task_ports[TASK_FLAVOR_READ];
395 if (rdport != IP_NULL) {
396 ipc_kobject_enable(rdport, task, IKOT_TASK_READ);
397 }
398 pport = task->itk_self;
399 if (pport != kport && pport != IP_NULL) {
400 assert(task_is_immovable(task));
401 ipc_kobject_enable(pport, task, IKOT_TASK_CONTROL);
402 }
403
404 itk_unlock(task);
405 }
406
407 /*
408 * Routine: ipc_task_disable
409 * Purpose:
410 * Disable IPC access to a task.
411 * Conditions:
412 * Nothing locked.
413 */
414
void
ipc_task_disable(
	task_t task)
{
	ipc_port_t kport;       /* control port */
	ipc_port_t nport;       /* name port */
	ipc_port_t iport;       /* inspect port */
	ipc_port_t rdport;      /* read port */
	ipc_port_t rport;       /* resume port */
	ipc_port_t pport;       /* immovable self port */

	itk_lock(task);

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the task,
	 * as task ports do not own a reference on the task, and
	 * convert_port_to_task* will crash trying to resurrect a task.
	 */
	task->ipc_active = false;

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_TASK_CONTROL);
	}
	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	if (nport != IP_NULL) {
		ipc_kobject_disable(nport, IKOT_TASK_NAME);
	}
	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_TASK_INSPECT);
	}
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_TASK_READ);
	}
	pport = task->itk_self;
	if (pport != IP_NULL) {
		/* see port_name_is_pinned_itk_self() */
		pport->ip_receiver_name = MACH_PORT_SPECIAL_DEFAULT;
		if (pport != kport) {
			assert(task_is_immovable(task));
			assert(pport->ip_immovable_send);
			ipc_kobject_disable(pport, IKOT_TASK_CONTROL);
		}
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_disable(rport, IKOT_TASK_RESUME);
	}
	itk_unlock(task);
}
485
486 /*
487 * Routine: ipc_task_terminate
488 * Purpose:
489 * Clean up and destroy a task's IPC state.
490 * Conditions:
491 * Nothing locked. The task must be suspended.
492 * (Or the current thread must be in the task.)
493 */
494
void
ipc_task_terminate(
	task_t task)
{
	ipc_port_t kport;       /* control port */
	ipc_port_t nport;       /* name port */
	ipc_port_t iport;       /* inspect port */
	ipc_port_t rdport;      /* read port */
	ipc_port_t rport;       /* resume port */
	ipc_port_t pport;       /* immovable self port */
#if CONFIG_CSR
	ipc_port_t sself;       /* settable self send right */
#endif /* CONFIG_CSR */
	ipc_port_t *notifiers_ptr = NULL;

	itk_lock(task);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_task_disable()).
	 */
	assert(!task->ipc_active);

	kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	sself = task->itk_settable_self;
#endif /* CONFIG_CSR */
	pport = IP_NULL;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_task_ports[TASK_FLAVOR_CONTROL] = IP_NULL;

	/* snapshot and clear each port slot under the lock; freed below */
	rdport = task->itk_task_ports[TASK_FLAVOR_READ];
	task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;

	iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
	task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;

	nport = task->itk_task_ports[TASK_FLAVOR_NAME];
	assert(nport != IP_NULL);
	task->itk_task_ports[TASK_FLAVOR_NAME] = IP_NULL;

	/* steal the dyld notifier array; released after unlocking */
	if (task->itk_dyld_notify) {
		notifiers_ptr = task->itk_dyld_notify;
		task->itk_dyld_notify = NULL;
	}

	pport = task->itk_self;
	task->itk_self = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(sself)) {
		ipc_port_release_send(sself);
	}
#endif /* CONFIG_CSR */

	if (notifiers_ptr) {
		for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
			if (IP_VALID(notifiers_ptr[i])) {
				ipc_port_release_send(notifiers_ptr[i]);
			}
		}
		kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
	}

	if (IP_VALID(task->hardened_exception_action.ea.port)) {
		ipc_port_release_send(task->hardened_exception_action.ea.port);
	}

	for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

#if CONFIG_PROC_RESOURCE_LIMITS
	if (IP_VALID(task->itk_resource_notify)) {
		ipc_port_release_send(task->itk_resource_notify);
	}
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_TASK_READ);
	}
	ipc_kobject_dealloc_port(kport, 0, IKOT_TASK_CONTROL);
	/* ikol_alt_port cleared */

	/* destroy other kernel ports */
	ipc_kobject_dealloc_port(nport, 0, IKOT_TASK_NAME);
	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_TASK_INSPECT);
	}
	if (pport != IP_NULL && pport != kport) {
		ipc_kobject_dealloc_port(pport, 0, IKOT_TASK_CONTROL);
	}
	if (rport != IP_NULL) {
		ipc_kobject_dealloc_port(rport, 0, IKOT_TASK_RESUME);
	}

	itk_lock_destroy(task);
}
634
635 /*
636 * Routine: ipc_task_reset
637 * Purpose:
638 * Reset a task's IPC state to protect it when
639 * it enters an elevated security context. The
640 * task name port can remain the same - since it
641 * represents no specific privilege.
642 * Conditions:
643 * Nothing locked. The task must be suspended.
644 * (Or the current thread must be in the task.)
645 */
646
647 void
ipc_task_reset(task_t task)648 ipc_task_reset(
649 task_t task)
650 {
651 ipc_port_t old_kport, old_pport, new_kport, new_pport;
652 #if CONFIG_CSR
653 ipc_port_t old_sself;
654 #endif /* CONFIG_CSR */
655 ipc_port_t old_rdport;
656 ipc_port_t old_iport;
657 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
658 ipc_port_t old_hardened_exception;
659 ipc_port_t *notifiers_ptr = NULL;
660
661 #if CONFIG_MACF
662 /* Fresh label to unset credentials in existing labels. */
663 struct label *unset_label = mac_exc_create_label(NULL);
664 #endif
665
666 new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task,
667 IKOT_TASK_CONTROL, IPC_KOBJECT_ALLOC_NONE);
668 /*
669 * ipc_task_reset() only happens during sugid or corpsify.
670 *
671 * (1) sugid happens early in exec_mach_imgact(), at which point the old task
672 * port has not been enabled, and is left movable/not pinned.
673 * (2) corpse cannot execute more code so the notion of the immovable/pinned
674 * task port is bogus, and should appear as if it doesn't have one.
675 *
676 * So simply leave pport the same as kport.
677 */
678 new_pport = new_kport;
679
680 itk_lock(task);
681
682 old_kport = task->itk_task_ports[TASK_FLAVOR_CONTROL];
683 old_rdport = task->itk_task_ports[TASK_FLAVOR_READ];
684 old_iport = task->itk_task_ports[TASK_FLAVOR_INSPECT];
685
686 old_pport = task->itk_self;
687
688 if (old_pport == IP_NULL) {
689 /* the task is already terminated (can this happen?) */
690 itk_unlock(task);
691 ipc_kobject_dealloc_port(new_kport, 0, IKOT_TASK_CONTROL);
692 if (new_pport != new_kport) {
693 assert(task_is_immovable(task));
694 ipc_kobject_dealloc_port(new_pport, 0, IKOT_TASK_CONTROL);
695 }
696 #if CONFIG_MACF
697 mac_exc_free_label(unset_label);
698 #endif
699 return;
700 }
701
702 task->itk_task_ports[TASK_FLAVOR_CONTROL] = new_kport;
703 task->itk_self = new_pport;
704
705 #if CONFIG_CSR
706 old_sself = task->itk_settable_self;
707 if (task_is_a_corpse(task)) {
708 /* No extra send right for coprse, needed to arm no-sender notification */
709 task->itk_settable_self = IP_NULL;
710 } else {
711 /* we just made the port, no need to triple check */
712 task->itk_settable_self = ipc_port_make_send_any(new_kport);
713 }
714 #endif /* CONFIG_CSR */
715
716 /* clears ikol_alt_port */
717 ipc_kobject_disable(old_kport, IKOT_TASK_CONTROL);
718
719 /* Reset the read and inspect flavors of task port */
720 task->itk_task_ports[TASK_FLAVOR_READ] = IP_NULL;
721 task->itk_task_ports[TASK_FLAVOR_INSPECT] = IP_NULL;
722
723 if (old_pport != old_kport) {
724 assert(task_is_immovable(task));
725 ipc_kobject_disable(old_pport, IKOT_TASK_CONTROL);
726 }
727
728 if (IP_VALID(task->hardened_exception_action.ea.port)
729 && !task->hardened_exception_action.ea.privileged) {
730 old_hardened_exception = task->hardened_exception_action.ea.port;
731 task->hardened_exception_action.ea.port = IP_NULL;
732 }
733
734 for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
735 old_exc_actions[i] = IP_NULL;
736
737 if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
738 continue;
739 }
740
741 if (!task->exc_actions[i].privileged) {
742 #if CONFIG_MACF
743 mac_exc_update_action_label(task->exc_actions + i, unset_label);
744 #endif
745 old_exc_actions[i] = task->exc_actions[i].port;
746 task->exc_actions[i].port = IP_NULL;
747 }
748 }/* for */
749
750 if (IP_VALID(task->itk_debug_control)) {
751 ipc_port_release_send(task->itk_debug_control);
752 }
753 task->itk_debug_control = IP_NULL;
754
755 if (task->itk_dyld_notify) {
756 notifiers_ptr = task->itk_dyld_notify;
757 task->itk_dyld_notify = NULL;
758 }
759
760 itk_unlock(task);
761
762 #if CONFIG_MACF
763 mac_exc_free_label(unset_label);
764 #endif
765
766 /* release the naked send rights */
767 #if CONFIG_CSR
768 if (IP_VALID(old_sself)) {
769 ipc_port_release_send(old_sself);
770 }
771 #endif /* CONFIG_CSR */
772
773 if (notifiers_ptr) {
774 for (int i = 0; i < DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT; i++) {
775 if (IP_VALID(notifiers_ptr[i])) {
776 ipc_port_release_send(notifiers_ptr[i]);
777 }
778 }
779 kfree_type(ipc_port_t, DYLD_MAX_PROCESS_INFO_NOTIFY_COUNT, notifiers_ptr);
780 }
781
782 ipc_port_release_send(old_hardened_exception);
783
784 for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
785 if (IP_VALID(old_exc_actions[i])) {
786 ipc_port_release_send(old_exc_actions[i]);
787 }
788 }
789
790 /* destroy all task port flavors */
791 if (old_rdport != IP_NULL) {
792 /* read port ikol_alt_port may point to kport, dealloc first */
793 ipc_kobject_dealloc_port(old_rdport, 0, IKOT_TASK_READ);
794 }
795 ipc_kobject_dealloc_port(old_kport, 0, IKOT_TASK_CONTROL);
796 /* ikol_alt_port cleared */
797
798 if (old_iport != IP_NULL) {
799 ipc_kobject_dealloc_port(old_iport, 0, IKOT_TASK_INSPECT);
800 }
801 if (old_pport != old_kport) {
802 assert(task_is_immovable(task));
803 ipc_kobject_dealloc_port(old_pport, 0, IKOT_TASK_CONTROL);
804 }
805 }
806
807 /*
808 * Routine: ipc_thread_init
809 * Purpose:
810 * Initialize a thread's IPC state.
811 * Conditions:
812 * Nothing locked.
813 */
814
void
ipc_thread_init(
	task_t          task,
	thread_t        thread,
	thread_ro_t     tro,
	ipc_thread_init_options_t options)
{
	ipc_port_t kport;       /* movable control port (THREAD_FLAVOR_CONTROL) */
	ipc_port_t pport;       /* self port; may be a distinct immovable port */
	ipc_kobject_alloc_options_t alloc_options = IPC_KOBJECT_ALLOC_NONE;

	if (task_is_immovable(task) && !(options & IPC_THREAD_INIT_MAINTHREAD)) {
		/*
		 * pthreads and raw threads both have immovable port upon creation.
		 * pthreads are subsequently pinned via ipc_port_copyout_send_pinned() whereas
		 * raw threads are left unpinned.
		 */
		alloc_options |= IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		pport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, alloc_options);

		/* the labeled kport substitutes lookups to the immovable pport */
		kport = ipc_kobject_alloc_labeled_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_LABEL_SUBST_THREAD, IPC_KOBJECT_ALLOC_NONE);
		kport->ip_kolabel->ikol_alt_port = pport;
	} else {
		/*
		 * Main thread is created movable but may be set immovable and pinned in
		 * main_thread_set_immovable_pinned(). It needs to be handled separately
		 * because task_control_port_options is not available at main thread creation time.
		 */
		kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
		    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);

		pport = kport;
	}

	tro->tro_self_port = pport;
	/* we just made the port, no need to triple check */
#if CONFIG_CSR
	tro->tro_settable_self_port = ipc_port_make_send_any(kport);
#endif /* CONFIG_CSR */
	tro->tro_ports[THREAD_FLAVOR_CONTROL] = kport;

	thread->ith_special_reply_port = NULL;

#if IMPORTANCE_INHERITANCE
	thread->ith_assertions = 0;
#endif

	thread->ipc_active = true;
	ipc_kmsg_queue_init(&thread->ith_messages);

	thread->ith_kernel_reply_port = IP_NULL;
}
870
void
ipc_main_thread_set_immovable_pinned(thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	task_t task = tro->tro_task;
	ipc_port_t new_pport;

	/* only the main thread is deferred to this point; see ipc_thread_init() */
	assert(thread_get_tag(thread) & THREAD_TAG_MAINTHREAD);

	/* pport is the same as kport at ipc_thread_init() time */
	assert(tro->tro_self_port == tro->tro_ports[THREAD_FLAVOR_CONTROL]);
#if CONFIG_CSR
	assert(tro->tro_self_port == tro->tro_settable_self_port);
#endif /* CONFIG_CSR */

	/*
	 * Main thread port is immovable/pinned depending on whether owner task has
	 * immovable/pinned task control port. task_control_port_options is now set.
	 */
	if (task_is_immovable(task)) {
		ipc_kobject_alloc_options_t options = IPC_KOBJECT_ALLOC_IMMOVABLE_SEND;

		if (task_is_pinned(task)) {
			options |= IPC_KOBJECT_ALLOC_PINNED;
		}

		new_pport = ipc_kobject_alloc_port(IKO_NULL, IKOT_THREAD_CONTROL, options);

		assert(kport != IP_NULL);
		/* make the movable kport substitute lookups to the new port */
		ipc_port_set_label(kport, IPC_LABEL_SUBST_THREAD);
		kport->ip_kolabel->ikol_alt_port = new_pport;

		/* tro_self_port lives in read-only memory; update via RO zone */
		thread_mtx_lock(thread);
		zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_self_port, &new_pport);
		thread_mtx_unlock(thread);

		/* enable the pinned port */
		ipc_kobject_enable(new_pport, thread, IKOT_THREAD_CONTROL);
	}
}
912
/*
 * Wrapper so a thread's exception action table can be allocated and freed
 * with kalloc_type()/kfree_type() as one typed object.
 */
struct thread_init_exc_actions {
	struct exception_action array[EXC_TYPES_COUNT];
};
916
917 static void
ipc_thread_init_exc_actions(thread_ro_t tro)918 ipc_thread_init_exc_actions(thread_ro_t tro)
919 {
920 struct exception_action *actions;
921
922 actions = kalloc_type(struct thread_init_exc_actions,
923 Z_WAITOK | Z_ZERO | Z_NOFAIL)->array;
924
925 #if CONFIG_MACF
926 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
927 mac_exc_associate_action_label(&actions[i],
928 mac_exc_create_label(&actions[i]));
929 }
930 #endif
931
932 zalloc_ro_update_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions, &actions);
933 }
934
935 static void
ipc_thread_destroy_exc_actions(thread_ro_t tro)936 ipc_thread_destroy_exc_actions(thread_ro_t tro)
937 {
938 struct exception_action *actions = tro->tro_exc_actions;
939
940 if (actions) {
941 #if CONFIG_MACF
942 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
943 mac_exc_free_action_label(actions + i);
944 }
945 #endif
946
947 zalloc_ro_clear_field(ZONE_ID_THREAD_RO, tro, tro_exc_actions);
948 struct thread_init_exc_actions *tr_actions =
949 (struct thread_init_exc_actions *)actions;
950 kfree_type(struct thread_init_exc_actions, tr_actions);
951 }
952 }
953
static void
ipc_thread_ro_update_ports(
	thread_ro_t             tro,
	const struct thread_ro *tro_tpl)
{
	/*
	 * Copy tro_self_port, tro_settable_self_port (CONFIG_CSR only) and
	 * the whole tro_ports array from the template into the read-only
	 * thread structure with a single zalloc_ro_mut() call.  This relies
	 * on the three members being contiguous in struct thread_ro, which
	 * the static_asserts below verify at compile time.
	 */
	vm_size_t offs = offsetof(struct thread_ro, tro_self_port);
	vm_size_t size = sizeof(struct ipc_port *) +
#if CONFIG_CSR
	    sizeof(struct ipc_port *) +
#endif /* CONFIG_CSR */
	    sizeof(tro_tpl->tro_ports);

#if CONFIG_CSR
	/*
	 * NOTE(review): `struct ipc_port_t` below (vs `struct ipc_port`
	 * above) looks like a typo.  It is harmless -- any pointer to a
	 * struct has the same size -- but should be confirmed and
	 * normalized.
	 */
	static_assert(offsetof(struct thread_ro, tro_settable_self_port) ==
	    offsetof(struct thread_ro, tro_self_port) +
	    sizeof(struct ipc_port_t *));
#endif /* CONFIG_CSR */
	static_assert(offsetof(struct thread_ro, tro_ports) ==
	    offsetof(struct thread_ro, tro_self_port) +
#if CONFIG_CSR
	    sizeof(struct ipc_port_t *) +
#endif /* CONFIG_CSR */
	    sizeof(struct ipc_port_t *));

	zalloc_ro_mut(ZONE_ID_THREAD_RO, tro,
	    offs, &tro_tpl->tro_self_port, size);
}
981
982 /*
983 * Routine: ipc_thread_disable
984 * Purpose:
 * Disable IPC access to a thread.
986 * Conditions:
987 * Thread locked.
988 */
void
ipc_thread_disable(
	thread_t thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	ipc_port_t iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	ipc_port_t rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	ipc_port_t pport = tro->tro_self_port;

	/*
	 * This innocuous looking line is load bearing.
	 *
	 * It is used to disable the creation of lazy made ports.
	 * We must do so before we drop the last reference on the thread,
	 * as thread ports do not own a reference on the thread, and
	 * convert_port_to_thread* will crash trying to resurrect a thread.
	 */
	thread->ipc_active = false;

	if (kport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(kport, IKOT_THREAD_CONTROL);
	}

	if (iport != IP_NULL) {
		ipc_kobject_disable(iport, IKOT_THREAD_INSPECT);
	}

	if (rdport != IP_NULL) {
		/* clears ikol_alt_port */
		ipc_kobject_disable(rdport, IKOT_THREAD_READ);
	}

	/* the immovable self port, when distinct from the control port */
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		assert(pport->ip_immovable_send);
		ipc_kobject_disable(pport, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1034
1035 /*
1036 * Routine: ipc_thread_terminate
1037 * Purpose:
1038 * Clean up and destroy a thread's IPC state.
1039 * Conditions:
1040 * Nothing locked.
1041 */
1042
void
ipc_thread_terminate(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t kport = IP_NULL;
	ipc_port_t iport = IP_NULL;
	ipc_port_t rdport = IP_NULL;
	ipc_port_t pport = IP_NULL;
#if CONFIG_CSR
	ipc_port_t sport = IP_NULL;
#endif /* CONFIG_CSR */

	thread_mtx_lock(thread);

	/*
	 * If we ever failed to clear ipc_active before the last reference
	 * was dropped, lazy ports might be made and used after the last
	 * reference is dropped and cause use after free (see comment in
	 * ipc_thread_disable()).
	 */
	assert(!thread->ipc_active);

	/* snapshot every port under the thread mutex */
	kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];
	rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	pport = tro->tro_self_port;
#if CONFIG_CSR
	sport = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */

	/*
	 * A thread without a control port never had send rights or
	 * exception actions made for it; only clean those up when a
	 * control port exists.
	 */
	if (kport != IP_NULL) {
#if CONFIG_CSR
		if (IP_VALID(sport)) {
			ipc_port_release_send(sport);
		}
#endif /* CONFIG_CSR */

		/* wipe all port fields in the read-only struct in one go */
		ipc_thread_ro_update_ports(tro, &(struct thread_ro){ });

		if (tro->tro_exc_actions != NULL) {
			for (int i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(tro->tro_exc_actions[i].port)) {
					ipc_port_release_send(tro->tro_exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(tro);
		}
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));
	thread_mtx_unlock(thread);

	/* clears read port ikol_alt_port, must be done first */
	if (rdport != IP_NULL) {
		ipc_kobject_dealloc_port(rdport, 0, IKOT_THREAD_READ);
	}
	/* control port can also have ikol_alt_port */
	if (kport != IP_NULL) {
		ipc_kobject_dealloc_port(kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (iport != IP_NULL) {
		ipc_kobject_dealloc_port(iport, 0, IKOT_THREAD_INSPECT);
	}
	/* pinned self port is distinct from kport only for immovable tasks */
	if (pport != kport && pport != IP_NULL) {
		assert(task_is_immovable(tro->tro_task));
		ipc_kobject_dealloc_port(pport, 0, IKOT_THREAD_CONTROL);
	}
	if (thread->ith_kernel_reply_port != IP_NULL) {
		thread_dealloc_kernel_special_reply_port(thread);
	}
}
1121
1122 /*
1123 * Routine: ipc_thread_reset
1124 * Purpose:
1125 * Reset the IPC state for a given Mach thread when
1126 * its task enters an elevated security context.
1127 * All flavors of thread port and its exception ports have
1128 * to be reset. Its RPC reply port cannot have any
1129 * rights outstanding, so it should be fine. The thread
1130 * inspect and read port are set to NULL.
1131 * Conditions:
1132 * Nothing locked.
1133 */
1134
void
ipc_thread_reset(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t old_kport, new_kport, old_pport, new_pport;
#if CONFIG_CSR
	ipc_port_t old_sself;
#endif /* CONFIG_CSR */
	ipc_port_t old_rdport;
	ipc_port_t old_iport;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t has_old_exc_actions = FALSE;
	boolean_t thread_is_immovable;
	int i;

#if CONFIG_MACF
	struct label *new_label = mac_exc_create_label(NULL);
#endif

	thread_is_immovable = ip_is_immovable_send(tro->tro_self_port);

	/* allocate the replacement control port before taking the mutex */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread,
	    IKOT_THREAD_CONTROL, IPC_KOBJECT_ALLOC_NONE);
	/*
	 * ipc_thread_reset() only happens during sugid or corpsify.
	 *
	 * (1) sugid happens early in exec_mach_imgact(), at which point the old thread
	 * port is still movable/not pinned.
	 * (2) corpse cannot execute more code so the notion of the immovable/pinned
	 * thread port is bogus, and should appear as if it doesn't have one.
	 *
	 * So simply leave pport the same as kport.
	 */
	new_pport = new_kport;

	thread_mtx_lock(thread);

	old_kport = tro->tro_ports[THREAD_FLAVOR_CONTROL];
	old_rdport = tro->tro_ports[THREAD_FLAVOR_READ];
	old_iport = tro->tro_ports[THREAD_FLAVOR_INSPECT];

#if CONFIG_CSR
	old_sself = tro->tro_settable_self_port;
#endif /* CONFIG_CSR */
	old_pport = tro->tro_self_port;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		/* undo the speculative allocations above and bail */
		ipc_kobject_dealloc_port(new_kport, 0, IKOT_THREAD_CONTROL);
		if (thread_is_immovable) {
			ipc_kobject_dealloc_port(new_pport, 0,
			    IKOT_THREAD_CONTROL);
		}
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	/* the thread gets fresh ports, so it is IPC-active again */
	thread->ipc_active = true;

	struct thread_ro tpl = {
		.tro_self_port = new_pport,
		/* we just made the port, no need to triple check */
#if CONFIG_CSR
		.tro_settable_self_port = ipc_port_make_send_any(new_kport),
#endif /* CONFIG_CSR */
		.tro_ports[THREAD_FLAVOR_CONTROL] = new_kport,
	};

	/* publish the new ports; read/inspect flavors become IP_NULL */
	ipc_thread_ro_update_ports(tro, &tpl);

	if (old_kport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_kport, IKOT_THREAD_CONTROL);
	}
	if (old_rdport != IP_NULL) {
		/* clears ikol_alt_port */
		(void)ipc_kobject_disable(old_rdport, IKOT_THREAD_READ);
	}
	if (old_iport != IP_NULL) {
		(void)ipc_kobject_disable(old_iport, IKOT_THREAD_INSPECT);
	}
	if (thread_is_immovable && old_pport != IP_NULL) {
		(void)ipc_kobject_disable(old_pport, IKOT_THREAD_CONTROL);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (tro->tro_exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (tro->tro_exc_actions[i].privileged) {
				/* keep the privileged port; nothing to release */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(tro->tro_exc_actions + i, new_label);
#endif
				/* strip the unprivileged port; release it below */
				old_exc_actions[i] = tro->tro_exc_actions[i].port;
				tro->tro_exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */
#if CONFIG_CSR
	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}
#endif /* CONFIG_CSR */

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel ports (read before control, for ikol_alt_port) */
	if (old_rdport != IP_NULL) {
		ipc_kobject_dealloc_port(old_rdport, 0, IKOT_THREAD_READ);
	}
	if (old_kport != IP_NULL) {
		ipc_kobject_dealloc_port(old_kport, 0, IKOT_THREAD_CONTROL);
	}
	/* ikol_alt_port cleared */

	if (old_iport != IP_NULL) {
		ipc_kobject_dealloc_port(old_iport, 0, IKOT_THREAD_INSPECT);
	}
	if (old_pport != old_kport && old_pport != IP_NULL) {
		assert(thread_is_immovable);
		ipc_kobject_dealloc_port(old_pport, 0, IKOT_THREAD_CONTROL);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}
}
1284
1285 /*
1286 * Routine: retrieve_task_self_fast
1287 * Purpose:
1288 * Optimized version of retrieve_task_self,
1289 * that only works for the current task.
1290 *
1291 * Return a send right (possibly null/dead)
1292 * for the task's user-visible self port.
1293 * Conditions:
1294 * Nothing locked.
1295 */
1296
static ipc_port_t
retrieve_task_self_fast(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

#if CONFIG_CSR
	/*
	 * A settable self port that differs from the control port means
	 * user space interposed on mach_task_self(); hand out a copy of
	 * that (movable) right instead.
	 */
	if (task->itk_settable_self != task->itk_task_ports[TASK_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(task->itk_settable_self);
	} else
#endif
	{
		/* no interposing, return the IMMOVABLE port */
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
#if (DEBUG || DEVELOPMENT)
		if (task_is_immovable(task)) {
			assert(ip_is_immovable_send(port));
			if (task_is_pinned(task)) {
				/* pinned port is also immovable */
				assert(ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	itk_unlock(task);

	return port;
}
1335
1336 /*
1337 * Routine: mach_task_is_self
1338 * Purpose:
1339 * [MIG call] Checks if the task (control/read/inspect/name/movable)
1340 * port is pointing to current_task.
1341 */
1342 kern_return_t
mach_task_is_self(task_t task,boolean_t * is_self)1343 mach_task_is_self(
1344 task_t task,
1345 boolean_t *is_self)
1346 {
1347 if (task == TASK_NULL) {
1348 return KERN_INVALID_ARGUMENT;
1349 }
1350
1351 *is_self = (task == current_task());
1352
1353 return KERN_SUCCESS;
1354 }
1355
1356 /*
1357 * Routine: retrieve_thread_self_fast
1358 * Purpose:
1359 * Return a send right (possibly null/dead)
1360 * for the thread's user-visible self port.
1361 *
1362 * Only works for the current thread.
1363 *
1364 * Conditions:
1365 * Nothing locked.
1366 */
1367
ipc_port_t
retrieve_thread_self_fast(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(tro->tro_self_port != IP_NULL);

#if CONFIG_CSR
	/*
	 * A settable self port differing from the control port means user
	 * space interposed on the thread self port; return a copy of that
	 * (movable) right instead.
	 */
	if (tro->tro_settable_self_port != tro->tro_ports[THREAD_FLAVOR_CONTROL]) {
		port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
	} else
#endif
	{
		/* no interposing, return IMMOVABLE_PORT */
		port = ipc_kobject_make_send(tro->tro_self_port, thread,
		    IKOT_THREAD_CONTROL);
#if (DEBUG || DEVELOPMENT)
		if (task_is_immovable(tro->tro_task)) {
			assert(ip_is_immovable_send(port));
			uint16_t tag = thread_get_tag(thread);
			/* terminated threads are unpinned */
			if (thread->active && (tag & (THREAD_TAG_PTHREAD | THREAD_TAG_MAINTHREAD))) {
				assert(ip_is_pinned(port));
			} else {
				assert(!ip_is_pinned(port));
			}
		} else {
			assert(!ip_is_immovable_send(port));
			assert(!ip_is_pinned(port));
		}
#endif
	}

	thread_mtx_unlock(thread);

	return port;
}
1411
1412 /*
1413 * Routine: task_self_trap [mach trap]
1414 * Purpose:
1415 * Give the caller send rights for his own task port.
1416 * Conditions:
1417 * Nothing locked.
1418 * Returns:
1419 * MACH_PORT_NULL if there are any resource failures
1420 * or other errors.
1421 */
1422
mach_port_name_t
task_self_trap(
	__unused struct task_self_trap_args *args)
{
	task_t task = current_task();
	ipc_port_t sright;
	mach_port_name_t name;

	sright = retrieve_task_self_fast(task);
	name = ipc_port_copyout_send(sright, task->itk_space);

	/*
	 * When the right is pinned, memorize the name we gave it
	 * in ip_receiver_name (it's an abuse as this port really
	 * isn't a message queue, but the field is up for grabs
	 * and otherwise `MACH_PORT_SPECIAL_DEFAULT` for special ports).
	 *
	 * port_name_to_task* use this to fastpath IPCs to mach_task_self()
	 * when it is pinned.
	 *
	 * ipc_task_disable() will revert this when the task dies.
	 */
	if (sright == task->itk_self && sright->ip_pinned &&
	    MACH_PORT_VALID(name)) {
		itk_lock(task);
		if (task->ipc_active) {
			if (ip_get_receiver_name(sright) == MACH_PORT_SPECIAL_DEFAULT) {
				/* first copyout: record the chosen name */
				sright->ip_receiver_name = name;
			} else if (ip_get_receiver_name(sright) != name) {
				/* a pinned right must keep a stable name */
				panic("mach_task_self() name changed");
			}
		}
		itk_unlock(task);
	}
	return name;
}
1459
1460 /*
1461 * Routine: thread_self_trap [mach trap]
1462 * Purpose:
1463 * Give the caller send rights for his own thread port.
1464 * Conditions:
1465 * Nothing locked.
1466 * Returns:
1467 * MACH_PORT_NULL if there are any resource failures
1468 * or other errors.
1469 */
1470
1471 mach_port_name_t
thread_self_trap(__unused struct thread_self_trap_args * args)1472 thread_self_trap(
1473 __unused struct thread_self_trap_args *args)
1474 {
1475 thread_t thread = current_thread();
1476 ipc_space_t space = current_space();
1477 ipc_port_t sright;
1478 mach_port_name_t name;
1479
1480 sright = retrieve_thread_self_fast(thread);
1481 name = ipc_port_copyout_send(sright, space);
1482 return name;
1483 }
1484
1485 /*
1486 * Routine: mach_reply_port [mach trap]
1487 * Purpose:
1488 * Allocate a port for the caller.
1489 * Conditions:
1490 * Nothing locked.
1491 * Returns:
1492 * MACH_PORT_NULL if there are any resource failures
1493 * or other errors.
1494 */
1495
1496 mach_port_name_t
mach_reply_port(__unused struct mach_reply_port_args * args)1497 mach_reply_port(
1498 __unused struct mach_reply_port_args *args)
1499 {
1500 ipc_port_t port;
1501 mach_port_name_t name;
1502 kern_return_t kr;
1503
1504 kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
1505 &name, &port);
1506 if (kr == KERN_SUCCESS) {
1507 ip_mq_unlock(port);
1508 } else {
1509 name = MACH_PORT_NULL;
1510 }
1511 return name;
1512 }
1513
1514 /*
1515 * Routine: thread_get_special_reply_port [mach trap]
1516 * Purpose:
1517 * Allocate a special reply port for the calling thread.
1518 * Conditions:
1519 * Nothing locked.
1520 * Returns:
1521 * mach_port_name_t: send right & receive right for special reply port.
1522 * MACH_PORT_NULL if there are any resource failures
1523 * or other errors.
1524 */
1525
mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	thread_t thread = current_thread();
	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_USER);
	}

	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
	if (kr == KERN_SUCCESS) {
		/* ipc_port_alloc() returns the port locked */
		ipc_port_bind_special_reply_port_locked(port, IRPT_USER);
		ip_mq_unlock(port);
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
1551
1552 /*
1553 * Routine: thread_get_kernel_special_reply_port
1554 * Purpose:
1555 * Allocate a kernel special reply port for the calling thread.
1556 * Conditions:
1557 * Nothing locked.
1558 * Returns:
1559 * Creates and sets kernel special reply port.
1560 * KERN_SUCCESS on Success.
 *		KERN_SUCCESS always (a port allocation failure is silently
 *		ignored and simply leaves no kernel reply port bound).
1562 */
1563
kern_return_t
thread_get_kernel_special_reply_port(void)
{
	ipc_port_t port = IP_NULL;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_kernel_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
	}

	port = ipc_port_alloc_reply(); /*returns a reference on the port */
	if (port != IPC_PORT_NULL) {
		ip_mq_lock(port);
		ipc_port_bind_special_reply_port_locked(port, IRPT_KERNEL);
		ip_mq_unlock(port);
		ip_release(port); /* release the reference returned by ipc_port_alloc_reply */
	}
	/* NOTE: returns KERN_SUCCESS even if ipc_port_alloc_reply() failed */
	return KERN_SUCCESS;
}
1584
1585 /*
1586 * Routine: ipc_port_bind_special_reply_port_locked
1587 * Purpose:
1588 * Bind the given port to current thread as a special reply port.
1589 * Conditions:
1590 * Port locked.
1591 * Returns:
1592 * None.
1593 */
1594
static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t              port,
	ipc_reply_port_type_t   reply_type)
{
	thread_t thread = current_thread();
	ipc_port_t *reply_portp;

	/* user and kernel special reply ports live in separate fields */
	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	/* caller must have unbound any previous port of this type */
	assert(*reply_portp == NULL);
	assert(port->ip_specialreply);
	assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);

	/* the thread's binding holds its own port reference */
	ip_reference(port);
	*reply_portp = port;
	port->ip_messages.imq_srp_owner_thread = thread;

	ipc_special_reply_port_bits_reset(port);
}
1619
1620 /*
1621 * Routine: ipc_port_unbind_special_reply_port
1622 * Purpose:
1623 * Unbind the thread's special reply port.
1624 * If the special port has threads waiting on turnstile,
1625 * update it's inheritor.
1626 * Condition:
1627 * Nothing locked.
1628 * Returns:
1629 * None.
1630 */
static void
ipc_port_unbind_special_reply_port(
	thread_t                thread,
	ipc_reply_port_type_t   reply_type)
{
	ipc_port_t *reply_portp;

	if (reply_type == IRPT_USER) {
		reply_portp = &thread->ith_special_reply_port;
	} else {
		reply_portp = &thread->ith_kernel_reply_port;
	}

	/* callers guarantee a valid port is currently bound */
	ipc_port_t special_reply_port = *reply_portp;

	ip_mq_lock(special_reply_port);

	*reply_portp = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/* Destroy the port if it's a kernel special reply, else just release a ref */
	if (reply_type == IRPT_USER) {
		ip_release(special_reply_port);
	} else {
		ipc_port_dealloc_reply(special_reply_port);
	}
	return;
}
1661
1662 /*
1663 * Routine: thread_dealloc_kernel_special_reply_port
1664 * Purpose:
1665 * Unbind the thread's kernel special reply port.
1666 * If the special port has threads waiting on turnstile,
1667 * update it's inheritor.
1668 * Condition:
1669 * Called on current thread or a terminated thread.
1670 * Returns:
1671 * None.
1672 */
1673
void
thread_dealloc_kernel_special_reply_port(thread_t thread)
{
	/* thin wrapper: unbind and destroy the kernel special reply port */
	ipc_port_unbind_special_reply_port(thread, IRPT_KERNEL);
}
1679
1680 /*
1681 * Routine: thread_get_special_port [kernel call]
1682 * Purpose:
1683 * Clones a send right for one of the thread's
1684 * special ports.
1685 * Conditions:
1686 * Nothing locked.
1687 * Returns:
1688 * KERN_SUCCESS Extracted a send right.
1689 * KERN_INVALID_ARGUMENT The thread is null.
1690 * KERN_FAILURE The thread is dead.
1691 * KERN_INVALID_ARGUMENT Invalid special port.
1692 */
1693
/* forward declaration; the definition follows the internal helper below */
kern_return_t
thread_get_special_port(
	thread_inspect_t         thread,
	int                      which,
	ipc_port_t              *portp);
1699
static kern_return_t
thread_get_special_port_internal(
	thread_inspect_t         thread,
	thread_ro_t              tro,
	int                      which,
	ipc_port_t              *portp,
	mach_thread_flavor_t     flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	/* reject requests the caller's port flavor is not entitled to */
	if ((kr = special_port_allowed_with_thread_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	thread_mtx_lock(thread);
	if (!thread->active) {
		thread_mtx_unlock(thread);
		return KERN_FAILURE;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
		port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* interposed settable self port takes precedence, if set */
		if (tro->tro_settable_self_port != port) {
			port = ipc_port_copy_send_mqueue(tro->tro_settable_self_port);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, thread, IKOT_THREAD_CONTROL);
		}
		thread_mtx_unlock(thread);
		break;

	case THREAD_READ_PORT:
	case THREAD_INSPECT_PORT:
		/* drop the mutex: the conversion takes its own locks */
		thread_mtx_unlock(thread);
		mach_thread_flavor_t current_flavor = (which == THREAD_READ_PORT) ?
		    THREAD_FLAVOR_READ : THREAD_FLAVOR_INSPECT;
		/* convert_thread_to_port_with_flavor consumes a thread reference */
		thread_reference(thread);
		port = convert_thread_to_port_with_flavor(thread, tro, current_flavor);
		break;

	default:
		thread_mtx_unlock(thread);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
1753
1754 kern_return_t
thread_get_special_port(thread_inspect_t thread,int which,ipc_port_t * portp)1755 thread_get_special_port(
1756 thread_inspect_t thread,
1757 int which,
1758 ipc_port_t *portp)
1759 {
1760 if (thread == THREAD_NULL) {
1761 return KERN_INVALID_ARGUMENT;
1762 }
1763
1764 return thread_get_special_port_internal(thread, get_thread_ro(thread),
1765 which, portp, THREAD_FLAVOR_CONTROL);
1766 }
1767
static ipc_port_t
thread_get_non_substituted_self(thread_t thread, thread_ro_t tro)
{
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);
	port = tro->tro_ports[THREAD_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* interposed settable self port takes precedence, if set */
	if (tro->tro_settable_self_port != port) {
		port = ipc_port_make_send_mqueue(tro->tro_settable_self_port);
	} else
#endif /* CONFIG_CSR */
	{
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	}
	thread_mtx_unlock(thread);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
1788
kern_return_t
thread_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	thread_ro_t             tro;
	ipc_kobject_type_t      kotype;
	mach_thread_flavor_t    flavor;
	kern_return_t           kr = KERN_SUCCESS;

	thread_t thread = convert_port_to_thread_inspect_no_eval(port);

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	tro = get_thread_ro(thread);
	kotype = ip_kotype(port);

	if (which == THREAD_KERNEL_PORT && tro->tro_task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for threads belong to current_task,
		 * because foreign thread ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_THREAD_CONTROL) {
			/* own thread via control port: one-time substitution port */
			*portp = thread_get_non_substituted_self(thread, tro);
			goto out;
		}
	}

	/* map the supplied port's kobject type to a request flavor */
	switch (kotype) {
	case IKOT_THREAD_CONTROL:
		flavor = THREAD_FLAVOR_CONTROL;
		break;
	case IKOT_THREAD_READ:
		flavor = THREAD_FLAVOR_READ;
		break;
	case IKOT_THREAD_INSPECT:
		flavor = THREAD_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_thread_inspect_no_eval() rules this out */
		panic("strange kobject type");
	}

	kr = thread_get_special_port_internal(thread, tro, which, portp, flavor);
out:
	/* drop the reference taken by the port-to-thread conversion */
	thread_deallocate(thread);
	return kr;
}
1845
1846 static kern_return_t
special_port_allowed_with_thread_flavor(int which,mach_thread_flavor_t flavor)1847 special_port_allowed_with_thread_flavor(
1848 int which,
1849 mach_thread_flavor_t flavor)
1850 {
1851 switch (flavor) {
1852 case THREAD_FLAVOR_CONTROL:
1853 return KERN_SUCCESS;
1854
1855 case THREAD_FLAVOR_READ:
1856
1857 switch (which) {
1858 case THREAD_READ_PORT:
1859 case THREAD_INSPECT_PORT:
1860 return KERN_SUCCESS;
1861 default:
1862 return KERN_INVALID_CAPABILITY;
1863 }
1864
1865 case THREAD_FLAVOR_INSPECT:
1866
1867 switch (which) {
1868 case THREAD_INSPECT_PORT:
1869 return KERN_SUCCESS;
1870 default:
1871 return KERN_INVALID_CAPABILITY;
1872 }
1873
1874 default:
1875 return KERN_INVALID_CAPABILITY;
1876 }
1877 }
1878
1879 /*
1880 * Routine: thread_set_special_port [kernel call]
1881 * Purpose:
1882 * Changes one of the thread's special ports,
1883 * setting it to the supplied send right.
1884 * Conditions:
1885 * Nothing locked. If successful, consumes
1886 * the supplied send right.
1887 * Returns:
1888 * KERN_SUCCESS Changed the special port.
1889 * KERN_INVALID_ARGUMENT The thread is null.
1890 * KERN_INVALID_RIGHT Port is marked as immovable.
1891 * KERN_FAILURE The thread is dead.
1892 * KERN_INVALID_ARGUMENT Invalid special port.
1893 * KERN_NO_ACCESS Restricted access to set port.
1894 */
1895
kern_return_t
thread_set_special_port(
	thread_t        thread,
	int             which,
	ipc_port_t      port)
{
	kern_return_t   result = KERN_SUCCESS;
	thread_ro_t     tro = NULL;
	ipc_port_t      old = IP_NULL;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through thread_get_special_port()
	 */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case THREAD_KERNEL_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of thread-self
			 * special port from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			tro = get_thread_ro(thread);

			thread_mtx_lock(thread);
			if (thread->active) {
				/* swap in the new right under the mutex */
				old = tro->tro_settable_self_port;
				zalloc_ro_update_field(ZONE_ID_THREAD_RO,
				    tro, tro_settable_self_port, &port);
			} else {
				result = KERN_FAILURE;
			}
			thread_mtx_unlock(thread);

			/* release the displaced right outside the mutex */
			if (IP_VALID(old)) {
				ipc_port_release_send(old);
			}

			return result;
		}
#else
		/* silence unused-variable warnings when CSR is compiled out */
		(void)old;
		(void)result;
		(void)tro;
#endif /* CONFIG_CSR */
		return KERN_NO_ACCESS;

	default:
		/* THREAD_KERNEL_PORT is the only settable thread special port */
		return KERN_INVALID_ARGUMENT;
	}
}
1956
1957 /*
1958 * Routine: task_get_special_port [kernel call]
1959 * Purpose:
1960 * Clones a send right for one of the task's
1961 * special ports.
1962 * Conditions:
1963 * Nothing locked.
1964 * Returns:
1965 * KERN_SUCCESS Extracted a send right.
1966 * KERN_INVALID_ARGUMENT The task is null.
1967 * KERN_FAILURE The task/space is dead.
1968 * KERN_INVALID_ARGUMENT Invalid special port.
1969 */
1970
static kern_return_t
task_get_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      *portp,
	mach_task_flavor_t      flavor)
{
	kern_return_t kr;
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* reject requests the caller's port flavor is not entitled to */
	if ((kr = special_port_allowed_with_task_flavor(which, flavor)) != KERN_SUCCESS) {
		return kr;
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
		/* interposed settable self port takes precedence, if set */
		if (task->itk_settable_self != port) {
			port = ipc_port_copy_send_mqueue(task->itk_settable_self);
		} else
#endif /* CONFIG_CSR */
		{
			port = ipc_kobject_copy_send(port, task, IKOT_TASK_CONTROL);
		}
		itk_unlock(task);
		break;

	case TASK_READ_PORT:
	case TASK_INSPECT_PORT:
		/* drop the lock: the conversion takes its own locks */
		itk_unlock(task);
		mach_task_flavor_t current_flavor = (which == TASK_READ_PORT) ?
		    TASK_FLAVOR_READ : TASK_FLAVOR_INSPECT;
		/* convert_task_to_port_with_flavor consumes a task reference */
		task_reference(task);
		port = convert_task_to_port_with_flavor(task, current_flavor, TASK_GRP_KERNEL);
		break;

	case TASK_NAME_PORT:
		port = ipc_kobject_make_send(task->itk_task_ports[TASK_FLAVOR_NAME],
		    task, IKOT_TASK_NAME);
		itk_unlock(task);
		break;

	case TASK_HOST_PORT:
		port = host_port_copy_send(task->itk_host);
		itk_unlock(task);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_bootstrap);
		itk_unlock(task);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_task_access);
		itk_unlock(task);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_debug_control);
		itk_unlock(task);
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		port = ipc_port_copy_send_mqueue(task->itk_resource_notify);
		itk_unlock(task);
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	*portp = port;
	return KERN_SUCCESS;
}
2060
/* Kernel/Kext call only and skips MACF checks. MIG uses task_get_special_port_from_user(). */
kern_return_t
task_get_special_port(
	task_t          task,
	int             which,
	ipc_port_t      *portp)
{
	/* in-kernel callers always act with the CONTROL flavor */
	return task_get_special_port_internal(task, which, portp, TASK_FLAVOR_CONTROL);
}
2070
static ipc_port_t
task_get_non_substituted_self(task_t task)
{
	ipc_port_t port = IP_NULL;

	itk_lock(task);
	port = task->itk_task_ports[TASK_FLAVOR_CONTROL];
#if CONFIG_CSR
	/* interposed settable self port takes precedence, if set */
	if (task->itk_settable_self != port) {
		port = ipc_port_make_send_mqueue(task->itk_settable_self);
	} else
#endif /* CONFIG_CSR */
	{
		port = ipc_kobject_make_send(port, task, IKOT_TASK_CONTROL);
	}
	itk_unlock(task);

	/* takes ownership of the send right */
	return ipc_kobject_alloc_subst_once(port);
}
2091
/* MIG call only. Kernel/Kext uses task_get_special_port() */
kern_return_t
task_get_special_port_from_user(
	mach_port_t     port,
	int             which,
	ipc_port_t      *portp)
{
	ipc_kobject_type_t kotype;
	mach_task_flavor_t flavor;
	kern_return_t kr = KERN_SUCCESS;

	task_t task = convert_port_to_task_inspect_no_eval(port);

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	kotype = ip_kotype(port);

#if CONFIG_MACF
	/* policy hook sees the real requested port, before any fallback */
	if (mac_task_check_get_task_special_port(current_task(), task, which)) {
		kr = KERN_DENIED;
		goto out;
	}
#endif

	if (which == TASK_KERNEL_PORT && task == current_task()) {
#if CONFIG_MACF
		/*
		 * only check for current_task,
		 * because foreign task ports are always movable
		 */
		if (mac_task_check_get_movable_control_port()) {
			kr = KERN_DENIED;
			goto out;
		}
#endif
		if (kotype == IKOT_TASK_CONTROL) {
			/* own task via control port: one-time substitution port */
			*portp = task_get_non_substituted_self(task);
			goto out;
		}
	}

	/* map the supplied port's kobject type to a request flavor */
	switch (kotype) {
	case IKOT_TASK_CONTROL:
		flavor = TASK_FLAVOR_CONTROL;
		break;
	case IKOT_TASK_READ:
		flavor = TASK_FLAVOR_READ;
		break;
	case IKOT_TASK_INSPECT:
		flavor = TASK_FLAVOR_INSPECT;
		break;
	default:
		/* convert_port_to_task_inspect_no_eval() rules this out */
		panic("strange kobject type");
	}

	kr = task_get_special_port_internal(task, which, portp, flavor);
out:
	/* drop the reference taken by the port-to-task conversion */
	task_deallocate(task);
	return kr;
}
2154
2155 static kern_return_t
special_port_allowed_with_task_flavor(int which,mach_task_flavor_t flavor)2156 special_port_allowed_with_task_flavor(
2157 int which,
2158 mach_task_flavor_t flavor)
2159 {
2160 switch (flavor) {
2161 case TASK_FLAVOR_CONTROL:
2162 return KERN_SUCCESS;
2163
2164 case TASK_FLAVOR_READ:
2165
2166 switch (which) {
2167 case TASK_READ_PORT:
2168 case TASK_INSPECT_PORT:
2169 case TASK_NAME_PORT:
2170 return KERN_SUCCESS;
2171 default:
2172 return KERN_INVALID_CAPABILITY;
2173 }
2174
2175 case TASK_FLAVOR_INSPECT:
2176
2177 switch (which) {
2178 case TASK_INSPECT_PORT:
2179 case TASK_NAME_PORT:
2180 return KERN_SUCCESS;
2181 default:
2182 return KERN_INVALID_CAPABILITY;
2183 }
2184
2185 default:
2186 return KERN_INVALID_CAPABILITY;
2187 }
2188 }
2189
2190 /*
 *	Routine:	task_set_special_port_from_user		[MIG call]
2192 * Purpose:
2193 * Changes one of the task's special ports,
2194 * setting it to the supplied send right.
2195 * Conditions:
2196 * Nothing locked. If successful, consumes
2197 * the supplied send right.
2198 * Returns:
2199 * KERN_SUCCESS Changed the special port.
2200 * KERN_INVALID_ARGUMENT The task is null.
2201 * KERN_INVALID_RIGHT Port is marked as immovable.
2202 * KERN_FAILURE The task/space is dead.
2203 * KERN_INVALID_ARGUMENT Invalid special port.
2204 * KERN_NO_ACCESS Restricted access to set port.
2205 */
2206
2207 kern_return_t
task_set_special_port_from_user(task_t task,int which,ipc_port_t port)2208 task_set_special_port_from_user(
2209 task_t task,
2210 int which,
2211 ipc_port_t port)
2212 {
2213 if (task == TASK_NULL) {
2214 return KERN_INVALID_ARGUMENT;
2215 }
2216
2217 #if CONFIG_MACF
2218 if (mac_task_check_set_task_special_port(current_task(), task, which, port)) {
2219 return KERN_DENIED;
2220 }
2221 #endif
2222
2223 return task_set_special_port(task, which, port);
2224 }
2225
/* Kernel call only. MIG uses task_set_special_port_from_user() */
kern_return_t
task_set_special_port(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* driver tasks are never allowed to change special ports */
	if (task_is_driver(current_task())) {
		return KERN_NO_ACCESS;
	}

	/*
	 * rdar://70585367
	 * disallow immovable send so other process can't retrieve it through task_get_special_port()
	 */
	if (IP_VALID(port) && port->ip_immovable_send) {
		return KERN_INVALID_RIGHT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
	case TASK_HOST_PORT:
#if CONFIG_CSR
		if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
			/*
			 * Only allow setting of task-self / task-host
			 * special ports from user-space when SIP is
			 * disabled (for Mach-on-Mach emulation).
			 */
			break;
		}
#endif
		/* with SIP enabled (or no CSR support), these are immutable */
		return KERN_NO_ACCESS;
	default:
		break;
	}

	return task_set_special_port_internal(task, which, port);
}
2269
2270 /*
2271 * Routine: task_set_special_port_internal
2272 * Purpose:
2273 * Changes one of the task's special ports,
2274 * setting it to the supplied send right.
2275 * Conditions:
2276 * Nothing locked. If successful, consumes
2277 * the supplied send right.
2278 * Returns:
2279 * KERN_SUCCESS Changed the special port.
2280 * KERN_INVALID_ARGUMENT The task is null.
2281 * KERN_FAILURE The task/space is dead.
2282 * KERN_INVALID_ARGUMENT Invalid special port.
2283 * KERN_NO_ACCESS Restricted access to overwrite port.
2284 */
2285
kern_return_t
task_set_special_port_internal(
	task_t          task,
	int             which,
	ipc_port_t      port)
{
	ipc_port_t old = IP_NULL;                       /* displaced right, released after unlock */
	kern_return_t rc = KERN_INVALID_ARGUMENT;

	if (task == TASK_NULL) {
		goto out;
	}

	itk_lock(task);
	/*
	 * Allow setting special port during the span of ipc_task_init() to
	 * ipc_task_terminate(). posix_spawn() port actions can set special
	 * ports on target task _before_ task IPC access is enabled.
	 */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
		rc = KERN_FAILURE;
		goto out_unlock;
	}

	/* swap in the new right while holding the lock */
	switch (which) {
#if CONFIG_CSR
	case TASK_KERNEL_PORT:
		old = task->itk_settable_self;
		task->itk_settable_self = port;
		break;
#endif /* CONFIG_CSR */

	case TASK_HOST_PORT:
		old = task->itk_host;
		task->itk_host = port;
		break;

	case TASK_BOOTSTRAP_PORT:
		old = task->itk_bootstrap;
		task->itk_bootstrap = port;
		break;

	/* Never allow overwrite of the task access port */
	case TASK_ACCESS_PORT:
		if (IP_VALID(task->itk_task_access)) {
			rc = KERN_NO_ACCESS;
			goto out_unlock;
		}
		task->itk_task_access = port;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		old = task->itk_debug_control;
		task->itk_debug_control = port;
		break;

#if CONFIG_PROC_RESOURCE_LIMITS
	case TASK_RESOURCE_NOTIFY_PORT:
		old = task->itk_resource_notify;
		task->itk_resource_notify = port;
		break;
#endif /* CONFIG_PROC_RESOURCE_LIMITS */

	default:
		rc = KERN_INVALID_ARGUMENT;
		goto out_unlock;
	}/* switch */

	rc = KERN_SUCCESS;

out_unlock:
	itk_unlock(task);

	/* drop the displaced right outside the lock */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
out:
	return rc;
}
/*
 *	Routine:	mach_ports_register	[kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied as three separate port
 *		arguments; rights which aren't supplied are null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_RIGHT	Port in array is marked immovable.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 */
2385
/*
 * Stash three send rights in task->itk_registered[], consuming the
 * supplied rights and releasing whatever was registered before.
 */
kern_return_t
_kernelrpc_mach_ports_register3(
	task_t          task,
	mach_port_t     port1,
	mach_port_t     port2,
	mach_port_t     port3)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX] = {
		port1, port2, port3,
	};

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		/*
		 * rdar://70585367
		 * disallow immovable send so other process can't retrieve it through mach_ports_lookup()
		 */
		if (IP_VALID(ports[i]) && ports[i]->ip_immovable_send) {
			return KERN_INVALID_RIGHT;
		}
	}

	itk_lock(task);
	if (!task->ipc_active) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;                 /* ports[] now holds the displaced rights */
	}

	itk_unlock(task);

	/* drop the displaced rights outside the lock */
	for (int i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_release_send(ports[i]);
	}

	return KERN_SUCCESS;
}
2438
/*
 *	Routine:	mach_ports_lookup	[kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		the rights.
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 */
2452
2453 kern_return_t
_kernelrpc_mach_ports_lookup3(task_t task,ipc_port_t * port1,ipc_port_t * port2,ipc_port_t * port3)2454 _kernelrpc_mach_ports_lookup3(
2455 task_t task,
2456 ipc_port_t *port1,
2457 ipc_port_t *port2,
2458 ipc_port_t *port3)
2459 {
2460 if (task == TASK_NULL) {
2461 return KERN_INVALID_ARGUMENT;
2462 }
2463
2464 itk_lock(task);
2465 if (!task->ipc_active) {
2466 itk_unlock(task);
2467 return KERN_INVALID_ARGUMENT;
2468 }
2469
2470 *port1 = ipc_port_copy_send_any(task->itk_registered[0]);
2471 *port2 = ipc_port_copy_send_any(task->itk_registered[1]);
2472 *port3 = ipc_port_copy_send_any(task->itk_registered[2]);
2473
2474 itk_unlock(task);
2475
2476 return KERN_SUCCESS;
2477 }
2478
/*
 * Security policy deciding whether `caller` may translate a port into
 * a reference to `victim` (in-trans) or mint a port for it (out-trans).
 * Returns KERN_SUCCESS when allowed, KERN_INVALID_SECURITY otherwise;
 * on non-macOS targets an in-trans of a foreign control port without
 * Developer Mode panics instead of returning an error.
 */
static kern_return_t
task_conversion_eval_internal(
	task_t          caller,
	task_t          victim,
	boolean_t       out_trans,      /* object->port translation vs port->object */
	int             flavor) /* control or read */
{
	boolean_t allow_kern_task_out_trans;
	boolean_t allow_kern_task;

	/* task and thread flavor constants coincide; both asserts spell that out */
	assert(flavor == TASK_FLAVOR_CONTROL || flavor == TASK_FLAVOR_READ);
	assert(flavor == THREAD_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_READ);

#if defined(SECURE_KERNEL)
	/*
	 * On secure kernel platforms, reject converting kernel task/threads to port
	 * and sending it to user space.
	 */
	allow_kern_task_out_trans = FALSE;
#else
	allow_kern_task_out_trans = TRUE;
#endif

	allow_kern_task = out_trans && allow_kern_task_out_trans;

	if (victim == TASK_NULL) {
		return KERN_INVALID_SECURITY;
	}

	task_require(victim);

	/*
	 * If Developer Mode is not enabled, deny attempts to translate foreign task's
	 * control port completely. Read port or corpse is okay.
	 */
	if (!developer_mode_state()) {
		if ((caller != victim) &&
		    (flavor == TASK_FLAVOR_CONTROL) && !task_is_a_corpse(victim)) {
#if XNU_TARGET_OS_OSX
			return KERN_INVALID_SECURITY;
#else
			/*
			 * All control ports are immovable.
			 * Return an error for outtrans, but panic on intrans.
			 */
			if (out_trans) {
				return KERN_INVALID_SECURITY;
			} else {
				panic("Just like pineapple on pizza, this task/thread port doesn't belong here.");
			}
#endif /* XNU_TARGET_OS_OSX */
		}
	}

	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port (subject to Developer Mode check).
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task && !allow_kern_task) {
		return KERN_INVALID_SECURITY;
	}

#if !defined(XNU_TARGET_OS_OSX)
	/*
	 * On platforms other than macOS, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if (task_get_platform_binary(victim) && !task_get_platform_binary(caller)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* boot-arg escape hatch for development */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* !defined(XNU_TARGET_OS_OSX) */

	return KERN_SUCCESS;
}
2573
2574 kern_return_t
task_conversion_eval(task_t caller,task_t victim,int flavor)2575 task_conversion_eval(task_t caller, task_t victim, int flavor)
2576 {
2577 /* flavor is mach_task_flavor_t or mach_thread_flavor_t */
2578 static_assert(TASK_FLAVOR_CONTROL == THREAD_FLAVOR_CONTROL);
2579 static_assert(TASK_FLAVOR_READ == THREAD_FLAVOR_READ);
2580 return task_conversion_eval_internal(caller, victim, FALSE, flavor);
2581 }
2582
2583 static kern_return_t
task_conversion_eval_out_trans(task_t caller,task_t victim,int flavor)2584 task_conversion_eval_out_trans(task_t caller, task_t victim, int flavor)
2585 {
2586 assert(flavor == TASK_FLAVOR_CONTROL || flavor == THREAD_FLAVOR_CONTROL);
2587 return task_conversion_eval_internal(caller, victim, TRUE, flavor);
2588 }
2589
2590 /*
2591 * Routine: task_port_kotype_valid_for_flavor
2592 * Purpose:
2593 * Check whether the kobject type of a mach port
2594 * is valid for conversion to a task of given flavor.
2595 */
2596 static boolean_t
task_port_kotype_valid_for_flavor(natural_t kotype,mach_task_flavor_t flavor)2597 task_port_kotype_valid_for_flavor(
2598 natural_t kotype,
2599 mach_task_flavor_t flavor)
2600 {
2601 switch (flavor) {
2602 /* Ascending capability */
2603 case TASK_FLAVOR_NAME:
2604 if (kotype == IKOT_TASK_NAME) {
2605 return TRUE;
2606 }
2607 OS_FALLTHROUGH;
2608 case TASK_FLAVOR_INSPECT:
2609 if (kotype == IKOT_TASK_INSPECT) {
2610 return TRUE;
2611 }
2612 OS_FALLTHROUGH;
2613 case TASK_FLAVOR_READ:
2614 if (kotype == IKOT_TASK_READ) {
2615 return TRUE;
2616 }
2617 OS_FALLTHROUGH;
2618 case TASK_FLAVOR_CONTROL:
2619 if (kotype == IKOT_TASK_CONTROL) {
2620 return TRUE;
2621 }
2622 break;
2623 default:
2624 panic("strange task flavor");
2625 }
2626
2627 return FALSE;
2628 }
2629
2630 /*
2631 * Routine: convert_port_to_task_with_flavor_locked_noref
2632 * Purpose:
2633 * Internal helper routine to convert from a locked port to a task.
2634 * Args:
2635 * port - target port
2636 * flavor - requested task port flavor
2637 * options - port translation options
2638 * Conditions:
2639 * Port is locked and active.
2640 */
2641 static task_t
convert_port_to_task_with_flavor_locked_noref(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options)2642 convert_port_to_task_with_flavor_locked_noref(
2643 ipc_port_t port,
2644 mach_task_flavor_t flavor,
2645 port_intrans_options_t options)
2646 {
2647 ipc_kobject_type_t type = ip_kotype(port);
2648 task_t task;
2649
2650 ip_mq_lock_held(port);
2651 require_ip_active(port);
2652
2653 if (!task_port_kotype_valid_for_flavor(type, flavor)) {
2654 return TASK_NULL;
2655 }
2656
2657 task = ipc_kobject_get_locked(port, type);
2658 if (task == TASK_NULL) {
2659 return TASK_NULL;
2660 }
2661
2662 if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
2663 assert(flavor == TASK_FLAVOR_CONTROL);
2664 return TASK_NULL;
2665 }
2666
2667 /* TODO: rdar://42389187 */
2668 if (flavor == TASK_FLAVOR_NAME || flavor == TASK_FLAVOR_INSPECT) {
2669 assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
2670 }
2671
2672 if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
2673 task_conversion_eval(current_task(), task, flavor)) {
2674 return TASK_NULL;
2675 }
2676
2677 return task;
2678 }
2679
2680 /*
2681 * Routine: convert_port_to_task_with_flavor_locked
2682 * Purpose:
2683 * Internal helper routine to convert from a locked port to a task.
2684 * Args:
2685 * port - target port
2686 * flavor - requested task port flavor
2687 * options - port translation options
2688 * grp - task reference group
2689 * Conditions:
2690 * Port is locked and active.
2691 * Produces task ref or TASK_NULL.
2692 */
2693 static task_t
convert_port_to_task_with_flavor_locked(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2694 convert_port_to_task_with_flavor_locked(
2695 ipc_port_t port,
2696 mach_task_flavor_t flavor,
2697 port_intrans_options_t options,
2698 task_grp_t grp)
2699 {
2700 task_t task;
2701
2702 task = convert_port_to_task_with_flavor_locked_noref(port, flavor,
2703 options);
2704
2705 if (task != TASK_NULL) {
2706 task_reference_grp(task, grp);
2707 }
2708
2709 return task;
2710 }
2711
2712 /*
2713 * Routine: convert_port_to_task_with_flavor
2714 * Purpose:
2715 * Internal helper for converting from a port to a task.
2716 * Doesn't consume the port ref; produces a task ref,
2717 * which may be null.
2718 * Args:
2719 * port - target port
2720 * flavor - requested task port flavor
2721 * options - port translation options
2722 * grp - task reference group
2723 * Conditions:
2724 * Nothing locked.
2725 */
2726 static task_t
convert_port_to_task_with_flavor(ipc_port_t port,mach_task_flavor_t flavor,port_intrans_options_t options,task_grp_t grp)2727 convert_port_to_task_with_flavor(
2728 ipc_port_t port,
2729 mach_task_flavor_t flavor,
2730 port_intrans_options_t options,
2731 task_grp_t grp)
2732 {
2733 task_t task = TASK_NULL;
2734 task_t self = current_task();
2735
2736 if (IP_VALID(port)) {
2737 if (port == self->itk_self) {
2738 task_reference_grp(self, grp);
2739 return self;
2740 }
2741
2742 ip_mq_lock(port);
2743 if (ip_active(port)) {
2744 task = convert_port_to_task_with_flavor_locked(port,
2745 flavor, options, grp);
2746 }
2747 ip_mq_unlock(port);
2748 }
2749
2750 return task;
2751 }
2752
2753 task_t
convert_port_to_task(ipc_port_t port)2754 convert_port_to_task(
2755 ipc_port_t port)
2756 {
2757 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
2758 PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL);
2759 }
2760
2761 task_t
convert_port_to_task_mig(ipc_port_t port)2762 convert_port_to_task_mig(
2763 ipc_port_t port)
2764 {
2765 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_CONTROL,
2766 PORT_INTRANS_OPTIONS_NONE, TASK_GRP_MIG);
2767 }
2768
2769 task_read_t
convert_port_to_task_read(ipc_port_t port)2770 convert_port_to_task_read(
2771 ipc_port_t port)
2772 {
2773 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2774 PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2775 }
2776
2777 task_read_t
convert_port_to_task_read_no_eval(ipc_port_t port)2778 convert_port_to_task_read_no_eval(
2779 ipc_port_t port)
2780 {
2781 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2782 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2783 }
2784
2785 task_read_t
convert_port_to_task_read_mig(ipc_port_t port)2786 convert_port_to_task_read_mig(
2787 ipc_port_t port)
2788 {
2789 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_READ,
2790 PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2791 }
2792
2793 task_inspect_t
convert_port_to_task_inspect(ipc_port_t port)2794 convert_port_to_task_inspect(
2795 ipc_port_t port)
2796 {
2797 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2798 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2799 }
2800
2801 task_inspect_t
convert_port_to_task_inspect_no_eval(ipc_port_t port)2802 convert_port_to_task_inspect_no_eval(
2803 ipc_port_t port)
2804 {
2805 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2806 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2807 }
2808
2809 task_inspect_t
convert_port_to_task_inspect_mig(ipc_port_t port)2810 convert_port_to_task_inspect_mig(
2811 ipc_port_t port)
2812 {
2813 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_INSPECT,
2814 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2815 }
2816
2817 task_name_t
convert_port_to_task_name(ipc_port_t port)2818 convert_port_to_task_name(
2819 ipc_port_t port)
2820 {
2821 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
2822 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL);
2823 }
2824
2825 task_name_t
convert_port_to_task_name_mig(ipc_port_t port)2826 convert_port_to_task_name_mig(
2827 ipc_port_t port)
2828 {
2829 return convert_port_to_task_with_flavor(port, TASK_FLAVOR_NAME,
2830 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_MIG);
2831 }
2832
2833 /*
2834 * Routine: convert_port_to_task_policy
2835 * Purpose:
2836 * Convert from a port to a task.
2837 * Doesn't consume the port ref; produces a task ref,
2838 * which may be null.
2839 * If the port is being used with task_port_set(), any task port
2840 * type other than TASK_CONTROL requires an entitlement. If the
2841 * port is being used with task_port_get(), TASK_NAME requires an
2842 * entitlement.
2843 * Conditions:
2844 * Nothing locked.
2845 */
2846 static task_t
convert_port_to_task_policy_mig(ipc_port_t port,boolean_t set)2847 convert_port_to_task_policy_mig(ipc_port_t port, boolean_t set)
2848 {
2849 task_t task = TASK_NULL;
2850
2851 if (!IP_VALID(port)) {
2852 return TASK_NULL;
2853 }
2854
2855 task = set ?
2856 convert_port_to_task_mig(port) :
2857 convert_port_to_task_inspect_mig(port);
2858
2859 if (task == TASK_NULL &&
2860 IOCurrentTaskHasEntitlement("com.apple.private.task_policy")) {
2861 task = convert_port_to_task_name_mig(port);
2862 }
2863
2864 return task;
2865 }
2866
2867 task_policy_set_t
convert_port_to_task_policy_set_mig(ipc_port_t port)2868 convert_port_to_task_policy_set_mig(ipc_port_t port)
2869 {
2870 return convert_port_to_task_policy_mig(port, true);
2871 }
2872
2873 task_policy_get_t
convert_port_to_task_policy_get_mig(ipc_port_t port)2874 convert_port_to_task_policy_get_mig(ipc_port_t port)
2875 {
2876 return convert_port_to_task_policy_mig(port, false);
2877 }
2878
2879 /*
2880 * Routine: convert_port_to_task_suspension_token
2881 * Purpose:
2882 * Convert from a port to a task suspension token.
2883 * Doesn't consume the port ref; produces a suspension token ref,
2884 * which may be null.
2885 * Conditions:
2886 * Nothing locked.
2887 */
2888 static task_suspension_token_t
convert_port_to_task_suspension_token_grp(ipc_port_t port,task_grp_t grp)2889 convert_port_to_task_suspension_token_grp(
2890 ipc_port_t port,
2891 task_grp_t grp)
2892 {
2893 task_suspension_token_t task = TASK_NULL;
2894
2895 if (IP_VALID(port)) {
2896 ip_mq_lock(port);
2897 task = ipc_kobject_get_locked(port, IKOT_TASK_RESUME);
2898 if (task != TASK_NULL) {
2899 task_reference_grp(task, grp);
2900 }
2901 ip_mq_unlock(port);
2902 }
2903
2904 return task;
2905 }
2906
2907 task_suspension_token_t
convert_port_to_task_suspension_token_external(ipc_port_t port)2908 convert_port_to_task_suspension_token_external(
2909 ipc_port_t port)
2910 {
2911 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_EXTERNAL);
2912 }
2913
2914 task_suspension_token_t
convert_port_to_task_suspension_token_mig(ipc_port_t port)2915 convert_port_to_task_suspension_token_mig(
2916 ipc_port_t port)
2917 {
2918 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_MIG);
2919 }
2920
2921 task_suspension_token_t
convert_port_to_task_suspension_token_kernel(ipc_port_t port)2922 convert_port_to_task_suspension_token_kernel(
2923 ipc_port_t port)
2924 {
2925 return convert_port_to_task_suspension_token_grp(port, TASK_GRP_KERNEL);
2926 }
2927
2928 /*
2929 * Routine: convert_port_to_space_with_flavor
2930 * Purpose:
2931 * Internal helper for converting from a port to a space.
2932 * Doesn't consume the port ref; produces a space ref,
2933 * which may be null.
2934 * Args:
2935 * port - target port
2936 * flavor - requested ipc space flavor
2937 * options - port translation options
2938 * Conditions:
2939 * Nothing locked.
2940 */
static ipc_space_t
convert_port_to_space_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	ipc_space_t space = IPC_SPACE_NULL;
	task_t task = TASK_NULL;

	/* name-flavor ports never grant an ipc_space reference */
	assert(flavor != TASK_FLAVOR_NAME);

	if (IP_VALID(port)) {
		ip_mq_lock(port);
		if (ip_active(port)) {
			/* no task ref is taken: the held port lock keeps the task alive */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The space will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			space = task->itk_space;
			is_reference(space);
		}
		ip_mq_unlock(port);
	}

	return space;
}
2979
2980 ipc_space_t
convert_port_to_space(ipc_port_t port)2981 convert_port_to_space(
2982 ipc_port_t port)
2983 {
2984 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_CONTROL,
2985 PORT_INTRANS_OPTIONS_NONE);
2986 }
2987
2988 ipc_space_read_t
convert_port_to_space_read(ipc_port_t port)2989 convert_port_to_space_read(
2990 ipc_port_t port)
2991 {
2992 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
2993 PORT_INTRANS_ALLOW_CORPSE_TASK);
2994 }
2995
2996 ipc_space_read_t
convert_port_to_space_read_no_eval(ipc_port_t port)2997 convert_port_to_space_read_no_eval(
2998 ipc_port_t port)
2999 {
3000 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_READ,
3001 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3002 }
3003
3004 ipc_space_inspect_t
convert_port_to_space_inspect(ipc_port_t port)3005 convert_port_to_space_inspect(
3006 ipc_port_t port)
3007 {
3008 return convert_port_to_space_with_flavor(port, TASK_FLAVOR_INSPECT,
3009 PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
3010 }
3011
3012 /*
3013 * Routine: convert_port_to_map_with_flavor
3014 * Purpose:
3015 * Internal helper for converting from a port to a map.
3016 * Doesn't consume the port ref; produces a map ref,
3017 * which may be null.
3018 * Args:
3019 * port - target port
3020 * flavor - requested vm map flavor
3021 * options - port translation options
3022 * Conditions:
3023 * Nothing locked.
3024 */
static vm_map_t
convert_port_to_map_with_flavor(
	ipc_port_t              port,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options)
{
	task_t task = TASK_NULL;
	vm_map_t map = VM_MAP_NULL;

	/* there is no vm_map_inspect_t routines at the moment. */
	assert(flavor != TASK_FLAVOR_NAME && flavor != TASK_FLAVOR_INSPECT);
	assert((options & PORT_INTRANS_SKIP_TASK_EVAL) == 0);

	if (IP_VALID(port)) {
		ip_mq_lock(port);

		if (ip_active(port)) {
			/* no task ref is taken: the held port lock keeps the task alive */
			task = convert_port_to_task_with_flavor_locked_noref(port,
			    flavor, options);
		}

		/*
		 * Because we hold the port lock and we could resolve a task,
		 * even if we're racing with task termination, we know that
		 * ipc_task_disable() hasn't been called yet.
		 *
		 * We try to sniff if `task->active` flipped to accelerate
		 * resolving the race, but this isn't load bearing.
		 *
		 * The vm map will be torn down _after_ ipc_task_disable() returns,
		 * so it is valid to take a reference on it now.
		 */
		if (task && task->active) {
			map = task->map;

			/* hard stop: never hand the kernel map to user-reachable paths */
			if (map->pmap == kernel_pmap) {
				panic("userspace has control access to a "
				    "kernel map %p through task %p", map, task);
			}

			pmap_require(map->pmap);
			vm_map_reference(map);
		}

		ip_mq_unlock(port);
	}

	return map;
}
3074
3075 vm_map_t
convert_port_to_map(ipc_port_t port)3076 convert_port_to_map(
3077 ipc_port_t port)
3078 {
3079 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_CONTROL,
3080 PORT_INTRANS_OPTIONS_NONE);
3081 }
3082
3083 vm_map_read_t
convert_port_to_map_read(ipc_port_t port)3084 convert_port_to_map_read(
3085 ipc_port_t port)
3086 {
3087 return convert_port_to_map_with_flavor(port, TASK_FLAVOR_READ,
3088 PORT_INTRANS_ALLOW_CORPSE_TASK);
3089 }
3090
3091 vm_map_inspect_t
convert_port_to_map_inspect(__unused ipc_port_t port)3092 convert_port_to_map_inspect(
3093 __unused ipc_port_t port)
3094 {
3095 /* there is no vm_map_inspect_t routines at the moment. */
3096 return VM_MAP_INSPECT_NULL;
3097 }
3098
3099 /*
3100 * Routine: thread_port_kotype_valid_for_flavor
3101 * Purpose:
3102 * Check whether the kobject type of a mach port
3103 * is valid for conversion to a thread of given flavor.
3104 */
3105 static boolean_t
thread_port_kotype_valid_for_flavor(natural_t kotype,mach_thread_flavor_t flavor)3106 thread_port_kotype_valid_for_flavor(
3107 natural_t kotype,
3108 mach_thread_flavor_t flavor)
3109 {
3110 switch (flavor) {
3111 /* Ascending capability */
3112 case THREAD_FLAVOR_INSPECT:
3113 if (kotype == IKOT_THREAD_INSPECT) {
3114 return TRUE;
3115 }
3116 OS_FALLTHROUGH;
3117 case THREAD_FLAVOR_READ:
3118 if (kotype == IKOT_THREAD_READ) {
3119 return TRUE;
3120 }
3121 OS_FALLTHROUGH;
3122 case THREAD_FLAVOR_CONTROL:
3123 if (kotype == IKOT_THREAD_CONTROL) {
3124 return TRUE;
3125 }
3126 break;
3127 default:
3128 panic("strange thread flavor");
3129 }
3130
3131 return FALSE;
3132 }
3133
3134 /*
3135 * Routine: convert_port_to_thread_with_flavor_locked
3136 * Purpose:
3137 * Internal helper routine to convert from a locked port to a thread.
3138 * Args:
3139 * port - target port
3140 * flavor - requested thread port flavor
3141 * options - port translation options
3142 * Conditions:
3143 * Port is locked and active.
3144 * Produces a thread ref or THREAD_NULL.
3145 */
static thread_t
convert_port_to_thread_with_flavor_locked(
	ipc_port_t              port,
	mach_thread_flavor_t    flavor,
	port_intrans_options_t  options)
{
	thread_t thread = THREAD_NULL;
	task_t task;
	ipc_kobject_type_t type = ip_kotype(port);

	ip_mq_lock_held(port);
	require_ip_active(port);

	/* the port's kobject type must be compatible with the requested flavor */
	if (!thread_port_kotype_valid_for_flavor(type, flavor)) {
		return THREAD_NULL;
	}

	thread = ipc_kobject_get_locked(port, type);

	if (thread == THREAD_NULL) {
		return THREAD_NULL;
	}

	/* some callers explicitly exclude the calling thread itself */
	if (options & PORT_INTRANS_THREAD_NOT_CURRENT_THREAD) {
		if (thread == current_thread()) {
			return THREAD_NULL;
		}
	}

	task = get_threadtask(thread);

	if (options & PORT_INTRANS_THREAD_IN_CURRENT_TASK) {
		/* restricted mode: only threads of the caller's own task */
		if (task != current_task()) {
			return THREAD_NULL;
		}
	} else {
		/* corpse-task threads only translate when explicitly allowed */
		if (!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) && task_is_a_corpse(task)) {
			assert(flavor == THREAD_FLAVOR_CONTROL);
			return THREAD_NULL;
		}
		/* TODO: rdar://42389187 */
		if (flavor == THREAD_FLAVOR_INSPECT) {
			assert(options & PORT_INTRANS_SKIP_TASK_EVAL);
		}

		/* apply the task-level conversion security policy */
		if (!(options & PORT_INTRANS_SKIP_TASK_EVAL) &&
		    task_conversion_eval(current_task(), task, flavor) != KERN_SUCCESS) {
			return THREAD_NULL;
		}
	}

	/* produces a thread reference for the caller */
	thread_reference(thread);
	return thread;
}
3200
3201 /*
3202 * Routine: convert_port_to_thread_with_flavor
3203 * Purpose:
3204 * Internal helper for converting from a port to a thread.
3205 * Doesn't consume the port ref; produces a thread ref,
3206 * which may be null.
3207 * Args:
3208 * port - target port
3209 * flavor - requested thread port flavor
3210 * options - port translation options
3211 * Conditions:
3212 * Nothing locked.
3213 */
3214 static thread_t
convert_port_to_thread_with_flavor(ipc_port_t port,mach_thread_flavor_t flavor,port_intrans_options_t options)3215 convert_port_to_thread_with_flavor(
3216 ipc_port_t port,
3217 mach_thread_flavor_t flavor,
3218 port_intrans_options_t options)
3219 {
3220 thread_t thread = THREAD_NULL;
3221
3222 if (IP_VALID(port)) {
3223 ip_mq_lock(port);
3224 if (ip_active(port)) {
3225 thread = convert_port_to_thread_with_flavor_locked(port,
3226 flavor, options);
3227 }
3228 ip_mq_unlock(port);
3229 }
3230
3231 return thread;
3232 }
3233
thread_t
convert_port_to_thread(
	ipc_port_t              port)
{
	/* Control flavor with the strictest policy: no corpse tasks, full eval. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_CONTROL,
	           PORT_INTRANS_OPTIONS_NONE);
}
3241
thread_read_t
convert_port_to_thread_read(
	ipc_port_t              port)
{
	/* Read flavor; threads of corpse tasks are translatable at this flavor. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3249
static thread_read_t
convert_port_to_thread_read_no_eval(
	ipc_port_t              port)
{
	/* Read flavor, additionally bypassing task_conversion_eval(). */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_READ,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3257
thread_inspect_t
convert_port_to_thread_inspect(
	ipc_port_t              port)
{
	/* Inspect flavor always skips task eval (asserted in the locked helper). */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3265
static thread_inspect_t
convert_port_to_thread_inspect_no_eval(
	ipc_port_t              port)
{
	/* Same options as convert_port_to_thread_inspect(): inspect never evals. */
	return convert_port_to_thread_with_flavor(port, THREAD_FLAVOR_INSPECT,
	           PORT_INTRANS_SKIP_TASK_EVAL | PORT_INTRANS_ALLOW_CORPSE_TASK);
}
3273
3274 static inline ipc_kobject_type_t
thread_flavor_to_kotype(mach_thread_flavor_t flavor)3275 thread_flavor_to_kotype(mach_thread_flavor_t flavor)
3276 {
3277 switch (flavor) {
3278 case THREAD_FLAVOR_CONTROL:
3279 return IKOT_THREAD_CONTROL;
3280 case THREAD_FLAVOR_READ:
3281 return IKOT_THREAD_READ;
3282 default:
3283 return IKOT_THREAD_INSPECT;
3284 }
3285 }
3286
3287 /*
3288 * Routine: convert_thread_to_port_with_flavor
3289 * Purpose:
3290 * Convert from a thread to a port of given flavor.
3291 * Consumes a thread ref; produces a naked send right
3292 * which may be invalid.
3293 * Conditions:
3294 * Nothing locked.
3295 */
static ipc_port_t
convert_thread_to_port_with_flavor(
	thread_t                thread,
	thread_ro_t             tro,
	mach_thread_flavor_t    flavor)
{
	ipc_kobject_type_t kotype = thread_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == THREAD_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), tro->tro_task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	/* Thread is tearing down its IPC state: hand back IP_NULL. */
	if (!thread->ipc_active) {
		goto exit;
	}

	port = tro->tro_ports[flavor];
	if (flavor == THREAD_FLAVOR_CONTROL) {
		/* Control port always exists; just produce a send right. */
		port = ipc_kobject_make_send(port, thread, IKOT_THREAD_CONTROL);
	} else if (IP_VALID(port)) {
		/* Lazily-created port already exists: add a send right and arm
		 * a no-senders notification if none is outstanding. */
		(void)ipc_kobject_make_send_nsrequest(port, thread, kotype);
	} else {
		/*
		 * Claim a send right on the thread read/inspect port, and request a no-senders
		 * notification on that port (if none outstanding). A thread reference is not
		 * donated here even though the ports are created lazily because it doesn't own the
		 * kobject that it points to. Threads manage their lifetime explicitly and
		 * have to synchronize with each other, between the task/thread terminating and the
		 * send-once notification firing, and this is done under the thread mutex
		 * rather than with atomics.
		 */
		port = ipc_kobject_alloc_port(thread, kotype,
		    IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST |
		    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
		/*
		 * If Developer Mode is off, substitute read port for control
		 * port if copying out to owning task's space, for the sake of
		 * in-process exception handler.
		 *
		 * Also see: exception_deliver().
		 */
		if (!developer_mode_state() && flavor == THREAD_FLAVOR_READ) {
			ipc_port_set_label(port, IPC_LABEL_SUBST_THREAD_READ);
			port->ip_kolabel->ikol_alt_port = tro->tro_self_port;
		}
		/* Publish the new port in the read-only thread structure. */
		zalloc_ro_update_field(ZONE_ID_THREAD_RO,
		    tro, tro_ports[flavor], &port);
	}

exit:
	thread_mtx_unlock(thread);
	/* Consumes the caller's thread reference. */
	thread_deallocate(thread);
	return port;
}
3360
ipc_port_t
convert_thread_to_port(
	thread_t                thread)
{
	/* Control-flavor out-translation; consumes the thread reference. */
	thread_ro_t tro = get_thread_ro(thread);
	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_CONTROL);
}
3368
ipc_port_t
convert_thread_read_to_port(thread_read_t thread)
{
	/* Read-flavor out-translation; consumes the thread reference. */
	thread_ro_t tro = get_thread_ro(thread);
	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_READ);
}
3375
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
	/* Inspect-flavor out-translation; consumes the thread reference. */
	thread_ro_t tro = get_thread_ro(thread);
	return convert_thread_to_port_with_flavor(thread, tro, THREAD_FLAVOR_INSPECT);
}
3382
3383 void
convert_thread_array_to_ports(thread_act_array_t array,size_t count,mach_thread_flavor_t flavor)3384 convert_thread_array_to_ports(
3385 thread_act_array_t array,
3386 size_t count,
3387 mach_thread_flavor_t flavor)
3388 {
3389 thread_t *thread_list = (thread_t *)array;
3390 task_t task_self = current_task();
3391
3392 for (size_t i = 0; i < count; i++) {
3393 thread_t thread = thread_list[i];
3394 ipc_port_t port;
3395
3396 switch (flavor) {
3397 case THREAD_FLAVOR_CONTROL:
3398 if (get_threadtask(thread) == task_self) {
3399 port = convert_thread_to_port_pinned(thread);
3400 } else {
3401 port = convert_thread_to_port(thread);
3402 }
3403 break;
3404 case THREAD_FLAVOR_READ:
3405 port = convert_thread_read_to_port(thread);
3406 break;
3407 case THREAD_FLAVOR_INSPECT:
3408 port = convert_thread_inspect_to_port(thread);
3409 break;
3410 }
3411
3412 array[i].port = port;
3413 }
3414 }
3415
3416
3417 /*
3418 * Routine: port_name_to_thread
3419 * Purpose:
3420 * Convert from a port name to a thread reference
3421 * A name of MACH_PORT_NULL is valid for the null thread.
3422 * Conditions:
3423 * Nothing locked.
3424 */
thread_t
port_name_to_thread(
	mach_port_name_t        name,
	port_intrans_options_t  options)
{
	thread_t        thread = THREAD_NULL;
	ipc_port_t      kport;
	kern_return_t   kr;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			/* This entry point is control-flavor only; corpse/skip-eval
			 * options belong to other translation routines. */
			assert(!(options & PORT_INTRANS_ALLOW_CORPSE_TASK) &&
			    !(options & PORT_INTRANS_SKIP_TASK_EVAL));
			thread = convert_port_to_thread_with_flavor_locked(kport,
			    THREAD_FLAVOR_CONTROL, options);
			ip_mq_unlock(kport);
		}
	}

	return thread;
}
3448
3449 /*
3450 * Routine: port_name_is_pinned_itk_self
3451 * Purpose:
3452 * Returns whether this port name is for the pinned
3453 * mach_task_self (if it exists).
3454 *
3455 * task_self_trap() when the task port is pinned,
3456 * will memorize the name the port has in the space
3457 * in ip_receiver_name, which we can use to fast-track
3458 * this answer without taking any lock.
3459 *
3460 * ipc_task_disable() will set `ip_receiver_name` back to
3461 * MACH_PORT_SPECIAL_DEFAULT.
3462 *
3463 * Conditions:
3464 * self must be current_task()
3465 * Nothing locked.
3466 */
3467 static bool
port_name_is_pinned_itk_self(task_t self,mach_port_name_t name)3468 port_name_is_pinned_itk_self(
3469 task_t self,
3470 mach_port_name_t name)
3471 {
3472 ipc_port_t kport = self->itk_self;
3473 return MACH_PORT_VALID(name) && name != MACH_PORT_SPECIAL_DEFAULT &&
3474 kport->ip_pinned && ip_get_receiver_name(kport) == name;
3475 }
3476
3477 /*
3478 * Routine: port_name_to_current_task*_noref
3479 * Purpose:
3480 * Convert from a port name to current_task()
3481 * A name of MACH_PORT_NULL is valid for the null task.
3482 *
3483 * If current_task() is in the process of being terminated,
 *		this might return a non NULL task even when port_name_to_task()
 *		would return TASK_NULL.
3486 *
3487 * However, this is an acceptable race that can't be controlled by
3488 * userspace, and that downstream code using the returned task
3489 * has to handle anyway.
3490 *
3491 * ipc_space_disable() does try to narrow this race,
3492 * by causing port_name_is_pinned_itk_self() to fail.
3493 *
3494 * Returns:
3495 * current_task() if the port name was for current_task()
3496 * at the appropriate flavor.
3497 *
3498 * TASK_NULL otherwise.
3499 *
3500 * Conditions:
3501 * Nothing locked.
3502 */
static task_t
port_name_to_current_task_internal_noref(
	mach_port_name_t        name,
	mach_task_flavor_t      flavor)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/* Fast path: the pinned mach_task_self needs no lookup or lock. */
	if (port_name_is_pinned_itk_self(self, name)) {
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			ipc_kobject_type_t type = ip_kotype(kport);
			if (task_port_kotype_valid_for_flavor(type, flavor)) {
				task = ipc_kobject_get_locked(kport, type);
			}
			ip_mq_unlock(kport);
			/* No reference is produced: only current_task() may be
			 * returned; any other task maps to TASK_NULL. */
			if (task != self) {
				task = TASK_NULL;
			}
		}
	}

	return task;
}
3533
task_t
port_name_to_current_task_noref(
	mach_port_name_t name)
{
	/* Control-flavor variant; see routine header above for noref semantics. */
	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_CONTROL);
}
3540
task_read_t
port_name_to_current_task_read_noref(
	mach_port_name_t name)
{
	/* Read-flavor variant; see routine header above for noref semantics. */
	return port_name_to_current_task_internal_noref(name, TASK_FLAVOR_READ);
}
3547
3548 /*
3549 * Routine: port_name_to_task_grp
3550 * Purpose:
3551 * Convert from a port name to a task reference
3552 * A name of MACH_PORT_NULL is valid for the null task.
3553 * Acquire a send right if [inout] @kportp is non-null.
3554 * Conditions:
3555 * Nothing locked.
3556 */
static task_t
port_name_to_task_grp(
	mach_port_name_t        name,
	mach_task_flavor_t      flavor,
	port_intrans_options_t  options,
	task_grp_t              grp,
	ipc_port_t             *kportp)
{
	ipc_port_t kport;
	kern_return_t kr;
	task_t task = TASK_NULL;
	task_t self = current_task();

	/*
	 * Fast path for the pinned mach_task_self. Skipped when a send
	 * right was requested, since that needs the locked port below.
	 */
	if (!kportp && port_name_is_pinned_itk_self(self, name)) {
		task_reference_grp(self, grp);
		return self;
	}

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(self->itk_space, name, &kport);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active */
			task = convert_port_to_task_with_flavor_locked(kport,
			    flavor, options, grp);
			if (kportp) {
				/* send right requested */
				/* NOTE(review): the send right is produced even if the
				 * task translation above failed — callers presumably
				 * check the returned task first; confirm. */
				ipc_port_copy_send_any_locked(kport);
				*kportp = kport;
			}
			ip_mq_unlock(kport);
		}
	}
	return task;
}
3591
task_t
port_name_to_task_external(
	mach_port_name_t name)
{
	/* Control flavor, strict policy; refs accounted to TASK_GRP_EXTERNAL. */
	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_EXTERNAL, NULL);
}
3598
task_t
port_name_to_task_kernel(
	mach_port_name_t name)
{
	/* Control flavor, strict policy; refs accounted to TASK_GRP_KERNEL. */
	return port_name_to_task_grp(name, TASK_FLAVOR_CONTROL, PORT_INTRANS_OPTIONS_NONE, TASK_GRP_KERNEL, NULL);
}
3605
3606 /*
3607 * Routine: port_name_to_task_read
3608 * Purpose:
3609 * Convert from a port name to a task reference
3610 * A name of MACH_PORT_NULL is valid for the null task.
3611 * Conditions:
3612 * Nothing locked.
3613 */
task_read_t
port_name_to_task_read(
	mach_port_name_t name)
{
	/* Read flavor; corpse tasks are translatable at this flavor. */
	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, NULL);
}
3620
3621 /*
3622 * Routine: port_name_to_task_read_and_send_right
3623 * Purpose:
3624 * Convert from a port name to a task reference
3625 * A name of MACH_PORT_NULL is valid for the null task.
3626 * Conditions:
3627 * On success, ipc port returned with a +1 send right.
3628 */
task_read_t
port_name_to_task_read_and_send_right(
	mach_port_name_t name,
	ipc_port_t *kportp)
{
	/* As port_name_to_task_read(), but also returns a +1 send right in *kportp. */
	return port_name_to_task_grp(name, TASK_FLAVOR_READ, PORT_INTRANS_ALLOW_CORPSE_TASK, TASK_GRP_KERNEL, kportp);
}
3636
3637 /*
3638 * Routine: port_name_to_task_read_no_eval
3639 * Purpose:
3640 * Convert from a port name to a task reference
3641 * A name of MACH_PORT_NULL is valid for the null task.
3642 * Skips task_conversion_eval() during conversion.
3643 * Conditions:
3644 * Nothing locked.
3645 */
task_read_t
port_name_to_task_read_no_eval(
	mach_port_name_t name)
{
	/* Read flavor, additionally bypassing task_conversion_eval(). */
	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
	    PORT_INTRANS_ALLOW_CORPSE_TASK;
	return port_name_to_task_grp(name, TASK_FLAVOR_READ, options, TASK_GRP_KERNEL, NULL);
}
3654
3655 /*
3656 * Routine: port_name_to_task_name
3657 * Purpose:
3658 * Convert from a port name to a task reference
3659 * A name of MACH_PORT_NULL is valid for the null task.
3660 * Conditions:
3661 * Nothing locked.
3662 */
task_name_t
port_name_to_task_name(
	mach_port_name_t name)
{
	/* Name flavor never performs task eval and allows corpse tasks. */
	port_intrans_options_t options = PORT_INTRANS_SKIP_TASK_EVAL |
	    PORT_INTRANS_ALLOW_CORPSE_TASK;
	return port_name_to_task_grp(name, TASK_FLAVOR_NAME, options, TASK_GRP_KERNEL, NULL);
}
3671
3672 /*
3673 * Routine: port_name_to_task_id_token
3674 * Purpose:
3675 * Convert from a port name to a task identity token reference
3676 * Conditions:
3677 * Nothing locked.
3678 */
3679 task_id_token_t
port_name_to_task_id_token(mach_port_name_t name)3680 port_name_to_task_id_token(
3681 mach_port_name_t name)
3682 {
3683 ipc_port_t port;
3684 kern_return_t kr;
3685 task_id_token_t token = TASK_ID_TOKEN_NULL;
3686
3687 if (MACH_PORT_VALID(name)) {
3688 kr = ipc_port_translate_send(current_space(), name, &port);
3689 if (kr == KERN_SUCCESS) {
3690 token = convert_port_to_task_id_token(port);
3691 ip_mq_unlock(port);
3692 }
3693 }
3694 return token;
3695 }
3696
3697 /*
3698 * Routine: port_name_to_host
3699 * Purpose:
3700 * Convert from a port name to a host pointer.
3701 * NOTE: This does _not_ return a +1 reference to the host_t
3702 * Conditions:
3703 * Nothing locked.
3704 */
host_t
port_name_to_host(
	mach_port_name_t name)
{
	host_t host = HOST_NULL;
	kern_return_t kr;
	ipc_port_t port;

	if (MACH_PORT_VALID(name)) {
		kr = ipc_port_translate_send(current_space(), name, &port);
		if (kr == KERN_SUCCESS) {
			/* port is locked and active; no host ref is taken (see header) */
			host = convert_port_to_host(port);
			ip_mq_unlock(port);
		}
	}
	return host;
}
3722
3723 static inline ipc_kobject_type_t
task_flavor_to_kotype(mach_task_flavor_t flavor)3724 task_flavor_to_kotype(mach_task_flavor_t flavor)
3725 {
3726 switch (flavor) {
3727 case TASK_FLAVOR_CONTROL:
3728 return IKOT_TASK_CONTROL;
3729 case TASK_FLAVOR_READ:
3730 return IKOT_TASK_READ;
3731 case TASK_FLAVOR_INSPECT:
3732 return IKOT_TASK_INSPECT;
3733 default:
3734 return IKOT_TASK_NAME;
3735 }
3736 }
3737
3738 /*
3739 * Routine: convert_task_to_port_with_flavor
3740 * Purpose:
3741 * Convert from a task to a port of given flavor.
3742 * Consumes a task ref; produces a naked send right
3743 * which may be invalid.
3744 * Conditions:
3745 * Nothing locked.
3746 */
ipc_port_t
convert_task_to_port_with_flavor(
	task_t                  task,
	mach_task_flavor_t      flavor,
	task_grp_t              grp)
{
	ipc_kobject_type_t kotype = task_flavor_to_kotype(flavor);
	ipc_port_t port = IP_NULL;

	itk_lock(task);

	/* Task is tearing down its IPC state: hand back IP_NULL. */
	if (!task->ipc_active) {
		goto exit;
	}

	/*
	 * out-trans of weaker flavors are still permitted, but in-trans
	 * is separately enforced.
	 */
	if (flavor == TASK_FLAVOR_CONTROL &&
	    task_conversion_eval_out_trans(current_task(), task, flavor)) {
		/* denied by security policy, make the port appear dead */
		port = IP_DEAD;
		goto exit;
	}

	switch (flavor) {
	case TASK_FLAVOR_CONTROL:
	case TASK_FLAVOR_NAME:
		/* These ports always exist; just produce a send right. */
		port = ipc_kobject_make_send(task->itk_task_ports[flavor],
		    task, kotype);
		break;
	/*
	 * Claim a send right on the task read/inspect port,
	 * and request a no-senders notification on that port
	 * (if none outstanding).
	 *
	 * The task's itk_lock is used to synchronize the handling
	 * of the no-senders notification with the task termination.
	 */
	case TASK_FLAVOR_READ:
	case TASK_FLAVOR_INSPECT:
		port = task->itk_task_ports[flavor];
		if (IP_VALID(port)) {
			(void)ipc_kobject_make_send_nsrequest(port,
			    task, kotype);
		} else {
			/* Lazily create the read/inspect port on first use. */
			port = ipc_kobject_alloc_port(task, kotype,
			    IPC_KOBJECT_ALLOC_MAKE_SEND |
			    IPC_KOBJECT_ALLOC_NSREQUEST |
			    IPC_KOBJECT_ALLOC_IMMOVABLE_SEND);
			/*
			 * If Developer Mode is off, substitute read port for control port if
			 * copying out to owning task's space, for the sake of in-process
			 * exception handler.
			 *
			 * Also see: exception_deliver().
			 */
			if (!developer_mode_state() && flavor == TASK_FLAVOR_READ) {
				ipc_port_set_label(port, IPC_LABEL_SUBST_TASK_READ);
				port->ip_kolabel->ikol_alt_port = task->itk_self;
			}

			task->itk_task_ports[flavor] = port;
		}
		break;
	}

exit:
	itk_unlock(task);
	/* Consumes the caller's task reference, accounted to grp. */
	task_deallocate_grp(task, grp);
	return port;
}
3820
ipc_port_t
convert_corpse_to_port_and_nsrequest(
	task_t          corpse)
{
	ipc_port_t port = IP_NULL;
	__assert_only kern_return_t kr;

	assert(task_is_a_corpse(corpse));
	itk_lock(corpse);
	/* Produce a send right on the corpse's control port and arm a
	 * no-senders notification (which may already be outstanding). */
	port = corpse->itk_task_ports[TASK_FLAVOR_CONTROL];
	kr = ipc_kobject_make_send_nsrequest(port, corpse, IKOT_TASK_CONTROL);
	assert(kr == KERN_SUCCESS || kr == KERN_ALREADY_WAITING);
	itk_unlock(corpse);

	/* Consumes the caller's task reference. */
	task_deallocate(corpse);
	return port;
}
3838
ipc_port_t
convert_task_to_port(
	task_t          task)
{
	/* Control-flavor out-translation; consumes the task reference. */
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_KERNEL);
}
3845
ipc_port_t
convert_task_read_to_port(
	task_read_t     task)
{
	/* Read-flavor out-translation; consumes the task reference. */
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_KERNEL);
}
3852
ipc_port_t
convert_task_inspect_to_port(
	task_inspect_t  task)
{
	/* Inspect-flavor out-translation; consumes the task reference. */
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_INSPECT, TASK_GRP_KERNEL);
}
3859
ipc_port_t
convert_task_name_to_port(
	task_name_t     task)
{
	/* Name-flavor out-translation; consumes the task reference. */
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_NAME, TASK_GRP_KERNEL);
}
3866
ipc_port_t
convert_task_to_port_external(task_t task)
{
	/* As convert_task_to_port(), with refs accounted to TASK_GRP_EXTERNAL. */
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_CONTROL, TASK_GRP_EXTERNAL);
}
3872
ipc_port_t
convert_task_read_to_port_external(task_t task)
{
	/* As convert_task_read_to_port(), with refs accounted to TASK_GRP_EXTERNAL. */
	return convert_task_to_port_with_flavor(task, TASK_FLAVOR_READ, TASK_GRP_EXTERNAL);
}
3878
ipc_port_t
convert_task_to_port_pinned(
	task_t          task)
{
	ipc_port_t port = IP_NULL;

	/* Only the owning task may request its pinned control port. */
	assert(task == current_task());

	itk_lock(task);

	if (task->ipc_active) {
		port = ipc_kobject_make_send(task->itk_self, task,
		    IKOT_TASK_CONTROL);
	}

	/* Immovable tasks must hand out pinned, immovable send rights. */
	if (port && task_is_immovable(task)) {
		assert(ip_is_pinned(port));
		assert(ip_is_immovable_send(port));
	}

	itk_unlock(task);
	/* Consumes the caller's task reference. */
	task_deallocate(task);
	return port;
}
3903
3904 void
convert_task_array_to_ports(task_array_t array,size_t count,mach_task_flavor_t flavor)3905 convert_task_array_to_ports(
3906 task_array_t array,
3907 size_t count,
3908 mach_task_flavor_t flavor)
3909 {
3910 task_t *task_list = (task_t *)array;
3911 task_t task_self = current_task();
3912
3913 for (size_t i = 0; i < count; i++) {
3914 task_t task = task_list[i];
3915 ipc_port_t port;
3916
3917 switch (flavor) {
3918 case TASK_FLAVOR_CONTROL:
3919 if (task == task_self) {
3920 /* if current_task(), return pinned port */
3921 port = convert_task_to_port_pinned(task);
3922 } else {
3923 port = convert_task_to_port(task);
3924 }
3925 break;
3926 case TASK_FLAVOR_READ:
3927 port = convert_task_read_to_port(task);
3928 break;
3929 case TASK_FLAVOR_INSPECT:
3930 port = convert_task_inspect_to_port(task);
3931 break;
3932 case TASK_FLAVOR_NAME:
3933 port = convert_task_name_to_port(task);
3934 break;
3935 }
3936
3937 array[i].port = port;
3938 }
3939 }
3940
3941 /*
 *	Routine:	convert_task_suspension_token_to_port_grp
3943 * Purpose:
3944 * Convert from a task suspension token to a port.
3945 * Consumes a task suspension token ref; produces a naked send-once right
3946 * which may be invalid.
3947 * Conditions:
3948 * Nothing locked.
3949 */
static ipc_port_t
convert_task_suspension_token_to_port_grp(
	task_suspension_token_t task,
	task_grp_t              grp)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		itk_lock(task);
		/* Lazily create the resume port on first use. */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = task->itk_resume;
		ipc_kobject_require(port, task, IKOT_TASK_RESUME);
		port = ipc_port_make_sonce(port);
		itk_unlock(task);
		assert(IP_VALID(port));
	} else {
		/* Task already terminated: no resume right to hand out. */
		port = IP_NULL;
	}

	task_unlock(task);
	/* Consumes the caller's suspension-token reference, accounted to grp. */
	task_suspension_token_deallocate_grp(task, grp);

	return port;
}
3984
ipc_port_t
convert_task_suspension_token_to_port_external(
	task_suspension_token_t task)
{
	/* External-group variant; consumes the suspension-token reference. */
	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_EXTERNAL);
}
3991
ipc_port_t
convert_task_suspension_token_to_port_mig(
	task_suspension_token_t task)
{
	/* MIG-group variant; consumes the suspension-token reference. */
	return convert_task_suspension_token_to_port_grp(task, TASK_GRP_MIG);
}
3998
ipc_port_t
convert_thread_to_port_pinned(
	thread_t                thread)
{
	thread_ro_t tro = get_thread_ro(thread);
	ipc_port_t port = IP_NULL;

	thread_mtx_lock(thread);

	if (thread->ipc_active) {
		/* Produce a send right on the thread's pinned control port. */
		port = ipc_kobject_make_send(tro->tro_self_port,
		    thread, IKOT_THREAD_CONTROL);
	}

	/* Immovable tasks must hand out immovable thread send rights. */
	if (port && task_is_immovable(tro->tro_task)) {
		assert(ip_is_immovable_send(port));
	}

	thread_mtx_unlock(thread);
	/* Consumes the caller's thread reference. */
	thread_deallocate(thread);
	return port;
}
4021 /*
4022 * Routine: space_deallocate
4023 * Purpose:
4024 * Deallocate a space ref produced by convert_port_to_space.
4025 * Conditions:
4026 * Nothing locked.
4027 */
4028
void
space_deallocate(
	ipc_space_t     space)
{
	/* Safe on IS_NULL; drops one space reference otherwise. */
	if (space != IS_NULL) {
		is_release(space);
	}
}
4037
4038 /*
4039 * Routine: space_read_deallocate
4040 * Purpose:
4041 * Deallocate a space read ref produced by convert_port_to_space_read.
4042 * Conditions:
4043 * Nothing locked.
4044 */
4045
void
space_read_deallocate(
	ipc_space_read_t space)
{
	/* Safe on IS_INSPECT_NULL; drops one space reference otherwise. */
	if (space != IS_INSPECT_NULL) {
		is_release((ipc_space_t)space);
	}
}
4054
4055 /*
4056 * Routine: space_inspect_deallocate
4057 * Purpose:
4058 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
4059 * Conditions:
4060 * Nothing locked.
4061 */
4062
void
space_inspect_deallocate(
	ipc_space_inspect_t space)
{
	/* Safe on IS_INSPECT_NULL; drops one space reference otherwise. */
	if (space != IS_INSPECT_NULL) {
		is_release((ipc_space_t)space);
	}
}
4071
4072
4073 static boolean_t
behavior_is_identity_protected(int new_behavior)4074 behavior_is_identity_protected(int new_behavior)
4075 {
4076 return ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED) ||
4077 ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE) ||
4078 ((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_STATE_IDENTITY_PROTECTED);
4079 }
4080
/* Report a set-exception-port policy violation to CoreAnalytics. */
static void
send_set_exception_telemetry(const task_t excepting_task, const exception_mask_t mask)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE(set_exception);
	CA_EVENT_TYPE(set_exception) * event = ca_event->data;

	/* NOTE(review): ca_event is dereferenced unchecked — presumably
	 * CA_EVENT_ALLOCATE cannot return NULL; confirm with its definition. */
	task_procname(current_task(), (char *) &event->current_proc, sizeof(event->current_proc));
	task_procname(excepting_task, (char *) &event->thread_proc, sizeof(event->thread_proc));
	event->mask = mask;

	CA_EVENT_SEND(ca_event);
}
4093
/*
 * Handle a set-exception-port policy violation: optionally raise a guard
 * exception (lightweight corpse), and always emit telemetry.
 * Returns whether the violation should be ignored (i.e. the call allowed).
 */
static boolean_t
set_exception_behavior_violation(const task_t excepting_task, const exception_mask_t mask)
{
	if (thid_should_crash) {
		/* create lightweight corpse */
		mach_port_guard_exception(0, 0, kGUARD_EXC_EXCEPTION_BEHAVIOR_ENFORCE);
	}

	/* always report the proc name to CA */
	send_set_exception_telemetry(excepting_task, mask);

	/* if the bootarg has been manually set to false, ignore the violation */
	return !thid_should_crash;
}
4109
4110 /*
4111 * Protect platform binary task/thread ports.
4112 * excepting_task is NULL if we are setting a host exception port.
4113 */
4114 static boolean_t
exception_exposes_protected_ports(const ipc_port_t new_port,const task_t excepting_task)4115 exception_exposes_protected_ports(const ipc_port_t new_port, const task_t excepting_task)
4116 {
4117 if (!IP_VALID(new_port) || is_ux_handler_port(new_port)) {
4118 /*
4119 * sending exceptions to invalid port does not pose risk
4120 * ux_handler port is an immovable, read-only kobject port; doesn't need protection.
4121 */
4122 return FALSE;
4123 } else if (excepting_task) {
4124 /* setting task/thread exception port - protect hardened binaries */
4125 return task_is_hardened_binary(excepting_task);
4126 }
4127
4128 /* setting host port exposes all processes - always protect. */
4129 return TRUE;
4130 }
4131
4132 static boolean_t
exception_ports_frozen(task_t excepting_task)4133 exception_ports_frozen(task_t excepting_task)
4134 {
4135 return excepting_task &&
4136 (task_ro_flags_get(excepting_task) & TFRO_FREEZE_EXCEPTION_PORTS);
4137 }
4138
4139 #if XNU_TARGET_OS_OSX && CONFIG_CSR
4140 static bool
SIP_is_enabled()4141 SIP_is_enabled()
4142 {
4143 return csr_check(CSR_ALLOW_UNRESTRICTED_FS) != 0;
4144 }
4145 #endif /* XNU_TARGET_OS_OSX && CONFIG_CSR*/
4146
/*
 * Enforce that exception ports exposing protected (hardened or host-wide)
 * targets use an identity-protected or state-only behavior.
 * Returns FALSE only when the violation should deny the request.
 *
 * NOTE(review): for host-port changes, excepting_task is NULL and is passed
 * to task_opted_out_mach_hardening() — presumably that helper tolerates
 * NULL; confirm.
 */
static boolean_t
exception_is_identity_protected(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask)
{
	if (exception_exposes_protected_ports(new_port, excepting_task)
	    && !behavior_is_identity_protected(new_behavior)
#if XNU_TARGET_OS_OSX
	    && !task_opted_out_mach_hardening(excepting_task) /* Some tasks are opted out more generally */
#if CONFIG_CSR
	    && SIP_is_enabled() /* cannot enforce if SIP is disabled */
#endif /* CONFIG_CSR */
#endif /* XNU_TARGET_OS_OSX */
#if CONFIG_ROSETTA
	    && !task_is_translated(current_task())
#endif /* CONFIG_ROSETTA */
	    && !proc_is_simulated(current_proc())
	    ) {
		return set_exception_behavior_violation(excepting_task, mask);
	}

	return true;
}
4169
/*
 * Top-level security gate for changing exception ports.
 * Returns TRUE when the change is allowed. Entitled callers (debuggers,
 * tooling) bypass all policy; otherwise the only-one-exception-port and
 * identity-protection policies apply.
 */
static boolean_t
set_exception_behavior_allowed(const ipc_port_t new_port, int new_behavior,
    const task_t excepting_task, const exception_mask_t mask, const bool hardened_exception)
{
	const char *excepting_task_name = "";
	const char *cur_task_name = "";

	if (excepting_task) {
		excepting_task_name = task_best_name(excepting_task);
	}
	if (current_task()) {
		cur_task_name = task_best_name(current_task());
	}

	/* Allow debuggers, tests, and tooling to set exception ports however they wish */
	if (IOCurrentTaskHasEntitlement(SET_EXCEPTION_ENTITLEMENT)) {
		kprintf("Allowing set_exception_ports from [%s] on [%s] for "
		    "entitled process/debugger\n", cur_task_name, excepting_task_name);
		return true;
	}

	/* excepting_task can be NULL if setting the host port */
	if (excepting_task) {
		/*
		 * Only allow hardened set_exception_port calls on hardened tasks
		 * that opt in via entitlement
		 */
		bool only_one_exception_port =
		    IOTaskHasEntitlement(excepting_task, IPC_ONLY_ONE_EXCEPTION_PORT)
		    && task_is_hardened_binary(excepting_task);

		if (!hardened_exception && only_one_exception_port) {
			kprintf("Disallowing set_exception_ports from [%s] on [%s] due "
			    "to only_one_exception_port policy\n", cur_task_name, excepting_task_name);
			return set_exception_behavior_violation(excepting_task, mask);
		}
	}

	/* Everyone else follows the standard policy and must use identity protected exceptions */
	return exception_is_identity_protected(new_port, new_behavior, excepting_task, mask);
}
4211
4212 /*
4213 * Routine: set_exception_ports_validation
4214 * Purpose:
4215 * Common argument validation shared between all exception port setting/swapping routines
4216 * Conditions:
4217 * Nothing locked.
4218 * Returns:
4219 * KERN_SUCCESS Setting the exception port is allowed with these arguments
4220 * KERN_INVALID_ARGUMENT Invalid arguments
4221 * KERN_INVALID_RIGHT Incorrect port configuration
4222 * KERN_DENIED Denied by security policy
4223 */
4224 kern_return_t
set_exception_ports_validation(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor,bool hardened_exception)4225 set_exception_ports_validation(
4226 task_t task,
4227 exception_mask_t exception_mask,
4228 ipc_port_t new_port,
4229 exception_behavior_t new_behavior,
4230 thread_state_flavor_t new_flavor,
4231 bool hardened_exception
4232 )
4233 {
4234 if (exception_mask & ~EXC_MASK_VALID) {
4235 return KERN_INVALID_ARGUMENT;
4236 }
4237
4238 if (IP_VALID(new_port)) {
4239 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
4240 case EXCEPTION_DEFAULT:
4241 case EXCEPTION_STATE:
4242 case EXCEPTION_STATE_IDENTITY:
4243 case EXCEPTION_IDENTITY_PROTECTED:
4244 case EXCEPTION_STATE_IDENTITY_PROTECTED:
4245 break;
4246
4247 default:
4248 return KERN_INVALID_ARGUMENT;
4249 }
4250 }
4251
4252 /*
4253 * rdar://77996387
4254 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
4255 * but exception ports to still be set.
4256 */
4257 if (IP_VALID(new_port) &&
4258 ((!ip_is_exception_port(new_port) && new_port->ip_immovable_receive) ||
4259 new_port->ip_immovable_send)) {
4260 return KERN_INVALID_RIGHT;
4261 }
4262
4263
4264 /*
4265 * Check the validity of the thread_state_flavor by calling the
4266 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
4267 * osfmk/mach/ARCHITECTURE/thread_status.h
4268 */
4269 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
4270 return KERN_INVALID_ARGUMENT;
4271 }
4272
4273 if (((new_behavior & ~MACH_EXCEPTION_MASK) == EXCEPTION_IDENTITY_PROTECTED ||
4274 (new_behavior & MACH_EXCEPTION_BACKTRACE_PREFERRED))
4275 && !(new_behavior & MACH_EXCEPTION_CODES)) {
4276 return KERN_INVALID_ARGUMENT;
4277 }
4278
4279 if (!set_exception_behavior_allowed(new_port, new_behavior, task, exception_mask, hardened_exception)) {
4280 return KERN_DENIED;
4281 }
4282
4283 return KERN_SUCCESS;
4284 }
4285
4286 /*
4287 * Routine: thread_set_exception_ports_internal
4288 * Purpose:
4289 * Set a new exception action on the thread
4290 * Conditions:
4291 * Arguments have been validated via `set_exception_ports_validation`
4292 * Nothing locked.
4293 * Returns:
4294 * KERN_SUCCESS Setting the exception port is allowed with these arguments
4295 * KERN_FAILURE Thread is inactive
4296 */
kern_return_t
thread_set_exception_ports_internal(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	boolean_t hardened)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(current_task());

#if CONFIG_MACF
	/* Let the MAC policy veto the operation before any state is modified. */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		/* Thread is terminating: bail out without touching its actions. */
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	/* Lazily allocate the per-thread exception action table. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			/* Stash the displaced right; released after dropping the mutex. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
			action->hardened = hardened;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced send rights outside the thread mutex. */
	for (size_t i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
4369
4370 /*
4371 * Routine: thread/task_set_exception_ports [kernel call]
4372 * Purpose:
4373 * Sets the thread/task exception port, flavor and
4374 * behavior for the exception types specified by the mask.
4375 * There will be one send right per exception per valid
4376 * port.
4377 * Conditions:
4378 * Nothing locked. If successful, consumes
4379 * the supplied send right.
4380 * Returns:
4381 * KERN_SUCCESS Changed the special port.
4382 * KERN_INVALID_ARGUMENT The thread is null,
4383 * Illegal mask bit set.
4384 * Illegal exception behavior
4385 * KERN_FAILURE The thread is dead.
4386 * KERN_NO_ACCESS Restricted access to set port
4387 */
4388
4389 kern_return_t
thread_set_exception_ports(thread_t thread,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4390 thread_set_exception_ports(
4391 thread_t thread,
4392 exception_mask_t exception_mask,
4393 ipc_port_t new_port,
4394 exception_behavior_t new_behavior,
4395 thread_state_flavor_t new_flavor)
4396 {
4397 if (thread == THREAD_NULL) {
4398 return KERN_INVALID_ARGUMENT;
4399 }
4400 bool hardened_exception_flow = false;
4401 kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
4402 exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
4403 if (kr != KERN_SUCCESS) {
4404 return kr;
4405 }
4406
4407 return thread_set_exception_ports_internal(thread, exception_mask, new_port, new_behavior, new_flavor, false);
4408 }
4409
4410 kern_return_t
task_set_exception_ports(task_t task,exception_mask_t exception_mask,ipc_port_t new_port,exception_behavior_t new_behavior,thread_state_flavor_t new_flavor)4411 task_set_exception_ports(
4412 task_t task,
4413 exception_mask_t exception_mask,
4414 ipc_port_t new_port,
4415 exception_behavior_t new_behavior,
4416 thread_state_flavor_t new_flavor)
4417 {
4418 ipc_port_t old_port[EXC_TYPES_COUNT];
4419 boolean_t privileged = task_is_privileged(current_task());
4420 register int i;
4421
4422 if (task == TASK_NULL) {
4423 return KERN_INVALID_ARGUMENT;
4424 }
4425 bool hardened_exception_flow = false;
4426 kern_return_t kr = set_exception_ports_validation(task, exception_mask,
4427 new_port, new_behavior, new_flavor, hardened_exception_flow);
4428 if (kr != KERN_SUCCESS) {
4429 return kr;
4430 }
4431
4432
4433 #if CONFIG_MACF
4434 if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
4435 return KERN_NO_ACCESS;
4436 }
4437
4438 struct label *new_label = mac_exc_create_label_for_current_proc();
4439 #endif
4440
4441 itk_lock(task);
4442
4443 /*
4444 * Allow setting exception port during the span of ipc_task_init() to
4445 * ipc_task_terminate(). posix_spawn() port actions can set exception
4446 * ports on target task _before_ task IPC access is enabled.
4447 */
4448 if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == IP_NULL) {
4449 itk_unlock(task);
4450 #if CONFIG_MACF
4451 mac_exc_free_label(new_label);
4452 #endif
4453 return KERN_FAILURE;
4454 }
4455
4456 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4457 if ((exception_mask & (1 << i))
4458 #if CONFIG_MACF
4459 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
4460 #endif
4461 ) {
4462 old_port[i] = task->exc_actions[i].port;
4463 task->exc_actions[i].port =
4464 exception_port_copy_send(new_port);
4465 task->exc_actions[i].behavior = new_behavior;
4466 task->exc_actions[i].flavor = new_flavor;
4467 task->exc_actions[i].privileged = privileged;
4468 } else {
4469 old_port[i] = IP_NULL;
4470 }
4471 }
4472
4473 itk_unlock(task);
4474
4475 #if CONFIG_MACF
4476 mac_exc_free_label(new_label);
4477 #endif
4478
4479 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
4480 if (IP_VALID(old_port[i])) {
4481 ipc_port_release_send(old_port[i]);
4482 }
4483 }
4484
4485 if (IP_VALID(new_port)) { /* consume send right */
4486 ipc_port_release_send(new_port);
4487 }
4488
4489 return KERN_SUCCESS;
4490 }
4491
4492 /*
4493 * Routine: thread/task_swap_exception_ports [kernel call]
4494 * Purpose:
4495 * Sets the thread/task exception port, flavor and
4496 * behavior for the exception types specified by the
4497 * mask.
4498 *
4499 * The old ports, behavior and flavors are returned
4500 * Count specifies the array sizes on input and
4501 * the number of returned ports etc. on output. The
4502 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
4504 * array specifies the corresponding exception type(s).
4505 *
4506 * Conditions:
4507 * Nothing locked. If successful, consumes
4508 * the supplied send right.
4509 *
 * Returns up to [in] CountCnt elements.
4511 * Returns:
4512 * KERN_SUCCESS Changed the special port.
4513 * KERN_INVALID_ARGUMENT The thread is null,
4514 * Illegal mask bit set.
4515 * Illegal exception behavior
4516 * KERN_FAILURE The thread is dead.
4517 * KERN_NO_ACCESS Restricted access to set port
4518 */
4519
kern_return_t
thread_swap_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	thread_ro_t tro;
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int i, j, count;

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* This entry point never participates in the hardened-exception flow. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(get_threadtask(thread),
	    exception_mask, new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* Let the MAC policy veto the operation before any state is modified. */
	if (mac_task_check_set_thread_exception_ports(current_task(), get_threadtask(thread), exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	struct label *new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		/* Thread is terminating: bail out without touching its actions. */
		thread_mtx_unlock(thread);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	tro = get_thread_ro(thread);
	/* Lazily allocate the per-thread exception action table. */
	if (tro->tro_exc_actions == NULL) {
		ipc_thread_init_exc_actions(tro);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * Return the old (port, behavior, flavor) tuples — coalescing identical
	 * tuples into a single output slot by OR-ing their masks — while
	 * installing the new action. Stops early once the caller-supplied
	 * output arrays (*CountCnt slots) are full.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		struct exception_action *action = &tro->tro_exc_actions[i];

		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(action, new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (action->port == ports[j] &&
				    action->behavior == behaviors[j] &&
				    action->flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* No match found: emit a new slot with its own send right. */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(action->port);

				behaviors[j] = action->behavior;
				flavors[j] = action->flavor;
				++count;
			}

			/* Stash the displaced right; released after dropping the mutex. */
			old_port[i] = action->port;
			action->port = exception_port_copy_send(new_port);
			action->behavior = new_behavior;
			action->flavor = new_flavor;
			action->privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Walk back over the visited indices releasing displaced rights; only
	 * entries in [FIRST_EXCEPTION, i) were initialized above.
	 * NOTE(review): termination relies on FIRST_EXCEPTION > 0 with
	 * unsigned i — confirm against the exception_types.h numbering.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4632
kern_return_t
task_swap_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	boolean_t privileged = task_is_privileged(current_task());
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* This entry point never participates in the hardened-exception flow. */
	bool hardened_exception_flow = false;
	kern_return_t kr = set_exception_ports_validation(task, exception_mask,
	    new_port, new_behavior, new_flavor, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

#if CONFIG_MACF
	/* Let the MAC policy veto the operation before any state is modified. */
	if (mac_task_check_set_task_exception_ports(current_task(), task, exception_mask, new_behavior) != 0) {
		return KERN_NO_ACCESS;
	}

	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (!task->ipc_active) {
		/* Task IPC has already been torn down. */
		itk_unlock(task);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * Return the old (port, behavior, flavor) tuples — coalescing identical
	 * tuples into a single output slot by OR-ing their masks — while
	 * installing the new action. Stops early once the caller-supplied
	 * output arrays (*CountCnt slots) are full.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* No match found: emit a new slot with its own send right. */
				masks[j] = (1 << i);
				ports[j] = exception_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right; released after dropping the lock. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = exception_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Walk back over the visited indices releasing displaced rights; only
	 * entries in [FIRST_EXCEPTION, i) were initialized above.
	 * NOTE(review): termination relies on FIRST_EXCEPTION > 0 with
	 * unsigned i — confirm against the exception_types.h numbering.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
4741
4742 /*
4743 * Routine: thread/task_get_exception_ports [kernel call]
4744 * Purpose:
4745 * Clones a send right for each of the thread/task's exception
4746 * ports specified in the mask and returns the behaviour
4747 * and flavor of said port.
4748 *
 * Returns up to [in] CountCnt elements.
4750 *
4751 * Conditions:
4752 * Nothing locked.
4753 * Returns:
4754 * KERN_SUCCESS Extracted a send right.
4755 * KERN_INVALID_ARGUMENT The thread is null,
4756 * Invalid special port,
4757 * Illegal mask bit set.
4758 * KERN_FAILURE The thread is dead.
4759 */
static kern_return_t
thread_get_exception_ports_internal(
	thread_t thread,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_info_array_t ports_info,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	thread_ro_t tro;
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must want either port info or real send rights. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a save area for FP state before taking thread lock,
	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
	 * an FP state allocation while holding thread locks.
	 */
	ml_fp_save_area_prealloc();

	tro = get_thread_ro(thread);
	thread_mtx_lock(thread);

	if (!thread->active) {
		/* Thread is terminating. */
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	count = 0;

	/* No action table allocated yet means no exception ports are set. */
	if (tro->tro_exc_actions == NULL) {
		goto done;
	}

	/*
	 * Coalesce identical (port, behavior, flavor) tuples into a single
	 * output slot by OR-ing their masks; stop adding new slots once the
	 * caller-supplied arrays (*CountCnt slots) are full.
	 */
	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = tro->tro_exc_actions[i].port;
			exception_behavior_t exc_behavior = tro->tro_exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = tro->tro_exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					/* Info flavor: export hashed identities, not send rights. */
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRHASH(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRHASH(receiver) : 0;
					}
				} else {
					/* Rights flavor: hand the caller a real send right. */
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

done:
	thread_mtx_unlock(thread);

	*CountCnt = count;

	return KERN_SUCCESS;
}
4859
4860 kern_return_t
thread_get_exception_ports(thread_t thread,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4861 thread_get_exception_ports(
4862 thread_t thread,
4863 exception_mask_t exception_mask,
4864 exception_mask_array_t masks,
4865 mach_msg_type_number_t *CountCnt,
4866 exception_port_array_t ports,
4867 exception_behavior_array_t behaviors,
4868 thread_state_flavor_array_t flavors)
4869 {
4870 return thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4871 NULL, ports, behaviors, flavors);
4872 }
4873
4874 kern_return_t
thread_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4875 thread_get_exception_ports_info(
4876 mach_port_t port,
4877 exception_mask_t exception_mask,
4878 exception_mask_array_t masks,
4879 mach_msg_type_number_t *CountCnt,
4880 exception_port_info_array_t ports_info,
4881 exception_behavior_array_t behaviors,
4882 thread_state_flavor_array_t flavors)
4883 {
4884 kern_return_t kr;
4885
4886 thread_t thread = convert_port_to_thread_read_no_eval(port);
4887
4888 if (thread == THREAD_NULL) {
4889 return KERN_INVALID_ARGUMENT;
4890 }
4891
4892 kr = thread_get_exception_ports_internal(thread, exception_mask, masks, CountCnt,
4893 ports_info, NULL, behaviors, flavors);
4894
4895 thread_deallocate(thread);
4896 return kr;
4897 }
4898
4899 kern_return_t
thread_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)4900 thread_get_exception_ports_from_user(
4901 mach_port_t port,
4902 exception_mask_t exception_mask,
4903 exception_mask_array_t masks,
4904 mach_msg_type_number_t *CountCnt,
4905 exception_port_array_t ports,
4906 exception_behavior_array_t behaviors,
4907 thread_state_flavor_array_t flavors)
4908 {
4909 kern_return_t kr;
4910
4911 thread_t thread = convert_port_to_thread(port);
4912
4913 if (thread == THREAD_NULL) {
4914 return KERN_INVALID_ARGUMENT;
4915 }
4916
4917 kr = thread_get_exception_ports(thread, exception_mask, masks, CountCnt, ports, behaviors, flavors);
4918
4919 thread_deallocate(thread);
4920 return kr;
4921 }
4922
static kern_return_t
task_get_exception_ports_internal(
	task_t task,
	exception_mask_t exception_mask,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_info_array_t ports_info,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	unsigned int count;
	boolean_t info_only = (ports_info != NULL);
	ipc_port_t port_ptrs[EXC_TYPES_COUNT]; /* pointers only, does not hold right */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Caller must want either port info or real send rights. */
	if (!info_only && !ports) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Allocate a save area for FP state before taking task lock,
	 * if necessary, to ensure that VM_KERNEL_ADDRHASH() doesn't cause
	 * an FP state allocation while holding task locks.
	 */
	ml_fp_save_area_prealloc();

	itk_lock(task);

	if (!task->ipc_active) {
		/* Task IPC has already been torn down. */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	count = 0;

	/*
	 * Coalesce identical (port, behavior, flavor) tuples into a single
	 * output slot by OR-ing their masks; stop adding new slots once the
	 * caller-supplied arrays (*CountCnt slots) are full.
	 */
	for (int i = FIRST_EXCEPTION, j = 0; i < EXC_TYPES_COUNT; ++i) {
		if (exception_mask & (1 << i)) {
			ipc_port_t exc_port = task->exc_actions[i].port;
			exception_behavior_t exc_behavior = task->exc_actions[i].behavior;
			thread_state_flavor_t exc_flavor = task->exc_actions[i].flavor;

			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (exc_port == port_ptrs[j] &&
				    exc_behavior == behaviors[j] &&
				    exc_flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count && count < *CountCnt) {
				masks[j] = (1 << i);
				port_ptrs[j] = exc_port;

				if (info_only) {
					/* Info flavor: export hashed identities, not send rights. */
					if (!IP_VALID(exc_port)) {
						ports_info[j] = (ipc_info_port_t){ .iip_port_object = 0, .iip_receiver_object = 0 };
					} else {
						uintptr_t receiver;
						(void)ipc_port_get_receiver_task(exc_port, &receiver);
						ports_info[j].iip_port_object = (natural_t)VM_KERNEL_ADDRHASH(exc_port);
						ports_info[j].iip_receiver_object = receiver ? (natural_t)VM_KERNEL_ADDRHASH(receiver) : 0;
					}
				} else {
					/* Rights flavor: hand the caller a real send right. */
					ports[j] = exception_port_copy_send(exc_port);
				}
				behaviors[j] = exc_behavior;
				flavors[j] = exc_flavor;
				++count;
			}
		}
	}

	itk_unlock(task);

	*CountCnt = count;

	return KERN_SUCCESS;
}
5014
5015 kern_return_t
task_get_exception_ports(task_t task,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)5016 task_get_exception_ports(
5017 task_t task,
5018 exception_mask_t exception_mask,
5019 exception_mask_array_t masks,
5020 mach_msg_type_number_t *CountCnt,
5021 exception_port_array_t ports,
5022 exception_behavior_array_t behaviors,
5023 thread_state_flavor_array_t flavors)
5024 {
5025 return task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
5026 NULL, ports, behaviors, flavors);
5027 }
5028
5029 kern_return_t
task_get_exception_ports_info(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_info_array_t ports_info,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)5030 task_get_exception_ports_info(
5031 mach_port_t port,
5032 exception_mask_t exception_mask,
5033 exception_mask_array_t masks,
5034 mach_msg_type_number_t *CountCnt,
5035 exception_port_info_array_t ports_info,
5036 exception_behavior_array_t behaviors,
5037 thread_state_flavor_array_t flavors)
5038 {
5039 kern_return_t kr;
5040
5041 task_t task = convert_port_to_task_read_no_eval(port);
5042
5043 if (task == TASK_NULL) {
5044 return KERN_INVALID_ARGUMENT;
5045 }
5046
5047 kr = task_get_exception_ports_internal(task, exception_mask, masks, CountCnt,
5048 ports_info, NULL, behaviors, flavors);
5049
5050 task_deallocate(task);
5051 return kr;
5052 }
5053
5054 kern_return_t
task_get_exception_ports_from_user(mach_port_t port,exception_mask_t exception_mask,exception_mask_array_t masks,mach_msg_type_number_t * CountCnt,exception_port_array_t ports,exception_behavior_array_t behaviors,thread_state_flavor_array_t flavors)5055 task_get_exception_ports_from_user(
5056 mach_port_t port,
5057 exception_mask_t exception_mask,
5058 exception_mask_array_t masks,
5059 mach_msg_type_number_t *CountCnt,
5060 exception_port_array_t ports,
5061 exception_behavior_array_t behaviors,
5062 thread_state_flavor_array_t flavors)
5063 {
5064 kern_return_t kr;
5065
5066 task_t task = convert_port_to_task(port);
5067
5068 if (task == TASK_NULL) {
5069 return KERN_INVALID_ARGUMENT;
5070 }
5071
5072 kr = task_get_exception_ports(task, exception_mask, masks, CountCnt, ports, behaviors, flavors);
5073
5074 task_deallocate(task);
5075 return kr;
5076 }
5077
5078 /*
5079 * Routine: ipc_thread_port_unpin
5080 * Purpose:
5081 *
5082 * Called on the thread when it's terminating so that the last ref
5083 * can be deallocated without a guard exception.
5084 * Conditions:
5085 * Thread mutex lock is held.
5086 */
5087 void
ipc_thread_port_unpin(ipc_port_t port)5088 ipc_thread_port_unpin(
5089 ipc_port_t port)
5090 {
5091 if (port == IP_NULL) {
5092 return;
5093 }
5094 ip_mq_lock(port);
5095 port->ip_pinned = 0;
5096 ip_mq_unlock(port);
5097 }
5098
5099 /*
5100 * Routine: task_register_hardened_exception_handler
5101 * Purpose:
5102 * Register a port as a hardened exception handler.
5103 * See task.defs for additional info
5104 * Conditions:
5105 * Nothing locked.
5106 * Limit of one hardened exception handler per task
5107 * Returns:
5108 * KERN_INVALID_ARGUMENT invalid thread
5109 * KERN_DENIED breaking the security policy
5110 * KERN_NAME_EXISTS Already set a hardened exception handler on this task
5111 * KERN_SUCCESS
5112 */
kern_return_t
task_register_hardened_exception_handler(
	task_t task,
	uint32_t signed_pc_key,
	exception_mask_t exceptions_allowed,
	exception_behavior_t behaviors_allowed,
	thread_state_flavor_t flavors_allowed,
	mach_port_t new_port)
{
	ipc_port_t old_port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}
	/* A hardened handler must be backed by a dedicated exception port. */
	if (IP_VALID(new_port) && !ip_is_exception_port(new_port)) {
		return KERN_INVALID_ARGUMENT;
	}


	/* Run the common validation in the hardened flow. */
	bool hardened_exception_flow = true;
	kern_return_t kr = set_exception_ports_validation(task, exceptions_allowed,
	    new_port, behaviors_allowed, flavors_allowed, hardened_exception_flow);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* You can only register one hardened exception handler */
	if (exception_ports_frozen(task)) {
		return KERN_INVALID_ARGUMENT;
	}
	/*
	 * NOTE(review): the freeze flag is set before itk_lock is taken, so two
	 * racing registrations can both pass the frozen check above; the
	 * IP_VALID(old_port) check under the lock is what actually enforces the
	 * one-handler limit. The flag also stays set on the KERN_NAME_EXISTS
	 * path. Confirm this ordering is intentional.
	 */
	task_ro_flags_set(task, TFRO_FREEZE_EXCEPTION_PORTS);
	itk_lock(task);

	/* No reason to allow setting this multiple times per task */
	old_port = task->hardened_exception_action.ea.port;
	if (IP_VALID(old_port)) {
		itk_unlock(task);
		return KERN_NAME_EXISTS;
	}

	/* Stash the semantics for this port on the task */
	struct hardened_exception_action hea;
	hea.ea.port = new_port; /* Donate our send right to the task */
	hea.ea.flavor = flavors_allowed;
	hea.ea.behavior = behaviors_allowed;
	hea.ea.privileged = false;
	hea.ea.label = NULL;
	hea.signed_pc_key = signed_pc_key;
	hea.exception = exceptions_allowed;

	task->hardened_exception_action = hea;
	itk_unlock(task);

	return KERN_SUCCESS;
}
5168
5169 /*
5170 * Routine: thread_adopt_exception_handler
5171 * Purpose:
5172 * Adopt the hardened exception handler from the current task, for this thread.
5173 * Allows you to set exception ports on a thread after exception ports
5174 * have been frozen for the task.
5175 * Conditions:
5176 * Nothing locked
5177 * Returns:
5178 * KERN_INVALID_ARGUMENT invalid thread
5179 * KERN_DENIED breaking the security policy
5180 * KERN_SUCCESS
5181 */
kern_return_t
thread_adopt_exception_handler(
	thread_t thread,
	mach_port_t exc_port,
	exception_mask_t exc_mask,
	exception_behavior_t behavior_mask,
	thread_state_flavor_t flavor_mask
	)
{
	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	task_t task = get_threadtask(thread);

	/* Only threads belonging to the calling task may adopt its handler. */
	if (task != current_task()) {
		return KERN_DENIED;
	}

	/* We must have exactly one hardened exception port per task */
	if (!exception_ports_frozen(task)) {
		return KERN_DENIED;
	}

	/* Ensure we see a consistent state of the hardened exception action */
	itk_lock(task);
	struct hardened_exception_action hea = task->hardened_exception_action;
	itk_unlock(task);

	/* Any non-null port must be the task's registered handler port. */
	if (exc_port != IP_NULL && exc_port != hea.ea.port) {
		return KERN_DENIED;
	}
	/* Ensure that the new masks for this thread are a subset of the
	 * allowable masks for this exception handler
	 */
	if (exc_mask & ~hea.exception ||
	    behavior_mask & ~hea.ea.behavior ||
	    flavor_mask & ~hea.ea.flavor) {
		return KERN_DENIED;
	}

	assert(!IP_VALID(exc_port) || exc_port->ip_immovable_receive);
	assert(!IP_VALID(exc_port) || ip_is_exception_port(exc_port));

	/* We can safely assume this will be valid because we called set_exception_ports_validation on it when it was originally set on the task */
	return thread_set_exception_ports_internal(thread, exc_mask, exc_port, behavior_mask, flavor_mask, true);
}
5229