/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/kern_return.h>
#include <mach/mach_types.h>
#include <mach/port.h>

#include <kern/assert.h>
#include <kern/exc_guard.h>
#include <kern/ipc_kobject.h>
#include <kern/kern_types.h>
#include <kern/mach_filter.h>
#include <kern/task.h>

#include <vm/vm_map_xnu.h>      /* current_map() */
#include <vm/vm_protos.h>       /* current_proc() */

#include <ipc/ipc_policy.h>
#include <ipc/ipc_service_port.h>
#include <ipc/port.h>

#if CONFIG_CSR
#include <sys/csr.h>
#endif
#include <sys/codesign.h>
#include <sys/proc_ro.h>
#include <sys/reason.h>

#include <libkern/coreanalytics/coreanalytics.h>

extern int proc_isinitproc(struct proc *p);
extern bool proc_is_simulated(struct proc *);
extern char *proc_name_address(struct proc *p);
extern int exit_with_guard_exception(
	struct proc                *p,
	mach_exception_data_type_t  code,
	mach_exception_data_type_t  subcode);


#pragma mark policy tunables

extern const vm_size_t ipc_kmsg_max_vm_space;

#if IPC_HAS_LEGACY_MACH_MSG_TRAP
#if DEVELOPMENT || DEBUG
static TUNABLE(bool, allow_legacy_mach_msg, "allow_legacy_mach_msg", false);
#endif /* DEVELOPMENT || DEBUG */
#endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */


#pragma mark policy options

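/*
 * Compute the effective IPC policy for a task: sanitize the user provided
 * options down to MACH64_MSG_OPTION_USER, translate aliased flags, and fold
 * in policy bits derived from the task (platform binary, hardened runtime,
 * Rosetta translation, simulator status, message filtering).
 */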
mach_msg_option64_t
ipc_current_user_policy(
	task_t                  task,
	mach_msg_option64_t     opts)
{
	uint32_t ro_flags = task_ro_flags_get(task);

	/*
	 * Step 1: convert to kernel flags
	 * - clear any kernel only flags
	 * - convert MACH_SEND_FILTER_NONFATAL which is aliased to the
	 *   MACH_SEND_ALWAYS kernel flag into MACH64_POLICY_FILTER_NON_FATAL.
	 */
	opts &= MACH64_MSG_OPTION_USER;

	if (opts & MACH64_SEND_FILTER_NONFATAL) {
		/*
		 * MACH64_SEND_FILTER_NONFATAL aliases the MACH_SEND_ALWAYS
		 * kernel flag, so translate it into its dedicated policy bit.
		 */
		opts &= ~MACH64_SEND_FILTER_NONFATAL;
		opts |= MACH64_POLICY_FILTER_NON_FATAL;
	}
	if (ro_flags & TFRO_FILTER_MSG) {
		opts |= MACH64_POLICY_FILTER_MSG;
	}

	/*
	 * Step 2: derive policy flags from the current context
	 */
	if (ro_flags & TFRO_PLATFORM) {
		opts |= MACH64_POLICY_PLATFORM;
		opts |= MACH64_POLICY_RIGID;
		opts |= MACH64_POLICY_HARDENED;
	}
	if (ro_flags & TFRO_HARDENED) {
		opts |= MACH64_POLICY_RIGID;
		opts |= MACH64_POLICY_HARDENED;
	}
#if CONFIG_ROSETTA
	if (task_is_translated(task)) {
		opts |= MACH64_POLICY_TRANSLATED;
	}
#endif
#if XNU_TARGET_OS_OSX
	struct proc *proc = get_bsdtask_info(task);
	if (proc_is_simulated(proc)) {
		opts |= MACH64_POLICY_SIMULATED;
	}
	if (csproc_hardened_runtime(proc)) {
		opts |= MACH64_POLICY_HARDENED;
	}
#endif
	if (!(opts & MACH64_POLICY_NEEDED_MASK)) {
		/* helps assert that a policy has been set */
		opts |= MACH64_POLICY_DEFAULT;
	}

	return opts;
}

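/*
 * Preflight validation of option bits that requires no message copyin:
 * mach_msg2() sends must carry exactly one CFI call-type flag, and
 * vectorized sends are only valid for message-queue calls (or
 * MACH64_SEND_ANY).
 */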
mach_msg_return_t
ipc_preflight_msg_option64(mach_msg_option64_t opts)
{
	bool success = true;

	if ((opts & MACH64_SEND_MSG) && (opts & MACH64_MACH_MSG2)) {
		mach_msg_option64_t cfi = opts & MACH64_MSG_OPTION_CFI_MASK;

#if !XNU_TARGET_OS_OSX
		cfi &= ~MACH64_SEND_ANY;
#endif
		/* mach_msg2() calls must have exactly _one_ of these set */
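		/* (cfi & (cfi - 1)) is nonzero exactly when more than one bit is set */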
		if (cfi == 0 || (cfi & (cfi - 1)) != 0) {
			success = false;
		}

		/* vector calls are only supported for message queues */
		if ((opts & (MACH64_SEND_MQ_CALL | MACH64_SEND_ANY)) == 0 &&
		    (opts & MACH64_MSG_VECTOR)) {
			success = false;
		}
	}

	if (success) {
		return MACH_MSG_SUCCESS;
	}

	mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
	if (opts & MACH64_MACH_MSG2) {
		return MACH_SEND_INVALID_OPTIONS;
	}
	return KERN_NOT_SUPPORTED;
}


#pragma mark legacy trap policies
#if IPC_HAS_LEGACY_MACH_MSG_TRAP

CA_EVENT(mach_msg_trap_event,
    CA_INT, msgh_id,
    CA_INT, sw_platform,
    CA_INT, sdk,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);

static void
mach_msg_legacy_send_analytics(
	mach_msg_id_t           msgh_id,
	uint32_t                platform,
	uint32_t                sdk)
{
	char *proc_name = proc_name_address(current_proc());
	const char *team_id = csproc_get_teamid(current_proc());
	const char *signing_id = csproc_get_identity(current_proc());

	ca_event_t ca_event = CA_EVENT_ALLOCATE(mach_msg_trap_event);
	CA_EVENT_TYPE(mach_msg_trap_event) *msg_event = ca_event->data;

	msg_event->msgh_id = msgh_id;
	msg_event->sw_platform = platform;
	msg_event->sdk = sdk;

	if (proc_name) {
		strlcpy(msg_event->proc_name, proc_name, CA_PROCNAME_LEN);
	}

	if (team_id) {
		strlcpy(msg_event->team_id, team_id, CA_TEAMID_MAX_LEN);
	}

	if (signing_id) {
		strlcpy(msg_event->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
	}

	CA_EVENT_SEND(ca_event);
}

static bool
ipc_policy_allow_legacy_mach_msg_trap_for_platform(
	mach_msg_id_t           msgid)
{
	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
	uint32_t platform = pro->p_platform_data.p_platform;
	uint32_t sdk = pro->p_platform_data.p_sdk;
	uint32_t sdk_major = sdk >> 16;

	/*
	 * Special rules, due to unfortunate bincompat reasons,
	 * allow for a hardcoded list of MIG calls to XNU to go through
	 * for macOS apps linked against an SDK older than 12.x.
	 */
	switch (platform) {
	case PLATFORM_MACOS:
		if (sdk == 0 || sdk_major > 12) {
			return false;
		}
		break;
	default:
		/* disallow for any non-macOS platform */
		return false;
	}

	switch (msgid) {
	case 0xd4a:  /* task_threads */
	case 0xd4d:  /* task_info */
	case 0xe13:  /* thread_get_state */
	case 0x12c4: /* mach_vm_read */
	case 0x12c8: /* mach_vm_read_overwrite */
		mach_msg_legacy_send_analytics(msgid, platform, sdk);
		return true;
	default:
		return false;
	}
}


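/*
 * Decide whether the legacy mach_msg_trap() send path is still allowed for
 * the caller. Hardened processes are always denied; the remaining carve-outs
 * (32-bit x86 tasks, Rosetta, a DEVELOPMENT/DEBUG boot-arg, and a hardcoded
 * macOS MIG call list) exist purely for binary compatibility.
 */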
mach_msg_return_t
ipc_policy_allow_legacy_send_trap(
	mach_msg_id_t           msgid,
	mach_msg_option64_t     opts)
{
	if ((opts & MACH64_POLICY_HARDENED) == 0) {
#if __x86_64__
		if (current_map()->max_offset <= VM_MAX_ADDRESS) {
			/*
			 * Legacy mach_msg_trap() is the only
			 * available thing for 32-bit tasks
			 */
			return MACH_MSG_SUCCESS;
		}
#endif /* __x86_64__ */
#if CONFIG_ROSETTA
		if (opts & MACH64_POLICY_TRANSLATED) {
			/*
			 * Similarly, on Rosetta, allow mach_msg_trap()
			 * as those apps likely can't be fixed anymore
			 */
			return MACH_MSG_SUCCESS;
		}
#endif
#if DEVELOPMENT || DEBUG
		if (allow_legacy_mach_msg) {
			/* Honor boot-arg */
			return MACH_MSG_SUCCESS;
		}
#endif /* DEVELOPMENT || DEBUG */
		if (ipc_policy_allow_legacy_mach_msg_trap_for_platform(msgid)) {
			return MACH_MSG_SUCCESS;
		}
	}

	mach_port_guard_exception(msgid, opts, kGUARD_EXC_INVALID_OPTIONS);
	/*
	 * this should be MACH_SEND_INVALID_OPTIONS,
	 * but that error only exists for mach_msg2().
	 */
	return KERN_NOT_SUPPORTED;
}


#endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */
#pragma mark ipc policy telemetry

/*
 * As the CA framework relies on successfully allocating zalloc memory,
 * we maintain a small buffer that gets flushed when full. This helps us
 * avoid taking spinlocks when working with CA.
 */
#define IPC_POLICY_VIOLATIONS_RB_SIZE 2

/*
 * Stripped down version of a service port's string name. This is to avoid
 * overwhelming CA's dynamic memory allocation.
 */
#define CA_MACH_SERVICE_PORT_NAME_LEN 86

struct ipc_policy_violations_rb_entry {
	char proc_name[CA_PROCNAME_LEN];
	char service_name[CA_MACH_SERVICE_PORT_NAME_LEN];
	char team_id[CA_TEAMID_MAX_LEN];
	char signing_id[CA_SIGNINGID_MAX_LEN];
	ipc_policy_violation_id_t violation_id;
	int sw_platform;
	int msgh_id;
	int sdk;
};
struct ipc_policy_violations_rb_entry ipc_policy_violations_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
static uint8_t ipc_policy_violations_rb_index = 0;

LCK_GRP_DECLARE(ipc_telemetry_lock_grp, "ipc_telemetry_lock_grp");
LCK_TICKET_DECLARE(ipc_telemetry_lock, &ipc_telemetry_lock_grp);

/*
 * Telemetry: report back the process name violating ipc policy. Note that
 * this event can be used to report any type of ipc violation through an
 * ipc_policy_violation_id_t. It is named reply_port_semantics_violations
 * because we are reusing an existing event.
 */
CA_EVENT(reply_port_semantics_violations,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name,
    CA_STATIC_STRING(CA_MACH_SERVICE_PORT_NAME_LEN), service_name,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_INT, reply_port_semantics_violation);

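/*
 * Emit a single buffered violation record to CoreAnalytics. Allocation uses
 * Z_NOWAIT, so the event is silently dropped when zalloc memory is not
 * immediately available.
 */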
static void
send_telemetry(
	const struct ipc_policy_violations_rb_entry *entry)
{
	ca_event_t ca_event = CA_EVENT_ALLOCATE_FLAGS(reply_port_semantics_violations, Z_NOWAIT);
	if (ca_event) {
		CA_EVENT_TYPE(reply_port_semantics_violations) *event = ca_event->data;

		strlcpy(event->service_name, entry->service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
		strlcpy(event->proc_name, entry->proc_name, CA_PROCNAME_LEN);
		strlcpy(event->team_id, entry->team_id, CA_TEAMID_MAX_LEN);
		strlcpy(event->signing_id, entry->signing_id, CA_SIGNINGID_MAX_LEN);
		event->reply_port_semantics_violation = entry->violation_id;

		CA_EVENT_SEND(ca_event);
	}
}

/*
 * Routine: flush_ipc_policy_violations_telemetry
 * Conditions:
 *	Assumes the ipc telemetry lock is held.
 *	Drops it while sending telemetry and re-acquires it before returning.
 */
static void
flush_ipc_policy_violations_telemetry(void)
{
	struct ipc_policy_violations_rb_entry local_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
	uint8_t local_rb_index = 0;

	if (__improbable(ipc_policy_violations_rb_index > IPC_POLICY_VIOLATIONS_RB_SIZE)) {
		panic("Invalid ipc policy violation buffer index %d > %d",
		    ipc_policy_violations_rb_index, IPC_POLICY_VIOLATIONS_RB_SIZE);
	}

	/*
	 * We operate on a local copy of the telemetry buffer because the CA
	 * framework relies on successfully allocating zalloc memory. It cannot
	 * do that if we are accessing the shared buffer with spin locks held.
	 */
	while (local_rb_index != ipc_policy_violations_rb_index) {
		local_rb[local_rb_index] = ipc_policy_violations_rb[local_rb_index];
		local_rb_index++;
	}

	lck_ticket_unlock(&ipc_telemetry_lock);

	while (local_rb_index > 0) {
		struct ipc_policy_violations_rb_entry *entry = &local_rb[--local_rb_index];
		send_telemetry(entry);
	}

	/*
	 * Finally mark the buffer as empty. This also acts as a rate limiting
	 * mechanism for the events: new events are dropped until the buffer
	 * has been fully flushed.
	 */
	lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
	ipc_policy_violations_rb_index = 0;
}

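/*
 * Record an IPC policy violation in the telemetry ring buffer, flushing the
 * buffer out to CoreAnalytics once it fills up. Reply port semantics opt-out
 * violations are rate limited to a single event per task.
 */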
void
ipc_stash_policy_violations_telemetry(
	ipc_policy_violation_id_t violation_id,
	mach_service_port_info_t  sp_info,
	int                       aux_data)
{
	struct ipc_policy_violations_rb_entry *entry;
	char *service_name = (char *) "unknown";
	task_t task = current_task_early();
	bool skip_telemetry = false;

	if (task && violation_id == IPCPV_REPLY_PORT_SEMANTICS_OPTOUT) {
		task_lock(task);
		/* Telemetry rate limited to once per task per host. */
		skip_telemetry = task_has_reply_port_telemetry(task);
		if (!skip_telemetry) {
			task_set_reply_port_telemetry(task);
		}
		task_unlock(task);
	}

	if (skip_telemetry) {
		return;
	}

	if (sp_info) {
		service_name = sp_info->mspi_string_name;
	}

	if (task == NULL) {
		/* nothing to record, and the telemetry lock is never taken */
		return;
	}

	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
	uint32_t platform = pro->p_platform_data.p_platform;
	uint32_t sdk = pro->p_platform_data.p_sdk;
	char *proc_name = (char *) "unknown";
#ifdef MACH_BSD
	proc_name = proc_name_address(get_bsdtask_info(task));
#endif /* MACH_BSD */
	const char *team_id = csproc_get_teamid(current_proc());
	const char *signing_id = csproc_get_identity(current_proc());

	lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);

	if (ipc_policy_violations_rb_index >= IPC_POLICY_VIOLATIONS_RB_SIZE) {
		/* Dropping the event since buffer is full. */
		lck_ticket_unlock(&ipc_telemetry_lock);
		return;
	}
	entry = &ipc_policy_violations_rb[ipc_policy_violations_rb_index++];
	strlcpy(entry->proc_name, proc_name, CA_PROCNAME_LEN);
	strlcpy(entry->service_name, service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
	entry->violation_id = violation_id;

	if (team_id) {
		strlcpy(entry->team_id, team_id, CA_TEAMID_MAX_LEN);
	}

	if (signing_id) {
		strlcpy(entry->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
	}
	entry->msgh_id = aux_data;
	entry->sw_platform = platform;
	entry->sdk = sdk;

	if (ipc_policy_violations_rb_index == IPC_POLICY_VIOLATIONS_RB_SIZE) {
		flush_ipc_policy_violations_telemetry();
	}

	lck_ticket_unlock(&ipc_telemetry_lock);
}

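/*
 * Record a moved-reply-port violation, distinguishing hardened runtime
 * binaries from other third party ones.
 */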
void
send_prp_telemetry(int msgh_id)
{
	ipc_policy_violation_id_t violation_type = csproc_hardened_runtime(current_proc()) ?
	    IPCPV_MOVE_REPLY_PORT_HARDENED_RUNTIME : IPCPV_MOVE_REPLY_PORT_3P;

	ipc_stash_policy_violations_telemetry(violation_type, NULL, msgh_id);
}

#pragma mark MACH_SEND_MSG policies

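/*
 * Schema validation of a user message header that needs no copyin:
 * kobject calls accept at most IPC_KOBJECT_DESC_MAX descriptors.
 */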
mach_msg_return_t
ipc_validate_kmsg_header_schema_from_user(
	mach_msg_user_header_t *hdr __unused,
	mach_msg_size_t         dsc_count,
	mach_msg_option64_t     opts)
{
	if (opts & MACH64_SEND_KOBJECT_CALL) {
		if (dsc_count > IPC_KOBJECT_DESC_MAX) {
			return MACH_SEND_TOO_LARGE;
		}
	}

	return MACH_MSG_SUCCESS;
}

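/*
 * Schema validation of a copied-in user message: bounds the number of
 * out-of-line port descriptors and the total amount of VM the message
 * may require.
 */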
mach_msg_return_t
ipc_validate_kmsg_schema_from_user(
	mach_msg_header_t      *kdata,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts __unused)
{
	mach_msg_kbase_t *kbase = NULL;
	vm_size_t vm_size;

	if (kdata->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		kbase = mach_msg_header_to_kbase(kdata);
	}

	if (send_uctx->send_dsc_port_count > IPC_KMSG_MAX_OOL_PORT_COUNT) {
		return MACH_SEND_TOO_LARGE;
	}

	if (os_add_overflow(send_uctx->send_dsc_vm_size,
	    send_uctx->send_dsc_port_count * sizeof(mach_port_t), &vm_size)) {
		return MACH_SEND_TOO_LARGE;
	}
	if (vm_size > ipc_kmsg_max_vm_space) {
		return MACH_MSG_VM_KERNEL;
	}

	return MACH_MSG_SUCCESS;
}

static mach_msg_return_t
ipc_filter_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_msg_option64_t     opts)
{
	static const uint32_t MACH_BOOTSTRAP_PORT_MSG_ID_MASK = ((1u << 24) - 1);

	mach_msg_filter_id fid = 0;
	mach_port_t remote_port = hdr->msgh_remote_port;
	mach_msg_id_t msg_id = hdr->msgh_id;
	ipc_service_port_label_t label = NULL;
	void *sblabel = NULL;

	if (!ip_enforce_msg_filtering(remote_port)) {
		return MACH_MSG_SUCCESS;
	}

	ip_mq_lock(remote_port);
	if (!ip_active(remote_port)) {
		/* nothing to do */
	} else if (remote_port->ip_service_port) {
		label = remote_port->ip_splabel;
		sblabel = label->ispl_sblabel;

		/*
		 * Mask the top byte for messages sent to launchd's bootstrap port.
		 * Filter any messages with domain 0 (as they correspond to MIG
		 * based messages).
		 */
		if (ipc_service_port_label_is_bootstrap_port(label)) {
			if ((msg_id & ~MACH_BOOTSTRAP_PORT_MSG_ID_MASK) == 0) {
				ip_mq_unlock(remote_port);
				goto filtered_msg;
			}
			msg_id = msg_id & MACH_BOOTSTRAP_PORT_MSG_ID_MASK;
		}
	} else {
		assert(!ip_is_kolabeled(remote_port));
		/* Connection ports can also have send-side message filters */
		sblabel = remote_port->ip_splabel;
	}
	if (sblabel) {
		mach_msg_filter_retain_sblabel_callback(sblabel);
	}
	ip_mq_unlock(remote_port);

	if (sblabel && !mach_msg_fetch_filter_policy(sblabel, msg_id, &fid)) {
		goto filtered_msg;
	}
	return MACH_MSG_SUCCESS;

filtered_msg:
	if ((opts & MACH64_POLICY_FILTER_NON_FATAL) == 0) {
		mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(hdr->msgh_remote_port);

		mach_port_guard_exception(dest_name, hdr->msgh_id,
		    kGUARD_EXC_MSG_FILTERED);
	}
	return MACH_SEND_MSG_FILTERED;
}

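/*
 * Send-only (no MACH64_RCV_MSG) kobject calls are a bincompat carve-out,
 * tolerated only for binaries built against older SDKs: iOS, Mac Catalyst
 * and tvOS 17.x, macOS 14.x, and watchOS 10.x or earlier.
 */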
static bool
ipc_policy_allow_send_only_kobject_calls(void)
{
	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
	uint32_t sdk = pro->p_platform_data.p_sdk;
	uint32_t sdk_major = sdk >> 16;

	switch (pro->p_platform_data.p_platform) {
	case PLATFORM_IOS:
	case PLATFORM_MACCATALYST:
	case PLATFORM_TVOS:
		if (sdk == 0 || sdk_major > 17) {
			return false;
		}
		return true;
	case PLATFORM_MACOS:
		if (sdk == 0 || sdk_major > 14) {
			return false;
		}
		return true;
	case PLATFORM_WATCHOS:
		if (sdk == 0 || sdk_major > 10) {
			return false;
		}
		return true;
	default:
		return false;
	}
}

static mach_msg_return_t
ipc_validate_kmsg_dest_from_user(
	mach_msg_header_t      *hdr,
	ipc_port_t              port,
	mach_msg_option64_t     opts)
{
	/*
	 * This is a _user_ message via mach_msg2_trap().
	 *
	 * To curb kobject port/message queue confusion and improve control flow
	 * integrity, mach_msg2_trap() invocations mandate the use of either
	 * MACH64_SEND_KOBJECT_CALL or MACH64_SEND_MQ_CALL and that the flag
	 * matches the underlying port type (unless the call is from a simulator,
	 * since old simulators keep using mach_msg() in all cases indiscriminately).
	 *
	 * Since:
	 *     (1) We make sure to always pass either MACH64_SEND_MQ_CALL or
	 *         MACH64_SEND_KOBJECT_CALL bit at all sites outside simulators
	 *         (checked by mach_msg2_trap());
	 *     (2) We checked in mach_msg2_trap() that _exactly_ one of the three bits is set;
	 *
	 * the CFI check cannot be bypassed by simply setting MACH64_SEND_ANY.
	 */
#if XNU_TARGET_OS_OSX
	if (opts & MACH64_SEND_ANY) {
		return MACH_MSG_SUCCESS;
	}
#endif /* XNU_TARGET_OS_OSX */

	if (ip_is_kobject(port)) {
		natural_t kotype = ip_kotype(port);

		if (__improbable(kotype == IKOT_TIMER)) {
#if XNU_TARGET_OS_OSX
			if (__improbable(opts & MACH64_POLICY_HARDENED)) {
				return MACH_SEND_INVALID_OPTIONS;
			}
			/*
			 * For bincompat, let's still allow user messages to timer port, but
			 * force MACH64_SEND_MQ_CALL flag for memory segregation.
			 */
			if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
#else
			return MACH_SEND_INVALID_OPTIONS;
#endif
		} else if (kotype == IKOT_UEXT_OBJECT) {
			if (__improbable(!(opts & MACH64_SEND_DK_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
		} else {
			/* Otherwise, caller must set MACH64_SEND_KOBJECT_CALL. */
			if (__improbable(!(opts & MACH64_SEND_KOBJECT_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}

			/* kobject calls must be a combined send/receive */
			if (__improbable((opts & MACH64_RCV_MSG) == 0)) {
				if ((opts & MACH64_POLICY_HARDENED) ||
				    IP_VALID(hdr->msgh_local_port) ||
				    !ipc_policy_allow_send_only_kobject_calls()) {
					return MACH_SEND_INVALID_OPTIONS;
				}
			}
		}
#if CONFIG_CSR
	} else if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
		/*
		 * Allow MACH64_SEND_KOBJECT_CALL flag to message queues
		 * when SIP is off (for Mach-on-Mach emulation).
		 */
#endif /* CONFIG_CSR */
	} else {
		/* If destination is a message queue, caller must set MACH64_SEND_MQ_CALL */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	return MACH_MSG_SUCCESS;
}

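/*
 * Policy validation of a user message header: CFI destination checks for
 * mach_msg2(), no-grant destination enforcement, and send-side message
 * filtering.
 */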
mach_msg_return_t
ipc_validate_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts)
{
	ipc_port_t dest_port = hdr->msgh_remote_port;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;

	if (opts & MACH64_MACH_MSG2) {
		mr = ipc_validate_kmsg_dest_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

	/*
	 * Check if dest is a no-grant port; since this bit is set only on
	 * port construction and cannot be unset later, we can peek at the
	 * bit without paying the cost of locking the port.
	 */
	if (send_uctx->send_dsc_port_count && dest_port->ip_no_grant) {
		mr = MACH_SEND_NO_GRANT_DEST;
		goto out;
	}

	/*
	 * Evaluate message filtering if the sender is filtered.
	 */
	if ((opts & MACH64_POLICY_FILTER_MSG) &&
	    mach_msg_filter_at_least(MACH_MSG_FILTER_CALLBACKS_VERSION_1)) {
		mr = ipc_filter_kmsg_header_from_user(hdr, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

out:
	if (mr == MACH_SEND_INVALID_OPTIONS) {
		mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
	}
	return mr;
}


#pragma mark policy guard violations

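/*
 * Raise an EXC_GUARD mach port violation on the current thread; whether it
 * is fatal depends on the guard flavor and on the task's EXC_GUARD settings.
 */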
void
mach_port_guard_exception(uint32_t target, uint64_t payload, unsigned reason)
{
	mach_exception_code_t code = 0;
	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
	EXC_GUARD_ENCODE_FLAVOR(code, reason);
	EXC_GUARD_ENCODE_TARGET(code, target);
	mach_exception_subcode_t subcode = (uint64_t)payload;
	thread_t t = current_thread();
	bool fatal = false;

	if (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE &&
	    (get_threadtask(t)->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)) {
		fatal = true;
	} else if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
		fatal = true;
	}
	thread_guard_violation(t, code, subcode, fatal);
}

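/*
 * Deliver a soft or hard immovable guard exception.
 *
 * Conditions: port is marked as immovable.
 */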
void
mach_port_guard_exception_immovable(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_port_t             port)
{
	if (space == current_space()) {
		assert(ip_is_immovable_send(port));

		boolean_t hard = task_get_control_port_options(current_task()) &
		    TASK_CONTROL_PORT_IMMOVABLE_HARD;

		if (ip_is_control(port)) {
			assert(task_is_immovable(current_task()));
			mach_port_guard_exception(name, MPG_FLAGS_NONE,
			    hard ? kGUARD_EXC_IMMOVABLE : kGUARD_EXC_IMMOVABLE_NON_FATAL);
		} else {
			/* always fatal exception for non-control port violation */
			mach_port_guard_exception(name, MPG_FLAGS_NONE,
			    kGUARD_EXC_IMMOVABLE);
		}
	}
}

/*
 * Deliver a soft or hard pinned guard exception.
 *
 * Conditions: port is marked as immovable and pinned.
 */
void
mach_port_guard_exception_pinned(
	ipc_space_t             space,
	mach_port_name_t        name,
	__assert_only mach_port_t port,
	uint64_t                payload)
{
	if (space == current_space()) {
		assert(ip_is_immovable_send(port));
		assert(ip_is_control(port)); /* only task/thread control ports can be pinned */

		boolean_t hard = task_get_control_port_options(current_task()) &
		    TASK_CONTROL_PORT_PINNED_HARD;

		assert(task_is_pinned(current_task()));

		mach_port_guard_exception(name, payload,
		    hard ? kGUARD_EXC_MOD_REFS : kGUARD_EXC_MOD_REFS_NON_FATAL);
	}
}

/*
 * Routine: mach_port_guard_ast
 * Purpose:
 *	Raises an exception for mach port guard violation.
 * Conditions:
 *	None.
 * Returns:
 *	None.
 */

void
mach_port_guard_ast(
	thread_t                t,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode)
{
	unsigned int reason = EXC_GUARD_DECODE_GUARD_FLAVOR(code);
	task_t task = get_threadtask(t);
	unsigned int behavior = task->task_exc_guard;
	bool fatal = true;

	assert(task == current_task());
	assert(task != kernel_task);

	if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
		/*
		 * Fatal Mach port guards - always delivered synchronously if dev mode
		 * is on. Check if anyone has registered for synchronous EXC_GUARD; if
		 * yes, deliver it synchronously and then kill the process, else kill
		 * the process and deliver the exception via EXC_CORPSE_NOTIFY.
		 */

		int flags = PX_DEBUG_NO_HONOR;
		exception_info_t info = {
			.os_reason = OS_REASON_GUARD,
			.exception_type = EXC_GUARD,
			.mx_code = code,
			.mx_subcode = subcode,
		};

		if (task_exception_notify(EXC_GUARD, code, subcode, fatal) == KERN_SUCCESS) {
			flags |= PX_PSIGNAL;
		}
		exit_with_mach_exception(get_bsdtask_info(task), info, flags);
	} else {
		/*
		 * Mach port guards controlled by task settings.
		 */

		/* Is delivery enabled */
		if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
			return;
		}

		/* If only once, make sure we're that once */
		while (behavior & TASK_EXC_GUARD_MP_ONCE) {
			uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_MP_DELIVER;

			if (OSCompareAndSwap(behavior, new_behavior, &task->task_exc_guard)) {
				break;
			}
			behavior = task->task_exc_guard;
			if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
				return;
			}
		}
		fatal = (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) &&
		    (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE);
		kern_return_t sync_exception_result;
		sync_exception_result = task_exception_notify(EXC_GUARD, code, subcode, fatal);

		if (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) {
			if (reason > MAX_OPTIONAL_kGUARD_EXC_CODE) {
				/* generate a simulated crash if not handled synchronously */
				if (sync_exception_result != KERN_SUCCESS) {
					task_violated_guard(code, subcode, NULL, TRUE);
				}
			} else {
				/*
				 * Only generate a crash report if synchronous EXC_GUARD
				 * wasn't handled; the process has to die regardless.
				 */

				int flags = PX_DEBUG_NO_HONOR;
				exception_info_t info = {
					.os_reason = OS_REASON_GUARD,
					.exception_type = EXC_GUARD,
					.mx_code = code,
					.mx_subcode = subcode,
				};

				if (sync_exception_result == KERN_SUCCESS) {
					flags |= PX_PSIGNAL;
				}

				exit_with_mach_exception(get_bsdtask_info(task), info, flags);
			}
		} else if (task->task_exc_guard & TASK_EXC_GUARD_MP_CORPSE) {
			/* Raise exception via corpse fork if not handled synchronously */
			if (sync_exception_result != KERN_SUCCESS) {
				task_violated_guard(code, subcode, NULL, TRUE);
			}
		}
	}
}