xref: /xnu-11215.81.4/osfmk/ipc/ipc_policy.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2023 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/kern_return.h>
30 #include <mach/mach_types.h>
31 #include <mach/port.h>
32 
33 #include <kern/assert.h>
34 #include <kern/kern_types.h>
35 #include <kern/ipc_kobject.h>
36 #include <kern/mach_filter.h>
37 #include <kern/task.h>
38 
39 #include <vm/vm_map_xnu.h> /* current_map() */
40 #include <vm/vm_protos.h> /* current_proc() */
41 
42 #include <ipc/ipc_policy.h>
43 #include <ipc/ipc_service_port.h>
44 #include <ipc/port.h>
45 
46 #if CONFIG_CSR
47 #include <sys/csr.h>
48 #endif
49 #include <sys/codesign.h>
50 #include <sys/proc_ro.h>
51 
52 #include <libkern/coreanalytics/coreanalytics.h>
53 
54 extern int  proc_isinitproc(struct proc *p);
55 extern bool proc_is_simulated(struct proc *);
56 extern char *proc_name_address(struct proc *p);
57 
58 
#pragma mark policy tunables

/* Upper bound on VM space a single kmsg may consume (defined elsewhere). */
extern const vm_size_t  ipc_kmsg_max_vm_space;

#if IPC_HAS_LEGACY_MACH_MSG_TRAP
#if DEVELOPMENT || DEBUG
/*
 * Boot-arg escape hatch ("allow_legacy_mach_msg") that lets development
 * and debug kernels keep accepting the legacy mach_msg() trap even for
 * tasks that would otherwise be denied (see ipc_policy_allow_legacy_send_trap).
 */
static TUNABLE(bool, allow_legacy_mach_msg, "allow_legacy_mach_msg", false);
#endif /* DEVELOPMENT || DEBUG */
#endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */
68 
69 
70 #pragma mark policy utils
71 
/*
 * Routine: __ipc_unreachable
 * Purpose:
 *	Abort the kernel when an IPC code path that should be
 *	unreachable is hit, reporting the reason and location.
 * Conditions:
 *	Never returns (__abortlike).
 */
__abortlike void
__ipc_unreachable(
	const char *reason,
	const char *file,
	int         line)
{
	/* (panic) is parenthesized to call the function and bypass any
	 * function-like panic() macro expansion. */
	(panic)("%s @%s:%d", reason, file, line);
}
80 
81 
82 #pragma mark policy options
83 
/*
 * Routine: ipc_current_user_policy
 * Purpose:
 *	Sanitize user-supplied mach_msg options and augment them with
 *	MACH64_POLICY_* flags derived from the calling task's read-only
 *	flags (platform binary, hardened, filtering) and, on macOS,
 *	its BSD process attributes (simulator, hardened runtime).
 * Returns:
 *	The sanitized option set; at least one MACH64_POLICY_* bit
 *	is guaranteed to be set on return.
 */
mach_msg_option64_t
ipc_current_user_policy(
	task_t                  task,
	mach_msg_option64_t     opts)
{
	uint32_t ro_flags = task_ro_flags_get(task);

	/*
	 * Step 1: convert to kernel flags
	 * - clear any kernel only flags
	 * - convert MACH_SEND_FILTER_NONFATAL which is aliased to the
	 *   MACH_SEND_ALWAYS kernel flag into MACH64_POLICY_FILTER_NON_FATAL.
	 */
	opts &= MACH64_MSG_OPTION_USER;

	if (opts & MACH64_SEND_FILTER_NONFATAL) {
		/*
		 * Move the aliased user bit onto its dedicated policy bit
		 * so it cannot be confused with MACH_SEND_ALWAYS downstream.
		 */
		opts &= ~MACH64_SEND_FILTER_NONFATAL;
		opts |= MACH64_POLICY_FILTER_NON_FATAL;
	}
	if (ro_flags & TFRO_FILTER_MSG) {
		opts |= MACH64_POLICY_FILTER_MSG;
	}

	/*
	 * Step 2: derive policy flags from the current context
	 */
	if (ro_flags & TFRO_PLATFORM) {
		/* platform binaries get the strictest policy set */
		opts |= MACH64_POLICY_PLATFORM;
		opts |= MACH64_POLICY_RIGID;
		opts |= MACH64_POLICY_HARDENED;
	}
	if (ro_flags & TFRO_HARDENED) {
		opts |= MACH64_POLICY_RIGID;
		opts |= MACH64_POLICY_HARDENED;
	}
#if CONFIG_ROSETTA
	if (task_is_translated(task)) {
		opts |= MACH64_POLICY_TRANSLATED;
	}
#endif
#if XNU_TARGET_OS_OSX
	struct proc *proc = get_bsdtask_info(task);
	if (proc_is_simulated(proc)) {
		opts |= MACH64_POLICY_SIMULATED;
	}
	if (csproc_hardened_runtime(proc)) {
		opts |= MACH64_POLICY_HARDENED;
	}
#endif
	if (!(opts & MACH64_POLICY_NEEDED_MASK)) {
		/* helps assert that a policy has been set */
		opts |= MACH64_POLICY_DEFAULT;
	}

	return opts;
}
142 
143 mach_msg_return_t
ipc_preflight_msg_option64(mach_msg_option64_t opts)144 ipc_preflight_msg_option64(mach_msg_option64_t opts)
145 {
146 	bool success = true;
147 
148 	if ((opts & MACH64_SEND_MSG) && (opts & MACH64_MACH_MSG2)) {
149 		mach_msg_option64_t cfi = opts & MACH64_MSG_OPTION_CFI_MASK;
150 
151 #if !XNU_TARGET_OS_OSX
152 		cfi &= ~MACH64_SEND_ANY;
153 #endif
154 		/* mach_msg2() calls must have exactly _one_ of these set */
155 		if (cfi == 0 || (cfi & (cfi - 1)) != 0) {
156 			success = false;
157 		}
158 
159 		/* vector calls are only supported for message queues */
160 		if ((opts & (MACH64_SEND_MQ_CALL | MACH64_SEND_ANY)) == 0 &&
161 		    (opts & MACH64_MSG_VECTOR)) {
162 			success = false;
163 		}
164 	}
165 
166 	if (success) {
167 		return MACH_MSG_SUCCESS;
168 	}
169 
170 	mach_port_guard_exception(0, 0, 0, kGUARD_EXC_INVALID_OPTIONS);
171 	if (opts & MACH64_MACH_MSG2) {
172 		return MACH_SEND_INVALID_OPTIONS;
173 	}
174 	return KERN_NOT_SUPPORTED;
175 }
176 
177 
178 #pragma mark legacy trap policies
179 #if IPC_HAS_LEGACY_MACH_MSG_TRAP
180 
/*
 * CoreAnalytics event recording a use of the legacy mach_msg() trap:
 * message id, platform/SDK of the sender, and its code-signing identity.
 */
CA_EVENT(mach_msg_trap_event,
    CA_INT, msgh_id,
    CA_INT, sw_platform,
    CA_INT, sdk,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
188 
189 static void
mach_msg_legacy_send_analytics(mach_msg_id_t msgh_id,uint32_t platform,uint32_t sdk)190 mach_msg_legacy_send_analytics(
191 	mach_msg_id_t           msgh_id,
192 	uint32_t                platform,
193 	uint32_t                sdk)
194 {
195 	char *proc_name = proc_name_address(current_proc());
196 	const char *team_id = csproc_get_teamid(current_proc());
197 	const char *signing_id = csproc_get_identity(current_proc());
198 
199 	ca_event_t ca_event = CA_EVENT_ALLOCATE(mach_msg_trap_event);
200 	CA_EVENT_TYPE(mach_msg_trap_event) * msg_event = ca_event->data;
201 
202 	msg_event->msgh_id = msgh_id;
203 	msg_event->sw_platform = platform;
204 	msg_event->sdk = sdk;
205 
206 	if (proc_name) {
207 		strlcpy(msg_event->proc_name, proc_name, CA_PROCNAME_LEN);
208 	}
209 
210 	if (team_id) {
211 		strlcpy(msg_event->team_id, team_id, CA_TEAMID_MAX_LEN);
212 	}
213 
214 	if (signing_id) {
215 		strlcpy(msg_event->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
216 	}
217 
218 	CA_EVENT_SEND(ca_event);
219 }
220 
221 static bool
ipc_policy_allow_legacy_mach_msg_trap_for_platform(mach_msg_id_t msgid)222 ipc_policy_allow_legacy_mach_msg_trap_for_platform(
223 	mach_msg_id_t           msgid)
224 {
225 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
226 	uint32_t platform = pro->p_platform_data.p_platform;
227 	uint32_t sdk = pro->p_platform_data.p_sdk;
228 	uint32_t sdk_major = sdk >> 16;
229 
230 	/*
231 	 * Special rules, due to unfortunate bincompat reasons,
232 	 * allow for a hardcoded list of MIG calls to XNU to go through:
233 	 * - for iOS, Catalyst and iOS Simulator apps linked against
234 	 *   an SDK older than 15.x,
235 	 * - for macOS apps linked against an SDK older than 12.x.
236 	 */
237 	switch (platform) {
238 	case PLATFORM_IOS:
239 	case PLATFORM_IOSSIMULATOR:
240 	case PLATFORM_MACCATALYST:
241 		if (sdk == 0 || sdk_major > 15) {
242 			return false;
243 		}
244 		break;
245 	case PLATFORM_MACOS:
246 		if (sdk == 0 || sdk_major > 12) {
247 			return false;
248 		}
249 		break;
250 	default:
251 		return false;
252 	}
253 
254 	switch (msgid) {
255 	case 0xd4a: /* task_threads */
256 	case 0xd4d: /* task_info */
257 	case 0xe13: /* thread_get_state */
258 	case 0x12c4: /* mach_vm_read */
259 	case 0x12c8: /* mach_vm_read_overwrite */
260 		mach_msg_legacy_send_analytics(msgid, platform, sdk);
261 		return true;
262 	default:
263 		return false;
264 	}
265 }
266 
267 
/*
 * Routine: ipc_policy_allow_legacy_send_trap
 * Purpose:
 *	Gate access to the legacy mach_msg() send trap. Hardened
 *	processes are always denied; others are allowed through a
 *	series of bincompat carve-outs (32-bit x86, Rosetta,
 *	development boot-arg, old-SDK MIG calls).
 * Returns:
 *	MACH_MSG_SUCCESS    the legacy trap may proceed.
 *	KERN_NOT_SUPPORTED  denied (a guard exception is raised).
 */
mach_msg_return_t
ipc_policy_allow_legacy_send_trap(
	mach_msg_id_t           msgid,
	mach_msg_option64_t     opts)
{
	if ((opts & MACH64_POLICY_HARDENED) == 0) {
#if __x86_64__
		if (current_map()->max_offset <= VM_MAX_ADDRESS) {
			/*
			 * Legacy mach_msg_trap() is the only
			 * available thing for 32-bit tasks
			 */
			return MACH_MSG_SUCCESS;
		}
#endif /* __x86_64__ */
#if CONFIG_ROSETTA
		if (opts & MACH64_POLICY_TRANSLATED) {
			/*
			 * Similarly, on Rosetta, allow mach_msg_trap()
			 * as those apps likely can't be fixed anymore
			 */
			return MACH_MSG_SUCCESS;
		}
#endif
#if DEVELOPMENT || DEBUG
		if (allow_legacy_mach_msg) {
			/* Honor boot-arg */
			return MACH_MSG_SUCCESS;
		}
#endif /* DEVELOPMENT || DEBUG */
		if (ipc_policy_allow_legacy_mach_msg_trap_for_platform(msgid)) {
			return MACH_MSG_SUCCESS;
		}
	}

	mach_port_guard_exception(msgid, 0, 0, kGUARD_EXC_INVALID_OPTIONS);
	/*
	 * this should be MACH_SEND_INVALID_OPTIONS,
	 * but this is a new mach_msg2 error only.
	 */
	return KERN_NOT_SUPPORTED;
}
310 
311 
312 #endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */
313 #pragma mark reply port semantics telemetry
314 
/*
 * As the CA framework relies on successfully allocating zalloc memory,
 * we maintain a small buffer that gets flushed when full. This helps us
 * avoid taking spin locks while working with CA.
 */
#define REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE         2

/*
 * Stripped down version of service port's string name. This is to avoid
 * overwhelming CA's dynamic memory allocation.
 */
#define CA_MACH_SERVICE_PORT_NAME_LEN                   86

/* One buffered reply-port-semantics violation report. */
struct reply_port_semantics_violations_rb_entry {
	char proc_name[CA_PROCNAME_LEN];
	char service_name[CA_MACH_SERVICE_PORT_NAME_LEN];
	char team_id[CA_TEAMID_MAX_LEN];
	char signing_id[CA_SIGNINGID_MAX_LEN];
	int  reply_port_semantics_violation;
	int  sw_platform;
	int  msgh_id;
	int  sdk;
};
/* Ring buffer of pending reports; guarded by reply_port_telemetry_lock. */
struct reply_port_semantics_violations_rb_entry reply_port_semantics_violations_rb[REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE];
/* Next free slot in the buffer (== SIZE means full); guarded by the same lock. */
static uint8_t reply_port_semantics_violations_rb_index = 0;

LCK_GRP_DECLARE(reply_port_telemetry_lock_grp, "reply_port_telemetry_lock_grp");
LCK_SPIN_DECLARE(reply_port_telemetry_lock, &reply_port_telemetry_lock_grp);

/* Telemetry: report back the process name violating reply port semantics */
CA_EVENT(reply_port_semantics_violations,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name,
    CA_STATIC_STRING(CA_MACH_SERVICE_PORT_NAME_LEN), service_name,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_INT, reply_port_semantics_violation);
349 
350 static void
send_reply_port_telemetry(const struct reply_port_semantics_violations_rb_entry * entry)351 send_reply_port_telemetry(
352 	const struct reply_port_semantics_violations_rb_entry *entry)
353 {
354 	ca_event_t ca_event = CA_EVENT_ALLOCATE_FLAGS(reply_port_semantics_violations, Z_NOWAIT);
355 	if (ca_event) {
356 		CA_EVENT_TYPE(reply_port_semantics_violations) * event = ca_event->data;
357 
358 		strlcpy(event->service_name, entry->service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
359 		strlcpy(event->proc_name, entry->proc_name, CA_PROCNAME_LEN);
360 		strlcpy(event->team_id, entry->team_id, CA_TEAMID_MAX_LEN);
361 		strlcpy(event->signing_id, entry->signing_id, CA_SIGNINGID_MAX_LEN);
362 		event->reply_port_semantics_violation = entry->reply_port_semantics_violation;
363 
364 		CA_EVENT_SEND(ca_event);
365 	}
366 }
367 
368 /* Routine: flush_reply_port_semantics_violations_telemetry
369  * Conditions:
370  *              Assumes the reply_port_telemetry_lock is held.
371  *              Unlocks it before returning.
372  */
/* Routine: flush_reply_port_semantics_violations_telemetry
 * Conditions:
 *              Assumes the reply_port_telemetry_lock is held on entry.
 *              Drops the lock while talking to CoreAnalytics (which
 *              allocates memory) and re-acquires it before returning,
 *              so the lock is held again when this returns.
 */
static void
flush_reply_port_semantics_violations_telemetry(void)
{
	/* stack copy of the shared buffer, filled while the lock is held */
	struct reply_port_semantics_violations_rb_entry local_rb[REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE];
	uint8_t local_rb_index = 0;

	if (__improbable(reply_port_semantics_violations_rb_index > REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE)) {
		panic("Invalid reply port semantics violations buffer index %d > %d",
		    reply_port_semantics_violations_rb_index, REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE);
	}

	/*
	 * We operate on a local copy of the telemetry buffer because the CA
	 * framework relies on successfully allocating zalloc memory. It can
	 * not do that if we are accessing the shared buffer with spin locks
	 * held.
	 */
	while (local_rb_index != reply_port_semantics_violations_rb_index) {
		local_rb[local_rb_index] = reply_port_semantics_violations_rb[local_rb_index];
		local_rb_index++;
	}

	lck_spin_unlock(&reply_port_telemetry_lock);

	while (local_rb_index > 0) {
		struct reply_port_semantics_violations_rb_entry *entry = &local_rb[--local_rb_index];

		send_reply_port_telemetry(entry);
	}

	/*
	 * Finally mark the buffer as empty. This also acts as a rate limiting
	 * mechanism for the events: new events get dropped until the buffer
	 * has been fully flushed.
	 */
	lck_spin_lock(&reply_port_telemetry_lock);
	reply_port_semantics_violations_rb_index = 0;
}
409 
410 void
stash_reply_port_semantics_violations_telemetry(mach_service_port_info_t sp_info,int reply_port_semantics_violation,int msgh_id)411 stash_reply_port_semantics_violations_telemetry(
412 	mach_service_port_info_t sp_info,
413 	int                     reply_port_semantics_violation,
414 	int                     msgh_id)
415 {
416 	struct reply_port_semantics_violations_rb_entry *entry;
417 
418 	task_t task = current_task_early();
419 	if (task) {
420 		struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
421 		uint32_t platform = pro->p_platform_data.p_platform;
422 		uint32_t sdk = pro->p_platform_data.p_sdk;
423 		char *proc_name = (char *) "unknown";
424 #ifdef MACH_BSD
425 		proc_name = proc_name_address(get_bsdtask_info(task));
426 #endif /* MACH_BSD */
427 		const char *team_id = csproc_get_identity(current_proc());
428 		const char *signing_id = csproc_get_teamid(current_proc());
429 		char *service_name = (char *) "unknown";
430 		if (sp_info) {
431 			service_name = sp_info->mspi_string_name;
432 		}
433 
434 		lck_spin_lock(&reply_port_telemetry_lock);
435 
436 		if (reply_port_semantics_violations_rb_index >= REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE) {
437 			/* Dropping the event since buffer is full. */
438 			lck_spin_unlock(&reply_port_telemetry_lock);
439 			return;
440 		}
441 		entry = &reply_port_semantics_violations_rb[reply_port_semantics_violations_rb_index++];
442 		strlcpy(entry->proc_name, proc_name, CA_PROCNAME_LEN);
443 
444 		strlcpy(entry->service_name, service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
445 		entry->reply_port_semantics_violation = reply_port_semantics_violation;
446 		if (team_id) {
447 			strlcpy(entry->team_id, team_id, CA_TEAMID_MAX_LEN);
448 		}
449 
450 		if (signing_id) {
451 			strlcpy(entry->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
452 		}
453 		entry->msgh_id = msgh_id;
454 		entry->sw_platform = platform;
455 		entry->sdk = sdk;
456 	}
457 
458 	if (reply_port_semantics_violations_rb_index == REPLY_PORT_SEMANTICS_VIOLATIONS_RB_SIZE) {
459 		flush_reply_port_semantics_violations_telemetry();
460 	}
461 
462 	lck_spin_unlock(&reply_port_telemetry_lock);
463 }
464 
465 void
send_prp_telemetry(int msgh_id)466 send_prp_telemetry(int msgh_id)
467 {
468 	if (csproc_hardened_runtime(current_proc())) {
469 		stash_reply_port_semantics_violations_telemetry(NULL, MRP_HARDENED_RUNTIME_VIOLATOR, msgh_id);
470 	} else {
471 		stash_reply_port_semantics_violations_telemetry(NULL, MRP_3P_VIOLATOR, msgh_id);
472 	}
473 }
474 
475 
476 #pragma mark MACH_SEND_MSG policies
477 
478 mach_msg_return_t
ipc_validate_kmsg_header_schema_from_user(mach_msg_user_header_t * hdr __unused,mach_msg_size_t dsc_count,mach_msg_option64_t opts)479 ipc_validate_kmsg_header_schema_from_user(
480 	mach_msg_user_header_t *hdr __unused,
481 	mach_msg_size_t         dsc_count,
482 	mach_msg_option64_t     opts)
483 {
484 	if (opts & MACH64_SEND_KOBJECT_CALL) {
485 		if (dsc_count > IPC_KOBJECT_DESC_MAX) {
486 			return MACH_SEND_TOO_LARGE;
487 		}
488 	}
489 
490 	return MACH_MSG_SUCCESS;
491 }
492 
493 mach_msg_return_t
ipc_validate_kmsg_schema_from_user(mach_msg_header_t * kdata,mach_msg_send_uctx_t * send_uctx,mach_msg_option64_t opts __unused)494 ipc_validate_kmsg_schema_from_user(
495 	mach_msg_header_t      *kdata,
496 	mach_msg_send_uctx_t   *send_uctx,
497 	mach_msg_option64_t     opts __unused)
498 {
499 	mach_msg_kbase_t *kbase = NULL;
500 	vm_size_t vm_size;
501 
502 	if (kdata->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
503 		kbase = mach_msg_header_to_kbase(kdata);
504 	}
505 
506 	if (send_uctx->send_dsc_port_count > IPC_KMSG_MAX_OOL_PORT_COUNT) {
507 		return MACH_SEND_TOO_LARGE;
508 	}
509 
510 	if (os_add_overflow(send_uctx->send_dsc_vm_size,
511 	    send_uctx->send_dsc_port_count * sizeof(mach_port_t), &vm_size)) {
512 		return MACH_SEND_TOO_LARGE;
513 	}
514 	if (vm_size > ipc_kmsg_max_vm_space) {
515 		return MACH_MSG_VM_KERNEL;
516 	}
517 
518 	return MACH_MSG_SUCCESS;
519 }
520 
/*
 * Routine: ipc_filter_kmsg_header_from_user
 * Purpose:
 *	Evaluate send-side message filtering for a user message whose
 *	destination port enforces filtering. Service ports and
 *	connection ports may carry a sandbox filter label; launchd's
 *	bootstrap port additionally encodes a domain in the top byte
 *	of the message id.
 * Returns:
 *	MACH_MSG_SUCCESS         message allowed (or port inactive).
 *	MACH_SEND_MSG_FILTERED   message denied by policy; a guard
 *	                         exception is raised unless the caller
 *	                         requested non-fatal filtering.
 */
static mach_msg_return_t
ipc_filter_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_msg_option64_t     opts)
{
	/* low 24 bits of a bootstrap-port message id are the call number */
	static const uint32_t MACH_BOOTSTRAP_PORT_MSG_ID_MASK = ((1u << 24) - 1);

	mach_msg_filter_id fid = 0;
	mach_port_t remote_port = hdr->msgh_remote_port;
	mach_msg_id_t msg_id = hdr->msgh_id;
	ipc_service_port_label_t label = NULL;
	void *sblabel = NULL;

	if (!ip_enforce_msg_filtering(remote_port)) {
		return MACH_MSG_SUCCESS;
	}

	ip_mq_lock(remote_port);
	if (!ip_active(remote_port)) {
		/* nothing to do */
	} else if (remote_port->ip_service_port) {
		label   = remote_port->ip_splabel;
		sblabel = label->ispl_sblabel;

		/*
		 * Mask the top byte for messages sent to launchd's bootstrap port.
		 * Filter any messages with domain 0 (as they correspond to MIG
		 * based messages)
		 */
		if (ipc_service_port_label_is_bootstrap_port(label)) {
			if ((msg_id & ~MACH_BOOTSTRAP_PORT_MSG_ID_MASK) == 0) {
				ip_mq_unlock(remote_port);
				goto filtered_msg;
			}
			msg_id = msg_id & MACH_BOOTSTRAP_PORT_MSG_ID_MASK;
		}
	} else {
		assert(!ip_is_kolabeled(remote_port));
		/* Connection ports can also have send-side message filters */
		sblabel = remote_port->ip_splabel;
	}
	if (sblabel) {
		/* keep the label alive across the unlocked policy lookup */
		mach_msg_filter_retain_sblabel_callback(sblabel);
	}
	ip_mq_unlock(remote_port);

	/* policy lookup is done without the port lock held */
	if (sblabel && !mach_msg_fetch_filter_policy(sblabel, msg_id, &fid)) {
		goto filtered_msg;
	}
	return MACH_MSG_SUCCESS;

filtered_msg:
	if ((opts & MACH64_POLICY_FILTER_NON_FATAL) == 0) {
		mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(hdr->msgh_remote_port);

		mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED);
	}
	return MACH_SEND_MSG_FILTERED;
}
580 
581 static bool
ipc_policy_allow_send_only_kobject_calls(void)582 ipc_policy_allow_send_only_kobject_calls(void)
583 {
584 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
585 	uint32_t sdk = pro->p_platform_data.p_sdk;
586 	uint32_t sdk_major = sdk >> 16;
587 
588 	switch (pro->p_platform_data.p_platform) {
589 	case PLATFORM_IOS:
590 	case PLATFORM_MACCATALYST:
591 	case PLATFORM_TVOS:
592 		if (sdk == 0 || sdk_major > 17) {
593 			return false;
594 		}
595 		return true;
596 	case PLATFORM_MACOS:
597 		if (sdk == 0 || sdk_major > 14) {
598 			return false;
599 		}
600 		return true;
601 	case PLATFORM_WATCHOS:
602 		if (sdk == 0 || sdk_major > 10) {
603 			return false;
604 		}
605 		return true;
606 	default:
607 		return false;
608 	}
609 }
610 
/*
 * Routine: ipc_validate_kmsg_dest_from_user
 * Purpose:
 *	Enforce mach_msg2() CFI rules: the MACH64_SEND_*_CALL option
 *	must match the kind of the destination port (kobject vs
 *	message queue), with narrow bincompat and SIP-off carve-outs.
 * Returns:
 *	MACH_MSG_SUCCESS or MACH_SEND_INVALID_OPTIONS.
 */
static mach_msg_return_t
ipc_validate_kmsg_dest_from_user(
	mach_msg_header_t      *hdr,
	ipc_port_t              port,
	mach_msg_option64_t     opts)
{
	/*
	 * This is a _user_ message via mach_msg2_trap().
	 *
	 * To curb kobject port/message queue confusion and improve control flow
	 * integrity, mach_msg2_trap() invocations mandate the use of either
	 * MACH64_SEND_KOBJECT_CALL or MACH64_SEND_MQ_CALL and that the flag
	 * matches the underlying port type. (unless the call is from a simulator,
	 * since old simulators keep using mach_msg() in all cases indiscriminatingly.)
	 *
	 * Since:
	 *     (1) We make sure to always pass either MACH64_SEND_MQ_CALL or
	 *         MACH64_SEND_KOBJECT_CALL bit at all sites outside simulators
	 *         (checked by mach_msg2_trap());
	 *     (2) We checked in mach_msg2_trap() that _exactly_ one of the three bits is set.
	 *
	 * CFI check cannot be bypassed by simply setting MACH64_SEND_ANY.
	 */
#if XNU_TARGET_OS_OSX
	if (opts & MACH64_SEND_ANY) {
		/* macOS-only blanket escape hatch */
		return MACH_MSG_SUCCESS;
	}
#endif /* XNU_TARGET_OS_OSX */

	if (ip_is_kobject(port)) {
		natural_t kotype = ip_kotype(port);

		if (__improbable(kotype == IKOT_TIMER)) {
#if XNU_TARGET_OS_OSX
			if (__improbable(opts & MACH64_POLICY_HARDENED)) {
				return MACH_SEND_INVALID_OPTIONS;
			}
			/*
			 * For bincompat, let's still allow user messages to timer port, but
			 * force MACH64_SEND_MQ_CALL flag for memory segregation.
			 */
			if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
#else
			return MACH_SEND_INVALID_OPTIONS;
#endif
		} else if (kotype == IKOT_UEXT_OBJECT) {
			/* DriverKit objects take their own dedicated call flag */
			if (__improbable(!(opts & MACH64_SEND_DK_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
		} else {
			/* Otherwise, caller must set MACH64_SEND_KOBJECT_CALL. */
			if (__improbable(!(opts & MACH64_SEND_KOBJECT_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}

			/* kobject calls must be a combined send/receive */
			if (__improbable((opts & MACH64_RCV_MSG) == 0)) {
				if ((opts & MACH64_POLICY_HARDENED) ||
				    IP_VALID(hdr->msgh_local_port) ||
				    !ipc_policy_allow_send_only_kobject_calls()) {
					return MACH_SEND_INVALID_OPTIONS;
				}
			}
		}
#if CONFIG_CSR
	} else if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
		/*
		 * Allow MACH64_SEND_KOBJECT_CALL flag to message queues
		 * when SIP is off (for Mach-on-Mach emulation).
		 * NOTE: the empty body is deliberate; this arm swallows the
		 * message-queue check below when the CSR check passes.
		 */
#endif /* CONFIG_CSR */
	} else {
		/* If destination is a message queue, caller must set MACH64_SEND_MQ_CALL */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	return MACH_MSG_SUCCESS;
}
693 
694 mach_msg_return_t
ipc_validate_kmsg_header_from_user(mach_msg_header_t * hdr,mach_msg_send_uctx_t * send_uctx,mach_msg_option64_t opts)695 ipc_validate_kmsg_header_from_user(
696 	mach_msg_header_t      *hdr,
697 	mach_msg_send_uctx_t   *send_uctx,
698 	mach_msg_option64_t     opts)
699 {
700 	ipc_port_t dest_port = hdr->msgh_remote_port;
701 	mach_msg_return_t mr = KERN_SUCCESS;
702 
703 	if (opts & MACH64_MACH_MSG2) {
704 		mr = ipc_validate_kmsg_dest_from_user(hdr, dest_port, opts);
705 		if (mr != MACH_MSG_SUCCESS) {
706 			goto out;
707 		}
708 	}
709 
710 	/*
711 	 * Check if dest is a no-grant port; Since this bit is set only on
712 	 * port construction and cannot be unset later, we can peek at the
713 	 * bit without paying the cost of locking the port.
714 	 */
715 	if (send_uctx->send_dsc_port_count && dest_port->ip_no_grant) {
716 		mr = MACH_SEND_NO_GRANT_DEST;
717 		goto out;
718 	}
719 
720 	/*
721 	 * Evaluate message filtering if the sender is filtered.
722 	 */
723 	if ((opts & MACH64_POLICY_FILTER_MSG) &&
724 	    mach_msg_filter_at_least(MACH_MSG_FILTER_CALLBACKS_VERSION_1)) {
725 		mr = ipc_filter_kmsg_header_from_user(hdr, opts);
726 		if (mr != MACH_MSG_SUCCESS) {
727 			goto out;
728 		}
729 	}
730 
731 out:
732 	if (mr == MACH_SEND_INVALID_OPTIONS) {
733 		mach_port_guard_exception(0, 0, 0, kGUARD_EXC_INVALID_OPTIONS);
734 	}
735 	return mr;
736 }
737