xref: /xnu-12377.41.6/osfmk/ipc/ipc_policy.c (revision bbb1b6f9e71b8cdde6e5cd6f4841f207dee3d828)
1 /*
2  * Copyright (c) 2023 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/kern_return.h>
30 #include <mach/mach_types.h>
31 #include <mach/port.h>
32 #include <mach/mk_timer.h>
33 #include <mach/notify.h>
34 
35 #include <kern/assert.h>
36 #include <kern/exc_guard.h>
37 #include <kern/ipc_kobject.h>
38 #include <kern/ipc_tt.h>
39 #include <kern/kern_types.h>
40 #include <kern/mach_filter.h>
41 #include <kern/task.h>
42 #include <kern/ux_handler.h> /* is_ux_handler_port() */
43 
44 #include <vm/vm_map_xnu.h> /* current_map() */
45 #include <vm/vm_protos.h> /* current_proc() */
46 
47 #include <ipc/ipc_policy.h>
48 #include <ipc/ipc_service_port.h>
49 #include <ipc/port.h>
50 
51 #if CONFIG_CSR
52 #include <sys/csr.h>
53 #endif
54 #include <sys/codesign.h>
55 #include <sys/proc_ro.h>
56 #include <sys/reason.h>
57 
58 #include <libkern/coreanalytics/coreanalytics.h>
59 
60 extern bool proc_is_simulated(struct proc *);
61 extern char *proc_name_address(struct proc *p);
62 extern int  exit_with_guard_exception(
63 	struct proc            *p,
64 	mach_exception_data_type_t code,
65 	mach_exception_data_type_t subcode);
66 
#pragma mark policy tunables

/* upper bound on OOL memory a single kmsg may carry (defined elsewhere) */
extern const vm_size_t  ipc_kmsg_max_vm_space;

#if IPC_HAS_LEGACY_MACH_MSG_TRAP
#if DEVELOPMENT || DEBUG
/* boot-arg escape hatch to keep the legacy mach_msg() trap usable on dev kernels */
static TUNABLE(bool, allow_legacy_mach_msg, "allow_legacy_mach_msg", false);
#endif /* DEVELOPMENT || DEBUG */
#endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */

/* a boot-arg to enable/disable OOL port array restrictions */
#if XNU_TARGET_OS_XR
TUNABLE(bool, ool_port_array_enforced, "ool_port_array_enforced", false);
#else
TUNABLE(bool, ool_port_array_enforced, "ool_port_array_enforced", true);
#endif /* XNU_TARGET_OS_XR */

/* Note: Consider Developer Mode when changing the default. */
TUNABLE(ipc_control_port_options_t, ipc_control_port_options,
    "ipc_control_port_options",
    ICP_OPTIONS_IMMOVABLE_1P_HARD |
    ICP_OPTIONS_PINNED_1P_HARD |
#if !XNU_TARGET_OS_OSX
    ICP_OPTIONS_IMMOVABLE_3P_HARD |
#endif
    ICP_OPTIONS_PINNED_3P_SOFT);

/* boot-arg for hardened service-port handling (reply-port checks below) */
TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", true);

/* The bootarg to disable ALL ipc policy violation telemetry */
TUNABLE(bool, ipcpv_telemetry_enabled, "-ipcpv_telemetry_enabled", true);

/* boot-arg for provisional reply port enforcement */
#if XNU_TARGET_OS_OSX || XNU_TARGET_OS_BRIDGE
TUNABLE(bool, prp_enforcement_enabled, "-prp_enforcement_enabled", false);
#else
TUNABLE(bool, prp_enforcement_enabled, "-prp_enforcement_enabled", true);
#endif /* XNU_TARGET_OS_OSX || XNU_TARGET_OS_BRIDGE */

/*
 * bootargs for reply port semantics on bootstrap ports
 */
TUNABLE(bool, bootstrap_port_telemetry_enabled, "-bootstrap_port_telemetry_enabled", true);
TUNABLE(bool, bootstrap_port_enforcement_enabled, "-bootstrap_port_enforcement_enabled", true);

/* Enables reply port/voucher/persona debugging code */
TUNABLE(bool, enforce_strict_reply, "-enforce_strict_reply", false);
114 
115 #pragma mark policy options
116 
117 ipc_space_policy_t
ipc_policy_for_task(task_t task)118 ipc_policy_for_task(task_t task)
119 {
120 #if XNU_TARGET_OS_OSX
121 	struct proc *proc = get_bsdtask_info(task);
122 #endif /* XNU_TARGET_OS_OSX */
123 	ipc_space_policy_t policy = IPC_SPACE_POLICY_DEFAULT;
124 	uint32_t ro_flags;
125 
126 	if (task == kernel_task) {
127 		return policy | IPC_SPACE_POLICY_KERNEL;
128 	}
129 
130 	ro_flags = task_ro_flags_get(task);
131 	if (ro_flags & TFRO_PLATFORM) {
132 		policy |= IPC_SPACE_POLICY_PLATFORM;
133 		policy |= IPC_POLICY_ENHANCED_V2;
134 	}
135 
136 	if (task_get_platform_restrictions_version(task) >= 2) {
137 		policy |= IPC_POLICY_ENHANCED_V2;
138 	} else if (task_get_platform_restrictions_version(task) == 1) {
139 		policy |= IPC_POLICY_ENHANCED_V1;
140 #if XNU_TARGET_OS_OSX
141 	} else if (proc && csproc_hardened_runtime(proc)) {
142 		policy |= IPC_POLICY_ENHANCED_V0;
143 #endif /* XNU_TARGET_OS_OSX */
144 	}
145 
146 #if XNU_TARGET_OS_OSX
147 	if (task_opted_out_mach_hardening(task)) {
148 		policy |= IPC_SPACE_POLICY_OPTED_OUT;
149 	}
150 #endif /* XNU_TARGET_OS_OSX */
151 
152 	/*
153 	 * policy modifiers
154 	 */
155 #if XNU_TARGET_OS_OSX
156 	if (proc && proc_is_simulated(proc)) {
157 		policy |= IPC_SPACE_POLICY_SIMULATED;
158 	}
159 #endif
160 #if CONFIG_ROSETTA
161 	if (task_is_translated(task)) {
162 		policy |= IPC_SPACE_POLICY_TRANSLATED;
163 	}
164 #endif
165 
166 	return policy;
167 }
168 
169 
/*
 * Recover the sender's ipc_space_policy_t from a mach_msg_option64_t:
 * the policy bits are stored in the high bits of the option word
 * (see verify_policy_enum in ipc_current_msg_options for the invariant).
 */
inline ipc_space_policy_t
ipc_convert_msg_options_to_space(mach_msg_option64_t opts)
{
	return opts >> MACH64_POLICY_SHIFT;
}
175 
/*
 * Sanitize user-supplied message options and stamp them with the
 * policy bits derived from the sending task's IPC space.
 *
 * Returns the sanitized option word: user bits masked to
 * MACH64_MSG_OPTION_USER, filter aliases rewritten, and the task's
 * ipc_space_policy_t shifted into the MACH64_POLICY_* bits.
 */
mach_msg_option64_t
ipc_current_msg_options(
	task_t                  task,
	mach_msg_option64_t     opts)
{
	uint32_t ro_flags = task_ro_flags_get(task);

	/*
	 * Step 1: convert to kernel flags
	 * - clear any kernel only flags
	 * - convert MACH_SEND_FILTER_NONFATAL which is aliased to the
	 *   MACH_SEND_ALWAYS kernel flag into MACH64_POLICY_FILTER_NON_FATAL.
	 */
	opts &= MACH64_MSG_OPTION_USER;

	if (opts & MACH64_SEND_FILTER_NONFATAL) {
		/*
		 * Rewrite the user-visible alias into the kernel-internal
		 * policy bit so it cannot be confused with the kernel's
		 * MACH_SEND_ALWAYS flag later in the send path.
		 */
		opts &= ~MACH64_SEND_FILTER_NONFATAL;
		opts |= MACH64_POLICY_FILTER_NON_FATAL;
	}
	if (ro_flags & TFRO_FILTER_MSG) {
		opts |= MACH64_POLICY_FILTER_MSG;
	}

	/*
	 * Step 2: derive policy flags from the current context
	 */
	{
		/*
		 * mach_msg_option64_t can't use IPC_SPACE_POLICY_BASE(),
		 * check using this MACH64_POLICY_SHIFT is legitimate.
		 */
#define verify_policy_enum(name) \
	static_assert(IPC_SPACE_POLICY_ ## name == \
	    MACH64_POLICY_ ## name >> MACH64_POLICY_SHIFT)

		verify_policy_enum(DEFAULT);
		verify_policy_enum(ENHANCED);
		verify_policy_enum(PLATFORM);
		verify_policy_enum(KERNEL);
		verify_policy_enum(SIMULATED);
		verify_policy_enum(TRANSLATED);
		verify_policy_enum(OPTED_OUT);
		verify_policy_enum(ENHANCED_V0);
		verify_policy_enum(ENHANCED_V1);
		verify_policy_enum(ENHANCED_V2);
		verify_policy_enum(ENHANCED_VERSION_MASK);
		verify_policy_enum(MASK);

#undef verify_policy_enum
	}

	/* stamp the task's space policy into the high option bits */
	opts |= (uint64_t)ipc_space_policy(task->itk_space) << MACH64_POLICY_SHIFT;

	return opts;
}
233 
234 mach_msg_return_t
ipc_preflight_msg_option64(mach_msg_option64_t opts)235 ipc_preflight_msg_option64(mach_msg_option64_t opts)
236 {
237 	bool success = true;
238 
239 	if ((opts & MACH64_SEND_MSG) && (opts & MACH64_MACH_MSG2)) {
240 		mach_msg_option64_t cfi = opts & MACH64_MSG_OPTION_CFI_MASK;
241 
242 #if !XNU_TARGET_OS_OSX
243 		cfi &= ~MACH64_SEND_ANY;
244 #endif
245 		/* mach_msg2() calls must have exactly _one_ of these set */
246 		if (cfi == 0 || (cfi & (cfi - 1)) != 0) {
247 			success = false;
248 		}
249 
250 		/* vector calls are only supported for message queues */
251 		if ((opts & (MACH64_SEND_MQ_CALL | MACH64_SEND_ANY)) == 0 &&
252 		    (opts & MACH64_MSG_VECTOR)) {
253 			success = false;
254 		}
255 	}
256 
257 	if (success) {
258 		return MACH_MSG_SUCCESS;
259 	}
260 
261 	mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
262 	if (opts & MACH64_MACH_MSG2) {
263 		return MACH_SEND_INVALID_OPTIONS;
264 	}
265 	return KERN_NOT_SUPPORTED;
266 }
267 
268 #pragma mark helpers
269 
270 bool
ipc_should_apply_policy(const ipc_space_policy_t current_policy,const ipc_space_policy_t requested_level)271 ipc_should_apply_policy(
272 	const ipc_space_policy_t current_policy,
273 	const ipc_space_policy_t requested_level)
274 {
275 	/* Do not apply security policies on these binaries to avoid bincompat regression */
276 	if ((current_policy & IPC_SPACE_POLICY_SIMULATED) ||
277 	    (current_policy & IPC_SPACE_POLICY_OPTED_OUT) ||
278 	    (current_policy & IPC_SPACE_POLICY_TRANSLATED)) {
279 		return false;
280 	}
281 
282 	/* Check versioning for applying platform restrictions policy */
283 	if (requested_level & current_policy & IPC_SPACE_POLICY_ENHANCED) {
284 		/* Platform is always opted into platform restrictions */
285 		if (current_policy & IPC_SPACE_POLICY_PLATFORM) {
286 			return true;
287 		}
288 
289 		const ipc_space_policy_t requested_version = requested_level & IPC_SPACE_POLICY_ENHANCED_VERSION_MASK;
290 		const ipc_space_policy_t current_es_version = current_policy & IPC_SPACE_POLICY_ENHANCED_VERSION_MASK;
291 		assert(requested_version != 0);
292 		return requested_version <= current_es_version;
293 	}
294 	return current_policy & requested_level;
295 }
296 
297 #pragma mark legacy trap policies
298 #if IPC_HAS_LEGACY_MACH_MSG_TRAP
299 
/*
 * CoreAnalytics event recording a use of the legacy mach_msg() trap.
 * Fields identify the calling binary (platform, SDK, signing identity)
 * and the MIG message id it sent.
 */
CA_EVENT(mach_msg_trap_event,
    CA_INT, msgh_id,
    CA_INT, sw_platform,
    CA_INT, sdk,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
307 
/*
 * Emit a mach_msg_trap_event for the current process, recording the
 * legacy-trap message id along with the caller's platform, SDK and
 * code-signing identifiers.
 *
 * NOTE(review): unlike send_telemetry() below, the allocation here is not
 * NULL-checked — presumably CA_EVENT_ALLOCATE (without Z_NOWAIT) cannot
 * fail; confirm against the CoreAnalytics kernel API.
 */
static void
mach_msg_legacy_send_analytics(
	mach_msg_id_t           msgh_id,
	uint32_t                platform,
	uint32_t                sdk)
{
	char *proc_name = proc_name_address(current_proc());
	const char *team_id = csproc_get_teamid(current_proc());
	const char *signing_id = csproc_get_identity(current_proc());

	ca_event_t ca_event = CA_EVENT_ALLOCATE(mach_msg_trap_event);
	CA_EVENT_TYPE(mach_msg_trap_event) * msg_event = ca_event->data;

	msg_event->msgh_id = msgh_id;
	msg_event->sw_platform = platform;
	msg_event->sdk = sdk;

	/* string fields are optional: leave them zeroed when unavailable */
	if (proc_name) {
		strlcpy(msg_event->proc_name, proc_name, CA_PROCNAME_LEN);
	}

	if (team_id) {
		strlcpy(msg_event->team_id, team_id, CA_TEAMID_MAX_LEN);
	}

	if (signing_id) {
		strlcpy(msg_event->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
	}

	CA_EVENT_SEND(ca_event);
}
339 
340 static bool
ipc_policy_allow_legacy_mach_msg_trap_for_platform(mach_msg_id_t msgid)341 ipc_policy_allow_legacy_mach_msg_trap_for_platform(
342 	mach_msg_id_t           msgid)
343 {
344 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
345 	uint32_t platform = pro->p_platform_data.p_platform;
346 	uint32_t sdk = pro->p_platform_data.p_sdk;
347 	uint32_t sdk_major = sdk >> 16;
348 
349 	/*
350 	 * Special rules, due to unfortunate bincompat reasons,
351 	 * allow for a hardcoded list of MIG calls to XNU to go through
352 	 * for macOS apps linked against an SDK older than 12.x.
353 	 */
354 	switch (platform) {
355 	case PLATFORM_MACOS:
356 		if (sdk == 0 || sdk_major > 12) {
357 			return false;
358 		}
359 		break;
360 	default:
361 		/* disallow for any non-macOS for platform */
362 		return false;
363 	}
364 
365 	switch (msgid) {
366 	case 0xd4a: /* task_threads */
367 	case 0xd4d: /* task_info */
368 	case 0xe13: /* thread_get_state */
369 	case 0x12c4: /* mach_vm_read */
370 	case 0x12c8: /* mach_vm_read_overwrite */
371 		mach_msg_legacy_send_analytics(msgid, platform, sdk);
372 		return true;
373 	default:
374 		return false;
375 	}
376 }
377 
378 
/*
 * Gate the legacy mach_msg() send trap.
 *
 * Enhanced-security tasks (MACH64_POLICY_ENHANCED) are always denied.
 * Everyone else is allowed through one of the bincompat escape hatches
 * below; otherwise a guard exception is raised and the send fails.
 */
mach_msg_return_t
ipc_policy_allow_legacy_send_trap(
	mach_msg_id_t           msgid,
	mach_msg_option64_t     opts)
{
	/* equivalent to ENHANCED_V0 */
	if ((opts & MACH64_POLICY_ENHANCED) == 0) {
#if __x86_64__
		if (current_map()->max_offset <= VM_MAX_ADDRESS) {
			/*
			 * Legacy mach_msg_trap() is the only
			 * available thing for 32-bit tasks
			 */
			return MACH_MSG_SUCCESS;
		}
#endif /* __x86_64__ */
#if CONFIG_ROSETTA
		if (opts & MACH64_POLICY_TRANSLATED) {
			/*
			 * Similarly, on Rosetta, allow mach_msg_trap()
			 * as those apps likely can't be fixed anymore
			 */
			return MACH_MSG_SUCCESS;
		}
#endif
#if DEVELOPMENT || DEBUG
		if (allow_legacy_mach_msg) {
			/* Honor boot-arg */
			return MACH_MSG_SUCCESS;
		}
#endif /* DEVELOPMENT || DEBUG */
		/* hardcoded macOS <= 12.x MIG call carve-out (with analytics) */
		if (ipc_policy_allow_legacy_mach_msg_trap_for_platform(msgid)) {
			return MACH_MSG_SUCCESS;
		}
	}

	mach_port_guard_exception(msgid, opts, kGUARD_EXC_INVALID_OPTIONS);
	/*
	 * this should be MACH_SEND_INVALID_OPTIONS,
	 * but this is a new mach_msg2 error only.
	 */
	return KERN_NOT_SUPPORTED;
}
422 
423 
424 #endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */
425 #pragma mark ipc policy telemetry
426 
/*
 * As the CA framework relies on successfully allocating zalloc memory,
 * we maintain a small buffer that gets flushed when full. This helps us avoid taking spinlocks when working with CA.
 */
#define IPC_POLICY_VIOLATIONS_RB_SIZE         2

/*
 * Stripped down version of service port's string name. This is to avoid overwhelming CA's dynamic memory allocation.
 */
#define CA_MACH_SERVICE_PORT_NAME_LEN         86

/* One buffered policy-violation event, staged until the ring is flushed to CA. */
struct ipc_policy_violations_rb_entry {
	char proc_name[CA_PROCNAME_LEN];                  /* offending process name */
	char service_name[CA_MACH_SERVICE_PORT_NAME_LEN]; /* destination service, or "unknown" */
	char team_id[CA_TEAMID_MAX_LEN];                  /* code-signing team identifier */
	char signing_id[CA_SIGNINGID_MAX_LEN];            /* code-signing identity */
	ipc_policy_violation_id_t violation_id;           /* which policy was violated */
	int  sw_platform;                                 /* PLATFORM_* the binary targets */
	int  aux_data;                                    /* violation-specific payload */
	int  sdk;                                         /* SDK the binary was built against */
};
struct ipc_policy_violations_rb_entry ipc_policy_violations_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
/* next free slot in the ring; protected by ipc_telemetry_lock */
static uint8_t ipc_policy_violations_rb_index = 0;

#if DEBUG || DEVELOPMENT
/* sysctl debug.ipcpv_telemetry_count */
_Atomic unsigned int ipcpv_telemetry_count = 0;
#endif

LCK_GRP_DECLARE(ipc_telemetry_lock_grp, "ipc_telemetry_lock_grp");
LCK_TICKET_DECLARE(ipc_telemetry_lock, &ipc_telemetry_lock_grp);

/*
 * Telemetry: report back the process name violating ipc policy. Note that this event can be used to report
 * any type of ipc violation through a ipc_policy_violation_id_t. It is named reply_port_semantics_violations
 * because we are reusing an existing event.
 */
CA_EVENT(reply_port_semantics_violations,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name,
    CA_STATIC_STRING(CA_MACH_SERVICE_PORT_NAME_LEN), service_name,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_INT, reply_port_semantics_violation,
    CA_INT, msgh_id); /* for aux_data, keeping the legacy name msgh_id to avoid CA shenanigan */
471 
472 static void
send_telemetry(const struct ipc_policy_violations_rb_entry * entry)473 send_telemetry(
474 	const struct ipc_policy_violations_rb_entry *entry)
475 {
476 	ca_event_t ca_event = CA_EVENT_ALLOCATE_FLAGS(reply_port_semantics_violations, Z_NOWAIT);
477 	if (ca_event) {
478 		CA_EVENT_TYPE(reply_port_semantics_violations) * event = ca_event->data;
479 
480 		strlcpy(event->service_name, entry->service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
481 		strlcpy(event->proc_name, entry->proc_name, CA_PROCNAME_LEN);
482 		strlcpy(event->team_id, entry->team_id, CA_TEAMID_MAX_LEN);
483 		strlcpy(event->signing_id, entry->signing_id, CA_SIGNINGID_MAX_LEN);
484 		event->reply_port_semantics_violation = entry->violation_id;
485 		event->msgh_id = entry->aux_data;
486 
487 		CA_EVENT_SEND(ca_event);
488 	}
489 }
490 
/* Routine: flush_ipc_policy_violations_telemetry
 * Conditions:
 *              Assumes ipc_policy_type is valid
 *              Assumes ipc telemetry lock is held.
 *              Drops the lock while sending, then re-acquires it;
 *              returns with the lock held (caller unlocks).
 */
static void
flush_ipc_policy_violations_telemetry(void)
{
	struct ipc_policy_violations_rb_entry local_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
	uint8_t local_rb_index = 0;

	/* index past the ring capacity means the lock discipline was broken */
	if (__improbable(ipc_policy_violations_rb_index > IPC_POLICY_VIOLATIONS_RB_SIZE)) {
		panic("Invalid ipc policy violation buffer index %d > %d",
		    ipc_policy_violations_rb_index, IPC_POLICY_VIOLATIONS_RB_SIZE);
	}

	/*
	 * We operate on local copy of telemetry buffer because CA framework relies on successfully
	 * allocating zalloc memory. It can not do that if we are accessing the shared buffer
	 * with spin locks held.
	 */
	while (local_rb_index != ipc_policy_violations_rb_index) {
		local_rb[local_rb_index] = ipc_policy_violations_rb[local_rb_index];
		local_rb_index++;
	}

	lck_ticket_unlock(&ipc_telemetry_lock);

	/* send in reverse staging order; order is not significant to CA */
	while (local_rb_index > 0) {
		struct ipc_policy_violations_rb_entry *entry = &local_rb[--local_rb_index];
		send_telemetry(entry);
	}

	/*
	 * Finally call out the buffer as empty. This is also a sort of rate limiting mechanisms for the events.
	 * Events will get dropped until the buffer is not fully flushed.
	 */
	lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
	ipc_policy_violations_rb_index = 0;
}
532 
533 void
ipc_stash_policy_violations_telemetry(ipc_policy_violation_id_t violation_id,ipc_port_t port,int aux_data)534 ipc_stash_policy_violations_telemetry(
535 	ipc_policy_violation_id_t    violation_id,
536 	ipc_port_t                   port,
537 	int                          aux_data)
538 {
539 	if (!ipcpv_telemetry_enabled) {
540 		return;
541 	}
542 
543 	struct ipc_policy_violations_rb_entry *entry;
544 	char *service_name = (char *) "unknown";
545 	task_t task = current_task_early();
546 	int pid = -1;
547 
548 #if CONFIG_SERVICE_PORT_INFO
549 	if (IP_VALID(port)) {
550 		/*
551 		 * dest_port lock must be held to avoid race condition
552 		 * when accessing ip_splabel rdar://139066947
553 		 */
554 		struct mach_service_port_info sp_info;
555 		ipc_object_label_t label = ip_mq_lock_label_get(port);
556 		if (io_state_active(label.io_state)) {
557 			if (ip_is_any_service_port_type(label.io_type) ||
558 			    ip_is_bootstrap_port_type(label.io_type)) {
559 				ipc_service_port_label_get_info(label.iol_service, &sp_info);
560 				service_name = sp_info.mspi_string_name;
561 			}
562 		}
563 		ip_mq_unlock_label_put(port, &label);
564 	}
565 #endif /* CONFIG_SERVICE_PORT_INFO */
566 
567 	if (task) {
568 		pid = task_pid(task);
569 	}
570 
571 	if (task) {
572 		struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
573 		uint32_t platform = pro->p_platform_data.p_platform;
574 		uint32_t sdk = pro->p_platform_data.p_sdk;
575 		char *proc_name = (char *) "unknown";
576 #ifdef MACH_BSD
577 		proc_name = proc_name_address(get_bsdtask_info(task));
578 #endif /* MACH_BSD */
579 		const char *team_id = csproc_get_identity(current_proc());
580 		const char *signing_id = csproc_get_teamid(current_proc());
581 
582 		lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
583 
584 		if (ipc_policy_violations_rb_index >= IPC_POLICY_VIOLATIONS_RB_SIZE) {
585 			/* Dropping the event since buffer is full. */
586 			lck_ticket_unlock(&ipc_telemetry_lock);
587 			return;
588 		}
589 		entry = &ipc_policy_violations_rb[ipc_policy_violations_rb_index++];
590 		strlcpy(entry->proc_name, proc_name, CA_PROCNAME_LEN);
591 
592 		strlcpy(entry->service_name, service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
593 		entry->violation_id = violation_id;
594 
595 		if (team_id) {
596 			strlcpy(entry->team_id, team_id, CA_TEAMID_MAX_LEN);
597 		}
598 
599 		if (signing_id) {
600 			strlcpy(entry->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
601 		}
602 		entry->aux_data = aux_data;
603 		entry->sw_platform = platform;
604 		entry->sdk = sdk;
605 	}
606 
607 	if (ipc_policy_violations_rb_index == IPC_POLICY_VIOLATIONS_RB_SIZE) {
608 		flush_ipc_policy_violations_telemetry();
609 	}
610 
611 	lck_ticket_unlock(&ipc_telemetry_lock);
612 }
613 
614 #if DEBUG || DEVELOPMENT
/*
 * Bump the debug-only telemetry counter, saturating at UINT_MAX instead
 * of wrapping (os_add_overflow guards the increment).
 *
 * NOTE(review): the load/store pair is not an atomic read-modify-write,
 * so concurrent callers can lose increments — presumably acceptable for
 * a DEBUG/DEVELOPMENT sysctl counter; confirm if exact counts matter.
 */
void
ipc_inc_telemetry_count(void)
{
	unsigned int count = os_atomic_load(&ipcpv_telemetry_count, relaxed);
	if (!os_add_overflow(count, 1, &count)) {
		os_atomic_store(&ipcpv_telemetry_count, count, relaxed);
	}
}
623 #endif /* DEBUG || DEVELOPMENT */
624 
625 /*!
626  * @brief
627  * Checks that this message conforms to reply port policies, which are:
628  * 1. IOT_REPLY_PORT's must be make-send-once disposition
629  * 2. You must use an IOT_REPLY_PORT (or weak variant) if the dest_port requires it
630  *
631  * @param reply_port    the message local/reply port
632  * @param dest_port     the message remote/dest port
633  *
634  * @returns
635  * - true  if there is a violation in the security policy for this mach msg
636  * - false otherwise
637  */
638 static mach_msg_return_t
ipc_validate_local_port(mach_port_t reply_port,mach_port_t dest_port,mach_msg_option64_t opts)639 ipc_validate_local_port(
640 	mach_port_t         reply_port,
641 	mach_port_t         dest_port,
642 	mach_msg_option64_t opts)
643 {
644 	assert(IP_VALID(dest_port));
645 	/* An empty reply port, or an inactive reply port / dest port violates nothing */
646 	if (!IP_VALID(reply_port) || !ip_active(reply_port) || !ip_active(dest_port)) {
647 		return MACH_MSG_SUCCESS;
648 	}
649 
650 	if (ip_is_reply_port(reply_port)) {
651 		return MACH_MSG_SUCCESS;
652 	}
653 
654 	ipc_space_policy_t pol = ipc_convert_msg_options_to_space(opts);
655 	/* skip translated and simulated process */
656 	if (!ipc_should_apply_policy((pol), IPC_SPACE_POLICY_DEFAULT)) {
657 		return MACH_MSG_SUCCESS;
658 	}
659 
660 	/* kobject enforcement */
661 	if (ip_is_kobject(dest_port) &&
662 	    ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V1)) {
663 		mach_port_guard_exception(ip_get_receiver_name(dest_port), 0, kGUARD_EXC_KOBJECT_REPLY_PORT_SEMANTICS);
664 		return MACH_SEND_INVALID_REPLY;
665 	}
666 
667 	if (!ipc_policy(dest_port)->pol_enforce_reply_semantics || ip_is_provisional_reply_port(reply_port)) {
668 		return MACH_MSG_SUCCESS;
669 	}
670 
671 	/* bootstrap port defense */
672 	if (ip_is_bootstrap_port(dest_port) &&
673 	    ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V2)) {
674 		if (bootstrap_port_telemetry_enabled &&
675 		    !ipc_space_has_telemetry_type(current_space(), IS_HAS_BOOTSTRAP_PORT_TELEMETRY)) {
676 			ipc_stash_policy_violations_telemetry(IPCPV_BOOTSTRAP_PORT, dest_port, 0);
677 		}
678 		if (bootstrap_port_enforcement_enabled) {
679 			mach_port_guard_exception(ip_get_receiver_name(dest_port), 1, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
680 			return MACH_SEND_INVALID_REPLY;
681 		}
682 	}
683 
684 	/* regular enforcement */
685 	if (!ip_is_bootstrap_port(dest_port)) {
686 		if (ip_is_strong_service_port(dest_port)) {
687 			ipc_stash_policy_violations_telemetry(IPCPV_REPLY_PORT_SEMANTICS_OPTOUT, dest_port, 0);
688 		}
689 		mach_port_guard_exception(ip_get_receiver_name(dest_port), 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
690 		return MACH_SEND_INVALID_REPLY;
691 	}
692 
693 	return MACH_MSG_SUCCESS;
694 }
695 
696 #pragma mark MACH_SEND_MSG policies
697 
698 mach_msg_return_t
ipc_validate_kmsg_header_schema_from_user(mach_msg_user_header_t * hdr __unused,mach_msg_size_t dsc_count,mach_msg_option64_t opts)699 ipc_validate_kmsg_header_schema_from_user(
700 	mach_msg_user_header_t *hdr __unused,
701 	mach_msg_size_t         dsc_count,
702 	mach_msg_option64_t     opts)
703 {
704 	if (opts & MACH64_SEND_KOBJECT_CALL) {
705 		if (dsc_count > IPC_KOBJECT_DESC_MAX) {
706 			return MACH_SEND_TOO_LARGE;
707 		}
708 	}
709 
710 	return MACH_MSG_SUCCESS;
711 }
712 
/*
 * Schema check on a user message body: bound the OOL port count and the
 * total VM space (OOL memory plus the kernel-side size of the OOL port
 * arrays) a single kmsg may consume.
 */
mach_msg_return_t
ipc_validate_kmsg_schema_from_user(
	mach_msg_header_t      *kdata,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts __unused)
{
	/*
	 * NOTE(review): kbase is computed but never read in this function —
	 * either dead code or kept for a mach_msg_header_to_kbase() side
	 * effect (e.g. a DEBUG assertion); confirm before removing.
	 */
	mach_msg_kbase_t *kbase = NULL;
	vm_size_t vm_size;

	if (kdata->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		kbase = mach_msg_header_to_kbase(kdata);
	}

	if (send_uctx->send_dsc_port_count > IPC_KMSG_MAX_OOL_PORT_COUNT) {
		return MACH_SEND_TOO_LARGE;
	}

	/* total = OOL memory + kernel footprint of the OOL port arrays */
	if (os_add_overflow(send_uctx->send_dsc_vm_size,
	    send_uctx->send_dsc_port_count * sizeof(mach_port_t), &vm_size)) {
		return MACH_SEND_TOO_LARGE;
	}
	if (vm_size > ipc_kmsg_max_vm_space) {
		return MACH_MSG_VM_KERNEL;
	}

	return MACH_MSG_SUCCESS;
}
740 
/*
 * Evaluate send-side message filtering for a user message.
 *
 * Looks up the destination port's filter label (bootstrap, service or
 * connection port) and consults the filter policy for the message id.
 * Returns MACH_MSG_SUCCESS when the message may be sent, or
 * MACH_SEND_MSG_FILTERED (optionally raising a fatal guard exception)
 * when it is filtered.
 */
static mach_msg_return_t
ipc_filter_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_port_t             dport,
	mach_msg_option64_t     opts)
{
	static const uint32_t MACH_BOOTSTRAP_PORT_MSG_ID_MASK = ((1u << 24) - 1);

	mach_msg_filter_id fid = 0;
	ipc_object_label_t dlabel;
	mach_msg_id_t msg_id = hdr->msgh_id;
	struct ipc_conn_port_label *sblabel = NULL;

	/* lock the port and take a reference on its label */
	dlabel = ip_mq_lock_label_get(dport);

	if (io_state_active(dlabel.io_state) && dlabel.io_filtered) {
		switch (dlabel.io_type) {
		case IOT_BOOTSTRAP_PORT:
			/*
			 * Mask the top byte for messages sent to launchd's bootstrap port.
			 * Filter any messages with domain 0 (as they correspond to MIG
			 * based messages)
			 */
			if ((msg_id & ~MACH_BOOTSTRAP_PORT_MSG_ID_MASK) == 0) {
				ip_mq_unlock_label_put(dport, &dlabel);
				goto filtered_msg;
			}
			msg_id = msg_id & MACH_BOOTSTRAP_PORT_MSG_ID_MASK;
			OS_FALLTHROUGH;

		case IOT_SERVICE_PORT:
		case IOT_WEAK_SERVICE_PORT:
			sblabel = dlabel.iol_service->ispl_sblabel;
			break;

		case IOT_CONNECTION_PORT:
			/* Connection ports can also have send-side message filters */
			sblabel = dlabel.iol_connection;
			break;

		default:
			break;
		}
	}
	/* retain the filter label before dropping the port lock */
	if (sblabel) {
		mach_msg_filter_retain_sblabel_callback(sblabel);
	}

	ip_mq_unlock_label_put(dport, &dlabel);

	/* consult the filter policy with the port unlocked */
	if (sblabel && !mach_msg_fetch_filter_policy(sblabel, msg_id, &fid)) {
		goto filtered_msg;
	}
	return MACH_MSG_SUCCESS;

filtered_msg:
	if ((opts & MACH64_POLICY_FILTER_NON_FATAL) == 0) {
		mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(hdr->msgh_remote_port);

		mach_port_guard_exception(dest_name, hdr->msgh_id,
		    kGUARD_EXC_MSG_FILTERED);
	}
	return MACH_SEND_MSG_FILTERED;
}
805 
806 static bool
ipc_policy_allow_send_only_kobject_calls(void)807 ipc_policy_allow_send_only_kobject_calls(void)
808 {
809 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
810 	uint32_t sdk = pro->p_platform_data.p_sdk;
811 	uint32_t sdk_major = sdk >> 16;
812 
813 	switch (pro->p_platform_data.p_platform) {
814 	case PLATFORM_IOS:
815 	case PLATFORM_MACCATALYST:
816 	case PLATFORM_TVOS:
817 		if (sdk == 0 || sdk_major > 17) {
818 			return false;
819 		}
820 		return true;
821 	case PLATFORM_MACOS:
822 		if (sdk == 0 || sdk_major > 14) {
823 			return false;
824 		}
825 		return true;
826 	case PLATFORM_WATCHOS:
827 		if (sdk == 0 || sdk_major > 10) {
828 			return false;
829 		}
830 		return true;
831 	default:
832 		return false;
833 	}
834 }
835 
/*
 * Enforce mach_msg2() CFI: the MACH64_SEND_*_CALL flag the caller passed
 * must match the destination port's type (kobject vs. message queue vs.
 * DriverKit object), modulo the bincompat carve-outs below.
 */
static mach_msg_return_t
ipc_validate_kmsg_dest_from_user(
	mach_msg_header_t      *hdr,
	ipc_port_t              port,
	mach_msg_option64_t     opts)
{
	/*
	 * This is a _user_ message via mach_msg2_trap().
	 *
	 * To curb kobject port/message queue confusion and improve control flow
	 * integrity, mach_msg2_trap() invocations mandate the use of either
	 * MACH64_SEND_KOBJECT_CALL or MACH64_SEND_MQ_CALL and that the flag
	 * matches the underlying port type. (unless the call is from a simulator,
	 * since old simulators keep using mach_msg() in all cases indiscriminatingly.)
	 *
	 * Since:
	 *     (1) We make sure to always pass either MACH64_SEND_MQ_CALL or
	 *         MACH64_SEND_KOBJECT_CALL bit at all sites outside simulators
	 *         (checked by mach_msg2_trap());
	 *     (2) We checked in mach_msg2_trap() that _exactly_ one of the three bits is set.
	 *
	 * CFI check cannot be bypassed by simply setting MACH64_SEND_ANY.
	 */
#if XNU_TARGET_OS_OSX
	if (opts & MACH64_SEND_ANY) {
		return MACH_MSG_SUCCESS;
	}
#endif /* XNU_TARGET_OS_OSX */

	natural_t otype = ip_type(port);
	if (otype == IOT_TIMER_PORT) {
#if XNU_TARGET_OS_OSX
		/* enhanced-security tasks may never message timer ports directly */
		if (__improbable(opts & MACH64_POLICY_ENHANCED)) {
			return MACH_SEND_INVALID_OPTIONS;
		}
		/*
		 * For bincompat, let's still allow user messages to timer port, but
		 * force MACH64_SEND_MQ_CALL flag for memory segregation.
		 */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
#else
		return MACH_SEND_INVALID_OPTIONS;
#endif
	} else if (io_is_kobject_type(otype)) {
		if (otype == IKOT_UEXT_OBJECT) {
			/* DriverKit objects require the dedicated DK flag */
			if (__improbable(!(opts & MACH64_SEND_DK_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
		} else {
			/* Otherwise, caller must set MACH64_SEND_KOBJECT_CALL. */
			if (__improbable(!(opts & MACH64_SEND_KOBJECT_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}

			/* kobject calls must be a combined send/receive */
			if (__improbable((opts & MACH64_RCV_MSG) == 0)) {
				if ((opts & MACH64_POLICY_ENHANCED) ||
				    IP_VALID(hdr->msgh_local_port) ||
				    !ipc_policy_allow_send_only_kobject_calls()) {
					return MACH_SEND_INVALID_OPTIONS;
				}
			}
		}
#if CONFIG_CSR
	} else if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
		/*
		 * Allow MACH64_SEND_KOBJECT_CALL flag to message queues
		 * when SIP is off (for Mach-on-Mach emulation).
		 */
#endif /* CONFIG_CSR */
	} else {
		/* If destination is a message queue, caller must set MACH64_SEND_MQ_CALL */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	return MACH_MSG_SUCCESS;
}
917 
/*
 *	Routine:	ipc_validate_kmsg_header_from_user
 *	Purpose:
 *		Enforce the send-side security policies on a user message
 *		header before it is processed: destination checks (mach_msg2
 *		only), OOL port-array restrictions for enhanced-v2 binaries,
 *		reply-port semantics, and message filtering.
 *	Returns:
 *		MACH_MSG_SUCCESS           all policies passed.
 *		MACH_SEND_INVALID_OPTIONS  a policy rejected the message (a
 *		                           guard exception is also raised).
 *		Other MACH_SEND_* codes from the individual validators.
 */
mach_msg_return_t
ipc_validate_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts)
{
	ipc_port_t dest_port = hdr->msgh_remote_port;
	ipc_port_t reply_port = hdr->msgh_local_port;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	ipc_space_policy_t current_policy;

	/* Destination call-type checks only apply to the mach_msg2() trap. */
	if (opts & MACH64_MACH_MSG2) {
		mr = ipc_validate_kmsg_dest_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

	/*
	 * For enhanced v2 binaries, enforce two OOL port array restrictions:
	 *     - the receive right has to be of a type that explicitly
	 *       allows receiving that descriptor
	 *     - there could be no more than ONE single array in a kmsg
	 */
	current_policy = ipc_convert_msg_options_to_space(opts);
	if (ool_port_array_enforced &&
	    send_uctx->send_dsc_port_arrays_count &&
	    ipc_should_apply_policy(current_policy, IPC_POLICY_ENHANCED_V2)) {
		if (!ip_is_port_array_allowed(dest_port)) {
			/*
			 * These two failures raise a descriptor-violation
			 * guard here and return directly, bypassing the
			 * generic invalid-options guard at `out`.
			 */
			mach_port_guard_exception(current_policy,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_OPTIONS_OOL_RIGHT,
			    ip_type(dest_port)),
			    kGUARD_EXC_DESCRIPTOR_VIOLATION);

			return MACH_SEND_INVALID_OPTIONS;
		}

		if (send_uctx->send_dsc_port_arrays_count > 1) {
			mach_port_guard_exception(current_policy,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_OPTIONS_OOL_ARRAYS,
			    send_uctx->send_dsc_port_arrays_count),
			    kGUARD_EXC_DESCRIPTOR_VIOLATION);

			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	/*
	 * Ensure that the reply field follows our security policies,
	 * including IOT_REPLY_PORT requirements
	 */
	mr = ipc_validate_local_port(reply_port, dest_port, opts);
	if (mr != MACH_MSG_SUCCESS) {
		goto out;
	}

	/*
	 * Evaluate message filtering if the sender is filtered.
	 */
	if ((opts & MACH64_POLICY_FILTER_MSG) &&
	    mach_msg_filter_at_least(MACH_MSG_FILTER_CALLBACKS_VERSION_1) &&
	    ip_to_object(dest_port)->io_filtered) {
		mr = ipc_filter_kmsg_header_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

out:
	/* Invalid-options failures that reach here also deliver a guard. */
	if (mr == MACH_SEND_INVALID_OPTIONS) {
		mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
	}
	return mr;
}
992 
993 #pragma mark receive immovability
994 
/*
 *	Routine:	ipc_move_receive_allowed
 *	Purpose:
 *		Decide whether the receive right named @name for @port may
 *		be moved out of @space, raising the appropriate guard
 *		exception when it may not.
 *	Returns:
 *		true if the move is allowed, false otherwise.
 */
bool
ipc_move_receive_allowed(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_port_name_t        name)
{
	ipc_space_policy_t policy = ipc_space_policy(space);
	/*
	 * Check for service port before immovability so the task crash
	 * with reason kGUARD_EXC_SERVICE_PORT_VIOLATION_FATAL
	 */
	if (service_port_defense_enabled &&
	    ip_is_strong_service_port(port) &&
	    !task_is_initproc(space->is_task)) {
		mach_port_guard_exception(0, name,
		    kGUARD_EXC_SERVICE_PORT_VIOLATION_FATAL);
		return false;
	}

	/*
	 * Moving a provisional reply port under enhanced-v2 policy raises
	 * a guard (unless the space already has the move-PRP telemetry
	 * flag) but does NOT deny the move: control falls through to the
	 * immovability check below.
	 */
	if (ip_type(port) == IOT_PROVISIONAL_REPLY_PORT &&
	    ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V2) &&
	    !ipc_space_has_telemetry_type(space, IS_HAS_MOVE_PRP_TELEMETRY)) {
		mach_port_guard_exception(name, 0, kGUARD_EXC_MOVE_PROVISIONAL_REPLY_PORT);
	}

	if (ip_is_immovable_receive(port)) {
		mach_port_guard_exception(name, 0, kGUARD_EXC_IMMOVABLE);
		return false;
	}

	return true;
}
1027 
1028 #pragma mark send immovability
1029 
1030 
/*
 *	Routine:	ipc_should_mark_immovable_send
 *	Purpose:
 *		Decide whether a send right for @port, materializing in
 *		@curr_task's space, should be marked immovable
 *		(IE_BITS_IMMOVABLE_SEND).
 *	Conditions:
 *		@label is @port's label, obtained by the caller.
 */
bool
ipc_should_mark_immovable_send(
	task_t curr_task,
	ipc_port_t port,
	ipc_object_label_t label)
{
	thread_t ctrl_thread = THREAD_NULL;
	task_t   ctrl_task   = TASK_NULL;

	/*
	 * task obtaining its own task control port is controlled by security policy
	 * see `task_set_ctrl_port_default`
	 * This must come first so that we avoid evaluating the kobject port before ipc_task_enable has run
	 */
	if (curr_task->itk_task_ports[TASK_FLAVOR_CONTROL] == port) {
		return task_is_immovable(curr_task);
	}

	/* Resolve which task (if any) this control port represents. */
	switch (ip_type(port)) {
	case IKOT_TASK_CONTROL:
		ctrl_task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
		break;
	case IKOT_THREAD_CONTROL:
		ctrl_thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
		if (ctrl_thread) {
			ctrl_task = get_threadtask(ctrl_thread);
		}
		break;
	default:
		break;
	}

	/*
	 * task obtaining its own thread control port is controlled by security policy
	 * see `task_set_ctrl_port_default`
	 */
	if (ctrl_thread && curr_task == ctrl_task) {
		/*
		 * we cannot assert that the control port options for the task are set up
		 * yet because we may be copying out the thread control port during exec.
		 * This means that the first thread control port copyout will always be movable, but other
		 * copyouts will occur before userspace is allowed to run any code which will subsequently mark it
		 * as immovable if needed.
		 */
		return task_is_immovable_no_assert(curr_task);
	}

	/*
	 * all control ports obtained by another process are movable
	 * while the space is inactive (for corpses).
	 */
	if (ctrl_task && !is_active(ctrl_task->itk_space)) {
		assert(ctrl_task != curr_task);
		assert(ip_is_tt_control_port_type(label.io_type));
		return false;
	}

	/* special cases are handled, now we refer to the default policy */
	return !ipc_policy(label)->pol_movable_send;
}
1091 
1092 /* requires: nothing locked, port is valid */
1093 static bool
ip_is_currently_immovable_send(ipc_port_t port)1094 ip_is_currently_immovable_send(ipc_port_t port)
1095 {
1096 	ipc_object_label_t label = ipc_port_lock_label_get(port);
1097 	bool port_is_immovable_send = ipc_should_mark_immovable_send(current_task(), port, label);
1098 	ip_mq_unlock_label_put(port, &label);
1099 	return port_is_immovable_send;
1100 }
1101 
1102 bool
ipc_can_stash_naked_send(ipc_port_t port)1103 ipc_can_stash_naked_send(ipc_port_t port)
1104 {
1105 	return !IP_VALID(port) || !ip_is_currently_immovable_send(port);
1106 }
1107 
1108 #pragma mark entry init
1109 
/*
 *	Routine:	ipc_entry_init
 *	Purpose:
 *		Fill in @entry for @object with right @type and @urefs user
 *		references, marking send rights immovable when the policy
 *		in ipc_should_mark_immovable_send() demands it.
 *	Conditions:
 *		NOTE(review): callers appear to hold the space write lock
 *		(the entry is mutated and ipc_entry_modified() is called)
 *		-- confirm against call sites.
 */
void
ipc_entry_init(
	ipc_space_t         space,
	ipc_object_t        object,
	mach_port_type_t    type,
	ipc_entry_t         entry,
	mach_port_urefs_t   urefs,
	mach_port_name_t    name)
{
	/* object type can be deadname, port, or a portset */
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);
	assert(entry);

	if (object && (type & MACH_PORT_TYPE_SEND_RIGHTS)) {
		ipc_port_t port = ip_object_to_port(object);
		ipc_object_label_t label = ip_label_get(port);

		if (ipc_should_mark_immovable_send(space->is_task, port, label)) {
			entry->ie_bits |= IE_BITS_IMMOVABLE_SEND;
		}
		/* return the label taken by ip_label_get() above */
		io_label_set_and_put(&port->ip_object, &label);
	}
	entry->ie_object = object;
	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, name, entry);
}
1138 
1139 #pragma mark policy guard violations
1140 
1141 void
mach_port_guard_exception(uint32_t target,uint64_t payload,unsigned reason)1142 mach_port_guard_exception(uint32_t target, uint64_t payload, unsigned reason)
1143 {
1144 	mach_exception_code_t code = 0;
1145 	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
1146 	EXC_GUARD_ENCODE_FLAVOR(code, reason);
1147 	EXC_GUARD_ENCODE_TARGET(code, target);
1148 	mach_exception_subcode_t subcode = (uint64_t)payload;
1149 	thread_t t = current_thread();
1150 	bool fatal = FALSE;
1151 
1152 	if (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE &&
1153 	    (get_threadtask(t)->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)) {
1154 		fatal = true;
1155 	} else if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
1156 		fatal = true;
1157 	}
1158 	thread_guard_violation(t, code, subcode, fatal);
1159 }
1160 
1161 void
mach_port_guard_exception_immovable(ipc_space_t space,mach_port_name_t name,mach_port_t port,mach_msg_type_name_t disp,__assert_only ipc_entry_t entry)1162 mach_port_guard_exception_immovable(
1163 	ipc_space_t             space,
1164 	mach_port_name_t        name,
1165 	mach_port_t             port,
1166 	mach_msg_type_name_t    disp,
1167 	__assert_only ipc_entry_t entry)
1168 {
1169 	if (space == current_space()) {
1170 		assert(entry->ie_bits & IE_BITS_IMMOVABLE_SEND);
1171 		assert(entry->ie_port == port);
1172 		uint64_t payload = MPG_PAYLOAD(MPG_FLAGS_NONE, ip_type(port), disp);
1173 		mach_port_guard_exception(name, payload, kGUARD_EXC_IMMOVABLE);
1174 	}
1175 }
1176 
1177 void
mach_port_guard_exception_pinned(ipc_space_t space,mach_port_name_t name,uint64_t payload)1178 mach_port_guard_exception_pinned(
1179 	ipc_space_t             space,
1180 	mach_port_name_t        name,
1181 	uint64_t                payload)
1182 {
1183 	ipc_space_policy_t policy = ipc_space_policy(space);
1184 	int guard;
1185 
1186 	if (space != current_space()) {
1187 		guard = kGUARD_EXC_NONE;
1188 	} else if (policy &
1189 	    (IPC_SPACE_POLICY_TRANSLATED | IPC_SPACE_POLICY_SIMULATED)) {
1190 		guard = kGUARD_EXC_NONE;
1191 	} else if (ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V1)) {
1192 		if (ipc_control_port_options & ICP_OPTIONS_PINNED_1P_HARD) {
1193 			guard = kGUARD_EXC_MOD_REFS;
1194 		} else if (ipc_control_port_options & ICP_OPTIONS_PINNED_1P_SOFT) {
1195 			guard = kGUARD_EXC_MOD_REFS_NON_FATAL;
1196 		} else {
1197 			guard = kGUARD_EXC_NONE;
1198 		}
1199 	} else {
1200 		if (ipc_control_port_options & ICP_OPTIONS_PINNED_3P_HARD) {
1201 			guard = kGUARD_EXC_MOD_REFS;
1202 		} else if (ipc_control_port_options & ICP_OPTIONS_PINNED_3P_SOFT) {
1203 			guard = kGUARD_EXC_MOD_REFS_NON_FATAL;
1204 		} else {
1205 			guard = kGUARD_EXC_NONE;
1206 		}
1207 	}
1208 
1209 	if (guard != kGUARD_EXC_NONE) {
1210 		mach_port_guard_exception(name, payload, guard);
1211 	}
1212 }
1213 
1214 /*
1215  *	Routine:	mach_port_guard_ast
1216  *	Purpose:
1217  *		Raises an exception for mach port guard violation.
1218  *	Conditions:
1219  *		None.
1220  *	Returns:
1221  *		None.
1222  */
1223 
void
mach_port_guard_ast(
	thread_t                t,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode)
{
	unsigned int reason = EXC_GUARD_DECODE_GUARD_FLAVOR(code);
	task_t task = get_threadtask(t);
	unsigned int behavior = task->task_exc_guard;
	bool fatal = true;

	assert(task == current_task());
	assert(task != kernel_task);

	if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
		/*
		 * Fatal Mach port guards - always delivered synchronously if dev mode is on.
		 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
		 * deliver it synchronously and then kill the process, else kill the process
		 * and deliver the exception via EXC_CORPSE_NOTIFY.
		 */

		int flags = PX_DEBUG_NO_HONOR;
		exception_info_t info = {
			.os_reason = OS_REASON_GUARD,
			.exception_type = EXC_GUARD,
			.mx_code = code,
			.mx_subcode = subcode,
		};

		if (task_exception_notify(EXC_GUARD, code, subcode, fatal) == KERN_SUCCESS) {
			flags |= PX_PSIGNAL;
		}
		exit_with_mach_exception(get_bsdtask_info(task), info, flags);
	} else {
		/*
		 * Mach port guards controlled by task settings.
		 */

		/* Is delivery enabled */
		if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
			return;
		}

		/* If only once, make sure we're that once */
		while (behavior & TASK_EXC_GUARD_MP_ONCE) {
			uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_MP_DELIVER;

			if (os_atomic_cmpxchg(&task->task_exc_guard,
			    behavior, new_behavior, relaxed)) {
				break;
			}
			/* cmpxchg lost a race: reload and re-check delivery */
			behavior = task->task_exc_guard;
			if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
				return;
			}
		}
		/* optional flavors are fatal only when the task opted into MP_FATAL */
		fatal = (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)
		    && (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE);
		kern_return_t sync_exception_result;
		sync_exception_result = task_exception_notify(EXC_GUARD, code, subcode, fatal);

		if (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) {
			if (reason > MAX_OPTIONAL_kGUARD_EXC_CODE) {
				/* generate a simulated crash if not handled synchronously */
				if (sync_exception_result != KERN_SUCCESS) {
					task_violated_guard(code, subcode, NULL, TRUE);
				}
			} else {
				/*
				 * Only generate crash report if synchronous EXC_GUARD wasn't handled,
				 * but it has to die regardless.
				 */

				int flags = PX_DEBUG_NO_HONOR;
				exception_info_t info = {
					.os_reason = OS_REASON_GUARD,
					.exception_type = EXC_GUARD,
					.mx_code = code,
					.mx_subcode = subcode
				};

				if (sync_exception_result == KERN_SUCCESS) {
					flags |= PX_PSIGNAL;
				}

				exit_with_mach_exception(get_bsdtask_info(task), info, flags);
			}
		} else if (task->task_exc_guard & TASK_EXC_GUARD_MP_CORPSE) {
			/* Raise exception via corpse fork if not handled synchronously */
			if (sync_exception_result != KERN_SUCCESS) {
				task_violated_guard(code, subcode, NULL, TRUE);
			}
		}
	}
}
1320 
1321 #pragma mark notification policies
1322 
/*
 * Helper: under service port defense, restrict who may register a
 * port-destroyed notification (@notify_port) on @service_port.
 * On denial, *payload receives an MPG_FLAGS_KERN_FAILURE_* code for the
 * caller's guard exception; on success *payload is untouched.
 */
static bool
ipc_allow_service_port_register_pd(
	ipc_port_t              service_port,
	ipc_port_t              notify_port,
	uint64_t                *payload)
{
	/* boot-arg disables this security policy */
	if (!service_port_defense_enabled || !IP_VALID(notify_port)) {
		return true;
	}
	/* enforce this policy only on service port types */
	if (ip_is_any_service_port(service_port)) {
		/* Only launchd should be able to register for port destroyed notification on a service port. */
		if (!task_is_initproc(current_task())) {
			*payload = MPG_FLAGS_KERN_FAILURE_TASK;
			return false;
		}
		/* notify_port needs to be immovable */
		if (!ip_is_immovable_receive(notify_port)) {
			*payload = MPG_FLAGS_KERN_FAILURE_NOTIFY_TYPE;
			return false;
		}
		/* notify_port should be owned by launchd */
		if (!task_is_initproc(notify_port->ip_receiver->is_task)) {
			*payload = MPG_FLAGS_KERN_FAILURE_NOTIFY_RECV;
			return false;
		}
	}
	return true;
}
1353 
/*
 *	Routine:	ipc_allow_register_pd_notification
 *	Purpose:
 *		Validate a port-destroyed notification registration of
 *		@notify_port on @pd_port.
 *	Returns:
 *		KERN_SUCCESS        registration is allowed.
 *		KERN_INVALID_RIGHT  the port type or service-port policy
 *		                    forbids it (a guard is also raised).
 *		KERN_FAILURE        a registration already exists.
 */
kern_return_t
ipc_allow_register_pd_notification(
	ipc_port_t              pd_port,
	ipc_port_t              notify_port)
{
	uint64_t payload;

	/*
	 * you cannot register for port destroyed notifications
	 * on an immovable receive right (which includes kobjects),
	 * or a (special) reply port or any other port that explicitly disallows them.
	 */
	release_assert(ip_in_a_space(pd_port));
	if (ip_is_immovable_receive(pd_port) ||
	    !ipc_policy(pd_port)->pol_notif_port_destroy) {
		mach_port_guard_exception(ip_type(pd_port), MACH_NOTIFY_PORT_DESTROYED, kGUARD_EXC_INVALID_NOTIFICATION_REQ);
		return KERN_INVALID_RIGHT;
	}

	/* Stronger pd enforcement for service ports */
	if (!ipc_allow_service_port_register_pd(pd_port, notify_port, &payload)) {
		mach_port_guard_exception(0, payload, kGUARD_EXC_KERN_FAILURE);
		return KERN_INVALID_RIGHT;
	}

	/* Allow only one registration of this notification */
	if (ipc_port_has_prdrequest(pd_port)) {
		mach_port_guard_exception(0, MPG_FLAGS_KERN_FAILURE_MULTI_NOTI, kGUARD_EXC_KERN_FAILURE);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
1387 
1388 
1389 #pragma mark policy array
1390 
/*
 * Default no-senders hook installed for types without one; reaching it
 * indicates a policy-table bug, so panic.
 */
__dead2
static void
no_kobject_no_senders(
	ipc_port_t              port,
	mach_port_mscount_t     mscount __unused)
{
	panic("unexpected call to no_senders for object %p, type %d",
	    port, ip_type(port));
}
1400 
/*
 * Default label-free hook installed for types without one; reaching it
 * indicates a policy-table bug, so panic.
 */
__dead2
static void
no_label_free(ipc_object_label_t label)
{
	panic("unexpected call to label_free for object type %d, label %p",
	    label.io_type, label.iol_pointer);
}
1408 
/*
 * Denotes a policy whose safe value is the argument to PENDING(),
 * but which is not yet the default, pending validation/prep work.
 */
1413 #define PENDING(value)          value
1414 
/*
 * Per-object-type IPC policy table, indexed by ipc_object_type_t.
 * Finalized at startup (tunable adjustments, kobject registration,
 * default hooks); __security_const_late protects it afterwards.
 */
__security_const_late
struct ipc_object_policy ipc_policy_array[IOT_UNKNOWN] = {
	[IOT_PORT_SET] = {
		.pol_name               = "port set",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = false,
	},
	[IOT_PORT] = {
		.pol_name               = "port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_SERVICE_PORT] = {
		.pol_name               = "service port",
		.pol_movability         = PENDING(IPC_MOVE_POLICY_ONCE_OR_AFTER_PD),
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_enforce_reply_semantics = PENDING(true), /* pending on service port defense cleanup */
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_BOOTSTRAP_PORT] = {
		.pol_name               = "bootstrap port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER, /* bootstrap port should never leave launchd */
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_enforce_reply_semantics = PENDING(true), /* pending on service port defense cleanup */
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_WEAK_SERVICE_PORT] = {
		.pol_name               = "weak service port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_CONNECTION_PORT] = {
		.pol_name               = "connection port",
		.pol_movability         = IPC_MOVE_POLICY_ONCE,
		.pol_label_free         = ipc_connection_port_label_dealloc,
		.pol_enforce_reply_semantics = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_CONNECTION_PORT_WITH_PORT_ARRAY] = {
		.pol_name               = "conn port with ool port array",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_construct_entitlement = MACH_PORT_CONNECTION_PORT_WITH_PORT_ARRAY,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_EXCEPTION_PORT] = {
		.pol_name               = "exception port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_TIMER_PORT] = {
		.pol_name               = "timer port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_label_free         = mk_timer_port_label_dealloc,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_REPLY_PORT] = {
		.pol_name               = "reply port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
	[IOT_SPECIAL_REPLY_PORT] = {
		.pol_name               = "special reply port",
		/*
		 * General use of a special reply port as a receive right
		 * can cause type confusion in the importance code.
		 */
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
	[IOT_PROVISIONAL_REPLY_PORT] = {
		.pol_name               = "provisional reply port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_construct_entitlement = MACH_PORT_PROVISIONAL_REPLY_ENTITLEMENT,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},

	/* kobject types: remaining fields filled in by ipc_kobject_register_startup() */
	[__IKOT_FIRST ... IOT_UNKNOWN - 1] = {
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
};
1519 
/*
 * Relax the service-port move policy when service port defense has been
 * disabled (boot-arg), restoring the always-movable behavior.
 */
__startup_func
static void
ipc_policy_update_from_tunables(void)
{
	if (!service_port_defense_enabled) {
		ipc_policy_array[IOT_SERVICE_PORT].pol_movability =
		    IPC_MOVE_POLICY_ALWAYS;
	}
}
STARTUP(TUNABLES, STARTUP_RANK_LAST, ipc_policy_update_from_tunables);
1530 
1531 /*
1532  * Ensure new port types that requires a construction entitlement
1533  * are marked as immovable.
1534  */
__startup_func
static void
ipc_policy_construct_entitlement_hardening(void)
{
	/* No need to check kobjects because they are always immovable */
	for (ipc_object_type_t i = 0; i < __IKOT_FIRST; i++) {
		/*
		 * IOT_PROVISIONAL_REPLY_PORT is an exception as it used to be
		 * movable. For process opted for enhanced security V2,
		 * kGUARD_EXC_MOVE_PROVISIONAL_REPLY_PORT will be thrown when a
		 * provisional reply port is being moved.
		 */
		if (i == IOT_PROVISIONAL_REPLY_PORT) {
			continue;
		}
		/* entitlement-gated types must also be receive-immovable */
		if (ipc_policy_array[i].pol_construct_entitlement) {
			assert(ipc_policy_array[i].pol_movability == IPC_MOVE_POLICY_NEVER);
		}
	}
}
STARTUP(TUNABLES, STARTUP_RANK_LAST, ipc_policy_construct_entitlement_hardening);
1556 
/*
 *	Routine:	ipc_kobject_register_startup
 *	Purpose:
 *		Install a kobject type's operations (@ops) into
 *		ipc_policy_array at startup.  Each type may register
 *		exactly once; a second registration panics.
 */
__startup_func
void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)
{
	struct ipc_object_policy *pol = &ipc_policy_array[ops->iko_op_type];

	/* pol_name doubles as the "already registered" marker */
	if (pol->pol_name) {
		panic("trying to register kobject(%d) twice", ops->iko_op_type);
	}

	/*
	 * Always make sure kobject ports have immovable receive rights.
	 *
	 * They use the ip_kobject field of the ipc_port structure,
	 * which is unioned with ip_imp_task.
	 *
	 * Thus, general use of a kobject port as a receive right can
	 * cause type confusion in the importance code.
	 */
	ipc_release_assert(pol->pol_movability == IPC_MOVE_POLICY_NEVER);
	if (ops->iko_op_no_senders) {
		pol->pol_notif_no_senders = true;
	}

	pol->pol_name               = ops->iko_op_name;
	pol->pol_kobject_stable     = ops->iko_op_stable;
	pol->pol_kobject_permanent  = ops->iko_op_permanent;
	pol->pol_kobject_no_senders = ops->iko_op_no_senders;
	pol->pol_label_free         = ops->iko_op_label_free;
	pol->pol_movable_send       = ops->iko_op_movable_send;
}
1588 
1589 __startup_func
1590 static void
ipc_policy_set_defaults(void)1591 ipc_policy_set_defaults(void)
1592 {
1593 	/*
1594 	 * Check that implicit init to 0 picks the right "values"
1595 	 * for all properties.
1596 	 */
1597 	static_assert(IPC_MOVE_POLICY_NEVER == 0);
1598 
1599 	for (uint32_t i = 0; i < IOT_UNKNOWN; i++) {
1600 		struct ipc_object_policy *pol = &ipc_policy_array[i];
1601 
1602 		if (!pol->pol_kobject_no_senders) {
1603 			pol->pol_kobject_no_senders = no_kobject_no_senders;
1604 		}
1605 		if (!pol->pol_label_free) {
1606 			pol->pol_label_free = no_label_free;
1607 		}
1608 	}
1609 }
1610 STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_policy_set_defaults);
1611 
1612 #pragma mark exception port policy
1613 
/*
 *	Routine:	ipc_is_valid_exception_port
 *	Purpose:
 *		Check whether @port may be used as an exception port.
 *		@task may be TASK_NULL; the ux handler port is accepted
 *		only in that case.
 */
bool
ipc_is_valid_exception_port(
	task_t task,
	ipc_port_t port)
{
	if (task == TASK_NULL && is_ux_handler_port(port)) {
		return true;
	}

	if (ip_is_exception_port(port)) {
		return true;
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but exception ports to still be set.
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return false;
	}

	if (ip_is_immovable_receive(port)) {
		/*
		 * rdar://153108740
		 * Temporarily allow service ports until telemetry is clean.
		 */
		if (ip_is_strong_service_port(port)) {
			return true;
		}
		return false;
	}

	return true;
}
1649