xref: /xnu-12377.1.9/osfmk/ipc/ipc_policy.c (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2023 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/kern_return.h>
30 #include <mach/mach_types.h>
31 #include <mach/port.h>
32 #include <mach/mk_timer.h>
33 #include <mach/notify.h>
34 
35 #include <kern/assert.h>
36 #include <kern/exc_guard.h>
37 #include <kern/ipc_kobject.h>
38 #include <kern/kern_types.h>
39 #include <kern/mach_filter.h>
40 #include <kern/task.h>
41 #include <kern/ux_handler.h> /* is_ux_handler_port() */
42 
43 #include <vm/vm_map_xnu.h> /* current_map() */
44 #include <vm/vm_protos.h> /* current_proc() */
45 
46 #include <ipc/ipc_policy.h>
47 #include <ipc/ipc_service_port.h>
48 #include <ipc/port.h>
49 
50 #if CONFIG_CSR
51 #include <sys/csr.h>
52 #endif
53 #include <sys/codesign.h>
54 #include <sys/proc_ro.h>
55 #include <sys/reason.h>
56 
57 #include <libkern/coreanalytics/coreanalytics.h>
58 
59 extern bool proc_is_simulated(struct proc *);
60 extern char *proc_name_address(struct proc *p);
61 extern int  exit_with_guard_exception(
62 	struct proc            *p,
63 	mach_exception_data_type_t code,
64 	mach_exception_data_type_t subcode);
65 
#pragma mark policy tunables

/* global cap on VM space a single kmsg may consume (defined elsewhere) */
extern const vm_size_t  ipc_kmsg_max_vm_space;

#if IPC_HAS_LEGACY_MACH_MSG_TRAP
#if DEVELOPMENT || DEBUG
/* boot-arg escape hatch to keep the legacy mach_msg() trap usable */
static TUNABLE(bool, allow_legacy_mach_msg, "allow_legacy_mach_msg", false);
#endif /* DEVELOPMENT || DEBUG */
#endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */

/* a boot-arg to enable/disable OOL port array restrictions */
#if XNU_TARGET_OS_XR
TUNABLE(bool, ool_port_array_enforced, "ool_port_array_enforced", false);
#else
TUNABLE(bool, ool_port_array_enforced, "ool_port_array_enforced", true);
#endif /* XNU_TARGET_OS_XR */

/* Note: Consider Developer Mode when changing the default. */
TUNABLE(ipc_control_port_options_t, ipc_control_port_options,
    "ipc_control_port_options",
    ICP_OPTIONS_IMMOVABLE_1P_HARD |
    ICP_OPTIONS_PINNED_1P_HARD |
#if !XNU_TARGET_OS_OSX
    ICP_OPTIONS_IMMOVABLE_3P_HARD |
#endif
    ICP_OPTIONS_PINNED_3P_SOFT);

/* boot-arg to toggle service port hardening */
TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", true);

/* The bootarg to disable ALL ipc policy violation telemetry */
TUNABLE(bool, ipcpv_telemetry_enabled, "-ipcpv_telemetry_enabled", true);

/* boot-arg for provisional reply port enforcement */
#if XNU_TARGET_OS_OSX || XNU_TARGET_OS_BRIDGE
TUNABLE(bool, prp_enforcement_enabled, "-prp_enforcement_enabled", false);
#else
TUNABLE(bool, prp_enforcement_enabled, "-prp_enforcement_enabled", true);
#endif /* XNU_TARGET_OS_OSX || XNU_TARGET_OS_BRIDGE */

/*
 * bootargs for reply port semantics on bootstrap ports
 */
TUNABLE(bool, bootstrap_port_telemetry_enabled, "-bootstrap_port_telemetry_enabled", true);
TUNABLE(bool, bootstrap_port_enforcement_enabled, "-bootstrap_port_enforcement_enabled", true);

/* Enables reply port/voucher/persona debugging code */
TUNABLE(bool, enforce_strict_reply, "-enforce_strict_reply", false);
113 
114 #pragma mark policy options
115 
/*
 * Compute the IPC space policy bits that apply to a task.
 *
 * The result records which hardening tiers the task is subject to
 * (platform, enhanced V0/V1/V2), plus modifier bits for binaries that
 * are exempted for bincompat reasons (simulator, Rosetta, opt-out).
 */
ipc_space_policy_t
ipc_policy_for_task(task_t task)
{
#if XNU_TARGET_OS_OSX
	struct proc *proc = get_bsdtask_info(task);
#endif /* XNU_TARGET_OS_OSX */
	ipc_space_policy_t policy = IPC_SPACE_POLICY_DEFAULT;
	uint32_t ro_flags;

	/* the kernel's own IPC space gets a dedicated policy bit */
	if (task == kernel_task) {
		return policy | IPC_SPACE_POLICY_KERNEL;
	}

	ro_flags = task_ro_flags_get(task);
	/* platform binaries are implicitly at the newest enhanced tier */
	if (ro_flags & TFRO_PLATFORM) {
		policy |= IPC_SPACE_POLICY_PLATFORM;
		policy |= IPC_POLICY_ENHANCED_V2;
	}

	/* map the task's platform-restrictions version onto an enhanced tier */
	if (task_get_platform_restrictions_version(task) >= 2) {
		policy |= IPC_POLICY_ENHANCED_V2;
	} else if (task_get_platform_restrictions_version(task) == 1) {
		policy |= IPC_POLICY_ENHANCED_V1;
#if XNU_TARGET_OS_OSX
	} else if (proc && csproc_hardened_runtime(proc)) {
		/* hardened-runtime macOS binaries get the baseline V0 tier */
		policy |= IPC_POLICY_ENHANCED_V0;
#endif /* XNU_TARGET_OS_OSX */
	}

#if XNU_TARGET_OS_OSX
	if (task_opted_out_mach_hardening(task)) {
		policy |= IPC_SPACE_POLICY_OPTED_OUT;
	}
#endif /* XNU_TARGET_OS_OSX */

	/*
	 * policy modifiers
	 */
#if XNU_TARGET_OS_OSX
	if (proc && proc_is_simulated(proc)) {
		policy |= IPC_SPACE_POLICY_SIMULATED;
	}
#endif
#if CONFIG_ROSETTA
	if (task_is_translated(task)) {
		policy |= IPC_SPACE_POLICY_TRANSLATED;
	}
#endif

	return policy;
}
167 
168 
169 inline ipc_space_policy_t
ipc_convert_msg_options_to_space(mach_msg_option64_t opts)170 ipc_convert_msg_options_to_space(mach_msg_option64_t opts)
171 {
172 	return opts >> MACH64_POLICY_SHIFT;
173 }
174 
/*
 * Sanitize user-supplied message options for @c task and fold the
 * sender's space policy into the high (kernel-only) option bits.
 */
mach_msg_option64_t
ipc_current_msg_options(
	task_t                  task,
	mach_msg_option64_t     opts)
{
	uint32_t ro_flags = task_ro_flags_get(task);

	/*
	 * Step 1: convert to kernel flags
	 * - clear any kernel only flags
	 * - convert MACH_SEND_FILTER_NONFATAL which is aliased to the
	 *   MACH_SEND_ALWAYS kernel flag into MACH64_POLICY_FILTER_NON_FATAL.
	 */
	opts &= MACH64_MSG_OPTION_USER;

	if (opts & MACH64_SEND_FILTER_NONFATAL) {
		/*
		 * Re-home the aliased bit onto its dedicated policy flag so
		 * the kernel-only MACH64_SEND_ALWAYS meaning cannot be
		 * smuggled in from userspace.
		 */
		opts &= ~MACH64_SEND_FILTER_NONFATAL;
		opts |= MACH64_POLICY_FILTER_MSG;
	}
	if (ro_flags & TFRO_FILTER_MSG) {
		opts |= MACH64_POLICY_FILTER_MSG;
	}

	/*
	 * Step 2: derive policy flags from the current context
	 */
	{
		/*
		 * mach_msg_option64_t can't use IPC_SPACE_POLICY_BASE(),
		 * check using this MACH64_POLICY_SHIFT is legitimate.
		 */
#define verify_policy_enum(name) \
	static_assert(IPC_SPACE_POLICY_ ## name == \
	    MACH64_POLICY_ ## name >> MACH64_POLICY_SHIFT)

		verify_policy_enum(DEFAULT);
		verify_policy_enum(ENHANCED);
		verify_policy_enum(PLATFORM);
		verify_policy_enum(KERNEL);
		verify_policy_enum(SIMULATED);
		verify_policy_enum(TRANSLATED);
		verify_policy_enum(OPTED_OUT);
		verify_policy_enum(ENHANCED_V0);
		verify_policy_enum(ENHANCED_V1);
		verify_policy_enum(ENHANCED_V2);
		verify_policy_enum(ENHANCED_VERSION_MASK);
		verify_policy_enum(MASK);

#undef verify_policy_enum
	}

	/* stash the sender's space policy into the high option bits */
	opts |= (uint64_t)ipc_space_policy(task->itk_space) << MACH64_POLICY_SHIFT;

	return opts;
}
232 
233 mach_msg_return_t
ipc_preflight_msg_option64(mach_msg_option64_t opts)234 ipc_preflight_msg_option64(mach_msg_option64_t opts)
235 {
236 	bool success = true;
237 
238 	if ((opts & MACH64_SEND_MSG) && (opts & MACH64_MACH_MSG2)) {
239 		mach_msg_option64_t cfi = opts & MACH64_MSG_OPTION_CFI_MASK;
240 
241 #if !XNU_TARGET_OS_OSX
242 		cfi &= ~MACH64_SEND_ANY;
243 #endif
244 		/* mach_msg2() calls must have exactly _one_ of these set */
245 		if (cfi == 0 || (cfi & (cfi - 1)) != 0) {
246 			success = false;
247 		}
248 
249 		/* vector calls are only supported for message queues */
250 		if ((opts & (MACH64_SEND_MQ_CALL | MACH64_SEND_ANY)) == 0 &&
251 		    (opts & MACH64_MSG_VECTOR)) {
252 			success = false;
253 		}
254 	}
255 
256 	if (success) {
257 		return MACH_MSG_SUCCESS;
258 	}
259 
260 	mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
261 	if (opts & MACH64_MACH_MSG2) {
262 		return MACH_SEND_INVALID_OPTIONS;
263 	}
264 	return KERN_NOT_SUPPORTED;
265 }
266 
267 #pragma mark helpers
268 
269 bool
ipc_should_apply_policy(const ipc_space_policy_t current_policy,const ipc_space_policy_t requested_level)270 ipc_should_apply_policy(
271 	const ipc_space_policy_t current_policy,
272 	const ipc_space_policy_t requested_level)
273 {
274 	/* Do not apply security policies on these binaries to avoid bincompat regression */
275 	if ((current_policy & IPC_SPACE_POLICY_SIMULATED) ||
276 	    (current_policy & IPC_SPACE_POLICY_OPTED_OUT) ||
277 	    (current_policy & IPC_SPACE_POLICY_TRANSLATED)) {
278 		return false;
279 	}
280 
281 	/* Check versioning for applying platform restrictions policy */
282 	if (requested_level & current_policy & IPC_SPACE_POLICY_ENHANCED) {
283 		/* Platform is always opted into platform restrictions */
284 		if (current_policy & IPC_SPACE_POLICY_PLATFORM) {
285 			return true;
286 		}
287 
288 		const ipc_space_policy_t requested_version = requested_level & IPC_SPACE_POLICY_ENHANCED_VERSION_MASK;
289 		const ipc_space_policy_t current_es_version = current_policy & IPC_SPACE_POLICY_ENHANCED_VERSION_MASK;
290 		assert(requested_version != 0);
291 		return requested_version <= current_es_version;
292 	}
293 	return current_policy & requested_level;
294 }
295 
296 #pragma mark legacy trap policies
297 #if IPC_HAS_LEGACY_MACH_MSG_TRAP
298 
/*
 * CoreAnalytics event recording each legacy mach_msg() trap allowed
 * through for bincompat: the MIG message id plus the sender's
 * platform/SDK and code-signing identity.
 */
CA_EVENT(mach_msg_trap_event,
    CA_INT, msgh_id,
    CA_INT, sw_platform,
    CA_INT, sdk,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
306 
307 static void
mach_msg_legacy_send_analytics(mach_msg_id_t msgh_id,uint32_t platform,uint32_t sdk)308 mach_msg_legacy_send_analytics(
309 	mach_msg_id_t           msgh_id,
310 	uint32_t                platform,
311 	uint32_t                sdk)
312 {
313 	char *proc_name = proc_name_address(current_proc());
314 	const char *team_id = csproc_get_teamid(current_proc());
315 	const char *signing_id = csproc_get_identity(current_proc());
316 
317 	ca_event_t ca_event = CA_EVENT_ALLOCATE(mach_msg_trap_event);
318 	CA_EVENT_TYPE(mach_msg_trap_event) * msg_event = ca_event->data;
319 
320 	msg_event->msgh_id = msgh_id;
321 	msg_event->sw_platform = platform;
322 	msg_event->sdk = sdk;
323 
324 	if (proc_name) {
325 		strlcpy(msg_event->proc_name, proc_name, CA_PROCNAME_LEN);
326 	}
327 
328 	if (team_id) {
329 		strlcpy(msg_event->team_id, team_id, CA_TEAMID_MAX_LEN);
330 	}
331 
332 	if (signing_id) {
333 		strlcpy(msg_event->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
334 	}
335 
336 	CA_EVENT_SEND(ca_event);
337 }
338 
339 static bool
ipc_policy_allow_legacy_mach_msg_trap_for_platform(mach_msg_id_t msgid)340 ipc_policy_allow_legacy_mach_msg_trap_for_platform(
341 	mach_msg_id_t           msgid)
342 {
343 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
344 	uint32_t platform = pro->p_platform_data.p_platform;
345 	uint32_t sdk = pro->p_platform_data.p_sdk;
346 	uint32_t sdk_major = sdk >> 16;
347 
348 	/*
349 	 * Special rules, due to unfortunate bincompat reasons,
350 	 * allow for a hardcoded list of MIG calls to XNU to go through
351 	 * for macOS apps linked against an SDK older than 12.x.
352 	 */
353 	switch (platform) {
354 	case PLATFORM_MACOS:
355 		if (sdk == 0 || sdk_major > 12) {
356 			return false;
357 		}
358 		break;
359 	default:
360 		/* disallow for any non-macOS for platform */
361 		return false;
362 	}
363 
364 	switch (msgid) {
365 	case 0xd4a: /* task_threads */
366 	case 0xd4d: /* task_info */
367 	case 0xe13: /* thread_get_state */
368 	case 0x12c4: /* mach_vm_read */
369 	case 0x12c8: /* mach_vm_read_overwrite */
370 		mach_msg_legacy_send_analytics(msgid, platform, sdk);
371 		return true;
372 	default:
373 		return false;
374 	}
375 }
376 
377 
/*
 * Decide whether the current task may use the legacy mach_msg() send
 * trap for @c msgid.
 *
 * Returns MACH_MSG_SUCCESS when allowed; otherwise raises a
 * kGUARD_EXC_INVALID_OPTIONS guard exception and returns
 * KERN_NOT_SUPPORTED.
 */
mach_msg_return_t
ipc_policy_allow_legacy_send_trap(
	mach_msg_id_t           msgid,
	mach_msg_option64_t     opts)
{
	/* equivalent to ENHANCED_V0 */
	if ((opts & MACH64_POLICY_ENHANCED) == 0) {
#if __x86_64__
		if (current_map()->max_offset <= VM_MAX_ADDRESS) {
			/*
			 * Legacy mach_msg_trap() is the only
			 * available thing for 32-bit tasks
			 */
			return MACH_MSG_SUCCESS;
		}
#endif /* __x86_64__ */
#if CONFIG_ROSETTA
		if (opts & MACH64_POLICY_TRANSLATED) {
			/*
			 * Similarly, on Rosetta, allow mach_msg_trap()
			 * as those apps likely can't be fixed anymore
			 */
			return MACH_MSG_SUCCESS;
		}
#endif
#if DEVELOPMENT || DEBUG
		if (allow_legacy_mach_msg) {
			/* Honor boot-arg */
			return MACH_MSG_SUCCESS;
		}
#endif /* DEVELOPMENT || DEBUG */
		/* hardcoded bincompat allow-list for old macOS SDKs */
		if (ipc_policy_allow_legacy_mach_msg_trap_for_platform(msgid)) {
			return MACH_MSG_SUCCESS;
		}
	}

	mach_port_guard_exception(msgid, opts, kGUARD_EXC_INVALID_OPTIONS);
	/*
	 * this should be MACH_SEND_INVALID_OPTIONS,
	 * but this is a new mach_msg2 error only.
	 */
	return KERN_NOT_SUPPORTED;
}
421 
422 
423 #endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */
424 #pragma mark ipc policy telemetry
425 
426 /*
427  * As CA framework replies on successfully allocating zalloc memory,
428  * we maintain a small buffer that gets flushed when full. This helps us avoid taking spinlocks when working with CA.
429  */
#define IPC_POLICY_VIOLATIONS_RB_SIZE         2

/*
 * Stripped down version of service port's string name. This is to avoid overwhelming CA's dynamic memory allocation.
 */
#define CA_MACH_SERVICE_PORT_NAME_LEN         86

/* One staged telemetry event, buffered until the ring buffer is flushed to CA. */
struct ipc_policy_violations_rb_entry {
	char proc_name[CA_PROCNAME_LEN];                  /* offending process name */
	char service_name[CA_MACH_SERVICE_PORT_NAME_LEN]; /* destination service port name */
	char team_id[CA_TEAMID_MAX_LEN];                  /* code-signing team identifier */
	char signing_id[CA_SIGNINGID_MAX_LEN];            /* code-signing identity */
	ipc_policy_violation_id_t violation_id;           /* which policy was violated */
	int  sw_platform;                                 /* sender's platform */
	int  aux_data;                                    /* violation-specific payload */
	int  sdk;                                         /* sender's SDK version */
};
struct ipc_policy_violations_rb_entry ipc_policy_violations_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
/* next free slot in ipc_policy_violations_rb; guarded by ipc_telemetry_lock */
static uint8_t ipc_policy_violations_rb_index = 0;

#if DEBUG || DEVELOPMENT
/* sysctl debug.ipcpv_telemetry_count */
_Atomic unsigned int ipcpv_telemetry_count = 0;
#endif

LCK_GRP_DECLARE(ipc_telemetry_lock_grp, "ipc_telemetry_lock_grp");
LCK_TICKET_DECLARE(ipc_telemetry_lock, &ipc_telemetry_lock_grp);

/*
 * Telemetry: report back the process name violating ipc policy. Note that this event can be used to report
 * any type of ipc violation through a ipc_policy_violation_id_t. It is named reply_port_semantics_violations
 * because we are reusing an existing event.
 */
CA_EVENT(reply_port_semantics_violations,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name,
    CA_STATIC_STRING(CA_MACH_SERVICE_PORT_NAME_LEN), service_name,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_INT, reply_port_semantics_violation,
    CA_INT, msgh_id); /* for aux_data, keeping the legacy name msgh_id to avoid CA shenanigan */
470 
471 static void
send_telemetry(const struct ipc_policy_violations_rb_entry * entry)472 send_telemetry(
473 	const struct ipc_policy_violations_rb_entry *entry)
474 {
475 	ca_event_t ca_event = CA_EVENT_ALLOCATE_FLAGS(reply_port_semantics_violations, Z_NOWAIT);
476 	if (ca_event) {
477 		CA_EVENT_TYPE(reply_port_semantics_violations) * event = ca_event->data;
478 
479 		strlcpy(event->service_name, entry->service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
480 		strlcpy(event->proc_name, entry->proc_name, CA_PROCNAME_LEN);
481 		strlcpy(event->team_id, entry->team_id, CA_TEAMID_MAX_LEN);
482 		strlcpy(event->signing_id, entry->signing_id, CA_SIGNINGID_MAX_LEN);
483 		event->reply_port_semantics_violation = entry->violation_id;
484 		event->msgh_id = entry->aux_data;
485 
486 		CA_EVENT_SEND(ca_event);
487 	}
488 }
489 
490 /* Routine: flush_ipc_policy_violations_telemetry
491  * Conditions:
492  *              Assumes ipc_policy_type is valid
493  *              Assumes ipc telemetry lock is held.
494  *              Unlocks it before returning.
495  */
static void
flush_ipc_policy_violations_telemetry(void)
{
	struct ipc_policy_violations_rb_entry local_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
	uint8_t local_rb_index = 0;

	/* the index may equal the size (buffer full) but never exceed it */
	if (__improbable(ipc_policy_violations_rb_index > IPC_POLICY_VIOLATIONS_RB_SIZE)) {
		panic("Invalid ipc policy violation buffer index %d > %d",
		    ipc_policy_violations_rb_index, IPC_POLICY_VIOLATIONS_RB_SIZE);
	}

	/*
	 * We operate on local copy of telemetry buffer because CA framework relies on successfully
	 * allocating zalloc memory. It can not do that if we are accessing the shared buffer
	 * with spin locks held.
	 */
	while (local_rb_index != ipc_policy_violations_rb_index) {
		local_rb[local_rb_index] = ipc_policy_violations_rb[local_rb_index];
		local_rb_index++;
	}

	/* drop the lock before send_telemetry(), which may allocate */
	lck_ticket_unlock(&ipc_telemetry_lock);

	while (local_rb_index > 0) {
		struct ipc_policy_violations_rb_entry *entry = &local_rb[--local_rb_index];
		send_telemetry(entry);
	}

	/*
	 * Finally call out the buffer as empty. This is also a sort of rate limiting mechanisms for the events.
	 * Events will get dropped until the buffer is not fully flushed.
	 */
	lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
	ipc_policy_violations_rb_index = 0;
}
531 
532 void
ipc_stash_policy_violations_telemetry(ipc_policy_violation_id_t violation_id,ipc_port_t service_port,int aux_data)533 ipc_stash_policy_violations_telemetry(
534 	ipc_policy_violation_id_t    violation_id,
535 	ipc_port_t                   service_port,
536 	int                          aux_data)
537 {
538 	if (!ipcpv_telemetry_enabled) {
539 		return;
540 	}
541 
542 	struct ipc_policy_violations_rb_entry *entry;
543 	char *service_name = (char *) "unknown";
544 	task_t task = current_task_early();
545 	int pid = -1;
546 
547 #if CONFIG_SERVICE_PORT_INFO
548 	if (IP_VALID(service_port)) {
549 		/*
550 		 * dest_port lock must be held to avoid race condition
551 		 * when accessing ip_splabel rdar://139066947
552 		 */
553 		struct mach_service_port_info sp_info;
554 		ipc_object_label_t label = ip_mq_lock_label_get(service_port);
555 		if (io_state_active(label.io_state) && ip_is_any_service_port_type(label.io_type)) {
556 			ipc_service_port_label_get_info(label.iol_service, &sp_info);
557 			service_name = sp_info.mspi_string_name;
558 		}
559 		ip_mq_unlock_label_put(service_port, &label);
560 	}
561 #endif /* CONFIG_SERVICE_PORT_INFO */
562 
563 	if (task) {
564 		pid = task_pid(task);
565 	}
566 
567 	if (task) {
568 		struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
569 		uint32_t platform = pro->p_platform_data.p_platform;
570 		uint32_t sdk = pro->p_platform_data.p_sdk;
571 		char *proc_name = (char *) "unknown";
572 #ifdef MACH_BSD
573 		proc_name = proc_name_address(get_bsdtask_info(task));
574 #endif /* MACH_BSD */
575 		const char *team_id = csproc_get_identity(current_proc());
576 		const char *signing_id = csproc_get_teamid(current_proc());
577 
578 		lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
579 
580 		if (ipc_policy_violations_rb_index >= IPC_POLICY_VIOLATIONS_RB_SIZE) {
581 			/* Dropping the event since buffer is full. */
582 			lck_ticket_unlock(&ipc_telemetry_lock);
583 			return;
584 		}
585 		entry = &ipc_policy_violations_rb[ipc_policy_violations_rb_index++];
586 		strlcpy(entry->proc_name, proc_name, CA_PROCNAME_LEN);
587 
588 		strlcpy(entry->service_name, service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
589 		entry->violation_id = violation_id;
590 
591 		if (team_id) {
592 			strlcpy(entry->team_id, team_id, CA_TEAMID_MAX_LEN);
593 		}
594 
595 		if (signing_id) {
596 			strlcpy(entry->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
597 		}
598 		entry->aux_data = aux_data;
599 		entry->sw_platform = platform;
600 		entry->sdk = sdk;
601 	}
602 
603 	if (ipc_policy_violations_rb_index == IPC_POLICY_VIOLATIONS_RB_SIZE) {
604 		flush_ipc_policy_violations_telemetry();
605 	}
606 
607 	lck_ticket_unlock(&ipc_telemetry_lock);
608 }
609 
610 #if DEBUG || DEVELOPMENT
611 void
ipc_inc_telemetry_count(void)612 ipc_inc_telemetry_count(void)
613 {
614 	unsigned int count = os_atomic_load(&ipcpv_telemetry_count, relaxed);
615 	if (!os_add_overflow(count, 1, &count)) {
616 		os_atomic_store(&ipcpv_telemetry_count, count, relaxed);
617 	}
618 }
619 #endif /* DEBUG || DEVELOPMENT */
620 
621 /*!
622  * @brief
623  * Checks that this message conforms to reply port policies, which are:
624  * 1. IOT_REPLY_PORT's must be make-send-once disposition
625  * 2. You must use an IOT_REPLY_PORT (or weak variant) if the dest_port requires it
626  *
627  * @param reply_port    the message local/reply port
628  * @param dest_port     the message remote/dest port
629  *
630  * @returns
631  * - true  if there is a violation in the security policy for this mach msg
632  * - false otherwise
633  */
static mach_msg_return_t
ipc_validate_local_port(
	mach_port_t         reply_port,
	mach_port_t         dest_port,
	mach_msg_option64_t opts)
{
	assert(IP_VALID(dest_port));
	/* An empty reply port, or an inactive reply port / dest port violates nothing */
	if (!IP_VALID(reply_port) || !ip_active(reply_port) || !ip_active(dest_port)) {
		return MACH_MSG_SUCCESS;
	}

	/* a real reply port always satisfies the policy */
	if (ip_is_reply_port(reply_port)) {
		return MACH_MSG_SUCCESS;
	}

	ipc_space_policy_t pol = ipc_convert_msg_options_to_space(opts);
	/* skip translated and simulated process */
	if (!ipc_should_apply_policy((pol), IPC_SPACE_POLICY_DEFAULT)) {
		return MACH_MSG_SUCCESS;
	}

	/* kobject enforcement */
	if (ip_is_kobject(dest_port) &&
	    ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V1)) {
		mach_port_guard_exception(ip_get_receiver_name(dest_port), 0, kGUARD_EXC_KOBJECT_REPLY_PORT_SEMANTICS);
		return MACH_SEND_INVALID_REPLY;
	}

	/*
	 * Destinations that do not demand reply-port semantics, and senders
	 * using a provisional reply port, are allowed through.
	 */
	if (!ipc_policy(dest_port)->pol_enforce_reply_semantics || ip_is_provisional_reply_port(reply_port)) {
		return MACH_MSG_SUCCESS;
	}

	/* bootstrap port defense */
	if (ip_is_bootstrap_port(dest_port) && ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V2)) {
		/* report at most once per space, and only if telemetry is on */
		if (bootstrap_port_telemetry_enabled &&
		    !ipc_space_has_telemetry_type(current_space(), IS_HAS_BOOTSTRAP_PORT_TELEMETRY)) {
			ipc_stash_policy_violations_telemetry(IPCPV_BOOTSTRAP_PORT, dest_port, 0);
		}
		if (bootstrap_port_enforcement_enabled) {
			mach_port_guard_exception(ip_get_receiver_name(dest_port), 1, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
			return MACH_SEND_INVALID_REPLY;
		}
	}

	/* regular enforcement */
	if (!ip_is_bootstrap_port(dest_port)) {
		if (ip_type(dest_port) == IOT_SERVICE_PORT) {
			ipc_stash_policy_violations_telemetry(IPCPV_REPLY_PORT_SEMANTICS_OPTOUT, dest_port, 0);
		}
		mach_port_guard_exception(ip_get_receiver_name(dest_port), 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
		return MACH_SEND_INVALID_REPLY;
	}

	return MACH_MSG_SUCCESS;
}
690 
691 #pragma mark MACH_SEND_MSG policies
692 
693 mach_msg_return_t
ipc_validate_kmsg_header_schema_from_user(mach_msg_user_header_t * hdr __unused,mach_msg_size_t dsc_count,mach_msg_option64_t opts)694 ipc_validate_kmsg_header_schema_from_user(
695 	mach_msg_user_header_t *hdr __unused,
696 	mach_msg_size_t         dsc_count,
697 	mach_msg_option64_t     opts)
698 {
699 	if (opts & MACH64_SEND_KOBJECT_CALL) {
700 		if (dsc_count > IPC_KOBJECT_DESC_MAX) {
701 			return MACH_SEND_TOO_LARGE;
702 		}
703 	}
704 
705 	return MACH_MSG_SUCCESS;
706 }
707 
/*
 * Body-level schema validation for user messages: bounds the number of
 * out-of-line ports and the total VM space the message may consume.
 */
mach_msg_return_t
ipc_validate_kmsg_schema_from_user(
	mach_msg_header_t      *kdata,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts __unused)
{
	mach_msg_kbase_t *kbase = NULL;
	vm_size_t vm_size;

	/*
	 * NOTE(review): kbase is computed but not otherwise used in this
	 * function as visible here — confirm whether it is intentionally kept.
	 */
	if (kdata->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		kbase = mach_msg_header_to_kbase(kdata);
	}

	if (send_uctx->send_dsc_port_count > IPC_KMSG_MAX_OOL_PORT_COUNT) {
		return MACH_SEND_TOO_LARGE;
	}

	/*
	 * Total charged VM = OOL memory + space for OOL port arrays;
	 * the multiplication cannot overflow because the port count was
	 * bounded just above, but the sum is checked explicitly.
	 */
	if (os_add_overflow(send_uctx->send_dsc_vm_size,
	    send_uctx->send_dsc_port_count * sizeof(mach_port_t), &vm_size)) {
		return MACH_SEND_TOO_LARGE;
	}
	if (vm_size > ipc_kmsg_max_vm_space) {
		return MACH_MSG_VM_KERNEL;
	}

	return MACH_MSG_SUCCESS;
}
735 
/*
 * Evaluate send-side message filtering for a user message to @c dport.
 *
 * Returns MACH_SEND_MSG_FILTERED when the message is denied by the
 * destination's filter policy (raising a guard exception unless the
 * sender opted into non-fatal filtering), MACH_MSG_SUCCESS otherwise.
 */
static mach_msg_return_t
ipc_filter_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_port_t             dport,
	mach_msg_option64_t     opts)
{
	static const uint32_t MACH_BOOTSTRAP_PORT_MSG_ID_MASK = ((1u << 24) - 1);

	mach_msg_filter_id fid = 0;
	ipc_object_label_t dlabel;
	mach_msg_id_t msg_id = hdr->msgh_id;
	struct ipc_conn_port_label *sblabel = NULL;

	dlabel = ip_mq_lock_label_get(dport);

	if (io_state_active(dlabel.io_state) && dlabel.io_filtered) {
		switch (dlabel.io_type) {
		case IOT_SERVICE_PORT:
		case IOT_WEAK_SERVICE_PORT:
			/*
			 * Mask the top byte for messages sent to launchd's bootstrap port.
			 * Filter any messages with domain 0 (as they correspond to MIG
			 * based messages)
			 */
			if (dlabel.iol_service->ispl_bootstrap_port) {
				if ((msg_id & ~MACH_BOOTSTRAP_PORT_MSG_ID_MASK) == 0) {
					ip_mq_unlock_label_put(dport, &dlabel);
					goto filtered_msg;
				}
				msg_id = msg_id & MACH_BOOTSTRAP_PORT_MSG_ID_MASK;
			}

			sblabel = dlabel.iol_service->ispl_sblabel;
			break;

		case IOT_CONNECTION_PORT:
			/* Connection ports can also have send-side message filters */
			sblabel = dlabel.iol_connection;
			break;

		default:
			break;
		}
	}
	/* retain the label so the policy can be evaluated after unlocking */
	if (sblabel) {
		mach_msg_filter_retain_sblabel_callback(sblabel);
	}

	ip_mq_unlock_label_put(dport, &dlabel);

	/* evaluate the filter policy against the (possibly masked) msg_id */
	if (sblabel && !mach_msg_fetch_filter_policy(sblabel, msg_id, &fid)) {
		goto filtered_msg;
	}
	return MACH_MSG_SUCCESS;

filtered_msg:
	/* fatal by default; senders may opt into a non-fatal error return */
	if ((opts & MACH64_POLICY_FILTER_NON_FATAL) == 0) {
		mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(hdr->msgh_remote_port);

		mach_port_guard_exception(dest_name, hdr->msgh_id,
		    kGUARD_EXC_MSG_FILTERED);
	}
	return MACH_SEND_MSG_FILTERED;
}
800 
801 static bool
ipc_policy_allow_send_only_kobject_calls(void)802 ipc_policy_allow_send_only_kobject_calls(void)
803 {
804 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
805 	uint32_t sdk = pro->p_platform_data.p_sdk;
806 	uint32_t sdk_major = sdk >> 16;
807 
808 	switch (pro->p_platform_data.p_platform) {
809 	case PLATFORM_IOS:
810 	case PLATFORM_MACCATALYST:
811 	case PLATFORM_TVOS:
812 		if (sdk == 0 || sdk_major > 17) {
813 			return false;
814 		}
815 		return true;
816 	case PLATFORM_MACOS:
817 		if (sdk == 0 || sdk_major > 14) {
818 			return false;
819 		}
820 		return true;
821 	case PLATFORM_WATCHOS:
822 		if (sdk == 0 || sdk_major > 10) {
823 			return false;
824 		}
825 		return true;
826 	default:
827 		return false;
828 	}
829 }
830 
static mach_msg_return_t
ipc_validate_kmsg_dest_from_user(
	mach_msg_header_t      *hdr,
	ipc_port_t              port,
	mach_msg_option64_t     opts)
{
	/*
	 * This is a _user_ message via mach_msg2_trap().
	 *
	 * To curb kobject port/message queue confusion and improve control flow
	 * integrity, mach_msg2_trap() invocations mandate the use of either
	 * MACH64_SEND_KOBJECT_CALL or MACH64_SEND_MQ_CALL and that the flag
	 * matches the underlying port type. (unless the call is from a simulator,
	 * since old simulators keep using mach_msg() in all cases indiscriminatingly.)
	 *
	 * Since:
	 *     (1) We make sure to always pass either MACH64_SEND_MQ_CALL or
	 *         MACH64_SEND_KOBJECT_CALL bit at all sites outside simulators
	 *         (checked by mach_msg2_trap());
	 *     (2) We checked in mach_msg2_trap() that _exactly_ one of the three bits is set.
	 *
	 * CFI check cannot be bypassed by simply setting MACH64_SEND_ANY.
	 */
#if XNU_TARGET_OS_OSX
	/* MACH64_SEND_ANY opts out of the CFI type check (macOS only) */
	if (opts & MACH64_SEND_ANY) {
		return MACH_MSG_SUCCESS;
	}
#endif /* XNU_TARGET_OS_OSX */

	natural_t otype = ip_type(port);
	if (otype == IOT_TIMER_PORT) {
#if XNU_TARGET_OS_OSX
		/* enhanced-security binaries may not message timer ports at all */
		if (__improbable(opts & MACH64_POLICY_ENHANCED)) {
			return MACH_SEND_INVALID_OPTIONS;
		}
		/*
		 * For bincompat, let's still allow user messages to timer port, but
		 * force MACH64_SEND_MQ_CALL flag for memory segregation.
		 */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
#else
		return MACH_SEND_INVALID_OPTIONS;
#endif
	} else if (io_is_kobject_type(otype)) {
		/* DriverKit objects take the dedicated MACH64_SEND_DK_CALL path */
		if (otype == IKOT_UEXT_OBJECT) {
			if (__improbable(!(opts & MACH64_SEND_DK_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
		} else {
			/* Otherwise, caller must set MACH64_SEND_KOBJECT_CALL. */
			if (__improbable(!(opts & MACH64_SEND_KOBJECT_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}

			/* kobject calls must be a combined send/receive */
			if (__improbable((opts & MACH64_RCV_MSG) == 0)) {
				if ((opts & MACH64_POLICY_ENHANCED) ||
				    IP_VALID(hdr->msgh_local_port) ||
				    !ipc_policy_allow_send_only_kobject_calls()) {
					return MACH_SEND_INVALID_OPTIONS;
				}
			}
		}
#if CONFIG_CSR
	} else if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
		/*
		 * Allow MACH64_SEND_KOBJECT_CALL flag to message queues
		 * when SIP is off (for Mach-on-Mach emulation).
		 */
#endif /* CONFIG_CSR */
	} else {
		/* If destination is a message queue, caller must set MACH64_SEND_MQ_CALL */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	return MACH_MSG_SUCCESS;
}
912 
/*
 *	Routine:	ipc_validate_kmsg_header_from_user
 *	Purpose:
 *		Validates the header of a message sent from userspace against
 *		the IPC security policies: destination checks (mach_msg2 only),
 *		OOL port-array restrictions for enhanced-V2 binaries, reply
 *		port semantics, and message filtering.
 *	Returns:
 *		MACH_MSG_SUCCESS or a MACH_SEND_* error.  On
 *		MACH_SEND_INVALID_OPTIONS reached through `out`, a
 *		kGUARD_EXC_INVALID_OPTIONS guard is raised; the OOL
 *		port-array violations below raise their own
 *		kGUARD_EXC_DESCRIPTOR_VIOLATION guard and return directly.
 */
mach_msg_return_t
ipc_validate_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts)
{
	ipc_port_t dest_port = hdr->msgh_remote_port;
	ipc_port_t reply_port = hdr->msgh_local_port;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	ipc_space_policy_t current_policy;

	/* destination policy checks only apply to the mach_msg2 interface */
	if (opts & MACH64_MACH_MSG2) {
		mr = ipc_validate_kmsg_dest_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

	/*
	 * For enhanced v2 binaries, enforce two OOL port array restrictions:
	 *     - the receive right has to be of a type that explicitly
	 *       allows receiving that descriptor
	 *     - there can be no more than ONE such array in a kmsg
	 */
	current_policy = ipc_convert_msg_options_to_space(opts);
	if (ool_port_array_enforced &&
	    send_uctx->send_dsc_port_arrays_count &&
	    ipc_should_apply_policy(current_policy, IPC_POLICY_ENHANCED_V2)) {
		if (!ip_is_port_array_allowed(dest_port)) {
			/* descriptor violation guard, bypasses `out` on purpose */
			mach_port_guard_exception(current_policy,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_OPTIONS_OOL_RIGHT,
			    ip_type(dest_port)),
			    kGUARD_EXC_DESCRIPTOR_VIOLATION);

			return MACH_SEND_INVALID_OPTIONS;
		}

		if (send_uctx->send_dsc_port_arrays_count > 1) {
			/* descriptor violation guard, bypasses `out` on purpose */
			mach_port_guard_exception(current_policy,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_OPTIONS_OOL_ARRAYS,
			    send_uctx->send_dsc_port_arrays_count),
			    kGUARD_EXC_DESCRIPTOR_VIOLATION);

			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	/*
	 * Ensure that the reply field follows our security policies,
	 * including IOT_REPLY_PORT requirements
	 */
	mr = ipc_validate_local_port(reply_port, dest_port, opts);
	if (mr != MACH_MSG_SUCCESS) {
		goto out;
	}

	/*
	 * Evaluate message filtering if the sender is filtered.
	 */
	if ((opts & MACH64_POLICY_FILTER_MSG) &&
	    mach_msg_filter_at_least(MACH_MSG_FILTER_CALLBACKS_VERSION_1) &&
	    ip_to_object(dest_port)->io_filtered) {
		mr = ipc_filter_kmsg_header_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

out:
	/* invalid options are reported to the task as a guard exception */
	if (mr == MACH_SEND_INVALID_OPTIONS) {
		mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
	}
	return mr;
}
987 
988 #pragma mark receive immovability
989 
/*
 * Returns whether the receive right for `port` (named `name` in `space`)
 * is allowed to be moved out of that space.  Raises the appropriate
 * guard exception before denying.
 */
bool
ipc_move_receive_allowed(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_port_name_t        name)
{
	ipc_space_policy_t policy = ipc_space_policy(space);
	/*
	 * Check for service port before immovability so the task crashes
	 * with reason kGUARD_EXC_SERVICE_PORT_VIOLATION_FATAL
	 */
	if (service_port_defense_enabled &&
	    ip_type(port) == IOT_SERVICE_PORT &&
	    !task_is_initproc(space->is_task)) {
		mach_port_guard_exception(IPCPV_MOVE_SERVICE_PORT, name,
		    kGUARD_EXC_SERVICE_PORT_VIOLATION_FATAL);
		return false;
	}

	/*
	 * NOTE: this guard deliberately does not return false — moving a
	 * provisional reply port is still permitted for enhanced-V2 spaces;
	 * presumably this is telemetry-gathering (see
	 * IS_HAS_MOVE_PRP_TELEMETRY) ahead of a stricter policy.
	 */
	if (ip_type(port) == IOT_PROVISIONAL_REPLY_PORT &&
	    ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V2) &&
	    !ipc_space_has_telemetry_type(space, IS_HAS_MOVE_PRP_TELEMETRY)) {
		mach_port_guard_exception(name, 0, kGUARD_EXC_MOVE_PROVISIONAL_REPLY_PORT);
	}

	/* generic immovable receive rights (includes kobject ports) */
	if (ip_is_immovable_receive(port)) {
		mach_port_guard_exception(name, 0, kGUARD_EXC_IMMOVABLE);
		return false;
	}

	return true;
}
1022 
1023 #pragma mark send immovability
1024 
1025 
/*
 * Returns whether a send right for `port` (whose label is `label`)
 * should be marked IE_BITS_IMMOVABLE_SEND when entered into `task`'s
 * IPC space.  Called from ipc_entry_init().
 */
bool
ipc_should_mark_immovable_send(
	task_t task,
	ipc_port_t port,
	ipc_object_label_t label)
{
	/*
	 * some entitled processes are allowed to get movable control ports
	 * see `task_set_ctrl_port_default` - also all control ports are movable
	 * before/after the space becomes inactive. They will be made movable before
	 * the `task` is able to run code in userspace in `task_wait_to_return`
	 */
	if ((!task_is_immovable(task) ||
	    !is_active(task->itk_space)) &&
	    ip_is_tt_control_port_type(label.io_type)) {
		return false;
	}

	/* tasks get their own thread control port as immovable */
	if (label.io_type == IKOT_THREAD_CONTROL) {
		thread_t thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
		/* only immovable if the thread belongs to this task */
		if (thread != THREAD_NULL && task == get_threadtask(thread)) {
			return true;
		}
	}

	/* tasks get their own task control port as immovable */
	if (task->itk_task_ports[TASK_FLAVOR_CONTROL] == port) {
		return true;
	}

	/* special cases are handled, check the default policy */
	if (!ipc_policy(label)->pol_movable_send) {
		return true;
	}

	return false;
}
1064 
1065 /* requires: nothing locked, port is valid */
/*
 * Returns whether send rights to `port` are immovable for the current
 * task: either it is a task/thread control port and the current task is
 * immovable, or the port type's policy never allows movable sends.
 *
 * requires: nothing locked, port is valid
 */
static bool
ip_is_currently_immovable_send(ipc_port_t port)
{
	/* locks the port and gets its label; released on every path below */
	ipc_object_label_t label = ipc_port_lock_label_get(port);
	if (task_is_immovable(current_task()) &&
	    (ip_is_tt_control_port_type(label.io_type))) {
		/* most tasks cannot move their control ports */
		ip_mq_unlock_label_put(port, &label);
		return true;
	}

	/* otherwise defer to the per-type policy table */
	bool is_always_immovable_send = !ipc_policy(label)->pol_movable_send;
	ip_mq_unlock_label_put(port, &label);
	return is_always_immovable_send;
}
1081 
1082 bool
ipc_can_stash_naked_send(ipc_port_t port)1083 ipc_can_stash_naked_send(ipc_port_t port)
1084 {
1085 	return !IP_VALID(port) || !ip_is_currently_immovable_send(port);
1086 }
1087 
1088 #pragma mark entry init
1089 
/*
 * Initializes IPC entry `entry` (named `name` in `space`) to hold
 * `object` with rights `type` and user-reference count `urefs`.
 * Marks the entry immovable-send when the policy requires it.
 */
void
ipc_entry_init(
	ipc_space_t         space,
	ipc_object_t        object,
	mach_port_type_t    type,
	ipc_entry_t         entry,
	mach_port_urefs_t   urefs,
	mach_port_name_t    name)
{
	/* object type can be deadname, port, or a portset */
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);
	assert(entry);

	/* only send-type rights on live ports can be immovable-send */
	if (object && (type & MACH_PORT_TYPE_SEND_RIGHTS)) {
		ipc_port_t port = ip_object_to_port(object);
		ipc_object_label_t label = ip_label_get(port);

		if (ipc_should_mark_immovable_send(space->is_task, port, label)) {
			entry->ie_bits |= IE_BITS_IMMOVABLE_SEND;
		}
		/* balances the ip_label_get() above */
		io_label_set_and_put(&port->ip_object, &label);
	}
	entry->ie_object = object;
	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, name, entry);
}
1118 
1119 #pragma mark policy guard violations
1120 
1121 void
mach_port_guard_exception(uint32_t target,uint64_t payload,unsigned reason)1122 mach_port_guard_exception(uint32_t target, uint64_t payload, unsigned reason)
1123 {
1124 	mach_exception_code_t code = 0;
1125 	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
1126 	EXC_GUARD_ENCODE_FLAVOR(code, reason);
1127 	EXC_GUARD_ENCODE_TARGET(code, target);
1128 	mach_exception_subcode_t subcode = (uint64_t)payload;
1129 	thread_t t = current_thread();
1130 	bool fatal = FALSE;
1131 
1132 	if (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE &&
1133 	    (get_threadtask(t)->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)) {
1134 		fatal = true;
1135 	} else if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
1136 		fatal = true;
1137 	}
1138 	thread_guard_violation(t, code, subcode, fatal);
1139 }
1140 
/*
 * Raises an immovability guard exception for an attempt to move the
 * immovable send right `name`/`port` with disposition `disp`.
 * Only raises when the violating space is the current one.
 */
void
mach_port_guard_exception_immovable(
	ipc_space_t             space,
	mach_port_name_t        name,
	mach_port_t             port,
	mach_msg_type_name_t    disp,
	__assert_only ipc_entry_t entry)
{
	if (space == current_space()) {
		assert(entry->ie_bits & IE_BITS_IMMOVABLE_SEND);
		assert(entry->ie_port == port);

		/* task setting decides if control-port violations are fatal */
		boolean_t hard = task_get_control_port_options(current_task()) & TASK_CONTROL_PORT_IMMOVABLE_HARD;
		uint64_t payload = MPG_PAYLOAD(MPG_FLAGS_NONE, ip_type(port), disp);

		if (ip_is_tt_control_port(port)) {
			assert(task_is_immovable(current_task()));
			mach_port_guard_exception(name, payload,
			    hard ? kGUARD_EXC_IMMOVABLE : kGUARD_EXC_IMMOVABLE_NON_FATAL);
		} else {
			/* always fatal exception for non-control port violation */
			mach_port_guard_exception(name, payload, kGUARD_EXC_IMMOVABLE);
		}
	}
}
1166 
1167 void
mach_port_guard_exception_pinned(ipc_space_t space,mach_port_name_t name,uint64_t payload)1168 mach_port_guard_exception_pinned(
1169 	ipc_space_t             space,
1170 	mach_port_name_t        name,
1171 	uint64_t                payload)
1172 {
1173 	ipc_space_policy_t policy = ipc_space_policy(space);
1174 	int guard;
1175 
1176 	if (space != current_space()) {
1177 		guard = kGUARD_EXC_NONE;
1178 	} else if (policy &
1179 	    (IPC_SPACE_POLICY_TRANSLATED | IPC_SPACE_POLICY_SIMULATED)) {
1180 		guard = kGUARD_EXC_NONE;
1181 	} else if (ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V1)) {
1182 		if (ipc_control_port_options & ICP_OPTIONS_PINNED_1P_HARD) {
1183 			guard = kGUARD_EXC_MOD_REFS;
1184 		} else if (ipc_control_port_options & ICP_OPTIONS_PINNED_1P_SOFT) {
1185 			guard = kGUARD_EXC_MOD_REFS_NON_FATAL;
1186 		} else {
1187 			guard = kGUARD_EXC_NONE;
1188 		}
1189 	} else {
1190 		if (ipc_control_port_options & ICP_OPTIONS_PINNED_3P_HARD) {
1191 			guard = kGUARD_EXC_MOD_REFS;
1192 		} else if (ipc_control_port_options & ICP_OPTIONS_PINNED_3P_SOFT) {
1193 			guard = kGUARD_EXC_MOD_REFS_NON_FATAL;
1194 		} else {
1195 			guard = kGUARD_EXC_NONE;
1196 		}
1197 	}
1198 
1199 	if (guard != kGUARD_EXC_NONE) {
1200 		mach_port_guard_exception(name, payload, guard);
1201 	}
1202 }
1203 
1204 /*
1205  *	Routine:	mach_port_guard_ast
1206  *	Purpose:
1207  *		Raises an exception for mach port guard violation.
1208  *	Conditions:
1209  *		None.
1210  *	Returns:
1211  *		None.
1212  */
1213 
void
mach_port_guard_ast(
	thread_t                t,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode)
{
	unsigned int reason = EXC_GUARD_DECODE_GUARD_FLAVOR(code);
	task_t task = get_threadtask(t);
	unsigned int behavior = task->task_exc_guard;
	bool fatal = true;

	assert(task == current_task());
	assert(task != kernel_task);

	if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
		/*
		 * Fatal Mach port guards - always delivered synchronously if dev mode is on.
		 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
		 * deliver it synchronously and then kill the process, else kill the process
		 * and deliver the exception via EXC_CORPSE_NOTIFY.
		 */

		int flags = PX_DEBUG_NO_HONOR;
		exception_info_t info = {
			.os_reason = OS_REASON_GUARD,
			.exception_type = EXC_GUARD,
			.mx_code = code,
			.mx_subcode = subcode,
		};

		/* synchronous delivery succeeded: kill with a signal instead of a corpse */
		if (task_exception_notify(EXC_GUARD, code, subcode, fatal) == KERN_SUCCESS) {
			flags |= PX_PSIGNAL;
		}
		exit_with_mach_exception(get_bsdtask_info(task), info, flags);
	} else {
		/*
		 * Mach port guards controlled by task settings.
		 */

		/* Is delivery enabled */
		if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
			return;
		}

		/* If only once, make sure we're that once */
		while (behavior & TASK_EXC_GUARD_MP_ONCE) {
			/* atomically clear DELIVER so only one thread wins the race */
			uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_MP_DELIVER;

			if (os_atomic_cmpxchg(&task->task_exc_guard,
			    behavior, new_behavior, relaxed)) {
				break;
			}
			/* cmpxchg lost: re-read and re-check whether delivery is still on */
			behavior = task->task_exc_guard;
			if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
				return;
			}
		}
		/* optional flavors are fatal only when the task opted into MP_FATAL */
		fatal = (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)
		    && (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE);
		kern_return_t sync_exception_result;
		sync_exception_result = task_exception_notify(EXC_GUARD, code, subcode, fatal);

		if (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) {
			if (reason > MAX_OPTIONAL_kGUARD_EXC_CODE) {
				/* generate a simulated crash if not handled synchronously */
				if (sync_exception_result != KERN_SUCCESS) {
					task_violated_guard(code, subcode, NULL, TRUE);
				}
			} else {
				/*
				 * Only generate crash report if synchronous EXC_GUARD wasn't handled,
				 * but it has to die regardless.
				 */

				int flags = PX_DEBUG_NO_HONOR;
				exception_info_t info = {
					.os_reason = OS_REASON_GUARD,
					.exception_type = EXC_GUARD,
					.mx_code = code,
					.mx_subcode = subcode
				};

				if (sync_exception_result == KERN_SUCCESS) {
					flags |= PX_PSIGNAL;
				}

				exit_with_mach_exception(get_bsdtask_info(task), info, flags);
			}
		} else if (task->task_exc_guard & TASK_EXC_GUARD_MP_CORPSE) {
			/* Raise exception via corpse fork if not handled synchronously */
			if (sync_exception_result != KERN_SUCCESS) {
				task_violated_guard(code, subcode, NULL, TRUE);
			}
		}
	}
}
1310 
1311 #pragma mark notification policies
1312 
/*
 * Returns whether a port-destroyed notification may be registered on
 * `service_port` with `notify_port`, enforcing the service port
 * defense policy.  On denial, *payload is set to the
 * MPG_FLAGS_KERN_FAILURE_* reason for the guard exception.
 */
static bool
ipc_allow_service_port_register_pd(
	ipc_port_t              service_port,
	ipc_port_t              notify_port,
	uint64_t                *payload)
{
	/* boot-arg disables this security policy */
	if (!service_port_defense_enabled || !IP_VALID(notify_port)) {
		return true;
	}
	/* enforce this policy only on service port types */
	if (ip_is_any_service_port(service_port)) {
		/* Only launchd should be able to register for port destroyed notification on a service port. */
		if (!task_is_initproc(current_task())) {
			*payload = MPG_FLAGS_KERN_FAILURE_TASK;
			return false;
		}
		/* notify_port needs to be immovable */
		if (!ip_is_immovable_receive(notify_port)) {
			*payload = MPG_FLAGS_KERN_FAILURE_NOTIFY_TYPE;
			return false;
		}
		/* notify_port should be owned by launchd */
		if (!task_is_initproc(notify_port->ip_receiver->is_task)) {
			*payload = MPG_FLAGS_KERN_FAILURE_NOTIFY_RECV;
			return false;
		}
	}
	return true;
}
1343 
/*
 * Returns KERN_SUCCESS if a port-destroyed notification may be
 * registered on `pd_port` with `notify_port`; otherwise raises the
 * matching guard exception and returns KERN_INVALID_RIGHT or
 * KERN_FAILURE.
 */
kern_return_t
ipc_allow_register_pd_notification(
	ipc_port_t              pd_port,
	ipc_port_t              notify_port)
{
	uint64_t payload;

	/*
	 * you cannot register for port destroyed notifications
	 * on an immovable receive right (which includes kobjects),
	 * or a (special) reply port or any other port that explicitly disallows them.
	 */
	release_assert(ip_in_a_space(pd_port));
	if (ip_is_immovable_receive(pd_port) ||
	    !ipc_policy(pd_port)->pol_notif_port_destroy) {
		mach_port_guard_exception(ip_type(pd_port), MACH_NOTIFY_PORT_DESTROYED, kGUARD_EXC_INVALID_NOTIFICATION_REQ);
		return KERN_INVALID_RIGHT;
	}

	/* Stronger pd enforcement for service ports */
	if (!ipc_allow_service_port_register_pd(pd_port, notify_port, &payload)) {
		mach_port_guard_exception(0, payload, kGUARD_EXC_KERN_FAILURE);
		return KERN_INVALID_RIGHT;
	}

	/* Allow only one registration of this notification */
	if (ipc_port_has_prdrequest(pd_port)) {
		mach_port_guard_exception(0, MPG_FLAGS_KERN_FAILURE_MULTI_NOTI, kGUARD_EXC_KERN_FAILURE);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
1377 
1378 
1379 #pragma mark policy array
1380 
/*
 * Default pol_kobject_no_senders handler, installed by
 * ipc_policy_set_defaults() for types that never registered one:
 * receiving a no-senders notification for such a type is a bug.
 */
__dead2
static void
no_kobject_no_senders(
	ipc_port_t              port,
	mach_port_mscount_t     mscount __unused)
{
	panic("unexpected call to no_senders for object %p, type %d",
	    port, ip_type(port));
}
1390 
/*
 * Default pol_label_free handler, installed by ipc_policy_set_defaults()
 * for types without a label destructor: freeing a label for such a
 * type is a bug.
 */
__dead2
static void
no_label_free(ipc_object_label_t label)
{
	panic("unexpected call to label_free for object type %d, label %p",
	    label.io_type, label.iol_pointer);
}
1398 
/*
 * Denotes a policy whose safe value is the argument to PENDING(),
 * but which is currently not the default, pending validation/prep work.
 */
1403 #define PENDING(value)          value
1404 
/*
 * Per-object-type IPC policy table, indexed by ipc_object_type_t.
 *
 * User port types (IOT_*) are fully initialized here; kobject entries
 * ([__IKOT_FIRST, IOT_UNKNOWN)) get a restrictive default below and
 * are filled in at startup by ipc_kobject_register_startup().
 * Adjusted by ipc_policy_update_from_tunables() and finalized by
 * ipc_policy_set_defaults().
 */
__security_const_late
struct ipc_object_policy ipc_policy_array[IOT_UNKNOWN] = {
	[IOT_PORT_SET] = {
		.pol_name               = "port set",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = false,
	},
	[IOT_PORT] = {
		.pol_name               = "port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_SERVICE_PORT] = {
		.pol_name               = "service port",
		.pol_movability         = PENDING(IPC_MOVE_POLICY_ONCE_OR_AFTER_PD),
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_enforce_reply_semantics = PENDING(true), /* pending on service port defense cleanup */
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_WEAK_SERVICE_PORT] = {
		.pol_name               = "weak service port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_CONNECTION_PORT] = {
		.pol_name               = "connection port",
		.pol_movability         = IPC_MOVE_POLICY_ONCE,
		.pol_label_free         = ipc_connection_port_label_dealloc,
		.pol_enforce_reply_semantics = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_CONNECTION_PORT_WITH_PORT_ARRAY] = {
		.pol_name               = "conn port with ool port array",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_construct_entitlement = MACH_PORT_CONNECTION_PORT_WITH_PORT_ARRAY,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_EXCEPTION_PORT] = {
		.pol_name               = "exception port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_TIMER_PORT] = {
		.pol_name               = "timer port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_label_free         = mk_timer_port_label_dealloc,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_REPLY_PORT] = {
		.pol_name               = "reply port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
	[IOT_SPECIAL_REPLY_PORT] = {
		.pol_name               = "special reply port",
		/*
		 * General use of a special reply port as a receive right
		 * can cause type confusion in the importance code.
		 */
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
	[IOT_PROVISIONAL_REPLY_PORT] = {
		.pol_name               = "provisional reply port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_construct_entitlement = MACH_PORT_PROVISIONAL_REPLY_ENTITLEMENT,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},

	/* restrictive defaults for kobject types, overridden at startup */
	[__IKOT_FIRST ... IOT_UNKNOWN - 1] = {
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
};
1500 
/*
 * Relaxes the static policy table according to boot-time tunables:
 * with service port defense disabled, service ports stay movable.
 */
__startup_func
static void
ipc_policy_update_from_tunables(void)
{
	if (!service_port_defense_enabled) {
		ipc_policy_array[IOT_SERVICE_PORT].pol_movability =
		    IPC_MOVE_POLICY_ALWAYS;
	}
}
STARTUP(TUNABLES, STARTUP_RANK_LAST, ipc_policy_update_from_tunables);
1511 
/*
 * Ensure new port types that require a construction entitlement
 * are marked as immovable.
 */
1516 __startup_func
1517 static void
ipc_policy_construct_entitlement_hardening(void)1518 ipc_policy_construct_entitlement_hardening(void)
1519 {
1520 	/* No need to check kobjects because they are always immovable */
1521 	for (ipc_object_type_t i = 0; i < __IKOT_FIRST; i++) {
1522 		/*
1523 		 * IOT_PROVISIONAL_REPLY_PORT is an exception as it used to be
1524 		 * movable. For process opted for enhanced security V2,
1525 		 * kGUARD_EXC_MOVE_PROVISIONAL_REPLY_PORT will be thrown when a
1526 		 * provisional reply port is being moved.
1527 		 */
1528 		if (i == IOT_PROVISIONAL_REPLY_PORT) {
1529 			continue;
1530 		}
1531 		if (ipc_policy_array[i].pol_construct_entitlement) {
1532 			assert(ipc_policy_array[i].pol_movability == IPC_MOVE_POLICY_NEVER);
1533 		}
1534 	}
1535 }
1536 STARTUP(TUNABLES, STARTUP_RANK_LAST, ipc_policy_construct_entitlement_hardening);
1537 
/*
 * Registers a kobject type's operations into the policy table at
 * startup.  Each kobject type may be registered exactly once.
 */
__startup_func
void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)
{
	struct ipc_object_policy *pol = &ipc_policy_array[ops->iko_op_type];

	/* pol_name doubles as the "already registered" marker */
	if (pol->pol_name) {
		panic("trying to register kobject(%d) twice", ops->iko_op_type);
	}

	/*
	 * Always make sure kobject ports have immovable receive rights.
	 *
	 * They use the ip_kobject field of the ipc_port structure,
	 * which is unioned with ip_imp_task.
	 *
	 * Thus, general use of a kobject port as a receive right can
	 * cause type confusion in the importance code.
	 */
	ipc_release_assert(pol->pol_movability == IPC_MOVE_POLICY_NEVER);
	if (ops->iko_op_no_senders) {
		pol->pol_notif_no_senders = true;
	}

	pol->pol_name               = ops->iko_op_name;
	pol->pol_kobject_stable     = ops->iko_op_stable;
	pol->pol_kobject_permanent  = ops->iko_op_permanent;
	pol->pol_kobject_no_senders = ops->iko_op_no_senders;
	pol->pol_label_free         = ops->iko_op_label_free;
	pol->pol_movable_send       = ops->iko_op_movable_send;
}
1569 
1570 __startup_func
1571 static void
ipc_policy_set_defaults(void)1572 ipc_policy_set_defaults(void)
1573 {
1574 	/*
1575 	 * Check that implicit init to 0 picks the right "values"
1576 	 * for all properties.
1577 	 */
1578 	static_assert(IPC_MOVE_POLICY_NEVER == 0);
1579 
1580 	for (uint32_t i = 0; i < IOT_UNKNOWN; i++) {
1581 		struct ipc_object_policy *pol = &ipc_policy_array[i];
1582 
1583 		if (!pol->pol_kobject_no_senders) {
1584 			pol->pol_kobject_no_senders = no_kobject_no_senders;
1585 		}
1586 		if (!pol->pol_label_free) {
1587 			pol->pol_label_free = no_label_free;
1588 		}
1589 	}
1590 }
1591 STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_policy_set_defaults);
1592 
1593 #pragma mark exception port policy
1594 
/*
 * Returns whether `port` is acceptable as an exception port for `task`
 * (TASK_NULL presumably means a host-level registration — confirm
 * against callers).
 */
bool
ipc_is_valid_exception_port(
	task_t task,
	ipc_port_t port)
{
	/* the UX handler port is allowed only when no task is given */
	if (task == TASK_NULL && is_ux_handler_port(port)) {
		return true;
	}

	/* dedicated exception ports are always valid */
	if (ip_is_exception_port(port)) {
		return true;
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but exception ports to still be set.
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return false;
	}

	if (ip_is_immovable_receive(port)) {
		/*
		 * rdar://153108740
		 * Temporarily allow service ports until telemetry is clean.
		 */
		if (ip_type(port) == IOT_SERVICE_PORT) {
			return true;
		}
		return false;
	}

	return true;
}
1630