xref: /xnu-12377.81.4/osfmk/ipc/ipc_policy.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2023 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/kern_return.h>
30 #include <mach/mach_types.h>
31 #include <mach/port.h>
32 #include <mach/mk_timer.h>
33 #include <mach/notify.h>
34 
35 #include <kern/assert.h>
36 #include <kern/exc_guard.h>
37 #include <kern/ipc_kobject.h>
38 #include <kern/ipc_tt.h>
39 #include <kern/kern_types.h>
40 #include <kern/mach_filter.h>
41 #include <kern/task.h>
42 #include <kern/ux_handler.h> /* is_ux_handler_port() */
43 
44 #include <vm/vm_map_xnu.h> /* current_map() */
45 #include <vm/vm_protos.h> /* current_proc() */
46 
47 #include <ipc/ipc_policy.h>
48 #include <ipc/ipc_service_port.h>
49 #include <ipc/port.h>
50 
51 #if CONFIG_CSR
52 #include <sys/csr.h>
53 #endif
54 #include <sys/codesign.h>
55 #include <sys/proc_ro.h>
56 #include <sys/reason.h>
57 
58 #include <libkern/coreanalytics/coreanalytics.h>
59 
/* BSD-layer helpers (declared here to avoid pulling in bsd headers) */
extern bool proc_is_simulated(struct proc *);
extern char *proc_name_address(struct proc *p);
extern int  exit_with_guard_exception(
	struct proc            *p,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode);

#pragma mark policy tunables

extern const vm_size_t  ipc_kmsg_max_vm_space;

#if IPC_HAS_LEGACY_MACH_MSG_TRAP
#if DEVELOPMENT || DEBUG
/* boot-arg escape hatch to allow the legacy mach_msg() trap on dev kernels */
static TUNABLE(bool, allow_legacy_mach_msg, "allow_legacy_mach_msg", false);
#endif /* DEVELOPMENT || DEBUG */
#endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */

/* a boot-arg to enable/disable OOL port array restrictions */
#if XNU_TARGET_OS_XR
TUNABLE(bool, ool_port_array_enforced, "ool_port_array_enforced", false);
#else
TUNABLE(bool, ool_port_array_enforced, "ool_port_array_enforced", true);
#endif /* XNU_TARGET_OS_XR */

/* Note: Consider Developer Mode when changing the default. */
TUNABLE(ipc_control_port_options_t, ipc_control_port_options,
    "ipc_control_port_options",
    ICP_OPTIONS_IMMOVABLE_1P_HARD |
    ICP_OPTIONS_PINNED_1P_HARD |
#if !XNU_TARGET_OS_OSX
    ICP_OPTIONS_IMMOVABLE_3P_HARD |
#endif
    ICP_OPTIONS_PINNED_3P_SOFT);

TUNABLE(bool, service_port_defense_enabled, "-service_port_defense_enabled", true);

/* The bootarg to disable ALL ipc policy violation telemetry */
TUNABLE(bool, ipcpv_telemetry_enabled, "-ipcpv_telemetry_enabled", true);

/*
 * bootargs for reply port semantics on bootstrap ports
 */
TUNABLE(bool, bootstrap_port_telemetry_enabled, "-bootstrap_port_telemetry_enabled", true);
TUNABLE(bool, bootstrap_port_enforcement_enabled, "-bootstrap_port_enforcement_enabled", true);

/* Enables reply port/voucher/persona debugging code */
TUNABLE(bool, enforce_strict_reply, "-enforce_strict_reply", false);
107 
108 #pragma mark policy options
109 
/*!
 * @brief
 * Computes the IPC policy bits that apply to a task's IPC space.
 *
 * @param task          the task to evaluate (may be the kernel task).
 *
 * @returns the accumulated ipc_space_policy_t flags:
 * - IPC_SPACE_POLICY_KERNEL for the kernel task (no further modifiers),
 * - PLATFORM and ENHANCED_V* bits derived from the task's RO flags and
 *   its adopted platform-restrictions version,
 * - SIMULATED / TRANSLATED / OPTED_OUT bincompat modifiers.
 */
ipc_space_policy_t
ipc_policy_for_task(task_t task)
{
#if XNU_TARGET_OS_OSX
	struct proc *proc = get_bsdtask_info(task);
#endif /* XNU_TARGET_OS_OSX */
	ipc_space_policy_t policy = IPC_SPACE_POLICY_DEFAULT;
	uint32_t ro_flags;

	if (task == kernel_task) {
		return policy | IPC_SPACE_POLICY_KERNEL;
	}

	ro_flags = task_ro_flags_get(task);
	if (ro_flags & TFRO_PLATFORM) {
		/* platform binaries are implicitly at the newest enhanced tier */
		policy |= IPC_SPACE_POLICY_PLATFORM;
		policy |= IPC_POLICY_ENHANCED_V2;
	}

	/* adopt the strongest platform-restrictions version the task declares */
	if (task_get_platform_restrictions_version(task) >= 2) {
		policy |= IPC_POLICY_ENHANCED_V2;
	} else if (task_get_platform_restrictions_version(task) == 1) {
		policy |= IPC_POLICY_ENHANCED_V1;
#if XNU_TARGET_OS_OSX
	} else if (proc && csproc_hardened_runtime(proc)) {
		/* hardened-runtime macOS binaries land on the legacy V0 tier */
		policy |= IPC_POLICY_ENHANCED_V0;
#endif /* XNU_TARGET_OS_OSX */
	}

#if XNU_TARGET_OS_OSX
	if (task_opted_out_mach_hardening(task)) {
		policy |= IPC_SPACE_POLICY_OPTED_OUT;
	}
#endif /* XNU_TARGET_OS_OSX */

	/*
	 * policy modifiers (bincompat exemptions, see ipc_should_apply_policy())
	 */
#if XNU_TARGET_OS_OSX
	if (proc && proc_is_simulated(proc)) {
		policy |= IPC_SPACE_POLICY_SIMULATED;
	}
#endif
#if CONFIG_ROSETTA
	if (task_is_translated(task)) {
		policy |= IPC_SPACE_POLICY_TRANSLATED;
	}
#endif

	return policy;
}
161 
162 
163 inline ipc_space_policy_t
ipc_convert_msg_options_to_space(mach_msg_option64_t opts)164 ipc_convert_msg_options_to_space(mach_msg_option64_t opts)
165 {
166 	return opts >> MACH64_POLICY_SHIFT;
167 }
168 
/*!
 * @brief
 * Sanitizes user-provided mach_msg options and stamps them with the
 * policy bits of @c task's IPC space.
 *
 * @param task          the task issuing the mach_msg call.
 * @param opts          the raw user-provided option bits.
 *
 * @returns the sanitized options: kernel-only bits cleared, user
 *          aliases re-encoded, and the MACH64_POLICY_* bits for the
 *          task's space OR-ed in.
 */
mach_msg_option64_t
ipc_current_msg_options(
	task_t                  task,
	mach_msg_option64_t     opts)
{
	uint32_t ro_flags = task_ro_flags_get(task);

	/*
	 * Step 1: convert to kernel flags
	 * - clear any kernel only flags
	 * - convert MACH_SEND_FILTER_NONFATAL which is aliased to the
	 *   MACH_SEND_ALWAYS kernel flag into MACH64_POLICY_FILTER_NON_FATAL.
	 */
	opts &= MACH64_MSG_OPTION_USER;

	if (opts & MACH64_SEND_FILTER_NONFATAL) {
		/*
		 * Re-encode the user alias (see step 1 above) as a dedicated
		 * policy bit so it cannot be confused with the kernel's
		 * MACH_SEND_ALWAYS flag downstream.
		 */
		opts &= ~MACH64_SEND_FILTER_NONFATAL;
		opts |= MACH64_POLICY_FILTER_NON_FATAL;
	}
	if (ro_flags & TFRO_FILTER_MSG) {
		opts |= MACH64_POLICY_FILTER_MSG;
	}

	/*
	 * Step 2: derive policy flags from the current context
	 */
	{
		/*
		 * mach_msg_option64_t can't use IPC_SPACE_POLICY_BASE(),
		 * check using this MACH64_POLICY_SHIFT is legitimate.
		 */
#define verify_policy_enum(name) \
	static_assert(IPC_SPACE_POLICY_ ## name == \
	    MACH64_POLICY_ ## name >> MACH64_POLICY_SHIFT)

		verify_policy_enum(DEFAULT);
		verify_policy_enum(ENHANCED);
		verify_policy_enum(PLATFORM);
		verify_policy_enum(KERNEL);
		verify_policy_enum(SIMULATED);
		verify_policy_enum(TRANSLATED);
		verify_policy_enum(OPTED_OUT);
		verify_policy_enum(ENHANCED_V0);
		verify_policy_enum(ENHANCED_V1);
		verify_policy_enum(ENHANCED_V2);
		verify_policy_enum(ENHANCED_VERSION_MASK);
		verify_policy_enum(MASK);

#undef verify_policy_enum
	}

	/* stamp the caller's space policy into the high option bits */
	opts |= (uint64_t)ipc_space_policy(task->itk_space) << MACH64_POLICY_SHIFT;

	return opts;
}
226 
227 mach_msg_return_t
ipc_preflight_msg_option64(mach_msg_option64_t opts)228 ipc_preflight_msg_option64(mach_msg_option64_t opts)
229 {
230 	bool success = true;
231 
232 	if ((opts & MACH64_SEND_MSG) && (opts & MACH64_MACH_MSG2)) {
233 		mach_msg_option64_t cfi = opts & MACH64_MSG_OPTION_CFI_MASK;
234 
235 #if !XNU_TARGET_OS_OSX
236 		cfi &= ~MACH64_SEND_ANY;
237 #endif
238 		/* mach_msg2() calls must have exactly _one_ of these set */
239 		if (cfi == 0 || (cfi & (cfi - 1)) != 0) {
240 			success = false;
241 		}
242 
243 		/* vector calls are only supported for message queues */
244 		if ((opts & (MACH64_SEND_MQ_CALL | MACH64_SEND_ANY)) == 0 &&
245 		    (opts & MACH64_MSG_VECTOR)) {
246 			success = false;
247 		}
248 	}
249 
250 	if (success) {
251 		return MACH_MSG_SUCCESS;
252 	}
253 
254 	mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
255 	if (opts & MACH64_MACH_MSG2) {
256 		return MACH_SEND_INVALID_OPTIONS;
257 	}
258 	return KERN_NOT_SUPPORTED;
259 }
260 
261 #pragma mark helpers
262 
263 bool
ipc_should_apply_policy(const ipc_space_policy_t current_policy,const ipc_space_policy_t requested_level)264 ipc_should_apply_policy(
265 	const ipc_space_policy_t current_policy,
266 	const ipc_space_policy_t requested_level)
267 {
268 	/* Do not apply security policies on these binaries to avoid bincompat regression */
269 	if ((current_policy & IPC_SPACE_POLICY_SIMULATED) ||
270 	    (current_policy & IPC_SPACE_POLICY_OPTED_OUT) ||
271 	    (current_policy & IPC_SPACE_POLICY_TRANSLATED)) {
272 		return false;
273 	}
274 
275 	/* Check versioning for applying platform restrictions policy */
276 	if (requested_level & current_policy & IPC_SPACE_POLICY_ENHANCED) {
277 		/* Platform is always opted into platform restrictions */
278 		if (current_policy & IPC_SPACE_POLICY_PLATFORM) {
279 			return true;
280 		}
281 
282 		const ipc_space_policy_t requested_version = requested_level & IPC_SPACE_POLICY_ENHANCED_VERSION_MASK;
283 		const ipc_space_policy_t current_es_version = current_policy & IPC_SPACE_POLICY_ENHANCED_VERSION_MASK;
284 		assert(requested_version != 0);
285 		return requested_version <= current_es_version;
286 	}
287 	return current_policy & requested_level;
288 }
289 
290 #pragma mark legacy trap policies
291 #if IPC_HAS_LEGACY_MACH_MSG_TRAP
292 
/*
 * CoreAnalytics event recording uses of the legacy mach_msg() trap that
 * were allowed through for bincompat
 * (see ipc_policy_allow_legacy_mach_msg_trap_for_platform()).
 */
CA_EVENT(mach_msg_trap_event,
    CA_INT, msgh_id,
    CA_INT, sw_platform,
    CA_INT, sdk,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name);
300 
/*
 * Report one allowed legacy mach_msg() trap send to CoreAnalytics,
 * tagged with the caller's platform/SDK and code-signing identity so
 * remaining legacy users can be tracked.
 *
 * team_id / signing_id / proc_name may be NULL for some processes;
 * the corresponding event fields are then left zeroed.
 */
static void
mach_msg_legacy_send_analytics(
	mach_msg_id_t           msgh_id,
	uint32_t                platform,
	uint32_t                sdk)
{
	char *proc_name = proc_name_address(current_proc());
	const char *team_id = csproc_get_teamid(current_proc());
	const char *signing_id = csproc_get_identity(current_proc());

	ca_event_t ca_event = CA_EVENT_ALLOCATE(mach_msg_trap_event);
	CA_EVENT_TYPE(mach_msg_trap_event) * msg_event = ca_event->data;

	msg_event->msgh_id = msgh_id;
	msg_event->sw_platform = platform;
	msg_event->sdk = sdk;

	if (proc_name) {
		strlcpy(msg_event->proc_name, proc_name, CA_PROCNAME_LEN);
	}

	if (team_id) {
		strlcpy(msg_event->team_id, team_id, CA_TEAMID_MAX_LEN);
	}

	if (signing_id) {
		strlcpy(msg_event->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
	}

	CA_EVENT_SEND(ca_event);
}
332 
333 static bool
ipc_policy_allow_legacy_mach_msg_trap_for_platform(mach_msg_id_t msgid)334 ipc_policy_allow_legacy_mach_msg_trap_for_platform(
335 	mach_msg_id_t           msgid)
336 {
337 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
338 	uint32_t platform = pro->p_platform_data.p_platform;
339 	uint32_t sdk = pro->p_platform_data.p_sdk;
340 	uint32_t sdk_major = sdk >> 16;
341 
342 	/*
343 	 * Special rules, due to unfortunate bincompat reasons,
344 	 * allow for a hardcoded list of MIG calls to XNU to go through
345 	 * for macOS apps linked against an SDK older than 12.x.
346 	 */
347 	switch (platform) {
348 	case PLATFORM_MACOS:
349 		if (sdk == 0 || sdk_major > 12) {
350 			return false;
351 		}
352 		break;
353 	default:
354 		/* disallow for any non-macOS for platform */
355 		return false;
356 	}
357 
358 	switch (msgid) {
359 	case 0xd4a: /* task_threads */
360 	case 0xd4d: /* task_info */
361 	case 0xe13: /* thread_get_state */
362 	case 0x12c4: /* mach_vm_read */
363 	case 0x12c8: /* mach_vm_read_overwrite */
364 		mach_msg_legacy_send_analytics(msgid, platform, sdk);
365 		return true;
366 	default:
367 		return false;
368 	}
369 }
370 
371 
/*!
 * @brief
 * Decides whether the calling task may use the legacy mach_msg()
 * send trap for this message.
 *
 * @param msgid         the message id of the message being sent.
 * @param opts          sanitized options (including policy bits).
 *
 * @returns
 * - MACH_MSG_SUCCESS when the legacy trap is allowed
 * - KERN_NOT_SUPPORTED (after raising a guard exception) otherwise
 */
mach_msg_return_t
ipc_policy_allow_legacy_send_trap(
	mach_msg_id_t           msgid,
	mach_msg_option64_t     opts)
{
	/* equivalent to ENHANCED_V0 */
	if ((opts & MACH64_POLICY_ENHANCED) == 0) {
#if __x86_64__
		if (current_map()->max_offset <= VM_MAX_ADDRESS) {
			/*
			 * Legacy mach_msg_trap() is the only
			 * available thing for 32-bit tasks
			 */
			return MACH_MSG_SUCCESS;
		}
#endif /* __x86_64__ */
#if CONFIG_ROSETTA
		if (opts & MACH64_POLICY_TRANSLATED) {
			/*
			 * Similarly, on Rosetta, allow mach_msg_trap()
			 * as those apps likely can't be fixed anymore
			 */
			return MACH_MSG_SUCCESS;
		}
#endif
#if DEVELOPMENT || DEBUG
		if (allow_legacy_mach_msg) {
			/* Honor boot-arg */
			return MACH_MSG_SUCCESS;
		}
#endif /* DEVELOPMENT || DEBUG */
		if (ipc_policy_allow_legacy_mach_msg_trap_for_platform(msgid)) {
			return MACH_MSG_SUCCESS;
		}
	}

	mach_port_guard_exception(msgid, opts, kGUARD_EXC_INVALID_OPTIONS);
	/*
	 * this should be MACH_SEND_INVALID_OPTIONS,
	 * but this is a new mach_msg2 error only.
	 */
	return KERN_NOT_SUPPORTED;
}
415 
416 
417 #endif /* IPC_HAS_LEGACY_MACH_MSG_TRAP */
418 #pragma mark ipc policy telemetry
419 
/*
 * As the CA framework relies on successfully allocating zalloc memory,
 * we maintain a small buffer that gets flushed when full. This helps us
 * avoid taking spinlocks when working with CA.
 */
/* Ring buffer capacity; kept small since the flush path copies it onto the stack */
#define IPC_POLICY_VIOLATIONS_RB_SIZE         2

/*
 * Stripped down version of service port's string name. This is to avoid overwhelming CA's dynamic memory allocation.
 */
#define CA_MACH_SERVICE_PORT_NAME_LEN         86

/* One buffered policy-violation event, pending upload to CoreAnalytics */
struct ipc_policy_violations_rb_entry {
	char proc_name[CA_PROCNAME_LEN];
	char service_name[CA_MACH_SERVICE_PORT_NAME_LEN];
	char team_id[CA_TEAMID_MAX_LEN];
	char signing_id[CA_SIGNINGID_MAX_LEN];
	ipc_policy_violation_id_t violation_id;
	int  sw_platform;
	int  aux_data;
	int  sdk;
};
/* Shared ring buffer; guarded by ipc_telemetry_lock */
struct ipc_policy_violations_rb_entry ipc_policy_violations_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
/* Next free slot in ipc_policy_violations_rb; guarded by ipc_telemetry_lock */
static uint8_t ipc_policy_violations_rb_index = 0;

#if DEBUG || DEVELOPMENT
/* sysctl debug.ipcpv_telemetry_count */
_Atomic unsigned int ipcpv_telemetry_count = 0;
#endif

LCK_GRP_DECLARE(ipc_telemetry_lock_grp, "ipc_telemetry_lock_grp");
LCK_TICKET_DECLARE(ipc_telemetry_lock, &ipc_telemetry_lock_grp);
451 
/*
 * Telemetry: report back the process name violating ipc policy. Note that this event can be used to report
 * any type of ipc violation through a ipc_policy_violation_id_t. It is named reply_port_semantics_violations
 * because we are reusing an existing event.
 */
CA_EVENT(reply_port_semantics_violations,
    CA_STATIC_STRING(CA_PROCNAME_LEN), proc_name,
    CA_STATIC_STRING(CA_MACH_SERVICE_PORT_NAME_LEN), service_name,
    CA_STATIC_STRING(CA_TEAMID_MAX_LEN), team_id,
    CA_STATIC_STRING(CA_SIGNINGID_MAX_LEN), signing_id,
    CA_INT, reply_port_semantics_violation,
    CA_INT, msgh_id); /* for aux_data, keeping the legacy name msgh_id to avoid CA shenanigan */
464 
465 static void
send_telemetry(const struct ipc_policy_violations_rb_entry * entry)466 send_telemetry(
467 	const struct ipc_policy_violations_rb_entry *entry)
468 {
469 	ca_event_t ca_event = CA_EVENT_ALLOCATE_FLAGS(reply_port_semantics_violations, Z_NOWAIT);
470 	if (ca_event) {
471 		CA_EVENT_TYPE(reply_port_semantics_violations) * event = ca_event->data;
472 
473 		strlcpy(event->service_name, entry->service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
474 		strlcpy(event->proc_name, entry->proc_name, CA_PROCNAME_LEN);
475 		strlcpy(event->team_id, entry->team_id, CA_TEAMID_MAX_LEN);
476 		strlcpy(event->signing_id, entry->signing_id, CA_SIGNINGID_MAX_LEN);
477 		event->reply_port_semantics_violation = entry->violation_id;
478 		event->msgh_id = entry->aux_data;
479 
480 		CA_EVENT_SEND(ca_event);
481 	}
482 }
483 
/* Routine: flush_ipc_policy_violations_telemetry
 * Purpose:
 *              Drain the violation ring buffer and emit each entry
 *              through CoreAnalytics.
 * Conditions:
 *              Assumes the ipc telemetry lock is held on entry.
 *              Drops the lock while the events are sent (CA allocates
 *              zalloc memory, which must not happen with the lock held),
 *              then re-acquires it and returns with it held, with
 *              the buffer index reset to 0.
 */
static void
flush_ipc_policy_violations_telemetry(void)
{
	struct ipc_policy_violations_rb_entry local_rb[IPC_POLICY_VIOLATIONS_RB_SIZE];
	uint8_t local_rb_index = 0;

	if (__improbable(ipc_policy_violations_rb_index > IPC_POLICY_VIOLATIONS_RB_SIZE)) {
		panic("Invalid ipc policy violation buffer index %d > %d",
		    ipc_policy_violations_rb_index, IPC_POLICY_VIOLATIONS_RB_SIZE);
	}

	/*
	 * We operate on local copy of telemetry buffer because CA framework relies on successfully
	 * allocating zalloc memory. It can not do that if we are accessing the shared buffer
	 * with spin locks held.
	 */
	while (local_rb_index != ipc_policy_violations_rb_index) {
		local_rb[local_rb_index] = ipc_policy_violations_rb[local_rb_index];
		local_rb_index++;
	}

	lck_ticket_unlock(&ipc_telemetry_lock);

	while (local_rb_index > 0) {
		struct ipc_policy_violations_rb_entry *entry = &local_rb[--local_rb_index];
		send_telemetry(entry);
	}

	/*
	 * Finally call out the buffer as empty. This is also a sort of rate limiting mechanisms for the events.
	 * Events will get dropped until the buffer is not fully flushed.
	 */
	lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
	ipc_policy_violations_rb_index = 0;
}
525 
526 void
ipc_stash_policy_violations_telemetry(ipc_policy_violation_id_t violation_id,ipc_port_t port,int aux_data)527 ipc_stash_policy_violations_telemetry(
528 	ipc_policy_violation_id_t    violation_id,
529 	ipc_port_t                   port,
530 	int                          aux_data)
531 {
532 	if (!ipcpv_telemetry_enabled) {
533 		return;
534 	}
535 
536 	struct ipc_policy_violations_rb_entry *entry;
537 	char *service_name = (char *) "unknown";
538 	task_t task = current_task_early();
539 	int pid = -1;
540 
541 #if CONFIG_SERVICE_PORT_INFO
542 	if (IP_VALID(port)) {
543 		/*
544 		 * dest_port lock must be held to avoid race condition
545 		 * when accessing ip_splabel rdar://139066947
546 		 */
547 		struct mach_service_port_info sp_info;
548 		ipc_object_label_t label = ip_mq_lock_label_get(port);
549 		if (io_state_active(label.io_state)) {
550 			if (ip_is_any_service_port_type(label.io_type) ||
551 			    ip_is_bootstrap_port_type(label.io_type)) {
552 				ipc_service_port_label_get_info(label.iol_service, &sp_info);
553 				service_name = sp_info.mspi_string_name;
554 			}
555 		}
556 		ip_mq_unlock_label_put(port, &label);
557 	}
558 #endif /* CONFIG_SERVICE_PORT_INFO */
559 
560 	if (task) {
561 		pid = task_pid(task);
562 	}
563 
564 	if (task) {
565 		struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
566 		uint32_t platform = pro->p_platform_data.p_platform;
567 		uint32_t sdk = pro->p_platform_data.p_sdk;
568 		char *proc_name = (char *) "unknown";
569 #ifdef MACH_BSD
570 		proc_name = proc_name_address(get_bsdtask_info(task));
571 #endif /* MACH_BSD */
572 		const char *team_id = csproc_get_identity(current_proc());
573 		const char *signing_id = csproc_get_teamid(current_proc());
574 
575 		lck_ticket_lock(&ipc_telemetry_lock, &ipc_telemetry_lock_grp);
576 
577 		if (ipc_policy_violations_rb_index >= IPC_POLICY_VIOLATIONS_RB_SIZE) {
578 			/* Dropping the event since buffer is full. */
579 			lck_ticket_unlock(&ipc_telemetry_lock);
580 			return;
581 		}
582 		entry = &ipc_policy_violations_rb[ipc_policy_violations_rb_index++];
583 		strlcpy(entry->proc_name, proc_name, CA_PROCNAME_LEN);
584 
585 		strlcpy(entry->service_name, service_name, CA_MACH_SERVICE_PORT_NAME_LEN);
586 		entry->violation_id = violation_id;
587 
588 		if (team_id) {
589 			strlcpy(entry->team_id, team_id, CA_TEAMID_MAX_LEN);
590 		}
591 
592 		if (signing_id) {
593 			strlcpy(entry->signing_id, signing_id, CA_SIGNINGID_MAX_LEN);
594 		}
595 		entry->aux_data = aux_data;
596 		entry->sw_platform = platform;
597 		entry->sdk = sdk;
598 	}
599 
600 	if (ipc_policy_violations_rb_index == IPC_POLICY_VIOLATIONS_RB_SIZE) {
601 		flush_ipc_policy_violations_telemetry();
602 	}
603 
604 	lck_ticket_unlock(&ipc_telemetry_lock);
605 }
606 
#if DEBUG || DEVELOPMENT
/*
 * Bump the debug telemetry counter, saturating instead of wrapping at
 * UINT_MAX.  The load/store pair is deliberately not an atomic RMW:
 * this is a best-effort debug statistic, lost updates are acceptable.
 */
void
ipc_inc_telemetry_count(void)
{
	unsigned int cur = os_atomic_load(&ipcpv_telemetry_count, relaxed);
	unsigned int next;

	if (!os_add_overflow(cur, 1, &next)) {
		os_atomic_store(&ipcpv_telemetry_count, next, relaxed);
	}
}
#endif /* DEBUG || DEVELOPMENT */
617 
/*!
 * @brief
 * Checks that this message conforms to reply port policies, which are:
 * 1. IOT_REPLY_PORT's must be make-send-once disposition
 * 2. You must use an IOT_REPLY_PORT (or weak variant) if the dest_port requires it
 *
 * @param reply_port    the message local/reply port
 * @param dest_port     the message remote/dest port
 * @param opts          message options carrying the sender's space policy bits
 *
 * @returns
 * - MACH_SEND_INVALID_REPLY (after raising a guard exception) if there
 *   is a violation in the security policy for this mach msg
 * - MACH_MSG_SUCCESS otherwise
 */
static mach_msg_return_t
ipc_validate_local_port(
	mach_port_t         reply_port,
	mach_port_t         dest_port,
	mach_msg_option64_t opts)
{
	assert(IP_VALID(dest_port));
	/* An empty reply port, or an inactive reply port / dest port violates nothing */
	if (!IP_VALID(reply_port) || !ip_active(reply_port) || !ip_active(dest_port)) {
		return MACH_MSG_SUCCESS;
	}

	/* a real reply port always satisfies the policy */
	if (ip_is_reply_port(reply_port)) {
		return MACH_MSG_SUCCESS;
	}

	ipc_space_policy_t pol = ipc_convert_msg_options_to_space(opts);
	/* skip translated and simulated process */
	if (!ipc_should_apply_policy((pol), IPC_SPACE_POLICY_DEFAULT)) {
		return MACH_MSG_SUCCESS;
	}

	/* kobject enforcement: enhanced-v1 senders must use a reply port */
	if (ip_is_kobject(dest_port) &&
	    ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V1)) {
		mach_port_guard_exception(ip_get_receiver_name(dest_port), 0, kGUARD_EXC_KOBJECT_REPLY_PORT_SEMANTICS);
		return MACH_SEND_INVALID_REPLY;
	}

	/* destinations that don't require reply semantics, and provisional
	 * reply ports, are allowed through */
	if (!ipc_policy(dest_port)->pol_enforce_reply_semantics || ip_is_provisional_reply_port(reply_port)) {
		return MACH_MSG_SUCCESS;
	}

	/* bootstrap port defense (enhanced-v2 only, boot-arg controlled) */
	if (ip_is_bootstrap_port(dest_port) &&
	    ipc_should_apply_policy(pol, IPC_POLICY_ENHANCED_V2)) {
		if (bootstrap_port_telemetry_enabled &&
		    !ipc_space_has_telemetry_type(current_space(), IS_HAS_BOOTSTRAP_PORT_TELEMETRY)) {
			ipc_stash_policy_violations_telemetry(IPCPV_BOOTSTRAP_PORT, dest_port, 0);
		}
		if (bootstrap_port_enforcement_enabled) {
			mach_port_guard_exception(ip_get_receiver_name(dest_port), 1, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
			return MACH_SEND_INVALID_REPLY;
		}
	}

	/* regular enforcement */
	if (!ip_is_bootstrap_port(dest_port)) {
		if (ip_is_strong_service_port(dest_port)) {
			ipc_stash_policy_violations_telemetry(IPCPV_REPLY_PORT_SEMANTICS_OPTOUT, dest_port, 0);
		}
		mach_port_guard_exception(ip_get_receiver_name(dest_port), 0, kGUARD_EXC_REQUIRE_REPLY_PORT_SEMANTICS);
		return MACH_SEND_INVALID_REPLY;
	}

	return MACH_MSG_SUCCESS;
}
688 
689 #pragma mark MACH_SEND_MSG policies
690 
691 mach_msg_return_t
ipc_validate_kmsg_header_schema_from_user(mach_msg_user_header_t * hdr __unused,mach_msg_size_t dsc_count,mach_msg_option64_t opts)692 ipc_validate_kmsg_header_schema_from_user(
693 	mach_msg_user_header_t *hdr __unused,
694 	mach_msg_size_t         dsc_count,
695 	mach_msg_option64_t     opts)
696 {
697 	if (opts & MACH64_SEND_KOBJECT_CALL) {
698 		if (dsc_count > IPC_KOBJECT_DESC_MAX) {
699 			return MACH_SEND_TOO_LARGE;
700 		}
701 	}
702 
703 	return MACH_MSG_SUCCESS;
704 }
705 
/*!
 * @brief
 * Validates size/count invariants of a user message's descriptors.
 *
 * @returns
 * - MACH_SEND_TOO_LARGE when the OOL port count exceeds the maximum,
 *   or the total size computation overflows
 * - MACH_MSG_VM_KERNEL when the descriptor VM size would exceed
 *   ipc_kmsg_max_vm_space
 * - MACH_MSG_SUCCESS otherwise
 */
mach_msg_return_t
ipc_validate_kmsg_schema_from_user(
	mach_msg_header_t      *kdata,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts __unused)
{
	mach_msg_kbase_t *kbase = NULL;
	vm_size_t vm_size;

	/*
	 * NOTE(review): kbase is assigned for complex messages but never
	 * read afterwards in this function — presumably kept for
	 * assertion/validation side effects inside
	 * mach_msg_header_to_kbase(); confirm before removing.
	 */
	if (kdata->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		kbase = mach_msg_header_to_kbase(kdata);
	}

	if (send_uctx->send_dsc_port_count > IPC_KMSG_MAX_OOL_PORT_COUNT) {
		return MACH_SEND_TOO_LARGE;
	}

	/* overflow-checked: ool memory size + space for the ool port arrays */
	if (os_add_overflow(send_uctx->send_dsc_vm_size,
	    send_uctx->send_dsc_port_count * sizeof(mach_port_t), &vm_size)) {
		return MACH_SEND_TOO_LARGE;
	}
	if (vm_size > ipc_kmsg_max_vm_space) {
		return MACH_MSG_VM_KERNEL;
	}

	return MACH_MSG_SUCCESS;
}
733 
/*!
 * @brief
 * Applies send-side message filtering for destination ports carrying a
 * filter label (bootstrap, service and connection ports).
 *
 * @returns
 * - MACH_SEND_MSG_FILTERED when the message id is denied by the port's
 *   filter policy; a kGUARD_EXC_MSG_FILTERED guard exception is raised
 *   unless MACH64_POLICY_FILTER_NON_FATAL is set
 * - MACH_MSG_SUCCESS otherwise
 */
static mach_msg_return_t
ipc_filter_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_port_t             dport,
	mach_msg_option64_t     opts)
{
	static const uint32_t MACH_BOOTSTRAP_PORT_MSG_ID_MASK = ((1u << 24) - 1);

	mach_msg_filter_id fid = 0;
	ipc_object_label_t dlabel;
	mach_msg_id_t msg_id = hdr->msgh_id;
	struct ipc_conn_port_label *sblabel = NULL;

	/* lock the port and snapshot its label while inspecting it */
	dlabel = ip_mq_lock_label_get(dport);

	if (io_state_active(dlabel.io_state) && dlabel.io_filtered) {
		switch (dlabel.io_type) {
		case IOT_BOOTSTRAP_PORT:
			/*
			 * Mask the top byte for messages sent to launchd's bootstrap port.
			 * Filter any messages with domain 0 (as they correspond to MIG
			 * based messages)
			 */
			if ((msg_id & ~MACH_BOOTSTRAP_PORT_MSG_ID_MASK) == 0) {
				ip_mq_unlock_label_put(dport, &dlabel);
				goto filtered_msg;
			}
			msg_id = msg_id & MACH_BOOTSTRAP_PORT_MSG_ID_MASK;
			OS_FALLTHROUGH;

		case IOT_SERVICE_PORT:
		case IOT_WEAK_SERVICE_PORT:
			sblabel = dlabel.iol_service->ispl_sblabel;
			break;

		case IOT_CONNECTION_PORT:
			/* Connection ports can also have send-side message filters */
			sblabel = dlabel.iol_connection;
			break;

		default:
			break;
		}
	}
	/* take a reference before dropping the port lock */
	if (sblabel) {
		mach_msg_filter_retain_sblabel_callback(sblabel);
	}

	ip_mq_unlock_label_put(dport, &dlabel);

	/* evaluate the filter policy unlocked (callback consumes the ref) */
	if (sblabel && !mach_msg_fetch_filter_policy(sblabel, msg_id, &fid)) {
		goto filtered_msg;
	}
	return MACH_MSG_SUCCESS;

filtered_msg:
	if ((opts & MACH64_POLICY_FILTER_NON_FATAL) == 0) {
		mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(hdr->msgh_remote_port);

		mach_port_guard_exception(dest_name, hdr->msgh_id,
		    kGUARD_EXC_MSG_FILTERED);
	}
	return MACH_SEND_MSG_FILTERED;
}
798 
799 static bool
ipc_policy_allow_send_only_kobject_calls(void)800 ipc_policy_allow_send_only_kobject_calls(void)
801 {
802 	struct proc_ro *pro = current_thread_ro()->tro_proc_ro;
803 	uint32_t sdk = pro->p_platform_data.p_sdk;
804 	uint32_t sdk_major = sdk >> 16;
805 
806 	switch (pro->p_platform_data.p_platform) {
807 	case PLATFORM_IOS:
808 	case PLATFORM_MACCATALYST:
809 	case PLATFORM_TVOS:
810 		if (sdk == 0 || sdk_major > 17) {
811 			return false;
812 		}
813 		return true;
814 	case PLATFORM_MACOS:
815 		if (sdk == 0 || sdk_major > 14) {
816 			return false;
817 		}
818 		return true;
819 	case PLATFORM_WATCHOS:
820 		if (sdk == 0 || sdk_major > 10) {
821 			return false;
822 		}
823 		return true;
824 	default:
825 		return false;
826 	}
827 }
828 
/*!
 * @brief
 * Verifies that the CFI option bit of a mach_msg2() send matches the
 * type of the destination port.
 *
 * @returns MACH_SEND_INVALID_OPTIONS on a mismatch, MACH_MSG_SUCCESS
 * otherwise.
 */
static mach_msg_return_t
ipc_validate_kmsg_dest_from_user(
	mach_msg_header_t      *hdr,
	ipc_port_t              port,
	mach_msg_option64_t     opts)
{
	/*
	 * This is a _user_ message via mach_msg2_trap().
	 *
	 * To curb kobject port/message queue confusion and improve control flow
	 * integrity, mach_msg2_trap() invocations mandate the use of either
	 * MACH64_SEND_KOBJECT_CALL or MACH64_SEND_MQ_CALL and that the flag
	 * matches the underlying port type. (unless the call is from a simulator,
	 * since old simulators keep using mach_msg() in all cases indiscriminatingly.)
	 *
	 * Since:
	 *     (1) We make sure to always pass either MACH64_SEND_MQ_CALL or
	 *         MACH64_SEND_KOBJECT_CALL bit at all sites outside simulators
	 *         (checked by mach_msg2_trap());
	 *     (2) We checked in mach_msg2_trap() that _exactly_ one of the three bits is set.
	 *
	 * CFI check cannot be bypassed by simply setting MACH64_SEND_ANY.
	 */
#if XNU_TARGET_OS_OSX
	if (opts & MACH64_SEND_ANY) {
		return MACH_MSG_SUCCESS;
	}
#endif /* XNU_TARGET_OS_OSX */

	natural_t otype = ip_type(port);
	if (otype == IOT_TIMER_PORT) {
#if XNU_TARGET_OS_OSX
		/* never tolerated for enhanced-security senders */
		if (__improbable(opts & MACH64_POLICY_ENHANCED)) {
			return MACH_SEND_INVALID_OPTIONS;
		}
		/*
		 * For bincompat, let's still allow user messages to timer port, but
		 * force MACH64_SEND_MQ_CALL flag for memory segregation.
		 */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
#else
		return MACH_SEND_INVALID_OPTIONS;
#endif
	} else if (io_is_kobject_type(otype)) {
		if (otype == IKOT_UEXT_OBJECT) {
			/* DriverKit objects take their own dedicated CFI bit */
			if (__improbable(!(opts & MACH64_SEND_DK_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}
		} else {
			/* Otherwise, caller must set MACH64_SEND_KOBJECT_CALL. */
			if (__improbable(!(opts & MACH64_SEND_KOBJECT_CALL))) {
				return MACH_SEND_INVALID_OPTIONS;
			}

			/* kobject calls must be a combined send/receive */
			if (__improbable((opts & MACH64_RCV_MSG) == 0)) {
				if ((opts & MACH64_POLICY_ENHANCED) ||
				    IP_VALID(hdr->msgh_local_port) ||
				    !ipc_policy_allow_send_only_kobject_calls()) {
					return MACH_SEND_INVALID_OPTIONS;
				}
			}
		}
#if CONFIG_CSR
	} else if (csr_check(CSR_ALLOW_KERNEL_DEBUGGER) == 0) {
		/*
		 * Allow MACH64_SEND_KOBJECT_CALL flag to message queues
		 * when SIP is off (for Mach-on-Mach emulation).
		 */
#endif /* CONFIG_CSR */
	} else {
		/* If destination is a message queue, caller must set MACH64_SEND_MQ_CALL */
		if (__improbable(!(opts & MACH64_SEND_MQ_CALL))) {
			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	return MACH_MSG_SUCCESS;
}
910 
/*
 * Validate a user-supplied message header before send.
 *
 * Checks, in order:
 *  - mach_msg2() option/destination-type consistency,
 *  - OOL port-array restrictions for enhanced-v2 processes,
 *  - reply-port security policy (IOT_REPLY_PORT requirements),
 *  - message filtering when the destination is filtered.
 *
 * Returns MACH_MSG_SUCCESS or a send error.  MACH_SEND_INVALID_OPTIONS
 * failures that reach `out:` additionally raise a
 * kGUARD_EXC_INVALID_OPTIONS guard exception.
 */
mach_msg_return_t
ipc_validate_kmsg_header_from_user(
	mach_msg_header_t      *hdr,
	mach_msg_send_uctx_t   *send_uctx,
	mach_msg_option64_t     opts)
{
	ipc_port_t dest_port = hdr->msgh_remote_port;
	ipc_port_t reply_port = hdr->msgh_local_port;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	ipc_space_policy_t current_policy;

	/* destination/option consistency is only enforced for mach_msg2() callers */
	if (opts & MACH64_MACH_MSG2) {
		mr = ipc_validate_kmsg_dest_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

	/*
	 * For enhanced v2 binaries, enforce two OOL port array restrictions:
	 *     - the receive right has to be of a type that explicitly
	 *       allows receiving that descriptor
	 *     - there could be no more than ONE single array in a kmsg
	 */
	current_policy = ipc_convert_msg_options_to_space(opts);
	if (ool_port_array_enforced &&
	    send_uctx->send_dsc_port_arrays_count &&
	    ipc_should_apply_policy(current_policy, IPC_POLICY_ENHANCED_V2)) {
		if (!ip_is_port_array_allowed(dest_port)) {
			mach_port_guard_exception(current_policy,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_OPTIONS_OOL_RIGHT,
			    ip_type(dest_port)),
			    kGUARD_EXC_DESCRIPTOR_VIOLATION);

			/*
			 * Return directly (not via `out:`) so the descriptor
			 * violation guard above is not followed by a second,
			 * generic invalid-options guard exception.
			 */
			return MACH_SEND_INVALID_OPTIONS;
		}

		if (send_uctx->send_dsc_port_arrays_count > 1) {
			mach_port_guard_exception(current_policy,
			    MPG_PAYLOAD(MPG_FLAGS_INVALID_OPTIONS_OOL_ARRAYS,
			    send_uctx->send_dsc_port_arrays_count),
			    kGUARD_EXC_DESCRIPTOR_VIOLATION);

			/* see comment above: deliberately bypasses the `out:` guard */
			return MACH_SEND_INVALID_OPTIONS;
		}
	}

	/*
	 * Ensure that the reply field follows our security policies,
	 * including IOT_REPLY_PORT requirements
	 */
	mr = ipc_validate_local_port(reply_port, dest_port, opts);
	if (mr != MACH_MSG_SUCCESS) {
		goto out;
	}

	/*
	 * Evaluate message filtering if the sender is filtered.
	 */
	if ((opts & MACH64_POLICY_FILTER_MSG) &&
	    mach_msg_filter_at_least(MACH_MSG_FILTER_CALLBACKS_VERSION_1) &&
	    ip_to_object(dest_port)->io_filtered) {
		mr = ipc_filter_kmsg_header_from_user(hdr, dest_port, opts);
		if (mr != MACH_MSG_SUCCESS) {
			goto out;
		}
	}

out:
	/* invalid-options failures also deliver a guard exception */
	if (mr == MACH_SEND_INVALID_OPTIONS) {
		mach_port_guard_exception(0, opts, kGUARD_EXC_INVALID_OPTIONS);
	}
	return mr;
}
985 
986 #pragma mark receive immovability
987 
/*
 * Decide whether the receive right named `name` in `space` may be moved.
 * Raises the appropriate guard exception before denying.
 */
bool
ipc_move_receive_allowed(
	ipc_space_t             space,
	ipc_port_t              port,
	mach_port_name_t        name)
{
	ipc_space_policy_t policy = ipc_space_policy(space);
	/*
	 * Check for service port before immovability so the task crashes
	 * with reason kGUARD_EXC_SERVICE_PORT_VIOLATION_FATAL
	 * (rather than the generic kGUARD_EXC_IMMOVABLE below).
	 */
	if (service_port_defense_enabled &&
	    ip_is_strong_service_port(port) &&
	    !task_is_initproc(space->is_task)) {
		mach_port_guard_exception(0, name,
		    kGUARD_EXC_SERVICE_PORT_VIOLATION_FATAL);
		return false;
	}

	/*
	 * Moving a provisional reply port under the enhanced-v2 policy raises
	 * a guard exception but does NOT deny the move here.
	 * NOTE(review): presumably soft enforcement / telemetry — confirm.
	 */
	if (ip_type(port) == IOT_PROVISIONAL_REPLY_PORT &&
	    ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V2) &&
	    !ipc_space_has_telemetry_type(space, IS_HAS_MOVE_PRP_TELEMETRY)) {
		mach_port_guard_exception(name, 0, kGUARD_EXC_MOVE_PROVISIONAL_REPLY_PORT);
	}

	/* immovable receive rights (kobjects, reply ports, ...) never move */
	if (ip_is_immovable_receive(port)) {
		mach_port_guard_exception(name, 0, kGUARD_EXC_IMMOVABLE);
		return false;
	}

	return true;
}
1020 
1021 #pragma mark send immovability
1022 
1023 
/*
 * Decide whether a send right for `port`, copied into `curr_task`'s
 * space, should be marked immovable (IE_BITS_IMMOVABLE_SEND).
 *
 * Task/thread control-port special cases are handled first; everything
 * else falls through to the per-type default in the policy table.
 * `label` is the port's label snapshot provided by the caller.
 */
bool
ipc_should_mark_immovable_send(
	task_t curr_task,
	ipc_port_t port,
	ipc_object_label_t label)
{
	thread_t ctrl_thread = THREAD_NULL;
	task_t   ctrl_task   = TASK_NULL;

	/*
	 * task obtaining its own task control port is controlled by security policy
	 * see `task_set_ctrl_port_default`
	 * This must come first so that we avoid evaluating the kobject port before ipc_task_enable has run
	 */
	if (curr_task->itk_task_ports[TASK_FLAVOR_CONTROL] == port) {
		return task_is_immovable(curr_task);
	}

	/* resolve the task owning the control port, if it is one */
	switch (ip_type(port)) {
	case IKOT_TASK_CONTROL:
		ctrl_task = ipc_kobject_get_raw(port, IKOT_TASK_CONTROL);
		break;
	case IKOT_THREAD_CONTROL:
		ctrl_thread = ipc_kobject_get_raw(port, IKOT_THREAD_CONTROL);
		if (ctrl_thread) {
			ctrl_task = get_threadtask(ctrl_thread);
		}
		break;
	default:
		break;
	}

	/*
	 * task obtaining its own thread control port is controlled by security policy
	 * see `task_set_ctrl_port_default`
	 */
	if (ctrl_thread && curr_task == ctrl_task) {
		/*
		 * we cannot assert that the control port options for the task are set up
		 * yet because we may be copying out the thread control port during exec.
		 * This means that the first thread control port copyout will always be movable, but other
		 * copyouts will occur before userspace is allowed to run any code which will subsequently mark it
		 * as immovable if needed.
		 */
		return task_is_immovable_no_assert(curr_task);
	}

	/*
	 * all control ports obtained by another process are movable
	 * while the space is inactive (for corpses).
	 */
	if (ctrl_task && !is_active(ctrl_task->itk_space)) {
		assert(ctrl_task != curr_task);
		assert(ip_is_tt_control_port_type(label.io_type));
		return false;
	}

	/* special cases are handled, now we refer to the default policy */
	return !ipc_policy(label)->pol_movable_send;
}
1084 
1085 /* requires: nothing locked, port is valid */
1086 static bool
ip_is_currently_immovable_send(ipc_port_t port)1087 ip_is_currently_immovable_send(ipc_port_t port)
1088 {
1089 	ipc_object_label_t label = ipc_port_lock_label_get(port);
1090 	bool port_is_immovable_send = ipc_should_mark_immovable_send(current_task(), port, label);
1091 	ip_mq_unlock_label_put(port, &label);
1092 	return port_is_immovable_send;
1093 }
1094 
1095 bool
ipc_can_stash_naked_send(ipc_port_t port)1096 ipc_can_stash_naked_send(ipc_port_t port)
1097 {
1098 	return !IP_VALID(port) || !ip_is_currently_immovable_send(port);
1099 }
1100 
1101 #pragma mark entry init
1102 
/*
 * Initialize a space entry that names `object` with rights `type`
 * and `urefs` user references.
 *
 * When installing send rights to a port, consult
 * ipc_should_mark_immovable_send() against the space owner's task and
 * record IE_BITS_IMMOVABLE_SEND in the entry bits accordingly.
 */
void
ipc_entry_init(
	ipc_space_t         space,
	ipc_object_t        object,
	mach_port_type_t    type,
	ipc_entry_t         entry,
	mach_port_urefs_t   urefs,
	mach_port_name_t    name)
{
	/* object type can be deadname, port, or a portset */
	assert((type & MACH_PORT_TYPE_ALL_RIGHTS) == type);
	assert(type != MACH_PORT_TYPE_NONE);
	assert(urefs <= MACH_PORT_UREFS_MAX);
	assert(entry);

	if (object && (type & MACH_PORT_TYPE_SEND_RIGHTS)) {
		ipc_port_t port = ip_object_to_port(object);
		ipc_object_label_t label = ip_label_get(port);

		/* immovability is decided for the task owning the space */
		if (ipc_should_mark_immovable_send(space->is_task, port, label)) {
			entry->ie_bits |= IE_BITS_IMMOVABLE_SEND;
		}
		io_label_set_and_put(&port->ip_object, &label);
	}
	entry->ie_object = object;
	entry->ie_bits |= type | urefs;
	ipc_entry_modified(space, name, entry);
}
1131 
1132 #pragma mark policy guard violations
1133 
1134 void
mach_port_guard_exception(uint32_t target,uint64_t payload,unsigned reason)1135 mach_port_guard_exception(uint32_t target, uint64_t payload, unsigned reason)
1136 {
1137 	mach_exception_code_t code = 0;
1138 	EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
1139 	EXC_GUARD_ENCODE_FLAVOR(code, reason);
1140 	EXC_GUARD_ENCODE_TARGET(code, target);
1141 	mach_exception_subcode_t subcode = (uint64_t)payload;
1142 	thread_t t = current_thread();
1143 	bool fatal = FALSE;
1144 
1145 	if (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE &&
1146 	    (get_threadtask(t)->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)) {
1147 		fatal = true;
1148 	} else if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
1149 		fatal = true;
1150 	}
1151 	thread_guard_violation(t, code, subcode, fatal);
1152 }
1153 
1154 void
mach_port_guard_exception_immovable(ipc_space_t space,mach_port_name_t name,mach_port_t port,mach_msg_type_name_t disp,__assert_only ipc_entry_t entry)1155 mach_port_guard_exception_immovable(
1156 	ipc_space_t             space,
1157 	mach_port_name_t        name,
1158 	mach_port_t             port,
1159 	mach_msg_type_name_t    disp,
1160 	__assert_only ipc_entry_t entry)
1161 {
1162 	if (space == current_space()) {
1163 		assert(entry->ie_bits & IE_BITS_IMMOVABLE_SEND);
1164 		assert(entry->ie_port == port);
1165 		uint64_t payload = MPG_PAYLOAD(MPG_FLAGS_NONE, ip_type(port), disp);
1166 		mach_port_guard_exception(name, payload, kGUARD_EXC_IMMOVABLE);
1167 	}
1168 }
1169 
1170 void
mach_port_guard_exception_pinned(ipc_space_t space,mach_port_name_t name,uint64_t payload)1171 mach_port_guard_exception_pinned(
1172 	ipc_space_t             space,
1173 	mach_port_name_t        name,
1174 	uint64_t                payload)
1175 {
1176 	ipc_space_policy_t policy = ipc_space_policy(space);
1177 	int guard;
1178 
1179 	if (space != current_space()) {
1180 		guard = kGUARD_EXC_NONE;
1181 	} else if (policy &
1182 	    (IPC_SPACE_POLICY_TRANSLATED | IPC_SPACE_POLICY_SIMULATED)) {
1183 		guard = kGUARD_EXC_NONE;
1184 	} else if (ipc_should_apply_policy(policy, IPC_POLICY_ENHANCED_V1)) {
1185 		if (ipc_control_port_options & ICP_OPTIONS_PINNED_1P_HARD) {
1186 			guard = kGUARD_EXC_MOD_REFS;
1187 		} else if (ipc_control_port_options & ICP_OPTIONS_PINNED_1P_SOFT) {
1188 			guard = kGUARD_EXC_MOD_REFS_NON_FATAL;
1189 		} else {
1190 			guard = kGUARD_EXC_NONE;
1191 		}
1192 	} else {
1193 		if (ipc_control_port_options & ICP_OPTIONS_PINNED_3P_HARD) {
1194 			guard = kGUARD_EXC_MOD_REFS;
1195 		} else if (ipc_control_port_options & ICP_OPTIONS_PINNED_3P_SOFT) {
1196 			guard = kGUARD_EXC_MOD_REFS_NON_FATAL;
1197 		} else {
1198 			guard = kGUARD_EXC_NONE;
1199 		}
1200 	}
1201 
1202 	if (guard != kGUARD_EXC_NONE) {
1203 		mach_port_guard_exception(name, payload, guard);
1204 	}
1205 }
1206 
/*
 *	Routine:	mach_port_guard_ast
 *	Purpose:
 *		Raises an exception for mach port guard violation.
 *	Conditions:
 *		None.
 *	Returns:
 *		None.
 */

void
mach_port_guard_ast(
	thread_t                t,
	mach_exception_data_type_t code,
	mach_exception_data_type_t subcode)
{
	unsigned int reason = EXC_GUARD_DECODE_GUARD_FLAVOR(code);
	task_t task = get_threadtask(t);
	unsigned int behavior = task->task_exc_guard;
	bool fatal = true;

	assert(task == current_task());
	assert(task != kernel_task);

	if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
		/*
		 * Fatal Mach port guards - always delivered synchronously if dev mode is on.
		 * Check if anyone has registered for Synchronous EXC_GUARD, if yes then,
		 * deliver it synchronously and then kill the process, else kill the process
		 * and deliver the exception via EXC_CORPSE_NOTIFY.
		 */

		int flags = PX_DEBUG_NO_HONOR;
		exception_info_t info = {
			.os_reason = OS_REASON_GUARD,
			.exception_type = EXC_GUARD,
			.mx_code = code,
			.mx_subcode = subcode,
		};

		if (task_exception_notify(EXC_GUARD, code, subcode, fatal) == KERN_SUCCESS) {
			flags |= PX_PSIGNAL;
		}
		exit_with_mach_exception(get_bsdtask_info(task), info, flags);
	} else {
		/*
		 * Mach port guards controlled by task settings.
		 */

		/* Is delivery enabled */
		if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
			return;
		}

		/*
		 * If only once, make sure we're that once: atomically clear
		 * the DELIVER bit; retry on CAS failure and bail if another
		 * thread already consumed the one delivery.
		 */
		while (behavior & TASK_EXC_GUARD_MP_ONCE) {
			uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_MP_DELIVER;

			if (os_atomic_cmpxchg(&task->task_exc_guard,
			    behavior, new_behavior, relaxed)) {
				break;
			}
			behavior = task->task_exc_guard;
			if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
				return;
			}
		}
		/* fatal only for optional-range reasons when the task opted in */
		fatal = (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL)
		    && (reason <= MAX_OPTIONAL_kGUARD_EXC_CODE);
		kern_return_t sync_exception_result;
		sync_exception_result = task_exception_notify(EXC_GUARD, code, subcode, fatal);

		if (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) {
			if (reason > MAX_OPTIONAL_kGUARD_EXC_CODE) {
				/* generate a simulated crash if not handled synchronously */
				if (sync_exception_result != KERN_SUCCESS) {
					task_violated_guard(code, subcode, NULL, TRUE);
				}
			} else {
				/*
				 * Only generate crash report if synchronous EXC_GUARD wasn't handled,
				 * but it has to die regardless.
				 */

				int flags = PX_DEBUG_NO_HONOR;
				exception_info_t info = {
					.os_reason = OS_REASON_GUARD,
					.exception_type = EXC_GUARD,
					.mx_code = code,
					.mx_subcode = subcode
				};

				if (sync_exception_result == KERN_SUCCESS) {
					flags |= PX_PSIGNAL;
				}

				exit_with_mach_exception(get_bsdtask_info(task), info, flags);
			}
		} else if (task->task_exc_guard & TASK_EXC_GUARD_MP_CORPSE) {
			/* Raise exception via corpse fork if not handled synchronously */
			if (sync_exception_result != KERN_SUCCESS) {
				task_violated_guard(code, subcode, NULL, TRUE);
			}
		}
	}
}
1313 
1314 #pragma mark notification policies
1315 
/*
 * Policy check for registering a port-destroyed notification on a
 * service port: only launchd may register, and the notify port must be
 * an immovable receive right owned by launchd.
 *
 * Returns true when allowed.  On denial, *payload is set to the
 * MPG_FLAGS_KERN_FAILURE_* sub-reason for the caller's guard exception
 * (it is left untouched on success).
 */
static bool
ipc_allow_service_port_register_pd(
	ipc_port_t              service_port,
	ipc_port_t              notify_port,
	uint64_t                *payload)
{
	/* boot-arg disables this security policy */
	if (!service_port_defense_enabled || !IP_VALID(notify_port)) {
		return true;
	}
	/* enforce this policy only on service port types */
	if (ip_is_any_service_port(service_port)) {
		/* Only launchd should be able to register for port destroyed notification on a service port. */
		if (!task_is_initproc(current_task())) {
			*payload = MPG_FLAGS_KERN_FAILURE_TASK;
			return false;
		}
		/* notify_port needs to be immovable */
		if (!ip_is_immovable_receive(notify_port)) {
			*payload = MPG_FLAGS_KERN_FAILURE_NOTIFY_TYPE;
			return false;
		}
		/* notify_port should be owned by launchd */
		if (!task_is_initproc(notify_port->ip_receiver->is_task)) {
			*payload = MPG_FLAGS_KERN_FAILURE_NOTIFY_RECV;
			return false;
		}
	}
	return true;
}
1346 
/*
 * Decide whether a port-destroyed notification may be registered on
 * `pd_port`, delivered via `notify_port`.
 *
 * Returns KERN_SUCCESS when allowed; otherwise raises the appropriate
 * guard exception and returns an error.
 */
kern_return_t
ipc_allow_register_pd_notification(
	ipc_port_t              pd_port,
	ipc_port_t              notify_port)
{
	uint64_t payload;

	/*
	 * you cannot register for port destroyed notifications
	 * on an immovable receive right (which includes kobjects),
	 * or a (special) reply port or any other port that explicitly disallows them.
	 */
	release_assert(ip_in_a_space(pd_port));
	if (ip_is_immovable_receive(pd_port) ||
	    !ipc_policy(pd_port)->pol_notif_port_destroy) {
		mach_port_guard_exception(ip_type(pd_port), MACH_NOTIFY_PORT_DESTROYED, kGUARD_EXC_INVALID_NOTIFICATION_REQ);
		return KERN_INVALID_RIGHT;
	}

	/* Stronger pd enforcement for service ports */
	if (!ipc_allow_service_port_register_pd(pd_port, notify_port, &payload)) {
		mach_port_guard_exception(0, payload, kGUARD_EXC_KERN_FAILURE);
		return KERN_INVALID_RIGHT;
	}

	/* Allow only one registration of this notification */
	if (ipc_port_has_prdrequest(pd_port)) {
		mach_port_guard_exception(0, MPG_FLAGS_KERN_FAILURE_MULTI_NOTI, kGUARD_EXC_KERN_FAILURE);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
1380 
1381 
1382 #pragma mark policy array
1383 
/*
 * Default no-senders callback installed for policies that never expect
 * one; reaching it indicates a policy-table setup bug, so panic.
 */
__dead2
static void
no_kobject_no_senders(
	ipc_port_t              port,
	mach_port_mscount_t     mscount __unused)
{
	panic("unexpected call to no_senders for object %p, type %d",
	    port, ip_type(port));
}
1393 
/*
 * Default label-free callback for object types that carry no label;
 * reaching it indicates a policy-table setup bug, so panic.
 */
__dead2
static void
no_label_free(ipc_object_label_t label)
{
	panic("unexpected call to label_free for object type %d, label %p",
	    label.io_type, label.iol_pointer);
}
1401 
/*
 * Denotes a policy whose safe value is the argument to PENDING(),
 * but which is currently not the default, pending validation/prep work.
 */
1407 
/*
 * Per-object-type IPC policy table, indexed by ipc_object_type_t.
 *
 * User port types are listed explicitly.  All kobject types start from
 * the catch-all entry at the end (immovable receive, dead-name
 * notifications only) and are filled in by
 * ipc_kobject_register_startup(); remaining defaults are installed by
 * ipc_policy_set_defaults() at startup.
 */
__security_const_late
struct ipc_object_policy ipc_policy_array[IOT_UNKNOWN] = {
	[IOT_PORT_SET] = {
		.pol_name               = "port set",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = false,
	},
	[IOT_PORT] = {
		.pol_name               = "port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_SERVICE_PORT] = {
		.pol_name               = "service port",
		.pol_movability         = PENDING(IPC_MOVE_POLICY_ONCE_OR_AFTER_PD),
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_enforce_reply_semantics = PENDING(true), /* pending on service port defense cleanup */
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_BOOTSTRAP_PORT] = {
		.pol_name               = "bootstrap port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER, /* bootstrap port should never leave launchd */
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_enforce_reply_semantics = PENDING(true), /* pending on service port defense cleanup */
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_WEAK_SERVICE_PORT] = {
		.pol_name               = "weak service port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_label_free         = ipc_service_port_label_dealloc,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_CONNECTION_PORT] = {
		.pol_name               = "connection port",
		.pol_movability         = IPC_MOVE_POLICY_ONCE,
		.pol_label_free         = ipc_connection_port_label_dealloc,
		.pol_enforce_reply_semantics = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},
	[IOT_CONNECTION_PORT_WITH_PORT_ARRAY] = {
		.pol_name               = "conn port with ool port array",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_construct_entitlement = MACH_PORT_CONNECTION_PORT_WITH_PORT_ARRAY,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_EXCEPTION_PORT] = {
		.pol_name               = "exception port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_TIMER_PORT] = {
		.pol_name               = "timer port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_movable_send       = true,
		.pol_label_free         = mk_timer_port_label_dealloc,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
	},
	[IOT_REPLY_PORT] = {
		.pol_name               = "reply port",
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
	[IOT_SPECIAL_REPLY_PORT] = {
		.pol_name               = "special reply port",
		/*
		 * General use of a special reply port as a receive right
		 * can cause type confusion in the importance code.
		 */
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
	[IOT_PROVISIONAL_REPLY_PORT] = {
		.pol_name               = "provisional reply port",
		.pol_movability         = IPC_MOVE_POLICY_ALWAYS,
		.pol_movable_send       = true,
		.pol_construct_entitlement = MACH_PORT_PROVISIONAL_REPLY_ENTITLEMENT,
		.pol_notif_dead_name    = true,
		.pol_notif_no_senders   = true,
		.pol_notif_port_destroy = true,
	},

	/* catch-all for kobject types; refined at registration time */
	[__IKOT_FIRST ... IOT_UNKNOWN - 1] = {
		.pol_movability         = IPC_MOVE_POLICY_NEVER,
		.pol_notif_dead_name    = true,
	},
};
1512 
/*
 * Relax the service-port move policy when service-port defense is
 * disabled via boot-arg.  Runs once during the TUNABLES startup phase.
 */
__startup_func
static void
ipc_policy_update_from_tunables(void)
{
	if (!service_port_defense_enabled) {
		ipc_policy_array[IOT_SERVICE_PORT].pol_movability =
		    IPC_MOVE_POLICY_ALWAYS;
	}
}
STARTUP(TUNABLES, STARTUP_RANK_LAST, ipc_policy_update_from_tunables);
1523 
/*
 * Ensure new port types that require a construction entitlement
 * are marked as immovable.
 */
__startup_func
static void
ipc_policy_construct_entitlement_hardening(void)
{
	/* No need to check kobjects because they are always immovable */
	for (ipc_object_type_t i = 0; i < __IKOT_FIRST; i++) {
		/*
		 * IOT_PROVISIONAL_REPLY_PORT is an exception as it used to be
		 * movable. For process opted for enhanced security V2,
		 * kGUARD_EXC_MOVE_PROVISIONAL_REPLY_PORT will be thrown when a
		 * provisional reply port is being moved.
		 */
		if (i == IOT_PROVISIONAL_REPLY_PORT) {
			continue;
		}
		if (ipc_policy_array[i].pol_construct_entitlement) {
			assert(ipc_policy_array[i].pol_movability == IPC_MOVE_POLICY_NEVER);
		}
	}
}
STARTUP(TUNABLES, STARTUP_RANK_LAST, ipc_policy_construct_entitlement_hardening);
1549 
/*
 * Register a kobject type's operations into the policy table.
 * Called once per kobject type during startup; panics on double
 * registration (detected via a previously set pol_name).
 */
__startup_func
void
ipc_kobject_register_startup(ipc_kobject_ops_t ops)
{
	struct ipc_object_policy *pol = &ipc_policy_array[ops->iko_op_type];

	if (pol->pol_name) {
		panic("trying to register kobject(%d) twice", ops->iko_op_type);
	}

	/*
	 * Always make sure kobject ports have immovable receive rights.
	 *
	 * They use the ip_kobject field of the ipc_port structure,
	 * which is unioned with ip_imp_task.
	 *
	 * Thus, general use of a kobject port as a receive right can
	 * cause type confusion in the importance code.
	 */
	ipc_release_assert(pol->pol_movability == IPC_MOVE_POLICY_NEVER);
	if (ops->iko_op_no_senders) {
		pol->pol_notif_no_senders = true;
	}

	/* copy the registered operations into the policy entry */
	pol->pol_name               = ops->iko_op_name;
	pol->pol_kobject_stable     = ops->iko_op_stable;
	pol->pol_kobject_permanent  = ops->iko_op_permanent;
	pol->pol_kobject_no_senders = ops->iko_op_no_senders;
	pol->pol_label_free         = ops->iko_op_label_free;
	pol->pol_movable_send       = ops->iko_op_movable_send;
}
1581 
1582 __startup_func
1583 static void
ipc_policy_set_defaults(void)1584 ipc_policy_set_defaults(void)
1585 {
1586 	/*
1587 	 * Check that implicit init to 0 picks the right "values"
1588 	 * for all properties.
1589 	 */
1590 	static_assert(IPC_MOVE_POLICY_NEVER == 0);
1591 
1592 	for (uint32_t i = 0; i < IOT_UNKNOWN; i++) {
1593 		struct ipc_object_policy *pol = &ipc_policy_array[i];
1594 
1595 		if (!pol->pol_kobject_no_senders) {
1596 			pol->pol_kobject_no_senders = no_kobject_no_senders;
1597 		}
1598 		if (!pol->pol_label_free) {
1599 			pol->pol_label_free = no_label_free;
1600 		}
1601 	}
1602 }
1603 STARTUP(MACH_IPC, STARTUP_RANK_LAST, ipc_policy_set_defaults);
1604 
1605 #pragma mark exception port policy
1606 
/*
 * Decide whether `port` is acceptable as an exception port for `task`.
 * NOTE(review): callers appear to pass TASK_NULL for non-task-level
 * registrations (the ux handler case below) — confirm against callers.
 */
bool
ipc_is_valid_exception_port(
	task_t task,
	ipc_port_t port)
{
	/* the ux handler port is always acceptable outside a task context */
	if (task == TASK_NULL && is_ux_handler_port(port)) {
		return true;
	}

	/* dedicated exception ports are always acceptable */
	if (ip_is_exception_port(port)) {
		return true;
	}

	/*
	 * rdar://77996387
	 * Avoid exposing immovable ports send rights (kobjects) to `get_exception_ports`,
	 * but exception ports to still be set.
	 */
	if (!ipc_can_stash_naked_send(port)) {
		return false;
	}

	if (ip_is_immovable_receive(port)) {
		/*
		 * rdar://153108740
		 * Temporarily allow service ports until telemetry is clean.
		 */
		if (ip_is_strong_service_port(port)) {
			return true;
		}
		return false;
	}

	return true;
}
1642