xref: /xnu-11215.61.5/bsd/kern/kern_codesigning.c (revision 4f1223e81cd707a65cc109d0b8ad6653699da3c4)
1 /*
2  * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern_xnu.h>
27 #include <vm/pmap.h>
28 #include <vm/pmap_cs.h>
29 #include <vm/vm_map_xnu.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/locks.h>
34 #include <kern/lock_rw.h>
35 #include <libkern/libkern.h>
36 #include <libkern/section_keywords.h>
37 #include <libkern/coretrust/coretrust.h>
38 #include <pexpert/pexpert.h>
39 #include <sys/user.h>
40 #include <sys/vm.h>
41 #include <sys/proc.h>
42 #include <sys/proc_require.h>
43 #include <sys/codesign.h>
44 #include <sys/code_signing.h>
45 #include <sys/lockdown_mode.h>
46 #include <sys/reason.h>
47 #include <sys/kdebug_kernel.h>
48 #include <sys/kdebug_triage.h>
49 #include <sys/sysctl.h>
50 #include <uuid/uuid.h>
51 #include <IOKit/IOBSD.h>
52 
53 #if CONFIG_SPTM
54 #include <sys/trusted_execution_monitor.h>
55 #endif
56 
57 #if XNU_KERNEL_PRIVATE
58 vm_address_t
code_signing_allocate(size_t alloc_size)59 code_signing_allocate(
60 	size_t alloc_size)
61 {
62 	vm_address_t alloc_addr = 0;
63 
64 	if (alloc_size == 0) {
65 		panic("%s: zero allocation size", __FUNCTION__);
66 	}
67 	size_t aligned_size = round_page(alloc_size);
68 
69 	kern_return_t ret = kmem_alloc(
70 		kernel_map,
71 		&alloc_addr, aligned_size,
72 		KMA_KOBJECT | KMA_DATA | KMA_ZERO,
73 		VM_KERN_MEMORY_SECURITY);
74 
75 	if (ret != KERN_SUCCESS) {
76 		printf("%s: unable to allocate %lu bytes\n", __FUNCTION__, aligned_size);
77 	} else if (alloc_addr == 0) {
78 		printf("%s: invalid allocation\n", __FUNCTION__);
79 	}
80 
81 	return alloc_addr;
82 }
83 
84 void
code_signing_deallocate(vm_address_t * alloc_addr,size_t alloc_size)85 code_signing_deallocate(
86 	vm_address_t *alloc_addr,
87 	size_t alloc_size)
88 {
89 	if (alloc_addr == NULL) {
90 		panic("%s: invalid pointer provided", __FUNCTION__);
91 	} else if ((*alloc_addr == 0) || ((*alloc_addr & PAGE_MASK) != 0)) {
92 		panic("%s: address provided: %p", __FUNCTION__, (void*)(*alloc_addr));
93 	} else if (alloc_size == 0) {
94 		panic("%s: zero allocation size", __FUNCTION__);
95 	}
96 	size_t aligned_size = round_page(alloc_size);
97 
98 	/* Free the allocation */
99 	kmem_free(kernel_map, *alloc_addr, aligned_size);
100 
101 	/* Clear the address */
102 	*alloc_addr = 0;
103 }
104 #endif /* XNU_KERNEL_PRIVATE */
105 
106 SYSCTL_DECL(_security);
107 SYSCTL_DECL(_security_codesigning);
108 SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");
109 
110 static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
111 static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
112 static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;
113 static uint32_t security_boot_mode_complete = 0;
114 
115 SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
116 SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");
117 
118 SYSCTL_UINT(
119 	_security_codesigning, OID_AUTO,
120 	security_boot_mode_complete, CTLFLAG_RD,
121 	&security_boot_mode_complete, 0, "security boot mode completion status");
122 
123 void
code_signing_configuration(code_signing_monitor_type_t * monitor_type_out,code_signing_config_t * config_out)124 code_signing_configuration(
125 	code_signing_monitor_type_t *monitor_type_out,
126 	code_signing_config_t *config_out)
127 {
128 	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
129 	code_signing_config_t config = 0;
130 
131 	/*
132 	 * Since we read this variable with load-acquire semantics, if we observe a value
133 	 * of true, it means we should be able to observe writes to cs_monitor and also
134 	 * cs_config.
135 	 */
136 	if (os_atomic_load(&cs_config_set, acquire) == true) {
137 		goto config_set;
138 	}
139 
140 	/*
141 	 * Add support for all the code signing features. This function is called very
142 	 * early in the system boot, much before kernel extensions such as Apple Mobile
143 	 * File Integrity come online. As a result, this function assumes that all the
144 	 * code signing features are enabled, and later on, different components can
145 	 * disable support for different features using disable_code_signing_feature().
146 	 */
147 	config |= CS_CONFIG_MAP_JIT;
148 	config |= CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
149 	config |= CS_CONFIG_COMPILATION_SERVICE;
150 	config |= CS_CONFIG_LOCAL_SIGNING;
151 	config |= CS_CONFIG_OOP_JIT;
152 
153 #if CODE_SIGNING_MONITOR
154 	/* Mark the code signing monitor as enabled if required */
155 	if (csm_enabled() == true) {
156 		config |= CS_CONFIG_CSM_ENABLED;
157 	}
158 
159 #if CONFIG_SPTM
160 	/*
161 	 * Since TrustedExecutionMonitor cannot call into any function within XNU, we
162 	 * query it's code signing configuration even before this function is called.
163 	 * Using that, we modify the state of the code signing features available.
164 	 */
165 	if (csm_enabled() == true) {
166 		bool platform_code_only = txm_cs_config->systemPolicy->platformCodeOnly;
167 
168 		/* Disable unsupported features when enforcing platform-code-only */
169 		if (platform_code_only == true) {
170 			config &= ~CS_CONFIG_MAP_JIT;
171 			config &= ~CS_CONFIG_COMPILATION_SERVICE;
172 			config &= ~CS_CONFIG_LOCAL_SIGNING;
173 			config &= ~CS_CONFIG_OOP_JIT;
174 		}
175 
176 		/*
177 		 * Restricted Execution Mode support. The pattern for this code snippet breaks
178 		 * the norm compared to others. For the other features, we consider them enabled
179 		 * by default unless TXM disables them. For REM, given this is a TXM only feature,
180 		 * we consider it disabled unless TXM explicitly tells us it is enabled.
181 		 */
182 		if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
183 			config |= CS_CONFIG_REM_SUPPORTED;
184 		}
185 
186 		/* MAP_JIT support */
187 		if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
188 			config &= ~CS_CONFIG_MAP_JIT;
189 		}
190 
191 		/* Developer mode support */
192 		if (txm_cs_config->systemPolicy->featureSet.developerMode == false) {
193 			config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
194 		}
195 
196 		/* Compilation service support */
197 		if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
198 			config &= ~CS_CONFIG_COMPILATION_SERVICE;
199 		}
200 
201 		/* Local signing support */
202 		if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
203 			config &= ~CS_CONFIG_LOCAL_SIGNING;
204 		}
205 
206 		/* OOP-JIT support */
207 		if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
208 			config &= ~CS_CONFIG_OOP_JIT;
209 		}
210 	}
211 	monitor_type = CS_MONITOR_TYPE_TXM;
212 #elif PMAP_CS_PPL_MONITOR
213 	monitor_type = CS_MONITOR_TYPE_PPL;
214 #endif /* CONFIG_SPTM */
215 #endif /* CODE_SIGNING_MONITOR */
216 
217 #if DEVELOPMENT || DEBUG
218 	/*
219 	 * We only ever need to parse for boot-args based exemption state on DEVELOPMENT
220 	 * or DEBUG builds as this state is not respected by any code signing component
221 	 * on RELEASE builds.
222 	 */
223 
224 #define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
225 #define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
226 #define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80
227 
228 	int amfi_mask = 0;
229 	int amfi_allow_any_signature = 0;
230 	int amfi_unrestrict_task_for_pid = 0;
231 	int amfi_get_out_of_my_way = 0;
232 	int cs_enforcement_disabled = 0;
233 	int cs_integrity_skip = 0;
234 	int amfi_relax_profile_trust = 0;
235 
236 	/* Parse the AMFI mask */
237 	PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));
238 
239 	/* Parse the AMFI soft-bypass */
240 	PE_parse_boot_argn(
241 		"amfi_allow_any_signature",
242 		&amfi_allow_any_signature,
243 		sizeof(amfi_allow_any_signature));
244 
245 	/* Parse the AMFI debug-bypass */
246 	PE_parse_boot_argn(
247 		"amfi_unrestrict_task_for_pid",
248 		&amfi_unrestrict_task_for_pid,
249 		sizeof(amfi_unrestrict_task_for_pid));
250 
251 	/* Parse the AMFI hard-bypass */
252 	PE_parse_boot_argn(
253 		"amfi_get_out_of_my_way",
254 		&amfi_get_out_of_my_way,
255 		sizeof(amfi_get_out_of_my_way));
256 
257 	/* Parse the system code signing hard-bypass */
258 	PE_parse_boot_argn(
259 		"cs_enforcement_disable",
260 		&cs_enforcement_disabled,
261 		sizeof(cs_enforcement_disabled));
262 
263 	/* Parse the system code signing integrity-check bypass */
264 	PE_parse_boot_argn(
265 		"cs_integrity_skip",
266 		&cs_integrity_skip,
267 		sizeof(cs_integrity_skip));
268 
269 	/* Parse the AMFI profile trust bypass */
270 	PE_parse_boot_argn(
271 		"amfi_relax_profile_trust",
272 		&amfi_relax_profile_trust,
273 		sizeof(amfi_relax_profile_trust));
274 
275 	/* CS_CONFIG_UNRESTRICTED_DEBUGGING */
276 	if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
277 		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
278 	} else if (amfi_unrestrict_task_for_pid) {
279 		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
280 	}
281 
282 	/* CS_CONFIG_ALLOW_ANY_SIGNATURE */
283 	if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
284 		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
285 	} else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
286 		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
287 	} else if (amfi_allow_any_signature) {
288 		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
289 	} else if (amfi_get_out_of_my_way) {
290 		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
291 	} else if (cs_enforcement_disabled) {
292 		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
293 	}
294 
295 	/* CS_CONFIG_ENFORCEMENT_DISABLED */
296 	if (cs_enforcement_disabled) {
297 		config |= CS_CONFIG_ENFORCEMENT_DISABLED;
298 	}
299 
300 	/* CS_CONFIG_GET_OUT_OF_MY_WAY */
301 	if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
302 		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
303 	} else if (amfi_get_out_of_my_way) {
304 		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
305 	} else if (cs_enforcement_disabled) {
306 		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
307 	}
308 
309 	/* CS_CONFIG_INTEGRITY_SKIP */
310 	if (cs_integrity_skip) {
311 		config |= CS_CONFIG_INTEGRITY_SKIP;
312 	}
313 
314 	/* CS_CONFIG_RELAX_PROFILE_TRUST */
315 	if (amfi_relax_profile_trust) {
316 		config |= CS_CONFIG_RELAX_PROFILE_TRUST;
317 	}
318 
319 #if CONFIG_SPTM
320 
321 	if (csm_enabled() == true) {
322 		/* allow_any_signature */
323 		if (txm_cs_config->exemptions.allowAnySignature == false) {
324 			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
325 		}
326 
327 		/* unrestrict_task_for_pid */
328 		if (txm_ro_data && !txm_ro_data->exemptions.allowUnrestrictedDebugging) {
329 			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
330 		}
331 
332 		/* cs_enforcement_disable */
333 		if (txm_ro_data && !txm_ro_data->exemptions.allowModifiedCode) {
334 			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
335 		}
336 
337 		/* get_out_of_my_way (skip_trust_evaluation) */
338 		if (txm_cs_config->exemptions.skipTrustEvaluation == false) {
339 			config &= ~CS_CONFIG_GET_OUT_OF_MY_WAY;
340 		}
341 
342 #if kTXMKernelAPIVersion >= 7
343 		/*
344 		 * In some cases, the relax_profile_trust exemption can be set even without
345 		 * the boot-arg on TXM devices. As a result, we always overrule the kernel's
346 		 * data with TXM's data for this exemption.
347 		 */
348 		if (txm_cs_config->exemptions.relaxProfileTrust == true) {
349 			config |= CS_CONFIG_RELAX_PROFILE_TRUST;
350 		} else {
351 			config &= ~CS_CONFIG_RELAX_PROFILE_TRUST;
352 		}
353 #endif
354 	}
355 
356 #elif PMAP_CS_PPL_MONITOR
357 
358 	if (csm_enabled() == true) {
359 		int pmap_cs_allow_any_signature = 0;
360 		bool override = PE_parse_boot_argn(
361 			"pmap_cs_allow_any_signature",
362 			&pmap_cs_allow_any_signature,
363 			sizeof(pmap_cs_allow_any_signature));
364 
365 		if (!pmap_cs_allow_any_signature && override) {
366 			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
367 		}
368 
369 		int pmap_cs_unrestrict_task_for_pid = 0;
370 		override = PE_parse_boot_argn(
371 			"pmap_cs_unrestrict_pmap_cs_disable",
372 			&pmap_cs_unrestrict_task_for_pid,
373 			sizeof(pmap_cs_unrestrict_task_for_pid));
374 
375 		if (!pmap_cs_unrestrict_task_for_pid && override) {
376 			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
377 		}
378 
379 		int pmap_cs_enforcement_disable = 0;
380 		override = PE_parse_boot_argn(
381 			"pmap_cs_allow_modified_code_pages",
382 			&pmap_cs_enforcement_disable,
383 			sizeof(pmap_cs_enforcement_disable));
384 
385 		if (!pmap_cs_enforcement_disable && override) {
386 			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
387 		}
388 	}
389 
390 #endif /* CONFIG_SPTM */
391 #endif /* DEVELOPMENT || DEBUG */
392 
393 	os_atomic_store(&cs_monitor, monitor_type, relaxed);
394 	os_atomic_store(&cs_config, config, relaxed);
395 
396 	/*
397 	 * We write the cs_config_set variable with store-release semantics which means
398 	 * no writes before this call will be re-ordered to after this call. Hence, if
399 	 * someone reads this variable with load-acquire semantics, and they observe a
400 	 * value of true, then they will be able to observe the correct values of the
401 	 * cs_monitor and the cs_config variables as well.
402 	 */
403 	os_atomic_store(&cs_config_set, true, release);
404 
405 config_set:
406 	/* Ensure configuration has been set */
407 	assert(os_atomic_load(&cs_config_set, relaxed) == true);
408 
409 	/* Set the monitor type */
410 	if (monitor_type_out) {
411 		*monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
412 	}
413 
414 	/* Set the configuration */
415 	if (config_out) {
416 		*config_out = os_atomic_load(&cs_config, relaxed);
417 	}
418 }
419 
420 void
disable_code_signing_feature(code_signing_config_t feature)421 disable_code_signing_feature(
422 	code_signing_config_t feature)
423 {
424 	/*
425 	 * We require that this function be called only after the code signing config
426 	 * has been setup initially with a call to code_signing_configuration.
427 	 */
428 	if (os_atomic_load(&cs_config_set, acquire) == false) {
429 		panic("attempted to disable code signing feature without init: %u", feature);
430 	}
431 
432 	/*
433 	 * We require that only a single feature be disabled through a single call to this
434 	 * function. Moreover, we ensure that only valid features are being disabled.
435 	 */
436 	switch (feature) {
437 	case CS_CONFIG_DEVELOPER_MODE_SUPPORTED:
438 		cs_config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
439 		break;
440 
441 	case CS_CONFIG_COMPILATION_SERVICE:
442 		cs_config &= ~CS_CONFIG_COMPILATION_SERVICE;
443 		break;
444 
445 	case CS_CONFIG_LOCAL_SIGNING:
446 		cs_config &= ~CS_CONFIG_LOCAL_SIGNING;
447 		break;
448 
449 	case CS_CONFIG_OOP_JIT:
450 		cs_config &= ~CS_CONFIG_OOP_JIT;
451 		break;
452 
453 	case CS_CONFIG_MAP_JIT:
454 		cs_config &= ~CS_CONFIG_MAP_JIT;
455 		break;
456 
457 	default:
458 		panic("attempted to disable a code signing feature invalidly: %u", feature);
459 	}
460 
461 	/* Ensure all readers can observe the latest data */
462 #if defined(__arm64__)
463 	__asm__ volatile ("dmb ish" ::: "memory");
464 #elif defined(__x86_64__)
465 	__asm__ volatile ("mfence" ::: "memory");
466 #else
467 #error "Unknown platform -- fence instruction unavailable"
468 #endif
469 }
470 
471 kern_return_t
secure_channel_shared_page(uint64_t * secure_channel_phys,size_t * secure_channel_size)472 secure_channel_shared_page(
473 	uint64_t *secure_channel_phys,
474 	size_t *secure_channel_size)
475 {
476 	return CSM_PREFIX(secure_channel_shared_page)(
477 		secure_channel_phys,
478 		secure_channel_size);
479 }
480 
481 #pragma mark Developer Mode
482 
483 void
enable_developer_mode(void)484 enable_developer_mode(void)
485 {
486 	CSM_PREFIX(toggle_developer_mode)(true);
487 }
488 
489 void
disable_developer_mode(void)490 disable_developer_mode(void)
491 {
492 	CSM_PREFIX(toggle_developer_mode)(false);
493 }
494 
495 bool
developer_mode_state(void)496 developer_mode_state(void)
497 {
498 	/* Assume false if the pointer isn't setup */
499 	if (developer_mode_enabled == NULL) {
500 		return false;
501 	}
502 
503 	return os_atomic_load(developer_mode_enabled, relaxed);
504 }
505 
506 #pragma mark Restricted Execution Mode
507 
508 kern_return_t
restricted_execution_mode_enable(void)509 restricted_execution_mode_enable(void)
510 {
511 	return CSM_PREFIX(rem_enable)();
512 }
513 
514 kern_return_t
restricted_execution_mode_state(void)515 restricted_execution_mode_state(void)
516 {
517 	return CSM_PREFIX(rem_state)();
518 }
519 
520 void
update_csm_device_state(void)521 update_csm_device_state(void)
522 {
523 	CSM_PREFIX(update_device_state)();
524 }
525 
526 void
complete_security_boot_mode(uint32_t security_boot_mode)527 complete_security_boot_mode(
528 	uint32_t security_boot_mode)
529 {
530 	CSM_PREFIX(complete_security_boot_mode)(security_boot_mode);
531 
532 	/*
533 	 * If we're reach here, it means the completion of the security boot mode was
534 	 * successful. We update our sysctl with the provided boot mode in order to
535 	 * signify both completion and the boot mode identifier.
536 	 */
537 	security_boot_mode_complete = security_boot_mode;
538 }
539 
540 #pragma mark Provisioning Profiles
541 /*
542  * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
543  * profiles when we have a monitor since the monitor needs to independently verify the
544  * profile data as well.
545  */
546 
void
garbage_collect_provisioning_profiles(void)
{
	/* Profile state only exists when a code signing monitor is present */
#if CODE_SIGNING_MONITOR
	csm_free_provisioning_profiles();
#endif
}
554 
555 #if CODE_SIGNING_MONITOR
556 
557 /* Structure used to maintain the set of registered profiles on the system */
558 typedef struct _cs_profile {
559 	/* The UUID of the registered profile */
560 	uuid_t profile_uuid;
561 
562 	/* The profile validation object from the monitor */
563 	void *profile_obj;
564 
565 	/*
566 	 * In order to minimize the number of times the same profile would need to be
567 	 * registered, we allow frequently used profiles to skip the garbage collector
568 	 * for one pass.
569 	 */
570 	bool skip_collector;
571 
572 	/* We skip repeated trust validations of the profile */
573 	bool trusted;
574 
575 	/* Linked list linkage */
576 	SLIST_ENTRY(_cs_profile) link;
577 } cs_profile_t;
578 
579 /* Linked list head for registered profiles */
580 static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);
581 
582 /* Lock for the provisioning profiles */
583 LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
584 decl_lck_rw_data(, profiles_lock);
585 
586 void
csm_initialize_provisioning_profiles(void)587 csm_initialize_provisioning_profiles(void)
588 {
589 	/* Ensure the CoreTrust kernel extension has loaded */
590 	if (coretrust == NULL) {
591 		panic("coretrust interface not available");
592 	}
593 
594 	/* Initialize the provisoning profiles lock */
595 	lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
596 	printf("initialized XNU provisioning profile data\n");
597 
598 #if PMAP_CS_PPL_MONITOR
599 	pmap_initialize_provisioning_profiles();
600 #endif
601 }
602 
603 static cs_profile_t*
search_for_profile_uuid(const uuid_t profile_uuid)604 search_for_profile_uuid(
605 	const uuid_t profile_uuid)
606 {
607 	cs_profile_t *profile = NULL;
608 
609 	/* Caller is required to acquire the lock */
610 	lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);
611 
612 	SLIST_FOREACH(profile, &all_profiles, link) {
613 		if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
614 			return profile;
615 		}
616 	}
617 
618 	return NULL;
619 }
620 
621 kern_return_t
csm_register_provisioning_profile(const uuid_t profile_uuid,const void * profile_blob,const size_t profile_blob_size)622 csm_register_provisioning_profile(
623 	const uuid_t profile_uuid,
624 	const void *profile_blob,
625 	const size_t profile_blob_size)
626 {
627 	cs_profile_t *profile = NULL;
628 	void *monitor_profile_obj = NULL;
629 	kern_return_t ret = KERN_DENIED;
630 
631 	/* Only proceed if code-signing-monitor is enabled */
632 	if (csm_enabled() == false) {
633 		return KERN_NOT_SUPPORTED;
634 	}
635 
636 	/* Allocate storage for the profile wrapper object */
637 	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
638 	assert(profile != NULL);
639 
640 	/* Lock the profile set exclusively */
641 	lck_rw_lock_exclusive(&profiles_lock);
642 
643 	/* Check to make sure this isn't a duplicate UUID */
644 	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
645 	if (dup_profile != NULL) {
646 		/* This profile might be used soon -- skip garbage collector */
647 		dup_profile->skip_collector = true;
648 
649 		ret = KERN_ALREADY_IN_SET;
650 		goto exit;
651 	}
652 
653 	ret = CSM_PREFIX(register_provisioning_profile)(
654 		profile_blob,
655 		profile_blob_size,
656 		&monitor_profile_obj);
657 
658 	if (ret == KERN_SUCCESS) {
659 		/* Copy in the profile UUID */
660 		uuid_copy(profile->profile_uuid, profile_uuid);
661 
662 		/* Setup the monitor's profile object */
663 		profile->profile_obj = monitor_profile_obj;
664 
665 		/* This profile might be used soon -- skip garbage collector */
666 		profile->skip_collector = true;
667 
668 		/* Insert at the head of the profile set */
669 		SLIST_INSERT_HEAD(&all_profiles, profile, link);
670 	}
671 
672 exit:
673 	/* Unlock the profile set */
674 	lck_rw_unlock_exclusive(&profiles_lock);
675 
676 	if (ret != KERN_SUCCESS) {
677 		/* Free the profile wrapper object */
678 		kfree_type(cs_profile_t, profile);
679 		profile = NULL;
680 
681 		if (ret != KERN_ALREADY_IN_SET) {
682 			printf("unable to register profile with monitor: %d\n", ret);
683 		}
684 	}
685 
686 	return ret;
687 }
688 
689 kern_return_t
csm_trust_provisioning_profile(const uuid_t profile_uuid,const void * sig_data,size_t sig_size)690 csm_trust_provisioning_profile(
691 	const uuid_t profile_uuid,
692 	const void *sig_data,
693 	size_t sig_size)
694 {
695 	cs_profile_t *profile = NULL;
696 	kern_return_t ret = KERN_NOT_FOUND;
697 
698 	/*
699 	 * We don't explicitly make a check here for if the code-signing-monitor is enabled
700 	 * or not because this function should never be called unless registration of the
701 	 * profile succeeded, which it won't in cases where the CSM is disabled.
702 	 *
703 	 * If this function does somehow get called, it'll result in a panic -- this is good
704 	 * for us to detect and to fix the code path which results in this behavior.
705 	 */
706 
707 	/* Lock the profile set exclusively */
708 	lck_rw_lock_exclusive(&profiles_lock);
709 
710 	/* Search for the registered profile */
711 	profile = search_for_profile_uuid(profile_uuid);
712 	if (profile == NULL) {
713 		goto exit;
714 	} else if (profile->trusted == true) {
715 		ret = KERN_SUCCESS;
716 		goto exit;
717 	}
718 
719 	ret = CSM_PREFIX(trust_provisioning_profile)(
720 		profile->profile_obj,
721 		sig_data,
722 		sig_size);
723 
724 	/* Mark profile as trusted if needed */
725 	if (ret == KERN_SUCCESS) {
726 		profile->trusted = true;
727 	} else {
728 		printf("unable to trust profile with monitor: %d\n", ret);
729 	}
730 
731 exit:
732 	/* Unlock the profile set */
733 	lck_rw_unlock_exclusive(&profiles_lock);
734 
735 	return ret;
736 }
737 
738 kern_return_t
csm_associate_provisioning_profile(void * monitor_sig_obj,const uuid_t profile_uuid)739 csm_associate_provisioning_profile(
740 	void *monitor_sig_obj,
741 	const uuid_t profile_uuid)
742 {
743 	cs_profile_t *profile = NULL;
744 	kern_return_t ret = KERN_DENIED;
745 
746 	/*
747 	 * We don't explicitly make a check here for if the code-signing-monitor is enabled
748 	 * or not because this function should never be called unless registration of the
749 	 * profile succeeded, which it won't in cases where the CSM is disabled.
750 	 *
751 	 * If this function does somehow get called, it'll result in a panic -- this is good
752 	 * for us to detect and to fix the code path which results in this behavior.
753 	 */
754 
755 	/* Lock the profile set as shared */
756 	lck_rw_lock_shared(&profiles_lock);
757 
758 	/* Search for the provisioning profile */
759 	profile = search_for_profile_uuid(profile_uuid);
760 	if (profile == NULL) {
761 		ret = KERN_NOT_FOUND;
762 		goto exit;
763 	}
764 
765 	ret = CSM_PREFIX(associate_provisioning_profile)(
766 		monitor_sig_obj,
767 		profile->profile_obj);
768 
769 	if (ret == KERN_SUCCESS) {
770 		/*
771 		 * This seems like an active profile -- let it skip the garbage collector on
772 		 * the next pass. We can modify this field even though we've only taken a shared
773 		 * lock as in this case we're always setting it to a fixed value.
774 		 */
775 		profile->skip_collector = true;
776 	}
777 
778 exit:
779 	/* Unlock the profile set */
780 	lck_rw_unlock_shared(&profiles_lock);
781 
782 	if (ret != KERN_SUCCESS) {
783 		printf("unable to associate profile: %d\n", ret);
784 	}
785 	return ret;
786 }
787 
788 kern_return_t
csm_disassociate_provisioning_profile(void * monitor_sig_obj)789 csm_disassociate_provisioning_profile(
790 	void *monitor_sig_obj)
791 {
792 	kern_return_t ret = KERN_DENIED;
793 
794 	if (csm_enabled() == false) {
795 		return KERN_NOT_SUPPORTED;
796 	}
797 
798 	/* Call out to the monitor */
799 	ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);
800 
801 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
802 		printf("unable to disassociate profile: %d\n", ret);
803 	}
804 	return ret;
805 }
806 
807 static kern_return_t
unregister_provisioning_profile(cs_profile_t * profile)808 unregister_provisioning_profile(
809 	cs_profile_t *profile)
810 {
811 	kern_return_t ret = KERN_DENIED;
812 
813 	/* Call out to the monitor */
814 	ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);
815 
816 	/*
817 	 * KERN_FAILURE represents the case when the unregistration failed because the
818 	 * monitor noted that the profile was still being used. Other than that, there
819 	 * is no other error expected out of this interface. In fact, there is no easy
820 	 * way to deal with other errors, as the profile state may be corrupted. If we
821 	 * see a different error, then we panic.
822 	 */
823 	if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
824 		panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
825 	}
826 
827 	return ret;
828 }
829 
830 void
csm_free_provisioning_profiles(void)831 csm_free_provisioning_profiles(void)
832 {
833 	kern_return_t ret = KERN_DENIED;
834 	cs_profile_t *profile = NULL;
835 	cs_profile_t *temp_profile = NULL;
836 
837 	/* Lock the profile set exclusively */
838 	lck_rw_lock_exclusive(&profiles_lock);
839 
840 	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
841 		if (profile->skip_collector == true) {
842 			profile->skip_collector = false;
843 			continue;
844 		}
845 
846 		/* Attempt to unregister this profile from the system */
847 		ret = unregister_provisioning_profile(profile);
848 		if (ret == KERN_SUCCESS) {
849 			/* Remove the profile from the profile set */
850 			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);
851 
852 			/* Free the memory consumed for the profile wrapper object */
853 			kfree_type(cs_profile_t, profile);
854 			profile = NULL;
855 		}
856 	}
857 
858 	/* Unlock the profile set */
859 	lck_rw_unlock_exclusive(&profiles_lock);
860 }
861 
862 #endif /* CODE_SIGNING_MONITOR */
863 
864 #pragma mark Code Signing
865 /*
866  * AMFI performs full signature validation by itself. For some things, AMFI uses XNU in
867  * order to abstract away the underlying implementation for data storage, but for most of
868  * these, AMFI doesn't directly interact with them, and they're only required when we have
869  * a code signing monitor on the system.
870  */
871 
872 void
set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])873 set_compilation_service_cdhash(
874 	const uint8_t cdhash[CS_CDHASH_LEN])
875 {
876 	CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
877 }
878 
879 bool
match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])880 match_compilation_service_cdhash(
881 	const uint8_t cdhash[CS_CDHASH_LEN])
882 {
883 	return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
884 }
885 
886 void
set_local_signing_public_key(const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])887 set_local_signing_public_key(
888 	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
889 {
890 	CSM_PREFIX(set_local_signing_public_key)(public_key);
891 }
892 
893 uint8_t*
get_local_signing_public_key(void)894 get_local_signing_public_key(void)
895 {
896 	return CSM_PREFIX(get_local_signing_public_key)();
897 }
898 
899 void
unrestrict_local_signing_cdhash(__unused const uint8_t cdhash[CS_CDHASH_LEN])900 unrestrict_local_signing_cdhash(
901 	__unused const uint8_t cdhash[CS_CDHASH_LEN])
902 {
903 	/*
904 	 * Since AMFI manages code signing on its own, we only need to unrestrict the
905 	 * local signing cdhash when we have a monitor environment.
906 	 */
907 
908 #if CODE_SIGNING_MONITOR
909 	CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
910 #endif
911 }
912 
913 kern_return_t
get_trust_level_kdp(__unused pmap_t pmap,__unused uint32_t * trust_level)914 get_trust_level_kdp(
915 	__unused pmap_t pmap,
916 	__unused uint32_t *trust_level)
917 {
918 #if CODE_SIGNING_MONITOR
919 	return csm_get_trust_level_kdp(pmap, trust_level);
920 #else
921 	return KERN_NOT_SUPPORTED;
922 #endif
923 }
924 
925 kern_return_t
get_jit_address_range_kdp(__unused pmap_t pmap,__unused uintptr_t * jit_region_start,__unused uintptr_t * jit_region_end)926 get_jit_address_range_kdp(
927 	__unused pmap_t pmap,
928 	__unused uintptr_t *jit_region_start,
929 	__unused uintptr_t *jit_region_end)
930 {
931 #if CODE_SIGNING_MONITOR
932 	return csm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
933 #else
934 	return KERN_NOT_SUPPORTED;
935 #endif
936 }
937 
938 kern_return_t
csm_resolve_os_entitlements_from_proc(__unused const proc_t process,__unused const void ** os_entitlements)939 csm_resolve_os_entitlements_from_proc(
940 	__unused const proc_t process,
941 	__unused const void **os_entitlements)
942 {
943 #if CODE_SIGNING_MONITOR
944 	task_t task = NULL;
945 	vm_map_t task_map = NULL;
946 	pmap_t task_pmap = NULL;
947 	kern_return_t ret = KERN_DENIED;
948 
949 	if (csm_enabled() == false) {
950 		return KERN_NOT_SUPPORTED;
951 	}
952 
953 	/* Ensure the process comes from the proc_task zone */
954 	proc_require(process, PROC_REQUIRE_ALLOW_ALL);
955 
956 	/* Acquire the task from the proc */
957 	task = proc_task(process);
958 	if (task == NULL) {
959 		return KERN_NOT_FOUND;
960 	}
961 
962 	/* Acquire the virtual memory map from the task -- takes a reference on it */
963 	task_map = get_task_map_reference(task);
964 	if (task_map == NULL) {
965 		return KERN_NOT_FOUND;
966 	}
967 
968 	/* Acquire the pmap from the virtual memory map */
969 	task_pmap = vm_map_get_pmap(task_map);
970 	assert(task_pmap != NULL);
971 
972 	/* Call into the monitor to resolve the entitlements */
973 	ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);
974 
975 	/* Release the reference on the virtual memory map */
976 	vm_map_deallocate(task_map);
977 
978 	return ret;
979 #else
980 	return KERN_NOT_SUPPORTED;
981 #endif
982 }
983 
984 kern_return_t
address_space_debugged(const proc_t process)985 address_space_debugged(
986 	const proc_t process)
987 {
988 	/* Must pass in a valid proc_t */
989 	if (process == NULL) {
990 		printf("%s: provided a NULL process\n", __FUNCTION__);
991 		return KERN_DENIED;
992 	}
993 	proc_require(process, PROC_REQUIRE_ALLOW_ALL);
994 
995 	/* Developer mode must always be enabled for this to return successfully */
996 	if (developer_mode_state() == false) {
997 		return KERN_DENIED;
998 	}
999 
1000 #if CODE_SIGNING_MONITOR
1001 	task_t task = NULL;
1002 	vm_map_t task_map = NULL;
1003 	pmap_t task_pmap = NULL;
1004 
1005 	if (csm_enabled() == true) {
1006 		/* Acquire the task from the proc */
1007 		task = proc_task(process);
1008 		if (task == NULL) {
1009 			return KERN_NOT_FOUND;
1010 		}
1011 
1012 		/* Acquire the virtual memory map from the task -- takes a reference on it */
1013 		task_map = get_task_map_reference(task);
1014 		if (task_map == NULL) {
1015 			return KERN_NOT_FOUND;
1016 		}
1017 
1018 		/* Acquire the pmap from the virtual memory map */
1019 		task_pmap = vm_map_get_pmap(task_map);
1020 		assert(task_pmap != NULL);
1021 
1022 		/* Acquire the state from the monitor */
1023 		kern_return_t ret = CSM_PREFIX(address_space_debugged)(task_pmap);
1024 
1025 		/* Release the reference on the virtual memory map */
1026 		vm_map_deallocate(task_map);
1027 
1028 		return ret;
1029 	}
1030 #endif /* CODE_SIGNING_MONITOR */
1031 
1032 	/* Check read-only process flags for state */
1033 	if (proc_getcsflags(process) & CS_DEBUGGED) {
1034 		return KERN_SUCCESS;
1035 	}
1036 
1037 #if XNU_TARGET_OS_OSX
1038 	/*
1039 	 * For macOS systems only, we allow the execution of unsigned code. On Intel, code
1040 	 * doesn't need to be signed, and on ASi, Rosetta binaries don't need to be signed.
1041 	 * In these cases, we return successfully from this function because we don't know
1042 	 * what else we can do.
1043 	 */
1044 	if ((proc_getcsflags(process) & CS_SIGNED) == 0) {
1045 		return KERN_SUCCESS;
1046 	}
1047 #endif
1048 
1049 	return KERN_DENIED;
1050 }
1051 
1052 #if CODE_SIGNING_MONITOR
1053 
/* Returns true when the code signing monitor is enforcing code signing */
bool
csm_enabled(void)
{
	return CSM_PREFIX(code_signing_enabled)();
}
1059 
/* Maximum code signature size the monitor is willing to manage */
vm_size_t
csm_signature_size_limit(void)
{
	return CSM_PREFIX(managed_code_signature_size)();
}
1065 
/*
 * If lockdown mode is active, inform the code signing monitor and disable the
 * kernel-side code signing features which lockdown mode restricts (MAP_JIT,
 * compilation service, local signing, OOP-JIT). On SPTM systems the TXM
 * system policy decides per-feature; otherwise a fixed set is disabled.
 */
void
csm_check_lockdown_mode(void)
{
	/* Nothing to do when lockdown mode is off */
	if (get_lockdown_mode_state() == 0) {
		return;
	}

	/* Inform the code signing monitor about lockdown mode */
	CSM_PREFIX(enter_lockdown_mode)();

#if CONFIG_SPTM
	/* MAP_JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}

	/* Compilation service lockdown */
	if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
		disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
	}

	/* Local signing lockdown */
	if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
		disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	}

	/* OOP-JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
		disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	}
#else
	/*
	 * Lockdown mode is supposed to disable all forms of JIT on the system. For now,
	 * we leave JIT enabled by default until some blockers are resolved. The way this
	 * code is written, we don't need to change anything once we enforce MAP_JIT to
	 * be disabled for lockdown mode.
	 */
	if (ppl_lockdown_mode_enforce_jit == true) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}
	disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
#endif /* CONFIG_SPTM */
}
1111 
1112 void
csm_code_signing_violation(proc_t proc,vm_offset_t addr)1113 csm_code_signing_violation(
1114 	proc_t proc,
1115 	vm_offset_t addr)
1116 {
1117 	/* No enforcement if code-signing-monitor is disabled */
1118 	if (csm_enabled() == false) {
1119 		return;
1120 	}
1121 
1122 	/* Leave a log for triage purposes */
1123 	printf("[%s] code-signing-violation at %p\n", proc_best_name(proc), (void*)addr);
1124 
1125 	/*
1126 	 * For now, the only input into this function is from current_proc(), so using current_thread()
1127 	 * over here is alright. If this function ever gets called from another location, we need to
1128 	 * then change where we get the user thread from.
1129 	 */
1130 	assert(proc == current_proc());
1131 
1132 	/*
1133 	 * Force exit the process and set it to allow generating crash reports, which is critical
1134 	 * for better triaging these issues.
1135 	 */
1136 
1137 	exception_info_t info = {
1138 		.os_reason = OS_REASON_CODESIGNING,
1139 		.exception_type = EXC_BAD_ACCESS,
1140 		.mx_code = CODESIGNING_EXIT_REASON_INVALID_PAGE,
1141 		.mx_subcode = VM_USER_STRIP_PTR(addr),
1142 		.kt_info.kt_subsys = KDBG_TRIAGE_SUBSYS_VM,
1143 		.kt_info.kt_error = KDBG_TRIAGE_VM_CODE_SIGNING
1144 	};
1145 
1146 	exit_with_mach_exception(proc, info, PX_KTRIAGE);
1147 }
1148 
1149 kern_return_t
csm_register_code_signature(const vm_address_t signature_addr,const vm_size_t signature_size,const vm_offset_t code_directory_offset,const char * signature_path,void ** monitor_sig_obj,vm_address_t * monitor_signature_addr)1150 csm_register_code_signature(
1151 	const vm_address_t signature_addr,
1152 	const vm_size_t signature_size,
1153 	const vm_offset_t code_directory_offset,
1154 	const char *signature_path,
1155 	void **monitor_sig_obj,
1156 	vm_address_t *monitor_signature_addr)
1157 {
1158 	if (csm_enabled() == false) {
1159 		return KERN_NOT_SUPPORTED;
1160 	}
1161 
1162 	return CSM_PREFIX(register_code_signature)(
1163 		signature_addr,
1164 		signature_size,
1165 		code_directory_offset,
1166 		signature_path,
1167 		monitor_sig_obj,
1168 		monitor_signature_addr);
1169 }
1170 
1171 kern_return_t
csm_unregister_code_signature(void * monitor_sig_obj)1172 csm_unregister_code_signature(
1173 	void *monitor_sig_obj)
1174 {
1175 	if (csm_enabled() == false) {
1176 		return KERN_NOT_SUPPORTED;
1177 	}
1178 
1179 	return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
1180 }
1181 
1182 kern_return_t
csm_verify_code_signature(void * monitor_sig_obj)1183 csm_verify_code_signature(
1184 	void *monitor_sig_obj)
1185 {
1186 	if (csm_enabled() == false) {
1187 		return KERN_NOT_SUPPORTED;
1188 	}
1189 
1190 	return CSM_PREFIX(verify_code_signature)(monitor_sig_obj);
1191 }
1192 
1193 kern_return_t
csm_reconstitute_code_signature(void * monitor_sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)1194 csm_reconstitute_code_signature(
1195 	void *monitor_sig_obj,
1196 	vm_address_t *unneeded_addr,
1197 	vm_size_t *unneeded_size)
1198 {
1199 	if (csm_enabled() == false) {
1200 		return KERN_NOT_SUPPORTED;
1201 	}
1202 
1203 	return CSM_PREFIX(reconstitute_code_signature)(
1204 		monitor_sig_obj,
1205 		unneeded_addr,
1206 		unneeded_size);
1207 }
1208 
1209 kern_return_t
csm_associate_code_signature(pmap_t monitor_pmap,void * monitor_sig_obj,const vm_address_t region_addr,const vm_size_t region_size,const vm_offset_t region_offset)1210 csm_associate_code_signature(
1211 	pmap_t monitor_pmap,
1212 	void *monitor_sig_obj,
1213 	const vm_address_t region_addr,
1214 	const vm_size_t region_size,
1215 	const vm_offset_t region_offset)
1216 {
1217 	if (csm_enabled() == false) {
1218 		return KERN_NOT_SUPPORTED;
1219 	}
1220 
1221 	return CSM_PREFIX(associate_code_signature)(
1222 		monitor_pmap,
1223 		monitor_sig_obj,
1224 		region_addr,
1225 		region_size,
1226 		region_offset);
1227 }
1228 
1229 kern_return_t
csm_allow_jit_region(pmap_t monitor_pmap)1230 csm_allow_jit_region(
1231 	pmap_t monitor_pmap)
1232 {
1233 	if (csm_enabled() == false) {
1234 		return KERN_SUCCESS;
1235 	} else if (monitor_pmap == NULL) {
1236 		return KERN_DENIED;
1237 	}
1238 
1239 	kern_return_t ret = CSM_PREFIX(allow_jit_region)(monitor_pmap);
1240 	if (ret == KERN_NOT_SUPPORTED) {
1241 		/*
1242 		 * Some monitor environments do not support this API and as a result will
1243 		 * return KERN_NOT_SUPPORTED. The caller here should not interpret that as
1244 		 * a failure.
1245 		 */
1246 		ret = KERN_SUCCESS;
1247 	}
1248 
1249 	return ret;
1250 }
1251 
1252 kern_return_t
csm_associate_jit_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)1253 csm_associate_jit_region(
1254 	pmap_t monitor_pmap,
1255 	const vm_address_t region_addr,
1256 	const vm_size_t region_size)
1257 {
1258 	if (csm_enabled() == false) {
1259 		return KERN_NOT_SUPPORTED;
1260 	}
1261 
1262 	return CSM_PREFIX(associate_jit_region)(
1263 		monitor_pmap,
1264 		region_addr,
1265 		region_size);
1266 }
1267 
1268 kern_return_t
csm_associate_debug_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)1269 csm_associate_debug_region(
1270 	pmap_t monitor_pmap,
1271 	const vm_address_t region_addr,
1272 	const vm_size_t region_size)
1273 {
1274 	if (csm_enabled() == false) {
1275 		return KERN_NOT_SUPPORTED;
1276 	}
1277 
1278 	return CSM_PREFIX(associate_debug_region)(
1279 		monitor_pmap,
1280 		region_addr,
1281 		region_size);
1282 }
1283 
1284 kern_return_t
csm_allow_invalid_code(pmap_t pmap)1285 csm_allow_invalid_code(
1286 	pmap_t pmap)
1287 {
1288 	if (csm_enabled() == false) {
1289 		return KERN_NOT_SUPPORTED;
1290 	}
1291 
1292 	return CSM_PREFIX(allow_invalid_code)(pmap);
1293 }
1294 
1295 kern_return_t
csm_get_trust_level_kdp(pmap_t pmap,uint32_t * trust_level)1296 csm_get_trust_level_kdp(
1297 	pmap_t pmap,
1298 	uint32_t *trust_level)
1299 {
1300 	if (csm_enabled() == false) {
1301 		return KERN_NOT_SUPPORTED;
1302 	}
1303 
1304 	return CSM_PREFIX(get_trust_level_kdp)(pmap, trust_level);
1305 }
1306 
1307 kern_return_t
csm_get_jit_address_range_kdp(pmap_t pmap,uintptr_t * jit_region_start,uintptr_t * jit_region_end)1308 csm_get_jit_address_range_kdp(
1309 	pmap_t pmap,
1310 	uintptr_t *jit_region_start,
1311 	uintptr_t *jit_region_end)
1312 {
1313 	if (csm_enabled() == false) {
1314 		return KERN_NOT_SUPPORTED;
1315 	}
1316 
1317 	return CSM_PREFIX(get_jit_address_range_kdp)(pmap, jit_region_start, jit_region_end);
1318 }
1319 
/*
 * Ask the monitor whether this address space is exempt from code signing
 * enforcement. Note there is deliberately no csm_enabled() guard here.
 */
kern_return_t
csm_address_space_exempt(
	const pmap_t pmap)
{
	/*
	 * These exemptions are actually orthogonal to the code signing enforcement. As
	 * a result, we let each monitor explicitly decide how to deal with the exemption
	 * in case code signing enforcement is disabled.
	 */

	return CSM_PREFIX(address_space_exempt)(pmap);
}
1332 
1333 kern_return_t
csm_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)1334 csm_fork_prepare(
1335 	pmap_t old_pmap,
1336 	pmap_t new_pmap)
1337 {
1338 	if (csm_enabled() == false) {
1339 		return KERN_NOT_SUPPORTED;
1340 	}
1341 
1342 	return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
1343 }
1344 
1345 kern_return_t
csm_acquire_signing_identifier(const void * monitor_sig_obj,const char ** signing_id)1346 csm_acquire_signing_identifier(
1347 	const void *monitor_sig_obj,
1348 	const char **signing_id)
1349 {
1350 	if (csm_enabled() == false) {
1351 		return KERN_NOT_SUPPORTED;
1352 	}
1353 
1354 	return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
1355 }
1356 
1357 kern_return_t
csm_associate_os_entitlements(void * monitor_sig_obj,const void * os_entitlements)1358 csm_associate_os_entitlements(
1359 	void *monitor_sig_obj,
1360 	const void *os_entitlements)
1361 {
1362 	if (csm_enabled() == false) {
1363 		return KERN_NOT_SUPPORTED;
1364 	} else if (os_entitlements == NULL) {
1365 		/* Not every signature has entitlements */
1366 		return KERN_SUCCESS;
1367 	}
1368 
1369 	return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
1370 }
1371 
1372 kern_return_t
csm_accelerate_entitlements(void * monitor_sig_obj,CEQueryContext_t * ce_ctx)1373 csm_accelerate_entitlements(
1374 	void *monitor_sig_obj,
1375 	CEQueryContext_t *ce_ctx)
1376 {
1377 	if (csm_enabled() == false) {
1378 		return KERN_NOT_SUPPORTED;
1379 	}
1380 
1381 	return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
1382 }
1383 
1384 #endif /* CODE_SIGNING_MONITOR */
1385 
1386 #pragma mark AppleImage4
/*
 * AppleImage4 uses the monitor environment to safeguard critical security data.
 * To keep the implementation uniform, AppleImage4 always depends on these
 * abstracted APIs, regardless of whether the system has a monitor environment
 * or not.
 */
1393 
/*
 * Return AppleImage4's backing storage area (size through allocated_size).
 * The storage lives with the monitor implementation when one is present.
 */
void*
kernel_image4_storage_data(
	size_t *allocated_size)
{
	return CSM_PREFIX(image4_storage_data)(allocated_size);
}
1400 
/* Set the nonce for the given nonce domain index */
void
kernel_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce)
{
	return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
}
1408 
/* Roll (regenerate) the nonce for the given nonce domain index */
void
kernel_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_roll_nonce)(ndi);
}
1415 
/* Copy out the current nonce for the given domain index; returns an errno */
errno_t
kernel_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out)
{
	return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
}
1423 
/*
 * Execute an image4 runtime object given its payload and manifest buffers.
 * Validation semantics are defined by the underlying implementation.
 */
errno_t
kernel_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest)
{
	return CSM_PREFIX(image4_execute_object)(
		obj_spec_index,
		payload,
		manifest);
}
1435 
/*
 * Copy out an image4 runtime object into object_out. object_length carries
 * the buffer capacity in and the object length out.
 * NOTE(review): in/out semantics of object_length inferred from the
 * signature -- confirm against the implementation.
 */
errno_t
kernel_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length)
{
	return CSM_PREFIX(image4_copy_object)(
		obj_spec_index,
		object_out,
		object_length);
}
1447 
/* Return the data exported by the monitor environment for AppleImage4 */
const void*
kernel_image4_get_monitor_exports(void)
{
	return CSM_PREFIX(image4_get_monitor_exports)();
}
1453 
/* Set the OS release type string for the image4 environment */
errno_t
kernel_image4_set_release_type(
	const char *release_type)
{
	return CSM_PREFIX(image4_set_release_type)(release_type);
}
1460 
/* Set the BNCH shadow for the given nonce domain index */
errno_t
kernel_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
}
1467 
1468 #pragma mark Image4 - New
1469 
1470 
1471 
/*
 * Handle the IMAGE4_CS_TRAP_IMAGE_ACTIVATE selector. The payload and
 * manifest buffers referenced by the input vector must be staged in
 * kernel-owned allocations and transferred to the monitor before the trap;
 * this function performs the staging, the trap, and the cleanup.
 *
 * Ownership/cleanup is order-sensitive: the monitor retypes the regions as
 * part of the transfer, and regions must be reclaimed before they can be
 * deallocated. See the comments on the error paths below.
 */
static errno_t
_kernel_image4_monitor_trap_image_activate(
	image4_cs_trap_t selector,
	const void *input_data)
{
	/*
	 * csmx_payload (csmx_payload_len) --> __cs_xfer
	 * csmx_manifest (csmx_manifest_len) --> __cs_borrow
	 */
	image4_cs_trap_argv(image_activate) input = {0};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	errno_t err = EPERM;

	/* Copy the input data -- caller guarantees it is vector-sized */
	memcpy(&input, input_data, sizeof(input));

	/* Stage the payload into a kernel allocation */
	payload_addr = code_signing_allocate(input.csmx_payload_len);
	if (payload_addr == 0) {
		goto out;
	}
	memcpy((void*)payload_addr, (void*)input.csmx_payload, input.csmx_payload_len);

	/* Stage the manifest into a kernel allocation */
	manifest_addr = code_signing_allocate(input.csmx_manifest_len);
	if (manifest_addr == 0) {
		goto out;
	}
	memcpy((void*)manifest_addr, (void*)input.csmx_manifest, input.csmx_manifest_len);

	/* Transfer both regions to the monitor */
	CSM_PREFIX(image4_transfer_region)(selector, payload_addr, input.csmx_payload_len);
	CSM_PREFIX(image4_transfer_region)(selector, manifest_addr, input.csmx_manifest_len);

	/* Setup the input with new addresses */
	input.csmx_payload = payload_addr;
	input.csmx_manifest = manifest_addr;

	/* Trap into the monitor for this selector */
	err = CSM_PREFIX(image4_monitor_trap)(selector, &input, sizeof(input));

out:
	/*
	 * On success the monitor keeps the payload (__cs_xfer); only reclaim and
	 * free it when the trap failed or was never reached.
	 */
	if ((err != 0) && (payload_addr != 0)) {
		/* Retyping only happens after allocating the manifest */
		if (manifest_addr != 0) {
			CSM_PREFIX(image4_reclaim_region)(
				selector, payload_addr, input.csmx_payload_len);
		}
		code_signing_deallocate(&payload_addr, input.csmx_payload_len);
	}

	/* The manifest is only borrowed (__cs_borrow): always reclaim and free */
	if (manifest_addr != 0) {
		/* Reclaim the manifest region -- will be retyped if not NULL */
		CSM_PREFIX(image4_reclaim_region)(
			selector, manifest_addr, input.csmx_manifest_len);

		/* Deallocate the manifest region */
		code_signing_deallocate(&manifest_addr, input.csmx_manifest_len);
	}

	return err;
}
1533 
1534 static errno_t
_kernel_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size)1535 _kernel_image4_monitor_trap(
1536 	image4_cs_trap_t selector,
1537 	const void *input_data,
1538 	size_t input_size)
1539 {
1540 	/* Validate input size for the selector */
1541 	if (input_size != image4_cs_trap_vector_size(selector)) {
1542 		printf("image4 dispatch: invalid input: %llu | %lu\n", selector, input_size);
1543 		return EINVAL;
1544 	}
1545 
1546 	switch (selector) {
1547 	case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1548 		return _kernel_image4_monitor_trap_image_activate(selector, input_data);
1549 
1550 	default:
1551 		return CSM_PREFIX(image4_monitor_trap)(selector, input_data, input_size);
1552 	}
1553 }
1554 
1555 errno_t
kernel_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size,__unused void * output_data,__unused size_t * output_size)1556 kernel_image4_monitor_trap(
1557 	image4_cs_trap_t selector,
1558 	const void *input_data,
1559 	size_t input_size,
1560 	__unused void *output_data,
1561 	__unused size_t *output_size)
1562 {
1563 	size_t length_check = 0;
1564 
1565 	/* Input data is always required */
1566 	if ((input_data == NULL) || (input_size == 0)) {
1567 		printf("image4 dispatch: no input data: %llu\n", selector);
1568 		return EINVAL;
1569 	} else if (os_add_overflow((vm_address_t)input_data, input_size, &length_check)) {
1570 		panic("image4_ dispatch: overflow on input: %p | %lu", input_data, input_size);
1571 	}
1572 
1573 	return _kernel_image4_monitor_trap(selector, input_data, input_size);
1574 }
1575