xref: /xnu-8796.121.2/bsd/kern/kern_codesigning.c (revision c54f35ca767986246321eb901baf8f5ff7923f6a)
1 /*
2  * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern.h>
27 #include <vm/pmap.h>
28 #include <vm/pmap_cs.h>
29 #include <vm/vm_map.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/locks.h>
34 #include <kern/lock_rw.h>
35 #include <libkern/libkern.h>
36 #include <libkern/section_keywords.h>
37 #include <libkern/coretrust/coretrust.h>
38 #include <pexpert/pexpert.h>
39 #include <sys/vm.h>
40 #include <sys/proc.h>
41 #include <sys/proc_require.h>
42 #include <sys/codesign.h>
43 #include <sys/code_signing.h>
44 #include <sys/sysctl.h>
45 #include <uuid/uuid.h>
46 #include <IOKit/IOBSD.h>
47 
SYSCTL_DECL(_security);
SYSCTL_DECL(_security_codesigning);
SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");

/*
 * Cached code signing configuration. These are computed once during early boot
 * by code_signing_configuration() and then treated as read-only; cs_config_set
 * is the publication flag (written with release, read with acquire ordering).
 */
static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;

/* Expose the resolved monitor type and configuration as read-only sysctls */
SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");
58 
/*
 * Resolve and return the system's code signing configuration.
 *
 * On the first call this parses the relevant boot-args (DEVELOPMENT/DEBUG
 * kernels only), folds in the code signing monitor state, and publishes the
 * result into cs_monitor/cs_config. Subsequent calls take the fast path and
 * simply read the published values.
 *
 * @param monitor_type_out  optional; receives the monitor type when non-NULL
 * @param config_out        optional; receives the configuration when non-NULL
 */
void
code_signing_configuration(
	code_signing_monitor_type_t *monitor_type_out,
	code_signing_config_t *config_out)
{
	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
	code_signing_config_t config = 0;

	/* Fast path: configuration was already computed and published */
	if (os_atomic_load(&cs_config_set, acquire) == true) {
		goto config_set;
	}

#if DEVELOPMENT || DEBUG
	int amfi_mask = 0;
	int amfi_allow_any_signature = 0;
	int amfi_unrestrict_task_for_pid = 0;
	int amfi_get_out_of_my_way = 0;
	int cs_enforcement_disabled = 0;

#define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
#define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
#define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80

	/* Parse the AMFI mask */
	PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));

	/* Parse the AMFI soft-bypass */
	PE_parse_boot_argn(
		"amfi_allow_any_signature",
		&amfi_allow_any_signature,
		sizeof(amfi_allow_any_signature));

	/* Parse the AMFI debug-bypass */
	PE_parse_boot_argn(
		"amfi_unrestrict_task_for_pid",
		&amfi_unrestrict_task_for_pid,
		sizeof(amfi_unrestrict_task_for_pid));

	/* Parse the AMFI hard-bypass */
	PE_parse_boot_argn(
		"amfi_get_out_of_my_way",
		&amfi_get_out_of_my_way,
		sizeof(amfi_get_out_of_my_way));

	/* Parse the system code signing hard-bypass */
	PE_parse_boot_argn(
		"cs_enforcement_disable",
		&cs_enforcement_disabled,
		sizeof(cs_enforcement_disabled));

	/* CS_CONFIG_UNRESTRICTED_DEBUGGING */
	if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	} else if (amfi_unrestrict_task_for_pid) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	}

	/* CS_CONFIG_ALLOW_ANY_SIGNATURE -- implied by any of the bypasses */
	if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_allow_any_signature) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	}

	/* CS_CONFIG_ENFORCEMENT_DISABLED */
	if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ENFORCEMENT_DISABLED;
	}

	/* CS_CONFIG_GET_OUT_OF_MY_WAY */
	if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	}

#if   PMAP_CS_PPL_MONITOR

	/*
	 * The PPL monitor can veto the relaxations granted above: an explicitly
	 * provided pmap_cs_* boot-arg with a zero value re-enables the
	 * corresponding enforcement bit.
	 */
	if (csm_enabled() == true) {
		int pmap_cs_allow_any_signature = 0;
		bool override = PE_parse_boot_argn(
			"pmap_cs_allow_any_signature",
			&pmap_cs_allow_any_signature,
			sizeof(pmap_cs_allow_any_signature));

		if (!pmap_cs_allow_any_signature && override) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		int pmap_cs_unrestrict_task_for_pid = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_unrestrict_pmap_cs_disable",
			&pmap_cs_unrestrict_task_for_pid,
			sizeof(pmap_cs_unrestrict_task_for_pid));

		if (!pmap_cs_unrestrict_task_for_pid && override) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		int pmap_cs_enforcement_disable = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_allow_modified_code_pages",
			&pmap_cs_enforcement_disable,
			sizeof(pmap_cs_enforcement_disable));

		if (!pmap_cs_enforcement_disable && override) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}
	}

#endif /* PMAP_CS_PPL_MONITOR */
#endif /* DEVELOPMENT || DEBUG */

#if CODE_SIGNING_MONITOR
#if   PMAP_CS_PPL_MONITOR
	monitor_type = CS_MONITOR_TYPE_PPL;
#endif

	if (csm_enabled() == true) {
		config |= CS_CONFIG_CSM_ENABLED;
	}
#endif

	/* Publish the configuration; the release store orders the stores above */
	os_atomic_store(&cs_monitor, monitor_type, relaxed);
	os_atomic_store(&cs_config, config, relaxed);
	os_atomic_store(&cs_config_set, true, release);

	/*
	 * We don't actually ever expect to have concurrent writers in this function
	 * as the data is warmed up during early boot by the kernel bootstrap code.
	 * The only thing we need to ensure is that subsequent readers can view
	 * all the latest data.
	 */
#if defined(__arm64__)
	__asm__ volatile ("dmb ish" ::: "memory");
#elif defined(__x86_64__)
	__asm__ volatile ("mfence" ::: "memory");
#else
#error "Unknown platform -- fence instruction unavailable"
#endif

config_set:
	/* Ensure configuration has been set */
	assert(os_atomic_load(&cs_config_set, relaxed) == true);

	/* Set the monitor type */
	if (monitor_type_out) {
		*monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
	}

	/* Set the configuration */
	if (config_out) {
		*config_out = os_atomic_load(&cs_config, relaxed);
	}
}
222 
223 #pragma mark Developer Mode
224 
/* Turn developer mode on via the code signing monitor abstraction */
void
enable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(true);
}
230 
/* Turn developer mode off via the code signing monitor abstraction */
void
disable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(false);
}
236 
237 bool
developer_mode_state(void)238 developer_mode_state(void)
239 {
240 	/* Assume false if the pointer isn't setup */
241 	if (developer_mode_enabled == NULL) {
242 		return false;
243 	}
244 
245 	return os_atomic_load(developer_mode_enabled, relaxed);
246 }
247 
248 #pragma mark Provisioning Profiles
249 /*
250  * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
251  * profiles when we have a monitor since the monitor needs to independently verify the
252  * profile data as well.
253  */
254 
/*
 * Trigger a garbage collection pass over registered provisioning profiles.
 * Only meaningful when a code signing monitor is present; otherwise a no-op.
 */
void
garbage_collect_provisioning_profiles(void)
{
#if CODE_SIGNING_MONITOR
	csm_free_provisioning_profiles();
#endif
}
262 
263 #if CODE_SIGNING_MONITOR
264 
/* Structure used to maintain the set of registered profiles on the system */
typedef struct _cs_profile {
	/* The UUID of the registered profile */
	uuid_t profile_uuid;

	/* The profile validation object from the monitor */
	void *profile_obj;

	/*
	 * In order to minimize the number of times the same profile would need to be
	 * registered, we allow frequently used profiles to skip the garbage collector
	 * for one pass.
	 */
	bool skip_collector;

	/* Linked list linkage */
	SLIST_ENTRY(_cs_profile) link;
} cs_profile_t;

/* Linked list head for registered profiles */
static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);

/*
 * Lock for the provisioning profiles. Taken shared for lookups/associations
 * and exclusive for registration and garbage collection.
 */
LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
decl_lck_rw_data(, profiles_lock);
290 
/*
 * One-time initialization of the provisioning profile subsystem. Must run
 * after the CoreTrust kernel extension has registered its interface; panics
 * otherwise since profile validation cannot work without it.
 */
void
csm_initialize_provisioning_profiles(void)
{
	/* Ensure the CoreTrust kernel extension has loaded */
	if (coretrust == NULL) {
		panic("coretrust interface not available");
	}

	/* Initialize the provisioning profiles lock */
	lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
	printf("initialized XNU provisioning profile data\n");

#if PMAP_CS_PPL_MONITOR
	pmap_initialize_provisioning_profiles();
#endif
}
307 
308 static cs_profile_t*
search_for_profile_uuid(const uuid_t profile_uuid)309 search_for_profile_uuid(
310 	const uuid_t profile_uuid)
311 {
312 	cs_profile_t *profile = NULL;
313 
314 	/* Caller is required to acquire the lock */
315 	lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);
316 
317 	SLIST_FOREACH(profile, &all_profiles, link) {
318 		if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
319 			return profile;
320 		}
321 	}
322 
323 	return NULL;
324 }
325 
/*
 * Register a provisioning profile with the code signing monitor.
 *
 * Allocates a wrapper object, hands the raw profile blob to the monitor for
 * independent verification, and on success links the wrapper into the global
 * profile set. Duplicate UUIDs are rejected with KERN_ALREADY_IN_SET (and the
 * existing entry is marked to skip the next garbage collection pass).
 *
 * @param profile_uuid       UUID identifying the profile
 * @param profile_blob       raw profile bytes handed to the monitor
 * @param profile_blob_size  size of the raw profile bytes
 *
 * @return KERN_SUCCESS, KERN_ALREADY_IN_SET, or the monitor's error code.
 */
kern_return_t
csm_register_provisioning_profile(
	const uuid_t profile_uuid,
	const void *profile_blob,
	const size_t profile_blob_size)
{
	cs_profile_t *profile = NULL;
	void *monitor_profile_obj = NULL;
	kern_return_t ret = KERN_DENIED;

	/*
	 * Allocate storage for the profile wrapper object before taking the lock;
	 * Z_WAITOK_ZERO may block, which must not happen while holding the lock.
	 */
	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
	assert(profile != NULL);

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Check to make sure this isn't a duplicate UUID */
	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
	if (dup_profile != NULL) {
		/* This profile might be used soon -- skip garbage collector */
		dup_profile->skip_collector = true;

		ret = KERN_ALREADY_IN_SET;
		goto exit;
	}

	ret = CSM_PREFIX(register_provisioning_profile)(
		profile_blob,
		profile_blob_size,
		&monitor_profile_obj);

	if (ret == KERN_SUCCESS) {
		/* Copy in the profile UUID */
		uuid_copy(profile->profile_uuid, profile_uuid);

		/* Setup the monitor's profile object */
		profile->profile_obj = monitor_profile_obj;

		/* This profile might be used soon -- skip garbage collector */
		profile->skip_collector = true;

		/* Insert at the head of the profile set */
		SLIST_INSERT_HEAD(&all_profiles, profile, link);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		/* Free the (never inserted) profile wrapper object */
		kfree_type(cs_profile_t, profile);
		profile = NULL;

		/* A duplicate registration is expected and not worth logging */
		if (ret != KERN_ALREADY_IN_SET) {
			printf("unable to register profile with monitor: %d\n", ret);
		}
	}

	return ret;
}
388 
/*
 * Associate a previously registered provisioning profile with a monitor
 * signature object.
 *
 * @param monitor_sig_obj  the monitor's signature object to associate with
 * @param profile_uuid     UUID of the profile to look up
 *
 * @return KERN_NOT_SUPPORTED when no monitor is enabled, KERN_NOT_FOUND when
 *         the UUID is unknown, otherwise the monitor's result.
 */
kern_return_t
csm_associate_provisioning_profile(
	void *monitor_sig_obj,
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Lock the profile set as shared */
	lck_rw_lock_shared(&profiles_lock);

	/* Search for the provisioning profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		ret = KERN_NOT_FOUND;
		goto exit;
	}

	ret = CSM_PREFIX(associate_provisioning_profile)(
		monitor_sig_obj,
		profile->profile_obj);

	if (ret == KERN_SUCCESS) {
		/*
		 * This seems like an active profile -- let it skip the garbage collector on
		 * the next pass. We can modify this field even though we've only taken a shared
		 * lock as in this case we're always setting it to a fixed value.
		 */
		profile->skip_collector = true;
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_shared(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		printf("unable to associate profile: %d\n", ret);
	}
	return ret;
}
433 
434 kern_return_t
csm_disassociate_provisioning_profile(void * monitor_sig_obj)435 csm_disassociate_provisioning_profile(
436 	void *monitor_sig_obj)
437 {
438 	kern_return_t ret = KERN_DENIED;
439 
440 	if (csm_enabled() == false) {
441 		return KERN_NOT_SUPPORTED;
442 	}
443 
444 	/* Call out to the monitor */
445 	ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);
446 
447 	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
448 		printf("unable to disassociate profile: %d\n", ret);
449 	}
450 	return ret;
451 }
452 
/*
 * Ask the monitor to unregister a profile's validation object.
 *
 * @return KERN_SUCCESS when unregistered, or KERN_FAILURE when the monitor
 *         reports the profile is still in use. Any other result panics.
 */
static kern_return_t
unregister_provisioning_profile(
	cs_profile_t *profile)
{
	kern_return_t ret = KERN_DENIED;

	/* Call out to the monitor */
	ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);

	/*
	 * KERN_FAILURE represents the case when the unregistration failed because the
	 * monitor noted that the profile was still being used. Other than that, there
	 * is no other error expected out of this interface. In fact, there is no easy
	 * way to deal with other errors, as the profile state may be corrupted. If we
	 * see a different error, then we panic.
	 */
	if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
		panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
	}

	return ret;
}
475 
/*
 * Garbage collection pass over the registered profile set.
 *
 * Profiles flagged skip_collector survive one pass (the flag is cleared so
 * they become eligible next time). All other profiles are unregistered with
 * the monitor; those the monitor reports as still in use remain in the set.
 */
void
csm_free_provisioning_profiles(void)
{
	kern_return_t ret = KERN_DENIED;
	cs_profile_t *profile = NULL;
	cs_profile_t *temp_profile = NULL;

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* _SAFE variant: the current entry may be removed during iteration */
	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
		if (profile->skip_collector == true) {
			profile->skip_collector = false;
			continue;
		}

		/* Attempt to unregister this profile from the system */
		ret = unregister_provisioning_profile(profile);
		if (ret == KERN_SUCCESS) {
			/* Remove the profile from the profile set */
			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);

			/* Free the memory consumed for the profile wrapper object */
			kfree_type(cs_profile_t, profile);
			profile = NULL;
		}
	}

	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);
}
507 
508 #endif /* CODE_SIGNING_MONITOR */
509 
510 #pragma mark Code Signing
511 /*
512  * AMFI performs full signature validation by itself. For some things, AMFI uses XNU in
513  * order to abstract away the underlying implementation for data storage, but for most of
514  * these, AMFI doesn't directly interact with them, and they're only required when we have
515  * a code signing monitor on the system.
516  */
517 
/* Record the compilation service cdhash with the monitor abstraction */
void
set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
}
524 
/* Check a cdhash against the recorded compilation service cdhash */
bool
match_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
}
531 
/* Store the public key used to validate locally-signed code */
void
set_local_signing_public_key(
	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
	CSM_PREFIX(set_local_signing_public_key)(public_key);
}
538 
/* Retrieve the stored local-signing public key (ownership stays with callee) */
uint8_t*
get_local_signing_public_key(void)
{
	return CSM_PREFIX(get_local_signing_public_key)();
}
544 
/*
 * Allow a specific locally-signed cdhash to run.
 *
 * @param cdhash  the cdhash to unrestrict; unused when no monitor is built in
 */
void
unrestrict_local_signing_cdhash(
	__unused const uint8_t cdhash[CS_CDHASH_LEN])
{
	/*
	 * Since AMFI manages code signing on its own, we only need to unrestrict the
	 * local signing cdhash when we have a monitor environment.
	 */

#if CODE_SIGNING_MONITOR
	CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
#endif
}
558 
/*
 * Resolve the monitor-held kernel entitlements for a process.
 *
 * Walks proc -> task -> vm_map -> pmap and asks the monitor for the
 * entitlements object it has associated with that address space.
 *
 * @param process          the process to resolve entitlements for
 * @param os_entitlements  out-parameter receiving the monitor's entitlements
 *
 * @return KERN_NOT_SUPPORTED without a monitor, KERN_NOT_FOUND when the task
 *         or its map is unavailable, otherwise the monitor's result.
 */
kern_return_t
csm_resolve_os_entitlements_from_proc(
	__unused const proc_t process,
	__unused const void **os_entitlements)
{
#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Ensure the process comes from the proc_task zone */
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Acquire the task from the proc */
	task = proc_task(process);
	if (task == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the virtual memory map from the task -- takes a reference on it */
	task_map = get_task_map_reference(task);
	if (task_map == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the pmap from the virtual memory map */
	task_pmap = vm_map_get_pmap(task_map);
	assert(task_pmap != NULL);

	/* Call into the monitor to resolve the entitlements */
	ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);

	/* Release the reference on the virtual memory map */
	vm_map_deallocate(task_map);

	return ret;
#else
	return KERN_NOT_SUPPORTED;
#endif
}
604 
605 #if CODE_SIGNING_MONITOR
606 
/* Query the monitor for whether code signing enforcement is active */
bool
csm_enabled(void)
{
	return CSM_PREFIX(code_signing_enabled)();
}
612 
/* Maximum code signature size the monitor is willing to manage */
vm_size_t
csm_signature_size_limit(void)
{
	return CSM_PREFIX(managed_code_signature_size)();
}
618 
619 kern_return_t
csm_register_code_signature(const vm_address_t signature_addr,const vm_size_t signature_size,const vm_offset_t code_directory_offset,const char * signature_path,void ** monitor_sig_obj,vm_address_t * monitor_signature_addr)620 csm_register_code_signature(
621 	const vm_address_t signature_addr,
622 	const vm_size_t signature_size,
623 	const vm_offset_t code_directory_offset,
624 	const char *signature_path,
625 	void **monitor_sig_obj,
626 	vm_address_t *monitor_signature_addr)
627 {
628 	if (csm_enabled() == false) {
629 		return KERN_NOT_SUPPORTED;
630 	}
631 
632 	return CSM_PREFIX(register_code_signature)(
633 		signature_addr,
634 		signature_size,
635 		code_directory_offset,
636 		signature_path,
637 		monitor_sig_obj,
638 		monitor_signature_addr);
639 }
640 
641 kern_return_t
csm_unregister_code_signature(void * monitor_sig_obj)642 csm_unregister_code_signature(
643 	void *monitor_sig_obj)
644 {
645 	if (csm_enabled() == false) {
646 		return KERN_NOT_SUPPORTED;
647 	}
648 
649 	return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
650 }
651 
652 kern_return_t
csm_verify_code_signature(void * monitor_sig_obj)653 csm_verify_code_signature(
654 	void *monitor_sig_obj)
655 {
656 	if (csm_enabled() == false) {
657 		return KERN_NOT_SUPPORTED;
658 	}
659 
660 	return CSM_PREFIX(verify_code_signature)(monitor_sig_obj);
661 }
662 
663 kern_return_t
csm_reconstitute_code_signature(void * monitor_sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)664 csm_reconstitute_code_signature(
665 	void *monitor_sig_obj,
666 	vm_address_t *unneeded_addr,
667 	vm_size_t *unneeded_size)
668 {
669 	if (csm_enabled() == false) {
670 		return KERN_NOT_SUPPORTED;
671 	}
672 
673 	return CSM_PREFIX(reconstitute_code_signature)(
674 		monitor_sig_obj,
675 		unneeded_addr,
676 		unneeded_size);
677 }
678 
679 kern_return_t
csm_associate_code_signature(pmap_t monitor_pmap,void * monitor_sig_obj,const vm_address_t region_addr,const vm_size_t region_size,const vm_offset_t region_offset)680 csm_associate_code_signature(
681 	pmap_t monitor_pmap,
682 	void *monitor_sig_obj,
683 	const vm_address_t region_addr,
684 	const vm_size_t region_size,
685 	const vm_offset_t region_offset)
686 {
687 	if (csm_enabled() == false) {
688 		return KERN_NOT_SUPPORTED;
689 	}
690 
691 	return CSM_PREFIX(associate_code_signature)(
692 		monitor_pmap,
693 		monitor_sig_obj,
694 		region_addr,
695 		region_size,
696 		region_offset);
697 }
698 
699 kern_return_t
csm_associate_jit_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)700 csm_associate_jit_region(
701 	pmap_t monitor_pmap,
702 	const vm_address_t region_addr,
703 	const vm_size_t region_size)
704 {
705 	if (csm_enabled() == false) {
706 		return KERN_NOT_SUPPORTED;
707 	}
708 
709 	return CSM_PREFIX(associate_jit_region)(
710 		monitor_pmap,
711 		region_addr,
712 		region_size);
713 }
714 
715 kern_return_t
csm_associate_debug_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)716 csm_associate_debug_region(
717 	pmap_t monitor_pmap,
718 	const vm_address_t region_addr,
719 	const vm_size_t region_size)
720 {
721 	if (csm_enabled() == false) {
722 		return KERN_NOT_SUPPORTED;
723 	}
724 
725 	return CSM_PREFIX(associate_debug_region)(
726 		monitor_pmap,
727 		region_addr,
728 		region_size);
729 }
730 
731 kern_return_t
csm_allow_invalid_code(pmap_t pmap)732 csm_allow_invalid_code(
733 	pmap_t pmap)
734 {
735 	if (csm_enabled() == false) {
736 		return KERN_NOT_SUPPORTED;
737 	}
738 
739 	return CSM_PREFIX(allow_invalid_code)(pmap);
740 }
741 
/*
 * Query whether an address space is exempt from code signing. Intentionally
 * does NOT check csm_enabled() -- see the comment below.
 */
kern_return_t
csm_address_space_exempt(
	const pmap_t pmap)
{
	/*
	 * These exemptions are actually orthogonal to the code signing enforcement. As
	 * a result, we let each monitor explicitly decide how to deal with the exemption
	 * in case code signing enforcement is disabled.
	 */

	return CSM_PREFIX(address_space_exempt)(pmap);
}
754 
755 kern_return_t
csm_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)756 csm_fork_prepare(
757 	pmap_t old_pmap,
758 	pmap_t new_pmap)
759 {
760 	if (csm_enabled() == false) {
761 		return KERN_NOT_SUPPORTED;
762 	}
763 
764 	return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
765 }
766 
767 kern_return_t
csm_acquire_signing_identifier(const void * monitor_sig_obj,const char ** signing_id)768 csm_acquire_signing_identifier(
769 	const void *monitor_sig_obj,
770 	const char **signing_id)
771 {
772 	if (csm_enabled() == false) {
773 		return KERN_NOT_SUPPORTED;
774 	}
775 
776 	return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
777 }
778 
779 kern_return_t
csm_associate_os_entitlements(void * monitor_sig_obj,const void * os_entitlements)780 csm_associate_os_entitlements(
781 	void *monitor_sig_obj,
782 	const void *os_entitlements)
783 {
784 	if (csm_enabled() == false) {
785 		return KERN_NOT_SUPPORTED;
786 	} else if (os_entitlements == NULL) {
787 		/* Not every signature has entitlements */
788 		return KERN_SUCCESS;
789 	}
790 
791 	return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
792 }
793 
794 kern_return_t
csm_accelerate_entitlements(void * monitor_sig_obj,CEQueryContext_t * ce_ctx)795 csm_accelerate_entitlements(
796 	void *monitor_sig_obj,
797 	CEQueryContext_t *ce_ctx)
798 {
799 	if (csm_enabled() == false) {
800 		return KERN_NOT_SUPPORTED;
801 	}
802 
803 	return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
804 }
805 
806 #endif /* CODE_SIGNING_MONITOR */
807 
808 #pragma mark AppleImage4
809 /*
810  * AppleImage4 uses the monitor environment to safeguard critical security data.
811  * In order to ease the implementation specific, AppleImage4 always depends on these
812  * abstracted APIs, regardless of whether the system has a monitor environment or
813  * not.
814  */
815 
/*
 * Return the storage buffer AppleImage4 should use for its security data.
 *
 * @param allocated_size  out: size of the returned buffer
 */
void*
kernel_image4_storage_data(
	size_t *allocated_size)
{
	return CSM_PREFIX(image4_storage_data)(allocated_size);
}
822 
/* Set the nonce value for the given image4 nonce domain */
void
kernel_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce)
{
	return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
}
830 
/* Roll (invalidate/regenerate) the nonce for the given image4 nonce domain */
void
kernel_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_roll_nonce)(ndi);
}
837 
/*
 * Copy out the nonce for the given image4 nonce domain.
 *
 * @param ndi        nonce domain index
 * @param nonce_out  out: receives the nonce value
 *
 * @return 0 on success, or an errno-style error from the monitor layer.
 */
errno_t
kernel_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out)
{
	return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
}
845 
/*
 * Validate and execute an image4 runtime object against its manifest.
 *
 * @param obj_spec_index  which runtime object specification to use
 * @param payload         the object payload
 * @param manifest        the manifest authorizing the payload
 *
 * @return 0 on success, or an errno-style error from the monitor layer.
 */
errno_t
kernel_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest)
{
	return CSM_PREFIX(image4_execute_object)(
		obj_spec_index,
		payload,
		manifest);
}
857 
/*
 * Copy out a previously executed image4 runtime object.
 *
 * @param obj_spec_index  which runtime object specification to copy
 * @param object_out      destination address for the object bytes
 * @param object_length   in/out: buffer capacity in, bytes copied out
 *
 * @return 0 on success, or an errno-style error from the monitor layer.
 */
errno_t
kernel_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length)
{
	return CSM_PREFIX(image4_copy_object)(
		obj_spec_index,
		object_out,
		object_length);
}
869 
/* Return the monitor's exported data region for AppleImage4 */
const void*
kernel_image4_get_monitor_exports(void)
{
	return CSM_PREFIX(image4_get_monitor_exports)();
}
875 
/*
 * Inform the monitor layer of the system's release type string.
 *
 * @return 0 on success, or an errno-style error from the monitor layer.
 */
errno_t
kernel_image4_set_release_type(
	const char *release_type)
{
	return CSM_PREFIX(image4_set_release_type)(release_type);
}
882 
/*
 * Set the BNCH (boot nonce hash) shadow for the given nonce domain.
 *
 * @return 0 on success, or an errno-style error from the monitor layer.
 */
errno_t
kernel_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
}
889