/*
 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * The contents of this file constitute Original Code as defined in and
 * are subject to the Apple Public Source License Version 1.1 (the
 * "License"). You may not use this file except in compliance with the
 * License. Please obtain a copy of the License at
 * http://www.apple.com/publicsource and read it before using this file.
 *
 * This Original Code and all software distributed under the License are
 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
 * License for the specific language governing rights and limitations
 * under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */

#include <os/overflow.h>
#include <machine/atomic.h>
#include <mach/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>
#include <vm/pmap_cs.h>
#include <vm/vm_map.h>
#include <kern/zalloc.h>
#include <kern/kalloc.h>
#include <kern/assert.h>
#include <kern/locks.h>
#include <kern/lock_rw.h>
#include <libkern/libkern.h>
#include <libkern/section_keywords.h>
#include <libkern/coretrust/coretrust.h>
#include <pexpert/pexpert.h>
#include <sys/vm.h>
#include <sys/proc.h>
#include <sys/proc_require.h>
#include <sys/codesign.h>
#include <sys/code_signing.h>
#include <sys/sysctl.h>
#include <uuid/uuid.h>
#include <IOKit/IOBSD.h>


SYSCTL_DECL(_security);
SYSCTL_DECL(_security_codesigning);
SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");

static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;

SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");
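
/*
 * Example (illustrative sketch, not part of the kernel build): the values above
 * surface to user space as read-only sysctls under "security.codesigning", so a
 * hypothetical user-space caller could read them with sysctlbyname(3). Error
 * handling is elided.
 *
 *	uint32_t config = 0;
 *	size_t size = sizeof(config);
 *	if (sysctlbyname("security.codesigning.config", &config, &size, NULL, 0) == 0) {
 *		// inspect the CS_CONFIG_* bits
 *	}
 */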

void
code_signing_configuration(
	code_signing_monitor_type_t *monitor_type_out,
	code_signing_config_t *config_out)
{
	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
	code_signing_config_t config = 0;

	/*
	 * Since we read this variable with load-acquire semantics, if we observe a value
	 * of true, it means we should be able to observe writes to cs_monitor and also
	 * cs_config.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == true) {
		goto config_set;
	}

	/*
	 * Add support for all the code signing features. This function is called very
	 * early in the system boot, well before kernel extensions such as Apple Mobile
	 * File Integrity come online. As a result, this function assumes that all the
	 * code signing features are enabled, and later on, different components can
	 * disable support for different features using disable_code_signing_feature().
	 */
	config |= CS_CONFIG_MAP_JIT;
	config |= CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
	config |= CS_CONFIG_COMPILATION_SERVICE;
	config |= CS_CONFIG_LOCAL_SIGNING;
	config |= CS_CONFIG_OOP_JIT;

#if CODE_SIGNING_MONITOR
	/* Mark the code signing monitor as enabled if required */
	if (csm_enabled() == true) {
		config |= CS_CONFIG_CSM_ENABLED;
	}

#if PMAP_CS_PPL_MONITOR
	monitor_type = CS_MONITOR_TYPE_PPL;
#endif /* PMAP_CS_PPL_MONITOR */
#endif /* CODE_SIGNING_MONITOR */

#if DEVELOPMENT || DEBUG
	/*
	 * We only ever need to parse for boot-arg-based exemption state on DEVELOPMENT
	 * or DEBUG builds as this state is not respected by any code signing component
	 * on RELEASE builds.
	 */

#define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
#define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
#define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80

	int amfi_mask = 0;
	int amfi_allow_any_signature = 0;
	int amfi_unrestrict_task_for_pid = 0;
	int amfi_get_out_of_my_way = 0;
	int cs_enforcement_disabled = 0;
	int cs_integrity_skip = 0;

	/* Parse the AMFI mask */
	PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));

	/* Parse the AMFI soft-bypass */
	PE_parse_boot_argn(
		"amfi_allow_any_signature",
		&amfi_allow_any_signature,
		sizeof(amfi_allow_any_signature));

	/* Parse the AMFI debug-bypass */
	PE_parse_boot_argn(
		"amfi_unrestrict_task_for_pid",
		&amfi_unrestrict_task_for_pid,
		sizeof(amfi_unrestrict_task_for_pid));

	/* Parse the AMFI hard-bypass */
	PE_parse_boot_argn(
		"amfi_get_out_of_my_way",
		&amfi_get_out_of_my_way,
		sizeof(amfi_get_out_of_my_way));

	/* Parse the system code signing hard-bypass */
	PE_parse_boot_argn(
		"cs_enforcement_disable",
		&cs_enforcement_disabled,
		sizeof(cs_enforcement_disabled));

	/* Parse the system code signing integrity-check bypass */
	PE_parse_boot_argn(
		"cs_integrity_skip",
		&cs_integrity_skip,
		sizeof(cs_integrity_skip));

	/* CS_CONFIG_UNRESTRICTED_DEBUGGING */
	if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	} else if (amfi_unrestrict_task_for_pid) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	}

	/* CS_CONFIG_ALLOW_ANY_SIGNATURE */
	if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_allow_any_signature) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	}

	/* CS_CONFIG_ENFORCEMENT_DISABLED */
	if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ENFORCEMENT_DISABLED;
	}

	/* CS_CONFIG_GET_OUT_OF_MY_WAY */
	if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	}

	/* CS_CONFIG_INTEGRITY_SKIP */
	if (cs_integrity_skip) {
		config |= CS_CONFIG_INTEGRITY_SKIP;
	}

#if PMAP_CS_PPL_MONITOR

	if (csm_enabled() == true) {
		int pmap_cs_allow_any_signature = 0;
		bool override = PE_parse_boot_argn(
			"pmap_cs_allow_any_signature",
			&pmap_cs_allow_any_signature,
			sizeof(pmap_cs_allow_any_signature));

		if (!pmap_cs_allow_any_signature && override) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		int pmap_cs_unrestrict_task_for_pid = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_unrestrict_pmap_cs_disable",
			&pmap_cs_unrestrict_task_for_pid,
			sizeof(pmap_cs_unrestrict_task_for_pid));

		if (!pmap_cs_unrestrict_task_for_pid && override) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		int pmap_cs_enforcement_disable = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_allow_modified_code_pages",
			&pmap_cs_enforcement_disable,
			sizeof(pmap_cs_enforcement_disable));

		if (!pmap_cs_enforcement_disable && override) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}
	}

#endif /* PMAP_CS_PPL_MONITOR */
#endif /* DEVELOPMENT || DEBUG */

	os_atomic_store(&cs_monitor, monitor_type, relaxed);
	os_atomic_store(&cs_config, config, relaxed);

	/*
	 * We write the cs_config_set variable with store-release semantics which means
	 * no writes before this call will be re-ordered to after this call. Hence, if
	 * someone reads this variable with load-acquire semantics, and they observe a
	 * value of true, then they will be able to observe the correct values of the
	 * cs_monitor and the cs_config variables as well.
	 */
	os_atomic_store(&cs_config_set, true, release);

config_set:
	/* Ensure configuration has been set */
	assert(os_atomic_load(&cs_config_set, relaxed) == true);

	/* Set the monitor type */
	if (monitor_type_out) {
		*monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
	}

	/* Set the configuration */
	if (config_out) {
		*config_out = os_atomic_load(&cs_config, relaxed);
	}
}
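
/*
 * Example (illustrative sketch): in-kernel callers can pass NULL for whichever
 * output they do not need, and the function is safe to call repeatedly once the
 * configuration has been published.
 *
 *	code_signing_config_t config = 0;
 *	code_signing_configuration(NULL, &config);
 *
 *	if (config & CS_CONFIG_ENFORCEMENT_DISABLED) {
 *		// enforcement was turned off via the cs_enforcement_disable boot-arg
 *	}
 */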

void
disable_code_signing_feature(
	code_signing_config_t feature)
{
	/*
	 * We require that this function be called only after the code signing config
	 * has been set up initially with a call to code_signing_configuration().
	 */
	if (os_atomic_load(&cs_config_set, acquire) == false) {
		panic("attempted to disable code signing feature without init: %u", feature);
	}

	/*
	 * We require that only a single feature be disabled through a single call to this
	 * function. Moreover, we ensure that only valid features are being disabled.
	 */
	switch (feature) {
	case CS_CONFIG_DEVELOPER_MODE_SUPPORTED:
		cs_config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		break;

	case CS_CONFIG_COMPILATION_SERVICE:
		cs_config &= ~CS_CONFIG_COMPILATION_SERVICE;
		break;

	case CS_CONFIG_LOCAL_SIGNING:
		cs_config &= ~CS_CONFIG_LOCAL_SIGNING;
		break;

	case CS_CONFIG_OOP_JIT:
		cs_config &= ~CS_CONFIG_OOP_JIT;
		break;

	default:
		panic("attempted to disable a code signing feature invalidly: %u", feature);
	}

	/* Ensure all readers can observe the latest data */
#if defined(__arm64__)
	__asm__ volatile ("dmb ish" ::: "memory");
#elif defined(__x86_64__)
	__asm__ volatile ("mfence" ::: "memory");
#else
#error "Unknown platform -- fence instruction unavailable"
#endif
}
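
/*
 * Example (illustrative sketch): a component which comes online later in boot,
 * such as AMFI, disables exactly one feature per call once the configuration
 * has been initialized.
 *
 *	code_signing_configuration(NULL, NULL);
 *	disable_code_signing_feature(CS_CONFIG_OOP_JIT);
 */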

#pragma mark Developer Mode

void
enable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(true);
}

void
disable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(false);
}

bool
developer_mode_state(void)
{
	/* Assume false if the pointer isn't set up */
	if (developer_mode_enabled == NULL) {
		return false;
	}

	return os_atomic_load(developer_mode_enabled, relaxed);
}
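
/*
 * Example (illustrative): since the state conservatively reads as false before
 * the backing pointer is published, callers can gate on it without any ordering
 * concerns.
 *
 *	if (developer_mode_state() == false) {
 *		return KERN_DENIED;
 *	}
 */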

#pragma mark Provisioning Profiles
/*
 * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
 * profiles when we have a monitor since the monitor needs to independently verify the
 * profile data as well.
 */

void
garbage_collect_provisioning_profiles(void)
{
#if CODE_SIGNING_MONITOR
	csm_free_provisioning_profiles();
#endif
}

#if CODE_SIGNING_MONITOR

/* Structure used to maintain the set of registered profiles on the system */
typedef struct _cs_profile {
	/* The UUID of the registered profile */
	uuid_t profile_uuid;

	/* The profile validation object from the monitor */
	void *profile_obj;

	/*
	 * In order to minimize the number of times the same profile would need to be
	 * registered, we allow frequently used profiles to skip the garbage collector
	 * for one pass.
	 */
	bool skip_collector;

	/* Linked list linkage */
	SLIST_ENTRY(_cs_profile) link;
} cs_profile_t;

/* Linked list head for registered profiles */
static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);

/* Lock for the provisioning profiles */
LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
decl_lck_rw_data(, profiles_lock);

void
csm_initialize_provisioning_profiles(void)
{
	/* Ensure the CoreTrust kernel extension has loaded */
	if (coretrust == NULL) {
		panic("coretrust interface not available");
	}

	/* Initialize the provisioning profiles lock */
	lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
	printf("initialized XNU provisioning profile data\n");

#if PMAP_CS_PPL_MONITOR
	pmap_initialize_provisioning_profiles();
#endif
}

static cs_profile_t*
search_for_profile_uuid(
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;

	/* Caller is required to acquire the lock */
	lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);

	SLIST_FOREACH(profile, &all_profiles, link) {
		if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
			return profile;
		}
	}

	return NULL;
}

kern_return_t
csm_register_provisioning_profile(
	const uuid_t profile_uuid,
	const void *profile_blob,
	const size_t profile_blob_size)
{
	cs_profile_t *profile = NULL;
	void *monitor_profile_obj = NULL;
	kern_return_t ret = KERN_DENIED;

	/* Allocate storage for the profile wrapper object */
	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
	assert(profile != NULL);

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Check to make sure this isn't a duplicate UUID */
	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
	if (dup_profile != NULL) {
		/* This profile might be used soon -- skip garbage collector */
		dup_profile->skip_collector = true;

		ret = KERN_ALREADY_IN_SET;
		goto exit;
	}

	ret = CSM_PREFIX(register_provisioning_profile)(
		profile_blob,
		profile_blob_size,
		&monitor_profile_obj);

	if (ret == KERN_SUCCESS) {
		/* Copy in the profile UUID */
		uuid_copy(profile->profile_uuid, profile_uuid);

		/* Set up the monitor's profile object */
		profile->profile_obj = monitor_profile_obj;

		/* This profile might be used soon -- skip garbage collector */
		profile->skip_collector = true;

		/* Insert at the head of the profile set */
		SLIST_INSERT_HEAD(&all_profiles, profile, link);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		/* Free the profile wrapper object */
		kfree_type(cs_profile_t, profile);
		profile = NULL;

		if (ret != KERN_ALREADY_IN_SET) {
			printf("unable to register profile with monitor: %d\n", ret);
		}
	}

	return ret;
}
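
/*
 * Example (illustrative sketch): the expected registration flow under a monitor.
 * A duplicate registration is not an error -- it simply refreshes the profile's
 * skip_collector bit -- so callers can treat KERN_ALREADY_IN_SET as success. The
 * uuid/blob arguments are assumed to come from the caller's profile parsing.
 *
 *	kern_return_t kr = csm_register_provisioning_profile(uuid, blob, blob_size);
 *	if ((kr == KERN_SUCCESS) || (kr == KERN_ALREADY_IN_SET)) {
 *		kr = csm_associate_provisioning_profile(monitor_sig_obj, uuid);
 *	}
 */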

kern_return_t
csm_associate_provisioning_profile(
	void *monitor_sig_obj,
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Lock the profile set as shared */
	lck_rw_lock_shared(&profiles_lock);

	/* Search for the provisioning profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		ret = KERN_NOT_FOUND;
		goto exit;
	}

	ret = CSM_PREFIX(associate_provisioning_profile)(
		monitor_sig_obj,
		profile->profile_obj);

	if (ret == KERN_SUCCESS) {
		/*
		 * This seems like an active profile -- let it skip the garbage collector on
		 * the next pass. We can modify this field even though we've only taken a shared
		 * lock as in this case we're always setting it to a fixed value.
		 */
		profile->skip_collector = true;
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_shared(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		printf("unable to associate profile: %d\n", ret);
	}
	return ret;
}

kern_return_t
csm_disassociate_provisioning_profile(
	void *monitor_sig_obj)
{
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Call out to the monitor */
	ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);

	if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
		printf("unable to disassociate profile: %d\n", ret);
	}
	return ret;
}

static kern_return_t
unregister_provisioning_profile(
	cs_profile_t *profile)
{
	kern_return_t ret = KERN_DENIED;

	/* Call out to the monitor */
	ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);

	/*
	 * KERN_FAILURE represents the case when the unregistration failed because the
	 * monitor noted that the profile was still being used. Other than that, there
	 * is no other error expected out of this interface. In fact, there is no easy
	 * way to deal with other errors, as the profile state may be corrupted. If we
	 * see a different error, then we panic.
	 */
	if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
		panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
	}

	return ret;
}

void
csm_free_provisioning_profiles(void)
{
	kern_return_t ret = KERN_DENIED;
	cs_profile_t *profile = NULL;
	cs_profile_t *temp_profile = NULL;

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
		if (profile->skip_collector == true) {
			profile->skip_collector = false;
			continue;
		}

		/* Attempt to unregister this profile from the system */
		ret = unregister_provisioning_profile(profile);
		if (ret == KERN_SUCCESS) {
			/* Remove the profile from the profile set */
			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);

			/* Free the memory consumed for the profile wrapper object */
			kfree_type(cs_profile_t, profile);
			profile = NULL;
		}
	}

	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);
}
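
/*
 * Example (illustrative): the skip_collector bit gives each recently used profile
 * a one-pass grace period, so an idle profile is reclaimed on the second
 * collection after its last use.
 *
 *	csm_register_provisioning_profile(uuid, blob, size);  // skip_collector = true
 *	csm_free_provisioning_profiles();                     // skipped; bit cleared
 *	csm_free_provisioning_profiles();                     // unregistered and freed
 */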

#endif /* CODE_SIGNING_MONITOR */

#pragma mark Code Signing
/*
 * AMFI performs full signature validation by itself. For some state, AMFI uses XNU
 * to abstract away the underlying data storage, but AMFI doesn't directly interact
 * with most of these interfaces, and they're only required when we have a code
 * signing monitor on the system.
 */

void
set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
}

bool
match_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
}
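
/*
 * Example (illustrative sketch): the compilation service cdhash is pinned once
 * and matched on later lookups; the cdhash value is assumed to come from the
 * caller's own signature validation.
 *
 *	set_compilation_service_cdhash(cdhash);
 *	...
 *	if (match_compilation_service_cdhash(cdhash) == true) {
 *		// this code was produced by the pinned compilation service
 *	}
 */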

void
set_local_signing_public_key(
	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
	CSM_PREFIX(set_local_signing_public_key)(public_key);
}

uint8_t*
get_local_signing_public_key(void)
{
	return CSM_PREFIX(get_local_signing_public_key)();
}

void
unrestrict_local_signing_cdhash(
	__unused const uint8_t cdhash[CS_CDHASH_LEN])
{
	/*
	 * Since AMFI manages code signing on its own, we only need to unrestrict the
	 * local signing cdhash when we have a monitor environment.
	 */

#if CODE_SIGNING_MONITOR
	CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
#endif
}

kern_return_t
get_trust_level_kdp(
	__unused pmap_t pmap,
	__unused uint32_t *trust_level)
{
#if CODE_SIGNING_MONITOR
	return csm_get_trust_level_kdp(pmap, trust_level);
#else
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
csm_resolve_os_entitlements_from_proc(
	__unused const proc_t process,
	__unused const void **os_entitlements)
{
#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Ensure the process comes from the proc_task zone */
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Acquire the task from the proc */
	task = proc_task(process);
	if (task == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the virtual memory map from the task -- takes a reference on it */
	task_map = get_task_map_reference(task);
	if (task_map == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the pmap from the virtual memory map */
	task_pmap = vm_map_get_pmap(task_map);
	assert(task_pmap != NULL);

	/* Call into the monitor to resolve the entitlements */
	ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);

	/* Release the reference on the virtual memory map */
	vm_map_deallocate(task_map);

	return ret;
#else
	return KERN_NOT_SUPPORTED;
#endif
}

kern_return_t
address_space_debugged(
	const proc_t process)
{
	/* Must pass in a valid proc_t */
	if (process == NULL) {
		printf("%s: provided a NULL process\n", __FUNCTION__);
		return KERN_DENIED;
	}
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Developer mode must always be enabled for this to return successfully */
	if (developer_mode_state() == false) {
		return KERN_DENIED;
	}

#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;

	if (csm_enabled() == true) {
		/* Acquire the task from the proc */
		task = proc_task(process);
		if (task == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the virtual memory map from the task -- takes a reference on it */
		task_map = get_task_map_reference(task);
		if (task_map == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the pmap from the virtual memory map */
		task_pmap = vm_map_get_pmap(task_map);
		assert(task_pmap != NULL);

		/* Acquire the state from the monitor */
		kern_return_t ret = CSM_PREFIX(address_space_debugged)(task_pmap);

		/* Release the reference on the virtual memory map */
		vm_map_deallocate(task_map);

		return ret;
	}
#endif /* CODE_SIGNING_MONITOR */

	/* Check read-only process flags for state */
	if (proc_getcsflags(process) & CS_DEBUGGED) {
		return KERN_SUCCESS;
	}

	return KERN_DENIED;
}
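
/*
 * Example (illustrative sketch): callers should key off KERN_SUCCESS alone, since
 * any other return (KERN_DENIED, KERN_NOT_FOUND) means the address space must be
 * treated as not being debugged.
 *
 *	if (address_space_debugged(process) == KERN_SUCCESS) {
 *		// the address space is under a debugger; relax restrictions accordingly
 *	}
 */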

#if CODE_SIGNING_MONITOR

bool
csm_enabled(void)
{
	return CSM_PREFIX(code_signing_enabled)();
}

vm_size_t
csm_signature_size_limit(void)
{
	return CSM_PREFIX(managed_code_signature_size)();
}

kern_return_t
csm_register_code_signature(
	const vm_address_t signature_addr,
	const vm_size_t signature_size,
	const vm_offset_t code_directory_offset,
	const char *signature_path,
	void **monitor_sig_obj,
	vm_address_t *monitor_signature_addr)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(register_code_signature)(
		signature_addr,
		signature_size,
		code_directory_offset,
		signature_path,
		monitor_sig_obj,
		monitor_signature_addr);
}
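
/*
 * Example (illustrative sketch): the assumed lifecycle of a monitor-managed
 * signature as driven by the layers above; arguments come from the caller's
 * Mach-O parsing and error handling is elided.
 *
 *	void *sig_obj = NULL;
 *	vm_address_t monitor_addr = 0;
 *
 *	csm_register_code_signature(addr, size, cd_offset, path, &sig_obj, &monitor_addr);
 *	csm_verify_code_signature(sig_obj);
 *	csm_associate_code_signature(pmap, sig_obj, region_addr, region_size, region_offset);
 *	...
 *	csm_unregister_code_signature(sig_obj);
 */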

kern_return_t
csm_unregister_code_signature(
	void *monitor_sig_obj)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
}

kern_return_t
csm_verify_code_signature(
	void *monitor_sig_obj)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(verify_code_signature)(monitor_sig_obj);
}

kern_return_t
csm_reconstitute_code_signature(
	void *monitor_sig_obj,
	vm_address_t *unneeded_addr,
	vm_size_t *unneeded_size)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(reconstitute_code_signature)(
		monitor_sig_obj,
		unneeded_addr,
		unneeded_size);
}

kern_return_t
csm_associate_code_signature(
	pmap_t monitor_pmap,
	void *monitor_sig_obj,
	const vm_address_t region_addr,
	const vm_size_t region_size,
	const vm_offset_t region_offset)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(associate_code_signature)(
		monitor_pmap,
		monitor_sig_obj,
		region_addr,
		region_size,
		region_offset);
}

kern_return_t
csm_allow_jit_region(
	pmap_t monitor_pmap)
{
	if (csm_enabled() == false) {
		return KERN_SUCCESS;
	} else if (monitor_pmap == NULL) {
		return KERN_DENIED;
	}

	kern_return_t ret = CSM_PREFIX(allow_jit_region)(monitor_pmap);
	if (ret == KERN_NOT_SUPPORTED) {
		/*
		 * Some monitor environments do not support this API and as a result will
		 * return KERN_NOT_SUPPORTED. The caller here should not interpret that as
		 * a failure.
		 */
		ret = KERN_SUCCESS;
	}

	return ret;
}

kern_return_t
csm_associate_jit_region(
	pmap_t monitor_pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(associate_jit_region)(
		monitor_pmap,
		region_addr,
		region_size);
}

kern_return_t
csm_associate_debug_region(
	pmap_t monitor_pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(associate_debug_region)(
		monitor_pmap,
		region_addr,
		region_size);
}

kern_return_t
csm_allow_invalid_code(
	pmap_t pmap)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(allow_invalid_code)(pmap);
}

kern_return_t
csm_get_trust_level_kdp(
	pmap_t pmap,
	uint32_t *trust_level)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(get_trust_level_kdp)(pmap, trust_level);
}

kern_return_t
csm_address_space_exempt(
	const pmap_t pmap)
{
	/*
	 * These exemptions are actually orthogonal to the code signing enforcement. As
	 * a result, we let each monitor explicitly decide how to deal with the exemption
	 * in case code signing enforcement is disabled.
	 */

	return CSM_PREFIX(address_space_exempt)(pmap);
}

kern_return_t
csm_fork_prepare(
	pmap_t old_pmap,
	pmap_t new_pmap)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
}

kern_return_t
csm_acquire_signing_identifier(
	const void *monitor_sig_obj,
	const char **signing_id)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
}

kern_return_t
csm_associate_os_entitlements(
	void *monitor_sig_obj,
	const void *os_entitlements)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	} else if (os_entitlements == NULL) {
		/* Not every signature has entitlements */
		return KERN_SUCCESS;
	}

	return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
}

kern_return_t
csm_accelerate_entitlements(
	void *monitor_sig_obj,
	CEQueryContext_t *ce_ctx)
{
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
}

#endif /* CODE_SIGNING_MONITOR */

#pragma mark AppleImage4
/*
 * AppleImage4 uses the monitor environment to safeguard critical security data.
 * To keep the implementation details abstracted away, AppleImage4 always depends
 * on these APIs, regardless of whether the system has a monitor environment or
 * not.
 */

void*
kernel_image4_storage_data(
	size_t *allocated_size)
{
	return CSM_PREFIX(image4_storage_data)(allocated_size);
}

void
kernel_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce)
{
	return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
}

void
kernel_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_roll_nonce)(ndi);
}

errno_t
kernel_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out)
{
	return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
}
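
/*
 * Example (illustrative sketch): reading a nonce through the abstracted API. The
 * domain index "ndi" is assumed to be supplied by the caller; the set of valid
 * indices is defined by the AppleImage4 headers.
 *
 *	img4_nonce_t nonce = {0};
 *	errno_t err = kernel_image4_copy_nonce(ndi, &nonce);
 *	if (err == 0) {
 *		// consume the returned nonce bytes
 *	}
 */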

errno_t
kernel_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest)
{
	return CSM_PREFIX(image4_execute_object)(
		obj_spec_index,
		payload,
		manifest);
}

errno_t
kernel_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length)
{
	return CSM_PREFIX(image4_copy_object)(
		obj_spec_index,
		object_out,
		object_length);
}

const void*
kernel_image4_get_monitor_exports(void)
{
	return CSM_PREFIX(image4_get_monitor_exports)();
}

errno_t
kernel_image4_set_release_type(
	const char *release_type)
{
	return CSM_PREFIX(image4_set_release_type)(release_type);
}

errno_t
kernel_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
}