1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern_xnu.h>
27 #include <vm/pmap.h>
28 #include <vm/pmap_cs.h>
29 #include <vm/vm_map_xnu.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/locks.h>
34 #include <kern/lock_rw.h>
35 #include <libkern/libkern.h>
36 #include <libkern/section_keywords.h>
37 #include <libkern/coretrust/coretrust.h>
38 #include <pexpert/pexpert.h>
39 #include <sys/user.h>
40 #include <sys/vm.h>
41 #include <sys/proc.h>
42 #include <sys/proc_require.h>
43 #include <sys/codesign.h>
44 #include <sys/code_signing.h>
45 #include <sys/lockdown_mode.h>
46 #include <sys/reason.h>
47 #include <sys/kdebug_kernel.h>
48 #include <sys/kdebug_triage.h>
49 #include <sys/sysctl.h>
50 #include <uuid/uuid.h>
51 #include <IOKit/IOBSD.h>
52
53 #if CONFIG_SPTM
54 #include <sys/trusted_execution_monitor.h>
55 #endif
56
57 #if XNU_KERNEL_PRIVATE
58 vm_address_t
code_signing_allocate(size_t alloc_size)59 code_signing_allocate(
60 size_t alloc_size)
61 {
62 vm_address_t alloc_addr = 0;
63
64 if (alloc_size == 0) {
65 panic("%s: zero allocation size", __FUNCTION__);
66 }
67 size_t aligned_size = round_page(alloc_size);
68
69 kern_return_t ret = kmem_alloc(
70 kernel_map,
71 &alloc_addr, aligned_size,
72 KMA_KOBJECT | KMA_DATA | KMA_ZERO,
73 VM_KERN_MEMORY_SECURITY);
74
75 if (ret != KERN_SUCCESS) {
76 printf("%s: unable to allocate %lu bytes\n", __FUNCTION__, aligned_size);
77 } else if (alloc_addr == 0) {
78 printf("%s: invalid allocation\n", __FUNCTION__);
79 }
80
81 return alloc_addr;
82 }
83
84 void
code_signing_deallocate(vm_address_t * alloc_addr,size_t alloc_size)85 code_signing_deallocate(
86 vm_address_t *alloc_addr,
87 size_t alloc_size)
88 {
89 if (alloc_addr == NULL) {
90 panic("%s: invalid pointer provided", __FUNCTION__);
91 } else if ((*alloc_addr == 0) || ((*alloc_addr & PAGE_MASK) != 0)) {
92 panic("%s: address provided: %p", __FUNCTION__, (void*)(*alloc_addr));
93 } else if (alloc_size == 0) {
94 panic("%s: zero allocation size", __FUNCTION__);
95 }
96 size_t aligned_size = round_page(alloc_size);
97
98 /* Free the allocation */
99 kmem_free(kernel_map, *alloc_addr, aligned_size);
100
101 /* Clear the address */
102 *alloc_addr = 0;
103 }
104 #endif /* XNU_KERNEL_PRIVATE */
105
/* sysctl tree: security.codesigning.* exposes read-only code signing state */
SYSCTL_DECL(_security);
SYSCTL_DECL(_security_codesigning);
SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");

/* Set (store-release) once code_signing_configuration() has computed state */
static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
/* Monitor type (TXM/PPL/none) resolved during early boot */
static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
/* Bitmask of supported/exempted code signing features */
static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;
/* Non-zero once complete_security_boot_mode() succeeds; holds the boot mode */
static uint32_t security_boot_mode_complete = 0;

SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");

SYSCTL_UINT(
	_security_codesigning, OID_AUTO,
	security_boot_mode_complete, CTLFLAG_RD,
	&security_boot_mode_complete, 0, "security boot mode completion status");
122
/*
 * code_signing_configuration
 *
 * Resolve and return the system's code signing monitor type and feature
 * configuration. The first caller computes the configuration -- all features
 * start enabled and are then pared down by monitor policy and, on DEVELOPMENT
 * or DEBUG builds, by boot-args -- and publishes it with store-release.
 * Subsequent callers observe the cached values.
 *
 * @param monitor_type_out optionally filled with the monitor type (may be NULL)
 * @param config_out optionally filled with the configuration bits (may be NULL)
 */
void
code_signing_configuration(
	code_signing_monitor_type_t *monitor_type_out,
	code_signing_config_t *config_out)
{
	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
	code_signing_config_t config = 0;

	/*
	 * Since we read this variable with load-acquire semantics, if we observe a value
	 * of true, it means we should be able to observe writes to cs_monitor and also
	 * cs_config.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == true) {
		goto config_set;
	}

	/*
	 * Add support for all the code signing features. This function is called very
	 * early in the system boot, much before kernel extensions such as Apple Mobile
	 * File Integrity come online. As a result, this function assumes that all the
	 * code signing features are enabled, and later on, different components can
	 * disable support for different features using disable_code_signing_feature().
	 */
	config |= CS_CONFIG_MAP_JIT;
	config |= CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
	config |= CS_CONFIG_COMPILATION_SERVICE;
	config |= CS_CONFIG_LOCAL_SIGNING;
	config |= CS_CONFIG_OOP_JIT;

#if CODE_SIGNING_MONITOR
	/* Mark the code signing monitor as enabled if required */
	if (csm_enabled() == true) {
		config |= CS_CONFIG_CSM_ENABLED;
	}

#if CONFIG_SPTM
	/*
	 * Since TrustedExecutionMonitor cannot call into any function within XNU, we
	 * query its code signing configuration even before this function is called.
	 * Using that, we modify the state of the code signing features available.
	 */
	if (csm_enabled() == true) {
		bool platform_code_only = txm_cs_config->systemPolicy->platformCodeOnly;

		/* Disable unsupported features when enforcing platform-code-only */
		if (platform_code_only == true) {
			config &= ~CS_CONFIG_MAP_JIT;
			config &= ~CS_CONFIG_COMPILATION_SERVICE;
			config &= ~CS_CONFIG_LOCAL_SIGNING;
			config &= ~CS_CONFIG_OOP_JIT;
		}

		/*
		 * Restricted Execution Mode support. The pattern for this code snippet breaks
		 * the norm compared to others. For the other features, we consider them enabled
		 * by default unless TXM disables them. For REM, given this is a TXM only feature,
		 * we consider it disabled unless TXM explicitly tells us it is enabled.
		 */
		if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
			config |= CS_CONFIG_REM_SUPPORTED;
		}

		/* MAP_JIT support */
		if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
			config &= ~CS_CONFIG_MAP_JIT;
		}

		/* Developer mode support */
		if (txm_cs_config->systemPolicy->featureSet.developerMode == false) {
			config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		}

		/* Compilation service support */
		if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
			config &= ~CS_CONFIG_COMPILATION_SERVICE;
		}

		/* Local signing support */
		if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
			config &= ~CS_CONFIG_LOCAL_SIGNING;
		}

		/* OOP-JIT support */
		if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
			config &= ~CS_CONFIG_OOP_JIT;
		}
	}
	monitor_type = CS_MONITOR_TYPE_TXM;
#elif PMAP_CS_PPL_MONITOR
	monitor_type = CS_MONITOR_TYPE_PPL;
#endif /* CONFIG_SPTM */
#endif /* CODE_SIGNING_MONITOR */

#if DEVELOPMENT || DEBUG
	/*
	 * We only ever need to parse for boot-args based exemption state on DEVELOPMENT
	 * or DEBUG builds as this state is not respected by any code signing component
	 * on RELEASE builds.
	 */

#define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
#define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
#define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80

	int amfi_mask = 0;
	int amfi_allow_any_signature = 0;
	int amfi_unrestrict_task_for_pid = 0;
	int amfi_get_out_of_my_way = 0;
	int cs_enforcement_disabled = 0;
	int cs_integrity_skip = 0;
	int amfi_relax_profile_trust = 0;

	/* Parse the AMFI mask */
	PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));

	/* Parse the AMFI soft-bypass */
	PE_parse_boot_argn(
		"amfi_allow_any_signature",
		&amfi_allow_any_signature,
		sizeof(amfi_allow_any_signature));

	/* Parse the AMFI debug-bypass */
	PE_parse_boot_argn(
		"amfi_unrestrict_task_for_pid",
		&amfi_unrestrict_task_for_pid,
		sizeof(amfi_unrestrict_task_for_pid));

	/* Parse the AMFI hard-bypass */
	PE_parse_boot_argn(
		"amfi_get_out_of_my_way",
		&amfi_get_out_of_my_way,
		sizeof(amfi_get_out_of_my_way));

	/* Parse the system code signing hard-bypass */
	PE_parse_boot_argn(
		"cs_enforcement_disable",
		&cs_enforcement_disabled,
		sizeof(cs_enforcement_disabled));

	/* Parse the system code signing integrity-check bypass */
	PE_parse_boot_argn(
		"cs_integrity_skip",
		&cs_integrity_skip,
		sizeof(cs_integrity_skip));

	/* Parse the AMFI profile trust bypass */
	PE_parse_boot_argn(
		"amfi_relax_profile_trust",
		&amfi_relax_profile_trust,
		sizeof(amfi_relax_profile_trust));

	/* CS_CONFIG_UNRESTRICTED_DEBUGGING */
	if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	} else if (amfi_unrestrict_task_for_pid) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	}

	/* CS_CONFIG_ALLOW_ANY_SIGNATURE */
	if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_allow_any_signature) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	}

	/* CS_CONFIG_ENFORCEMENT_DISABLED */
	if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ENFORCEMENT_DISABLED;
	}

	/* CS_CONFIG_GET_OUT_OF_MY_WAY */
	if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	}

	/* CS_CONFIG_INTEGRITY_SKIP */
	if (cs_integrity_skip) {
		config |= CS_CONFIG_INTEGRITY_SKIP;
	}

	/* CS_CONFIG_RELAX_PROFILE_TRUST */
	if (amfi_relax_profile_trust) {
		config |= CS_CONFIG_RELAX_PROFILE_TRUST;
	}

#if CONFIG_SPTM

	/* The monitor's policy overrules boot-arg exemptions it does not allow */
	if (csm_enabled() == true) {
		/* allow_any_signature */
		if (txm_cs_config->exemptions.allowAnySignature == false) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		/* unrestrict_task_for_pid */
		if (txm_ro_data && !txm_ro_data->exemptions.allowUnrestrictedDebugging) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		/* cs_enforcement_disable */
		if (txm_ro_data && !txm_ro_data->exemptions.allowModifiedCode) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}

		/* get_out_of_my_way (skip_trust_evaluation) */
		if (txm_cs_config->exemptions.skipTrustEvaluation == false) {
			config &= ~CS_CONFIG_GET_OUT_OF_MY_WAY;
		}

#if kTXMKernelAPIVersion >= 7
		/*
		 * In some cases, the relax_profile_trust exemption can be set even without
		 * the boot-arg on TXM devices. As a result, we always overrule the kernel's
		 * data with TXM's data for this exemption.
		 */
		if (txm_cs_config->exemptions.relaxProfileTrust == true) {
			config |= CS_CONFIG_RELAX_PROFILE_TRUST;
		} else {
			config &= ~CS_CONFIG_RELAX_PROFILE_TRUST;
		}
#endif
	}

#elif PMAP_CS_PPL_MONITOR

	/* PPL variant: pmap_cs_* boot-args can revoke the generic exemptions */
	if (csm_enabled() == true) {
		int pmap_cs_allow_any_signature = 0;
		bool override = PE_parse_boot_argn(
			"pmap_cs_allow_any_signature",
			&pmap_cs_allow_any_signature,
			sizeof(pmap_cs_allow_any_signature));

		if (!pmap_cs_allow_any_signature && override) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		int pmap_cs_unrestrict_task_for_pid = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_unrestrict_pmap_cs_disable",
			&pmap_cs_unrestrict_task_for_pid,
			sizeof(pmap_cs_unrestrict_task_for_pid));

		if (!pmap_cs_unrestrict_task_for_pid && override) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		int pmap_cs_enforcement_disable = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_allow_modified_code_pages",
			&pmap_cs_enforcement_disable,
			sizeof(pmap_cs_enforcement_disable));

		if (!pmap_cs_enforcement_disable && override) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}
	}

#endif /* CONFIG_SPTM */
#endif /* DEVELOPMENT || DEBUG */

	os_atomic_store(&cs_monitor, monitor_type, relaxed);
	os_atomic_store(&cs_config, config, relaxed);

	/*
	 * We write the cs_config_set variable with store-release semantics which means
	 * no writes before this call will be re-ordered to after this call. Hence, if
	 * someone reads this variable with load-acquire semantics, and they observe a
	 * value of true, then they will be able to observe the correct values of the
	 * cs_monitor and the cs_config variables as well.
	 */
	os_atomic_store(&cs_config_set, true, release);

config_set:
	/* Ensure configuration has been set */
	assert(os_atomic_load(&cs_config_set, relaxed) == true);

	/* Set the monitor type */
	if (monitor_type_out) {
		*monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
	}

	/* Set the configuration */
	if (config_out) {
		*config_out = os_atomic_load(&cs_config, relaxed);
	}
}
419
/*
 * disable_code_signing_feature
 *
 * Clear exactly one feature bit from the published code signing configuration.
 * Must be called after code_signing_configuration() has initialized the state;
 * panics otherwise, and panics on any feature value not in the switch below.
 */
void
disable_code_signing_feature(
	code_signing_config_t feature)
{
	/*
	 * We require that this function be called only after the code signing config
	 * has been setup initially with a call to code_signing_configuration.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == false) {
		panic("attempted to disable code signing feature without init: %u", feature);
	}

	/*
	 * We require that only a single feature be disabled through a single call to this
	 * function. Moreover, we ensure that only valid features are being disabled.
	 */
	switch (feature) {
	case CS_CONFIG_DEVELOPER_MODE_SUPPORTED:
		cs_config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		break;

	case CS_CONFIG_COMPILATION_SERVICE:
		cs_config &= ~CS_CONFIG_COMPILATION_SERVICE;
		break;

	case CS_CONFIG_LOCAL_SIGNING:
		cs_config &= ~CS_CONFIG_LOCAL_SIGNING;
		break;

	case CS_CONFIG_OOP_JIT:
		cs_config &= ~CS_CONFIG_OOP_JIT;
		break;

	case CS_CONFIG_MAP_JIT:
		cs_config &= ~CS_CONFIG_MAP_JIT;
		break;

	default:
		panic("attempted to disable a code signing feature invalidly: %u", feature);
	}

	/* Ensure all readers can observe the latest data */
#if defined(__arm64__)
	__asm__ volatile ("dmb ish" ::: "memory");
#elif defined(__x86_64__)
	__asm__ volatile ("mfence" ::: "memory");
#else
#error "Unknown platform -- fence instruction unavailable"
#endif
}
470
/*
 * Query the monitor environment for the physical address and size of the
 * secure channel shared page; thin pass-through to the CSM implementation.
 */
kern_return_t
secure_channel_shared_page(
	uint64_t *secure_channel_phys,
	size_t *secure_channel_size)
{
	return CSM_PREFIX(secure_channel_shared_page)(
		secure_channel_phys,
		secure_channel_size);
}
480
481 #pragma mark Developer Mode
482
/* Turn developer mode on through the code signing environment abstraction */
void
enable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(true);
}
488
/* Turn developer mode off through the code signing environment abstraction */
void
disable_developer_mode(void)
{
	CSM_PREFIX(toggle_developer_mode)(false);
}
494
495 bool
developer_mode_state(void)496 developer_mode_state(void)
497 {
498 /* Assume false if the pointer isn't setup */
499 if (developer_mode_enabled == NULL) {
500 return false;
501 }
502
503 return os_atomic_load(developer_mode_enabled, relaxed);
504 }
505
506 #pragma mark Restricted Execution Mode
507
/* Ask the code signing environment to enable Restricted Execution Mode */
kern_return_t
restricted_execution_mode_enable(void)
{
	return CSM_PREFIX(rem_enable)();
}
513
/* Query the code signing environment for the Restricted Execution Mode state */
kern_return_t
restricted_execution_mode_state(void)
{
	return CSM_PREFIX(rem_state)();
}
519
/* Propagate the current device state to the code signing monitor */
void
update_csm_device_state(void)
{
	CSM_PREFIX(update_device_state)();
}
525
/*
 * Inform the code signing environment that the security boot mode is final,
 * then record the boot mode in the security.codesigning sysctl tree.
 */
void
complete_security_boot_mode(
	uint32_t security_boot_mode)
{
	CSM_PREFIX(complete_security_boot_mode)(security_boot_mode);

	/*
	 * If we reach here, it means the completion of the security boot mode was
	 * successful. We update our sysctl with the provided boot mode in order to
	 * signify both completion and the boot mode identifier.
	 */
	security_boot_mode_complete = security_boot_mode;
}
539
540 #pragma mark Provisioning Profiles
541 /*
542 * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
543 * profiles when we have a monitor since the monitor needs to independently verify the
544 * profile data as well.
545 */
546
/*
 * Trigger a garbage collection pass over registered provisioning profiles.
 * No-op when no code signing monitor is built in, since XNU only tracks
 * profiles on behalf of a monitor.
 */
void
garbage_collect_provisioning_profiles(void)
{
#if CODE_SIGNING_MONITOR
	csm_free_provisioning_profiles();
#endif
}
554
555 #if CODE_SIGNING_MONITOR
556
/* Structure used to maintain the set of registered profiles on the system */
typedef struct _cs_profile {
	/* The UUID of the registered profile -- lookup key for the set */
	uuid_t profile_uuid;

	/* The profile validation object from the monitor (opaque to XNU) */
	void *profile_obj;

	/*
	 * In order to minimize the number of times the same profile would need to be
	 * registered, we allow frequently used profiles to skip the garbage collector
	 * for one pass.
	 */
	bool skip_collector;

	/* We skip repeated trust validations of the profile */
	bool trusted;

	/* Linked list linkage */
	SLIST_ENTRY(_cs_profile) link;
} cs_profile_t;
578
/* Linked list head for registered profiles */
static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);

/* Lock for the provisioning profiles -- guards all_profiles and its entries */
LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
decl_lck_rw_data(, profiles_lock);
585
/*
 * One-time setup for the kernel's provisioning profile tracking: requires the
 * CoreTrust kext to have come online (panics otherwise), initializes the
 * profile set lock, and performs PPL-specific initialization when present.
 */
void
csm_initialize_provisioning_profiles(void)
{
	/* Ensure the CoreTrust kernel extension has loaded */
	if (coretrust == NULL) {
		panic("coretrust interface not available");
	}

	/* Initialize the provisioning profiles lock */
	lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
	printf("initialized XNU provisioning profile data\n");

#if PMAP_CS_PPL_MONITOR
	pmap_initialize_provisioning_profiles();
#endif
}
602
603 static cs_profile_t*
search_for_profile_uuid(const uuid_t profile_uuid)604 search_for_profile_uuid(
605 const uuid_t profile_uuid)
606 {
607 cs_profile_t *profile = NULL;
608
609 /* Caller is required to acquire the lock */
610 lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);
611
612 SLIST_FOREACH(profile, &all_profiles, link) {
613 if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
614 return profile;
615 }
616 }
617
618 return NULL;
619 }
620
/*
 * csm_register_provisioning_profile
 *
 * Register a provisioning profile blob with the code signing monitor and track
 * it in the kernel's profile set, keyed by UUID. Returns KERN_NOT_SUPPORTED
 * when no monitor is enabled, KERN_ALREADY_IN_SET when the UUID is already
 * registered, or the monitor's registration status otherwise. The wrapper
 * object is freed on any non-success path.
 */
kern_return_t
csm_register_provisioning_profile(
	const uuid_t profile_uuid,
	const void *profile_blob,
	const size_t profile_blob_size)
{
	cs_profile_t *profile = NULL;
	void *monitor_profile_obj = NULL;
	kern_return_t ret = KERN_DENIED;

	/* Only proceed if code-signing-monitor is enabled */
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Allocate storage for the profile wrapper object (before taking the lock) */
	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
	assert(profile != NULL);

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Check to make sure this isn't a duplicate UUID */
	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
	if (dup_profile != NULL) {
		/* This profile might be used soon -- skip garbage collector */
		dup_profile->skip_collector = true;

		ret = KERN_ALREADY_IN_SET;
		goto exit;
	}

	ret = CSM_PREFIX(register_provisioning_profile)(
		profile_blob,
		profile_blob_size,
		&monitor_profile_obj);

	if (ret == KERN_SUCCESS) {
		/* Copy in the profile UUID */
		uuid_copy(profile->profile_uuid, profile_uuid);

		/* Setup the monitor's profile object */
		profile->profile_obj = monitor_profile_obj;

		/* This profile might be used soon -- skip garbage collector */
		profile->skip_collector = true;

		/* Insert at the head of the profile set */
		SLIST_INSERT_HEAD(&all_profiles, profile, link);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		/* Free the profile wrapper object (not inserted into the set) */
		kfree_type(cs_profile_t, profile);
		profile = NULL;

		if (ret != KERN_ALREADY_IN_SET) {
			printf("unable to register profile with monitor: %d\n", ret);
		}
	}

	return ret;
}
688
/*
 * csm_trust_provisioning_profile
 *
 * Ask the monitor to validate trust of a previously registered profile using
 * the supplied signature data. A profile that already passed validation is
 * accepted immediately. Returns KERN_NOT_FOUND when the UUID is unknown.
 */
kern_return_t
csm_trust_provisioning_profile(
	const uuid_t profile_uuid,
	const void *sig_data,
	size_t sig_size)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_NOT_FOUND;

	/*
	 * We don't explicitly make a check here for if the code-signing-monitor is enabled
	 * or not because this function should never be called unless registration of the
	 * profile succeeded, which it won't in cases where the CSM is disabled.
	 *
	 * If this function does somehow get called, it'll result in a panic -- this is good
	 * for us to detect and to fix the code path which results in this behavior.
	 */

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Search for the registered profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		goto exit;
	} else if (profile->trusted == true) {
		/* Trust was already established on a previous call */
		ret = KERN_SUCCESS;
		goto exit;
	}

	ret = CSM_PREFIX(trust_provisioning_profile)(
		profile->profile_obj,
		sig_data,
		sig_size);

	/* Mark profile as trusted if needed */
	if (ret == KERN_SUCCESS) {
		profile->trusted = true;
	} else {
		printf("unable to trust profile with monitor: %d\n", ret);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	return ret;
}
737
/*
 * csm_associate_provisioning_profile
 *
 * Associate a monitor signature object with a registered profile, identified
 * by UUID. On success the profile is marked active so it survives the next
 * garbage collection pass. Returns KERN_NOT_FOUND for an unknown UUID.
 */
kern_return_t
csm_associate_provisioning_profile(
	void *monitor_sig_obj,
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_DENIED;

	/*
	 * We don't explicitly make a check here for if the code-signing-monitor is enabled
	 * or not because this function should never be called unless registration of the
	 * profile succeeded, which it won't in cases where the CSM is disabled.
	 *
	 * If this function does somehow get called, it'll result in a panic -- this is good
	 * for us to detect and to fix the code path which results in this behavior.
	 */

	/* Lock the profile set as shared */
	lck_rw_lock_shared(&profiles_lock);

	/* Search for the provisioning profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		ret = KERN_NOT_FOUND;
		goto exit;
	}

	ret = CSM_PREFIX(associate_provisioning_profile)(
		monitor_sig_obj,
		profile->profile_obj);

	if (ret == KERN_SUCCESS) {
		/*
		 * This seems like an active profile -- let it skip the garbage collector on
		 * the next pass. We can modify this field even though we've only taken a shared
		 * lock as in this case we're always setting it to a fixed value.
		 */
		profile->skip_collector = true;
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_shared(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		printf("unable to associate profile: %d\n", ret);
	}
	return ret;
}
787
788 kern_return_t
csm_disassociate_provisioning_profile(void * monitor_sig_obj)789 csm_disassociate_provisioning_profile(
790 void *monitor_sig_obj)
791 {
792 kern_return_t ret = KERN_DENIED;
793
794 if (csm_enabled() == false) {
795 return KERN_NOT_SUPPORTED;
796 }
797
798 /* Call out to the monitor */
799 ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);
800
801 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
802 printf("unable to disassociate profile: %d\n", ret);
803 }
804 return ret;
805 }
806
/*
 * Unregister a single profile from the monitor. Returns KERN_SUCCESS when the
 * monitor released it, KERN_FAILURE when the monitor reports it is still in
 * use; any other status indicates corrupted profile state and panics.
 */
static kern_return_t
unregister_provisioning_profile(
	cs_profile_t *profile)
{
	kern_return_t ret = KERN_DENIED;

	/* Call out to the monitor */
	ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);

	/*
	 * KERN_FAILURE represents the case when the unregistration failed because the
	 * monitor noted that the profile was still being used. Other than that, there
	 * is no other error expected out of this interface. In fact, there is no easy
	 * way to deal with other errors, as the profile state may be corrupted. If we
	 * see a different error, then we panic.
	 */
	if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
		panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
	}

	return ret;
}
829
/*
 * Garbage collection pass over the registered profile set. Profiles flagged
 * skip_collector get one grace pass (the flag is cleared for next time);
 * everything else is unregistered from the monitor and, on success, removed
 * from the set and freed. Profiles the monitor still considers in use are
 * left in place for a later pass.
 */
void
csm_free_provisioning_profiles(void)
{
	kern_return_t ret = KERN_DENIED;
	cs_profile_t *profile = NULL;
	cs_profile_t *temp_profile = NULL;

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* _SAFE iteration: entries may be removed while walking the list */
	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
		if (profile->skip_collector == true) {
			profile->skip_collector = false;
			continue;
		}

		/* Attempt to unregister this profile from the system */
		ret = unregister_provisioning_profile(profile);
		if (ret == KERN_SUCCESS) {
			/* Remove the profile from the profile set */
			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);

			/* Free the memory consumed for the profile wrapper object */
			kfree_type(cs_profile_t, profile);
			profile = NULL;
		}
	}

	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);
}
861
862 #endif /* CODE_SIGNING_MONITOR */
863
864 #pragma mark Code Signing
865 /*
866 * AMFI performs full signature validation by itself. For some things, AMFI uses XNU in
867 * order to abstract away the underlying implementation for data storage, but for most of
868 * these, AMFI doesn't directly interact with them, and they're only required when we have
869 * a code signing monitor on the system.
870 */
871
/* Record the compilation service cdhash in the code signing environment */
void
set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
}
878
/* Check a cdhash against the stored compilation service cdhash */
bool
match_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
}
885
/* Record the local signing public key in the code signing environment */
void
set_local_signing_public_key(
	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
	CSM_PREFIX(set_local_signing_public_key)(public_key);
}
892
/* Fetch the local signing public key from the code signing environment */
uint8_t*
get_local_signing_public_key(void)
{
	return CSM_PREFIX(get_local_signing_public_key)();
}
898
/*
 * Unrestrict a locally-signed cdhash with the code signing monitor. No-op on
 * builds without a monitor, since AMFI manages code signing on its own there.
 */
void
unrestrict_local_signing_cdhash(
	__unused const uint8_t cdhash[CS_CDHASH_LEN])
{
	/*
	 * Since AMFI manages code signing on its own, we only need to unrestrict the
	 * local signing cdhash when we have a monitor environment.
	 */

#if CODE_SIGNING_MONITOR
	CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
#endif
}
912
/*
 * Debugger (KDP) accessor for an address space's trust level. Delegates to
 * the monitor when one is built in; otherwise reports KERN_NOT_SUPPORTED.
 */
kern_return_t
get_trust_level_kdp(
	__unused pmap_t pmap,
	__unused uint32_t *trust_level)
{
#if CODE_SIGNING_MONITOR
	return csm_get_trust_level_kdp(pmap, trust_level);
#else
	return KERN_NOT_SUPPORTED;
#endif
}
924
/*
 * Debugger (KDP) accessor for an address space's JIT region bounds. Delegates
 * to the monitor when one is built in; otherwise reports KERN_NOT_SUPPORTED.
 */
kern_return_t
get_jit_address_range_kdp(
	__unused pmap_t pmap,
	__unused uintptr_t *jit_region_start,
	__unused uintptr_t *jit_region_end)
{
#if CODE_SIGNING_MONITOR
	return csm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
#else
	return KERN_NOT_SUPPORTED;
#endif
}
937
/*
 * csm_resolve_os_entitlements_from_proc
 *
 * Resolve a process's OS entitlements through the code signing monitor by
 * walking proc -> task -> vm_map -> pmap and querying the monitor with the
 * pmap. Returns KERN_NOT_SUPPORTED without a monitor, KERN_NOT_FOUND when the
 * task or its map cannot be obtained, or the monitor's status otherwise.
 */
kern_return_t
csm_resolve_os_entitlements_from_proc(
	__unused const proc_t process,
	__unused const void **os_entitlements)
{
#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Ensure the process comes from the proc_task zone */
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Acquire the task from the proc */
	task = proc_task(process);
	if (task == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the virtual memory map from the task -- takes a reference on it */
	task_map = get_task_map_reference(task);
	if (task_map == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the pmap from the virtual memory map */
	task_pmap = vm_map_get_pmap(task_map);
	assert(task_pmap != NULL);

	/* Call into the monitor to resolve the entitlements */
	ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);

	/* Release the reference on the virtual memory map */
	vm_map_deallocate(task_map);

	return ret;
#else
	return KERN_NOT_SUPPORTED;
#endif
}
983
/*
 * address_space_debugged_state
 *
 * Determine whether a process's address space is considered debugged.
 * Requires developer mode to be enabled; otherwise always KERN_DENIED.
 * With a code signing monitor enabled, the monitor's verdict on the task's
 * pmap is authoritative. Without one, the process's read-only CS_DEBUGGED
 * flag decides, and on macOS unsigned code (CS_SIGNED clear) is treated as
 * debugged as well.
 */
kern_return_t
address_space_debugged_state(
	const proc_t process)
{
	/* Must pass in a valid proc_t */
	if (process == NULL) {
		printf("%s: provided a NULL process\n", __FUNCTION__);
		return KERN_DENIED;
	}
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Developer mode must always be enabled for this to return successfully */
	if (developer_mode_state() == false) {
		return KERN_DENIED;
	}

#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;

	if (csm_enabled() == true) {
		/* Acquire the task from the proc */
		task = proc_task(process);
		if (task == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the virtual memory map from the task -- takes a reference on it */
		task_map = get_task_map_reference(task);
		if (task_map == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the pmap from the virtual memory map */
		task_pmap = vm_map_get_pmap(task_map);
		assert(task_pmap != NULL);

		/* Acquire the state from the monitor */
		kern_return_t ret = CSM_PREFIX(address_space_debugged)(task_pmap);

		/* Release the reference on the virtual memory map */
		vm_map_deallocate(task_map);

		return ret;
	}
#endif /* CODE_SIGNING_MONITOR */

	/* Check read-only process flags for state */
	if (proc_getcsflags(process) & CS_DEBUGGED) {
		return KERN_SUCCESS;
	}

#if XNU_TARGET_OS_OSX
	/*
	 * For macOS systems only, we allow the execution of unsigned code. On Intel, code
	 * doesn't need to be signed, and on ASi, Rosetta binaries don't need to be signed.
	 * In these cases, we return successfully from this function because we don't know
	 * what else we can do.
	 */
	if ((proc_getcsflags(process) & CS_SIGNED) == 0) {
		return KERN_SUCCESS;
	}
#endif

	return KERN_DENIED;
}
1051
1052 bool
is_address_space_debugged(const proc_t process)1053 is_address_space_debugged(const proc_t process)
1054 {
1055 return address_space_debugged_state(process) == KERN_SUCCESS;
1056 }
1057
1058 #if CODE_SIGNING_MONITOR
1059
1060 bool
csm_enabled(void)1061 csm_enabled(void)
1062 {
1063 return CSM_PREFIX(code_signing_enabled)();
1064 }
1065
1066 vm_size_t
csm_signature_size_limit(void)1067 csm_signature_size_limit(void)
1068 {
1069 return CSM_PREFIX(managed_code_signature_size)();
1070 }
1071
/*
 * If lockdown mode is active, inform the code signing monitor and disable
 * the code-signing features that lockdown mode forbids. No-op when lockdown
 * mode is off.
 */
void
csm_check_lockdown_mode(void)
{
	/* Nothing to do unless lockdown mode is engaged */
	if (get_lockdown_mode_state() == 0) {
		return;
	}

	/* Inform the code signing monitor about lockdown mode */
	CSM_PREFIX(enter_lockdown_mode)();

#if CONFIG_SPTM
	/*
	 * On SPTM systems, TXM's system policy decides which features survive
	 * lockdown mode; disable each feature the policy does not allow.
	 */

	/* MAP_JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}

	/* Compilation service lockdown */
	if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
		disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
	}

	/* Local signing lockdown */
	if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
		disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	}

	/* OOP-JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
		disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	}
#else
	/*
	 * Lockdown mode is supposed to disable all forms of JIT on the system. For now,
	 * we leave JIT enabled by default until some blockers are resolved. The way this
	 * code is written, we don't need to change anything once we enforce MAP_JIT to
	 * be disabled for lockdown mode.
	 */
	if (ppl_lockdown_mode_enforce_jit == true) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}
	disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
#endif /* CONFIG_SPTM */
}
1117
/*
 * Handle a code-signing violation for a process by force-exiting it with a
 * Mach EXC_BAD_ACCESS exception (so a crash report can be generated for
 * triage). No-op when the code signing monitor is disabled.
 *
 * @param proc  The offending process; currently must be current_proc().
 * @param addr  The faulting user address; recorded (pointer-auth stripped)
 *              as the exception subcode.
 */
void
csm_code_signing_violation(
	proc_t proc,
	vm_offset_t addr)
{
	/* No enforcement if code-signing-monitor is disabled */
	if (csm_enabled() == false) {
		return;
	}

	/* Leave a log for triage purposes */
	printf("[%s] code-signing-violation at %p\n", proc_best_name(proc), (void*)addr);

	/*
	 * For now, the only input into this function is from current_proc(), so using current_thread()
	 * over here is alright. If this function ever gets called from another location, we need to
	 * then change where we get the user thread from.
	 */
	assert(proc == current_proc());

	/*
	 * Force exit the process and set it to allow generating crash reports, which is critical
	 * for better triaging these issues.
	 */

	exception_info_t info = {
		.os_reason = OS_REASON_CODESIGNING,
		.exception_type = EXC_BAD_ACCESS,
		.mx_code = CODESIGNING_EXIT_REASON_INVALID_PAGE,
		.mx_subcode = VM_USER_STRIP_PTR(addr),
		.kt_info.kt_subsys = KDBG_TRIAGE_SUBSYS_VM,
		.kt_info.kt_error = KDBG_TRIAGE_VM_CODE_SIGNING
	};

	exit_with_mach_exception(proc, info, PX_KTRIAGE);
}
1154
1155 kern_return_t
csm_register_code_signature(const vm_address_t signature_addr,const vm_size_t signature_size,const vm_offset_t code_directory_offset,const char * signature_path,void ** monitor_sig_obj,vm_address_t * monitor_signature_addr)1156 csm_register_code_signature(
1157 const vm_address_t signature_addr,
1158 const vm_size_t signature_size,
1159 const vm_offset_t code_directory_offset,
1160 const char *signature_path,
1161 void **monitor_sig_obj,
1162 vm_address_t *monitor_signature_addr)
1163 {
1164 if (csm_enabled() == false) {
1165 return KERN_NOT_SUPPORTED;
1166 }
1167
1168 return CSM_PREFIX(register_code_signature)(
1169 signature_addr,
1170 signature_size,
1171 code_directory_offset,
1172 signature_path,
1173 monitor_sig_obj,
1174 monitor_signature_addr);
1175 }
1176
1177 kern_return_t
csm_unregister_code_signature(void * monitor_sig_obj)1178 csm_unregister_code_signature(
1179 void *monitor_sig_obj)
1180 {
1181 if (csm_enabled() == false) {
1182 return KERN_NOT_SUPPORTED;
1183 }
1184
1185 return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
1186 }
1187
1188 kern_return_t
csm_verify_code_signature(void * monitor_sig_obj,uint32_t * trust_level)1189 csm_verify_code_signature(
1190 void *monitor_sig_obj,
1191 uint32_t *trust_level)
1192 {
1193 if (csm_enabled() == false) {
1194 return KERN_NOT_SUPPORTED;
1195 }
1196
1197 return CSM_PREFIX(verify_code_signature)(monitor_sig_obj, trust_level);
1198 }
1199
1200 kern_return_t
csm_reconstitute_code_signature(void * monitor_sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)1201 csm_reconstitute_code_signature(
1202 void *monitor_sig_obj,
1203 vm_address_t *unneeded_addr,
1204 vm_size_t *unneeded_size)
1205 {
1206 if (csm_enabled() == false) {
1207 return KERN_NOT_SUPPORTED;
1208 }
1209
1210 return CSM_PREFIX(reconstitute_code_signature)(
1211 monitor_sig_obj,
1212 unneeded_addr,
1213 unneeded_size);
1214 }
1215
1216 kern_return_t
csm_associate_code_signature(pmap_t monitor_pmap,void * monitor_sig_obj,const vm_address_t region_addr,const vm_size_t region_size,const vm_offset_t region_offset)1217 csm_associate_code_signature(
1218 pmap_t monitor_pmap,
1219 void *monitor_sig_obj,
1220 const vm_address_t region_addr,
1221 const vm_size_t region_size,
1222 const vm_offset_t region_offset)
1223 {
1224 if (csm_enabled() == false) {
1225 return KERN_NOT_SUPPORTED;
1226 }
1227
1228 return CSM_PREFIX(associate_code_signature)(
1229 monitor_pmap,
1230 monitor_sig_obj,
1231 region_addr,
1232 region_size,
1233 region_offset);
1234 }
1235
1236 kern_return_t
csm_allow_jit_region(pmap_t monitor_pmap)1237 csm_allow_jit_region(
1238 pmap_t monitor_pmap)
1239 {
1240 if (csm_enabled() == false) {
1241 return KERN_SUCCESS;
1242 } else if (monitor_pmap == NULL) {
1243 return KERN_DENIED;
1244 }
1245
1246 kern_return_t ret = CSM_PREFIX(allow_jit_region)(monitor_pmap);
1247 if (ret == KERN_NOT_SUPPORTED) {
1248 /*
1249 * Some monitor environments do not support this API and as a result will
1250 * return KERN_NOT_SUPPORTED. The caller here should not interpret that as
1251 * a failure.
1252 */
1253 ret = KERN_SUCCESS;
1254 }
1255
1256 return ret;
1257 }
1258
1259 kern_return_t
csm_associate_jit_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)1260 csm_associate_jit_region(
1261 pmap_t monitor_pmap,
1262 const vm_address_t region_addr,
1263 const vm_size_t region_size)
1264 {
1265 if (csm_enabled() == false) {
1266 return KERN_NOT_SUPPORTED;
1267 }
1268
1269 return CSM_PREFIX(associate_jit_region)(
1270 monitor_pmap,
1271 region_addr,
1272 region_size);
1273 }
1274
1275 kern_return_t
csm_associate_debug_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)1276 csm_associate_debug_region(
1277 pmap_t monitor_pmap,
1278 const vm_address_t region_addr,
1279 const vm_size_t region_size)
1280 {
1281 if (csm_enabled() == false) {
1282 return KERN_NOT_SUPPORTED;
1283 }
1284
1285 kern_return_t ret = CSM_PREFIX(associate_debug_region)(
1286 monitor_pmap,
1287 region_addr,
1288 region_size);
1289
1290 if (ret != KERN_SUCCESS) {
1291 printf("unable to create debug region in address space: %d\n", ret);
1292 }
1293 return ret;
1294 }
1295
1296 kern_return_t
csm_allow_invalid_code(pmap_t pmap)1297 csm_allow_invalid_code(
1298 pmap_t pmap)
1299 {
1300 if (csm_enabled() == false) {
1301 return KERN_NOT_SUPPORTED;
1302 }
1303
1304 return CSM_PREFIX(allow_invalid_code)(pmap);
1305 }
1306
1307 kern_return_t
csm_get_trust_level_kdp(pmap_t pmap,uint32_t * trust_level)1308 csm_get_trust_level_kdp(
1309 pmap_t pmap,
1310 uint32_t *trust_level)
1311 {
1312 if (csm_enabled() == false) {
1313 return KERN_NOT_SUPPORTED;
1314 }
1315
1316 return CSM_PREFIX(get_trust_level_kdp)(pmap, trust_level);
1317 }
1318
1319 kern_return_t
csm_get_jit_address_range_kdp(pmap_t pmap,uintptr_t * jit_region_start,uintptr_t * jit_region_end)1320 csm_get_jit_address_range_kdp(
1321 pmap_t pmap,
1322 uintptr_t *jit_region_start,
1323 uintptr_t *jit_region_end)
1324 {
1325 if (csm_enabled() == false) {
1326 return KERN_NOT_SUPPORTED;
1327 }
1328
1329 return CSM_PREFIX(get_jit_address_range_kdp)(pmap, jit_region_start, jit_region_end);
1330 }
1331
1332 kern_return_t
csm_address_space_exempt(const pmap_t pmap)1333 csm_address_space_exempt(
1334 const pmap_t pmap)
1335 {
1336 /*
1337 * These exemptions are actually orthogonal to the code signing enforcement. As
1338 * a result, we let each monitor explicitly decide how to deal with the exemption
1339 * in case code signing enforcement is disabled.
1340 */
1341
1342 return CSM_PREFIX(address_space_exempt)(pmap);
1343 }
1344
1345 kern_return_t
csm_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)1346 csm_fork_prepare(
1347 pmap_t old_pmap,
1348 pmap_t new_pmap)
1349 {
1350 if (csm_enabled() == false) {
1351 return KERN_NOT_SUPPORTED;
1352 }
1353
1354 return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
1355 }
1356
1357 kern_return_t
csm_acquire_signing_identifier(const void * monitor_sig_obj,const char ** signing_id)1358 csm_acquire_signing_identifier(
1359 const void *monitor_sig_obj,
1360 const char **signing_id)
1361 {
1362 if (csm_enabled() == false) {
1363 return KERN_NOT_SUPPORTED;
1364 }
1365
1366 return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
1367 }
1368
1369 kern_return_t
csm_associate_os_entitlements(void * monitor_sig_obj,const void * os_entitlements)1370 csm_associate_os_entitlements(
1371 void *monitor_sig_obj,
1372 const void *os_entitlements)
1373 {
1374 if (csm_enabled() == false) {
1375 return KERN_NOT_SUPPORTED;
1376 } else if (os_entitlements == NULL) {
1377 /* Not every signature has entitlements */
1378 return KERN_SUCCESS;
1379 }
1380
1381 return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
1382 }
1383
1384 kern_return_t
csm_accelerate_entitlements(void * monitor_sig_obj,CEQueryContext_t * ce_ctx)1385 csm_accelerate_entitlements(
1386 void *monitor_sig_obj,
1387 CEQueryContext_t *ce_ctx)
1388 {
1389 if (csm_enabled() == false) {
1390 return KERN_NOT_SUPPORTED;
1391 }
1392
1393 return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
1394 }
1395
1396 #endif /* CODE_SIGNING_MONITOR */
1397
1398 #pragma mark AppleImage4
/*
 * AppleImage4 uses the monitor environment to safeguard critical security data.
 * To keep the implementation simple, AppleImage4 always depends on these
 * abstracted APIs, regardless of whether the system has a monitor environment
 * or not.
 */
1405
1406 void*
kernel_image4_storage_data(size_t * allocated_size)1407 kernel_image4_storage_data(
1408 size_t *allocated_size)
1409 {
1410 return CSM_PREFIX(image4_storage_data)(allocated_size);
1411 }
1412
1413 void
kernel_image4_set_nonce(const img4_nonce_domain_index_t ndi,const img4_nonce_t * nonce)1414 kernel_image4_set_nonce(
1415 const img4_nonce_domain_index_t ndi,
1416 const img4_nonce_t *nonce)
1417 {
1418 return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
1419 }
1420
1421 void
kernel_image4_roll_nonce(const img4_nonce_domain_index_t ndi)1422 kernel_image4_roll_nonce(
1423 const img4_nonce_domain_index_t ndi)
1424 {
1425 return CSM_PREFIX(image4_roll_nonce)(ndi);
1426 }
1427
1428 errno_t
kernel_image4_copy_nonce(const img4_nonce_domain_index_t ndi,img4_nonce_t * nonce_out)1429 kernel_image4_copy_nonce(
1430 const img4_nonce_domain_index_t ndi,
1431 img4_nonce_t *nonce_out)
1432 {
1433 return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
1434 }
1435
1436 errno_t
kernel_image4_execute_object(img4_runtime_object_spec_index_t obj_spec_index,const img4_buff_t * payload,const img4_buff_t * manifest)1437 kernel_image4_execute_object(
1438 img4_runtime_object_spec_index_t obj_spec_index,
1439 const img4_buff_t *payload,
1440 const img4_buff_t *manifest)
1441 {
1442 return CSM_PREFIX(image4_execute_object)(
1443 obj_spec_index,
1444 payload,
1445 manifest);
1446 }
1447
1448 errno_t
kernel_image4_copy_object(img4_runtime_object_spec_index_t obj_spec_index,vm_address_t object_out,size_t * object_length)1449 kernel_image4_copy_object(
1450 img4_runtime_object_spec_index_t obj_spec_index,
1451 vm_address_t object_out,
1452 size_t *object_length)
1453 {
1454 return CSM_PREFIX(image4_copy_object)(
1455 obj_spec_index,
1456 object_out,
1457 object_length);
1458 }
1459
1460 const void*
kernel_image4_get_monitor_exports(void)1461 kernel_image4_get_monitor_exports(void)
1462 {
1463 return CSM_PREFIX(image4_get_monitor_exports)();
1464 }
1465
1466 errno_t
kernel_image4_set_release_type(const char * release_type)1467 kernel_image4_set_release_type(
1468 const char *release_type)
1469 {
1470 return CSM_PREFIX(image4_set_release_type)(release_type);
1471 }
1472
1473 errno_t
kernel_image4_set_bnch_shadow(const img4_nonce_domain_index_t ndi)1474 kernel_image4_set_bnch_shadow(
1475 const img4_nonce_domain_index_t ndi)
1476 {
1477 return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
1478 }
1479
1480 #pragma mark Image4 - New
1481
1482
1483
/*
 * Stage the payload and manifest buffers for an IMAGE4_CS_TRAP_IMAGE_ACTIVATE
 * trap into kernel allocations, transfer those regions to the monitor, and
 * perform the trap.
 *
 * Cleanup is order-sensitive: regions are only retyped (and hence need
 * reclaiming) once the transfer calls below have run, which can only happen
 * after BOTH allocations succeeded. The payload is reclaimed/freed only on
 * error; the manifest is always reclaimed and freed once allocated.
 */
static errno_t
_kernel_image4_monitor_trap_image_activate(
	image4_cs_trap_t selector,
	const void *input_data)
{
	/*
	 * csmx_payload (csmx_payload_len) --> __cs_xfer
	 * csmx_manifest (csmx_manifest_len) --> __cs_borrow
	 */
	image4_cs_trap_argv(image_activate) input = {0};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	errno_t err = EPERM;

	/* Copy the input data */
	memcpy(&input, input_data, sizeof(input));

	/* Stage the payload into a fresh kernel allocation */
	payload_addr = code_signing_allocate(input.csmx_payload_len);
	if (payload_addr == 0) {
		goto out;
	}
	memcpy((void*)payload_addr, (void*)input.csmx_payload, input.csmx_payload_len);

	/* Stage the manifest into a fresh kernel allocation */
	manifest_addr = code_signing_allocate(input.csmx_manifest_len);
	if (manifest_addr == 0) {
		goto out;
	}
	memcpy((void*)manifest_addr, (void*)input.csmx_manifest, input.csmx_manifest_len);

	/* Transfer both regions to the monitor */
	CSM_PREFIX(image4_transfer_region)(selector, payload_addr, input.csmx_payload_len);
	CSM_PREFIX(image4_transfer_region)(selector, manifest_addr, input.csmx_manifest_len);

	/* Setup the input with new addresses */
	input.csmx_payload = payload_addr;
	input.csmx_manifest = manifest_addr;

	/* Trap into the monitor for this selector */
	err = CSM_PREFIX(image4_monitor_trap)(selector, &input, sizeof(input));

out:
	if ((err != 0) && (payload_addr != 0)) {
		/* Retyping only happens after allocating the manifest */
		if (manifest_addr != 0) {
			CSM_PREFIX(image4_reclaim_region)(
				selector, payload_addr, input.csmx_payload_len);
		}
		code_signing_deallocate(&payload_addr, input.csmx_payload_len);
	}

	if (manifest_addr != 0) {
		/* Reclaim the manifest region -- will be retyped if not NULL */
		CSM_PREFIX(image4_reclaim_region)(
			selector, manifest_addr, input.csmx_manifest_len);

		/* Deallocate the manifest region */
		code_signing_deallocate(&manifest_addr, input.csmx_manifest_len);
	}

	return err;
}
1545
1546 static errno_t
_kernel_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size)1547 _kernel_image4_monitor_trap(
1548 image4_cs_trap_t selector,
1549 const void *input_data,
1550 size_t input_size)
1551 {
1552 /* Validate input size for the selector */
1553 if (input_size != image4_cs_trap_vector_size(selector)) {
1554 printf("image4 dispatch: invalid input: %llu | %lu\n", selector, input_size);
1555 return EINVAL;
1556 }
1557
1558 switch (selector) {
1559 case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1560 return _kernel_image4_monitor_trap_image_activate(selector, input_data);
1561
1562 default:
1563 return CSM_PREFIX(image4_monitor_trap)(selector, input_data, input_size);
1564 }
1565 }
1566
1567 errno_t
kernel_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size,__unused void * output_data,__unused size_t * output_size)1568 kernel_image4_monitor_trap(
1569 image4_cs_trap_t selector,
1570 const void *input_data,
1571 size_t input_size,
1572 __unused void *output_data,
1573 __unused size_t *output_size)
1574 {
1575 size_t length_check = 0;
1576
1577 /* Input data is always required */
1578 if ((input_data == NULL) || (input_size == 0)) {
1579 printf("image4 dispatch: no input data: %llu\n", selector);
1580 return EINVAL;
1581 } else if (os_add_overflow((vm_address_t)input_data, input_size, &length_check)) {
1582 panic("image4_ dispatch: overflow on input: %p | %lu", input_data, input_size);
1583 }
1584
1585 return _kernel_image4_monitor_trap(selector, input_data, input_size);
1586 }
1587