1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern_xnu.h>
27 #include <vm/pmap.h>
28 #include <vm/pmap_cs.h>
29 #include <vm/vm_map_xnu.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/locks.h>
34 #include <kern/lock_rw.h>
35 #include <libkern/libkern.h>
36 #include <libkern/section_keywords.h>
37 #include <libkern/coretrust/coretrust.h>
38 #include <pexpert/pexpert.h>
39 #include <sys/user.h>
40 #include <sys/vm.h>
41 #include <sys/proc.h>
42 #include <sys/proc_require.h>
43 #include <sys/codesign.h>
44 #include <sys/code_signing.h>
45 #include <sys/lockdown_mode.h>
46 #include <sys/reason.h>
47 #include <sys/kdebug_kernel.h>
48 #include <sys/kdebug_triage.h>
49 #include <sys/sysctl.h>
50 #include <uuid/uuid.h>
51 #include <IOKit/IOBSD.h>
52
53 #if CONFIG_SPTM
54 #include <sys/trusted_execution_monitor.h>
55 #endif
56
57 #if XNU_KERNEL_PRIVATE
58 vm_address_t
code_signing_allocate(size_t alloc_size)59 code_signing_allocate(
60 size_t alloc_size)
61 {
62 vm_address_t alloc_addr = 0;
63
64 if (alloc_size == 0) {
65 panic("%s: zero allocation size", __FUNCTION__);
66 }
67 size_t aligned_size = round_page(alloc_size);
68
69 kern_return_t ret = kmem_alloc(
70 kernel_map,
71 &alloc_addr, aligned_size,
72 KMA_KOBJECT | KMA_DATA | KMA_ZERO,
73 VM_KERN_MEMORY_SECURITY);
74
75 if (ret != KERN_SUCCESS) {
76 printf("%s: unable to allocate %lu bytes\n", __FUNCTION__, aligned_size);
77 } else if (alloc_addr == 0) {
78 printf("%s: invalid allocation\n", __FUNCTION__);
79 }
80
81 return alloc_addr;
82 }
83
84 void
code_signing_deallocate(vm_address_t * alloc_addr,size_t alloc_size)85 code_signing_deallocate(
86 vm_address_t *alloc_addr,
87 size_t alloc_size)
88 {
89 if (alloc_addr == NULL) {
90 panic("%s: invalid pointer provided", __FUNCTION__);
91 } else if ((*alloc_addr == 0) || ((*alloc_addr & PAGE_MASK) != 0)) {
92 panic("%s: address provided: %p", __FUNCTION__, (void*)(*alloc_addr));
93 } else if (alloc_size == 0) {
94 panic("%s: zero allocation size", __FUNCTION__);
95 }
96 size_t aligned_size = round_page(alloc_size);
97
98 /* Free the allocation */
99 kmem_free(kernel_map, *alloc_addr, aligned_size);
100
101 /* Clear the address */
102 *alloc_addr = 0;
103 }
104 #endif /* XNU_KERNEL_PRIVATE */
105
/* Sysctl tree: security.codesigning.* (all read-only) */
SYSCTL_DECL(_security);
SYSCTL_DECL(_security_codesigning);
SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");

/* Set (release-store) once cs_monitor and cs_config below have been computed */
static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
/* Code signing monitor type; resolved by code_signing_configuration() */
static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
/* Bitmask of enabled code signing features; resolved by code_signing_configuration() */
static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;
/* Set to the boot mode identifier by complete_security_boot_mode() */
static uint32_t security_boot_mode_complete = 0;

SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");

SYSCTL_UINT(
	_security_codesigning, OID_AUTO,
	security_boot_mode_complete, CTLFLAG_RD,
	&security_boot_mode_complete, 0, "security boot mode completion status");
122
/*
 * Compute (on first call) and return the system-wide code signing
 * configuration and the code signing monitor type. The first caller resolves
 * the configuration from compile-time monitor selection, TXM policy (when
 * CONFIG_SPTM), and DEVELOPMENT/DEBUG boot-args, then publishes it with
 * release semantics; later callers simply read the published values.
 *
 * @param monitor_type_out Optional; receives the monitor type if non-NULL.
 * @param config_out Optional; receives the feature bitmask if non-NULL.
 */
void
code_signing_configuration(
	code_signing_monitor_type_t *monitor_type_out,
	code_signing_config_t *config_out)
{
	code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
	code_signing_config_t config = 0;

	/*
	 * Since we read this variable with load-acquire semantics, if we observe a value
	 * of true, it means we should be able to observe writes to cs_monitor and also
	 * cs_config.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == true) {
		goto config_set;
	}

	/*
	 * Add support for all the code signing features. This function is called very
	 * early in the system boot, much before kernel extensions such as Apple Mobile
	 * File Integrity come online. As a result, this function assumes that all the
	 * code signing features are enabled, and later on, different components can
	 * disable support for different features using disable_code_signing_feature().
	 */
	config |= CS_CONFIG_MAP_JIT;
	config |= CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
	config |= CS_CONFIG_COMPILATION_SERVICE;
	config |= CS_CONFIG_LOCAL_SIGNING;
	config |= CS_CONFIG_OOP_JIT;

#if CODE_SIGNING_MONITOR
	/* Mark the code signing monitor as enabled if required */
	if (csm_enabled() == true) {
		config |= CS_CONFIG_CSM_ENABLED;
	}

#if CONFIG_SPTM
	/*
	 * Since TrustedExecutionMonitor cannot call into any function within XNU, we
	 * query it's code signing configuration even before this function is called.
	 * Using that, we modify the state of the code signing features available.
	 */
	if (csm_enabled() == true) {
		bool platform_code_only = txm_cs_config->systemPolicy->platformCodeOnly;

		/* Disable unsupported features when enforcing platform-code-only */
		if (platform_code_only == true) {
			config &= ~CS_CONFIG_MAP_JIT;
			config &= ~CS_CONFIG_COMPILATION_SERVICE;
			config &= ~CS_CONFIG_LOCAL_SIGNING;
			config &= ~CS_CONFIG_OOP_JIT;
		}

		/*
		 * Restricted Execution Mode support. The pattern for this code snippet breaks
		 * the norm compared to others. For the other features, we consider them enabled
		 * by default unless TXM disables them. For REM, given this is a TXM only feature,
		 * we consider it disabled unless TXM explicitly tells us it is enabled.
		 */
		if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
			config |= CS_CONFIG_REM_SUPPORTED;
		}

		/* MAP_JIT support */
		if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
			config &= ~CS_CONFIG_MAP_JIT;
		}

		/* Developer mode support */
		if (txm_cs_config->systemPolicy->featureSet.developerMode == false) {
			config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		}

		/* Compilation service support */
		if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
			config &= ~CS_CONFIG_COMPILATION_SERVICE;
		}

		/* Local signing support */
		if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
			config &= ~CS_CONFIG_LOCAL_SIGNING;
		}

		/* OOP-JIT support */
		if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
			config &= ~CS_CONFIG_OOP_JIT;
		}
	}
	monitor_type = CS_MONITOR_TYPE_TXM;
#elif PMAP_CS_PPL_MONITOR
	monitor_type = CS_MONITOR_TYPE_PPL;
#endif /* CONFIG_SPTM */
#endif /* CODE_SIGNING_MONITOR */

#if DEVELOPMENT || DEBUG
	/*
	 * We only ever need to parse for boot-args based exemption state on DEVELOPMENT
	 * or DEBUG builds as this state is not respected by any code signing component
	 * on RELEASE builds.
	 */

#define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
#define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
#define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80

	int amfi_mask = 0;
	int amfi_allow_any_signature = 0;
	int amfi_unrestrict_task_for_pid = 0;
	int amfi_get_out_of_my_way = 0;
	int cs_enforcement_disabled = 0;
	int cs_integrity_skip = 0;
	int amfi_relax_profile_trust = 0;
	int amfi_dev_mode_policy = 0;

	/* Parse the AMFI mask */
	PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));

	/* Parse the AMFI soft-bypass */
	PE_parse_boot_argn(
		"amfi_allow_any_signature",
		&amfi_allow_any_signature,
		sizeof(amfi_allow_any_signature));

	/* Parse the AMFI debug-bypass */
	PE_parse_boot_argn(
		"amfi_unrestrict_task_for_pid",
		&amfi_unrestrict_task_for_pid,
		sizeof(amfi_unrestrict_task_for_pid));

	/* Parse the AMFI hard-bypass */
	PE_parse_boot_argn(
		"amfi_get_out_of_my_way",
		&amfi_get_out_of_my_way,
		sizeof(amfi_get_out_of_my_way));

	/* Parse the system code signing hard-bypass */
	PE_parse_boot_argn(
		"cs_enforcement_disable",
		&cs_enforcement_disabled,
		sizeof(cs_enforcement_disabled));

	/* Parse the system code signing integrity-check bypass */
	PE_parse_boot_argn(
		"cs_integrity_skip",
		&cs_integrity_skip,
		sizeof(cs_integrity_skip));

	/* Parse the AMFI profile trust bypass */
	PE_parse_boot_argn(
		"amfi_relax_profile_trust",
		&amfi_relax_profile_trust,
		sizeof(amfi_relax_profile_trust));

	/* Parse the AMFI customer developer mode policy */
	PE_parse_boot_argn(
		"amfi_dev_mode_policy",
		&amfi_dev_mode_policy,
		sizeof(amfi_dev_mode_policy));

	/* CS_CONFIG_UNRESTRICTED_DEBUGGING */
	if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	} else if (amfi_unrestrict_task_for_pid) {
		config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
	}

	/* CS_CONFIG_ALLOW_ANY_SIGNATURE */
	if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_allow_any_signature) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
	}

	/* CS_CONFIG_ENFORCEMENT_DISABLED */
	if (cs_enforcement_disabled) {
		config |= CS_CONFIG_ENFORCEMENT_DISABLED;
	}

	/* CS_CONFIG_GET_OUT_OF_MY_WAY */
	if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (amfi_get_out_of_my_way) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	} else if (cs_enforcement_disabled) {
		config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
	}

	/* CS_CONFIG_INTEGRITY_SKIP */
	if (cs_integrity_skip) {
		config |= CS_CONFIG_INTEGRITY_SKIP;
	}

	/* CS_CONFIG_RELAX_PROFILE_TRUST */
	if (amfi_relax_profile_trust) {
		config |= CS_CONFIG_RELAX_PROFILE_TRUST;
	}

	/* CS_CONFIG_DEV_MODE_POLICY */
	if (amfi_dev_mode_policy) {
		config |= CS_CONFIG_DEV_MODE_POLICY;
	}

#if CONFIG_SPTM

	/* TXM's own exemption policy overrules the boot-args parsed above */
	if (csm_enabled() == true) {
		/* allow_any_signature */
		if (txm_cs_config->exemptions.allowAnySignature == false) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		/* unrestrict_task_for_pid */
		if (txm_ro_data && !txm_ro_data->exemptions.allowUnrestrictedDebugging) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		/* cs_enforcement_disable */
		if (txm_ro_data && !txm_ro_data->exemptions.allowModifiedCode) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}

		/* get_out_of_my_way (skip_trust_evaluation) */
		if (txm_cs_config->exemptions.skipTrustEvaluation == false) {
			config &= ~CS_CONFIG_GET_OUT_OF_MY_WAY;
		}

#if kTXMKernelAPIVersion >= 7
		/*
		 * In some cases, the relax_profile_trust exemption can be set even without
		 * the boot-arg on TXM devices. As a result, we always overrule the kernel's
		 * data with TXM's data for this exemption.
		 */
		if (txm_cs_config->exemptions.relaxProfileTrust == true) {
			config |= CS_CONFIG_RELAX_PROFILE_TRUST;
		} else {
			config &= ~CS_CONFIG_RELAX_PROFILE_TRUST;
		}
#endif
	}

#elif PMAP_CS_PPL_MONITOR

	/* PPL exemptions require an explicit boot-arg to have been provided */
	if (csm_enabled() == true) {
		int pmap_cs_allow_any_signature = 0;
		bool override = PE_parse_boot_argn(
			"pmap_cs_allow_any_signature",
			&pmap_cs_allow_any_signature,
			sizeof(pmap_cs_allow_any_signature));

		if (!pmap_cs_allow_any_signature && override) {
			config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
		}

		int pmap_cs_unrestrict_task_for_pid = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_unrestrict_pmap_cs_disable",
			&pmap_cs_unrestrict_task_for_pid,
			sizeof(pmap_cs_unrestrict_task_for_pid));

		if (!pmap_cs_unrestrict_task_for_pid && override) {
			config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
		}

		int pmap_cs_enforcement_disable = 0;
		override = PE_parse_boot_argn(
			"pmap_cs_allow_modified_code_pages",
			&pmap_cs_enforcement_disable,
			sizeof(pmap_cs_enforcement_disable));

		if (!pmap_cs_enforcement_disable && override) {
			config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
		}
	}

#endif /* CONFIG_SPTM */
#endif /* DEVELOPMENT || DEBUG */

	os_atomic_store(&cs_monitor, monitor_type, relaxed);
	os_atomic_store(&cs_config, config, relaxed);

	/*
	 * We write the cs_config_set variable with store-release semantics which means
	 * no writes before this call will be re-ordered to after this call. Hence, if
	 * someone reads this variable with load-acquire semantics, and they observe a
	 * value of true, then they will be able to observe the correct values of the
	 * cs_monitor and the cs_config variables as well.
	 */
	os_atomic_store(&cs_config_set, true, release);

config_set:
	/* Ensure configuration has been set */
	assert(os_atomic_load(&cs_config_set, relaxed) == true);

	/* Set the monitor type */
	if (monitor_type_out) {
		*monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
	}

	/* Set the configuration */
	if (config_out) {
		*config_out = os_atomic_load(&cs_config, relaxed);
	}
}
431
/*
 * Disable a single code signing feature previously enabled by default in
 * code_signing_configuration(). Panics if called before the configuration has
 * been initialized, or with anything other than exactly one of the supported
 * feature bits.
 *
 * @param feature Exactly one of CS_CONFIG_DEVELOPER_MODE_SUPPORTED,
 *        CS_CONFIG_COMPILATION_SERVICE, CS_CONFIG_LOCAL_SIGNING,
 *        CS_CONFIG_OOP_JIT, or CS_CONFIG_MAP_JIT.
 */
void
disable_code_signing_feature(
	code_signing_config_t feature)
{
	/*
	 * We require that this function be called only after the code signing config
	 * has been setup initially with a call to code_signing_configuration.
	 */
	if (os_atomic_load(&cs_config_set, acquire) == false) {
		panic("attempted to disable code signing feature without init: %u", feature);
	}

	/*
	 * We require that only a single feature be disabled through a single call to this
	 * function. Moreover, we ensure that only valid features are being disabled.
	 */
	switch (feature) {
	case CS_CONFIG_DEVELOPER_MODE_SUPPORTED:
		cs_config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
		break;

	case CS_CONFIG_COMPILATION_SERVICE:
		cs_config &= ~CS_CONFIG_COMPILATION_SERVICE;
		break;

	case CS_CONFIG_LOCAL_SIGNING:
		cs_config &= ~CS_CONFIG_LOCAL_SIGNING;
		break;

	case CS_CONFIG_OOP_JIT:
		cs_config &= ~CS_CONFIG_OOP_JIT;
		break;

	case CS_CONFIG_MAP_JIT:
		cs_config &= ~CS_CONFIG_MAP_JIT;
		break;

	default:
		panic("attempted to disable a code signing feature invalidly: %u", feature);
	}

	/* Ensure all readers can observe the latest data */
	/* Full hardware fence: readers load cs_config with relaxed ordering */
#if defined(__arm64__)
	__asm__ volatile ("dmb ish" ::: "memory");
#elif defined(__x86_64__)
	__asm__ volatile ("mfence" ::: "memory");
#else
#error "Unknown platform -- fence instruction unavailable"
#endif
}
482
483 kern_return_t
secure_channel_shared_page(uint64_t * secure_channel_phys,size_t * secure_channel_size)484 secure_channel_shared_page(
485 uint64_t *secure_channel_phys,
486 size_t *secure_channel_size)
487 {
488 return CSM_PREFIX(secure_channel_shared_page)(
489 secure_channel_phys,
490 secure_channel_size);
491 }
492
493 #pragma mark Developer Mode
494
495 void
enable_developer_mode(void)496 enable_developer_mode(void)
497 {
498 CSM_PREFIX(toggle_developer_mode)(true);
499 }
500
501 void
disable_developer_mode(void)502 disable_developer_mode(void)
503 {
504 CSM_PREFIX(toggle_developer_mode)(false);
505 }
506
507 bool
developer_mode_state(void)508 developer_mode_state(void)
509 {
510 /* Assume false if the pointer isn't setup */
511 if (developer_mode_enabled == NULL) {
512 return false;
513 }
514
515 return os_atomic_load(developer_mode_enabled, relaxed);
516 }
517
518 #pragma mark Research Mode
519
/*
 * Research mode flags; read by research_mode_state() and
 * extended_research_mode_state(), which additionally gate on
 * allow_research_modes(). Presumably set during early boot -- TODO confirm.
 */
SECURITY_READ_ONLY_LATE(bool) research_mode_enabled = false;
SECURITY_READ_ONLY_LATE(bool) extended_research_mode_enabled = false;
522
523 bool
research_mode_state(void)524 research_mode_state(void)
525 {
526 if (allow_research_modes() == true) {
527 return research_mode_enabled;
528 }
529 return false;
530 }
531
532 bool
extended_research_mode_state(void)533 extended_research_mode_state(void)
534 {
535 if (allow_research_modes() == true) {
536 return extended_research_mode_enabled;
537 }
538 return false;
539 }
540
541 #pragma mark Restricted Execution Mode
542
543 kern_return_t
restricted_execution_mode_enable(void)544 restricted_execution_mode_enable(void)
545 {
546 return CSM_PREFIX(rem_enable)();
547 }
548
549 kern_return_t
restricted_execution_mode_state(void)550 restricted_execution_mode_state(void)
551 {
552 return CSM_PREFIX(rem_state)();
553 }
554
/* Push the latest device state down into the code signing monitor. */
void
update_csm_device_state(void)
{
	CSM_PREFIX(update_device_state)();
}
560
561 void
complete_security_boot_mode(uint32_t security_boot_mode)562 complete_security_boot_mode(
563 uint32_t security_boot_mode)
564 {
565 CSM_PREFIX(complete_security_boot_mode)(security_boot_mode);
566
567 /*
568 * If we're reach here, it means the completion of the security boot mode was
569 * successful. We update our sysctl with the provided boot mode in order to
570 * signify both completion and the boot mode identifier.
571 */
572 security_boot_mode_complete = security_boot_mode;
573 }
574
575 #pragma mark Provisioning Profiles
576 /*
577 * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
578 * profiles when we have a monitor since the monitor needs to independently verify the
579 * profile data as well.
580 */
581
/*
 * Run a garbage collection pass over registered provisioning profiles. Only
 * meaningful with a code signing monitor; without one, AMFI manages profiles
 * itself and this is a no-op.
 */
void
garbage_collect_provisioning_profiles(void)
{
#if CODE_SIGNING_MONITOR
	csm_free_provisioning_profiles();
#endif
}
589
590 #if CODE_SIGNING_MONITOR
591
/* Structure used to maintain the set of registered profiles on the system */
typedef struct _cs_profile {
	/* The UUID of the registered profile */
	uuid_t profile_uuid;

	/* The profile validation object from the monitor */
	void *profile_obj;

	/*
	 * In order to minimize the number of times the same profile would need to be
	 * registered, we allow frequently used profiles to skip the garbage collector
	 * for one pass.
	 */
	bool skip_collector;

	/* We skip repeated trust validations of the profile */
	bool trusted;

	/* Linked list linkage */
	SLIST_ENTRY(_cs_profile) link;
} cs_profile_t;

/* Linked list head for registered profiles */
static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);

/*
 * Lock for the provisioning profiles; guards all_profiles and the wrapper
 * objects. Initialized in csm_initialize_provisioning_profiles().
 */
LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
decl_lck_rw_data(, profiles_lock);
620
621 void
csm_initialize_provisioning_profiles(void)622 csm_initialize_provisioning_profiles(void)
623 {
624 /* Ensure the CoreTrust kernel extension has loaded */
625 if (coretrust == NULL) {
626 panic("coretrust interface not available");
627 }
628
629 /* Initialize the provisoning profiles lock */
630 lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
631 printf("initialized XNU provisioning profile data\n");
632
633 #if PMAP_CS_PPL_MONITOR
634 pmap_initialize_provisioning_profiles();
635 #endif
636 }
637
638 static cs_profile_t*
search_for_profile_uuid(const uuid_t profile_uuid)639 search_for_profile_uuid(
640 const uuid_t profile_uuid)
641 {
642 cs_profile_t *profile = NULL;
643
644 /* Caller is required to acquire the lock */
645 lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);
646
647 SLIST_FOREACH(profile, &all_profiles, link) {
648 if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
649 return profile;
650 }
651 }
652
653 return NULL;
654 }
655
/*
 * Register a provisioning profile with the code signing monitor and track it
 * in the kernel's registered-profile set, keyed by UUID.
 *
 * @param profile_uuid UUID used to de-duplicate registrations.
 * @param profile_blob Raw profile data handed to the monitor.
 * @param profile_blob_size Size of the raw profile data in bytes.
 *
 * Returns KERN_NOT_SUPPORTED when no monitor is enabled, KERN_ALREADY_IN_SET
 * for a duplicate UUID, KERN_SUCCESS on registration, or the monitor's error.
 */
kern_return_t
csm_register_provisioning_profile(
	const uuid_t profile_uuid,
	const void *profile_blob,
	const size_t profile_blob_size)
{
	cs_profile_t *profile = NULL;
	void *monitor_profile_obj = NULL;
	kern_return_t ret = KERN_DENIED;

	/* Only proceed if code-signing-monitor is enabled */
	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Allocate storage for the profile wrapper object */
	profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
	assert(profile != NULL);

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Check to make sure this isn't a duplicate UUID */
	cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
	if (dup_profile != NULL) {
		/* This profile might be used soon -- skip garbage collector */
		dup_profile->skip_collector = true;

		ret = KERN_ALREADY_IN_SET;
		goto exit;
	}

	/* Hand the raw blob to the monitor for independent verification */
	ret = CSM_PREFIX(register_provisioning_profile)(
		profile_blob,
		profile_blob_size,
		&monitor_profile_obj);

	if (ret == KERN_SUCCESS) {
		/* Copy in the profile UUID */
		uuid_copy(profile->profile_uuid, profile_uuid);

		/* Setup the monitor's profile object */
		profile->profile_obj = monitor_profile_obj;

		/* This profile might be used soon -- skip garbage collector */
		profile->skip_collector = true;

		/* Insert at the head of the profile set */
		SLIST_INSERT_HEAD(&all_profiles, profile, link);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		/* Free the profile wrapper object (it was never inserted) */
		kfree_type(cs_profile_t, profile);
		profile = NULL;

		if (ret != KERN_ALREADY_IN_SET) {
			printf("unable to register profile with monitor: %d\n", ret);
		}
	}

	return ret;
}
723
/*
 * Validate trust for a previously registered provisioning profile using the
 * supplied signature, caching a successful result so repeated calls
 * short-circuit. Returns KERN_NOT_FOUND when the UUID was never registered.
 *
 * @param profile_uuid UUID of a profile previously registered through
 *        csm_register_provisioning_profile().
 * @param sig_data Signature data handed to the monitor for validation.
 * @param sig_size Size of the signature data in bytes.
 */
kern_return_t
csm_trust_provisioning_profile(
	const uuid_t profile_uuid,
	const void *sig_data,
	size_t sig_size)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_NOT_FOUND;

	/*
	 * We don't explicitly make a check here for if the code-signing-monitor is enabled
	 * or not because this function should never be called unless registration of the
	 * profile succeeded, which it won't in cases where the CSM is disabled.
	 *
	 * If this function does somehow get called, it'll result in a panic -- this is good
	 * for us to detect and to fix the code path which results in this behavior.
	 */

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Search for the registered profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		goto exit;
	} else if (profile->trusted == true) {
		/* Trust already established on a previous call */
		ret = KERN_SUCCESS;
		goto exit;
	}

	ret = CSM_PREFIX(trust_provisioning_profile)(
		profile->profile_obj,
		sig_data,
		sig_size);

	/* Mark profile as trusted if needed */
	if (ret == KERN_SUCCESS) {
		profile->trusted = true;
	} else {
		printf("unable to trust profile with monitor: %d\n", ret);
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);

	return ret;
}
772
/*
 * Associate a registered provisioning profile with a monitor signature
 * object. Returns KERN_NOT_FOUND when the UUID was never registered, else
 * the monitor's result.
 *
 * @param monitor_sig_obj The monitor's signature object to associate with.
 * @param profile_uuid UUID of a previously registered profile.
 */
kern_return_t
csm_associate_provisioning_profile(
	void *monitor_sig_obj,
	const uuid_t profile_uuid)
{
	cs_profile_t *profile = NULL;
	kern_return_t ret = KERN_DENIED;

	/*
	 * We don't explicitly make a check here for if the code-signing-monitor is enabled
	 * or not because this function should never be called unless registration of the
	 * profile succeeded, which it won't in cases where the CSM is disabled.
	 *
	 * If this function does somehow get called, it'll result in a panic -- this is good
	 * for us to detect and to fix the code path which results in this behavior.
	 */

	/* Lock the profile set as shared */
	lck_rw_lock_shared(&profiles_lock);

	/* Search for the provisioning profile */
	profile = search_for_profile_uuid(profile_uuid);
	if (profile == NULL) {
		ret = KERN_NOT_FOUND;
		goto exit;
	}

	ret = CSM_PREFIX(associate_provisioning_profile)(
		monitor_sig_obj,
		profile->profile_obj);

	if (ret == KERN_SUCCESS) {
		/*
		 * This seems like an active profile -- let it skip the garbage collector on
		 * the next pass. We can modify this field even though we've only taken a shared
		 * lock as in this case we're always setting it to a fixed value.
		 */
		profile->skip_collector = true;
	}

exit:
	/* Unlock the profile set */
	lck_rw_unlock_shared(&profiles_lock);

	if (ret != KERN_SUCCESS) {
		printf("unable to associate profile: %d\n", ret);
	}
	return ret;
}
822
823 kern_return_t
csm_disassociate_provisioning_profile(void * monitor_sig_obj)824 csm_disassociate_provisioning_profile(
825 void *monitor_sig_obj)
826 {
827 kern_return_t ret = KERN_DENIED;
828
829 if (csm_enabled() == false) {
830 return KERN_NOT_SUPPORTED;
831 }
832
833 /* Call out to the monitor */
834 ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);
835
836 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
837 printf("unable to disassociate profile: %d\n", ret);
838 }
839 return ret;
840 }
841
842 static kern_return_t
unregister_provisioning_profile(cs_profile_t * profile)843 unregister_provisioning_profile(
844 cs_profile_t *profile)
845 {
846 kern_return_t ret = KERN_DENIED;
847
848 /* Call out to the monitor */
849 ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);
850
851 /*
852 * KERN_FAILURE represents the case when the unregistration failed because the
853 * monitor noted that the profile was still being used. Other than that, there
854 * is no other error expected out of this interface. In fact, there is no easy
855 * way to deal with other errors, as the profile state may be corrupted. If we
856 * see a different error, then we panic.
857 */
858 if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
859 panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
860 }
861
862 return ret;
863 }
864
/*
 * Garbage collection pass over the registered profile set. Profiles marked
 * skip_collector survive exactly one pass (the mark is cleared); all others
 * are unregistered from the monitor and freed when the monitor reports they
 * are no longer in use.
 */
void
csm_free_provisioning_profiles(void)
{
	kern_return_t ret = KERN_DENIED;
	cs_profile_t *profile = NULL;
	cs_profile_t *temp_profile = NULL;

	/* Lock the profile set exclusively */
	lck_rw_lock_exclusive(&profiles_lock);

	/* Safe iteration: entries may be removed while walking the list */
	SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
		if (profile->skip_collector == true) {
			/* Recently used -- spare it this pass, but not the next */
			profile->skip_collector = false;
			continue;
		}

		/* Attempt to unregister this profile from the system */
		ret = unregister_provisioning_profile(profile);
		if (ret == KERN_SUCCESS) {
			/* Remove the profile from the profile set */
			SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);

			/* Free the memory consumed for the profile wrapper object */
			kfree_type(cs_profile_t, profile);
			profile = NULL;
		}
	}

	/* Unlock the profile set */
	lck_rw_unlock_exclusive(&profiles_lock);
}
896
897 #endif /* CODE_SIGNING_MONITOR */
898
899 #pragma mark Code Signing
900 /*
901 * AMFI performs full signature validation by itself. For some things, AMFI uses XNU in
902 * order to abstract away the underlying implementation for data storage, but for most of
903 * these, AMFI doesn't directly interact with them, and they're only required when we have
904 * a code signing monitor on the system.
905 */
906
/*
 * Record the CDHash allowed to act as the compilation service; forwarded to
 * the code signing monitor (or its kernel-side stand-in via CSM_PREFIX).
 *
 * @param cdhash CDHash of the compilation service code.
 */
void
set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
}
913
914 bool
match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])915 match_compilation_service_cdhash(
916 const uint8_t cdhash[CS_CDHASH_LEN])
917 {
918 return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
919 }
920
/*
 * Record the public key used to verify locally signed code; forwarded to the
 * code signing monitor layer.
 *
 * @param public_key Raw public key bytes (XNU_LOCAL_SIGNING_KEY_SIZE long).
 */
void
set_local_signing_public_key(
	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
{
	CSM_PREFIX(set_local_signing_public_key)(public_key);
}
927
928 uint8_t*
get_local_signing_public_key(void)929 get_local_signing_public_key(void)
930 {
931 return CSM_PREFIX(get_local_signing_public_key)();
932 }
933
/*
 * Allow code with the given locally signed CDHash to run unrestricted.
 * No-op without a code signing monitor.
 *
 * @param cdhash CDHash of the locally signed code to unrestrict.
 */
void
unrestrict_local_signing_cdhash(
	__unused const uint8_t cdhash[CS_CDHASH_LEN])
{
	/*
	 * Since AMFI manages code signing on its own, we only need to unrestrict the
	 * local signing cdhash when we have a monitor environment.
	 */

#if CODE_SIGNING_MONITOR
	CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
#endif
}
947
/*
 * Kernel-debugger (KDP) helper: query the code-signing trust level for the
 * given address space. Returns KERN_NOT_SUPPORTED when the kernel was built
 * without a code signing monitor.
 *
 * @param pmap Address space being queried.
 * @param trust_level Receives the trust level on success.
 */
kern_return_t
get_trust_level_kdp(
	__unused pmap_t pmap,
	__unused uint32_t *trust_level)
{
#if CODE_SIGNING_MONITOR
	return csm_get_trust_level_kdp(pmap, trust_level);
#else
	return KERN_NOT_SUPPORTED;
#endif
}
959
/*
 * Kernel-debugger (KDP) helper: query the JIT region bounds for the given
 * address space. Returns KERN_NOT_SUPPORTED when the kernel was built
 * without a code signing monitor.
 *
 * @param pmap Address space being queried.
 * @param jit_region_start Receives the start of the JIT region on success.
 * @param jit_region_end Receives the end of the JIT region on success.
 */
kern_return_t
get_jit_address_range_kdp(
	__unused pmap_t pmap,
	__unused uintptr_t *jit_region_start,
	__unused uintptr_t *jit_region_end)
{
#if CODE_SIGNING_MONITOR
	return csm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
#else
	return KERN_NOT_SUPPORTED;
#endif
}
972
/*
 * Resolve the monitor's kernel-visible entitlements object for a process by
 * walking proc -> task -> vm_map -> pmap and asking the monitor. Returns
 * KERN_NOT_SUPPORTED without an enabled monitor, KERN_NOT_FOUND when the
 * task or map cannot be obtained, else the monitor's result.
 *
 * @param process Process whose entitlements should be resolved; must come
 *        from the proc_task zone (enforced via proc_require).
 * @param os_entitlements Receives the monitor's entitlements object.
 */
kern_return_t
csm_resolve_os_entitlements_from_proc(
	__unused const proc_t process,
	__unused const void **os_entitlements)
{
#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;
	kern_return_t ret = KERN_DENIED;

	if (csm_enabled() == false) {
		return KERN_NOT_SUPPORTED;
	}

	/* Ensure the process comes from the proc_task zone */
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Acquire the task from the proc */
	task = proc_task(process);
	if (task == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the virtual memory map from the task -- takes a reference on it */
	task_map = get_task_map_reference(task);
	if (task_map == NULL) {
		return KERN_NOT_FOUND;
	}

	/* Acquire the pmap from the virtual memory map */
	task_pmap = vm_map_get_pmap(task_map);
	assert(task_pmap != NULL);

	/* Call into the monitor to resolve the entitlements */
	ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);

	/* Release the reference on the virtual memory map (taken above) */
	vm_map_deallocate(task_map);

	return ret;
#else
	return KERN_NOT_SUPPORTED;
#endif
}
1018
/*
 * Determine whether a process' address space is considered debugged (i.e.
 * allowed to contain invalid code).
 *
 * Developer mode is a hard prerequisite. When a code signing monitor is
 * active, the monitor's view of the address space is authoritative;
 * otherwise the process' read-only csflags are consulted.
 *
 * @param process proc to query (must not be NULL)
 * @return KERN_SUCCESS when the address space is debugged, KERN_DENIED
 *         otherwise, KERN_NOT_FOUND when the task or map cannot be obtained.
 */
kern_return_t
address_space_debugged_state(
	const proc_t process)
{
	/* Must pass in a valid proc_t */
	if (process == NULL) {
		printf("%s: provided a NULL process\n", __FUNCTION__);
		return KERN_DENIED;
	}
	proc_require(process, PROC_REQUIRE_ALLOW_ALL);

	/* Developer mode must always be enabled for this to return successfully */
	if (developer_mode_state() == false) {
		return KERN_DENIED;
	}

#if CODE_SIGNING_MONITOR
	task_t task = NULL;
	vm_map_t task_map = NULL;
	pmap_t task_pmap = NULL;

	if (csm_enabled() == true) {
		/* Acquire the task from the proc */
		task = proc_task(process);
		if (task == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the virtual memory map from the task -- takes a reference on it */
		task_map = get_task_map_reference(task);
		if (task_map == NULL) {
			return KERN_NOT_FOUND;
		}

		/* Acquire the pmap from the virtual memory map */
		task_pmap = vm_map_get_pmap(task_map);
		assert(task_pmap != NULL);

		/* Acquire the state from the monitor -- authoritative when enabled */
		kern_return_t ret = CSM_PREFIX(address_space_debugged)(task_pmap);

		/* Release the reference on the virtual memory map */
		vm_map_deallocate(task_map);

		return ret;
	}
#endif /* CODE_SIGNING_MONITOR */

	/* Check read-only process flags for state */
	if (proc_getcsflags(process) & CS_DEBUGGED) {
		return KERN_SUCCESS;
	}

#if XNU_TARGET_OS_OSX
	/*
	 * For macOS systems only, we allow the execution of unsigned code. On Intel, code
	 * doesn't need to be signed, and on ASi, Rosetta binaries don't need to be signed.
	 * In these cases, we return successfully from this function because we don't know
	 * what else we can do.
	 */
	if ((proc_getcsflags(process) & CS_SIGNED) == 0) {
		return KERN_SUCCESS;
	}
#endif

	return KERN_DENIED;
}
1086
1087 bool
is_address_space_debugged(const proc_t process)1088 is_address_space_debugged(const proc_t process)
1089 {
1090 return address_space_debugged_state(process) == KERN_SUCCESS;
1091 }
1092
1093 #if CODE_SIGNING_MONITOR
1094
/*
 * Query whether the code signing monitor is actively enforcing code signing.
 * Thin forwarder to the monitor-specific implementation.
 */
bool
csm_enabled(void)
{
	return CSM_PREFIX(code_signing_enabled)();
}
1100
/*
 * Return the maximum code signature size the monitor is willing to manage.
 * Thin forwarder to the monitor-specific implementation.
 */
vm_size_t
csm_signature_size_limit(void)
{
	return CSM_PREFIX(managed_code_signature_size)();
}
1106
/*
 * Apply lockdown mode policy to the code signing subsystem. When lockdown
 * mode is enabled, the monitor is informed, and dynamic-code features
 * (MAP_JIT, OOP-JIT, local signing, compilation service) are disabled --
 * based on the TXM system policy on SPTM systems, or on static defaults
 * otherwise.
 */
void
csm_check_lockdown_mode(void)
{
	/* Nothing to do when lockdown mode is off */
	if (get_lockdown_mode_state() == 0) {
		return;
	}

	/* Inform the code signing monitor about lockdown mode */
	CSM_PREFIX(enter_lockdown_mode)();

#if CONFIG_SPTM
	/* MAP_JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}

	/* Compilation service lockdown */
	if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
		disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
	}

	/* Local signing lockdown */
	if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
		disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	}

	/* OOP-JIT lockdown */
	if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
		disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	}
#else
	/*
	 * Lockdown mode is supposed to disable all forms of JIT on the system. For now,
	 * we leave JIT enabled by default until some blockers are resolved. The way this
	 * code is written, we don't need to change anything once we enforce MAP_JIT to
	 * be disabled for lockdown mode.
	 */
	if (ppl_lockdown_mode_enforce_jit == true) {
		disable_code_signing_feature(CS_CONFIG_MAP_JIT);
	}
	disable_code_signing_feature(CS_CONFIG_OOP_JIT);
	disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
	disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
#endif /* CONFIG_SPTM */
}
1152
/*
 * Handle a code signing violation within a process' address space: log the
 * faulting address and force-exit the process with a code-signing Mach
 * exception so a crash report is generated for triage.
 *
 * @param proc the offending process -- currently must be current_proc()
 * @param addr the faulting user address
 */
void
csm_code_signing_violation(
	proc_t proc,
	vm_offset_t addr)
{
	/* No enforcement if code-signing-monitor is disabled */
	if (csm_enabled() == false) {
		return;
	}

	/* Leave a log for triage purposes */
	printf("[%s] code-signing-violation at %p\n", proc_best_name(proc), (void*)addr);

	/*
	 * For now, the only input into this function is from current_proc(), so using current_thread()
	 * over here is alright. If this function ever gets called from another location, we need to
	 * then change where we get the user thread from.
	 */
	assert(proc == current_proc());

	/*
	 * Force exit the process and set it to allow generating crash reports, which is critical
	 * for better triaging these issues.
	 */

	exception_info_t info = {
		.os_reason = OS_REASON_CODESIGNING,
		.exception_type = EXC_BAD_ACCESS,
		.mx_code = CODESIGNING_EXIT_REASON_INVALID_PAGE,
		/* Strip any PAC/TBI bits so the report shows the canonical address */
		.mx_subcode = VM_USER_STRIP_PTR(addr),
		.kt_info.kt_subsys = KDBG_TRIAGE_SUBSYS_VM,
		.kt_info.kt_error = KDBG_TRIAGE_VM_CODE_SIGNING
	};

	exit_with_mach_exception(proc, info, PX_KTRIAGE);
}
1189
1190 kern_return_t
csm_register_code_signature(const vm_address_t signature_addr,const vm_size_t signature_size,const vm_offset_t code_directory_offset,const char * signature_path,void ** monitor_sig_obj,vm_address_t * monitor_signature_addr)1191 csm_register_code_signature(
1192 const vm_address_t signature_addr,
1193 const vm_size_t signature_size,
1194 const vm_offset_t code_directory_offset,
1195 const char *signature_path,
1196 void **monitor_sig_obj,
1197 vm_address_t *monitor_signature_addr)
1198 {
1199 if (csm_enabled() == false) {
1200 return KERN_NOT_SUPPORTED;
1201 }
1202
1203 return CSM_PREFIX(register_code_signature)(
1204 signature_addr,
1205 signature_size,
1206 code_directory_offset,
1207 signature_path,
1208 monitor_sig_obj,
1209 monitor_signature_addr);
1210 }
1211
1212 kern_return_t
csm_unregister_code_signature(void * monitor_sig_obj)1213 csm_unregister_code_signature(
1214 void *monitor_sig_obj)
1215 {
1216 if (csm_enabled() == false) {
1217 return KERN_NOT_SUPPORTED;
1218 }
1219
1220 return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
1221 }
1222
1223 kern_return_t
csm_verify_code_signature(void * monitor_sig_obj,uint32_t * trust_level)1224 csm_verify_code_signature(
1225 void *monitor_sig_obj,
1226 uint32_t *trust_level)
1227 {
1228 if (csm_enabled() == false) {
1229 return KERN_NOT_SUPPORTED;
1230 }
1231
1232 return CSM_PREFIX(verify_code_signature)(monitor_sig_obj, trust_level);
1233 }
1234
1235 kern_return_t
csm_reconstitute_code_signature(void * monitor_sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)1236 csm_reconstitute_code_signature(
1237 void *monitor_sig_obj,
1238 vm_address_t *unneeded_addr,
1239 vm_size_t *unneeded_size)
1240 {
1241 if (csm_enabled() == false) {
1242 return KERN_NOT_SUPPORTED;
1243 }
1244
1245 return CSM_PREFIX(reconstitute_code_signature)(
1246 monitor_sig_obj,
1247 unneeded_addr,
1248 unneeded_size);
1249 }
1250
1251 kern_return_t
csm_setup_nested_address_space(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)1252 csm_setup_nested_address_space(
1253 pmap_t pmap,
1254 const vm_address_t region_addr,
1255 const vm_size_t region_size)
1256 {
1257 return CSM_PREFIX(setup_nested_address_space)(
1258 pmap,
1259 region_addr,
1260 region_size);
1261 }
1262
1263 kern_return_t
csm_associate_code_signature(pmap_t monitor_pmap,void * monitor_sig_obj,const vm_address_t region_addr,const vm_size_t region_size,const vm_offset_t region_offset)1264 csm_associate_code_signature(
1265 pmap_t monitor_pmap,
1266 void *monitor_sig_obj,
1267 const vm_address_t region_addr,
1268 const vm_size_t region_size,
1269 const vm_offset_t region_offset)
1270 {
1271 if (csm_enabled() == false) {
1272 return KERN_NOT_SUPPORTED;
1273 }
1274
1275 return CSM_PREFIX(associate_code_signature)(
1276 monitor_pmap,
1277 monitor_sig_obj,
1278 region_addr,
1279 region_size,
1280 region_offset);
1281 }
1282
1283 kern_return_t
csm_allow_jit_region(pmap_t monitor_pmap)1284 csm_allow_jit_region(
1285 pmap_t monitor_pmap)
1286 {
1287 if (csm_enabled() == false) {
1288 return KERN_SUCCESS;
1289 } else if (monitor_pmap == NULL) {
1290 return KERN_DENIED;
1291 }
1292
1293 kern_return_t ret = CSM_PREFIX(allow_jit_region)(monitor_pmap);
1294 if (ret == KERN_NOT_SUPPORTED) {
1295 /*
1296 * Some monitor environments do not support this API and as a result will
1297 * return KERN_NOT_SUPPORTED. The caller here should not interpret that as
1298 * a failure.
1299 */
1300 ret = KERN_SUCCESS;
1301 }
1302
1303 return ret;
1304 }
1305
1306 kern_return_t
csm_associate_jit_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)1307 csm_associate_jit_region(
1308 pmap_t monitor_pmap,
1309 const vm_address_t region_addr,
1310 const vm_size_t region_size)
1311 {
1312 if (csm_enabled() == false) {
1313 return KERN_NOT_SUPPORTED;
1314 }
1315
1316 return CSM_PREFIX(associate_jit_region)(
1317 monitor_pmap,
1318 region_addr,
1319 region_size);
1320 }
1321
1322 kern_return_t
csm_associate_debug_region(pmap_t monitor_pmap,const vm_address_t region_addr,const vm_size_t region_size)1323 csm_associate_debug_region(
1324 pmap_t monitor_pmap,
1325 const vm_address_t region_addr,
1326 const vm_size_t region_size)
1327 {
1328 if (csm_enabled() == false) {
1329 return KERN_NOT_SUPPORTED;
1330 }
1331
1332 kern_return_t ret = CSM_PREFIX(associate_debug_region)(
1333 monitor_pmap,
1334 region_addr,
1335 region_size);
1336
1337 if (ret != KERN_SUCCESS) {
1338 printf("unable to create debug region in address space: %d\n", ret);
1339 }
1340 return ret;
1341 }
1342
1343 kern_return_t
csm_allow_invalid_code(pmap_t pmap)1344 csm_allow_invalid_code(
1345 pmap_t pmap)
1346 {
1347 if (csm_enabled() == false) {
1348 return KERN_NOT_SUPPORTED;
1349 }
1350
1351 return CSM_PREFIX(allow_invalid_code)(pmap);
1352 }
1353
1354 kern_return_t
csm_get_trust_level_kdp(pmap_t pmap,uint32_t * trust_level)1355 csm_get_trust_level_kdp(
1356 pmap_t pmap,
1357 uint32_t *trust_level)
1358 {
1359 if (csm_enabled() == false) {
1360 return KERN_NOT_SUPPORTED;
1361 }
1362
1363 return CSM_PREFIX(get_trust_level_kdp)(pmap, trust_level);
1364 }
1365
1366 kern_return_t
csm_get_jit_address_range_kdp(pmap_t pmap,uintptr_t * jit_region_start,uintptr_t * jit_region_end)1367 csm_get_jit_address_range_kdp(
1368 pmap_t pmap,
1369 uintptr_t *jit_region_start,
1370 uintptr_t *jit_region_end)
1371 {
1372 if (csm_enabled() == false) {
1373 return KERN_NOT_SUPPORTED;
1374 }
1375
1376 return CSM_PREFIX(get_jit_address_range_kdp)(pmap, jit_region_start, jit_region_end);
1377 }
1378
/*
 * Query whether an address space is exempt from code signing enforcement.
 * Note there is deliberately no csm_enabled() guard here -- see below.
 *
 * @return the monitor's exemption verdict for the pmap.
 */
kern_return_t
csm_address_space_exempt(
	const pmap_t pmap)
{
	/*
	 * These exemptions are actually orthogonal to the code signing enforcement. As
	 * a result, we let each monitor explicitly decide how to deal with the exemption
	 * in case code signing enforcement is disabled.
	 */

	return CSM_PREFIX(address_space_exempt)(pmap);
}
1391
1392 kern_return_t
csm_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)1393 csm_fork_prepare(
1394 pmap_t old_pmap,
1395 pmap_t new_pmap)
1396 {
1397 if (csm_enabled() == false) {
1398 return KERN_NOT_SUPPORTED;
1399 }
1400
1401 return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
1402 }
1403
1404 kern_return_t
csm_acquire_signing_identifier(const void * monitor_sig_obj,const char ** signing_id)1405 csm_acquire_signing_identifier(
1406 const void *monitor_sig_obj,
1407 const char **signing_id)
1408 {
1409 if (csm_enabled() == false) {
1410 return KERN_NOT_SUPPORTED;
1411 }
1412
1413 return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
1414 }
1415
1416 kern_return_t
csm_associate_os_entitlements(void * monitor_sig_obj,const void * os_entitlements)1417 csm_associate_os_entitlements(
1418 void *monitor_sig_obj,
1419 const void *os_entitlements)
1420 {
1421 if (csm_enabled() == false) {
1422 return KERN_NOT_SUPPORTED;
1423 } else if (os_entitlements == NULL) {
1424 /* Not every signature has entitlements */
1425 return KERN_SUCCESS;
1426 }
1427
1428 return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
1429 }
1430
1431 kern_return_t
csm_accelerate_entitlements(void * monitor_sig_obj,CEQueryContext_t * ce_ctx)1432 csm_accelerate_entitlements(
1433 void *monitor_sig_obj,
1434 CEQueryContext_t *ce_ctx)
1435 {
1436 if (csm_enabled() == false) {
1437 return KERN_NOT_SUPPORTED;
1438 }
1439
1440 return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
1441 }
1442
1443 #endif /* CODE_SIGNING_MONITOR */
1444
1445 #pragma mark AppleImage4
1446 /*
1447 * AppleImage4 uses the monitor environment to safeguard critical security data.
1448 * In order to ease the implementation specific, AppleImage4 always depends on these
1449 * abstracted APIs, regardless of whether the system has a monitor environment or
1450 * not.
1451 */
1452
/*
 * Return the backing storage buffer AppleImage4 should use, writing its
 * allocated size through allocated_size. Thin forwarder to the monitor
 * abstraction.
 */
void*
kernel_image4_storage_data(
	size_t *allocated_size)
{
	return CSM_PREFIX(image4_storage_data)(allocated_size);
}
1459
/*
 * Set the nonce value for the given Image4 nonce domain. Thin forwarder to
 * the monitor abstraction.
 */
void
kernel_image4_set_nonce(
	const img4_nonce_domain_index_t ndi,
	const img4_nonce_t *nonce)
{
	return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
}
1467
/*
 * Roll (invalidate and regenerate) the nonce for the given Image4 nonce
 * domain. Thin forwarder to the monitor abstraction.
 */
void
kernel_image4_roll_nonce(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_roll_nonce)(ndi);
}
1474
/*
 * Copy the current nonce for the given Image4 nonce domain into nonce_out.
 * Thin forwarder to the monitor abstraction.
 *
 * @return 0 on success, otherwise an errno-style error from the backend.
 */
errno_t
kernel_image4_copy_nonce(
	const img4_nonce_domain_index_t ndi,
	img4_nonce_t *nonce_out)
{
	return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
}
1482
/*
 * Evaluate and execute an Image4 runtime object (payload + manifest) against
 * the given object specification. Thin forwarder to the monitor abstraction.
 *
 * @return 0 on success, otherwise an errno-style error from the backend.
 */
errno_t
kernel_image4_execute_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	const img4_buff_t *payload,
	const img4_buff_t *manifest)
{
	return CSM_PREFIX(image4_execute_object)(
		obj_spec_index,
		payload,
		manifest);
}
1494
/*
 * Copy a previously executed Image4 runtime object into the caller's buffer
 * at object_out, writing its length through object_length. Thin forwarder to
 * the monitor abstraction.
 *
 * @return 0 on success, otherwise an errno-style error from the backend.
 */
errno_t
kernel_image4_copy_object(
	img4_runtime_object_spec_index_t obj_spec_index,
	vm_address_t object_out,
	size_t *object_length)
{
	return CSM_PREFIX(image4_copy_object)(
		obj_spec_index,
		object_out,
		object_length);
}
1506
/*
 * Return the monitor's Image4 export structure. Thin forwarder to the
 * monitor abstraction.
 */
const void*
kernel_image4_get_monitor_exports(void)
{
	return CSM_PREFIX(image4_get_monitor_exports)();
}
1512
/*
 * Inform the Image4 backend of the system's release type string. Thin
 * forwarder to the monitor abstraction.
 *
 * @return 0 on success, otherwise an errno-style error from the backend.
 */
errno_t
kernel_image4_set_release_type(
	const char *release_type)
{
	return CSM_PREFIX(image4_set_release_type)(release_type);
}
1519
/*
 * Set the BNCH (boot nonce hash) shadow for the given Image4 nonce domain.
 * Thin forwarder to the monitor abstraction.
 *
 * @return 0 on success, otherwise an errno-style error from the backend.
 */
errno_t
kernel_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
}
1526
1527 #pragma mark Image4 - New
1528
1529
1530
/*
 * Handle the IMAGE4_CS_TRAP_IMAGE_ACTIVATE selector. The payload and
 * manifest referenced by the caller's argument vector are copied into fresh
 * kernel allocations, both regions are transferred to the monitor, and the
 * trap is invoked with the new addresses.
 *
 * Ownership (see the transfer-kind annotations below):
 *  - payload (__cs_xfer): on success the region is handed over to the
 *    monitor and must NOT be freed here; it is only reclaimed and freed on
 *    failure, and only if it was actually transferred.
 *  - manifest (__cs_borrow): the region is merely lent to the monitor and is
 *    always reclaimed and freed before returning.
 *
 * @return 0 on success, otherwise an errno-style error (EPERM for
 *         allocation failures, or whatever the monitor trap returns).
 */
static errno_t
_kernel_image4_monitor_trap_image_activate(
	image4_cs_trap_t selector,
	const void *input_data)
{
	/*
	 * csmx_payload (csmx_payload_len) --> __cs_xfer
	 * csmx_manifest (csmx_manifest_len) --> __cs_borrow
	 */
	image4_cs_trap_argv(image_activate) input = {0};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	errno_t err = EPERM;

	/* Copy the input data -- caller guarantees it is sized for this selector */
	memcpy(&input, input_data, sizeof(input));

	payload_addr = code_signing_allocate(input.csmx_payload_len);
	if (payload_addr == 0) {
		goto out;
	}
	memcpy((void*)payload_addr, (void*)input.csmx_payload, input.csmx_payload_len);

	manifest_addr = code_signing_allocate(input.csmx_manifest_len);
	if (manifest_addr == 0) {
		goto out;
	}
	memcpy((void*)manifest_addr, (void*)input.csmx_manifest, input.csmx_manifest_len);

	/* Transfer both regions to the monitor */
	CSM_PREFIX(image4_transfer_region)(selector, payload_addr, input.csmx_payload_len);
	CSM_PREFIX(image4_transfer_region)(selector, manifest_addr, input.csmx_manifest_len);

	/* Setup the input with new addresses */
	input.csmx_payload = payload_addr;
	input.csmx_manifest = manifest_addr;

	/* Trap into the monitor for this selector */
	err = CSM_PREFIX(image4_monitor_trap)(selector, &input, sizeof(input));

out:
	if ((err != 0) && (payload_addr != 0)) {
		/*
		 * Retyping only happens after allocating the manifest, so the payload
		 * only needs to be reclaimed from the monitor when the manifest
		 * allocation succeeded (i.e. the transfer above actually ran).
		 */
		if (manifest_addr != 0) {
			CSM_PREFIX(image4_reclaim_region)(
				selector, payload_addr, input.csmx_payload_len);
		}
		code_signing_deallocate(&payload_addr, input.csmx_payload_len);
	}

	if (manifest_addr != 0) {
		/* Reclaim the manifest region -- will be retyped if not NULL */
		CSM_PREFIX(image4_reclaim_region)(
			selector, manifest_addr, input.csmx_manifest_len);

		/* Deallocate the manifest region */
		code_signing_deallocate(&manifest_addr, input.csmx_manifest_len);
	}

	return err;
}
1592
1593 static errno_t
_kernel_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size)1594 _kernel_image4_monitor_trap(
1595 image4_cs_trap_t selector,
1596 const void *input_data,
1597 size_t input_size)
1598 {
1599 /* Validate input size for the selector */
1600 if (input_size != image4_cs_trap_vector_size(selector)) {
1601 printf("image4 dispatch: invalid input: %llu | %lu\n", selector, input_size);
1602 return EINVAL;
1603 }
1604
1605 switch (selector) {
1606 case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1607 return _kernel_image4_monitor_trap_image_activate(selector, input_data);
1608
1609 default:
1610 return CSM_PREFIX(image4_monitor_trap)(selector, input_data, input_size);
1611 }
1612 }
1613
1614 errno_t
kernel_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size,__unused void * output_data,__unused size_t * output_size)1615 kernel_image4_monitor_trap(
1616 image4_cs_trap_t selector,
1617 const void *input_data,
1618 size_t input_size,
1619 __unused void *output_data,
1620 __unused size_t *output_size)
1621 {
1622 size_t length_check = 0;
1623
1624 /* Input data is always required */
1625 if ((input_data == NULL) || (input_size == 0)) {
1626 printf("image4 dispatch: no input data: %llu\n", selector);
1627 return EINVAL;
1628 } else if (os_add_overflow((vm_address_t)input_data, input_size, &length_check)) {
1629 panic("image4_ dispatch: overflow on input: %p | %lu", input_data, input_size);
1630 }
1631
1632 return _kernel_image4_monitor_trap(selector, input_data, input_size);
1633 }
1634