1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <machine/atomic.h>
25 #include <mach/vm_param.h>
26 #include <vm/vm_kern.h>
27 #include <vm/pmap.h>
28 #include <vm/pmap_cs.h>
29 #include <vm/vm_map.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/locks.h>
34 #include <kern/lock_rw.h>
35 #include <libkern/libkern.h>
36 #include <libkern/section_keywords.h>
37 #include <libkern/coretrust/coretrust.h>
38 #include <pexpert/pexpert.h>
39 #include <sys/vm.h>
40 #include <sys/proc.h>
41 #include <sys/proc_require.h>
42 #include <sys/codesign.h>
43 #include <sys/code_signing.h>
44 #include <sys/lockdown_mode.h>
45 #include <sys/reason.h>
46 #include <sys/kdebug_kernel.h>
47 #include <sys/kdebug_triage.h>
48 #include <sys/sysctl.h>
49 #include <uuid/uuid.h>
50 #include <IOKit/IOBSD.h>
51
52 #if CONFIG_SPTM
53 #include <sys/trusted_execution_monitor.h>
54 #endif
55
56 #if XNU_KERNEL_PRIVATE
57 vm_address_t
58 code_signing_allocate(
59 size_t alloc_size)
60 {
61 vm_address_t alloc_addr = 0;
62
63 if (alloc_size == 0) {
64 panic("%s: zero allocation size", __FUNCTION__);
65 }
66 size_t aligned_size = round_page(alloc_size);
67
68 kern_return_t ret = kmem_alloc(
69 kernel_map,
70 &alloc_addr, aligned_size,
71 KMA_KOBJECT | KMA_DATA | KMA_ZERO,
72 VM_KERN_MEMORY_SECURITY);
73
74 if (ret != KERN_SUCCESS) {
75 printf("%s: unable to allocate %lu bytes\n", __FUNCTION__, aligned_size);
76 } else if (alloc_addr == 0) {
77 printf("%s: invalid allocation\n", __FUNCTION__);
78 }
79
80 return alloc_addr;
81 }
82
83 void
84 code_signing_deallocate(
85 vm_address_t *alloc_addr,
86 size_t alloc_size)
87 {
88 if (alloc_addr == NULL) {
89 panic("%s: invalid pointer provided", __FUNCTION__);
90 } else if ((*alloc_addr == 0) || ((*alloc_addr & PAGE_MASK) != 0)) {
91 panic("%s: address provided: %p", __FUNCTION__, (void*)(*alloc_addr));
92 } else if (alloc_size == 0) {
93 panic("%s: zero allocation size", __FUNCTION__);
94 }
95 size_t aligned_size = round_page(alloc_size);
96
97 /* Free the allocation */
98 kmem_free(kernel_map, *alloc_addr, aligned_size);
99
100 /* Clear the address */
101 *alloc_addr = 0;
102 }
103 #endif /* XNU_KERNEL_PRIVATE */
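
/*
 * Hedged usage sketch (illustrative, not an actual call site): callers are expected
 * to pair code_signing_allocate() with code_signing_deallocate() using the same
 * size, since both round the size up to a page boundary internally. The variable
 * names below are hypothetical.
 *
 *	vm_address_t sig_buffer = code_signing_allocate(signature_size);
 *	if (sig_buffer == 0) {
 *		return KERN_RESOURCE_SHORTAGE;
 *	}
 *	memcpy((void*)sig_buffer, signature_blob, signature_size);
 *	...
 *	code_signing_deallocate(&sig_buffer, signature_size);
 */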
104
105 SYSCTL_DECL(_security);
106 SYSCTL_DECL(_security_codesigning);
107 SYSCTL_NODE(_security, OID_AUTO, codesigning, CTLFLAG_RD, 0, "XNU Code Signing");
108
109 static SECURITY_READ_ONLY_LATE(bool) cs_config_set = false;
110 static SECURITY_READ_ONLY_LATE(code_signing_monitor_type_t) cs_monitor = CS_MONITOR_TYPE_NONE;
111 static SECURITY_READ_ONLY_LATE(code_signing_config_t) cs_config = 0;
112
113 SYSCTL_UINT(_security_codesigning, OID_AUTO, monitor, CTLFLAG_RD, &cs_monitor, 0, "code signing monitor type");
114 SYSCTL_UINT(_security_codesigning, OID_AUTO, config, CTLFLAG_RD, &cs_config, 0, "code signing configuration");
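
/*
 * Hedged usage sketch (user-space side, illustrative only): the read-only sysctls
 * declared above can be queried through the standard sysctl(3) interface, e.g.:
 *
 *	uint32_t config = 0;
 *	size_t config_size = sizeof(config);
 *	if (sysctlbyname("security.codesigning.config", &config, &config_size, NULL, 0) == 0) {
 *		// inspect the CS_CONFIG_* bits reported by the kernel
 *	}
 */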
115
116 void
117 code_signing_configuration(
118 code_signing_monitor_type_t *monitor_type_out,
119 code_signing_config_t *config_out)
120 {
121 code_signing_monitor_type_t monitor_type = CS_MONITOR_TYPE_NONE;
122 code_signing_config_t config = 0;
123
124 /*
125 * Since we read this variable with load-acquire semantics, if we observe a value
126 * of true, it means we should be able to observe writes to cs_monitor and also
127 * cs_config.
128 */
129 if (os_atomic_load(&cs_config_set, acquire) == true) {
130 goto config_set;
131 }
132
133 /*
134 * Add support for all the code signing features. This function is called very
135 * early in the system boot, well before kernel extensions such as Apple Mobile
136 * File Integrity come online. As a result, this function assumes that all the
137 * code signing features are enabled, and later on, different components can
138 * disable support for different features using disable_code_signing_feature().
139 */
140 config |= CS_CONFIG_MAP_JIT;
141 config |= CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
142 config |= CS_CONFIG_COMPILATION_SERVICE;
143 config |= CS_CONFIG_LOCAL_SIGNING;
144 config |= CS_CONFIG_OOP_JIT;
145
146 #if CODE_SIGNING_MONITOR
147 /* Mark the code signing monitor as enabled if required */
148 if (csm_enabled() == true) {
149 config |= CS_CONFIG_CSM_ENABLED;
150 }
151
152 #if CONFIG_SPTM
153 /*
154 * Since TrustedExecutionMonitor cannot call into any function within XNU, we
155 * query its code signing configuration even before this function is called.
156 * Using that, we modify the state of the code signing features available.
157 */
158 if (csm_enabled() == true) {
159 #if kTXMKernelAPIVersion >= 3
160 bool platform_code_only = txm_cs_config->systemPolicy->platformCodeOnly;
161 #else
162 bool platform_code_only = txm_ro_data->platformCodeOnly;
163 #endif
164
165 /* Disable unsupported features when enforcing platform-code-only */
166 if (platform_code_only == true) {
167 config &= ~CS_CONFIG_MAP_JIT;
168 config &= ~CS_CONFIG_COMPILATION_SERVICE;
169 config &= ~CS_CONFIG_LOCAL_SIGNING;
170 config &= ~CS_CONFIG_OOP_JIT;
171 }
172
173 #if kTXMKernelAPIVersion >= 3
174 /* MAP_JIT support */
175 if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
176 config &= ~CS_CONFIG_MAP_JIT;
177 }
178 #endif
179
180 /* Developer mode support */
181 if (txm_cs_config->systemPolicy->featureSet.developerMode == false) {
182 config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
183 }
184
185 /* Compilation service support */
186 if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
187 config &= ~CS_CONFIG_COMPILATION_SERVICE;
188 }
189
190 /* Local signing support */
191 if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
192 config &= ~CS_CONFIG_LOCAL_SIGNING;
193 }
194
195 /* OOP-JIT support */
196 if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
197 config &= ~CS_CONFIG_OOP_JIT;
198 }
199 }
200 monitor_type = CS_MONITOR_TYPE_TXM;
201 #elif PMAP_CS_PPL_MONITOR
202 monitor_type = CS_MONITOR_TYPE_PPL;
203 #endif /* CONFIG_SPTM */
204 #endif /* CODE_SIGNING_MONITOR */
205
206 #if DEVELOPMENT || DEBUG
207 /*
208 * We only ever need to parse the boot-arg based exemption state on DEVELOPMENT
209 * and DEBUG builds, as this state is not respected by any code signing component
210 * on RELEASE builds.
211 */
212
213 #define CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID 0x01
214 #define CS_AMFI_MASK_ALLOW_ANY_SIGNATURE 0x02
215 #define CS_AMFI_MASK_GET_OUT_OF_MY_WAY 0x80
216
217 int amfi_mask = 0;
218 int amfi_allow_any_signature = 0;
219 int amfi_unrestrict_task_for_pid = 0;
220 int amfi_get_out_of_my_way = 0;
221 int cs_enforcement_disabled = 0;
222 int cs_integrity_skip = 0;
223
224 /* Parse the AMFI mask */
225 PE_parse_boot_argn("amfi", &amfi_mask, sizeof(amfi_mask));
226
227 /* Parse the AMFI soft-bypass */
228 PE_parse_boot_argn(
229 "amfi_allow_any_signature",
230 &amfi_allow_any_signature,
231 sizeof(amfi_allow_any_signature));
232
233 /* Parse the AMFI debug-bypass */
234 PE_parse_boot_argn(
235 "amfi_unrestrict_task_for_pid",
236 &amfi_unrestrict_task_for_pid,
237 sizeof(amfi_unrestrict_task_for_pid));
238
239 /* Parse the AMFI hard-bypass */
240 PE_parse_boot_argn(
241 "amfi_get_out_of_my_way",
242 &amfi_get_out_of_my_way,
243 sizeof(amfi_get_out_of_my_way));
244
245 /* Parse the system code signing hard-bypass */
246 PE_parse_boot_argn(
247 "cs_enforcement_disable",
248 &cs_enforcement_disabled,
249 sizeof(cs_enforcement_disabled));
250
251 /* Parse the system code signing integrity-check bypass */
252 PE_parse_boot_argn(
253 "cs_integrity_skip",
254 &cs_integrity_skip,
255 sizeof(cs_integrity_skip));
256
257 /* CS_CONFIG_UNRESTRICTED_DEBUGGING */
258 if (amfi_mask & CS_AMFI_MASK_UNRESTRICT_TASK_FOR_PID) {
259 config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
260 } else if (amfi_unrestrict_task_for_pid) {
261 config |= CS_CONFIG_UNRESTRICTED_DEBUGGING;
262 }
263
264 /* CS_CONFIG_ALLOW_ANY_SIGNATURE */
265 if (amfi_mask & CS_AMFI_MASK_ALLOW_ANY_SIGNATURE) {
266 config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
267 } else if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
268 config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
269 } else if (amfi_allow_any_signature) {
270 config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
271 } else if (amfi_get_out_of_my_way) {
272 config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
273 } else if (cs_enforcement_disabled) {
274 config |= CS_CONFIG_ALLOW_ANY_SIGNATURE;
275 }
276
277 /* CS_CONFIG_ENFORCEMENT_DISABLED */
278 if (cs_enforcement_disabled) {
279 config |= CS_CONFIG_ENFORCEMENT_DISABLED;
280 }
281
282 /* CS_CONFIG_GET_OUT_OF_MY_WAY */
283 if (amfi_mask & CS_AMFI_MASK_GET_OUT_OF_MY_WAY) {
284 config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
285 } else if (amfi_get_out_of_my_way) {
286 config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
287 } else if (cs_enforcement_disabled) {
288 config |= CS_CONFIG_GET_OUT_OF_MY_WAY;
289 }
290
291 /* CS_CONFIG_INTEGRITY_SKIP */
292 if (cs_integrity_skip) {
293 config |= CS_CONFIG_INTEGRITY_SKIP;
294 }
295
296 #if CONFIG_SPTM
297
298 if (csm_enabled() == true) {
299 /* allow_any_signature */
300 if (txm_cs_config->exemptions.allowAnySignature == false) {
301 config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
302 }
303
304 /* unrestrict_task_for_pid */
305 if (txm_ro_data && !txm_ro_data->exemptions.allowUnrestrictedDebugging) {
306 config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
307 }
308
309 /* cs_enforcement_disable */
310 if (txm_ro_data && !txm_ro_data->exemptions.allowModifiedCode) {
311 config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
312 }
313
314 /* get_out_of_my_way (skip_trust_evaluation) */
315 if (txm_cs_config->exemptions.skipTrustEvaluation == false) {
316 config &= ~CS_CONFIG_GET_OUT_OF_MY_WAY;
317 }
318 }
319
320 #elif PMAP_CS_PPL_MONITOR
321
322 if (csm_enabled() == true) {
323 int pmap_cs_allow_any_signature = 0;
324 bool override = PE_parse_boot_argn(
325 "pmap_cs_allow_any_signature",
326 &pmap_cs_allow_any_signature,
327 sizeof(pmap_cs_allow_any_signature));
328
329 if (!pmap_cs_allow_any_signature && override) {
330 config &= ~CS_CONFIG_ALLOW_ANY_SIGNATURE;
331 }
332
333 int pmap_cs_unrestrict_task_for_pid = 0;
334 override = PE_parse_boot_argn(
335 "pmap_cs_unrestrict_pmap_cs_disable",
336 &pmap_cs_unrestrict_task_for_pid,
337 sizeof(pmap_cs_unrestrict_task_for_pid));
338
339 if (!pmap_cs_unrestrict_task_for_pid && override) {
340 config &= ~CS_CONFIG_UNRESTRICTED_DEBUGGING;
341 }
342
343 int pmap_cs_enforcement_disable = 0;
344 override = PE_parse_boot_argn(
345 "pmap_cs_allow_modified_code_pages",
346 &pmap_cs_enforcement_disable,
347 sizeof(pmap_cs_enforcement_disable));
348
349 if (!pmap_cs_enforcement_disable && override) {
350 config &= ~CS_CONFIG_ENFORCEMENT_DISABLED;
351 }
352 }
353
354 #endif /* CONFIG_SPTM */
355 #endif /* DEVELOPMENT || DEBUG */
356
357 os_atomic_store(&cs_monitor, monitor_type, relaxed);
358 os_atomic_store(&cs_config, config, relaxed);
359
360 /*
361 * We write the cs_config_set variable with store-release semantics which means
362 * no writes before this call will be re-ordered to after this call. Hence, if
363 * someone reads this variable with load-acquire semantics, and they observe a
364 * value of true, then they will be able to observe the correct values of the
365 * cs_monitor and the cs_config variables as well.
366 */
367 os_atomic_store(&cs_config_set, true, release);
368
369 config_set:
370 /* Ensure configuration has been set */
371 assert(os_atomic_load(&cs_config_set, relaxed) == true);
372
373 /* Set the monitor type */
374 if (monitor_type_out) {
375 *monitor_type_out = os_atomic_load(&cs_monitor, relaxed);
376 }
377
378 /* Set the configuration */
379 if (config_out) {
380 *config_out = os_atomic_load(&cs_config, relaxed);
381 }
382 }
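
/*
 * Hedged usage sketch (illustrative only): a typical caller queries the current
 * configuration and then tests individual CS_CONFIG_* bits. Both output parameters
 * are optional, so a caller interested only in the configuration may pass NULL for
 * the monitor type. The local variable name is hypothetical.
 *
 *	code_signing_config_t local_config = 0;
 *	code_signing_configuration(NULL, &local_config);
 *	if ((local_config & CS_CONFIG_MAP_JIT) == 0) {
 *		... MAP_JIT is not supported on this system ...
 *	}
 */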
383
384 void
385 disable_code_signing_feature(
386 code_signing_config_t feature)
387 {
388 /*
389 * We require that this function be called only after the code signing config
390 * has been set up initially with a call to code_signing_configuration().
391 */
392 if (os_atomic_load(&cs_config_set, acquire) == false) {
393 panic("attempted to disable code signing feature without init: %u", feature);
394 }
395
396 /*
397 * We require that only a single feature be disabled through a single call to this
398 * function. Moreover, we ensure that only valid features are being disabled.
399 */
400 switch (feature) {
401 case CS_CONFIG_DEVELOPER_MODE_SUPPORTED:
402 cs_config &= ~CS_CONFIG_DEVELOPER_MODE_SUPPORTED;
403 break;
404
405 case CS_CONFIG_COMPILATION_SERVICE:
406 cs_config &= ~CS_CONFIG_COMPILATION_SERVICE;
407 break;
408
409 case CS_CONFIG_LOCAL_SIGNING:
410 cs_config &= ~CS_CONFIG_LOCAL_SIGNING;
411 break;
412
413 case CS_CONFIG_OOP_JIT:
414 cs_config &= ~CS_CONFIG_OOP_JIT;
415 break;
416
417 case CS_CONFIG_MAP_JIT:
418 cs_config &= ~CS_CONFIG_MAP_JIT;
419 break;
420
421 default:
422 panic("attempted to disable an invalid code signing feature: %u", feature);
423 }
424
425 /* Ensure all readers can observe the latest data */
426 #if defined(__arm64__)
427 __asm__ volatile ("dmb ish" ::: "memory");
428 #elif defined(__x86_64__)
429 __asm__ volatile ("mfence" ::: "memory");
430 #else
431 #error "Unknown platform -- fence instruction unavailable"
432 #endif
433 }
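
/*
 * Hedged usage sketch (illustrative, not taken from a specific component): once
 * code_signing_configuration() has run, a policy component such as AMFI can turn
 * off exactly one feature per call; disabling several features requires one call
 * for each, and any unsupported feature value panics.
 *
 *	disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
 *	disable_code_signing_feature(CS_CONFIG_OOP_JIT);
 */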
434
435 #pragma mark Developer Mode
436
437 void
438 enable_developer_mode(void)
439 {
440 CSM_PREFIX(toggle_developer_mode)(true);
441 }
442
443 void
444 disable_developer_mode(void)
445 {
446 CSM_PREFIX(toggle_developer_mode)(false);
447 }
448
449 bool
450 developer_mode_state(void)
451 {
452 /* Assume false if the pointer isn't set up */
453 if (developer_mode_enabled == NULL) {
454 return false;
455 }
456
457 return os_atomic_load(developer_mode_enabled, relaxed);
458 }
459
460 #pragma mark Provisioning Profiles
461 /*
462 * AMFI performs full profile validation by itself. XNU only needs to manage provisioning
463 * profiles when we have a monitor since the monitor needs to independently verify the
464 * profile data as well.
465 */
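
/*
 * Hedged lifecycle sketch (monitor-enabled systems only, illustrative names): a
 * profile is registered once by UUID, associated with one or more code signature
 * objects, and eventually reclaimed by the garbage collector once nothing keeps
 * it marked as recently used.
 *
 *	csm_register_provisioning_profile(profile_uuid, profile_blob, profile_blob_size);
 *	csm_associate_provisioning_profile(monitor_sig_obj, profile_uuid);
 *	...
 *	csm_disassociate_provisioning_profile(monitor_sig_obj);
 *	garbage_collect_provisioning_profiles();
 */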
466
467 void
468 garbage_collect_provisioning_profiles(void)
469 {
470 #if CODE_SIGNING_MONITOR
471 csm_free_provisioning_profiles();
472 #endif
473 }
474
475 #if CODE_SIGNING_MONITOR
476
477 /* Structure used to maintain the set of registered profiles on the system */
478 typedef struct _cs_profile {
479 /* The UUID of the registered profile */
480 uuid_t profile_uuid;
481
482 /* The profile validation object from the monitor */
483 void *profile_obj;
484
485 /*
486 * In order to minimize the number of times the same profile would need to be
487 * registered, we allow frequently used profiles to skip the garbage collector
488 * for one pass.
489 */
490 bool skip_collector;
491
492 /* Linked list linkage */
493 SLIST_ENTRY(_cs_profile) link;
494 } cs_profile_t;
495
496 /* Linked list head for registered profiles */
497 static SLIST_HEAD(, _cs_profile) all_profiles = SLIST_HEAD_INITIALIZER(all_profiles);
498
499 /* Lock for the provisioning profiles */
500 LCK_GRP_DECLARE(profiles_lck_grp, "profiles_lck_grp");
501 decl_lck_rw_data(, profiles_lock);
502
503 void
504 csm_initialize_provisioning_profiles(void)
505 {
506 /* Ensure the CoreTrust kernel extension has loaded */
507 if (coretrust == NULL) {
508 panic("coretrust interface not available");
509 }
510
511 /* Initialize the provisioning profiles lock */
512 lck_rw_init(&profiles_lock, &profiles_lck_grp, 0);
513 printf("initialized XNU provisioning profile data\n");
514
515 #if PMAP_CS_PPL_MONITOR
516 pmap_initialize_provisioning_profiles();
517 #endif
518 }
519
520 static cs_profile_t*
521 search_for_profile_uuid(
522 const uuid_t profile_uuid)
523 {
524 cs_profile_t *profile = NULL;
525
526 /* Caller is required to acquire the lock */
527 lck_rw_assert(&profiles_lock, LCK_RW_ASSERT_HELD);
528
529 SLIST_FOREACH(profile, &all_profiles, link) {
530 if (uuid_compare(profile_uuid, profile->profile_uuid) == 0) {
531 return profile;
532 }
533 }
534
535 return NULL;
536 }
537
538 kern_return_t
539 csm_register_provisioning_profile(
540 const uuid_t profile_uuid,
541 const void *profile_blob,
542 const size_t profile_blob_size)
543 {
544 cs_profile_t *profile = NULL;
545 void *monitor_profile_obj = NULL;
546 kern_return_t ret = KERN_DENIED;
547
548 /* Allocate storage for the profile wrapper object */
549 profile = kalloc_type(cs_profile_t, Z_WAITOK_ZERO);
550 assert(profile != NULL);
551
552 /* Lock the profile set exclusively */
553 lck_rw_lock_exclusive(&profiles_lock);
554
555 /* Check to make sure this isn't a duplicate UUID */
556 cs_profile_t *dup_profile = search_for_profile_uuid(profile_uuid);
557 if (dup_profile != NULL) {
558 /* This profile might be used soon -- skip garbage collector */
559 dup_profile->skip_collector = true;
560
561 ret = KERN_ALREADY_IN_SET;
562 goto exit;
563 }
564
565 ret = CSM_PREFIX(register_provisioning_profile)(
566 profile_blob,
567 profile_blob_size,
568 &monitor_profile_obj);
569
570 if (ret == KERN_SUCCESS) {
571 /* Copy in the profile UUID */
572 uuid_copy(profile->profile_uuid, profile_uuid);
573
574 /* Setup the monitor's profile object */
575 profile->profile_obj = monitor_profile_obj;
576
577 /* This profile might be used soon -- skip garbage collector */
578 profile->skip_collector = true;
579
580 /* Insert at the head of the profile set */
581 SLIST_INSERT_HEAD(&all_profiles, profile, link);
582 }
583
584 exit:
585 /* Unlock the profile set */
586 lck_rw_unlock_exclusive(&profiles_lock);
587
588 if (ret != KERN_SUCCESS) {
589 /* Free the profile wrapper object */
590 kfree_type(cs_profile_t, profile);
591 profile = NULL;
592
593 if (ret != KERN_ALREADY_IN_SET) {
594 printf("unable to register profile with monitor: %d\n", ret);
595 }
596 }
597
598 return ret;
599 }
600
601 kern_return_t
602 csm_associate_provisioning_profile(
603 void *monitor_sig_obj,
604 const uuid_t profile_uuid)
605 {
606 cs_profile_t *profile = NULL;
607 kern_return_t ret = KERN_DENIED;
608
609 if (csm_enabled() == false) {
610 return KERN_NOT_SUPPORTED;
611 }
612
613 /* Lock the profile set as shared */
614 lck_rw_lock_shared(&profiles_lock);
615
616 /* Search for the provisioning profile */
617 profile = search_for_profile_uuid(profile_uuid);
618 if (profile == NULL) {
619 ret = KERN_NOT_FOUND;
620 goto exit;
621 }
622
623 ret = CSM_PREFIX(associate_provisioning_profile)(
624 monitor_sig_obj,
625 profile->profile_obj);
626
627 if (ret == KERN_SUCCESS) {
628 /*
629 * This seems like an active profile -- let it skip the garbage collector on
630 * the next pass. We can modify this field even though we've only taken a shared
631 * lock as in this case we're always setting it to a fixed value.
632 */
633 profile->skip_collector = true;
634 }
635
636 exit:
637 /* Unlock the profile set */
638 lck_rw_unlock_shared(&profiles_lock);
639
640 if (ret != KERN_SUCCESS) {
641 printf("unable to associate profile: %d\n", ret);
642 }
643 return ret;
644 }
645
646 kern_return_t
647 csm_disassociate_provisioning_profile(
648 void *monitor_sig_obj)
649 {
650 kern_return_t ret = KERN_DENIED;
651
652 if (csm_enabled() == false) {
653 return KERN_NOT_SUPPORTED;
654 }
655
656 /* Call out to the monitor */
657 ret = CSM_PREFIX(disassociate_provisioning_profile)(monitor_sig_obj);
658
659 if ((ret != KERN_SUCCESS) && (ret != KERN_NOT_FOUND)) {
660 printf("unable to disassociate profile: %d\n", ret);
661 }
662 return ret;
663 }
664
665 static kern_return_t
666 unregister_provisioning_profile(
667 cs_profile_t *profile)
668 {
669 kern_return_t ret = KERN_DENIED;
670
671 /* Call out to the monitor */
672 ret = CSM_PREFIX(unregister_provisioning_profile)(profile->profile_obj);
673
674 /*
675 * KERN_FAILURE represents the case when the unregistration failed because the
676 * monitor noted that the profile was still being used. Other than that, there
677 * is no other error expected out of this interface. In fact, there is no easy
678 * way to deal with other errors, as the profile state may be corrupted. If we
679 * see a different error, then we panic.
680 */
681 if ((ret != KERN_SUCCESS) && (ret != KERN_FAILURE)) {
682 panic("unable to unregister profile from monitor: %d | %p\n", ret, profile);
683 }
684
685 return ret;
686 }
687
688 void
689 csm_free_provisioning_profiles(void)
690 {
691 kern_return_t ret = KERN_DENIED;
692 cs_profile_t *profile = NULL;
693 cs_profile_t *temp_profile = NULL;
694
695 /* Lock the profile set exclusively */
696 lck_rw_lock_exclusive(&profiles_lock);
697
698 SLIST_FOREACH_SAFE(profile, &all_profiles, link, temp_profile) {
699 if (profile->skip_collector == true) {
700 profile->skip_collector = false;
701 continue;
702 }
703
704 /* Attempt to unregister this profile from the system */
705 ret = unregister_provisioning_profile(profile);
706 if (ret == KERN_SUCCESS) {
707 /* Remove the profile from the profile set */
708 SLIST_REMOVE(&all_profiles, profile, _cs_profile, link);
709
710 /* Free the memory consumed for the profile wrapper object */
711 kfree_type(cs_profile_t, profile);
712 profile = NULL;
713 }
714 }
715
716 /* Unlock the profile set */
717 lck_rw_unlock_exclusive(&profiles_lock);
718 }
719
720 #endif /* CODE_SIGNING_MONITOR */
721
722 #pragma mark Code Signing
723 /*
724 * AMFI performs full signature validation by itself. For some state, AMFI uses XNU
725 * to abstract away the underlying storage implementation, but AMFI does not interact
726 * with most of these interfaces directly, and they're only required when we have
727 * a code signing monitor on the system.
728 */
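
/*
 * Hedged sketch of the compilation service flow (illustrative, not a verbatim AMFI
 * code path): the cdhash of the trusted compilation service is recorded once, and
 * later validation logic can check a candidate cdhash against it.
 *
 *	set_compilation_service_cdhash(trusted_cdhash);
 *	...
 *	if (match_compilation_service_cdhash(candidate_cdhash)) {
 *		... treat the code as the compilation service ...
 *	}
 */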
729
730 void
731 set_compilation_service_cdhash(
732 const uint8_t cdhash[CS_CDHASH_LEN])
733 {
734 CSM_PREFIX(set_compilation_service_cdhash)(cdhash);
735 }
736
737 bool
738 match_compilation_service_cdhash(
739 const uint8_t cdhash[CS_CDHASH_LEN])
740 {
741 return CSM_PREFIX(match_compilation_service_cdhash)(cdhash);
742 }
743
744 void
745 set_local_signing_public_key(
746 const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
747 {
748 CSM_PREFIX(set_local_signing_public_key)(public_key);
749 }
750
751 uint8_t*
752 get_local_signing_public_key(void)
753 {
754 return CSM_PREFIX(get_local_signing_public_key)();
755 }
756
757 void
758 unrestrict_local_signing_cdhash(
759 __unused const uint8_t cdhash[CS_CDHASH_LEN])
760 {
761 /*
762 * Since AMFI manages code signing on its own, we only need to unrestrict the
763 * local signing cdhash when we have a monitor environment.
764 */
765
766 #if CODE_SIGNING_MONITOR
767 CSM_PREFIX(unrestrict_local_signing_cdhash)(cdhash);
768 #endif
769 }
770
771 kern_return_t
772 get_trust_level_kdp(
773 __unused pmap_t pmap,
774 __unused uint32_t *trust_level)
775 {
776 #if CODE_SIGNING_MONITOR
777 return csm_get_trust_level_kdp(pmap, trust_level);
778 #else
779 return KERN_NOT_SUPPORTED;
780 #endif
781 }
782
783 kern_return_t
784 csm_resolve_os_entitlements_from_proc(
785 __unused const proc_t process,
786 __unused const void **os_entitlements)
787 {
788 #if CODE_SIGNING_MONITOR
789 task_t task = NULL;
790 vm_map_t task_map = NULL;
791 pmap_t task_pmap = NULL;
792 kern_return_t ret = KERN_DENIED;
793
794 if (csm_enabled() == false) {
795 return KERN_NOT_SUPPORTED;
796 }
797
798 /* Ensure the process comes from the proc_task zone */
799 proc_require(process, PROC_REQUIRE_ALLOW_ALL);
800
801 /* Acquire the task from the proc */
802 task = proc_task(process);
803 if (task == NULL) {
804 return KERN_NOT_FOUND;
805 }
806
807 /* Acquire the virtual memory map from the task -- takes a reference on it */
808 task_map = get_task_map_reference(task);
809 if (task_map == NULL) {
810 return KERN_NOT_FOUND;
811 }
812
813 /* Acquire the pmap from the virtual memory map */
814 task_pmap = vm_map_get_pmap(task_map);
815 assert(task_pmap != NULL);
816
817 /* Call into the monitor to resolve the entitlements */
818 ret = CSM_PREFIX(resolve_kernel_entitlements)(task_pmap, os_entitlements);
819
820 /* Release the reference on the virtual memory map */
821 vm_map_deallocate(task_map);
822
823 return ret;
824 #else
825 return KERN_NOT_SUPPORTED;
826 #endif
827 }
828
829 kern_return_t
830 address_space_debugged(
831 const proc_t process)
832 {
833 /* Must pass in a valid proc_t */
834 if (process == NULL) {
835 printf("%s: provided a NULL process\n", __FUNCTION__);
836 return KERN_DENIED;
837 }
838 proc_require(process, PROC_REQUIRE_ALLOW_ALL);
839
840 /* Developer mode must always be enabled for this to return successfully */
841 if (developer_mode_state() == false) {
842 return KERN_DENIED;
843 }
844
845 #if CODE_SIGNING_MONITOR
846 task_t task = NULL;
847 vm_map_t task_map = NULL;
848 pmap_t task_pmap = NULL;
849
850 if (csm_enabled() == true) {
851 /* Acquire the task from the proc */
852 task = proc_task(process);
853 if (task == NULL) {
854 return KERN_NOT_FOUND;
855 }
856
857 /* Acquire the virtual memory map from the task -- takes a reference on it */
858 task_map = get_task_map_reference(task);
859 if (task_map == NULL) {
860 return KERN_NOT_FOUND;
861 }
862
863 /* Acquire the pmap from the virtual memory map */
864 task_pmap = vm_map_get_pmap(task_map);
865 assert(task_pmap != NULL);
866
867 /* Acquire the state from the monitor */
868 kern_return_t ret = CSM_PREFIX(address_space_debugged)(task_pmap);
869
870 /* Release the reference on the virtual memory map */
871 vm_map_deallocate(task_map);
872
873 return ret;
874 }
875 #endif /* CODE_SIGNING_MONITOR */
876
877 /* Check read-only process flags for state */
878 if (proc_getcsflags(process) & CS_DEBUGGED) {
879 return KERN_SUCCESS;
880 }
881
882 return KERN_DENIED;
883 }
884
885 #if CODE_SIGNING_MONITOR
886
887 bool
888 csm_enabled(void)
889 {
890 return CSM_PREFIX(code_signing_enabled)();
891 }
892
893 vm_size_t
894 csm_signature_size_limit(void)
895 {
896 return CSM_PREFIX(managed_code_signature_size)();
897 }
898
899 void
900 csm_check_lockdown_mode(void)
901 {
902 if (get_lockdown_mode_state() == 0) {
903 return;
904 }
905
906 /* Inform the code signing monitor about lockdown mode */
907 CSM_PREFIX(enter_lockdown_mode)();
908
909 #if CONFIG_SPTM
910 #if kTXMKernelAPIVersion >= 3
911 /* MAP_JIT lockdown */
912 if (txm_cs_config->systemPolicy->featureSet.JIT == false) {
913 disable_code_signing_feature(CS_CONFIG_MAP_JIT);
914 }
915 #endif
916
917 /* Compilation service lockdown */
918 if (txm_cs_config->systemPolicy->featureSet.compilationService == false) {
919 disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
920 }
921
922 /* Local signing lockdown */
923 if (txm_cs_config->systemPolicy->featureSet.localSigning == false) {
924 disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
925 }
926
927 /* OOP-JIT lockdown */
928 if (txm_cs_config->systemPolicy->featureSet.OOPJit == false) {
929 disable_code_signing_feature(CS_CONFIG_OOP_JIT);
930 }
931 #else
932 /*
933 * Lockdown mode is supposed to disable all forms of JIT on the system. For now,
934 * we leave JIT enabled by default until some blockers are resolved. The way this
935 * code is written, we don't need to change anything once we enforce MAP_JIT to
936 * be disabled for lockdown mode.
937 */
938 if (ppl_lockdown_mode_enforce_jit == true) {
939 disable_code_signing_feature(CS_CONFIG_MAP_JIT);
940 }
941 disable_code_signing_feature(CS_CONFIG_OOP_JIT);
942 disable_code_signing_feature(CS_CONFIG_LOCAL_SIGNING);
943 disable_code_signing_feature(CS_CONFIG_COMPILATION_SERVICE);
944 #endif /* CONFIG_SPTM */
945 }
946
947 void
948 csm_code_signing_violation(
949 proc_t proc,
950 vm_offset_t addr)
951 {
952 os_reason_t kill_reason = OS_REASON_NULL;
953
954 /* No enforcement if code-signing-monitor is disabled */
955 if (csm_enabled() == false) {
956 return;
957 } else if (proc == PROC_NULL) {
958 panic("code-signing violation without a valid proc");
959 }
960
961 /*
962 * If the address space is being debugged, then we expect this task to undergo
963 * some code signing violations. In this case, we return without killing the
964 * task.
965 */
966 if (address_space_debugged(proc) == KERN_SUCCESS) {
967 return;
968 }
969
970 /* Leave a ktriage record */
971 ktriage_record(
972 thread_tid(current_thread()),
973 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_CODE_SIGNING),
974 0);
975
976 /* Leave a log for triage purposes */
977 printf("[%s: killed] code-signing-violation at %p\n", proc_best_name(proc), (void*)addr);
978
979 /*
980 * Create a reason for the SIGKILL and set it to allow generating crash reports,
981 * which is critical for better triaging these issues.
982 */
983 kill_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
984 if (kill_reason != NULL) {
985 kill_reason->osr_flags |= OS_REASON_FLAG_GENERATE_CRASH_REPORT;
986 }
987
988 /*
989 * Send a SIGKILL to the process. This function will consume the kill_reason, so
990 * we do not need to manually free it here.
991 */
992 psignal_with_reason(proc, SIGKILL, kill_reason);
993 }
994
995 kern_return_t
996 csm_register_code_signature(
997 const vm_address_t signature_addr,
998 const vm_size_t signature_size,
999 const vm_offset_t code_directory_offset,
1000 const char *signature_path,
1001 void **monitor_sig_obj,
1002 vm_address_t *monitor_signature_addr)
1003 {
1004 if (csm_enabled() == false) {
1005 return KERN_NOT_SUPPORTED;
1006 }
1007
1008 return CSM_PREFIX(register_code_signature)(
1009 signature_addr,
1010 signature_size,
1011 code_directory_offset,
1012 signature_path,
1013 monitor_sig_obj,
1014 monitor_signature_addr);
1015 }
1016
1017 kern_return_t
1018 csm_unregister_code_signature(
1019 void *monitor_sig_obj)
1020 {
1021 if (csm_enabled() == false) {
1022 return KERN_NOT_SUPPORTED;
1023 }
1024
1025 return CSM_PREFIX(unregister_code_signature)(monitor_sig_obj);
1026 }
1027
1028 kern_return_t
1029 csm_verify_code_signature(
1030 void *monitor_sig_obj)
1031 {
1032 if (csm_enabled() == false) {
1033 return KERN_NOT_SUPPORTED;
1034 }
1035
1036 return CSM_PREFIX(verify_code_signature)(monitor_sig_obj);
1037 }
1038
1039 kern_return_t
1040 csm_reconstitute_code_signature(
1041 void *monitor_sig_obj,
1042 vm_address_t *unneeded_addr,
1043 vm_size_t *unneeded_size)
1044 {
1045 if (csm_enabled() == false) {
1046 return KERN_NOT_SUPPORTED;
1047 }
1048
1049 return CSM_PREFIX(reconstitute_code_signature)(
1050 monitor_sig_obj,
1051 unneeded_addr,
1052 unneeded_size);
1053 }
1054
1055 kern_return_t
1056 csm_associate_code_signature(
1057 pmap_t monitor_pmap,
1058 void *monitor_sig_obj,
1059 const vm_address_t region_addr,
1060 const vm_size_t region_size,
1061 const vm_offset_t region_offset)
1062 {
1063 if (csm_enabled() == false) {
1064 return KERN_NOT_SUPPORTED;
1065 }
1066
1067 return CSM_PREFIX(associate_code_signature)(
1068 monitor_pmap,
1069 monitor_sig_obj,
1070 region_addr,
1071 region_size,
1072 region_offset);
1073 }
1074
1075 kern_return_t
1076 csm_allow_jit_region(
1077 pmap_t monitor_pmap)
1078 {
1079 if (csm_enabled() == false) {
1080 return KERN_SUCCESS;
1081 } else if (monitor_pmap == NULL) {
1082 return KERN_DENIED;
1083 }
1084
1085 kern_return_t ret = CSM_PREFIX(allow_jit_region)(monitor_pmap);
1086 if (ret == KERN_NOT_SUPPORTED) {
1087 /*
1088 * Some monitor environments do not support this API and as a result will
1089 * return KERN_NOT_SUPPORTED. The caller here should not interpret that as
1090 * a failure.
1091 */
1092 ret = KERN_SUCCESS;
1093 }
1094
1095 return ret;
1096 }
1097
1098 kern_return_t
1099 csm_associate_jit_region(
1100 pmap_t monitor_pmap,
1101 const vm_address_t region_addr,
1102 const vm_size_t region_size)
1103 {
1104 if (csm_enabled() == false) {
1105 return KERN_NOT_SUPPORTED;
1106 }
1107
1108 return CSM_PREFIX(associate_jit_region)(
1109 monitor_pmap,
1110 region_addr,
1111 region_size);
1112 }
1113
1114 kern_return_t
1115 csm_associate_debug_region(
1116 pmap_t monitor_pmap,
1117 const vm_address_t region_addr,
1118 const vm_size_t region_size)
1119 {
1120 if (csm_enabled() == false) {
1121 return KERN_NOT_SUPPORTED;
1122 }
1123
1124 return CSM_PREFIX(associate_debug_region)(
1125 monitor_pmap,
1126 region_addr,
1127 region_size);
1128 }
1129
1130 kern_return_t
1131 csm_allow_invalid_code(
1132 pmap_t pmap)
1133 {
1134 if (csm_enabled() == false) {
1135 return KERN_NOT_SUPPORTED;
1136 }
1137
1138 return CSM_PREFIX(allow_invalid_code)(pmap);
1139 }
1140
1141 kern_return_t
1142 csm_get_trust_level_kdp(
1143 pmap_t pmap,
1144 uint32_t *trust_level)
1145 {
1146 if (csm_enabled() == false) {
1147 return KERN_NOT_SUPPORTED;
1148 }
1149
1150 return CSM_PREFIX(get_trust_level_kdp)(pmap, trust_level);
1151 }
1152
1153 kern_return_t
1154 csm_address_space_exempt(
1155 const pmap_t pmap)
1156 {
1157 /*
1158 * These exemptions are actually orthogonal to the code signing enforcement. As
1159 * a result, we let each monitor explicitly decide how to deal with the exemption
1160 * in case code signing enforcement is disabled.
1161 */
1162
1163 return CSM_PREFIX(address_space_exempt)(pmap);
1164 }
1165
1166 kern_return_t
1167 csm_fork_prepare(
1168 pmap_t old_pmap,
1169 pmap_t new_pmap)
1170 {
1171 if (csm_enabled() == false) {
1172 return KERN_NOT_SUPPORTED;
1173 }
1174
1175 return CSM_PREFIX(fork_prepare)(old_pmap, new_pmap);
1176 }
1177
1178 kern_return_t
1179 csm_acquire_signing_identifier(
1180 const void *monitor_sig_obj,
1181 const char **signing_id)
1182 {
1183 if (csm_enabled() == false) {
1184 return KERN_NOT_SUPPORTED;
1185 }
1186
1187 return CSM_PREFIX(acquire_signing_identifier)(monitor_sig_obj, signing_id);
1188 }
1189
1190 kern_return_t
1191 csm_associate_os_entitlements(
1192 void *monitor_sig_obj,
1193 const void *os_entitlements)
1194 {
1195 if (csm_enabled() == false) {
1196 return KERN_NOT_SUPPORTED;
1197 } else if (os_entitlements == NULL) {
1198 /* Not every signature has entitlements */
1199 return KERN_SUCCESS;
1200 }
1201
1202 return CSM_PREFIX(associate_kernel_entitlements)(monitor_sig_obj, os_entitlements);
1203 }
1204
1205 kern_return_t
1206 csm_accelerate_entitlements(
1207 void *monitor_sig_obj,
1208 CEQueryContext_t *ce_ctx)
1209 {
1210 if (csm_enabled() == false) {
1211 return KERN_NOT_SUPPORTED;
1212 }
1213
1214 return CSM_PREFIX(accelerate_entitlements)(monitor_sig_obj, ce_ctx);
1215 }
1216
1217 #endif /* CODE_SIGNING_MONITOR */
1218
1219 #pragma mark AppleImage4
1220 /*
1221 * AppleImage4 uses the monitor environment to safeguard critical security data.
1222 * To keep the implementation specifics hidden, AppleImage4 always depends on these
1223 * abstracted APIs, regardless of whether the system has a monitor environment or
1224 * not.
1225 */
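
/*
 * Hedged usage sketch (illustrative only): AppleImage4 can manage nonce domains
 * through these wrappers without knowing whether a monitor backs the storage. The
 * nonce domain index used below is a placeholder.
 *
 *	img4_nonce_t nonce = {0};
 *	if (kernel_image4_copy_nonce(ndi, &nonce) == 0) {
 *		... use the nonce ...
 *	}
 *	kernel_image4_roll_nonce(ndi);
 */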
1226
1227 void*
1228 kernel_image4_storage_data(
1229 size_t *allocated_size)
1230 {
1231 return CSM_PREFIX(image4_storage_data)(allocated_size);
1232 }
1233
1234 void
1235 kernel_image4_set_nonce(
1236 const img4_nonce_domain_index_t ndi,
1237 const img4_nonce_t *nonce)
1238 {
1239 return CSM_PREFIX(image4_set_nonce)(ndi, nonce);
1240 }
1241
1242 void
1243 kernel_image4_roll_nonce(
1244 const img4_nonce_domain_index_t ndi)
1245 {
1246 return CSM_PREFIX(image4_roll_nonce)(ndi);
1247 }
1248
1249 errno_t
1250 kernel_image4_copy_nonce(
1251 const img4_nonce_domain_index_t ndi,
1252 img4_nonce_t *nonce_out)
1253 {
1254 return CSM_PREFIX(image4_copy_nonce)(ndi, nonce_out);
1255 }
1256
1257 errno_t
1258 kernel_image4_execute_object(
1259 img4_runtime_object_spec_index_t obj_spec_index,
1260 const img4_buff_t *payload,
1261 const img4_buff_t *manifest)
1262 {
1263 return CSM_PREFIX(image4_execute_object)(
1264 obj_spec_index,
1265 payload,
1266 manifest);
1267 }
1268
1269 errno_t
1270 kernel_image4_copy_object(
1271 img4_runtime_object_spec_index_t obj_spec_index,
1272 vm_address_t object_out,
1273 size_t *object_length)
1274 {
1275 return CSM_PREFIX(image4_copy_object)(
1276 obj_spec_index,
1277 object_out,
1278 object_length);
1279 }
1280
1281 const void*
1282 kernel_image4_get_monitor_exports(void)
1283 {
1284 return CSM_PREFIX(image4_get_monitor_exports)();
1285 }
1286
1287 errno_t
1288 kernel_image4_set_release_type(
1289 const char *release_type)
1290 {
1291 return CSM_PREFIX(image4_set_release_type)(release_type);
1292 }
1293
1294 errno_t
1295 kernel_image4_set_bnch_shadow(
1296 const img4_nonce_domain_index_t ndi)
1297 {
1298 return CSM_PREFIX(image4_set_bnch_shadow)(ndi);
1299 }
1300
1301 #pragma mark Image4 - New
1302
1303
1304
1305 static errno_t
1306 _kernel_image4_monitor_trap_image_activate(
1307 image4_cs_trap_t selector,
1308 const void *input_data)
1309 {
1310 /*
1311 * csmx_payload (csmx_payload_len) --> __cs_xfer
1312 * csmx_manifest (csmx_manifest_len) --> __cs_borrow
1313 */
1314 image4_cs_trap_argv(image_activate) input = {0};
1315 vm_address_t payload_addr = 0;
1316 vm_address_t manifest_addr = 0;
1317 errno_t err = EPERM;
1318
1319 /* Copy the input data */
1320 memcpy(&input, input_data, sizeof(input));
1321
1322 payload_addr = code_signing_allocate(input.csmx_payload_len);
1323 if (payload_addr == 0) {
1324 goto out;
1325 }
1326 memcpy((void*)payload_addr, (void*)input.csmx_payload, input.csmx_payload_len);
1327
1328 manifest_addr = code_signing_allocate(input.csmx_manifest_len);
1329 if (manifest_addr == 0) {
1330 goto out;
1331 }
1332 memcpy((void*)manifest_addr, (void*)input.csmx_manifest, input.csmx_manifest_len);
1333
1334 /* Transfer both regions to the monitor */
1335 CSM_PREFIX(image4_transfer_region)(selector, payload_addr, input.csmx_payload_len);
1336 CSM_PREFIX(image4_transfer_region)(selector, manifest_addr, input.csmx_manifest_len);
1337
1338 /* Setup the input with new addresses */
1339 input.csmx_payload = payload_addr;
1340 input.csmx_manifest = manifest_addr;
1341
1342 /* Trap into the monitor for this selector */
1343 err = CSM_PREFIX(image4_monitor_trap)(selector, &input, sizeof(input));
1344
1345 out:
1346 if ((err != 0) && (payload_addr != 0)) {
1347 /* Retyping only happens after allocating the manifest */
1348 if (manifest_addr != 0) {
1349 CSM_PREFIX(image4_reclaim_region)(
1350 selector, payload_addr, input.csmx_payload_len);
1351 }
1352 code_signing_deallocate(&payload_addr, input.csmx_payload_len);
1353 }
1354
1355 if (manifest_addr != 0) {
1356 /* Reclaim the manifest region -- will be retyped if not NULL */
1357 CSM_PREFIX(image4_reclaim_region)(
1358 selector, manifest_addr, input.csmx_manifest_len);
1359
1360 /* Deallocate the manifest region */
1361 code_signing_deallocate(&manifest_addr, input.csmx_manifest_len);
1362 }
1363
1364 return err;
1365 }
1366
1367 static errno_t
1368 _kernel_image4_monitor_trap(
1369 image4_cs_trap_t selector,
1370 const void *input_data,
1371 size_t input_size)
1372 {
1373 /* Validate input size for the selector */
1374 if (input_size != image4_cs_trap_vector_size(selector)) {
1375 printf("image4 dispatch: invalid input: %llu | %lu\n", selector, input_size);
1376 return EINVAL;
1377 }
1378
1379 switch (selector) {
1380 case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1381 return _kernel_image4_monitor_trap_image_activate(selector, input_data);
1382
1383 default:
1384 return CSM_PREFIX(image4_monitor_trap)(selector, input_data, input_size);
1385 }
1386 }
1387
1388 errno_t
1389 kernel_image4_monitor_trap(
1390 image4_cs_trap_t selector,
1391 const void *input_data,
1392 size_t input_size,
1393 __unused void *output_data,
1394 __unused size_t *output_size)
1395 {
1396 size_t length_check = 0;
1397
1398 /* Input data is always required */
1399 if ((input_data == NULL) || (input_size == 0)) {
1400 printf("image4 dispatch: no input data: %llu\n", selector);
1401 return EINVAL;
1402 } else if (os_add_overflow((vm_address_t)input_data, input_size, &length_check)) {
1403 panic("image4 dispatch: overflow on input: %p | %lu", input_data, input_size);
1404 }
1405
1406 return _kernel_image4_monitor_trap(selector, input_data, input_size);
1407 }
1408