1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <pexpert/pexpert.h>
25 #include <pexpert/device_tree.h>
26 #include <mach/boolean.h>
27 #include <mach/vm_param.h>
28 #include <vm/vm_kern.h>
29 #include <vm/pmap_cs.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/lock_rw.h>
34 #include <libkern/libkern.h>
35 #include <libkern/section_keywords.h>
36 #include <libkern/img4/interface.h>
37 #include <libkern/amfi/amfi.h>
38 #include <sys/vm.h>
39 #include <sys/proc.h>
40 #include <sys/codesign.h>
41 #include <sys/trust_caches.h>
42 #include <IOKit/IOBSD.h>
43 #include <img4/firmware.h>
44 #include <TrustCache/API.h>
45
/*
 * One-shot flags: at most one kTCTypeCryptex1BootOS and one kTCTypeCryptex1BootApp
 * trust cache may ever be loaded; load_trust_cache_with_type() checks and sets these.
 */
static bool boot_os_tc_loaded = false;
static bool boot_app_tc_loaded = false;
48
49 #if PMAP_CS_PPL_MONITOR
50 /*
51 * We have the Page Protection Layer environment available. All of our artifacts
52 * need to be page-aligned. The PPL will lockdown the artifacts before it begins
53 * the validation.
54 *
55 * Even though the runtimes are PPL owned, we expect the runtime init function
56 * to be called before the PPL has been locked down, which allows us to write
57 * to them.
58 */
59
/* Immutable part of the runtime -- backed by PPL-owned storage (ppl_trust_cache_rt) */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &ppl_trust_cache_rt;

/* Mutable part of the runtime -- backed by PPL-owned storage (ppl_trust_cache_mut_rt) */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &ppl_trust_cache_mut_rt;
65
66 void
trust_cache_runtime_init(void)67 trust_cache_runtime_init(void)
68 {
69 bool allow_second_static_cache = false;
70 bool allow_engineering_caches = false;
71
72 #if CONFIG_SECOND_STATIC_TRUST_CACHE
73 allow_second_static_cache = true;
74 #endif
75
76 #if PMAP_CS_INCLUDE_INTERNAL_CODE
77 allow_engineering_caches = true;
78 #endif
79
80 /* Image4 interface needs to be available */
81 if (img4if == NULL) {
82 panic("image4 interface not available");
83 }
84
85 trustCacheInitializeRuntime(
86 trust_cache_rt,
87 trust_cache_mut_rt,
88 allow_second_static_cache,
89 allow_engineering_caches,
90 false,
91 IMG4_RUNTIME_PMAP_CS);
92
93 /* Locks are initialized in "pmap_bootstrap()" */
94 }
95
/*
 * Stage a trust cache load for the PPL: wrap the img4 payload in a
 * pmap_img4_payload_t, copy the payload and manifest(s) into page-rounded
 * kernel allocations (the PPL locks down whole pages), then trap into the PPL
 * to validate and load.
 *
 * Ownership: the manifests are only needed during validation and are always
 * freed here. The payload allocation must persist on success -- the PPL keeps
 * referencing it -- so it is only freed on failure.
 */
static kern_return_t
ppl_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;
	vm_address_t payload_addr = 0;
	vm_size_t payload_len = 0;
	vm_size_t payload_len_aligned = 0;
	vm_address_t manifest_addr = 0;
	vm_size_t manifest_len_aligned = 0;
	vm_address_t aux_manifest_addr = 0;
	vm_size_t aux_manifest_len_aligned = 0;

	/* The trust cache data structure is bundled with the img4 payload */
	if (os_add_overflow(img4_payload_len, sizeof(pmap_img4_payload_t), &payload_len)) {
		panic("overflow on pmap img4 payload: %lu", img4_payload_len);
	}
	/* All artifacts handed to the PPL must be whole pages */
	payload_len_aligned = round_page(payload_len);
	manifest_len_aligned = round_page(img4_manifest_len);
	aux_manifest_len_aligned = round_page(img4_aux_manifest_len);

	ret = kmem_alloc(kernel_map, &payload_addr, payload_len_aligned,
	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for pmap image4 payload: %d\n", ret);
		goto out;
	}

	/* The wrapper header lives at the start; the raw payload follows it */
	pmap_img4_payload_t *pmap_payload = (pmap_img4_payload_t*)payload_addr;
	memcpy(pmap_payload->img4_payload, img4_payload, img4_payload_len);

	/* Allocate storage for the manifest */
	ret = kmem_alloc(kernel_map, &manifest_addr, manifest_len_aligned,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	if (aux_manifest_len_aligned != 0) {
		/* Allocate storage for the auxiliary manifest */
		ret = kmem_alloc(kernel_map, &aux_manifest_addr, aux_manifest_len_aligned,
		    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
		if (ret != KERN_SUCCESS) {
			printf("unable to allocate memory for auxiliary image4 manifest: %d\n", ret);
			goto out;
		}
		memcpy((void*)aux_manifest_addr, img4_aux_manifest, img4_aux_manifest_len);
	}

	/* The PPL will round up the length to page size itself */
	ret = pmap_load_trust_cache_with_type(
		type,
		payload_addr, payload_len,
		manifest_addr, img4_manifest_len,
		aux_manifest_addr, img4_aux_manifest_len);

out:
	/* Manifests are validation-only artifacts -- always release them */
	if (aux_manifest_addr != 0) {
		kmem_free(kernel_map, aux_manifest_addr, aux_manifest_len_aligned);
		aux_manifest_addr = 0;
		aux_manifest_len_aligned = 0;
	}

	if (manifest_addr != 0) {
		kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
		manifest_addr = 0;
		manifest_len_aligned = 0;
	}

	/* The payload is retained on success -- the PPL references it from now on */
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_len_aligned);
		payload_addr = 0;
		payload_len_aligned = 0;
	}

	return ret;
}
178
/*
 * Legacy (bare-module) trust caches are never accepted when the PPL monitor is
 * present. The arguments are ignored; reaching this path is a fatal error.
 */
static kern_return_t
ppl_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}
185
/*
 * Query the trust cache runtime for a cdhash by trapping into the PPL.
 * Returns the result of pmap_query_trust_cache() unchanged.
 */
static kern_return_t
ppl_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	/*
	 * We need to query by trapping into the PPL since the PPL trust cache runtime
	 * lock needs to be held. We cannot hold the lock from outside the PPL.
	 */
	return pmap_query_trust_cache(query_type, cdhash, query_token);
}
198
199 #else
200 /*
201 * We don't have a monitor environment available. This means someone with a kernel
202 * memory exploit will be able to inject a trust cache into the system. There is
203 * not much we can do here, since this is older HW.
204 */
205
/* Lock for the runtime -- with no monitor, xnu itself must serialize runtime access */
LCK_GRP_DECLARE(trust_cache_lck_grp, "trust_cache_lck_grp");
decl_lck_rw_data(, trust_cache_rt_lock);

/* Immutable part of the runtime -- storage plus the pointer the rest of the file uses */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t) trust_cache_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &trust_cache_rt_storage;

/* Mutable part of the runtime -- must stay writable, so no read-only-late on the storage */
TrustCacheMutableRuntime_t trust_cache_mut_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &trust_cache_mut_rt_storage;
217
218 void
trust_cache_runtime_init(void)219 trust_cache_runtime_init(void)
220 {
221 bool allow_second_static_cache = false;
222 bool allow_engineering_caches = false;
223 bool allow_legacy_caches = false;
224
225 #if CONFIG_SECOND_STATIC_TRUST_CACHE
226 allow_second_static_cache = true;
227 #endif
228
229 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
230 allow_engineering_caches = true;
231 #endif
232
233 #ifdef XNU_PLATFORM_BridgeOS
234 allow_legacy_caches = true;
235 #endif
236
237 /* Image4 interface needs to be available */
238 if (img4if == NULL) {
239 panic("image4 interface not available");
240 }
241
242 trustCacheInitializeRuntime(
243 trust_cache_rt,
244 trust_cache_mut_rt,
245 allow_second_static_cache,
246 allow_engineering_caches,
247 allow_legacy_caches,
248 IMG4_RUNTIME_DEFAULT);
249
250 /* Initialize the read-write lock */
251 lck_rw_init(&trust_cache_rt_lock, &trust_cache_lck_grp, 0);
252 }
253
254 static kern_return_t
xnu_load_trust_cache(TCType_t type,const uint8_t * img4_payload,const size_t img4_payload_len,const uint8_t * img4_manifest,const size_t img4_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)255 xnu_load_trust_cache(
256 TCType_t type,
257 const uint8_t *img4_payload, const size_t img4_payload_len,
258 const uint8_t *img4_manifest, const size_t img4_manifest_len,
259 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
260 {
261 kern_return_t ret = KERN_DENIED;
262
263 /* Ignore the auxiliary manifest until we add support for it */
264 (void)img4_aux_manifest;
265 (void)img4_aux_manifest_len;
266
267 /* AMFI interface needs to be available */
268 if (amfi == NULL) {
269 panic("amfi interface not available");
270 }
271
272 const TrustCacheInterface_t *interface = &amfi->TrustCache;
273 if (interface->version < 1) {
274 /* AMFI change hasn't landed in the build */
275 printf("unable to load trust cache (type: %u): interface not supported\n", type);
276 return KERN_NOT_SUPPORTED;
277 }
278
279 /* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
280 TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
281 assert(trust_cache != NULL);
282
283 /*
284 * The manifests aren't needed after the validation is complete, but the payload needs
285 * to persist. The caller of this API expects us to make our own allocations. Since we
286 * don't need the manifests after validation, we can use the manifests passed in to us
287 * but we need to make a new allocation for the payload, since that needs to persist.
288 *
289 * Z_WAITOK implies that this allocation can never fail.
290 */
291 uint8_t *payload = (uint8_t*)kalloc_data(img4_payload_len, Z_WAITOK);
292 assert(payload != NULL);
293
294 /* Copy the payload into our allocation */
295 memcpy(payload, img4_payload, img4_payload_len);
296
297 /* Exclusively lock the runtime */
298 lck_rw_lock_exclusive(&trust_cache_rt_lock);
299
300 TCReturn_t tc_ret = interface->load(
301 trust_cache_rt,
302 type,
303 trust_cache,
304 (const uintptr_t)payload, img4_payload_len,
305 (const uintptr_t)img4_manifest, img4_manifest_len);
306
307 /* Unlock the runtime */
308 lck_rw_unlock_exclusive(&trust_cache_rt_lock);
309
310 if (tc_ret.error == kTCReturnSuccess) {
311 ret = KERN_SUCCESS;
312 } else if (tc_ret.error == kTCReturnDuplicate) {
313 ret = KERN_ALREADY_IN_SET;
314 } else {
315 printf("unable to load trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
316 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
317
318 ret = KERN_FAILURE;
319 }
320
321 if (ret != KERN_SUCCESS) {
322 kfree_data(payload, img4_payload_len);
323 payload = NULL;
324
325 kfree_type(TrustCache_t, trust_cache);
326 trust_cache = NULL;
327 }
328 return ret;
329 }
330
/*
 * Load a legacy (bare-module, unauthenticated) trust cache through the
 * in-kernel runtime. Only compiled in when the platform still supports legacy
 * loading (XNU_HAS_LEGACY_TRUST_CACHE_LOADING); otherwise this path panics.
 *
 * Mirrors xnu_load_trust_cache(): the module is copied into an allocation we
 * own since it must persist after a successful load; on failure both
 * allocations made here are released.
 */
static kern_return_t
xnu_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
#if XNU_HAS_LEGACY_TRUST_CACHE_LOADING
	kern_return_t ret = KERN_DENIED;

	/* AMFI interface needs to be available */
	if (amfi == NULL) {
		panic("amfi interface not available");
	}

	const TrustCacheInterface_t *interface = &amfi->TrustCache;
	if (interface->version < 1) {
		/* AMFI change hasn't landed in the build */
		printf("unable to load legacy trust cache: interface not supported\n");
		return KERN_NOT_SUPPORTED;
	}

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/* Allocate storage for the module -- Z_WAITOK means this can't fail */
	uint8_t *module = (uint8_t*)kalloc_data(module_size, Z_WAITOK);
	assert(module != NULL);

	/* Copy the module into our allocation */
	memcpy(module, module_data, module_size);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = interface->loadModule(
		trust_cache_rt,
		kTCTypeLegacy,
		trust_cache,
		(const uintptr_t)module, module_size);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load legacy trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	/* On failure (including duplicates) nothing references our allocations */
	if (ret != KERN_SUCCESS) {
		kfree_data(module, module_size);
		module = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
#else
	panic("legacy trust caches are not supported on this platform");
#endif /* XNU_HAS_LEGACY_TRUST_CACHE_LOADING */
}
396
397 static kern_return_t
xnu_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)398 xnu_query_trust_cache(
399 TCQueryType_t query_type,
400 const uint8_t cdhash[kTCEntryHashSize],
401 TrustCacheQueryToken_t *query_token)
402 {
403 kern_return_t ret = KERN_NOT_FOUND;
404
405 /* AMFI interface needs to be available */
406 if (amfi == NULL) {
407 panic("amfi interface not available");
408 }
409
410 const TrustCacheInterface_t *interface = &amfi->TrustCache;
411 if (interface->version < 1) {
412 /* AMFI change hasn't landed in the build */
413 printf("unable to query trust cache: interface not supported\n");
414 return KERN_NOT_SUPPORTED;
415 }
416
417 /* Validate the query type preemptively */
418 if (query_type >= kTCQueryTypeTotal) {
419 printf("unable to query trust cache: invalid query type: %u\n", query_type);
420 return KERN_INVALID_ARGUMENT;
421 }
422
423 /* Lock the runtime as shared */
424 lck_rw_lock_shared(&trust_cache_rt_lock);
425
426 TCReturn_t tc_ret = interface->query(
427 trust_cache_rt,
428 query_type,
429 cdhash,
430 query_token);
431
432 /* Unlock the runtime */
433 lck_rw_unlock_shared(&trust_cache_rt_lock);
434
435 if (tc_ret.error == kTCReturnSuccess) {
436 ret = KERN_SUCCESS;
437 } else if (tc_ret.error == kTCReturnNotFound) {
438 ret = KERN_NOT_FOUND;
439 } else {
440 ret = KERN_FAILURE;
441 printf("trust cache query failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
442 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
443 }
444
445 return ret;
446 }
447
#endif /* PMAP_CS_PPL_MONITOR */
449
450 kern_return_t
load_trust_cache(const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len)451 load_trust_cache(
452 const uint8_t *img4_object, const size_t img4_object_len,
453 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len)
454 {
455 TCType_t type = kTCTypeInvalid;
456 kern_return_t ret = KERN_DENIED;
457
458 /* Start from the first valid type and attempt to validate through each */
459 for (type = kTCTypeLTRS; type < kTCTypeTotal; type += 1) {
460 ret = load_trust_cache_with_type(
461 type,
462 img4_object, img4_object_len,
463 img4_ext_manifest, img4_ext_manifest_len,
464 NULL, 0);
465
466 if ((ret == KERN_SUCCESS) || (ret == KERN_ALREADY_IN_SET)) {
467 return ret;
468 }
469 }
470
471 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
472 /* Attempt to load as an engineering root */
473 ret = load_trust_cache_with_type(
474 kTCTypeDTRS,
475 img4_object, img4_object_len,
476 img4_ext_manifest, img4_ext_manifest_len,
477 NULL, 0);
478 #endif
479
480 return ret;
481 }
482
483 kern_return_t
load_trust_cache_with_type(TCType_t type,const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)484 load_trust_cache_with_type(
485 TCType_t type,
486 const uint8_t *img4_object, const size_t img4_object_len,
487 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len,
488 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
489 {
490 kern_return_t ret = KERN_DENIED;
491 uintptr_t length_check = 0;
492 const uint8_t *img4_payload = NULL;
493 size_t img4_payload_len = 0;
494 const uint8_t *img4_manifest = NULL;
495 size_t img4_manifest_len = 0;
496
497 /* Image4 interface needs to be available */
498 if (img4if == NULL) {
499 panic("image4 interface not available");
500 }
501
502 /* img4_object is required */
503 if (!img4_object || (img4_object_len == 0)) {
504 printf("unable to load trust cache (type: %u): no img4_object provided\n", type);
505 return KERN_INVALID_ARGUMENT;
506 } else if (os_add_overflow((uintptr_t)img4_object, img4_object_len, &length_check)) {
507 panic("overflow on the img4 object: %p | %lu", img4_object, img4_object_len);
508 }
509
510 /* img4_ext_manifest is optional */
511 if (img4_ext_manifest_len != 0) {
512 if (!img4_ext_manifest) {
513 printf("unable to load trust cache (type: %u): img4_ext_manifest expected\n", type);
514 return KERN_INVALID_ARGUMENT;
515 } else if (os_add_overflow((uintptr_t)img4_ext_manifest, img4_ext_manifest_len, &length_check)) {
516 panic("overflow on the ext manifest: %p | %lu", img4_ext_manifest, img4_ext_manifest_len);
517 }
518 }
519
520 /* img4_aux_manifest is optional */
521 if (img4_aux_manifest_len != 0) {
522 if (!img4_aux_manifest) {
523 printf("unable to load trust cache (type: %u): img4_aux_manifest expected\n", type);
524 return KERN_INVALID_ARGUMENT;
525 } else if (os_add_overflow((uintptr_t)img4_aux_manifest, img4_aux_manifest_len, &length_check)) {
526 panic("overflow on the ext manifest: %p | %lu", img4_aux_manifest, img4_aux_manifest_len);
527 }
528 }
529
530 /*
531 * If we don't have an external manifest provided, we expect the img4_object to have
532 * the manifest embedded. In this case, we need to extract the different artifacts
533 * out of the object.
534 */
535 if (img4_ext_manifest_len != 0) {
536 img4_payload = img4_object;
537 img4_payload_len = img4_object_len;
538 img4_manifest = img4_ext_manifest;
539 img4_manifest_len = img4_ext_manifest_len;
540 } else {
541 if (img4if->i4if_version < 15) {
542 /* AppleImage4 change hasn't landed in the build */
543 printf("unable to extract payload and manifest from object\n");
544 return KERN_NOT_SUPPORTED;
545 }
546 img4_buff_t img4_buff = IMG4_BUFF_INIT;
547
548 /* Extract the payload */
549 if (img4_get_payload(img4_object, img4_object_len, &img4_buff) == NULL) {
550 printf("unable to find payload within img4 object\n");
551 return KERN_NOT_FOUND;
552 }
553 img4_payload = img4_buff.i4b_bytes;
554 img4_payload_len = img4_buff.i4b_len;
555
556 /* Extract the manifest */
557 if (img4_get_manifest(img4_object, img4_object_len, &img4_buff) == NULL) {
558 printf("unable to find manifest within img4 object\n");
559 return KERN_NOT_FOUND;
560 }
561 img4_manifest = img4_buff.i4b_bytes;
562 img4_manifest_len = img4_buff.i4b_len;
563 }
564
565 if ((type == kTCTypeStatic) || (type == kTCTypeEngineering) || (type == kTCTypeLegacy)) {
566 printf("unable to load trust cache: invalid type: %u\n", type);
567 return KERN_INVALID_ARGUMENT;
568 } else if (type >= kTCTypeTotal) {
569 printf("unable to load trust cache: unknown type: %u\n", type);
570 return KERN_INVALID_ARGUMENT;
571 }
572
573 /* Validate entitlement for the calling process */
574 if (TCTypeConfig[type].entitlementValue != NULL) {
575 const bool entitlement_satisfied = IOCurrentTaskHasStringEntitlement(
576 "com.apple.private.pmap.load-trust-cache",
577 TCTypeConfig[type].entitlementValue);
578
579 if (entitlement_satisfied == false) {
580 printf("unable to load trust cache (type: %u): unsatisfied entitlement\n", type);
581 return KERN_DENIED;
582 }
583 }
584
585 if ((type == kTCTypeCryptex1BootOS) && boot_os_tc_loaded) {
586 printf("disallowed to load multiple kTCTypeCryptex1BootOS trust caches\n");
587 return KERN_DENIED;
588 } else if ((type == kTCTypeCryptex1BootApp) && boot_app_tc_loaded) {
589 printf("disallowed to load multiple kTCTypeCryptex1BootApp trust caches\n");
590 return KERN_DENIED;
591 }
592
593 #if PMAP_CS_PPL_MONITOR
594 ret = ppl_load_trust_cache(
595 type,
596 img4_payload, img4_payload_len,
597 img4_manifest, img4_manifest_len,
598 img4_aux_manifest, img4_aux_manifest_len);
599 #else
600 ret = xnu_load_trust_cache(
601 type,
602 img4_payload, img4_payload_len,
603 img4_manifest, img4_manifest_len,
604 img4_aux_manifest, img4_aux_manifest_len);
605 #endif
606
607 if (ret != KERN_SUCCESS) {
608 printf("unable to load trust cache (type: %u): %d\n", type, ret);
609 } else {
610 if (type == kTCTypeCryptex1BootOS) {
611 boot_os_tc_loaded = true;
612 } else if (type == kTCTypeCryptex1BootApp) {
613 boot_app_tc_loaded = true;
614 }
615 printf("successfully loaded trust cache of type: %u\n", type);
616 }
617
618 return ret;
619 }
620
621 kern_return_t
load_legacy_trust_cache(const uint8_t * module_data,const size_t module_size)622 load_legacy_trust_cache(
623 const uint8_t *module_data, const size_t module_size)
624 {
625 kern_return_t ret = KERN_DENIED;
626 uintptr_t length_check = 0;
627
628 /* Module is required */
629 if (!module_data || (module_size == 0)) {
630 printf("unable to load legacy trust cache: no module provided\n");
631 return KERN_INVALID_ARGUMENT;
632 } else if (os_add_overflow((uintptr_t)module_data, module_size, &length_check)) {
633 panic("overflow on the module: %p | %lu", module_data, module_size);
634 }
635
636 #if PMAP_CS_PPL_MONITOR
637 ret = ppl_load_legacy_trust_cache(module_data, module_size);
638 #else
639 ret = xnu_load_legacy_trust_cache(module_data, module_size);
640 #endif
641
642 if (ret != KERN_SUCCESS) {
643 printf("unable to load legacy trust cache: %d\n", ret);
644 } else {
645 printf("successfully loaded legacy trust cache\n");
646 }
647
648 return ret;
649 }
650
651 kern_return_t
query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)652 query_trust_cache(
653 TCQueryType_t query_type,
654 const uint8_t cdhash[kTCEntryHashSize],
655 TrustCacheQueryToken_t *query_token)
656 {
657 kern_return_t ret = KERN_NOT_FOUND;
658
659 if (cdhash == NULL) {
660 printf("unable to query trust caches: no cdhash provided\n");
661 return KERN_INVALID_ARGUMENT;
662 }
663
664 #if PMAP_CS_PPL_MONITOR
665 ret = ppl_query_trust_cache(query_type, cdhash, query_token);
666 #else
667 ret = xnu_query_trust_cache(query_type, cdhash, query_token);
668 #endif
669
670 return ret;
671 }
672
673 /*
674 * The trust cache management library uses a wrapper data structure to manage each
675 * of the trust cache modules. We know the exact number of static trust caches we
676 * expect, so we keep around a read-only-late allocation of the data structure for
677 * use.
678 *
679 * Since engineering trust caches are only ever allowed on development builds, they
680 * are not protected through the read-only-late property, and instead allocated
681 * dynamically.
682 */
683
684 SECURITY_READ_ONLY_LATE(bool) trust_cache_static_init = false;
685 SECURITY_READ_ONLY_LATE(bool) trust_cache_static_loaded = true;
686 SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static0 = {0};
687
688 #if CONFIG_SECOND_STATIC_TRUST_CACHE
689 SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static1 = {0};
690 #endif
691
692 #if defined(__arm64__)
693
694 /*
695 * On arm platforms, the static and engineering trust caches are part of the EXTRADATA
696 * segment. The device tree is also a part of the same segment. When building for this
697 * platform, we ensure the format of the EXTRADATA segment is how we expect it to be.
698 */
699 extern vm_offset_t segEXTRADATA;
700 extern unsigned long segSizeEXTRADATA;
701
702 typedef uint64_t pmap_paddr_t;
703 extern vm_map_address_t phystokv(pmap_paddr_t pa);
704
705 #else /* x86_64 */
706 /*
707 * We need this duplicate definition because it is hidden behind the MACH_KERNEL_PRIVATE
708 * macro definition, which makes it inaccessible to this part of the code base.
709 */
710 extern uint64_t physmap_base, physmap_max;
711
712 static inline void*
PHYSMAP_PTOV_check(void * paddr)713 PHYSMAP_PTOV_check(void *paddr)
714 {
715 uint64_t pvaddr = (uint64_t)paddr + physmap_base;
716
717 if (__improbable(pvaddr >= physmap_max)) {
718 panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
719 pvaddr, physmap_base, physmap_max);
720 }
721
722 return (void*)pvaddr;
723 }
724
725 #define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
726 #define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))
727
#endif /* defined(__arm64__) */
729
/*
 * Discover and load the externally-provided trust cache modules described by
 * the "chosen/memory-map/TrustCache" device tree property. The first module is
 * always the static trust cache (a load failure there is fatal); an optional
 * second static module follows when the runtime allows it; any remaining
 * modules are engineering trust caches, loaded only when the runtime permits
 * them.
 */
void
load_static_trust_cache(void)
{
	DTEntry memory_map = {0};
	const DTTrustCacheRange *tc_range = NULL;
	trust_cache_offsets_t *tc_offsets = NULL;
	unsigned int tc_dt_prop_length = 0;
	size_t tc_segment_length = 0;

	/* Mark this function as having been called */
	trust_cache_static_init = true;

	/* Nothing to do when the runtime isn't set */
	if (trust_cache_rt == NULL) {
		return;
	}

	/* AMFI interface needs to be available */
	if (amfi == NULL) {
		panic("amfi interface not available");
	}

	const TrustCacheInterface_t *interface = &amfi->TrustCache;
	if (interface->version < 1) {
		/* AMFI change hasn't landed in the build */
		printf("unable to load static trust cache: interface not supported\n");
		return;
	}

	int err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	if (err != kSuccess) {
		printf("unable to find chosen/memory-map in the device tree: %d\n", err);
		return;
	}

	err = SecureDTGetProperty(memory_map, "TrustCache", (const void **)&tc_range, &tc_dt_prop_length);
	if (err == kSuccess) {
		if (tc_dt_prop_length != sizeof(DTTrustCacheRange)) {
			panic("unexpected size for TrustCache property: %u != %zu",
			    tc_dt_prop_length, sizeof(DTTrustCacheRange));
		}

		/* The property gives a physical range; map it into the kernel's view */
		tc_offsets = (void*)phystokv(tc_range->paddr);
		tc_segment_length = tc_range->length;
	}

	/* x86_64 devices aren't expected to have trust caches */
	if (tc_segment_length == 0) {
		/* tc_offsets may still be NULL here if the property lookup failed */
		if (tc_offsets && tc_offsets->num_caches != 0) {
			panic("trust cache segment is zero length but trust caches are available: %u",
			    tc_offsets->num_caches);
		}

		printf("no external trust caches found (segment length is zero)\n");
		return;
	} else if (tc_offsets->num_caches == 0) {
		panic("trust cache segment isn't zero but no trust caches available: %lu",
		    (unsigned long)tc_segment_length);
	}

	/* Sanity-check that the offsets header and table fit inside the segment */
	size_t offsets_length = 0;
	size_t struct_length = 0;
	if (os_mul_overflow(tc_offsets->num_caches, sizeof(uint32_t), &offsets_length)) {
		panic("overflow on the number of trust caches provided: %u", tc_offsets->num_caches);
	} else if (os_add_overflow(offsets_length, sizeof(trust_cache_offsets_t), &struct_length)) {
		panic("overflow on length of the trust cache offsets: %lu",
		    (unsigned long)offsets_length);
	} else if (tc_segment_length < struct_length) {
		panic("trust cache segment length smaller than required: %lu | %lu",
		    (unsigned long)tc_segment_length, (unsigned long)struct_length);
	}
	const uintptr_t tc_region_end = (uintptr_t)tc_offsets + tc_segment_length;

	printf("attempting to load %u external trust cache modules\n", tc_offsets->num_caches);

	for (uint32_t i = 0; i < tc_offsets->num_caches; i++) {
		TCReturn_t tc_ret = (TCReturn_t){.error = kTCReturnError};
		TCType_t tc_type = kTCTypeEngineering;
		TrustCache_t *trust_cache = NULL;

		/* Each module lives at a byte offset from the start of the segment */
		uintptr_t tc_module = 0;
		if (os_add_overflow((uintptr_t)tc_offsets, tc_offsets->offsets[i], &tc_module)) {
			panic("trust cache module start overflows: %u | %lu | %u",
			    i, (unsigned long)tc_offsets, tc_offsets->offsets[i]);
		} else if (tc_module >= tc_region_end) {
			panic("trust cache module begins after segment ends: %u | %lx | %lx",
			    i, (unsigned long)tc_module, tc_region_end);
		}

		/* Should be safe for underflow */
		const size_t buffer_length = tc_region_end - tc_module;

		/* The first module is always the static trust cache */
		if (i == 0) {
			tc_type = kTCTypeStatic;
			trust_cache = &trust_cache_static0;
		}

#if CONFIG_SECOND_STATIC_TRUST_CACHE
		if (trust_cache_rt->allowSecondStaticTC && (i == 1)) {
			tc_type = kTCTypeStatic;
			trust_cache = &trust_cache_static1;
		}
#endif

		/* Everything that isn't a static slot is an engineering trust cache */
		if (tc_type == kTCTypeEngineering) {
			if (trust_cache_rt->allowEngineeringTC == false) {
				printf("skipping engineering trust cache module: %u\n", i);
				continue;
			}

			/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
			trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
			assert(trust_cache != NULL);
		}

		tc_ret = interface->loadModule(
			trust_cache_rt,
			tc_type,
			trust_cache,
			tc_module, buffer_length);

		if (tc_ret.error != kTCReturnSuccess) {
			printf("unable to load trust cache module: %u (TCReturn: 0x%02X | 0x%02X | %u)\n",
			    i, tc_ret.component, tc_ret.error, tc_ret.uniqueError);

			if (tc_type == kTCTypeStatic) {
				panic("failed to load static trust cache module: %u", i);
			}
			continue;
		}
		printf("loaded external trust cache module: %u\n", i);

		/*
		 * The first module is always loaded as a static trust cache. If loading it failed,
		 * then this function would've panicked. If we reach here, it means we've loaded a
		 * static trust cache on the system.
		 */
		trust_cache_static_loaded = true;
	}

	printf("completed loading external trust cache modules\n");
}
873
874 kern_return_t
static_trust_cache_capabilities(uint32_t * num_static_trust_caches,TCCapabilities_t * capabilities0,TCCapabilities_t * capabilities1)875 static_trust_cache_capabilities(
876 uint32_t *num_static_trust_caches,
877 TCCapabilities_t *capabilities0,
878 TCCapabilities_t *capabilities1)
879 {
880 TCReturn_t tcRet = {.error = kTCReturnError};
881
882 *num_static_trust_caches = 0;
883 *capabilities0 = kTCCapabilityNone;
884 *capabilities1 = kTCCapabilityNone;
885
886 /* Ensure static trust caches have been initialized */
887 if (trust_cache_static_init == false) {
888 panic("attempted to query static trust cache capabilities without init");
889 }
890
891 const TrustCacheInterface_t *interface = &amfi->TrustCache;
892 if (interface->version < 2) {
893 /* AMFI change hasn't landed in the build */
894 printf("unable to get static trust cache capabilities: interface not supported\n");
895 return KERN_NOT_SUPPORTED;
896 } else if (trust_cache_static_loaded == false) {
897 /* Return arguments already set */
898 return KERN_SUCCESS;
899 }
900
901 tcRet = interface->getCapabilities(&trust_cache_static0, capabilities0);
902 assert(tcRet.error == kTCReturnSuccess);
903 *num_static_trust_caches += 1;
904
905 #if CONFIG_SECOND_STATIC_TRUST_CACHE
906 tcRet = interface->getCapabilities(&trust_cache_static1, capabilities1);
907 assert(tcRet.error == kTCReturnSuccess);
908 *num_static_trust_caches += 1;
909 #endif
910
911 return KERN_SUCCESS;
912 }
913