1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <pexpert/pexpert.h>
25 #include <pexpert/device_tree.h>
26 #include <mach/boolean.h>
27 #include <mach/vm_param.h>
28 #include <vm/vm_kern_xnu.h>
29 #include <vm/pmap_cs.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/lock_rw.h>
34 #include <libkern/libkern.h>
35 #include <libkern/section_keywords.h>
36 #include <libkern/img4/interface.h>
37 #include <libkern/amfi/amfi.h>
38 #include <sys/vm.h>
39 #include <sys/proc.h>
40 #include <sys/codesign.h>
41 #include <sys/trust_caches.h>
42 #include <sys/code_signing.h>
43 #include <IOKit/IOBSD.h>
44 #include <img4/firmware.h>
45 #include <TrustCache/API.h>
46
/*
 * One-shot flags: at most one Cryptex1 boot-OS and one boot-app trust cache
 * may ever be loaded. load_trust_cache_with_type() checks these before a
 * load and sets them after a successful one.
 */
static bool boot_os_tc_loaded = false;
static bool boot_app_tc_loaded = false;
49
50 #if CONFIG_SPTM
51 /*
52 * We have the TrustedExecutionMonitor environment available. All of our artifacts
53 * need to be page-aligned, and transferred to the appropriate TXM type before we
54 * call into TXM to load the trust cache.
55 *
56 * The trust cache runtime is managed independently by TXM. All initialization work
57 * is done by the TXM bootstrap and there is nothing more we need to do here.
58 */
59 #include <sys/trusted_execution_monitor.h>
60
/* Lock group and read-write lock serializing kernel calls into TXM's trust cache interface */
LCK_GRP_DECLARE(txm_trust_cache_lck_grp, "txm_trust_cache_lck_grp");
decl_lck_rw_data(, txm_trust_cache_lck);

/* Immutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = NULL;

/* Mutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = NULL;

/* Static trust cache information collected from TXM during trust_cache_runtime_init() */
SECURITY_READ_ONLY_LATE(uint32_t) num_static_trust_caches = 0;
SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities0 = 0;
SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities1 = 0;
74
75 static void
get_trust_cache_info(void)76 get_trust_cache_info(void)
77 {
78 txm_call_t txm_call = {
79 .selector = kTXMKernelSelectorGetTrustCacheInfo,
80 .failure_fatal = true,
81 .num_output_args = 4
82 };
83 txm_kernel_call(&txm_call);
84
85 /*
86 * The monitor returns the libTrustCache runtime it uses within the first
87 * returned word. The kernel doesn't currently have a use-case for this, so
88 * we don't use it. But we continue to return this value from the monitor
89 * in case it ever comes in use later down the line.
90 */
91
92 num_static_trust_caches = (uint32_t)txm_call.return_words[1];
93 static_trust_cache_capabilities0 = (TCCapabilities_t)txm_call.return_words[2];
94 static_trust_cache_capabilities1 = (TCCapabilities_t)txm_call.return_words[3];
95 }
96
97 void
trust_cache_runtime_init(void)98 trust_cache_runtime_init(void)
99 {
100 /* Image4 interface needs to be available */
101 if (img4if == NULL) {
102 panic("image4 interface not available");
103 }
104
105 /* AMFI interface needs to be available */
106 if (amfi == NULL) {
107 panic("amfi interface not available");
108 } else if (amfi->TrustCache.version < 2) {
109 panic("amfi interface is stale: %u", amfi->TrustCache.version);
110 }
111
112 /* Initialize the TXM trust cache read-write lock */
113 lck_rw_init(&txm_trust_cache_lck, &txm_trust_cache_lck_grp, 0);
114
115 /* Acquire trust cache information from the monitor */
116 get_trust_cache_info();
117 }
118
/*
 * Load a trust cache of the given type through TXM.
 *
 * The img4 payload and manifest are copied into fresh kernel allocations
 * and transferred to TXM ownership before the monitor call. On success the
 * payload region remains TXM-owned and allocated (it must persist while the
 * trust cache is resident); the manifest region is always reclaimed and
 * freed once the call completes. The auxiliary manifest is currently
 * unsupported and ignored.
 *
 * Returns KERN_SUCCESS, KERN_ALREADY_IN_SET when the monitor reports a
 * duplicate trust cache, or an allocation/monitor error otherwise.
 */
static kern_return_t
txm_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorLoadTrustCache,
		.num_input_args = 7
	};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	kern_return_t ret = KERN_DENIED;

	/* We don't support the auxiliary manifest for now */
	(void)img4_aux_manifest;
	(void)img4_aux_manifest_len;

	ret = kmem_alloc(kernel_map, &payload_addr, img4_payload_len,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 payload: %d\n", ret);
		goto out;
	}
	memcpy((void*)payload_addr, img4_payload, img4_payload_len);

	ret = kmem_alloc(kernel_map, &manifest_addr, img4_manifest_len,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	/* Transfer both regions to be TXM owned */
	txm_transfer_region(payload_addr, img4_payload_len);
	txm_transfer_region(manifest_addr, img4_manifest_len);

	/* Take the trust cache lock exclusively */
	lck_rw_lock_exclusive(&txm_trust_cache_lck);

	/* TXM will round-up to page length itself */
	ret = txm_kernel_call(
		&txm_call,
		type,
		payload_addr, img4_payload_len,
		manifest_addr, img4_manifest_len,
		0, 0);

	/* Release the trust cache lock */
	lck_rw_unlock_exclusive(&txm_trust_cache_lck);

	/* Check for duplicate trust cache error */
	if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
		if (txm_call.txm_ret.tcRet.error == kTCReturnDuplicate) {
			ret = KERN_ALREADY_IN_SET;
		}
	}

out:
	if (manifest_addr != 0) {
		/* Reclaim the manifest region */
		txm_reclaim_region(manifest_addr, img4_manifest_len);

		/* Free the manifest region */
		kmem_free(kernel_map, manifest_addr, img4_manifest_len);
		manifest_addr = 0;
	}

	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		/*
		 * NOTE(review): when the manifest allocation fails we arrive here
		 * with a payload that was never transferred to TXM, yet it is still
		 * passed to txm_reclaim_region() — confirm txm_reclaim_region()
		 * tolerates regions that were never transferred.
		 */
		/* Reclaim the payload region */
		txm_reclaim_region(payload_addr, img4_payload_len);

		/* Free the payload region */
		kmem_free(kernel_map, payload_addr, img4_payload_len);
		payload_addr = 0;
	}

	return ret;
}
200
/*
 * Legacy (module-only) trust caches are never accepted on TXM systems;
 * any attempt to load one is a fatal error.
 */
static kern_return_t
txm_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}
207
208 static kern_return_t
txm_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)209 txm_query_trust_cache(
210 TCQueryType_t query_type,
211 const uint8_t cdhash[kTCEntryHashSize],
212 TrustCacheQueryToken_t *query_token)
213 {
214 txm_call_t txm_call = {
215 .selector = kTXMKernelSelectorQueryTrustCache,
216 .failure_silent = true,
217 .num_input_args = 2,
218 .num_output_args = 2,
219 };
220 kern_return_t ret = KERN_NOT_FOUND;
221
222 lck_rw_lock_shared(&txm_trust_cache_lck);
223 ret = txm_kernel_call(&txm_call, query_type, cdhash);
224 lck_rw_unlock_shared(&txm_trust_cache_lck);
225
226 if (ret == KERN_SUCCESS) {
227 if (query_token) {
228 query_token->trustCache = (const TrustCache_t*)txm_call.return_words[0];
229 query_token->trustCacheEntry = (const void*)txm_call.return_words[1];
230 }
231 return KERN_SUCCESS;
232 }
233
234 /* Check for not-found trust cache error */
235 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
236 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
237 ret = KERN_NOT_FOUND;
238 }
239 }
240
241 return ret;
242 }
243
244 static kern_return_t
txm_check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])245 txm_check_trust_cache_runtime_for_uuid(
246 const uint8_t check_uuid[kUUIDSize])
247 {
248 txm_call_t txm_call = {
249 .selector = kTXMKernelSelectorCheckTrustCacheRuntimeForUUID,
250 .failure_silent = true,
251 .num_input_args = 1
252 };
253 kern_return_t ret = KERN_DENIED;
254
255 lck_rw_lock_shared(&txm_trust_cache_lck);
256 ret = txm_kernel_call(&txm_call, check_uuid);
257 lck_rw_unlock_shared(&txm_trust_cache_lck);
258
259 /* Check for not-found trust cache error */
260 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
261 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
262 ret = KERN_NOT_FOUND;
263 }
264 }
265
266 return ret;
267 }
268
269 #elif PMAP_CS_PPL_MONITOR
270 /*
271 * We have the Page Protection Layer environment available. All of our artifacts
272 * need to be page-aligned. The PPL will lockdown the artifacts before it begins
273 * the validation.
274 *
275 * Even though the runtimes are PPL owned, we expect the runtime init function
276 * to be called before the PPL has been locked down, which allows us to write
277 * to them.
278 */
279
/* Immutable part of the runtime -- storage is PPL-owned (declared by the pmap) */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &ppl_trust_cache_rt;

/* Mutable part of the runtime -- storage is PPL-owned (declared by the pmap) */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &ppl_trust_cache_mut_rt;
285
286 void
trust_cache_runtime_init(void)287 trust_cache_runtime_init(void)
288 {
289 bool allow_second_static_cache = false;
290 bool allow_engineering_caches = false;
291
292 #if CONFIG_SECOND_STATIC_TRUST_CACHE
293 allow_second_static_cache = true;
294 #endif
295
296 #if PMAP_CS_INCLUDE_INTERNAL_CODE
297 allow_engineering_caches = true;
298 #endif
299
300 /* Image4 interface needs to be available */
301 if (img4if == NULL) {
302 panic("image4 interface not available");
303 }
304
305 /* AMFI interface needs to be available */
306 if (amfi == NULL) {
307 panic("amfi interface not available");
308 } else if (amfi->TrustCache.version < 2) {
309 panic("amfi interface is stale: %u", amfi->TrustCache.version);
310 }
311
312 trustCacheInitializeRuntime(
313 trust_cache_rt,
314 trust_cache_mut_rt,
315 allow_second_static_cache,
316 allow_engineering_caches,
317 false,
318 IMG4_RUNTIME_PMAP_CS);
319
320 /* Locks are initialized in "pmap_bootstrap()" */
321 }
322
/*
 * Load a trust cache of the given type through the PPL.
 *
 * All artifacts must be page-aligned allocations since the PPL locks them
 * down during validation. The payload is wrapped in a pmap_img4_payload_t
 * and, on success, ownership passes to the PPL (it is not freed here); the
 * manifests are only needed during validation and are always freed.
 *
 * Returns KERN_SUCCESS or an allocation/PPL error.
 */
static kern_return_t
ppl_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;
	vm_address_t payload_addr = 0;
	vm_size_t payload_len = 0;
	vm_size_t payload_len_aligned = 0;
	vm_address_t manifest_addr = 0;
	vm_size_t manifest_len_aligned = 0;
	vm_address_t aux_manifest_addr = 0;
	vm_size_t aux_manifest_len_aligned = 0;

	/* The trust cache data structure is bundled with the img4 payload */
	if (os_add_overflow(img4_payload_len, sizeof(pmap_img4_payload_t), &payload_len)) {
		panic("overflow on pmap img4 payload: %lu", img4_payload_len);
	}
	payload_len_aligned = round_page(payload_len);
	manifest_len_aligned = round_page(img4_manifest_len);
	aux_manifest_len_aligned = round_page(img4_aux_manifest_len);

	ret = kmem_alloc(kernel_map, &payload_addr, payload_len_aligned,
	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for pmap image4 payload: %d\n", ret);
		goto out;
	}

	/* Copy the raw payload into the trailing buffer of the wrapper structure */
	pmap_img4_payload_t *pmap_payload = (pmap_img4_payload_t*)payload_addr;
	memcpy(pmap_payload->img4_payload, img4_payload, img4_payload_len);

	/* Allocate storage for the manifest */
	ret = kmem_alloc(kernel_map, &manifest_addr, manifest_len_aligned,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	/* The auxiliary manifest is optional; zero length means none was provided */
	if (aux_manifest_len_aligned != 0) {
		/* Allocate storage for the auxiliary manifest */
		ret = kmem_alloc(kernel_map, &aux_manifest_addr, aux_manifest_len_aligned,
		    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
		if (ret != KERN_SUCCESS) {
			printf("unable to allocate memory for auxiliary image4 manifest: %d\n", ret);
			goto out;
		}
		memcpy((void*)aux_manifest_addr, img4_aux_manifest, img4_aux_manifest_len);
	}

	/* The PPL will round up the length to page size itself */
	ret = pmap_load_trust_cache_with_type(
		type,
		payload_addr, payload_len,
		manifest_addr, img4_manifest_len,
		aux_manifest_addr, img4_aux_manifest_len);

out:
	/* Manifests are not needed past validation -- always free them */
	if (aux_manifest_addr != 0) {
		kmem_free(kernel_map, aux_manifest_addr, aux_manifest_len_aligned);
		aux_manifest_addr = 0;
		aux_manifest_len_aligned = 0;
	}

	if (manifest_addr != 0) {
		kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
		manifest_addr = 0;
		manifest_len_aligned = 0;
	}

	/* The payload is only freed on failure; on success the PPL owns it */
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_len_aligned);
		payload_addr = 0;
		payload_len_aligned = 0;
	}

	return ret;
}
405
/*
 * Legacy (module-only) trust caches are never accepted on PPL systems;
 * any attempt to load one is a fatal error.
 */
static kern_return_t
ppl_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}
412
/*
 * Query the PPL trust cache runtime for a cdhash.
 *
 * Thin wrapper: the actual lookup traps into the PPL.
 */
static kern_return_t
ppl_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	/*
	 * We need to query by trapping into the PPL since the PPL trust cache runtime
	 * lock needs to be held. We cannot hold the lock from outside the PPL.
	 */
	return pmap_query_trust_cache(query_type, cdhash, query_token);
}
425
/*
 * Check whether a trust cache with the given UUID is loaded in the PPL
 * runtime. Thin wrapper: the lookup traps into the PPL, which holds the
 * runtime lock internally.
 */
static kern_return_t
ppl_check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize])
{
	return pmap_check_trust_cache_runtime_for_uuid(check_uuid);
}
432
433 #else
434 /*
435 * We don't have a monitor environment available. This means someone with a kernel
436 * memory exploit will be able to inject a trust cache into the system. There is
437 * not much we can do here, since this is older HW.
438 */
439
/* Lock for the runtime */
LCK_GRP_DECLARE(trust_cache_lck_grp, "trust_cache_lck_grp");
decl_lck_rw_data(, trust_cache_rt_lock);

/* Immutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t) trust_cache_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &trust_cache_rt_storage;

/*
 * Mutable part of the runtime. The storage is deliberately NOT
 * read-only-late since it is written every time a trust cache is loaded;
 * only the pointer to it is locked down.
 */
TrustCacheMutableRuntime_t trust_cache_mut_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &trust_cache_mut_rt_storage;
451
452 void
trust_cache_runtime_init(void)453 trust_cache_runtime_init(void)
454 {
455 bool allow_second_static_cache = false;
456 bool allow_engineering_caches = false;
457 bool allow_legacy_caches = false;
458
459 #if CONFIG_SECOND_STATIC_TRUST_CACHE
460 allow_second_static_cache = true;
461 #endif
462
463 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
464 allow_engineering_caches = true;
465 #endif
466
467 #ifdef XNU_PLATFORM_BridgeOS
468 allow_legacy_caches = true;
469 #endif
470
471 /* Image4 interface needs to be available */
472 if (img4if == NULL) {
473 panic("image4 interface not available");
474 }
475
476 /* AMFI interface needs to be available */
477 if (amfi == NULL) {
478 panic("amfi interface not available");
479 } else if (amfi->TrustCache.version < 2) {
480 panic("amfi interface is stale: %u", amfi->TrustCache.version);
481 }
482
483 trustCacheInitializeRuntime(
484 trust_cache_rt,
485 trust_cache_mut_rt,
486 allow_second_static_cache,
487 allow_engineering_caches,
488 allow_legacy_caches,
489 IMG4_RUNTIME_DEFAULT);
490
491 /* Initialize the read-write lock */
492 lck_rw_init(&trust_cache_rt_lock, &trust_cache_lck_grp, 0);
493 }
494
/*
 * Load a trust cache of the given type into the kernel-managed runtime.
 *
 * The payload must persist for the lifetime of the trust cache, so it is
 * copied into a fresh allocation; the manifest is only needed during
 * validation, so the caller's buffer is used directly. On success,
 * ownership of the trust_cache wrapper and the payload copy passes to the
 * runtime; on failure both are freed here.
 *
 * Returns KERN_SUCCESS, KERN_ALREADY_IN_SET for a duplicate, or
 * KERN_FAILURE otherwise.
 */
static kern_return_t
xnu_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;

	/* Ignore the auxiliary manifest until we add support for it */
	(void)img4_aux_manifest;
	(void)img4_aux_manifest_len;

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/*
	 * The manifests aren't needed after the validation is complete, but the payload needs
	 * to persist. The caller of this API expects us to make our own allocations. Since we
	 * don't need the manifests after validation, we can use the manifests passed in to us
	 * but we need to make a new allocation for the payload, since that needs to persist.
	 *
	 * Z_WAITOK implies that this allocation can never fail.
	 */
	uint8_t *payload = (uint8_t*)kalloc_data(img4_payload_len, Z_WAITOK);
	assert(payload != NULL);

	/* Copy the payload into our allocation */
	memcpy(payload, img4_payload, img4_payload_len);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.load(
		trust_cache_rt,
		type,
		trust_cache,
		(const uintptr_t)payload, img4_payload_len,
		(const uintptr_t)img4_manifest, img4_manifest_len);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	/* On any failure (including a duplicate) the runtime did not keep our allocations */
	if (ret != KERN_SUCCESS) {
		kfree_data(payload, img4_payload_len);
		payload = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
}
559
/*
 * Load a legacy (module-only, unauthenticated) trust cache into the
 * kernel-managed runtime. Only compiled in where the platform still allows
 * legacy caches; otherwise any attempt is a fatal error.
 *
 * The module is copied into a fresh allocation; on success, ownership of
 * the copy and the wrapper passes to the runtime, on failure both are
 * freed here.
 */
static kern_return_t
xnu_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
#if XNU_HAS_LEGACY_TRUST_CACHE_LOADING
	kern_return_t ret = KERN_DENIED;

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/* Allocate storage for the module -- Z_WAITOK means this can't fail */
	uint8_t *module = (uint8_t*)kalloc_data(module_size, Z_WAITOK);
	assert(module != NULL);

	/* Copy the module into our allocation */
	memcpy(module, module_data, module_size);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.loadModule(
		trust_cache_rt,
		kTCTypeLegacy,
		trust_cache,
		(const uintptr_t)module, module_size);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load legacy trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	/* On any failure (including a duplicate) the runtime did not keep our allocations */
	if (ret != KERN_SUCCESS) {
		kfree_data(module, module_size);
		module = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
#else
	panic("legacy trust caches are not supported on this platform");
#endif /* XNU_HAS_LEGACY_TRUST_CACHE_LOADING */
}
613
614 static kern_return_t
xnu_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)615 xnu_query_trust_cache(
616 TCQueryType_t query_type,
617 const uint8_t cdhash[kTCEntryHashSize],
618 TrustCacheQueryToken_t *query_token)
619 {
620 kern_return_t ret = KERN_NOT_FOUND;
621
622 /* Validate the query type preemptively */
623 if (query_type >= kTCQueryTypeTotal) {
624 printf("unable to query trust cache: invalid query type: %u\n", query_type);
625 return KERN_INVALID_ARGUMENT;
626 }
627
628 /* Lock the runtime as shared */
629 lck_rw_lock_shared(&trust_cache_rt_lock);
630
631 TCReturn_t tc_ret = amfi->TrustCache.query(
632 trust_cache_rt,
633 query_type,
634 cdhash,
635 query_token);
636
637 /* Unlock the runtime */
638 lck_rw_unlock_shared(&trust_cache_rt_lock);
639
640 if (tc_ret.error == kTCReturnSuccess) {
641 ret = KERN_SUCCESS;
642 } else if (tc_ret.error == kTCReturnNotFound) {
643 ret = KERN_NOT_FOUND;
644 } else {
645 ret = KERN_FAILURE;
646 printf("trust cache query failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
647 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
648 }
649
650 return ret;
651 }
652
653 static kern_return_t
xnu_check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])654 xnu_check_trust_cache_runtime_for_uuid(
655 const uint8_t check_uuid[kUUIDSize])
656 {
657 kern_return_t ret = KERN_DENIED;
658
659 if (amfi->TrustCache.version < 3) {
660 /* AMFI change hasn't landed in the build */
661 printf("unable to check for loaded trust cache: interface not supported\n");
662 return KERN_NOT_SUPPORTED;
663 }
664
665 /* Lock the runtime as shared */
666 lck_rw_lock_shared(&trust_cache_rt_lock);
667
668 TCReturn_t tc_ret = amfi->TrustCache.checkRuntimeForUUID(
669 trust_cache_rt,
670 check_uuid,
671 NULL);
672
673 /* Unlock the runtime */
674 lck_rw_unlock_shared(&trust_cache_rt_lock);
675
676 if (tc_ret.error == kTCReturnSuccess) {
677 ret = KERN_SUCCESS;
678 } else if (tc_ret.error == kTCReturnNotFound) {
679 ret = KERN_NOT_FOUND;
680 } else {
681 ret = KERN_FAILURE;
682 printf("trust cache UUID check failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
683 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
684 }
685
686 return ret;
687 }
688
689 #endif /* CONFIG_SPTM */
690
691 kern_return_t
check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])692 check_trust_cache_runtime_for_uuid(
693 const uint8_t check_uuid[kUUIDSize])
694 {
695 kern_return_t ret = KERN_DENIED;
696
697 if (check_uuid == NULL) {
698 return KERN_INVALID_ARGUMENT;
699 }
700
701 #if CONFIG_SPTM
702 ret = txm_check_trust_cache_runtime_for_uuid(check_uuid);
703 #elif PMAP_CS_PPL_MONITOR
704 ret = ppl_check_trust_cache_runtime_for_uuid(check_uuid);
705 #else
706 ret = xnu_check_trust_cache_runtime_for_uuid(check_uuid);
707 #endif
708
709 return ret;
710 }
711
712 kern_return_t
load_trust_cache(const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len)713 load_trust_cache(
714 const uint8_t *img4_object, const size_t img4_object_len,
715 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len)
716 {
717 TCType_t type = kTCTypeInvalid;
718 kern_return_t ret = KERN_DENIED;
719
720 /* Start from the first valid type and attempt to validate through each */
721 for (type = kTCTypeLTRS; type < kTCTypeTotal; type += 1) {
722 ret = load_trust_cache_with_type(
723 type,
724 img4_object, img4_object_len,
725 img4_ext_manifest, img4_ext_manifest_len,
726 NULL, 0);
727
728 if ((ret == KERN_SUCCESS) || (ret == KERN_ALREADY_IN_SET)) {
729 return ret;
730 }
731 }
732
733 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
734 /* Attempt to load as an engineering root */
735 ret = load_trust_cache_with_type(
736 kTCTypeDTRS,
737 img4_object, img4_object_len,
738 img4_ext_manifest, img4_ext_manifest_len,
739 NULL, 0);
740 #endif
741
742 return ret;
743 }
744
745 kern_return_t
load_trust_cache_with_type(TCType_t type,const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)746 load_trust_cache_with_type(
747 TCType_t type,
748 const uint8_t *img4_object, const size_t img4_object_len,
749 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len,
750 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
751 {
752 kern_return_t ret = KERN_DENIED;
753 uintptr_t length_check = 0;
754 const uint8_t *img4_payload = NULL;
755 size_t img4_payload_len = 0;
756 const uint8_t *img4_manifest = NULL;
757 size_t img4_manifest_len = 0;
758
759 /* img4_object is required */
760 if (!img4_object || (img4_object_len == 0)) {
761 printf("unable to load trust cache (type: %u): no img4_object provided\n", type);
762 return KERN_INVALID_ARGUMENT;
763 } else if (os_add_overflow((uintptr_t)img4_object, img4_object_len, &length_check)) {
764 panic("overflow on the img4 object: %p | %lu", img4_object, img4_object_len);
765 }
766
767 /* img4_ext_manifest is optional */
768 if (img4_ext_manifest_len != 0) {
769 if (!img4_ext_manifest) {
770 printf("unable to load trust cache (type: %u): img4_ext_manifest expected\n", type);
771 return KERN_INVALID_ARGUMENT;
772 } else if (os_add_overflow((uintptr_t)img4_ext_manifest, img4_ext_manifest_len, &length_check)) {
773 panic("overflow on the ext manifest: %p | %lu", img4_ext_manifest, img4_ext_manifest_len);
774 }
775 }
776
777 /* img4_aux_manifest is optional */
778 if (img4_aux_manifest_len != 0) {
779 if (!img4_aux_manifest) {
780 printf("unable to load trust cache (type: %u): img4_aux_manifest expected\n", type);
781 return KERN_INVALID_ARGUMENT;
782 } else if (os_add_overflow((uintptr_t)img4_aux_manifest, img4_aux_manifest_len, &length_check)) {
783 panic("overflow on the ext manifest: %p | %lu", img4_aux_manifest, img4_aux_manifest_len);
784 }
785 }
786
787 /*
788 * If we don't have an external manifest provided, we expect the img4_object to have
789 * the manifest embedded. In this case, we need to extract the different artifacts
790 * out of the object.
791 */
792 if (img4_ext_manifest_len != 0) {
793 img4_payload = img4_object;
794 img4_payload_len = img4_object_len;
795 img4_manifest = img4_ext_manifest;
796 img4_manifest_len = img4_ext_manifest_len;
797 } else {
798 if (img4if->i4if_version < 15) {
799 /* AppleImage4 change hasn't landed in the build */
800 printf("unable to extract payload and manifest from object\n");
801 return KERN_NOT_SUPPORTED;
802 }
803 img4_buff_t img4_buff = IMG4_BUFF_INIT;
804
805 /* Extract the payload */
806 if (img4_get_payload(img4_object, img4_object_len, &img4_buff) == NULL) {
807 printf("unable to find payload within img4 object\n");
808 return KERN_NOT_FOUND;
809 }
810 img4_payload = img4_buff.i4b_bytes;
811 img4_payload_len = img4_buff.i4b_len;
812
813 /* Extract the manifest */
814 if (img4_get_manifest(img4_object, img4_object_len, &img4_buff) == NULL) {
815 printf("unable to find manifest within img4 object\n");
816 return KERN_NOT_FOUND;
817 }
818 img4_manifest = img4_buff.i4b_bytes;
819 img4_manifest_len = img4_buff.i4b_len;
820 }
821
822 if ((type == kTCTypeStatic) || (type == kTCTypeEngineering) || (type == kTCTypeLegacy)) {
823 printf("unable to load trust cache: invalid type: %u\n", type);
824 return KERN_INVALID_ARGUMENT;
825 } else if (type >= kTCTypeTotal) {
826 printf("unable to load trust cache: unknown type: %u\n", type);
827 return KERN_INVALID_ARGUMENT;
828 }
829
830 /* Validate entitlement for the calling process */
831 if (TCTypeConfig[type].entitlementValue != NULL) {
832 const bool entitlement_satisfied = IOCurrentTaskHasStringEntitlement(
833 "com.apple.private.pmap.load-trust-cache",
834 TCTypeConfig[type].entitlementValue);
835
836 if (entitlement_satisfied == false) {
837 printf("unable to load trust cache (type: %u): unsatisfied entitlement\n", type);
838 return KERN_DENIED;
839 }
840 }
841
842 if ((type == kTCTypeCryptex1BootOS) && boot_os_tc_loaded) {
843 printf("disallowed to load multiple kTCTypeCryptex1BootOS trust caches\n");
844 return KERN_DENIED;
845 } else if ((type == kTCTypeCryptex1BootApp) && boot_app_tc_loaded) {
846 printf("disallowed to load multiple kTCTypeCryptex1BootApp trust caches\n");
847 return KERN_DENIED;
848 }
849
850 if (restricted_execution_mode_state() == KERN_SUCCESS) {
851 printf("disallowed to load trust caches once REM is enabled\n");
852 return KERN_DENIED;
853 }
854
855 #if CONFIG_SPTM
856 ret = txm_load_trust_cache(
857 type,
858 img4_payload, img4_payload_len,
859 img4_manifest, img4_manifest_len,
860 img4_aux_manifest, img4_aux_manifest_len);
861 #elif PMAP_CS_PPL_MONITOR
862 ret = ppl_load_trust_cache(
863 type,
864 img4_payload, img4_payload_len,
865 img4_manifest, img4_manifest_len,
866 img4_aux_manifest, img4_aux_manifest_len);
867 #else
868 ret = xnu_load_trust_cache(
869 type,
870 img4_payload, img4_payload_len,
871 img4_manifest, img4_manifest_len,
872 img4_aux_manifest, img4_aux_manifest_len);
873 #endif
874
875 if (ret != KERN_SUCCESS) {
876 printf("unable to load trust cache (type: %u): %d\n", type, ret);
877 } else {
878 if (type == kTCTypeCryptex1BootOS) {
879 boot_os_tc_loaded = true;
880 } else if (type == kTCTypeCryptex1BootApp) {
881 boot_app_tc_loaded = true;
882 }
883 printf("successfully loaded trust cache of type: %u\n", type);
884 }
885
886 return ret;
887 }
888
889 kern_return_t
load_legacy_trust_cache(const uint8_t * module_data,const size_t module_size)890 load_legacy_trust_cache(
891 const uint8_t *module_data, const size_t module_size)
892 {
893 kern_return_t ret = KERN_DENIED;
894 uintptr_t length_check = 0;
895
896 /* Module is required */
897 if (!module_data || (module_size == 0)) {
898 printf("unable to load legacy trust cache: no module provided\n");
899 return KERN_INVALID_ARGUMENT;
900 } else if (os_add_overflow((uintptr_t)module_data, module_size, &length_check)) {
901 panic("overflow on the module: %p | %lu", module_data, module_size);
902 }
903
904 #if CONFIG_SPTM
905 ret = txm_load_legacy_trust_cache(module_data, module_size);
906 #elif PMAP_CS_PPL_MONITOR
907 ret = ppl_load_legacy_trust_cache(module_data, module_size);
908 #else
909 ret = xnu_load_legacy_trust_cache(module_data, module_size);
910 #endif
911
912 if (ret != KERN_SUCCESS) {
913 printf("unable to load legacy trust cache: %d\n", ret);
914 } else {
915 printf("successfully loaded legacy trust cache\n");
916 }
917
918 return ret;
919 }
920
921 kern_return_t
query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)922 query_trust_cache(
923 TCQueryType_t query_type,
924 const uint8_t cdhash[kTCEntryHashSize],
925 TrustCacheQueryToken_t *query_token)
926 {
927 kern_return_t ret = KERN_NOT_FOUND;
928
929 if (cdhash == NULL) {
930 printf("unable to query trust caches: no cdhash provided\n");
931 return KERN_INVALID_ARGUMENT;
932 }
933
934 #if CONFIG_SPTM
935 ret = txm_query_trust_cache(query_type, cdhash, query_token);
936 #elif PMAP_CS_PPL_MONITOR
937 ret = ppl_query_trust_cache(query_type, cdhash, query_token);
938 #else
939 ret = xnu_query_trust_cache(query_type, cdhash, query_token);
940 #endif
941
942 return ret;
943 }
944
945 /*
946 * The trust cache management library uses a wrapper data structure to manage each
947 * of the trust cache modules. We know the exact number of static trust caches we
948 * expect, so we keep around a read-only-late allocation of the data structure for
949 * use.
950 *
951 * Since engineering trust caches are only ever allowed on development builds, they
952 * are not protected through the read-only-late property, and instead allocated
953 * dynamically.
954 */
955
/* Set once load_static_trust_cache() has run, even if no modules were found */
SECURITY_READ_ONLY_LATE(bool) trust_cache_static_init = false;
/* Set once at least one static trust cache module has been loaded */
SECURITY_READ_ONLY_LATE(bool) trust_cache_static_loaded = false;
/* Wrapper data structure for the first (primary) static trust cache module */
SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static0 = {0};

#if CONFIG_SECOND_STATIC_TRUST_CACHE
/* Wrapper data structure for the optional second static trust cache module */
SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static1 = {0};
#endif
963
#if defined(__arm64__)

/*
 * On arm64, the pmap layer already exports phystokv(); declare it here since
 * the canonical declaration is hidden from this part of the code base.
 */
typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

#else /* x86_64 */
/*
 * We need this duplicate definition because it is hidden behind the MACH_KERNEL_PRIVATE
 * macro definition, which makes it inaccessible to this part of the code base.
 */
extern uint64_t physmap_base, physmap_max;

/*
 * Translate a physical address into its virtual alias within the kernel
 * physmap window, panicking if the result falls outside [physmap_base,
 * physmap_max).
 */
static inline void*
PHYSMAP_PTOV_check(void *paddr)
{
	uint64_t pvaddr = (uint64_t)paddr + physmap_base;

	if (__improbable(pvaddr >= physmap_max)) {
		panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
		    pvaddr, physmap_base, physmap_max);
	}

	return (void*)pvaddr;
}

#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
#define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))

#endif /* defined(__arm64__) */
993
994 void
load_static_trust_cache(void)995 load_static_trust_cache(void)
996 {
997 DTEntry memory_map = {0};
998 const DTTrustCacheRange *tc_range = NULL;
999 trust_cache_offsets_t *tc_offsets = NULL;
1000 unsigned int tc_dt_prop_length = 0;
1001 size_t tc_segment_length = 0;
1002
1003 /* Mark this function as having been called */
1004 trust_cache_static_init = true;
1005
1006 /* Nothing to do when the runtime isn't set */
1007 if (trust_cache_rt == NULL) {
1008 return;
1009 }
1010
1011 if (amfi->TrustCache.version < 1) {
1012 /* AMFI change hasn't landed in the build */
1013 printf("unable to load static trust cache: interface not supported\n");
1014 return;
1015 }
1016
1017 int err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
1018 if (err != kSuccess) {
1019 printf("unable to find chosen/memory-map in the device tree: %d\n", err);
1020 return;
1021 }
1022
1023 err = SecureDTGetProperty(memory_map, "TrustCache", (const void **)&tc_range, &tc_dt_prop_length);
1024 if (err == kSuccess) {
1025 if (tc_dt_prop_length != sizeof(DTTrustCacheRange)) {
1026 panic("unexpected size for TrustCache property: %u != %zu",
1027 tc_dt_prop_length, sizeof(DTTrustCacheRange));
1028 }
1029
1030 tc_offsets = (void*)phystokv(tc_range->paddr);
1031 tc_segment_length = tc_range->length;
1032 }
1033
1034 /* x86_64 devices aren't expected to have trust caches */
1035 if (tc_segment_length == 0) {
1036 if (tc_offsets && tc_offsets->num_caches != 0) {
1037 panic("trust cache segment is zero length but trust caches are available: %u",
1038 tc_offsets->num_caches);
1039 }
1040
1041 printf("no external trust caches found (segment length is zero)\n");
1042 return;
1043 } else if (tc_offsets->num_caches == 0) {
1044 panic("trust cache segment isn't zero but no trust caches available: %lu",
1045 (unsigned long)tc_segment_length);
1046 }
1047
1048 size_t offsets_length = 0;
1049 size_t struct_length = 0;
1050 if (os_mul_overflow(tc_offsets->num_caches, sizeof(uint32_t), &offsets_length)) {
1051 panic("overflow on the number of trust caches provided: %u", tc_offsets->num_caches);
1052 } else if (os_add_overflow(offsets_length, sizeof(trust_cache_offsets_t), &struct_length)) {
1053 panic("overflow on length of the trust cache offsets: %lu",
1054 (unsigned long)offsets_length);
1055 } else if (tc_segment_length < struct_length) {
1056 panic("trust cache segment length smaller than required: %lu | %lu",
1057 (unsigned long)tc_segment_length, (unsigned long)struct_length);
1058 }
1059 const uintptr_t tc_region_end = (uintptr_t)tc_offsets + tc_segment_length;
1060
1061 printf("attempting to load %u external trust cache modules\n", tc_offsets->num_caches);
1062
1063 for (uint32_t i = 0; i < tc_offsets->num_caches; i++) {
1064 TCReturn_t tc_ret = (TCReturn_t){.error = kTCReturnError};
1065 TCType_t tc_type = kTCTypeEngineering;
1066 TrustCache_t *trust_cache = NULL;
1067
1068 uintptr_t tc_module = 0;
1069 if (os_add_overflow((uintptr_t)tc_offsets, tc_offsets->offsets[i], &tc_module)) {
1070 panic("trust cache module start overflows: %u | %lu | %u",
1071 i, (unsigned long)tc_offsets, tc_offsets->offsets[i]);
1072 } else if (tc_module >= tc_region_end) {
1073 panic("trust cache module begins after segment ends: %u | %lx | %lx",
1074 i, (unsigned long)tc_module, tc_region_end);
1075 }
1076
1077 /* Should be safe for underflow */
1078 const size_t buffer_length = tc_region_end - tc_module;
1079
1080 /* The first module is always the static trust cache */
1081 if (i == 0) {
1082 tc_type = kTCTypeStatic;
1083 trust_cache = &trust_cache_static0;
1084 }
1085
1086 #if CONFIG_SECOND_STATIC_TRUST_CACHE
1087 if (trust_cache_rt->allowSecondStaticTC && (i == 1)) {
1088 tc_type = kTCTypeStatic;
1089 trust_cache = &trust_cache_static1;
1090 }
1091 #endif
1092
1093 if (tc_type == kTCTypeEngineering) {
1094 if (trust_cache_rt->allowEngineeringTC == false) {
1095 printf("skipping engineering trust cache module: %u\n", i);
1096 continue;
1097 }
1098
1099 /* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
1100 trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
1101 assert(trust_cache != NULL);
1102 }
1103
1104 tc_ret = amfi->TrustCache.loadModule(
1105 trust_cache_rt,
1106 tc_type,
1107 trust_cache,
1108 tc_module, buffer_length);
1109
1110 if (tc_ret.error != kTCReturnSuccess) {
1111 printf("unable to load trust cache module: %u (TCReturn: 0x%02X | 0x%02X | %u)\n",
1112 i, tc_ret.component, tc_ret.error, tc_ret.uniqueError);
1113
1114 if (tc_type == kTCTypeStatic) {
1115 panic("failed to load static trust cache module: %u", i);
1116 }
1117 continue;
1118 }
1119 printf("loaded external trust cache module: %u\n", i);
1120
1121 /*
1122 * The first module is always loaded as a static trust cache. If loading it failed,
1123 * then this function would've panicked. If we reach here, it means we've loaded a
1124 * static trust cache on the system.
1125 */
1126 trust_cache_static_loaded = true;
1127 }
1128
1129 printf("completed loading external trust cache modules\n");
1130 }
1131
1132 kern_return_t
static_trust_cache_capabilities(uint32_t * num_static_trust_caches_ret,TCCapabilities_t * capabilities0_ret,TCCapabilities_t * capabilities1_ret)1133 static_trust_cache_capabilities(
1134 uint32_t *num_static_trust_caches_ret,
1135 TCCapabilities_t *capabilities0_ret,
1136 TCCapabilities_t *capabilities1_ret)
1137 {
1138 TCReturn_t tcRet = {.error = kTCReturnError};
1139
1140 *num_static_trust_caches_ret = 0;
1141 *capabilities0_ret = kTCCapabilityNone;
1142 *capabilities1_ret = kTCCapabilityNone;
1143
1144 /* Ensure static trust caches have been initialized */
1145 if (trust_cache_static_init == false) {
1146 panic("attempted to query static trust cache capabilities without init");
1147 }
1148
1149 #if CONFIG_SPTM
1150 if (num_static_trust_caches > 0) {
1151 /* Copy in the data received from TrustedExecutionMonitor */
1152 *num_static_trust_caches_ret = num_static_trust_caches;
1153 *capabilities0_ret = static_trust_cache_capabilities0;
1154 *capabilities1_ret = static_trust_cache_capabilities1;
1155
1156 /* Return successfully */
1157 return KERN_SUCCESS;
1158 }
1159 #endif
1160
1161 if (amfi->TrustCache.version < 2) {
1162 /* AMFI change hasn't landed in the build */
1163 printf("unable to get static trust cache capabilities: interface not supported\n");
1164 return KERN_NOT_SUPPORTED;
1165 } else if (trust_cache_static_loaded == false) {
1166 /* Return arguments already set */
1167 return KERN_SUCCESS;
1168 }
1169
1170 tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static0, capabilities0_ret);
1171 assert(tcRet.error == kTCReturnSuccess);
1172 *num_static_trust_caches_ret += 1;
1173
1174 #if CONFIG_SECOND_STATIC_TRUST_CACHE
1175 tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static1, capabilities1_ret);
1176 assert(tcRet.error == kTCReturnSuccess);
1177 *num_static_trust_caches_ret += 1;
1178 #endif
1179
1180 return KERN_SUCCESS;
1181 }
1182