1 /*
2 * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/overflow.h>
24 #include <pexpert/pexpert.h>
25 #include <pexpert/device_tree.h>
26 #include <mach/boolean.h>
27 #include <mach/vm_param.h>
28 #include <vm/vm_kern_xnu.h>
29 #include <vm/pmap_cs.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/lock_rw.h>
34 #include <libkern/libkern.h>
35 #include <libkern/section_keywords.h>
36 #include <libkern/img4/interface.h>
37 #include <libkern/amfi/amfi.h>
38 #include <sys/vm.h>
39 #include <sys/proc.h>
40 #include <sys/codesign.h>
41 #include <sys/trust_caches.h>
42 #include <sys/code_signing.h>
43 #include <IOKit/IOBSD.h>
44 #include <img4/firmware.h>
45 #include <TrustCache/API.h>
46
/*
 * Latches recording that a kTCTypeCryptex1BootOS / kTCTypeCryptex1BootApp
 * trust cache has already been loaded; load_trust_cache_with_type() checks
 * these to reject a second load of either type.
 */
static bool boot_os_tc_loaded = false;
static bool boot_app_tc_loaded = false;
49
50 #if CONFIG_SPTM
51 /*
52 * We have the TrustedExecutionMonitor environment available. All of our artifacts
53 * need to be page-aligned, and transferred to the appropriate TXM type before we
54 * call into TXM to load the trust cache.
55 *
56 * The trust cache runtime is managed independently by TXM. All initialization work
57 * is done by the TXM bootstrap and there is nothing more we need to do here.
58 */
59 #include <sys/trusted_execution_monitor.h>
60
/* Immutable part of the runtime (managed by TXM -- see note above) */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = NULL;

/* Mutable part of the runtime (managed by TXM -- see note above) */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = NULL;

/*
 * Static trust cache information collected from TXM through
 * get_trust_cache_info() during trust_cache_runtime_init().
 */
SECURITY_READ_ONLY_LATE(uint32_t) num_static_trust_caches = 0;
SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities0 = 0;
SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities1 = 0;
71
72 static void
get_trust_cache_info(void)73 get_trust_cache_info(void)
74 {
75 txm_call_t txm_call = {
76 .selector = kTXMKernelSelectorGetTrustCacheInfo,
77 .failure_fatal = true,
78 .num_output_args = 4
79 };
80 txm_kernel_call(&txm_call);
81
82 /*
83 * The monitor returns the libTrustCache runtime it uses within the first
84 * returned word. The kernel doesn't currently have a use-case for this, so
85 * we don't use it. But we continue to return this value from the monitor
86 * in case it ever comes in use later down the line.
87 */
88
89 num_static_trust_caches = (uint32_t)txm_call.return_words[1];
90 static_trust_cache_capabilities0 = (TCCapabilities_t)txm_call.return_words[2];
91 static_trust_cache_capabilities1 = (TCCapabilities_t)txm_call.return_words[3];
92 }
93
94 void
trust_cache_runtime_init(void)95 trust_cache_runtime_init(void)
96 {
97 /* Image4 interface needs to be available */
98 if (img4if == NULL) {
99 panic("image4 interface not available");
100 }
101
102 /* AMFI interface needs to be available */
103 if (amfi == NULL) {
104 panic("amfi interface not available");
105 } else if (amfi->TrustCache.version < 2) {
106 panic("amfi interface is stale: %u", amfi->TrustCache.version);
107 }
108
109 /* Acquire trust cache information from the monitor */
110 get_trust_cache_info();
111 }
112
/*
 * Load a trust cache of the given type through TrustedExecutionMonitor.
 *
 * Both the image4 payload and manifest are copied into fresh kernel
 * allocations and then transferred to TXM ownership before the load call.
 * The manifest is only needed during validation, so it is always reclaimed
 * and freed on the way out. The payload must persist after a successful
 * load, so it is only reclaimed/freed on failure.
 *
 * Returns KERN_SUCCESS, KERN_ALREADY_IN_SET for a duplicate trust cache,
 * or the failing allocation / monitor-call status.
 */
static kern_return_t
txm_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorLoadTrustCache,
		.num_input_args = 7
	};
	vm_address_t payload_addr = 0;
	vm_address_t manifest_addr = 0;
	kern_return_t ret = KERN_DENIED;

	/* We don't support the auxiliary manifest for now */
	(void)img4_aux_manifest;
	(void)img4_aux_manifest_len;

	ret = kmem_alloc(kernel_map, &payload_addr, img4_payload_len,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 payload: %d\n", ret);
		goto out;
	}
	memcpy((void*)payload_addr, img4_payload, img4_payload_len);

	/*
	 * NOTE(review): if this allocation fails, the "out" path below reclaims
	 * a payload region that was never transferred to TXM -- confirm that
	 * txm_reclaim_region() tolerates a region that was never transferred.
	 */
	ret = kmem_alloc(kernel_map, &manifest_addr, img4_manifest_len,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	/* Transfer both regions to be TXM owned */
	txm_transfer_region(payload_addr, img4_payload_len);
	txm_transfer_region(manifest_addr, img4_manifest_len);

	/* TXM will round-up to page length itself */
	ret = txm_kernel_call(
		&txm_call,
		type,
		payload_addr, img4_payload_len,
		manifest_addr, img4_manifest_len,
		0, 0);

	/* Check for duplicate trust cache error */
	if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
		if (txm_call.txm_ret.tcRet.error == kTCReturnDuplicate) {
			ret = KERN_ALREADY_IN_SET;
		}
	}

out:
	/* The manifest is never needed past validation -- always release it */
	if (manifest_addr != 0) {
		/* Reclaim the manifest region */
		txm_reclaim_region(manifest_addr, img4_manifest_len);

		/* Free the manifest region */
		kmem_free(kernel_map, manifest_addr, img4_manifest_len);
		manifest_addr = 0;
	}

	/* On success, the payload remains allocated (and TXM-owned) */
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		/* Reclaim the payload region */
		txm_reclaim_region(payload_addr, img4_payload_len);

		/* Free the payload region */
		kmem_free(kernel_map, payload_addr, img4_payload_len);
		payload_addr = 0;
	}

	return ret;
}
188
/*
 * Legacy trust cache modules are never accepted on SPTM/TXM platforms;
 * any attempt to load one is fatal.
 */
static kern_return_t
txm_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}
195
196 static kern_return_t
txm_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)197 txm_query_trust_cache(
198 TCQueryType_t query_type,
199 const uint8_t cdhash[kTCEntryHashSize],
200 TrustCacheQueryToken_t *query_token)
201 {
202 txm_call_t txm_call = {
203 .selector = kTXMKernelSelectorQueryTrustCache,
204 .failure_silent = true,
205 .num_input_args = 2,
206 .num_output_args = 2,
207 };
208 kern_return_t ret = txm_kernel_call(&txm_call, query_type, cdhash);
209
210 if (ret == KERN_SUCCESS) {
211 if (query_token) {
212 query_token->trustCache = (const TrustCache_t*)txm_call.return_words[0];
213 query_token->trustCacheEntry = (const void*)txm_call.return_words[1];
214 }
215 return KERN_SUCCESS;
216 }
217
218 /* Check for not-found trust cache error */
219 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
220 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
221 ret = KERN_NOT_FOUND;
222 }
223 }
224
225 return ret;
226 }
227
228 static kern_return_t
txm_query_trust_cache_for_rem(const uint8_t cdhash[kTCEntryHashSize],uint8_t * rem_perms)229 txm_query_trust_cache_for_rem(
230 const uint8_t cdhash[kTCEntryHashSize],
231 uint8_t *rem_perms)
232 {
233 #if XNU_HAS_TRUST_CACHE_QUERY_FOR_REM
234 txm_call_t txm_call = {
235 .selector = kTXMKernelSelectorQueryTrustCacheForREM,
236 .num_input_args = 1,
237 .num_output_args = 1
238 };
239 kern_return_t ret = txm_kernel_call(&txm_call, cdhash);
240
241 if ((ret == KERN_SUCCESS) && (rem_perms != NULL)) {
242 *rem_perms = (uint8_t)txm_call.return_words[0];
243 }
244
245 return ret;
246 #else
247 (void)cdhash;
248 (void)rem_perms;
249 return KERN_NOT_SUPPORTED;
250 #endif /* XNU_HAS_TRUST_CACHE_QUERY_FOR_REM */
251 }
252
253 static kern_return_t
txm_check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])254 txm_check_trust_cache_runtime_for_uuid(
255 const uint8_t check_uuid[kUUIDSize])
256 {
257 txm_call_t txm_call = {
258 .selector = kTXMKernelSelectorCheckTrustCacheRuntimeForUUID,
259 .failure_silent = true,
260 .num_input_args = 1
261 };
262 kern_return_t ret = txm_kernel_call(&txm_call, check_uuid);
263
264 /* Check for not-found trust cache error */
265 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
266 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
267 ret = KERN_NOT_FOUND;
268 }
269 }
270
271 return ret;
272 }
273
274 #elif PMAP_CS_PPL_MONITOR
275 /*
276 * We have the Page Protection Layer environment available. All of our artifacts
277 * need to be page-aligned. The PPL will lockdown the artifacts before it begins
278 * the validation.
279 *
280 * Even though the runtimes are PPL owned, we expect the runtime init function
281 * to be called before the PPL has been locked down, which allows us to write
282 * to them.
283 */
284
/* Immutable part of the runtime (storage lives in PPL-owned memory) */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &ppl_trust_cache_rt;

/* Mutable part of the runtime (storage lives in PPL-owned memory) */
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &ppl_trust_cache_mut_rt;
290
291 void
trust_cache_runtime_init(void)292 trust_cache_runtime_init(void)
293 {
294 bool allow_second_static_cache = false;
295 bool allow_engineering_caches = false;
296
297 #if CONFIG_SECOND_STATIC_TRUST_CACHE
298 allow_second_static_cache = true;
299 #endif
300
301 #if PMAP_CS_INCLUDE_INTERNAL_CODE
302 allow_engineering_caches = true;
303 #endif
304
305 /* Image4 interface needs to be available */
306 if (img4if == NULL) {
307 panic("image4 interface not available");
308 }
309
310 /* AMFI interface needs to be available */
311 if (amfi == NULL) {
312 panic("amfi interface not available");
313 } else if (amfi->TrustCache.version < 2) {
314 panic("amfi interface is stale: %u", amfi->TrustCache.version);
315 }
316
317 trustCacheInitializeRuntime(
318 trust_cache_rt,
319 trust_cache_mut_rt,
320 allow_second_static_cache,
321 allow_engineering_caches,
322 false,
323 IMG4_RUNTIME_PMAP_CS);
324
325 /* Locks are initialized in "pmap_bootstrap()" */
326 }
327
/*
 * Load a trust cache of the given type through the Page Protection Layer.
 *
 * The PPL requires page-aligned artifacts, so each input is copied into a
 * page-rounded kernel allocation. The payload is allocated together with a
 * leading pmap_img4_payload_t wrapper (the trust cache data structure the
 * PPL manages). Manifests are only needed during validation and are always
 * freed; the payload persists on success (the PPL locks it down) and is
 * only freed on failure.
 *
 * NOTE(review): the payload allocation intentionally omits KMA_DATA, unlike
 * the manifest allocations -- presumably because the region is later locked
 * down by the PPL; confirm against pmap_load_trust_cache_with_type().
 */
static kern_return_t
ppl_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;
	vm_address_t payload_addr = 0;
	vm_size_t payload_len = 0;
	vm_size_t payload_len_aligned = 0;
	vm_address_t manifest_addr = 0;
	vm_size_t manifest_len_aligned = 0;
	vm_address_t aux_manifest_addr = 0;
	vm_size_t aux_manifest_len_aligned = 0;

	/* The trust cache data structure is bundled with the img4 payload */
	if (os_add_overflow(img4_payload_len, sizeof(pmap_img4_payload_t), &payload_len)) {
		panic("overflow on pmap img4 payload: %lu", img4_payload_len);
	}
	payload_len_aligned = round_page(payload_len);
	manifest_len_aligned = round_page(img4_manifest_len);
	aux_manifest_len_aligned = round_page(img4_aux_manifest_len);

	ret = kmem_alloc(kernel_map, &payload_addr, payload_len_aligned,
	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for pmap image4 payload: %d\n", ret);
		goto out;
	}

	/* The raw img4 payload trails the pmap_img4_payload_t header */
	pmap_img4_payload_t *pmap_payload = (pmap_img4_payload_t*)payload_addr;
	memcpy(pmap_payload->img4_payload, img4_payload, img4_payload_len);

	/* Allocate storage for the manifest */
	ret = kmem_alloc(kernel_map, &manifest_addr, manifest_len_aligned,
	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for image4 manifest: %d\n", ret);
		goto out;
	}
	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);

	/* The auxiliary manifest is optional (length of zero means absent) */
	if (aux_manifest_len_aligned != 0) {
		/* Allocate storage for the auxiliary manifest */
		ret = kmem_alloc(kernel_map, &aux_manifest_addr, aux_manifest_len_aligned,
		    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
		if (ret != KERN_SUCCESS) {
			printf("unable to allocate memory for auxiliary image4 manifest: %d\n", ret);
			goto out;
		}
		memcpy((void*)aux_manifest_addr, img4_aux_manifest, img4_aux_manifest_len);
	}

	/* The PPL will round up the length to page size itself */
	ret = pmap_load_trust_cache_with_type(
		type,
		payload_addr, payload_len,
		manifest_addr, img4_manifest_len,
		aux_manifest_addr, img4_aux_manifest_len);

out:
	/* Manifests are never needed past validation -- always free them */
	if (aux_manifest_addr != 0) {
		kmem_free(kernel_map, aux_manifest_addr, aux_manifest_len_aligned);
		aux_manifest_addr = 0;
		aux_manifest_len_aligned = 0;
	}

	if (manifest_addr != 0) {
		kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
		manifest_addr = 0;
		manifest_len_aligned = 0;
	}

	/* On success, the payload allocation must persist */
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		kmem_free(kernel_map, payload_addr, payload_len_aligned);
		payload_addr = 0;
		payload_len_aligned = 0;
	}

	return ret;
}
410
/*
 * Legacy trust cache modules are never accepted on PPL platforms;
 * any attempt to load one is fatal.
 */
static kern_return_t
ppl_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
	panic("legacy trust caches are not supported on this platform");
}
417
/*
 * Query the PPL trust cache runtime for a cdhash, filling the optional
 * query token on a hit.
 */
static kern_return_t
ppl_query_trust_cache(
	TCQueryType_t query_type,
	const uint8_t cdhash[kTCEntryHashSize],
	TrustCacheQueryToken_t *query_token)
{
	/*
	 * We need to query by trapping into the PPL since the PPL trust cache runtime
	 * lock needs to be held. We cannot hold the lock from outside the PPL.
	 */
	return pmap_query_trust_cache(query_type, cdhash, query_token);
}
430
/*
 * Check whether a trust cache with the given UUID has been loaded into the
 * PPL runtime. The check traps into the PPL, which owns the runtime lock.
 */
static kern_return_t
ppl_check_trust_cache_runtime_for_uuid(
	const uint8_t check_uuid[kUUIDSize])
{
	return pmap_check_trust_cache_runtime_for_uuid(check_uuid);
}
437
438 #else
439 /*
440 * We don't have a monitor environment available. This means someone with a kernel
441 * memory exploit will be able to inject a trust cache into the system. There is
442 * not much we can do here, since this is older HW.
443 */
444
/*
 * Read-write lock for the runtime: queries take it shared, loads take it
 * exclusive. Initialized in trust_cache_runtime_init().
 */
LCK_GRP_DECLARE(trust_cache_lck_grp, "trust_cache_lck_grp");
decl_lck_rw_data(, trust_cache_rt_lock);

/* Immutable part of the runtime */
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t) trust_cache_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &trust_cache_rt_storage;

/* Mutable part of the runtime (updated as trust caches are loaded) */
TrustCacheMutableRuntime_t trust_cache_mut_rt_storage;
SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &trust_cache_mut_rt_storage;
456
457 void
trust_cache_runtime_init(void)458 trust_cache_runtime_init(void)
459 {
460 bool allow_second_static_cache = false;
461 bool allow_engineering_caches = false;
462 bool allow_legacy_caches = false;
463
464 #if CONFIG_SECOND_STATIC_TRUST_CACHE
465 allow_second_static_cache = true;
466 #endif
467
468 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
469 allow_engineering_caches = true;
470 #endif
471
472 #ifdef XNU_PLATFORM_BridgeOS
473 allow_legacy_caches = true;
474 #endif
475
476 /* Image4 interface needs to be available */
477 if (img4if == NULL) {
478 panic("image4 interface not available");
479 }
480
481 /* AMFI interface needs to be available */
482 if (amfi == NULL) {
483 panic("amfi interface not available");
484 } else if (amfi->TrustCache.version < 2) {
485 panic("amfi interface is stale: %u", amfi->TrustCache.version);
486 }
487
488 trustCacheInitializeRuntime(
489 trust_cache_rt,
490 trust_cache_mut_rt,
491 allow_second_static_cache,
492 allow_engineering_caches,
493 allow_legacy_caches,
494 IMG4_RUNTIME_DEFAULT);
495
496 /* Initialize the read-write lock */
497 lck_rw_init(&trust_cache_rt_lock, &trust_cache_lck_grp, 0);
498 }
499
/*
 * Load a trust cache of the given type into the kernel-owned runtime.
 *
 * The payload is copied into a fresh allocation because it must outlive
 * this call; the manifest is only needed during validation, so the caller's
 * buffer is used directly. On success, ownership of both the TrustCache_t
 * wrapper and the payload copy passes to the runtime (presumably linked in
 * by amfi->TrustCache.load -- hence they are only freed on failure).
 *
 * Returns KERN_SUCCESS, KERN_ALREADY_IN_SET for a duplicate trust cache,
 * or KERN_FAILURE for any other validation error.
 */
static kern_return_t
xnu_load_trust_cache(
	TCType_t type,
	const uint8_t *img4_payload, const size_t img4_payload_len,
	const uint8_t *img4_manifest, const size_t img4_manifest_len,
	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
{
	kern_return_t ret = KERN_DENIED;

	/* Ignore the auxiliary manifest until we add support for it */
	(void)img4_aux_manifest;
	(void)img4_aux_manifest_len;

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/*
	 * The manifests aren't needed after the validation is complete, but the payload needs
	 * to persist. The caller of this API expects us to make our own allocations. Since we
	 * don't need the manifests after validation, we can use the manifests passed in to us
	 * but we need to make a new allocation for the payload, since that needs to persist.
	 *
	 * Z_WAITOK implies that this allocation can never fail.
	 */
	uint8_t *payload = (uint8_t*)kalloc_data(img4_payload_len, Z_WAITOK);
	assert(payload != NULL);

	/* Copy the payload into our allocation */
	memcpy(payload, img4_payload, img4_payload_len);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.load(
		trust_cache_rt,
		type,
		trust_cache,
		(const uintptr_t)payload, img4_payload_len,
		(const uintptr_t)img4_manifest, img4_manifest_len);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	/* Map the trust-cache-library result onto kern_return_t */
	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	/* On failure, the runtime did not take ownership -- free both allocations */
	if (ret != KERN_SUCCESS) {
		kfree_data(payload, img4_payload_len);
		payload = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
}
564
/*
 * Load a legacy (bare module) trust cache into the kernel-owned runtime.
 * Only compiled in when XNU_HAS_LEGACY_TRUST_CACHE_LOADING is set;
 * otherwise any call is fatal.
 *
 * As with xnu_load_trust_cache(), the module is copied into a fresh
 * allocation because it must persist after a successful load; both the
 * copy and the TrustCache_t wrapper are only freed on failure.
 */
static kern_return_t
xnu_load_legacy_trust_cache(
	__unused const uint8_t *module_data, __unused const size_t module_size)
{
#if XNU_HAS_LEGACY_TRUST_CACHE_LOADING
	kern_return_t ret = KERN_DENIED;

	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
	assert(trust_cache != NULL);

	/* Allocate storage for the module -- Z_WAITOK means this can't fail */
	uint8_t *module = (uint8_t*)kalloc_data(module_size, Z_WAITOK);
	assert(module != NULL);

	/* Copy the module into our allocation */
	memcpy(module, module_data, module_size);

	/* Exclusively lock the runtime */
	lck_rw_lock_exclusive(&trust_cache_rt_lock);

	TCReturn_t tc_ret = amfi->TrustCache.loadModule(
		trust_cache_rt,
		kTCTypeLegacy,
		trust_cache,
		(const uintptr_t)module, module_size);

	/* Unlock the runtime */
	lck_rw_unlock_exclusive(&trust_cache_rt_lock);

	/* Map the trust-cache-library result onto kern_return_t */
	if (tc_ret.error == kTCReturnSuccess) {
		ret = KERN_SUCCESS;
	} else if (tc_ret.error == kTCReturnDuplicate) {
		ret = KERN_ALREADY_IN_SET;
	} else {
		printf("unable to load legacy trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);

		ret = KERN_FAILURE;
	}

	/* On failure, the runtime did not take ownership -- free both allocations */
	if (ret != KERN_SUCCESS) {
		kfree_data(module, module_size);
		module = NULL;

		kfree_type(TrustCache_t, trust_cache);
		trust_cache = NULL;
	}
	return ret;
#else
	panic("legacy trust caches are not supported on this platform");
#endif /* XNU_HAS_LEGACY_TRUST_CACHE_LOADING */
}
618
619 static kern_return_t
xnu_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)620 xnu_query_trust_cache(
621 TCQueryType_t query_type,
622 const uint8_t cdhash[kTCEntryHashSize],
623 TrustCacheQueryToken_t *query_token)
624 {
625 kern_return_t ret = KERN_NOT_FOUND;
626
627 /* Validate the query type preemptively */
628 if (query_type >= kTCQueryTypeTotal) {
629 printf("unable to query trust cache: invalid query type: %u\n", query_type);
630 return KERN_INVALID_ARGUMENT;
631 }
632
633 /* Lock the runtime as shared */
634 lck_rw_lock_shared(&trust_cache_rt_lock);
635
636 TCReturn_t tc_ret = amfi->TrustCache.query(
637 trust_cache_rt,
638 query_type,
639 cdhash,
640 query_token);
641
642 /* Unlock the runtime */
643 lck_rw_unlock_shared(&trust_cache_rt_lock);
644
645 if (tc_ret.error == kTCReturnSuccess) {
646 ret = KERN_SUCCESS;
647 } else if (tc_ret.error == kTCReturnNotFound) {
648 ret = KERN_NOT_FOUND;
649 } else {
650 ret = KERN_FAILURE;
651 printf("trust cache query failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
652 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
653 }
654
655 return ret;
656 }
657
658 static kern_return_t
xnu_check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])659 xnu_check_trust_cache_runtime_for_uuid(
660 const uint8_t check_uuid[kUUIDSize])
661 {
662 kern_return_t ret = KERN_DENIED;
663
664 if (amfi->TrustCache.version < 3) {
665 /* AMFI change hasn't landed in the build */
666 printf("unable to check for loaded trust cache: interface not supported\n");
667 return KERN_NOT_SUPPORTED;
668 }
669
670 /* Lock the runtime as shared */
671 lck_rw_lock_shared(&trust_cache_rt_lock);
672
673 TCReturn_t tc_ret = amfi->TrustCache.checkRuntimeForUUID(
674 trust_cache_rt,
675 check_uuid,
676 NULL);
677
678 /* Unlock the runtime */
679 lck_rw_unlock_shared(&trust_cache_rt_lock);
680
681 if (tc_ret.error == kTCReturnSuccess) {
682 ret = KERN_SUCCESS;
683 } else if (tc_ret.error == kTCReturnNotFound) {
684 ret = KERN_NOT_FOUND;
685 } else {
686 ret = KERN_FAILURE;
687 printf("trust cache UUID check failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
688 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
689 }
690
691 return ret;
692 }
693
694 #endif /* CONFIG_SPTM */
695
696 kern_return_t
check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])697 check_trust_cache_runtime_for_uuid(
698 const uint8_t check_uuid[kUUIDSize])
699 {
700 kern_return_t ret = KERN_DENIED;
701
702 if (check_uuid == NULL) {
703 return KERN_INVALID_ARGUMENT;
704 }
705
706 #if CONFIG_SPTM
707 ret = txm_check_trust_cache_runtime_for_uuid(check_uuid);
708 #elif PMAP_CS_PPL_MONITOR
709 ret = ppl_check_trust_cache_runtime_for_uuid(check_uuid);
710 #else
711 ret = xnu_check_trust_cache_runtime_for_uuid(check_uuid);
712 #endif
713
714 return ret;
715 }
716
717 kern_return_t
load_trust_cache(const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len)718 load_trust_cache(
719 const uint8_t *img4_object, const size_t img4_object_len,
720 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len)
721 {
722 TCType_t type = kTCTypeInvalid;
723 kern_return_t ret = KERN_DENIED;
724
725 /* Start from the first valid type and attempt to validate through each */
726 for (type = kTCTypeLTRS; type < kTCTypeTotal; type += 1) {
727 ret = load_trust_cache_with_type(
728 type,
729 img4_object, img4_object_len,
730 img4_ext_manifest, img4_ext_manifest_len,
731 NULL, 0);
732
733 if ((ret == KERN_SUCCESS) || (ret == KERN_ALREADY_IN_SET)) {
734 return ret;
735 }
736 }
737
738 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
739 /* Attempt to load as an engineering root */
740 ret = load_trust_cache_with_type(
741 kTCTypeDTRS,
742 img4_object, img4_object_len,
743 img4_ext_manifest, img4_ext_manifest_len,
744 NULL, 0);
745 #endif
746
747 return ret;
748 }
749
750 kern_return_t
load_trust_cache_with_type(TCType_t type,const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)751 load_trust_cache_with_type(
752 TCType_t type,
753 const uint8_t *img4_object, const size_t img4_object_len,
754 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len,
755 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
756 {
757 kern_return_t ret = KERN_DENIED;
758 uintptr_t length_check = 0;
759 const uint8_t *img4_payload = NULL;
760 size_t img4_payload_len = 0;
761 const uint8_t *img4_manifest = NULL;
762 size_t img4_manifest_len = 0;
763
764 /* img4_object is required */
765 if (!img4_object || (img4_object_len == 0)) {
766 printf("unable to load trust cache (type: %u): no img4_object provided\n", type);
767 return KERN_INVALID_ARGUMENT;
768 } else if (os_add_overflow((uintptr_t)img4_object, img4_object_len, &length_check)) {
769 panic("overflow on the img4 object: %p | %lu", img4_object, img4_object_len);
770 }
771
772 /* img4_ext_manifest is optional */
773 if (img4_ext_manifest_len != 0) {
774 if (!img4_ext_manifest) {
775 printf("unable to load trust cache (type: %u): img4_ext_manifest expected\n", type);
776 return KERN_INVALID_ARGUMENT;
777 } else if (os_add_overflow((uintptr_t)img4_ext_manifest, img4_ext_manifest_len, &length_check)) {
778 panic("overflow on the ext manifest: %p | %lu", img4_ext_manifest, img4_ext_manifest_len);
779 }
780 }
781
782 /* img4_aux_manifest is optional */
783 if (img4_aux_manifest_len != 0) {
784 if (!img4_aux_manifest) {
785 printf("unable to load trust cache (type: %u): img4_aux_manifest expected\n", type);
786 return KERN_INVALID_ARGUMENT;
787 } else if (os_add_overflow((uintptr_t)img4_aux_manifest, img4_aux_manifest_len, &length_check)) {
788 panic("overflow on the ext manifest: %p | %lu", img4_aux_manifest, img4_aux_manifest_len);
789 }
790 }
791
792 /*
793 * If we don't have an external manifest provided, we expect the img4_object to have
794 * the manifest embedded. In this case, we need to extract the different artifacts
795 * out of the object.
796 */
797 if (img4_ext_manifest_len != 0) {
798 img4_payload = img4_object;
799 img4_payload_len = img4_object_len;
800 img4_manifest = img4_ext_manifest;
801 img4_manifest_len = img4_ext_manifest_len;
802 } else {
803 if (img4if->i4if_version < 15) {
804 /* AppleImage4 change hasn't landed in the build */
805 printf("unable to extract payload and manifest from object\n");
806 return KERN_NOT_SUPPORTED;
807 }
808 img4_buff_t img4_buff = IMG4_BUFF_INIT;
809
810 /* Extract the payload */
811 if (img4_get_payload(img4_object, img4_object_len, &img4_buff) == NULL) {
812 printf("unable to find payload within img4 object\n");
813 return KERN_NOT_FOUND;
814 }
815 img4_payload = img4_buff.i4b_bytes;
816 img4_payload_len = img4_buff.i4b_len;
817
818 /* Extract the manifest */
819 if (img4_get_manifest(img4_object, img4_object_len, &img4_buff) == NULL) {
820 printf("unable to find manifest within img4 object\n");
821 return KERN_NOT_FOUND;
822 }
823 img4_manifest = img4_buff.i4b_bytes;
824 img4_manifest_len = img4_buff.i4b_len;
825 }
826
827 if ((type == kTCTypeStatic) || (type == kTCTypeEngineering) || (type == kTCTypeLegacy)) {
828 printf("unable to load trust cache: invalid type: %u\n", type);
829 return KERN_INVALID_ARGUMENT;
830 } else if (type >= kTCTypeTotal) {
831 printf("unable to load trust cache: unknown type: %u\n", type);
832 return KERN_INVALID_ARGUMENT;
833 }
834
835 /* Validate entitlement for the calling process */
836 if (TCTypeConfig[type].entitlementValue != NULL) {
837 const bool entitlement_satisfied = IOCurrentTaskHasStringEntitlement(
838 "com.apple.private.pmap.load-trust-cache",
839 TCTypeConfig[type].entitlementValue);
840
841 if (entitlement_satisfied == false) {
842 printf("unable to load trust cache (type: %u): unsatisfied entitlement\n", type);
843 return KERN_DENIED;
844 }
845 }
846
847 if ((type == kTCTypeCryptex1BootOS) && boot_os_tc_loaded) {
848 printf("disallowed to load multiple kTCTypeCryptex1BootOS trust caches\n");
849 return KERN_DENIED;
850 } else if ((type == kTCTypeCryptex1BootApp) && boot_app_tc_loaded) {
851 printf("disallowed to load multiple kTCTypeCryptex1BootApp trust caches\n");
852 return KERN_DENIED;
853 }
854
855 if (restricted_execution_mode_state() == KERN_SUCCESS) {
856 printf("disallowed to load trust caches once REM is enabled\n");
857 return KERN_DENIED;
858 }
859
860 #if CONFIG_SPTM
861 ret = txm_load_trust_cache(
862 type,
863 img4_payload, img4_payload_len,
864 img4_manifest, img4_manifest_len,
865 img4_aux_manifest, img4_aux_manifest_len);
866 #elif PMAP_CS_PPL_MONITOR
867 ret = ppl_load_trust_cache(
868 type,
869 img4_payload, img4_payload_len,
870 img4_manifest, img4_manifest_len,
871 img4_aux_manifest, img4_aux_manifest_len);
872 #else
873 ret = xnu_load_trust_cache(
874 type,
875 img4_payload, img4_payload_len,
876 img4_manifest, img4_manifest_len,
877 img4_aux_manifest, img4_aux_manifest_len);
878 #endif
879
880 if (ret != KERN_SUCCESS) {
881 printf("unable to load trust cache (type: %u): %d\n", type, ret);
882 } else {
883 if (type == kTCTypeCryptex1BootOS) {
884 boot_os_tc_loaded = true;
885 } else if (type == kTCTypeCryptex1BootApp) {
886 boot_app_tc_loaded = true;
887 }
888 printf("successfully loaded trust cache of type: %u\n", type);
889 }
890
891 return ret;
892 }
893
894 kern_return_t
load_legacy_trust_cache(const uint8_t * module_data,const size_t module_size)895 load_legacy_trust_cache(
896 const uint8_t *module_data, const size_t module_size)
897 {
898 kern_return_t ret = KERN_DENIED;
899 uintptr_t length_check = 0;
900
901 /* Module is required */
902 if (!module_data || (module_size == 0)) {
903 printf("unable to load legacy trust cache: no module provided\n");
904 return KERN_INVALID_ARGUMENT;
905 } else if (os_add_overflow((uintptr_t)module_data, module_size, &length_check)) {
906 panic("overflow on the module: %p | %lu", module_data, module_size);
907 }
908
909 #if CONFIG_SPTM
910 ret = txm_load_legacy_trust_cache(module_data, module_size);
911 #elif PMAP_CS_PPL_MONITOR
912 ret = ppl_load_legacy_trust_cache(module_data, module_size);
913 #else
914 ret = xnu_load_legacy_trust_cache(module_data, module_size);
915 #endif
916
917 if (ret != KERN_SUCCESS) {
918 printf("unable to load legacy trust cache: %d\n", ret);
919 } else {
920 printf("successfully loaded legacy trust cache\n");
921 }
922
923 return ret;
924 }
925
926 kern_return_t
query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)927 query_trust_cache(
928 TCQueryType_t query_type,
929 const uint8_t cdhash[kTCEntryHashSize],
930 TrustCacheQueryToken_t *query_token)
931 {
932 kern_return_t ret = KERN_NOT_FOUND;
933
934 if (cdhash == NULL) {
935 printf("unable to query trust caches: no cdhash provided\n");
936 return KERN_INVALID_ARGUMENT;
937 }
938
939 #if CONFIG_SPTM
940 ret = txm_query_trust_cache(query_type, cdhash, query_token);
941 #elif PMAP_CS_PPL_MONITOR
942 ret = ppl_query_trust_cache(query_type, cdhash, query_token);
943 #else
944 ret = xnu_query_trust_cache(query_type, cdhash, query_token);
945 #endif
946
947 return ret;
948 }
949
950 kern_return_t
query_trust_cache_for_rem(const uint8_t cdhash[kTCEntryHashSize],__unused uint8_t * rem_perms)951 query_trust_cache_for_rem(
952 const uint8_t cdhash[kTCEntryHashSize],
953 __unused uint8_t *rem_perms)
954 {
955 kern_return_t ret = KERN_NOT_SUPPORTED;
956
957 if (cdhash == NULL) {
958 printf("unable to query trust caches: no cdhash provided\n");
959 return KERN_INVALID_ARGUMENT;
960 }
961
962 /*
963 * Only when the system is using the Trusted Execution Monitor environment does
964 * it support restricted execution mode. For all other monitor environments, or
965 * when we don't have a monitor, the return defaults to a not supported.
966 */
967 #if CONFIG_SPTM
968 ret = txm_query_trust_cache_for_rem(cdhash, rem_perms);
969 #endif
970
971 return ret;
972 }
973
974 /*
975 * The trust cache management library uses a wrapper data structure to manage each
976 * of the trust cache modules. We know the exact number of static trust caches we
977 * expect, so we keep around a read-only-late allocation of the data structure for
978 * use.
979 *
980 * Since engineering trust caches are only ever allowed on development builds, they
981 * are not protected through the read-only-late property, and instead allocated
982 * dynamically.
983 */
984
/* Set once load_static_trust_cache() has run, even when no caches were found */
SECURITY_READ_ONLY_LATE(bool) trust_cache_static_init = false;

/* Set once at least one static trust cache module has been loaded */
SECURITY_READ_ONLY_LATE(bool) trust_cache_static_loaded = false;

/* Wrapper for the first (primary) static trust cache module */
SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static0 = {0};

#if CONFIG_SECOND_STATIC_TRUST_CACHE
/* Wrapper for the optional second static trust cache module */
SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static1 = {0};
#endif
992
#if defined(__arm64__)

/*
 * On arm64, the pmap layer provides phystokv() for translating a physical
 * address into its kernel virtual mapping; declare it here since the proper
 * header isn't visible to this part of the code base.
 */
typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

#else /* x86_64 */
/*
 * We need this duplicate definition because it is hidden behind the MACH_KERNEL_PRIVATE
 * macro definition, which makes it inaccessible to this part of the code base.
 */
extern uint64_t physmap_base, physmap_max;

/*
 * Translate a physical address into the kernel's physmap window, panicking
 * when the resulting virtual address falls outside the physmap.
 *
 * NOTE(review): only the upper bound is checked; there is no check that
 * (paddr + physmap_base) doesn't wrap -- presumably callers never pass
 * addresses large enough for that, but confirm against the x86_64 pmap.
 */
static inline void*
PHYSMAP_PTOV_check(void *paddr)
{
	uint64_t pvaddr = (uint64_t)paddr + physmap_base;

	if (__improbable(pvaddr >= physmap_max)) {
		panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
		    pvaddr, physmap_base, physmap_max);
	}

	return (void*)pvaddr;
}

#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
#define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))

#endif /* defined(__arm64__) */
1022
/*
 * Locate the trust cache region handed to the kernel by the bootloader
 * (through the "TrustCache" property of chosen/memory-map in the device tree)
 * and load every trust cache module it contains. The first module is always
 * loaded as the static trust cache; with CONFIG_SECOND_STATIC_TRUST_CACHE and
 * runtime permission, the second module is static as well. All remaining
 * modules are engineering trust caches and are only loaded when the runtime
 * allows them.
 *
 * Panics when the trust cache region is malformed (inconsistent lengths,
 * offset overflows, out-of-bounds modules) or when a static module fails to
 * load. Returns early (non-fatally) when no region, runtime, or supported
 * AMFI interface is present, e.g. on x86_64.
 */
void
load_static_trust_cache(void)
{
	DTEntry memory_map = {0};
	const DTTrustCacheRange *tc_range = NULL;
	trust_cache_offsets_t *tc_offsets = NULL;
	unsigned int tc_dt_prop_length = 0;
	size_t tc_segment_length = 0;

	/* Mark this function as having been called */
	trust_cache_static_init = true;

	/* Nothing to do when the runtime isn't set */
	if (trust_cache_rt == NULL) {
		return;
	}

	if (amfi->TrustCache.version < 1) {
		/* AMFI change hasn't landed in the build */
		printf("unable to load static trust cache: interface not supported\n");
		return;
	}

	int err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
	if (err != kSuccess) {
		printf("unable to find chosen/memory-map in the device tree: %d\n", err);
		return;
	}

	/*
	 * The property describes the physical range of the trust cache region;
	 * translate it into the kernel's virtual address space before reading.
	 */
	err = SecureDTGetProperty(memory_map, "TrustCache", (const void **)&tc_range, &tc_dt_prop_length);
	if (err == kSuccess) {
		if (tc_dt_prop_length != sizeof(DTTrustCacheRange)) {
			panic("unexpected size for TrustCache property: %u != %zu",
			    tc_dt_prop_length, sizeof(DTTrustCacheRange));
		}

		tc_offsets = (void*)phystokv(tc_range->paddr);
		tc_segment_length = tc_range->length;
	}

	/* x86_64 devices aren't expected to have trust caches */
	if (tc_segment_length == 0) {
		if (tc_offsets && tc_offsets->num_caches != 0) {
			panic("trust cache segment is zero length but trust caches are available: %u",
			    tc_offsets->num_caches);
		}

		printf("no external trust caches found (segment length is zero)\n");
		return;
	} else if (tc_offsets->num_caches == 0) {
		panic("trust cache segment isn't zero but no trust caches available: %lu",
		    (unsigned long)tc_segment_length);
	}

	/*
	 * Validate that the offsets header (fixed header plus one uint32_t
	 * offset per module) actually fits inside the segment, with explicit
	 * overflow checks on each arithmetic step.
	 */
	size_t offsets_length = 0;
	size_t struct_length = 0;
	if (os_mul_overflow(tc_offsets->num_caches, sizeof(uint32_t), &offsets_length)) {
		panic("overflow on the number of trust caches provided: %u", tc_offsets->num_caches);
	} else if (os_add_overflow(offsets_length, sizeof(trust_cache_offsets_t), &struct_length)) {
		panic("overflow on length of the trust cache offsets: %lu",
		    (unsigned long)offsets_length);
	} else if (tc_segment_length < struct_length) {
		panic("trust cache segment length smaller than required: %lu | %lu",
		    (unsigned long)tc_segment_length, (unsigned long)struct_length);
	}
	const uintptr_t tc_region_end = (uintptr_t)tc_offsets + tc_segment_length;

	printf("attempting to load %u external trust cache modules\n", tc_offsets->num_caches);

	for (uint32_t i = 0; i < tc_offsets->num_caches; i++) {
		TCReturn_t tc_ret = (TCReturn_t){.error = kTCReturnError};
		TCType_t tc_type = kTCTypeEngineering;
		TrustCache_t *trust_cache = NULL;

		/* Each module starts at its offset relative to the segment base */
		uintptr_t tc_module = 0;
		if (os_add_overflow((uintptr_t)tc_offsets, tc_offsets->offsets[i], &tc_module)) {
			panic("trust cache module start overflows: %u | %lu | %u",
			    i, (unsigned long)tc_offsets, tc_offsets->offsets[i]);
		} else if (tc_module >= tc_region_end) {
			panic("trust cache module begins after segment ends: %u | %lx | %lx",
			    i, (unsigned long)tc_module, tc_region_end);
		}

		/* Should be safe for underflow */
		const size_t buffer_length = tc_region_end - tc_module;

		/* The first module is always the static trust cache */
		if (i == 0) {
			tc_type = kTCTypeStatic;
			trust_cache = &trust_cache_static0;
		}

#if CONFIG_SECOND_STATIC_TRUST_CACHE
		if (trust_cache_rt->allowSecondStaticTC && (i == 1)) {
			tc_type = kTCTypeStatic;
			trust_cache = &trust_cache_static1;
		}
#endif

		/* Everything which isn't static is an engineering trust cache */
		if (tc_type == kTCTypeEngineering) {
			if (trust_cache_rt->allowEngineeringTC == false) {
				printf("skipping engineering trust cache module: %u\n", i);
				continue;
			}

			/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
			trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
			assert(trust_cache != NULL);
		}

		tc_ret = amfi->TrustCache.loadModule(
			trust_cache_rt,
			tc_type,
			trust_cache,
			tc_module, buffer_length);

		if (tc_ret.error != kTCReturnSuccess) {
			printf("unable to load trust cache module: %u (TCReturn: 0x%02X | 0x%02X | %u)\n",
			    i, tc_ret.component, tc_ret.error, tc_ret.uniqueError);

			/* A static module which fails to load is fatal */
			if (tc_type == kTCTypeStatic) {
				panic("failed to load static trust cache module: %u", i);
			}
			continue;
		}
		printf("loaded external trust cache module: %u\n", i);

		/*
		 * The first module is always loaded as a static trust cache. If loading it failed,
		 * then this function would've panicked. If we reach here, it means we've loaded a
		 * static trust cache on the system.
		 */
		trust_cache_static_loaded = true;
	}

	printf("completed loading external trust cache modules\n");
}
1160
1161 kern_return_t
static_trust_cache_capabilities(uint32_t * num_static_trust_caches_ret,TCCapabilities_t * capabilities0_ret,TCCapabilities_t * capabilities1_ret)1162 static_trust_cache_capabilities(
1163 uint32_t *num_static_trust_caches_ret,
1164 TCCapabilities_t *capabilities0_ret,
1165 TCCapabilities_t *capabilities1_ret)
1166 {
1167 TCReturn_t tcRet = {.error = kTCReturnError};
1168
1169 *num_static_trust_caches_ret = 0;
1170 *capabilities0_ret = kTCCapabilityNone;
1171 *capabilities1_ret = kTCCapabilityNone;
1172
1173 /* Ensure static trust caches have been initialized */
1174 if (trust_cache_static_init == false) {
1175 panic("attempted to query static trust cache capabilities without init");
1176 }
1177
1178 #if CONFIG_SPTM
1179 if (num_static_trust_caches > 0) {
1180 /* Copy in the data received from TrustedExecutionMonitor */
1181 *num_static_trust_caches_ret = num_static_trust_caches;
1182 *capabilities0_ret = static_trust_cache_capabilities0;
1183 *capabilities1_ret = static_trust_cache_capabilities1;
1184
1185 /* Return successfully */
1186 return KERN_SUCCESS;
1187 }
1188 #endif
1189
1190 if (amfi->TrustCache.version < 2) {
1191 /* AMFI change hasn't landed in the build */
1192 printf("unable to get static trust cache capabilities: interface not supported\n");
1193 return KERN_NOT_SUPPORTED;
1194 } else if (trust_cache_static_loaded == false) {
1195 /* Return arguments already set */
1196 return KERN_SUCCESS;
1197 }
1198
1199 tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static0, capabilities0_ret);
1200 assert(tcRet.error == kTCReturnSuccess);
1201 *num_static_trust_caches_ret += 1;
1202
1203 #if CONFIG_SECOND_STATIC_TRUST_CACHE
1204 tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static1, capabilities1_ret);
1205 assert(tcRet.error == kTCReturnSuccess);
1206 *num_static_trust_caches_ret += 1;
1207 #endif
1208
1209 return KERN_SUCCESS;
1210 }
1211