1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22
23 #include <os/atomic_private.h>
24 #include <os/overflow.h>
25 #include <pexpert/pexpert.h>
26 #include <pexpert/device_tree.h>
27 #include <mach/boolean.h>
28 #include <mach/vm_param.h>
29 #include <vm/vm_kern_xnu.h>
30 #include <vm/pmap_cs.h>
31 #include <kern/zalloc.h>
32 #include <kern/kalloc.h>
33 #include <kern/assert.h>
34 #include <kern/lock_rw.h>
35 #include <libkern/libkern.h>
36 #include <libkern/section_keywords.h>
37 #include <libkern/img4/interface.h>
38 #include <libkern/amfi/amfi.h>
39 #include <sys/vm.h>
40 #include <sys/ubc.h>
41 #include <sys/proc.h>
42 #include <sys/sysctl.h>
43 #include <sys/codesign.h>
44 #include <sys/trust_caches.h>
45 #include <sys/code_signing.h>
46 #include <IOKit/IOLib.h>
47 #include <IOKit/IOBSD.h>
48 #include <img4/firmware.h>
49 #include <TrustCache/API.h>
50
51 static bool boot_os_tc_loaded = false;
52 static bool boot_app_tc_loaded = false;
53
54 SECURITY_READ_ONLY_LATE(uint32_t) num_static_trust_caches = 0;
55 SECURITY_READ_ONLY_LATE(uint32_t) num_engineering_trust_caches = 0;
56 uint32_t num_loadable_trust_caches = 0;
57
58 SYSCTL_DECL(_security);
59 SYSCTL_DECL(_security_codesigning);
60 SYSCTL_DECL(_security_codesigning_trustcaches);
61 SYSCTL_NODE(_security_codesigning, OID_AUTO, trustcaches, CTLFLAG_RD, 0, "XNU Trust Caches");
62
63 SYSCTL_UINT(
64 _security_codesigning_trustcaches, OID_AUTO,
65 num_static, CTLFLAG_RD, &num_static_trust_caches,
66 0, "number of static trust caches loaded"
67 );
68
69 SYSCTL_UINT(
70 _security_codesigning_trustcaches, OID_AUTO,
71 num_engineering, CTLFLAG_RD, &num_engineering_trust_caches,
72 0, "number of engineering trust caches loaded"
73 );
74
75 SYSCTL_UINT(
76 _security_codesigning_trustcaches, OID_AUTO,
77 num_loadable, CTLFLAG_RD, &num_loadable_trust_caches,
78 0, "number of loadable trust caches loaded"
79 );
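/*
 * These counters are exported read-only through sysctl. As an illustrative,
 * non-normative example, they can be read from a shell on a booted system
 * through the matching MIB names:
 *
 *   sysctl security.codesigning.trustcaches.num_static
 *   sysctl security.codesigning.trustcaches.num_engineering
 *   sysctl security.codesigning.trustcaches.num_loadable
 */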
80
81 #if CONFIG_SPTM
82 /*
83 * We have the TrustedExecutionMonitor environment available. All of our artifacts
84 * need to be page-aligned, and transferred to the appropriate TXM type before we
85 * call into TXM to load the trust cache.
86 *
87 * The trust cache runtime is managed independently by TXM. All initialization work
88 * is done by the TXM bootstrap and there is nothing more we need to do here.
89 */
90 #include <sys/trusted_execution_monitor.h>
91
92 /* Immutable part of the runtime */
93 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = NULL;
94
95 /* Mutable part of the runtime */
96 SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = NULL;
97
98 /* Static trust cache information collected from TXM */
99 SECURITY_READ_ONLY_LATE(uint32_t) txm_static_trust_caches = 0;
100 SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities0 = 0;
101 SECURITY_READ_ONLY_LATE(TCCapabilities_t) static_trust_cache_capabilities1 = 0;
102
103 static void
104 get_trust_cache_info(void)
105 {
106 txm_call_t txm_call = {
107 .selector = kTXMKernelSelectorGetTrustCacheInfo,
108 .failure_fatal = true,
109 .num_output_args = 4
110 };
111 txm_kernel_call(&txm_call);
112
113 /*
114 * The monitor returns the libTrustCache runtime it uses within the first
115 * returned word. The kernel doesn't currently have a use-case for this, so
116 * we don't use it. But the monitor continues to return this value in case
117 * it ever comes into use later down the line.
118 */
119 txm_static_trust_caches = (uint32_t)txm_call.return_words[1];
120 static_trust_cache_capabilities0 = (TCCapabilities_t)txm_call.return_words[2];
121 static_trust_cache_capabilities1 = (TCCapabilities_t)txm_call.return_words[3];
122 }
123
124 void
125 trust_cache_runtime_init(void)
126 {
127 /* Image4 interface needs to be available */
128 if (img4if == NULL) {
129 panic("image4 interface not available");
130 }
131
132 /* AMFI interface needs to be available */
133 if (amfi == NULL) {
134 panic("amfi interface not available");
135 } else if (amfi->TrustCache.version < 2) {
136 panic("amfi interface is stale: %u", amfi->TrustCache.version);
137 }
138
139 /* Acquire trust cache information from the monitor */
140 get_trust_cache_info();
141 }
142
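/*
 * Ask TXM to unload a previously loaded trust cache, identified by its UUID.
 * TXM's trust-cache-specific "not found" error is translated into KERN_NOT_FOUND
 * so that callers only need to reason about Mach return codes.
 */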
143 static kern_return_t
144 txm_unload_trust_cache(uuid_t uuid)
145 {
146 txm_call_t txm_call = {
147 .selector = kTXMKernelSelectorUnloadTrustCache,
148 .num_input_args = 1
149 };
150 kern_return_t ret = txm_kernel_call(&txm_call, uuid);
151
152 /* Check for not-found trust cache error */
153 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
154 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
155 ret = KERN_NOT_FOUND;
156 }
157 }
158
159 return ret;
160 }
161
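/*
 * Load a trust cache through TXM. The image4 payload and manifest are copied
 * into fresh kernel allocations, those regions are transferred to TXM ownership,
 * and TXM is then asked to load the trust cache. On return, the manifest is
 * always reclaimed and freed; the payload is only reclaimed when TXM indicates
 * it made its own copy, or when the load failed.
 */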
162 static kern_return_t
163 txm_load_trust_cache(
164 TCType_t type,
165 const uint8_t *img4_payload, const size_t img4_payload_len,
166 const uint8_t *img4_manifest, const size_t img4_manifest_len,
167 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
168 {
169 txm_call_t txm_call = {
170 .selector = kTXMKernelSelectorLoadTrustCache,
171 .num_input_args = 7,
172 .num_output_args = 1,
173 };
174 vm_address_t payload_addr = 0;
175 vm_address_t manifest_addr = 0;
176 kern_return_t ret = KERN_DENIED;
177 bool reclaim_payload = false;
178
179 /* We don't support the auxiliary manifest for now */
180 (void)img4_aux_manifest;
181 (void)img4_aux_manifest_len;
182
183 ret = kmem_alloc(kernel_map, &payload_addr, img4_payload_len,
184 KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
185 if (ret != KERN_SUCCESS) {
186 printf("unable to allocate memory for image4 payload: %d\n", ret);
187 goto out;
188 }
189 memcpy((void*)payload_addr, img4_payload, img4_payload_len);
190
191 ret = kmem_alloc(kernel_map, &manifest_addr, img4_manifest_len,
192 KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
193 if (ret != KERN_SUCCESS) {
194 printf("unable to allocate memory for image4 manifest: %d\n", ret);
195 goto out;
196 }
197 memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);
198
199 /* Transfer both regions to be TXM owned */
200 txm_transfer_region(payload_addr, img4_payload_len);
201 txm_transfer_region(manifest_addr, img4_manifest_len);
202
203 /* TXM will round the lengths up to page size itself */
204 ret = txm_kernel_call(
205 &txm_call,
206 type,
207 payload_addr, img4_payload_len,
208 manifest_addr, img4_manifest_len,
209 0, 0);
210
211 /* Check for duplicate trust cache error */
212 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
213 if (txm_call.txm_ret.tcRet.error == kTCReturnDuplicate) {
214 ret = KERN_ALREADY_IN_SET;
215 }
216 }
217
218 /*
219 * Most trust cache payloads are small. In order to conserve memory, TXM will
220 * prefer to create its own allocation and copy the contents of the payload
221 * into that allocation. When this happens, the payload is returned to the
222 * kernel to be reclaimed.
223 */
224 if (ret == KERN_SUCCESS) {
225 reclaim_payload = txm_call.return_words[0] != 0;
226 }
227
228 out:
229 if (manifest_addr != 0) {
230 /* Reclaim the manifest region */
231 txm_reclaim_region(manifest_addr, img4_manifest_len);
232
233 /* Free the manifest region */
234 kmem_free(kernel_map, manifest_addr, img4_manifest_len);
235 manifest_addr = 0;
236 }
237
238 if (((ret != KERN_SUCCESS) || (reclaim_payload == true)) && (payload_addr != 0)) {
239 /* Reclaim the payload region */
240 txm_reclaim_region(payload_addr, img4_payload_len);
241
242 /* Free the payload region */
243 kmem_free(kernel_map, payload_addr, img4_payload_len);
244 payload_addr = 0;
245 }
246
247 return ret;
248 }
249
250 static kern_return_t
251 txm_load_legacy_trust_cache(
252 __unused const uint8_t *module_data, __unused const size_t module_size)
253 {
254 panic("legacy trust caches are not supported on this platform");
255 }
256
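/*
 * Query TXM for a code directory hash. On success, the returned words identify
 * the trust cache and the entry which matched, and they are stored in the
 * caller's query token when one is provided.
 */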
257 static kern_return_t
258 txm_query_trust_cache(
259 TCQueryType_t query_type,
260 const uint8_t cdhash[kTCEntryHashSize],
261 TrustCacheQueryToken_t *query_token)
262 {
263 txm_call_t txm_call = {
264 .selector = kTXMKernelSelectorQueryTrustCache,
265 .failure_silent = true,
266 .num_input_args = 2,
267 .num_output_args = 2,
268 };
269 kern_return_t ret = txm_kernel_call(&txm_call, query_type, cdhash);
270
271 if (ret == KERN_SUCCESS) {
272 if (query_token) {
273 query_token->trustCache = (const TrustCache_t*)txm_call.return_words[0];
274 query_token->trustCacheEntry = (const void*)txm_call.return_words[1];
275 }
276 return KERN_SUCCESS;
277 }
278
279 /* Check for not-found trust cache error */
280 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
281 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
282 ret = KERN_NOT_FOUND;
283 }
284 }
285
286 return ret;
287 }
288
289 static kern_return_t
290 txm_query_trust_cache_for_rem(
291 const uint8_t cdhash[kTCEntryHashSize],
292 uint8_t *rem_perms)
293 {
294 #if XNU_HAS_TRUST_CACHE_QUERY_FOR_REM
295 txm_call_t txm_call = {
296 .selector = kTXMKernelSelectorQueryTrustCacheForREM,
297 .num_input_args = 1,
298 .num_output_args = 1
299 };
300 kern_return_t ret = txm_kernel_call(&txm_call, cdhash);
301
302 if ((ret == KERN_SUCCESS) && (rem_perms != NULL)) {
303 *rem_perms = (uint8_t)txm_call.return_words[0];
304 }
305
306 return ret;
307 #else
308 (void)cdhash;
309 (void)rem_perms;
310 return KERN_NOT_SUPPORTED;
311 #endif /* XNU_HAS_TRUST_CACHE_QUERY_FOR_REM */
312 }
313
314 static kern_return_t
315 txm_check_trust_cache_runtime_for_uuid(
316 const uint8_t check_uuid[kUUIDSize])
317 {
318 txm_call_t txm_call = {
319 .selector = kTXMKernelSelectorCheckTrustCacheRuntimeForUUID,
320 .failure_silent = true,
321 .num_input_args = 1
322 };
323 kern_return_t ret = txm_kernel_call(&txm_call, check_uuid);
324
325 /* Check for not-found trust cache error */
326 if (txm_call.txm_ret.returnCode == kTXMReturnTrustCache) {
327 if (txm_call.txm_ret.tcRet.error == kTCReturnNotFound) {
328 ret = KERN_NOT_FOUND;
329 }
330 }
331
332 return ret;
333 }
334
335 #elif PMAP_CS_PPL_MONITOR
336 /*
337 * We have the Page Protection Layer environment available. All of our artifacts
338 * need to be page-aligned. The PPL will lock down the artifacts before it begins
339 * the validation.
340 *
341 * Even though the runtimes are PPL-owned, we expect the runtime init function
342 * to be called before the PPL has been locked down, which allows us to write
343 * to them.
344 */
345
346 /* Immutable part of the runtime */
347 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &ppl_trust_cache_rt;
348
349 /* Mutable part of the runtime */
350 SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &ppl_trust_cache_mut_rt;
351
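/*
 * Determine whether the system booted with an internal iBoot by looking at the
 * "iboot-build-variant" property under /chosen/iBoot in the device tree. Only
 * the "development" and "debug" variants are treated as internal.
 */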
352 static bool
353 is_internal_iBoot(void)
354 {
355 DTEntry node = {0};
356 const char *variant = NULL;
357 uint32_t size = 0;
358
359 int err = SecureDTLookupEntry(NULL, "/chosen/iBoot", &node);
360 if (err != kSuccess) {
361 printf("unable to find /chosen/iBoot in the device tree: %d\n", err);
362 return false;
363 }
364
365 err = SecureDTGetProperty(node, "iboot-build-variant", (const void **)&variant, &size);
366 if (err != kSuccess) {
367 printf("unable to find iboot-build-variant in /chosen/iBoot: %d\n", err);
368 return false;
369 } else if ((variant == NULL) || (size == 0)) {
370 printf("missing data for iboot-build-variant property\n");
371 return false;
372 }
373 printf("resolved iBoot build variant: %s\n", variant);
374
375 if (strcmp(variant, "development") == 0) {
376 return true;
377 } else if (strcmp(variant, "debug") == 0) {
378 return true;
379 }
380 return false;
381 }
382
383 void
384 trust_cache_runtime_init(void)
385 {
386 bool allow_second_static_cache = false;
387 bool allow_engineering_caches = false;
388
389 #if CONFIG_SECOND_STATIC_TRUST_CACHE
390 allow_second_static_cache = true;
391 #endif
392
393 #if PMAP_CS_INCLUDE_INTERNAL_CODE
394 allow_engineering_caches = true;
395 #endif
396
397 /*
398 * Allow engineering trust caches when the system is booting using a development
399 * or debug variant of iBoot.
400 */
401 if (is_internal_iBoot() == true) {
402 allow_engineering_caches = true;
403 }
404
405 /* Image4 interface needs to be available */
406 if (img4if == NULL) {
407 panic("image4 interface not available");
408 }
409
410 /* AMFI interface needs to be available */
411 if (amfi == NULL) {
412 panic("amfi interface not available");
413 } else if (amfi->TrustCache.version < 2) {
414 panic("amfi interface is stale: %u", amfi->TrustCache.version);
415 }
416
417 trustCacheInitializeRuntime(
418 trust_cache_rt,
419 trust_cache_mut_rt,
420 allow_second_static_cache,
421 allow_engineering_caches,
422 false,
423 IMG4_RUNTIME_PMAP_CS);
424
425 /* Locks are initialized in "pmap_bootstrap()" */
426 }
427
428 static kern_return_t
429 ppl_unload_trust_cache(__unused uuid_t uuid)
430 {
431 return KERN_NOT_SUPPORTED;
432 }
433
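/*
 * Load a trust cache through the PPL. The image4 payload is wrapped in a
 * pmap_img4_payload_t and all artifacts are copied into page-aligned kernel
 * allocations before being handed to the PPL. The manifests are always freed
 * on the way out, while the payload allocation is kept when the load succeeds
 * since the PPL continues to reference it.
 */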
434 static kern_return_t
435 ppl_load_trust_cache(
436 TCType_t type,
437 const uint8_t *img4_payload, const size_t img4_payload_len,
438 const uint8_t *img4_manifest, const size_t img4_manifest_len,
439 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
440 {
441 kern_return_t ret = KERN_DENIED;
442 vm_address_t payload_addr = 0;
443 vm_size_t payload_len = 0;
444 vm_size_t payload_len_aligned = 0;
445 vm_address_t manifest_addr = 0;
446 vm_size_t manifest_len_aligned = 0;
447 vm_address_t aux_manifest_addr = 0;
448 vm_size_t aux_manifest_len_aligned = 0;
449
450 /* The trust cache data structure is bundled with the img4 payload */
451 if (os_add_overflow(img4_payload_len, sizeof(pmap_img4_payload_t), &payload_len)) {
452 panic("overflow on pmap img4 payload: %lu", img4_payload_len);
453 }
454 payload_len_aligned = round_page(payload_len);
455 manifest_len_aligned = round_page(img4_manifest_len);
456 aux_manifest_len_aligned = round_page(img4_aux_manifest_len);
457
458 ret = kmem_alloc(kernel_map, &payload_addr, payload_len_aligned,
459 KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
460 if (ret != KERN_SUCCESS) {
461 printf("unable to allocate memory for pmap image4 payload: %d\n", ret);
462 goto out;
463 }
464
465 pmap_img4_payload_t *pmap_payload = (pmap_img4_payload_t*)payload_addr;
466 memcpy(pmap_payload->img4_payload, img4_payload, img4_payload_len);
467
468 /* Allocate storage for the manifest */
469 ret = kmem_alloc(kernel_map, &manifest_addr, manifest_len_aligned,
470 KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
471 if (ret != KERN_SUCCESS) {
472 printf("unable to allocate memory for image4 manifest: %d\n", ret);
473 goto out;
474 }
475 memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);
476
477 if (aux_manifest_len_aligned != 0) {
478 /* Allocate storage for the auxiliary manifest */
479 ret = kmem_alloc(kernel_map, &aux_manifest_addr, aux_manifest_len_aligned,
480 KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
481 if (ret != KERN_SUCCESS) {
482 printf("unable to allocate memory for auxiliary image4 manifest: %d\n", ret);
483 goto out;
484 }
485 memcpy((void*)aux_manifest_addr, img4_aux_manifest, img4_aux_manifest_len);
486 }
487
488 /* The PPL will round up the length to page size itself */
489 ret = pmap_load_trust_cache_with_type(
490 type,
491 payload_addr, payload_len,
492 manifest_addr, img4_manifest_len,
493 aux_manifest_addr, img4_aux_manifest_len);
494
495 out:
496 if (aux_manifest_addr != 0) {
497 kmem_free(kernel_map, aux_manifest_addr, aux_manifest_len_aligned);
498 aux_manifest_addr = 0;
499 aux_manifest_len_aligned = 0;
500 }
501
502 if (manifest_addr != 0) {
503 kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
504 manifest_addr = 0;
505 manifest_len_aligned = 0;
506 }
507
508 if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
509 kmem_free(kernel_map, payload_addr, payload_len_aligned);
510 payload_addr = 0;
511 payload_len_aligned = 0;
512 }
513
514 return ret;
515 }
516
517 static kern_return_t
518 ppl_load_legacy_trust_cache(
519 __unused const uint8_t *module_data, __unused const size_t module_size)
520 {
521 panic("legacy trust caches are not supported on this platform");
522 }
523
524 static kern_return_t
525 ppl_query_trust_cache(
526 TCQueryType_t query_type,
527 const uint8_t cdhash[kTCEntryHashSize],
528 TrustCacheQueryToken_t *query_token)
529 {
530 /*
531 * We need to query by trapping into the PPL since the PPL trust cache runtime
532 * lock needs to be held. We cannot hold the lock from outside the PPL.
533 */
534 return pmap_query_trust_cache(query_type, cdhash, query_token);
535 }
536
537 static kern_return_t
538 ppl_check_trust_cache_runtime_for_uuid(
539 const uint8_t check_uuid[kUUIDSize])
540 {
541 return pmap_check_trust_cache_runtime_for_uuid(check_uuid);
542 }
543
544 #else
545 /*
546 * We don't have a monitor environment available. This means someone with a kernel
547 * memory exploit will be able to inject a trust cache into the system. There is
548 * not much we can do here, since this is older HW.
549 */
550
551 /* Lock for the runtime */
552 LCK_GRP_DECLARE(trust_cache_lck_grp, "trust_cache_lck_grp");
553 decl_lck_rw_data(, trust_cache_rt_lock);
554
555 /* Immutable part of the runtime */
556 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t) trust_cache_rt_storage;
557 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &trust_cache_rt_storage;
558
559 /* Mutable part of the runtime */
560 TrustCacheMutableRuntime_t trust_cache_mut_rt_storage;
561 SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &trust_cache_mut_rt_storage;
562
563 void
564 trust_cache_runtime_init(void)
565 {
566 bool allow_second_static_cache = false;
567 bool allow_engineering_caches = false;
568 bool allow_legacy_caches = false;
569
570 #if CONFIG_SECOND_STATIC_TRUST_CACHE
571 allow_second_static_cache = true;
572 #endif
573
574 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
575 allow_engineering_caches = true;
576 #endif
577
578 #ifdef XNU_PLATFORM_BridgeOS
579 allow_legacy_caches = true;
580 #endif
581
582 /* Image4 interface needs to be available */
583 if (img4if == NULL) {
584 panic("image4 interface not available");
585 }
586
587 /* AMFI interface needs to be available */
588 if (amfi == NULL) {
589 panic("amfi interface not available");
590 } else if (amfi->TrustCache.version < 2) {
591 panic("amfi interface is stale: %u", amfi->TrustCache.version);
592 }
593
594 trustCacheInitializeRuntime(
595 trust_cache_rt,
596 trust_cache_mut_rt,
597 allow_second_static_cache,
598 allow_engineering_caches,
599 allow_legacy_caches,
600 IMG4_RUNTIME_DEFAULT);
601
602 /* Initialize the read-write lock */
603 lck_rw_init(&trust_cache_rt_lock, &trust_cache_lck_grp, 0);
604 }
605
606 static kern_return_t
607 xnu_unload_trust_cache(__unused uuid_t uuid)
608 {
609 return KERN_NOT_SUPPORTED;
610 }
611
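/*
 * Load a trust cache without a monitor. The payload is copied into a kernel
 * allocation which persists for as long as the trust cache remains loaded, and
 * the load itself is performed by libTrustCache (via the AMFI interface) under
 * the exclusive runtime lock.
 */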
612 static kern_return_t
613 xnu_load_trust_cache(
614 TCType_t type,
615 const uint8_t *img4_payload, const size_t img4_payload_len,
616 const uint8_t *img4_manifest, const size_t img4_manifest_len,
617 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
618 {
619 kern_return_t ret = KERN_DENIED;
620
621 /* Ignore the auxiliary manifest until we add support for it */
622 (void)img4_aux_manifest;
623 (void)img4_aux_manifest_len;
624
625 /* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
626 TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
627 assert(trust_cache != NULL);
628
629 /*
630 * The manifests aren't needed once validation is complete, but the payload needs to
631 * persist. The caller of this API expects us to make our own allocations for anything
632 * we keep, so we can use the manifests passed in to us directly, while the payload is
633 * copied into a new allocation of our own.
634 *
635 * Z_WAITOK implies that this allocation can never fail.
636 */
637 uint8_t *payload = (uint8_t*)kalloc_data(img4_payload_len, Z_WAITOK);
638 assert(payload != NULL);
639
640 /* Copy the payload into our allocation */
641 memcpy(payload, img4_payload, img4_payload_len);
642
643 /* Exclusively lock the runtime */
644 lck_rw_lock_exclusive(&trust_cache_rt_lock);
645
646 TCReturn_t tc_ret = amfi->TrustCache.load(
647 trust_cache_rt,
648 type,
649 trust_cache,
650 (const uintptr_t)payload, img4_payload_len,
651 (const uintptr_t)img4_manifest, img4_manifest_len);
652
653 /* Unlock the runtime */
654 lck_rw_unlock_exclusive(&trust_cache_rt_lock);
655
656 if (tc_ret.error == kTCReturnSuccess) {
657 ret = KERN_SUCCESS;
658 } else if (tc_ret.error == kTCReturnDuplicate) {
659 ret = KERN_ALREADY_IN_SET;
660 } else {
661 printf("unable to load trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
662 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
663
664 ret = KERN_FAILURE;
665 }
666
667 if (ret != KERN_SUCCESS) {
668 kfree_data(payload, img4_payload_len);
669 payload = NULL;
670
671 kfree_type(TrustCache_t, trust_cache);
672 trust_cache = NULL;
673 }
674 return ret;
675 }
676
677 static kern_return_t
678 xnu_load_legacy_trust_cache(
679 __unused const uint8_t *module_data, __unused const size_t module_size)
680 {
681 #if XNU_HAS_LEGACY_TRUST_CACHE_LOADING
682 kern_return_t ret = KERN_DENIED;
683
684 /* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
685 TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
686 assert(trust_cache != NULL);
687
688 /* Allocate storage for the module -- Z_WAITOK means this can't fail */
689 uint8_t *module = (uint8_t*)kalloc_data(module_size, Z_WAITOK);
690 assert(module != NULL);
691
692 /* Copy the module into our allocation */
693 memcpy(module, module_data, module_size);
694
695 /* Exclusively lock the runtime */
696 lck_rw_lock_exclusive(&trust_cache_rt_lock);
697
698 TCReturn_t tc_ret = amfi->TrustCache.loadModule(
699 trust_cache_rt,
700 kTCTypeLegacy,
701 trust_cache,
702 (const uintptr_t)module, module_size);
703
704 /* Unlock the runtime */
705 lck_rw_unlock_exclusive(&trust_cache_rt_lock);
706
707 if (tc_ret.error == kTCReturnSuccess) {
708 ret = KERN_SUCCESS;
709 } else if (tc_ret.error == kTCReturnDuplicate) {
710 ret = KERN_ALREADY_IN_SET;
711 } else {
712 printf("unable to load legacy trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
713 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
714
715 ret = KERN_FAILURE;
716 }
717
718 if (ret != KERN_SUCCESS) {
719 kfree_data(module, module_size);
720 module = NULL;
721
722 kfree_type(TrustCache_t, trust_cache);
723 trust_cache = NULL;
724 }
725 return ret;
726 #else
727 panic("legacy trust caches are not supported on this platform");
728 #endif /* XNU_HAS_LEGACY_TRUST_CACHE_LOADING */
729 }
730
731 static kern_return_t
732 xnu_query_trust_cache(
733 TCQueryType_t query_type,
734 const uint8_t cdhash[kTCEntryHashSize],
735 TrustCacheQueryToken_t *query_token)
736 {
737 kern_return_t ret = KERN_NOT_FOUND;
738
739 /* Validate the query type preemptively */
740 if (query_type >= kTCQueryTypeTotal) {
741 printf("unable to query trust cache: invalid query type: %u\n", query_type);
742 return KERN_INVALID_ARGUMENT;
743 }
744
745 /* Lock the runtime as shared */
746 lck_rw_lock_shared(&trust_cache_rt_lock);
747
748 TCReturn_t tc_ret = amfi->TrustCache.query(
749 trust_cache_rt,
750 query_type,
751 cdhash,
752 query_token);
753
754 /* Unlock the runtime */
755 lck_rw_unlock_shared(&trust_cache_rt_lock);
756
757 if (tc_ret.error == kTCReturnSuccess) {
758 ret = KERN_SUCCESS;
759 } else if (tc_ret.error == kTCReturnNotFound) {
760 ret = KERN_NOT_FOUND;
761 } else {
762 ret = KERN_FAILURE;
763 printf("trust cache query failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
764 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
765 }
766
767 return ret;
768 }
769
770 static kern_return_t
771 xnu_check_trust_cache_runtime_for_uuid(
772 const uint8_t check_uuid[kUUIDSize])
773 {
774 kern_return_t ret = KERN_DENIED;
775
776 /* Lock the runtime as shared */
777 lck_rw_lock_shared(&trust_cache_rt_lock);
778
779 TCReturn_t tc_ret = amfi->TrustCache.checkRuntimeForUUID(
780 trust_cache_rt,
781 check_uuid,
782 NULL);
783
784 /* Unlock the runtime */
785 lck_rw_unlock_shared(&trust_cache_rt_lock);
786
787 if (tc_ret.error == kTCReturnSuccess) {
788 ret = KERN_SUCCESS;
789 } else if (tc_ret.error == kTCReturnNotFound) {
790 ret = KERN_NOT_FOUND;
791 } else {
792 ret = KERN_FAILURE;
793 printf("trust cache UUID check failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
794 tc_ret.component, tc_ret.error, tc_ret.uniqueError);
795 }
796
797 return ret;
798 }
799
800 #endif /* CONFIG_SPTM */
801
802 kern_return_t
803 check_trust_cache_runtime_for_uuid(
804 const uint8_t check_uuid[kUUIDSize])
805 {
806 kern_return_t ret = KERN_DENIED;
807
808 if (check_uuid == NULL) {
809 return KERN_INVALID_ARGUMENT;
810 }
811
812 #if CONFIG_SPTM
813 ret = txm_check_trust_cache_runtime_for_uuid(check_uuid);
814 #elif PMAP_CS_PPL_MONITOR
815 ret = ppl_check_trust_cache_runtime_for_uuid(check_uuid);
816 #else
817 ret = xnu_check_trust_cache_runtime_for_uuid(check_uuid);
818 #endif
819
820 return ret;
821 }
822
823 kern_return_t
824 unload_trust_cache(uuid_t uuid)
825 {
826 kern_return_t ret = KERN_DENIED;
827 uuid_string_t uuid_string = {0};
828
829 /* Parse the UUID into a string */
830 uuid_unparse_lower(uuid, uuid_string);
831
832 /* We want to capture this log even on release kernels */
833 IOLog("attempting to unload trust cache with UUID: %s\n", uuid_string);
834
835 /* Check the entitlement on the calling process */
836 if (IOCurrentTaskHasEntitlement("com.apple.private.unload-trust-cache") == false) {
837 printf("calling task not permitted to unload trust caches\n");
838 return KERN_DENIED;
839 }
840
841 #if CONFIG_SPTM
842 ret = txm_unload_trust_cache(uuid);
843 #elif PMAP_CS_PPL_MONITOR
844 ret = ppl_unload_trust_cache(uuid);
845 #else
846 ret = xnu_unload_trust_cache(uuid);
847 #endif
848
849 if (ret == KERN_SUCCESS) {
850 IOLog("successfully unloaded trust cache with UUID: %s\n", uuid_string);
851 cs_blob_reset_cache();
852 } else {
853 IOLog("unable to unload trust cache with UUID: %s | %d\n", uuid_string, ret);
854 }
855
856 return ret;
857 }
858
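/*
 * Load a trust cache when the caller does not know its type: each loadable type
 * is attempted in turn until one validates. As an illustrative (hypothetical)
 * example, a caller holding an image4 object with an embedded manifest might do:
 *
 *   kern_return_t kr = load_trust_cache(object, object_len, NULL, 0);
 *   if ((kr != KERN_SUCCESS) && (kr != KERN_ALREADY_IN_SET)) {
 *       printf("trust cache rejected: %d\n", kr);
 *   }
 */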
859 kern_return_t
860 load_trust_cache(
861 const uint8_t *img4_object, const size_t img4_object_len,
862 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len)
863 {
864 TCType_t type = kTCTypeInvalid;
865 kern_return_t ret = KERN_DENIED;
866
867 /* Start from the first valid type and attempt to validate through each */
868 for (type = kTCTypeLTRS; type < kTCTypeTotal; type += 1) {
869 ret = load_trust_cache_with_type(
870 type,
871 img4_object, img4_object_len,
872 img4_ext_manifest, img4_ext_manifest_len,
873 NULL, 0);
874
875 if ((ret == KERN_SUCCESS) || (ret == KERN_ALREADY_IN_SET)) {
876 return ret;
877 }
878 }
879
880 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
881 /* Attempt to load as an engineering root */
882 ret = load_trust_cache_with_type(
883 kTCTypeDTRS,
884 img4_object, img4_object_len,
885 img4_ext_manifest, img4_ext_manifest_len,
886 NULL, 0);
887 #endif
888
889 return ret;
890 }
891
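/*
 * Load a trust cache of an explicitly specified type. The arguments are checked
 * for overflow, the payload and manifest are extracted from the image4 object
 * when no external manifest is supplied, the calling task's entitlement is
 * verified against the type's configuration, the single-load restriction for
 * the Cryptex1 boot OS/App types and the restricted execution mode state are
 * enforced, and the request is then dispatched to the monitor-specific loader.
 */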
892 kern_return_t
893 load_trust_cache_with_type(
894 TCType_t type,
895 const uint8_t *img4_object, const size_t img4_object_len,
896 const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len,
897 const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
898 {
899 kern_return_t ret = KERN_DENIED;
900 uintptr_t length_check = 0;
901 const uint8_t *img4_payload = NULL;
902 size_t img4_payload_len = 0;
903 const uint8_t *img4_manifest = NULL;
904 size_t img4_manifest_len = 0;
905
906 /* img4_object is required */
907 if (!img4_object || (img4_object_len == 0)) {
908 printf("unable to load trust cache (type: %u): no img4_object provided\n", type);
909 return KERN_INVALID_ARGUMENT;
910 } else if (os_add_overflow((uintptr_t)img4_object, img4_object_len, &length_check)) {
911 panic("overflow on the img4 object: %p | %lu", img4_object, img4_object_len);
912 }
913
914 /* img4_ext_manifest is optional */
915 if (img4_ext_manifest_len != 0) {
916 if (!img4_ext_manifest) {
917 printf("unable to load trust cache (type: %u): img4_ext_manifest expected\n", type);
918 return KERN_INVALID_ARGUMENT;
919 } else if (os_add_overflow((uintptr_t)img4_ext_manifest, img4_ext_manifest_len, &length_check)) {
920 panic("overflow on the ext manifest: %p | %lu", img4_ext_manifest, img4_ext_manifest_len);
921 }
922 }
923
924 /* img4_aux_manifest is optional */
925 if (img4_aux_manifest_len != 0) {
926 if (!img4_aux_manifest) {
927 printf("unable to load trust cache (type: %u): img4_aux_manifest expected\n", type);
928 return KERN_INVALID_ARGUMENT;
929 } else if (os_add_overflow((uintptr_t)img4_aux_manifest, img4_aux_manifest_len, &length_check)) {
930 panic("overflow on the ext manifest: %p | %lu", img4_aux_manifest, img4_aux_manifest_len);
931 }
932 }
933
934 /*
935 * If we don't have an external manifest provided, we expect the img4_object to have
936 * the manifest embedded. In this case, we need to extract the different artifacts
937 * out of the object.
938 */
939 if (img4_ext_manifest_len != 0) {
940 img4_payload = img4_object;
941 img4_payload_len = img4_object_len;
942 img4_manifest = img4_ext_manifest;
943 img4_manifest_len = img4_ext_manifest_len;
944 } else {
945 if (img4if->i4if_version < 15) {
946 /* AppleImage4 change hasn't landed in the build */
947 printf("unable to extract payload and manifest from object\n");
948 return KERN_NOT_SUPPORTED;
949 }
950 img4_buff_t img4_buff = IMG4_BUFF_INIT;
951
952 /* Extract the payload */
953 if (img4_get_payload(img4_object, img4_object_len, &img4_buff) == NULL) {
954 printf("unable to find payload within img4 object\n");
955 return KERN_NOT_FOUND;
956 }
957 img4_payload = img4_buff.i4b_bytes;
958 img4_payload_len = img4_buff.i4b_len;
959
960 /* Extract the manifest */
961 if (img4_get_manifest(img4_object, img4_object_len, &img4_buff) == NULL) {
962 printf("unable to find manifest within img4 object\n");
963 return KERN_NOT_FOUND;
964 }
965 img4_manifest = img4_buff.i4b_bytes;
966 img4_manifest_len = img4_buff.i4b_len;
967 }
968
969 if ((type == kTCTypeStatic) || (type == kTCTypeEngineering) || (type == kTCTypeLegacy)) {
970 printf("unable to load trust cache: invalid type: %u\n", type);
971 return KERN_INVALID_ARGUMENT;
972 } else if (type >= kTCTypeTotal) {
973 printf("unable to load trust cache: unknown type: %u\n", type);
974 return KERN_INVALID_ARGUMENT;
975 }
976
977 /* Validate entitlement for the calling process */
978 if (TCTypeConfig[type].entitlementValue != NULL) {
979 const bool entitlement_satisfied = IOCurrentTaskHasStringEntitlement(
980 "com.apple.private.pmap.load-trust-cache",
981 TCTypeConfig[type].entitlementValue);
982
983 if (entitlement_satisfied == false) {
984 printf("unable to load trust cache (type: %u): unsatisfied entitlement\n", type);
985 return KERN_DENIED;
986 }
987 }
988
989 if ((type == kTCTypeCryptex1BootOS) && boot_os_tc_loaded) {
990 printf("disallowed to load multiple kTCTypeCryptex1BootOS trust caches\n");
991 return KERN_DENIED;
992 } else if ((type == kTCTypeCryptex1BootApp) && boot_app_tc_loaded) {
993 printf("disallowed to load multiple kTCTypeCryptex1BootApp trust caches\n");
994 return KERN_DENIED;
995 }
996
997 if (restricted_execution_mode_state() == KERN_SUCCESS) {
998 printf("disallowed to load trust caches once REM is enabled\n");
999 return KERN_DENIED;
1000 }
1001
1002 #if CONFIG_SPTM
1003 ret = txm_load_trust_cache(
1004 type,
1005 img4_payload, img4_payload_len,
1006 img4_manifest, img4_manifest_len,
1007 img4_aux_manifest, img4_aux_manifest_len);
1008 #elif PMAP_CS_PPL_MONITOR
1009 ret = ppl_load_trust_cache(
1010 type,
1011 img4_payload, img4_payload_len,
1012 img4_manifest, img4_manifest_len,
1013 img4_aux_manifest, img4_aux_manifest_len);
1014 #else
1015 ret = xnu_load_trust_cache(
1016 type,
1017 img4_payload, img4_payload_len,
1018 img4_manifest, img4_manifest_len,
1019 img4_aux_manifest, img4_aux_manifest_len);
1020 #endif
1021
1022 if (ret != KERN_SUCCESS) {
1023 printf("unable to load trust cache (type: %u): %d\n", type, ret);
1024 } else {
1025 if (type == kTCTypeCryptex1BootOS) {
1026 boot_os_tc_loaded = true;
1027 } else if (type == kTCTypeCryptex1BootApp) {
1028 boot_app_tc_loaded = true;
1029 }
1030 os_atomic_add(&num_loadable_trust_caches, 1, relaxed);
1031 printf("successfully loaded trust cache of type: %u\n", type);
1032 }
1033
1034 return ret;
1035 }
1036
1037 kern_return_t
1038 load_legacy_trust_cache(
1039 const uint8_t *module_data, const size_t module_size)
1040 {
1041 kern_return_t ret = KERN_DENIED;
1042 uintptr_t length_check = 0;
1043
1044 /* Module is required */
1045 if (!module_data || (module_size == 0)) {
1046 printf("unable to load legacy trust cache: no module provided\n");
1047 return KERN_INVALID_ARGUMENT;
1048 } else if (os_add_overflow((uintptr_t)module_data, module_size, &length_check)) {
1049 panic("overflow on the module: %p | %lu", module_data, module_size);
1050 }
1051
1052 #if CONFIG_SPTM
1053 ret = txm_load_legacy_trust_cache(module_data, module_size);
1054 #elif PMAP_CS_PPL_MONITOR
1055 ret = ppl_load_legacy_trust_cache(module_data, module_size);
1056 #else
1057 ret = xnu_load_legacy_trust_cache(module_data, module_size);
1058 #endif
1059
1060 if (ret != KERN_SUCCESS) {
1061 printf("unable to load legacy trust cache: %d\n", ret);
1062 } else {
1063 printf("successfully loaded legacy trust cache\n");
1064 }
1065
1066 return ret;
1067 }
1068
1069 kern_return_t
1070 query_trust_cache(
1071 TCQueryType_t query_type,
1072 const uint8_t cdhash[kTCEntryHashSize],
1073 TrustCacheQueryToken_t *query_token)
1074 {
1075 kern_return_t ret = KERN_NOT_FOUND;
1076
1077 if (cdhash == NULL) {
1078 printf("unable to query trust caches: no cdhash provided\n");
1079 return KERN_INVALID_ARGUMENT;
1080 }
1081
1082 #if CONFIG_SPTM
1083 ret = txm_query_trust_cache(query_type, cdhash, query_token);
1084 #elif PMAP_CS_PPL_MONITOR
1085 ret = ppl_query_trust_cache(query_type, cdhash, query_token);
1086 #else
1087 ret = xnu_query_trust_cache(query_type, cdhash, query_token);
1088 #endif
1089
1090 return ret;
1091 }
1092
1093 kern_return_t
1094 query_trust_cache_for_rem(
1095 const uint8_t cdhash[kTCEntryHashSize],
1096 __unused uint8_t *rem_perms)
1097 {
1098 kern_return_t ret = KERN_NOT_SUPPORTED;
1099
1100 if (cdhash == NULL) {
1101 printf("unable to query trust caches: no cdhash provided\n");
1102 return KERN_INVALID_ARGUMENT;
1103 }
1104
1105 /*
1106 * Only when the system is using the Trusted Execution Monitor environment does
1107 * it support restricted execution mode. For all other monitor environments, or
1108 * when we don't have a monitor, the return value defaults to KERN_NOT_SUPPORTED.
1109 */
1110 #if CONFIG_SPTM
1111 ret = txm_query_trust_cache_for_rem(cdhash, rem_perms);
1112 #endif
1113
1114 return ret;
1115 }
1116
1117 /*
1118 * The trust cache management library uses a wrapper data structure to manage each
1119 * of the trust cache modules. We know the exact number of static trust caches we
1120 * expect, so we keep around a read-only-late allocation of the data structure for
1121 * use.
1122 *
1123 * Since engineering trust caches are only ever allowed on development builds, they
1124 * are not protected through the read-only-late property, and instead allocated
1125 * dynamically.
1126 */
1127
1128 SECURITY_READ_ONLY_LATE(bool) trust_cache_static_init = false;
1129 SECURITY_READ_ONLY_LATE(bool) trust_cache_static_loaded = false;
1130 SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static0 = {0};
1131
1132 #if CONFIG_SECOND_STATIC_TRUST_CACHE
1133 SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static1 = {0};
1134 #endif
1135
1136 #if defined(__arm64__)
1137
1138 typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
1139 extern vm_map_address_t phystokv(pmap_paddr_t pa);
1140
1141 #else /* x86_64 */
1142 /*
1143 * We need this duplicate definition because it is hidden behind the MACH_KERNEL_PRIVATE
1144 * macro definition, which makes it inaccessible to this part of the code base.
1145 */
1146 extern uint64_t physmap_base, physmap_max;
1147
1148 static inline void*
1149 PHYSMAP_PTOV_check(void *paddr)
1150 {
1151 uint64_t pvaddr = (uint64_t)paddr + physmap_base;
1152
1153 if (__improbable(pvaddr >= physmap_max)) {
1154 panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
1155 pvaddr, physmap_base, physmap_max);
1156 }
1157
1158 return (void*)pvaddr;
1159 }
1160
1161 #define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
1162 #define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))
1163
1164 #endif /* defined(__arm64__) */
1165
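/*
 * Load the trust cache modules handed over by the boot loader. The "TrustCache"
 * property under chosen/memory-map describes a physical range containing a
 * trust_cache_offsets_t header followed by the individual modules. The first
 * module (and, when supported, a second) is loaded as a static trust cache,
 * panicking on failure, while any remaining modules are treated as engineering
 * trust caches and are only loaded when the runtime allows them.
 */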
1166 void
1167 load_static_trust_cache(void)
1168 {
1169 DTEntry memory_map = {0};
1170 const DTTrustCacheRange *tc_range = NULL;
1171 trust_cache_offsets_t *tc_offsets = NULL;
1172 unsigned int tc_dt_prop_length = 0;
1173 size_t tc_segment_length = 0;
1174
1175 /* Mark this function as having been called */
1176 trust_cache_static_init = true;
1177
1178 /* Nothing to do when the runtime isn't set */
1179 if (trust_cache_rt == NULL) {
1180 return;
1181 }
1182
1183 int err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
1184 if (err != kSuccess) {
1185 printf("unable to find chosen/memory-map in the device tree: %d\n", err);
1186 return;
1187 }
1188
1189 err = SecureDTGetProperty(memory_map, "TrustCache", (const void **)&tc_range, &tc_dt_prop_length);
1190 if (err == kSuccess) {
1191 if (tc_dt_prop_length != sizeof(DTTrustCacheRange)) {
1192 panic("unexpected size for TrustCache property: %u != %zu",
1193 tc_dt_prop_length, sizeof(DTTrustCacheRange));
1194 }
1195
1196 tc_offsets = (void*)phystokv(tc_range->paddr);
1197 tc_segment_length = tc_range->length;
1198 }
1199
1200 /* x86_64 devices aren't expected to have trust caches */
1201 if (tc_segment_length == 0) {
1202 if (tc_offsets && tc_offsets->num_caches != 0) {
1203 panic("trust cache segment is zero length but trust caches are available: %u",
1204 tc_offsets->num_caches);
1205 }
1206
1207 printf("no external trust caches found (segment length is zero)\n");
1208 return;
1209 } else if (tc_offsets->num_caches == 0) {
1210 panic("trust cache segment isn't zero but no trust caches available: %lu",
1211 (unsigned long)tc_segment_length);
1212 }
1213
1214 size_t offsets_length = 0;
1215 size_t struct_length = 0;
1216 if (os_mul_overflow(tc_offsets->num_caches, sizeof(uint32_t), &offsets_length)) {
1217 panic("overflow on the number of trust caches provided: %u", tc_offsets->num_caches);
1218 } else if (os_add_overflow(offsets_length, sizeof(trust_cache_offsets_t), &struct_length)) {
1219 panic("overflow on length of the trust cache offsets: %lu",
1220 (unsigned long)offsets_length);
1221 } else if (tc_segment_length < struct_length) {
1222 panic("trust cache segment length smaller than required: %lu | %lu",
1223 (unsigned long)tc_segment_length, (unsigned long)struct_length);
1224 }
1225 const uintptr_t tc_region_end = (uintptr_t)tc_offsets + tc_segment_length;
1226
1227 printf("attempting to load %u external trust cache modules\n", tc_offsets->num_caches);
1228
1229 for (uint32_t i = 0; i < tc_offsets->num_caches; i++) {
1230 TCReturn_t tc_ret = (TCReturn_t){.error = kTCReturnError};
1231 TCType_t tc_type = kTCTypeEngineering;
1232 TrustCache_t *trust_cache = NULL;
1233
1234 uintptr_t tc_module = 0;
1235 if (os_add_overflow((uintptr_t)tc_offsets, tc_offsets->offsets[i], &tc_module)) {
1236 panic("trust cache module start overflows: %u | %lu | %u",
1237 i, (unsigned long)tc_offsets, tc_offsets->offsets[i]);
1238 } else if (tc_module >= tc_region_end) {
1239 panic("trust cache module begins after segment ends: %u | %lx | %lx",
1240 i, (unsigned long)tc_module, tc_region_end);
1241 }
1242
1243 /* Should be safe for underflow */
1244 const size_t buffer_length = tc_region_end - tc_module;
1245
1246 /* The first module is always the static trust cache */
1247 if (i == 0) {
1248 tc_type = kTCTypeStatic;
1249 trust_cache = &trust_cache_static0;
1250 }
1251
1252 #if CONFIG_SECOND_STATIC_TRUST_CACHE
1253 if (trust_cache_rt->allowSecondStaticTC && (i == 1)) {
1254 tc_type = kTCTypeStatic;
1255 trust_cache = &trust_cache_static1;
1256 }
1257 #endif
1258
1259 if (tc_type == kTCTypeEngineering) {
1260 if (trust_cache_rt->allowEngineeringTC == false) {
1261 printf("skipping engineering trust cache module: %u\n", i);
1262 continue;
1263 }
1264
1265 /* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
1266 trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
1267 assert(trust_cache != NULL);
1268 }
1269
1270 tc_ret = amfi->TrustCache.loadModule(
1271 trust_cache_rt,
1272 tc_type,
1273 trust_cache,
1274 tc_module, buffer_length);
1275
1276 if (tc_ret.error != kTCReturnSuccess) {
1277 printf("unable to load trust cache module: %u (TCReturn: 0x%02X | 0x%02X | %u)\n",
1278 i, tc_ret.component, tc_ret.error, tc_ret.uniqueError);
1279
1280 if (tc_type == kTCTypeStatic) {
1281 panic("failed to load static trust cache module: %u", i);
1282 }
1283 continue;
1284 }
1285 printf("loaded external trust cache module: %u\n", i);
1286
1287 /*
1288 * The first module is always loaded as a static trust cache. If loading it failed,
1289 * then this function would've panicked. If we reach here, it means we've loaded a
1290 * static trust cache on the system.
1291 */
1292 trust_cache_static_loaded = true;
1293
1294 /* Increment the number of boot trust caches */
1295 if (tc_type == kTCTypeStatic) {
1296 num_static_trust_caches += 1;
1297 } else {
1298 num_engineering_trust_caches += 1;
1299 }
1300 }
1301
1302 printf("completed loading external trust cache modules\n");
1303 }
1304
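/*
 * Report how many static trust caches were loaded along with their capability
 * sets. On TrustedExecutionMonitor systems the values come from TXM; otherwise
 * they are queried from the locally loaded static trust cache modules.
 */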
1305 kern_return_t
1306 static_trust_cache_capabilities(
1307 uint32_t *num_static_trust_caches_ret,
1308 TCCapabilities_t *capabilities0_ret,
1309 TCCapabilities_t *capabilities1_ret)
1310 {
1311 TCReturn_t tcRet = {.error = kTCReturnError};
1312
1313 *num_static_trust_caches_ret = 0;
1314 *capabilities0_ret = kTCCapabilityNone;
1315 *capabilities1_ret = kTCCapabilityNone;
1316
1317 /* Ensure static trust caches have been initialized */
1318 if (trust_cache_static_init == false) {
1319 panic("attempted to query static trust cache capabilities without init");
1320 }
1321
1322 #if CONFIG_SPTM
1323 if (txm_static_trust_caches > 0) {
1324 /* Copy in the data received from TrustedExecutionMonitor */
1325 *num_static_trust_caches_ret = txm_static_trust_caches;
1326 *capabilities0_ret = static_trust_cache_capabilities0;
1327 *capabilities1_ret = static_trust_cache_capabilities1;
1328
1329 /* Return successfully */
1330 return KERN_SUCCESS;
1331 }
1332 #endif
1333
1334 if (trust_cache_static_loaded == false) {
1335 /* Return arguments already set */
1336 return KERN_SUCCESS;
1337 }
1338
1339 tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static0, capabilities0_ret);
1340 assert(tcRet.error == kTCReturnSuccess);
1341 *num_static_trust_caches_ret += 1;
1342
1343 #if CONFIG_SECOND_STATIC_TRUST_CACHE
1344 tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static1, capabilities1_ret);
1345 assert(tcRet.error == kTCReturnSuccess);
1346 *num_static_trust_caches_ret += 1;
1347 #endif
1348
1349 return KERN_SUCCESS;
1350 }
1351