xref: /xnu-10002.1.13/bsd/kern/kern_trustcache.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2021 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <os/overflow.h>
24 #include <pexpert/pexpert.h>
25 #include <pexpert/device_tree.h>
26 #include <mach/boolean.h>
27 #include <mach/vm_param.h>
28 #include <vm/vm_kern.h>
29 #include <vm/pmap_cs.h>
30 #include <kern/zalloc.h>
31 #include <kern/kalloc.h>
32 #include <kern/assert.h>
33 #include <kern/lock_rw.h>
34 #include <libkern/libkern.h>
35 #include <libkern/section_keywords.h>
36 #include <libkern/img4/interface.h>
37 #include <libkern/amfi/amfi.h>
38 #include <sys/vm.h>
39 #include <sys/proc.h>
40 #include <sys/codesign.h>
41 #include <sys/trust_caches.h>
42 #include <IOKit/IOBSD.h>
43 #include <img4/firmware.h>
44 #include <TrustCache/API.h>
45 
/*
 * One-shot flags for the Cryptex1 boot trust caches: each of these types may
 * only be loaded a single time per boot, enforced in load_trust_cache_with_type.
 */
static bool boot_os_tc_loaded = false;
static bool boot_app_tc_loaded = false;
48 
49 #if   PMAP_CS_PPL_MONITOR
50 /*
51  * We have the Page Protection Layer environment available. All of our artifacts
52  * need to be page-aligned. The PPL will lockdown the artifacts before it begins
53  * the validation.
54  *
55  * Even though the runtimes are PPL owned, we expect the runtime init function
56  * to be called before the PPL has been locked down, which allows us to write
57  * to them.
58  */
59 
60 /* Immutable part of the runtime */
61 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &ppl_trust_cache_rt;
62 
63 /* Mutable part of the runtime */
64 SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &ppl_trust_cache_mut_rt;
65 
66 void
trust_cache_runtime_init(void)67 trust_cache_runtime_init(void)
68 {
69 	bool allow_second_static_cache = false;
70 	bool allow_engineering_caches = false;
71 
72 #if CONFIG_SECOND_STATIC_TRUST_CACHE
73 	allow_second_static_cache = true;
74 #endif
75 
76 #if PMAP_CS_INCLUDE_INTERNAL_CODE
77 	allow_engineering_caches = true;
78 #endif
79 
80 	/* Image4 interface needs to be available */
81 	if (img4if == NULL) {
82 		panic("image4 interface not available");
83 	}
84 
85 	/* AMFI interface needs to be available */
86 	if (amfi == NULL) {
87 		panic("amfi interface not available");
88 	} else if (amfi->TrustCache.version < 2) {
89 		panic("amfi interface is stale: %u", amfi->TrustCache.version);
90 	}
91 
92 	trustCacheInitializeRuntime(
93 		trust_cache_rt,
94 		trust_cache_mut_rt,
95 		allow_second_static_cache,
96 		allow_engineering_caches,
97 		false,
98 		IMG4_RUNTIME_PMAP_CS);
99 
100 	/* Locks are initialized in "pmap_bootstrap()" */
101 }
102 
103 static kern_return_t
ppl_load_trust_cache(TCType_t type,const uint8_t * img4_payload,const size_t img4_payload_len,const uint8_t * img4_manifest,const size_t img4_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)104 ppl_load_trust_cache(
105 	TCType_t type,
106 	const uint8_t *img4_payload, const size_t img4_payload_len,
107 	const uint8_t *img4_manifest, const size_t img4_manifest_len,
108 	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
109 {
110 	kern_return_t ret = KERN_DENIED;
111 	vm_address_t payload_addr = 0;
112 	vm_size_t payload_len = 0;
113 	vm_size_t payload_len_aligned = 0;
114 	vm_address_t manifest_addr = 0;
115 	vm_size_t manifest_len_aligned = 0;
116 	vm_address_t aux_manifest_addr = 0;
117 	vm_size_t aux_manifest_len_aligned = 0;
118 
119 	/* The trust cache data structure is bundled with the img4 payload */
120 	if (os_add_overflow(img4_payload_len, sizeof(pmap_img4_payload_t), &payload_len)) {
121 		panic("overflow on pmap img4 payload: %lu", img4_payload_len);
122 	}
123 	payload_len_aligned = round_page(payload_len);
124 	manifest_len_aligned = round_page(img4_manifest_len);
125 	aux_manifest_len_aligned = round_page(img4_aux_manifest_len);
126 
127 	ret = kmem_alloc(kernel_map, &payload_addr, payload_len_aligned,
128 	    KMA_KOBJECT | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
129 	if (ret != KERN_SUCCESS) {
130 		printf("unable to allocate memory for pmap image4 payload: %d\n", ret);
131 		goto out;
132 	}
133 
134 	pmap_img4_payload_t *pmap_payload = (pmap_img4_payload_t*)payload_addr;
135 	memcpy(pmap_payload->img4_payload, img4_payload, img4_payload_len);
136 
137 	/* Allocate storage for the manifest */
138 	ret = kmem_alloc(kernel_map, &manifest_addr, manifest_len_aligned,
139 	    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
140 	if (ret != KERN_SUCCESS) {
141 		printf("unable to allocate memory for image4 manifest: %d\n", ret);
142 		goto out;
143 	}
144 	memcpy((void*)manifest_addr, img4_manifest, img4_manifest_len);
145 
146 	if (aux_manifest_len_aligned != 0) {
147 		/* Allocate storage for the auxiliary manifest */
148 		ret = kmem_alloc(kernel_map, &aux_manifest_addr, aux_manifest_len_aligned,
149 		    KMA_KOBJECT | KMA_DATA | KMA_ZERO, VM_KERN_MEMORY_SECURITY);
150 		if (ret != KERN_SUCCESS) {
151 			printf("unable to allocate memory for auxiliary image4 manifest: %d\n", ret);
152 			goto out;
153 		}
154 		memcpy((void*)aux_manifest_addr, img4_aux_manifest, img4_aux_manifest_len);
155 	}
156 
157 	/* The PPL will round up the length to page size itself */
158 	ret = pmap_load_trust_cache_with_type(
159 		type,
160 		payload_addr, payload_len,
161 		manifest_addr, img4_manifest_len,
162 		aux_manifest_addr, img4_aux_manifest_len);
163 
164 out:
165 	if (aux_manifest_addr != 0) {
166 		kmem_free(kernel_map, aux_manifest_addr, aux_manifest_len_aligned);
167 		aux_manifest_addr = 0;
168 		aux_manifest_len_aligned = 0;
169 	}
170 
171 	if (manifest_addr != 0) {
172 		kmem_free(kernel_map, manifest_addr, manifest_len_aligned);
173 		manifest_addr = 0;
174 		manifest_len_aligned = 0;
175 	}
176 
177 	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
178 		kmem_free(kernel_map, payload_addr, payload_len_aligned);
179 		payload_addr = 0;
180 		payload_len_aligned = 0;
181 	}
182 
183 	return ret;
184 }
185 
186 static kern_return_t
ppl_load_legacy_trust_cache(__unused const uint8_t * module_data,__unused const size_t module_size)187 ppl_load_legacy_trust_cache(
188 	__unused const uint8_t *module_data, __unused const size_t module_size)
189 {
190 	panic("legacy trust caches are not supported on this platform");
191 }
192 
193 static kern_return_t
ppl_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)194 ppl_query_trust_cache(
195 	TCQueryType_t query_type,
196 	const uint8_t cdhash[kTCEntryHashSize],
197 	TrustCacheQueryToken_t *query_token)
198 {
199 	/*
200 	 * We need to query by trapping into the PPL since the PPL trust cache runtime
201 	 * lock needs to be held. We cannot hold the lock from outside the PPL.
202 	 */
203 	return pmap_query_trust_cache(query_type, cdhash, query_token);
204 }
205 
206 static kern_return_t
ppl_check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])207 ppl_check_trust_cache_runtime_for_uuid(
208 	const uint8_t check_uuid[kUUIDSize])
209 {
210 	return pmap_check_trust_cache_runtime_for_uuid(check_uuid);
211 }
212 
213 #else
214 /*
215  * We don't have a monitor environment available. This means someone with a kernel
216  * memory exploit will be able to inject a trust cache into the system. There is
217  * not much we can do here, since this is older HW.
218  */
219 
220 /* Lock for the runtime */
221 LCK_GRP_DECLARE(trust_cache_lck_grp, "trust_cache_lck_grp");
222 decl_lck_rw_data(, trust_cache_rt_lock);
223 
224 /* Immutable part of the runtime */
225 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t) trust_cache_rt_storage;
226 SECURITY_READ_ONLY_LATE(TrustCacheRuntime_t*) trust_cache_rt = &trust_cache_rt_storage;
227 
228 /* Mutable part of the runtime */
229 TrustCacheMutableRuntime_t trust_cache_mut_rt_storage;
230 SECURITY_READ_ONLY_LATE(TrustCacheMutableRuntime_t*) trust_cache_mut_rt = &trust_cache_mut_rt_storage;
231 
232 void
trust_cache_runtime_init(void)233 trust_cache_runtime_init(void)
234 {
235 	bool allow_second_static_cache = false;
236 	bool allow_engineering_caches = false;
237 	bool allow_legacy_caches = false;
238 
239 #if CONFIG_SECOND_STATIC_TRUST_CACHE
240 	allow_second_static_cache = true;
241 #endif
242 
243 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
244 	allow_engineering_caches = true;
245 #endif
246 
247 #ifdef XNU_PLATFORM_BridgeOS
248 	allow_legacy_caches = true;
249 #endif
250 
251 	/* Image4 interface needs to be available */
252 	if (img4if == NULL) {
253 		panic("image4 interface not available");
254 	}
255 
256 	/* AMFI interface needs to be available */
257 	if (amfi == NULL) {
258 		panic("amfi interface not available");
259 	} else if (amfi->TrustCache.version < 2) {
260 		panic("amfi interface is stale: %u", amfi->TrustCache.version);
261 	}
262 
263 	trustCacheInitializeRuntime(
264 		trust_cache_rt,
265 		trust_cache_mut_rt,
266 		allow_second_static_cache,
267 		allow_engineering_caches,
268 		allow_legacy_caches,
269 		IMG4_RUNTIME_DEFAULT);
270 
271 	/* Initialize the read-write lock */
272 	lck_rw_init(&trust_cache_rt_lock, &trust_cache_lck_grp, 0);
273 }
274 
275 static kern_return_t
xnu_load_trust_cache(TCType_t type,const uint8_t * img4_payload,const size_t img4_payload_len,const uint8_t * img4_manifest,const size_t img4_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)276 xnu_load_trust_cache(
277 	TCType_t type,
278 	const uint8_t *img4_payload, const size_t img4_payload_len,
279 	const uint8_t *img4_manifest, const size_t img4_manifest_len,
280 	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
281 {
282 	kern_return_t ret = KERN_DENIED;
283 
284 	/* Ignore the auxiliary manifest until we add support for it */
285 	(void)img4_aux_manifest;
286 	(void)img4_aux_manifest_len;
287 
288 	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
289 	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
290 	assert(trust_cache != NULL);
291 
292 	/*
293 	 * The manifests aren't needed after the validation is complete, but the payload needs
294 	 * to persist. The caller of this API expects us to make our own allocations. Since we
295 	 * don't need the manifests after validation, we can use the manifests passed in to us
296 	 * but we need to make a new allocation for the payload, since that needs to persist.
297 	 *
298 	 * Z_WAITOK implies that this allocation can never fail.
299 	 */
300 	uint8_t *payload = (uint8_t*)kalloc_data(img4_payload_len, Z_WAITOK);
301 	assert(payload != NULL);
302 
303 	/* Copy the payload into our allocation */
304 	memcpy(payload, img4_payload, img4_payload_len);
305 
306 	/* Exclusively lock the runtime */
307 	lck_rw_lock_exclusive(&trust_cache_rt_lock);
308 
309 	TCReturn_t tc_ret = amfi->TrustCache.load(
310 		trust_cache_rt,
311 		type,
312 		trust_cache,
313 		(const uintptr_t)payload, img4_payload_len,
314 		(const uintptr_t)img4_manifest, img4_manifest_len);
315 
316 	/* Unlock the runtime */
317 	lck_rw_unlock_exclusive(&trust_cache_rt_lock);
318 
319 	if (tc_ret.error == kTCReturnSuccess) {
320 		ret = KERN_SUCCESS;
321 	} else if (tc_ret.error == kTCReturnDuplicate) {
322 		ret = KERN_ALREADY_IN_SET;
323 	} else {
324 		printf("unable to load trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
325 		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);
326 
327 		ret = KERN_FAILURE;
328 	}
329 
330 	if (ret != KERN_SUCCESS) {
331 		kfree_data(payload, img4_payload_len);
332 		payload = NULL;
333 
334 		kfree_type(TrustCache_t, trust_cache);
335 		trust_cache = NULL;
336 	}
337 	return ret;
338 }
339 
340 static kern_return_t
xnu_load_legacy_trust_cache(__unused const uint8_t * module_data,__unused const size_t module_size)341 xnu_load_legacy_trust_cache(
342 	__unused const uint8_t *module_data, __unused const size_t module_size)
343 {
344 #if XNU_HAS_LEGACY_TRUST_CACHE_LOADING
345 	kern_return_t ret = KERN_DENIED;
346 
347 	/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
348 	TrustCache_t *trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
349 	assert(trust_cache != NULL);
350 
351 	/* Allocate storage for the module -- Z_WAITOK means this can't fail */
352 	uint8_t *module = (uint8_t*)kalloc_data(module_size, Z_WAITOK);
353 	assert(module != NULL);
354 
355 	/* Copy the module into our allocation */
356 	memcpy(module, module_data, module_size);
357 
358 	/* Exclusively lock the runtime */
359 	lck_rw_lock_exclusive(&trust_cache_rt_lock);
360 
361 	TCReturn_t tc_ret = amfi->TrustCache.loadModule(
362 		trust_cache_rt,
363 		kTCTypeLegacy,
364 		trust_cache,
365 		(const uintptr_t)module, module_size);
366 
367 	/* Unlock the runtime */
368 	lck_rw_unlock_exclusive(&trust_cache_rt_lock);
369 
370 	if (tc_ret.error == kTCReturnSuccess) {
371 		ret = KERN_SUCCESS;
372 	} else if (tc_ret.error == kTCReturnDuplicate) {
373 		ret = KERN_ALREADY_IN_SET;
374 	} else {
375 		printf("unable to load legacy trust cache (TCReturn: 0x%02X | 0x%02X | %u)\n",
376 		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);
377 
378 		ret = KERN_FAILURE;
379 	}
380 
381 	if (ret != KERN_SUCCESS) {
382 		kfree_data(module, module_size);
383 		module = NULL;
384 
385 		kfree_type(TrustCache_t, trust_cache);
386 		trust_cache = NULL;
387 	}
388 	return ret;
389 #else
390 	panic("legacy trust caches are not supported on this platform");
391 #endif /* XNU_HAS_LEGACY_TRUST_CACHE_LOADING */
392 }
393 
394 static kern_return_t
xnu_query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)395 xnu_query_trust_cache(
396 	TCQueryType_t query_type,
397 	const uint8_t cdhash[kTCEntryHashSize],
398 	TrustCacheQueryToken_t *query_token)
399 {
400 	kern_return_t ret = KERN_NOT_FOUND;
401 
402 	/* Validate the query type preemptively */
403 	if (query_type >= kTCQueryTypeTotal) {
404 		printf("unable to query trust cache: invalid query type: %u\n", query_type);
405 		return KERN_INVALID_ARGUMENT;
406 	}
407 
408 	/* Lock the runtime as shared */
409 	lck_rw_lock_shared(&trust_cache_rt_lock);
410 
411 	TCReturn_t tc_ret = amfi->TrustCache.query(
412 		trust_cache_rt,
413 		query_type,
414 		cdhash,
415 		query_token);
416 
417 	/* Unlock the runtime */
418 	lck_rw_unlock_shared(&trust_cache_rt_lock);
419 
420 	if (tc_ret.error == kTCReturnSuccess) {
421 		ret = KERN_SUCCESS;
422 	} else if (tc_ret.error == kTCReturnNotFound) {
423 		ret = KERN_NOT_FOUND;
424 	} else {
425 		ret = KERN_FAILURE;
426 		printf("trust cache query failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
427 		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);
428 	}
429 
430 	return ret;
431 }
432 
433 static kern_return_t
xnu_check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])434 xnu_check_trust_cache_runtime_for_uuid(
435 	const uint8_t check_uuid[kUUIDSize])
436 {
437 	kern_return_t ret = KERN_DENIED;
438 
439 	if (amfi->TrustCache.version < 3) {
440 		/* AMFI change hasn't landed in the build */
441 		printf("unable to check for loaded trust cache: interface not supported\n");
442 		return KERN_NOT_SUPPORTED;
443 	}
444 
445 	/* Lock the runtime as shared */
446 	lck_rw_lock_shared(&trust_cache_rt_lock);
447 
448 	TCReturn_t tc_ret = amfi->TrustCache.checkRuntimeForUUID(
449 		trust_cache_rt,
450 		check_uuid,
451 		NULL);
452 
453 	/* Unlock the runtime */
454 	lck_rw_unlock_shared(&trust_cache_rt_lock);
455 
456 	if (tc_ret.error == kTCReturnSuccess) {
457 		ret = KERN_SUCCESS;
458 	} else if (tc_ret.error == kTCReturnNotFound) {
459 		ret = KERN_NOT_FOUND;
460 	} else {
461 		ret = KERN_FAILURE;
462 		printf("trust cache UUID check failed (TCReturn: 0x%02X | 0x%02X | %u)\n",
463 		    tc_ret.component, tc_ret.error, tc_ret.uniqueError);
464 	}
465 
466 	return ret;
467 }
468 
469 #endif /* */
470 
471 kern_return_t
check_trust_cache_runtime_for_uuid(const uint8_t check_uuid[kUUIDSize])472 check_trust_cache_runtime_for_uuid(
473 	const uint8_t check_uuid[kUUIDSize])
474 {
475 	kern_return_t ret = KERN_DENIED;
476 
477 	if (check_uuid == NULL) {
478 		return KERN_INVALID_ARGUMENT;
479 	}
480 
481 #if   PMAP_CS_PPL_MONITOR
482 	ret = ppl_check_trust_cache_runtime_for_uuid(check_uuid);
483 #else
484 	ret = xnu_check_trust_cache_runtime_for_uuid(check_uuid);
485 #endif
486 
487 	return ret;
488 }
489 
490 kern_return_t
load_trust_cache(const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len)491 load_trust_cache(
492 	const uint8_t *img4_object, const size_t img4_object_len,
493 	const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len)
494 {
495 	TCType_t type = kTCTypeInvalid;
496 	kern_return_t ret = KERN_DENIED;
497 
498 	/* Start from the first valid type and attempt to validate through each */
499 	for (type = kTCTypeLTRS; type < kTCTypeTotal; type += 1) {
500 		ret = load_trust_cache_with_type(
501 			type,
502 			img4_object, img4_object_len,
503 			img4_ext_manifest, img4_ext_manifest_len,
504 			NULL, 0);
505 
506 		if ((ret == KERN_SUCCESS) || (ret == KERN_ALREADY_IN_SET)) {
507 			return ret;
508 		}
509 	}
510 
511 #if TRUST_CACHE_INCLUDE_INTERNAL_CODE
512 	/* Attempt to load as an engineering root */
513 	ret = load_trust_cache_with_type(
514 		kTCTypeDTRS,
515 		img4_object, img4_object_len,
516 		img4_ext_manifest, img4_ext_manifest_len,
517 		NULL, 0);
518 #endif
519 
520 	return ret;
521 }
522 
523 kern_return_t
load_trust_cache_with_type(TCType_t type,const uint8_t * img4_object,const size_t img4_object_len,const uint8_t * img4_ext_manifest,const size_t img4_ext_manifest_len,const uint8_t * img4_aux_manifest,const size_t img4_aux_manifest_len)524 load_trust_cache_with_type(
525 	TCType_t type,
526 	const uint8_t *img4_object, const size_t img4_object_len,
527 	const uint8_t *img4_ext_manifest, const size_t img4_ext_manifest_len,
528 	const uint8_t *img4_aux_manifest, const size_t img4_aux_manifest_len)
529 {
530 	kern_return_t ret = KERN_DENIED;
531 	uintptr_t length_check = 0;
532 	const uint8_t *img4_payload = NULL;
533 	size_t img4_payload_len = 0;
534 	const uint8_t *img4_manifest = NULL;
535 	size_t img4_manifest_len = 0;
536 
537 	/* img4_object is required */
538 	if (!img4_object || (img4_object_len == 0)) {
539 		printf("unable to load trust cache (type: %u): no img4_object provided\n", type);
540 		return KERN_INVALID_ARGUMENT;
541 	} else if (os_add_overflow((uintptr_t)img4_object, img4_object_len, &length_check)) {
542 		panic("overflow on the img4 object: %p | %lu", img4_object, img4_object_len);
543 	}
544 
545 	/* img4_ext_manifest is optional */
546 	if (img4_ext_manifest_len != 0) {
547 		if (!img4_ext_manifest) {
548 			printf("unable to load trust cache (type: %u): img4_ext_manifest expected\n", type);
549 			return KERN_INVALID_ARGUMENT;
550 		} else if (os_add_overflow((uintptr_t)img4_ext_manifest, img4_ext_manifest_len, &length_check)) {
551 			panic("overflow on the ext manifest: %p | %lu", img4_ext_manifest, img4_ext_manifest_len);
552 		}
553 	}
554 
555 	/* img4_aux_manifest is optional */
556 	if (img4_aux_manifest_len != 0) {
557 		if (!img4_aux_manifest) {
558 			printf("unable to load trust cache (type: %u): img4_aux_manifest expected\n", type);
559 			return KERN_INVALID_ARGUMENT;
560 		} else if (os_add_overflow((uintptr_t)img4_aux_manifest, img4_aux_manifest_len, &length_check)) {
561 			panic("overflow on the ext manifest: %p | %lu", img4_aux_manifest, img4_aux_manifest_len);
562 		}
563 	}
564 
565 	/*
566 	 * If we don't have an external manifest provided, we expect the img4_object to have
567 	 * the manifest embedded. In this case, we need to extract the different artifacts
568 	 * out of the object.
569 	 */
570 	if (img4_ext_manifest_len != 0) {
571 		img4_payload = img4_object;
572 		img4_payload_len = img4_object_len;
573 		img4_manifest = img4_ext_manifest;
574 		img4_manifest_len = img4_ext_manifest_len;
575 	} else {
576 		if (img4if->i4if_version < 15) {
577 			/* AppleImage4 change hasn't landed in the build */
578 			printf("unable to extract payload and manifest from object\n");
579 			return KERN_NOT_SUPPORTED;
580 		}
581 		img4_buff_t img4_buff = IMG4_BUFF_INIT;
582 
583 		/* Extract the payload */
584 		if (img4_get_payload(img4_object, img4_object_len, &img4_buff) == NULL) {
585 			printf("unable to find payload within img4 object\n");
586 			return KERN_NOT_FOUND;
587 		}
588 		img4_payload = img4_buff.i4b_bytes;
589 		img4_payload_len = img4_buff.i4b_len;
590 
591 		/* Extract the manifest */
592 		if (img4_get_manifest(img4_object, img4_object_len, &img4_buff) == NULL) {
593 			printf("unable to find manifest within img4 object\n");
594 			return KERN_NOT_FOUND;
595 		}
596 		img4_manifest = img4_buff.i4b_bytes;
597 		img4_manifest_len = img4_buff.i4b_len;
598 	}
599 
600 	if ((type == kTCTypeStatic) || (type == kTCTypeEngineering) || (type == kTCTypeLegacy)) {
601 		printf("unable to load trust cache: invalid type: %u\n", type);
602 		return KERN_INVALID_ARGUMENT;
603 	} else if (type >= kTCTypeTotal) {
604 		printf("unable to load trust cache: unknown type: %u\n", type);
605 		return KERN_INVALID_ARGUMENT;
606 	}
607 
608 	/* Validate entitlement for the calling process */
609 	if (TCTypeConfig[type].entitlementValue != NULL) {
610 		const bool entitlement_satisfied = IOCurrentTaskHasStringEntitlement(
611 			"com.apple.private.pmap.load-trust-cache",
612 			TCTypeConfig[type].entitlementValue);
613 
614 		if (entitlement_satisfied == false) {
615 			printf("unable to load trust cache (type: %u): unsatisfied entitlement\n", type);
616 			return KERN_DENIED;
617 		}
618 	}
619 
620 	if ((type == kTCTypeCryptex1BootOS) && boot_os_tc_loaded) {
621 		printf("disallowed to load multiple kTCTypeCryptex1BootOS trust caches\n");
622 		return KERN_DENIED;
623 	} else if ((type == kTCTypeCryptex1BootApp) && boot_app_tc_loaded) {
624 		printf("disallowed to load multiple kTCTypeCryptex1BootApp trust caches\n");
625 		return KERN_DENIED;
626 	}
627 
628 #if   PMAP_CS_PPL_MONITOR
629 	ret = ppl_load_trust_cache(
630 		type,
631 		img4_payload, img4_payload_len,
632 		img4_manifest, img4_manifest_len,
633 		img4_aux_manifest, img4_aux_manifest_len);
634 #else
635 	ret = xnu_load_trust_cache(
636 		type,
637 		img4_payload, img4_payload_len,
638 		img4_manifest, img4_manifest_len,
639 		img4_aux_manifest, img4_aux_manifest_len);
640 #endif
641 
642 	if (ret != KERN_SUCCESS) {
643 		printf("unable to load trust cache (type: %u): %d\n", type, ret);
644 	} else {
645 		if (type == kTCTypeCryptex1BootOS) {
646 			boot_os_tc_loaded = true;
647 		} else if (type == kTCTypeCryptex1BootApp) {
648 			boot_app_tc_loaded = true;
649 		}
650 		printf("successfully loaded trust cache of type: %u\n", type);
651 	}
652 
653 	return ret;
654 }
655 
656 kern_return_t
load_legacy_trust_cache(const uint8_t * module_data,const size_t module_size)657 load_legacy_trust_cache(
658 	const uint8_t *module_data, const size_t module_size)
659 {
660 	kern_return_t ret = KERN_DENIED;
661 	uintptr_t length_check = 0;
662 
663 	/* Module is required */
664 	if (!module_data || (module_size == 0)) {
665 		printf("unable to load legacy trust cache: no module provided\n");
666 		return KERN_INVALID_ARGUMENT;
667 	} else if (os_add_overflow((uintptr_t)module_data, module_size, &length_check)) {
668 		panic("overflow on the module: %p | %lu", module_data, module_size);
669 	}
670 
671 #if   PMAP_CS_PPL_MONITOR
672 	ret = ppl_load_legacy_trust_cache(module_data, module_size);
673 #else
674 	ret = xnu_load_legacy_trust_cache(module_data, module_size);
675 #endif
676 
677 	if (ret != KERN_SUCCESS) {
678 		printf("unable to load legacy trust cache: %d\n", ret);
679 	} else {
680 		printf("successfully loaded legacy trust cache\n");
681 	}
682 
683 	return ret;
684 }
685 
686 kern_return_t
query_trust_cache(TCQueryType_t query_type,const uint8_t cdhash[kTCEntryHashSize],TrustCacheQueryToken_t * query_token)687 query_trust_cache(
688 	TCQueryType_t query_type,
689 	const uint8_t cdhash[kTCEntryHashSize],
690 	TrustCacheQueryToken_t *query_token)
691 {
692 	kern_return_t ret = KERN_NOT_FOUND;
693 
694 	if (cdhash == NULL) {
695 		printf("unable to query trust caches: no cdhash provided\n");
696 		return KERN_INVALID_ARGUMENT;
697 	}
698 
699 #if   PMAP_CS_PPL_MONITOR
700 	ret = ppl_query_trust_cache(query_type, cdhash, query_token);
701 #else
702 	ret = xnu_query_trust_cache(query_type, cdhash, query_token);
703 #endif
704 
705 	return ret;
706 }
707 
708 /*
709  * The trust cache management library uses a wrapper data structure to manage each
710  * of the trust cache modules. We know the exact number of static trust caches we
711  * expect, so we keep around a read-only-late allocation of the data structure for
712  * use.
713  *
714  * Since engineering trust caches are only ever allowed on development builds, they
715  * are not protected through the read-only-late property, and instead allocated
716  * dynamically.
717  */
718 
719 SECURITY_READ_ONLY_LATE(bool) trust_cache_static_init = false;
720 SECURITY_READ_ONLY_LATE(bool) trust_cache_static_loaded = false;
721 SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static0 = {0};
722 
723 #if CONFIG_SECOND_STATIC_TRUST_CACHE
724 SECURITY_READ_ONLY_LATE(TrustCache_t) trust_cache_static1 = {0};
725 #endif
726 
#if defined(__arm64__)

typedef uint64_t pmap_paddr_t __kernel_ptr_semantics;
extern vm_map_address_t phystokv(pmap_paddr_t pa);

#else /* x86_64 */
/*
 * Duplicate of the MACH_KERNEL_PRIVATE physmap definitions, which are
 * otherwise inaccessible to this part of the code base.
 */
extern uint64_t physmap_base, physmap_max;

static inline void*
PHYSMAP_PTOV_check(void *paddr)
{
	uint64_t pvaddr = (uint64_t)paddr + physmap_base;

	/* Catch physical addresses which map outside the physmap window */
	if (__improbable(pvaddr >= physmap_max)) {
		panic("PHYSMAP_PTOV bounds exceeded, 0x%qx, 0x%qx, 0x%qx",
		    pvaddr, physmap_base, physmap_max);
	}

	return (void*)pvaddr;
}

#define PHYSMAP_PTOV(x) (PHYSMAP_PTOV_check((void*) (x)))
#define phystokv(x) ((vm_offset_t)(PHYSMAP_PTOV(x)))

#endif /* defined(__arm64__) */
756 
757 void
load_static_trust_cache(void)758 load_static_trust_cache(void)
759 {
760 	DTEntry memory_map = {0};
761 	const DTTrustCacheRange *tc_range = NULL;
762 	trust_cache_offsets_t *tc_offsets = NULL;
763 	unsigned int tc_dt_prop_length = 0;
764 	size_t tc_segment_length = 0;
765 
766 	/* Mark this function as having been called */
767 	trust_cache_static_init = true;
768 
769 	/* Nothing to do when the runtime isn't set */
770 	if (trust_cache_rt == NULL) {
771 		return;
772 	}
773 
774 	if (amfi->TrustCache.version < 1) {
775 		/* AMFI change hasn't landed in the build */
776 		printf("unable to load static trust cache: interface not supported\n");
777 		return;
778 	}
779 
780 	int err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);
781 	if (err != kSuccess) {
782 		printf("unable to find chosen/memory-map in the device tree: %d\n", err);
783 		return;
784 	}
785 
786 	err = SecureDTGetProperty(memory_map, "TrustCache", (const void **)&tc_range, &tc_dt_prop_length);
787 	if (err == kSuccess) {
788 		if (tc_dt_prop_length != sizeof(DTTrustCacheRange)) {
789 			panic("unexpected size for TrustCache property: %u != %zu",
790 			    tc_dt_prop_length, sizeof(DTTrustCacheRange));
791 		}
792 
793 		tc_offsets = (void*)phystokv(tc_range->paddr);
794 		tc_segment_length = tc_range->length;
795 	}
796 
797 	/* x86_64 devices aren't expected to have trust caches */
798 	if (tc_segment_length == 0) {
799 		if (tc_offsets && tc_offsets->num_caches != 0) {
800 			panic("trust cache segment is zero length but trust caches are available: %u",
801 			    tc_offsets->num_caches);
802 		}
803 
804 		printf("no external trust caches found (segment length is zero)\n");
805 		return;
806 	} else if (tc_offsets->num_caches == 0) {
807 		panic("trust cache segment isn't zero but no trust caches available: %lu",
808 		    (unsigned long)tc_segment_length);
809 	}
810 
811 	size_t offsets_length = 0;
812 	size_t struct_length = 0;
813 	if (os_mul_overflow(tc_offsets->num_caches, sizeof(uint32_t), &offsets_length)) {
814 		panic("overflow on the number of trust caches provided: %u", tc_offsets->num_caches);
815 	} else if (os_add_overflow(offsets_length, sizeof(trust_cache_offsets_t), &struct_length)) {
816 		panic("overflow on length of the trust cache offsets: %lu",
817 		    (unsigned long)offsets_length);
818 	} else if (tc_segment_length < struct_length) {
819 		panic("trust cache segment length smaller than required: %lu | %lu",
820 		    (unsigned long)tc_segment_length, (unsigned long)struct_length);
821 	}
822 	const uintptr_t tc_region_end = (uintptr_t)tc_offsets + tc_segment_length;
823 
824 	printf("attempting to load %u external trust cache modules\n", tc_offsets->num_caches);
825 
826 	for (uint32_t i = 0; i < tc_offsets->num_caches; i++) {
827 		TCReturn_t tc_ret = (TCReturn_t){.error = kTCReturnError};
828 		TCType_t tc_type = kTCTypeEngineering;
829 		TrustCache_t *trust_cache = NULL;
830 
831 		uintptr_t tc_module = 0;
832 		if (os_add_overflow((uintptr_t)tc_offsets, tc_offsets->offsets[i], &tc_module)) {
833 			panic("trust cache module start overflows: %u | %lu | %u",
834 			    i, (unsigned long)tc_offsets, tc_offsets->offsets[i]);
835 		} else if (tc_module >= tc_region_end) {
836 			panic("trust cache module begins after segment ends: %u | %lx | %lx",
837 			    i, (unsigned long)tc_module, tc_region_end);
838 		}
839 
840 		/* Should be safe for underflow */
841 		const size_t buffer_length = tc_region_end - tc_module;
842 
843 		/* The first module is always the static trust cache */
844 		if (i == 0) {
845 			tc_type = kTCTypeStatic;
846 			trust_cache = &trust_cache_static0;
847 		}
848 
849 #if CONFIG_SECOND_STATIC_TRUST_CACHE
850 		if (trust_cache_rt->allowSecondStaticTC && (i == 1)) {
851 			tc_type = kTCTypeStatic;
852 			trust_cache = &trust_cache_static1;
853 		}
854 #endif
855 
856 		if (tc_type == kTCTypeEngineering) {
857 			if (trust_cache_rt->allowEngineeringTC == false) {
858 				printf("skipping engineering trust cache module: %u\n", i);
859 				continue;
860 			}
861 
862 			/* Allocate the trust cache data structure -- Z_WAITOK_ZERO means this can't fail */
863 			trust_cache = kalloc_type(TrustCache_t, Z_WAITOK_ZERO);
864 			assert(trust_cache != NULL);
865 		}
866 
867 		tc_ret = amfi->TrustCache.loadModule(
868 			trust_cache_rt,
869 			tc_type,
870 			trust_cache,
871 			tc_module, buffer_length);
872 
873 		if (tc_ret.error != kTCReturnSuccess) {
874 			printf("unable to load trust cache module: %u (TCReturn: 0x%02X | 0x%02X | %u)\n",
875 			    i, tc_ret.component, tc_ret.error, tc_ret.uniqueError);
876 
877 			if (tc_type == kTCTypeStatic) {
878 				panic("failed to load static trust cache module: %u", i);
879 			}
880 			continue;
881 		}
882 		printf("loaded external trust cache module: %u\n", i);
883 
884 		/*
885 		 * The first module is always loaded as a static trust cache. If loading it failed,
886 		 * then this function would've panicked. If we reach here, it means we've loaded a
887 		 * static trust cache on the system.
888 		 */
889 		trust_cache_static_loaded = true;
890 	}
891 
892 	printf("completed loading external trust cache modules\n");
893 }
894 
895 kern_return_t
static_trust_cache_capabilities(uint32_t * num_static_trust_caches_ret,TCCapabilities_t * capabilities0_ret,TCCapabilities_t * capabilities1_ret)896 static_trust_cache_capabilities(
897 	uint32_t *num_static_trust_caches_ret,
898 	TCCapabilities_t *capabilities0_ret,
899 	TCCapabilities_t *capabilities1_ret)
900 {
901 	TCReturn_t tcRet = {.error = kTCReturnError};
902 
903 	*num_static_trust_caches_ret = 0;
904 	*capabilities0_ret = kTCCapabilityNone;
905 	*capabilities1_ret = kTCCapabilityNone;
906 
907 	/* Ensure static trust caches have been initialized */
908 	if (trust_cache_static_init == false) {
909 		panic("attempted to query static trust cache capabilities without init");
910 	}
911 
912 
913 	if (amfi->TrustCache.version < 2) {
914 		/* AMFI change hasn't landed in the build */
915 		printf("unable to get static trust cache capabilities: interface not supported\n");
916 		return KERN_NOT_SUPPORTED;
917 	} else if (trust_cache_static_loaded == false) {
918 		/* Return arguments already set */
919 		return KERN_SUCCESS;
920 	}
921 
922 	tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static0, capabilities0_ret);
923 	assert(tcRet.error == kTCReturnSuccess);
924 	*num_static_trust_caches_ret += 1;
925 
926 #if CONFIG_SECOND_STATIC_TRUST_CACHE
927 	tcRet = amfi->TrustCache.getCapabilities(&trust_cache_static1, capabilities1_ret);
928 	assert(tcRet.error == kTCReturnSuccess);
929 	*num_static_trust_caches_ret += 1;
930 #endif
931 
932 	return KERN_SUCCESS;
933 }
934