xref: /xnu-11215.41.3/bsd/kern/code_signing/txm.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <stdarg.h>
24 #include <stdatomic.h>
25 #include <os/overflow.h>
26 #include <machine/atomic.h>
27 #include <mach/vm_param.h>
28 #include <mach/vm_map.h>
29 #include <mach/shared_region.h>
30 #include <vm/vm_kern_xnu.h>
31 #include <kern/zalloc.h>
32 #include <kern/kalloc.h>
33 #include <kern/assert.h>
34 #include <kern/locks.h>
35 #include <kern/recount.h>
36 #include <kern/sched_prim.h>
37 #include <kern/lock_rw.h>
38 #include <libkern/libkern.h>
39 #include <libkern/section_keywords.h>
40 #include <libkern/coretrust/coretrust.h>
41 #include <libkern/amfi/amfi.h>
42 #include <pexpert/pexpert.h>
43 #include <sys/vm.h>
44 #include <sys/proc.h>
45 #include <sys/codesign.h>
46 #include <sys/code_signing.h>
47 #include <sys/sysctl.h>
48 #include <uuid/uuid.h>
49 #include <IOKit/IOLib.h>
50 #include <IOKit/IOBSD.h>
51 
52 #if CONFIG_SPTM
53 /*
54  * The TrustedExecutionMonitor environment works in tandem with the SPTM to provide code
55  * signing and memory isolation enforcement for data structures critical to ensuring that
56  * all code executed on the system is authorized to do so.
57  *
58  * Unless the data is managed by TXM itself, XNU needs to page-align everything, make the
59  * relevant type transfer, and then reference the memory as read-only.
60  *
61  * TXM enforces concurrency on its side, but through the use of try-locks. Upon a failure
62  * in acquiring the lock, TXM will panic. As a result, in order to ensure single-threaded
 * behavior, the kernel also has to take some locks on its side before calling into TXM.
64  */
65 #include <sys/trusted_execution_monitor.h>
66 #include <pexpert/arm64/board_config.h>
67 
68 /* Lock group used for all locks within the kernel for TXM */
69 LCK_GRP_DECLARE(txm_lck_grp, "txm_code_signing_lck_grp");
70 
71 #pragma mark Utilities
72 
73 /* Number of thread stacks is known at build-time */
74 #define NUM_TXM_THREAD_STACKS (MAX_CPUS)
75 txm_thread_stack_t thread_stacks[NUM_TXM_THREAD_STACKS] = {0};
76 
77 /* Singly-linked-list head for thread stacks */
78 SLIST_HEAD(thread_stack_head, _txm_thread_stack) thread_stacks_head =
79     SLIST_HEAD_INITIALIZER(thread_stacks_head);
80 
81 static decl_lck_mtx_data(, thread_stacks_lock);
82 static void *thread_stack_event = NULL;
83 
/*
 * Wire up the TXM thread stacks handed over through the SPTM boot arguments
 * and push each one onto the free list, then initialize the mutex protecting
 * that list. Called once from code_signing_init().
 */
static void
setup_thread_stacks(void)
{
	extern const sptm_bootstrap_args_xnu_t *SPTMArgs;
	txm_thread_stack_t *thread_stack = NULL;

	/* Initialize each thread stack and add it to the list */
	for (uint32_t i = 0; i < NUM_TXM_THREAD_STACKS; i++) {
		thread_stack = &thread_stacks[i];

		/* Acquire the thread stack virtual mapping */
		thread_stack->thread_stack_papt = SPTMArgs->txm_thread_stacks[i];

		/* Acquire the thread stack physical page */
		thread_stack->thread_stack_phys = (uintptr_t)kvtophys_nofail(
			thread_stack->thread_stack_papt);

		/*
		 * Resolve the pointer to the thread stack data. The shared data
		 * region sits in the last 1024 bytes of the stack page --
		 * NOTE(review): this offset must mirror TXM's own layout of
		 * TXMThreadStack_t within the page; confirm against TXM headers.
		 */
		thread_stack->thread_stack_data =
		    (TXMThreadStack_t*)(thread_stack->thread_stack_papt + (PAGE_SIZE - 1024));

		/* Add thread stack to the list head */
		SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
	}

	/* Initialize the thread stacks lock */
	lck_mtx_init(&thread_stacks_lock, &txm_lck_grp, 0);
}
112 
113 static txm_thread_stack_t*
acquire_thread_stack(void)114 acquire_thread_stack(void)
115 {
116 	txm_thread_stack_t *thread_stack = NULL;
117 
118 	/* Lock the thread stack list */
119 	lck_mtx_lock(&thread_stacks_lock);
120 
121 	while (SLIST_EMPTY(&thread_stacks_head) == true) {
122 		lck_mtx_sleep(
123 			&thread_stacks_lock,
124 			LCK_SLEEP_DEFAULT,
125 			&thread_stack_event,
126 			THREAD_UNINT);
127 	}
128 
129 	if (SLIST_EMPTY(&thread_stacks_head) == true) {
130 		panic("unable to acquire a thread stack for TXM");
131 	}
132 
133 	/* Use the first available thread stack */
134 	thread_stack = SLIST_FIRST(&thread_stacks_head);
135 
136 	/* Remove the thread stack from the list */
137 	SLIST_REMOVE_HEAD(&thread_stacks_head, link);
138 
139 	/* Unlock the thread stack list */
140 	lck_mtx_unlock(&thread_stacks_lock);
141 
142 	/* Associate the thread stack with the current thread */
143 	thread_associate_txm_thread_stack(thread_stack->thread_stack_phys);
144 
145 	return thread_stack;
146 }
147 
148 static void
release_thread_stack(txm_thread_stack_t * thread_stack)149 release_thread_stack(
150 	txm_thread_stack_t* thread_stack)
151 {
152 	/* Remove the TXM thread stack association with the current thread */
153 	thread_disassociate_txm_thread_stack(thread_stack->thread_stack_phys);
154 
155 	/* Lock the thread stack list */
156 	lck_mtx_lock(&thread_stacks_lock);
157 
158 	/* Add the thread stack at the list head */
159 	SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
160 
161 	/* Unlock the thread stack list */
162 	lck_mtx_unlock(&thread_stacks_lock);
163 
164 	/* Wake up any threads waiting to acquire a thread stack */
165 	thread_wakeup(&thread_stack_event);
166 }
167 
/*
 * Map a TXMReturn_t from the monitor onto the closest kern_return_t for
 * kernel callers. Any return code without an explicit mapping collapses
 * to KERN_FAILURE.
 */
static kern_return_t
txm_parse_return(
	TXMReturn_t txm_ret)
{
	switch (txm_ret.returnCode) {
	case kTXMSuccess:
		return KERN_SUCCESS;

	case kTXMReturnOutOfMemory:
		return KERN_RESOURCE_SHORTAGE;

	case kTXMReturnNotFound:
		return KERN_NOT_FOUND;

	case kTXMReturnNotSupported:
		return KERN_NOT_SUPPORTED;

	/* kTXMReturnTryAgain only exists from TXM kernel API version 6 on */
#if kTXMKernelAPIVersion >= 6
	case kTXMReturnTryAgain:
		return KERN_OPERATION_TIMED_OUT;
#endif

	default:
		return KERN_FAILURE;
	}
}
194 
195 static void
txm_print_return(TXMKernelSelector_t selector,TXMReturn_t txm_ret)196 txm_print_return(
197 	TXMKernelSelector_t selector,
198 	TXMReturn_t txm_ret)
199 {
200 	/*
201 	 * We specifically use IOLog instead of printf since printf is compiled out on
202 	 * RELEASE kernels. We want to ensure that errors from TXM are captured within
203 	 * sysdiagnoses from the field.
204 	 */
205 
206 	if (txm_ret.returnCode == kTXMSuccess) {
207 		return;
208 	} else if (txm_ret.returnCode == kTXMReturnTrustCache) {
209 		IOLog("TXM [Error]: TrustCache: selector: %u | 0x%02X | 0x%02X | %u\n",
210 		    selector, txm_ret.tcRet.component, txm_ret.tcRet.error, txm_ret.tcRet.uniqueError);
211 	} else if (txm_ret.returnCode == kTXMReturnCodeSignature) {
212 		IOLog("TXM [Error]: CodeSignature: selector: %u | 0x%02X | 0x%02X | %u\n",
213 		    selector, txm_ret.csRet.component, txm_ret.csRet.error, txm_ret.csRet.uniqueError);
214 	} else if (txm_ret.returnCode == kTXMReturnCodeErrno) {
215 		IOLog("TXM [Error]: Errno: selector: %u | %d\n",
216 		    selector, txm_ret.errnoRet);
217 	} else {
218 		IOLog("TXM [Error]: selector: %u | %u\n",
219 		    selector, txm_ret.returnCode);
220 	}
221 }
222 
223 #pragma mark Page Allocation
224 
225 static void
txm_add_page(void)226 txm_add_page(void)
227 {
228 	txm_call_t txm_call = {
229 		.selector = kTXMKernelSelectorAddFreeListPage,
230 		.failure_fatal = true,
231 		.num_input_args = 1
232 	};
233 
234 	/* Allocate a page from the VM -- transfers page to TXM internally */
235 	vm_map_address_t phys_addr = pmap_txm_allocate_page();
236 
237 	/* Add this page to the TXM free list */
238 	txm_kernel_call(&txm_call, phys_addr);
239 }
240 
241 #pragma mark Calls
242 
/*
 * Marshal up to 7 variadic arguments into the SPTM call registers x1-x7.
 *
 * NOTE: the cases deliberately repeat instead of falling through: va_arg
 * must consume arguments in order (x1 first), so a reverse-fallthrough
 * switch cannot express this. Do not "deduplicate" these cases.
 */
static void
txm_kernel_call_registers_setup(
	txm_call_t *parameters,
	sptm_call_regs_t *registers,
	va_list args)
{
	/*
	 * We are only ever allowed a maximum of 7 arguments for calling into TXM.
	 * This is because the SPTM dispatch only sets up registers x0-x7 for the
	 * call, and x0 is always reserved for passing in a thread stack for TXM
	 * to operate on.
	 */

	switch (parameters->num_input_args) {
	case 7:
		registers->x1 = va_arg(args, uintptr_t);
		registers->x2 = va_arg(args, uintptr_t);
		registers->x3 = va_arg(args, uintptr_t);
		registers->x4 = va_arg(args, uintptr_t);
		registers->x5 = va_arg(args, uintptr_t);
		registers->x6 = va_arg(args, uintptr_t);
		registers->x7 = va_arg(args, uintptr_t);
		break;

	case 6:
		registers->x1 = va_arg(args, uintptr_t);
		registers->x2 = va_arg(args, uintptr_t);
		registers->x3 = va_arg(args, uintptr_t);
		registers->x4 = va_arg(args, uintptr_t);
		registers->x5 = va_arg(args, uintptr_t);
		registers->x6 = va_arg(args, uintptr_t);
		break;

	case 5:
		registers->x1 = va_arg(args, uintptr_t);
		registers->x2 = va_arg(args, uintptr_t);
		registers->x3 = va_arg(args, uintptr_t);
		registers->x4 = va_arg(args, uintptr_t);
		registers->x5 = va_arg(args, uintptr_t);
		break;

	case 4:
		registers->x1 = va_arg(args, uintptr_t);
		registers->x2 = va_arg(args, uintptr_t);
		registers->x3 = va_arg(args, uintptr_t);
		registers->x4 = va_arg(args, uintptr_t);
		break;

	case 3:
		registers->x1 = va_arg(args, uintptr_t);
		registers->x2 = va_arg(args, uintptr_t);
		registers->x3 = va_arg(args, uintptr_t);
		break;

	case 2:
		registers->x1 = va_arg(args, uintptr_t);
		registers->x2 = va_arg(args, uintptr_t);
		break;

	case 1:
		registers->x1 = va_arg(args, uintptr_t);
		break;

	case 0:
		break;

	default:
		panic("invalid number of arguments to TXM: selector: %u | %u",
		    parameters->selector, parameters->num_input_args);
	}
}
314 
/*
 * Perform a single call into TXM: acquire a thread stack, marshal the
 * variadic arguments into registers, trap into the monitor, then copy the
 * return code and any return words out of the shared context area before
 * releasing the stack.
 *
 * Returns the raw TXMReturn_t; the caller (txm_kernel_call) handles
 * retries, logging and translation to kern_return_t.
 */
static TXMReturn_t
txm_kernel_call_internal(
	txm_call_t *parameters,
	va_list args)
{
	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
	sptm_call_regs_t txm_registers = {0};
	txm_thread_stack_t *thread_stack = NULL;
	const TXMThreadStack_t *thread_stack_data = NULL;
	const TXMSharedContextData_t *shared_context_data = NULL;

	/* Obtain a stack for this call -- may sleep until one is free */
	thread_stack = acquire_thread_stack();
	thread_stack_data = thread_stack->thread_stack_data;
	shared_context_data = &thread_stack_data->sharedData;

	/* Setup argument registers -- x0 always carries the stack's physical address */
	txm_registers.x0 = thread_stack->thread_stack_phys;
	txm_kernel_call_registers_setup(parameters, &txm_registers, args);

	/* Track resource usage */
	recount_enter_secure();

	/* Call into TXM */
	txm_enter(parameters->selector, &txm_registers);

	recount_leave_secure();

	/* TXM reports its result through the shared context area of the stack */
	txm_ret = (TXMReturn_t){.rawValue = shared_context_data->txmReturnCode};
	parameters->txm_ret = txm_ret;

	if (parameters->txm_ret.returnCode == kTXMSuccess) {
		/* Copy out return words, bounded by the stack's fixed capacity */
		parameters->num_return_words = shared_context_data->txmNumReturnWords;
		if (parameters->num_return_words > kTXMStackReturnWords) {
			panic("received excessive return words from TXM: selector: %u | %llu",
			    parameters->selector, parameters->num_return_words);
		}

		for (uint64_t i = 0; i < parameters->num_return_words; i++) {
			parameters->return_words[i] = shared_context_data->txmReturnWords[i];
		}
	}

	/* Release the thread stack as it is no longer needed */
	release_thread_stack(thread_stack);
	/* Pointers into the stack are stale once the stack is released */
	thread_stack_data = NULL;
	shared_context_data = NULL;

	return txm_ret;
}
365 
/*
 * Public entry point for calling into TXM.
 *
 * Retries the call for as long as TXM reports out-of-memory, feeding TXM a
 * fresh page before each retry (except for the add-page selector itself,
 * where OOM is unrecoverable). Afterwards, drains TXM's log buffer, prints
 * any error return, and enforces the txm_call_t contract flags
 * (failure_fatal, num_output_args).
 *
 * Returns the TXM result translated to a kern_return_t.
 */
kern_return_t
txm_kernel_call(
	txm_call_t *parameters, ...)
{
	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
	kern_return_t ret = KERN_DENIED;
	va_list args;

	/* Start the variadic arguments list */
	va_start(args, parameters);

	do {
		txm_ret = txm_kernel_call_internal(parameters, args);
		if (txm_ret.returnCode == kTXMReturnOutOfMemory) {
			/* The page-donation path itself must never hit OOM */
			if (parameters->selector == kTXMKernelSelectorAddFreeListPage) {
				panic("received out-of-memory error when adding a free page to TXM");
			}
			txm_add_page();
		}
	} while (txm_ret.returnCode == kTXMReturnOutOfMemory);

	/* Clean up the variadic arguments list */
	va_end(args);

	/* Print all TXM logs from the log buffer */
	if (parameters->skip_logs == false) {
		txm_print_logs();
	}

	/* Print the return code from TXM -- only prints for an error */
	if (parameters->failure_silent != true) {
		/* Callers may suppress exactly one expected failure code */
		if (parameters->failure_code_silent != txm_ret.returnCode) {
			txm_print_return(parameters->selector, txm_ret);
		}
	}

	/*
	 * To ease the process of calling into TXM, and to also reduce the number of
	 * lines of code for each call site, the txm_call_t offers some properties
	 * we can enforce over here. Go through these, and panic in case they aren't
	 * honored.
	 *
	 * NOTE: We check for "<" instead of "!=" for the number of return words we
	 * get back from TXM since this helps in forward development. If the kernel
	 * and TXM are proceeding at different project cadences, we do not want to
	 * gate adding more return words from TXM on the kernel first adopting the
	 * new number of return words.
	 */
	ret = txm_parse_return(txm_ret);

	if (parameters->failure_fatal && (ret != KERN_SUCCESS)) {
		panic("received fatal error for a selector from TXM: selector: %u | 0x%0llX",
		    parameters->selector, txm_ret.rawValue);
	} else if (parameters->num_return_words < parameters->num_output_args) {
		/* Only panic if return was a success */
		if (ret == KERN_SUCCESS) {
			panic("received fewer than expected return words from TXM: selector: %u | %llu",
			    parameters->selector, parameters->num_return_words);
		}
	}

	return ret;
}
429 
430 void
txm_transfer_region(vm_address_t addr,vm_size_t size)431 txm_transfer_region(
432 	vm_address_t addr,
433 	vm_size_t size)
434 {
435 	vm_address_t addr_end = 0;
436 	vm_size_t size_aligned = round_page(size);
437 
438 	if ((addr & PAGE_MASK) != 0) {
439 		panic("attempted to transfer non-page-aligned memory to TXM: %p", (void*)addr);
440 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
441 		panic("overflow on range to be transferred to TXM: %p | %lu",
442 		    (void*)addr, size);
443 	}
444 
445 	/* Make the memory read-only first (transfer will panic otherwise) */
446 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ);
447 
448 	/* Transfer each physical page to be TXM_DEFAULT */
449 	for (vm_address_t page = addr; page < addr_end; page += PAGE_SIZE) {
450 		pmap_txm_transfer_page(page);
451 	}
452 }
453 
454 void
txm_reclaim_region(vm_address_t addr,vm_size_t size)455 txm_reclaim_region(
456 	vm_address_t addr,
457 	vm_size_t size)
458 {
459 	vm_address_t addr_end = 0;
460 	vm_size_t size_aligned = round_page(size);
461 
462 	if ((addr & PAGE_MASK) != 0) {
463 		panic("attempted to reclaim non-page-aligned memory from TXM: %p", (void*)addr);
464 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
465 		panic("overflow on range to be reclaimed from TXM: %p | %lu",
466 		    (void*)addr, size);
467 	}
468 
469 	/*
470 	 * We can only reclaim once TXM has transferred the memory range back to the
471 	 * kernel. Hence, we simply try and switch permissions to read-write. If TXM
472 	 * hasn't transferred pages, this then should panic.
473 	 */
474 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ | VM_PROT_WRITE);
475 }
476 
477 static SECURITY_READ_ONLY_LATE(const char*) txm_log_page = NULL;
478 static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_head = NULL;
479 static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_sync = NULL;
480 
481 static decl_lck_mtx_data(, log_lock);
482 static uint32_t log_head = 0;
483 
/*
 * Drain and print any new entries from TXM's circular log buffer.
 * Safe to call before logging is initialized (returns immediately).
 */
void
txm_print_logs(void)
{
	uint32_t start_index = 0;
	uint32_t end_index = 0;

	/*
	 * The design here is very simple. TXM keeps adding slots to its circular buffer
	 * and the kernel attempts to read each one and print it, maintaining its own head
	 * for the log.
	 *
	 * This design is by nature lazy. TXM doesn't know or care if the kernel has gone
	 * through and printed any of the logs, so it'll just keep writing into its buffer
	 * and then circle around when it becomes full.
	 *
	 * This is fine most of the time since there are a decent amount of slots in the
	 * log buffer. We mostly have an issue when TXM is adding so many logs so quickly
	 * such that it wraps around and starts overwriting logs which haven't been seen
	 * by the kernel. If this were to happen, TXM's log head may circle around the
	 * head maintained by the kernel, causing a lot of logs to be missed, since the
	 * kernel only attempts the number of logs in-between the two heads.
	 *
	 * The fix for that is complicated, and until we see an actual impact, we're going
	 * to keep the simpler design in place.
	 */

	/* Return if the logging hasn't been setup yet */
	if (txm_log_sync == NULL) {
		return;
	}

	/*
	 * Holding the log lock and printing can cause lots of issues since printing can
	 * be rather slow. While we make it a point to keep the logging buffer quiet, some
	 * actions (such as loading trust caches) are still very chatty.
	 *
	 * As a result, we optimize this routine to ensure that the lock itself isn't held
	 * for very long. All we need to do within the critical section is calculate the
	 * starting and ending index of the log buffer. The actual printing doesn't need
	 * to be done with the lock held.
	 */
	lck_mtx_lock(&log_lock);

	start_index = log_head;
	end_index = os_atomic_load(txm_log_head, relaxed) % kTXMLogSlots;

	/* Update the log head with the new index */
	log_head = end_index;

	/* Release the log lock */
	lck_mtx_unlock(&log_lock);

	if (start_index != end_index) {
		/* Use load acquire here to sync up with all writes to the buffer */
		os_atomic_load(txm_log_sync, acquire);

		while (start_index != end_index) {
			/* Each slot is a fixed-size NUL-terminated string written by TXM */
			const char *slot = txm_log_page + (start_index * kTXMLogSlotSize);

			/* We add newlines after each log statement since TXM does not */
			printf("%s\n", slot);

			start_index = (start_index + 1) % kTXMLogSlots;
		}
	}
}
550 
551 #pragma mark Initialization
552 
553 SECURITY_READ_ONLY_LATE(const TXMReadOnlyData_t*) txm_ro_data = NULL;
554 SECURITY_READ_ONLY_LATE(const TXMStatistics_t*) txm_stats = NULL;
555 SECURITY_READ_ONLY_LATE(const CSConfig_t*) txm_cs_config = NULL;
556 SECURITY_READ_ONLY_LATE(CSRestrictedModeState_t*) txm_restricted_mode_state = NULL;
557 
558 SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = NULL;
559 static SECURITY_READ_ONLY_LATE(bool) code_signing_enabled = true;
560 static SECURITY_READ_ONLY_LATE(uint32_t) managed_signature_size = 0;
561 
562 static decl_lck_mtx_data(, compilation_service_lock);
563 static decl_lck_mtx_data(, unregister_sync_lock);
564 
565 static void
get_logging_info(void)566 get_logging_info(void)
567 {
568 	txm_call_t txm_call = {
569 		.selector = kTXMKernelSelectorGetLogInfo,
570 		.failure_fatal = true,
571 		.num_output_args = 3
572 	};
573 	txm_kernel_call(&txm_call);
574 
575 	txm_log_page = (const char*)txm_call.return_words[0];
576 	txm_log_head = (const uint32_t*)txm_call.return_words[1];
577 	txm_log_sync = (const uint32_t*)txm_call.return_words[2];
578 }
579 
/*
 * Query TXM for the code signing configuration and cache the pointers in
 * the read-only-late globals. Also derives code_signing_enabled and the
 * restricted-mode state from TXM's read-only data. Failure is fatal.
 */
static void
get_code_signing_info(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGetCodeSigningInfo,
		.failure_fatal = true,
		.num_output_args = 6
	};
	txm_kernel_call(&txm_call);

	/*
	 * Not using txm_call.return_words[0] for now. This was previously the
	 * code_signing_enabled field, but we've since switched to acquiring that
	 * value from TXM's read-only data.
	 *
	 * Not using txm_call.return_words[4] for now. This was previously the
	 * txm_cs_config field, but we've since switched to acquiring that value
	 * from TXM's read-only data.
	 */

	developer_mode_enabled = (bool*)txm_call.return_words[1];
	txm_stats = (TXMStatistics_t*)txm_call.return_words[2];
	managed_signature_size = (uint32_t)txm_call.return_words[3];
	txm_ro_data = (TXMReadOnlyData_t*)txm_call.return_words[5];

	/* Set code_signing_disabled based on read-only data */
	code_signing_enabled = txm_ro_data->codeSigningDisabled == false;

	/* Set txm_cs_config based on read-only data */
	txm_cs_config = &txm_ro_data->CSConfiguration;

	/* Only setup when REM is supported on the platform */
	if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
		txm_restricted_mode_state = txm_ro_data->restrictedModeState;
	}
}
616 
617 static void
set_shared_region_base_address(void)618 set_shared_region_base_address(void)
619 {
620 	txm_call_t txm_call = {
621 		.selector = kTXMKernelSelectorSetSharedRegionBaseAddress,
622 		.failure_fatal = true,
623 		.num_input_args = 2,
624 	};
625 
626 	txm_kernel_call(&txm_call,
627 	    SHARED_REGION_BASE,
628 	    SHARED_REGION_SIZE);
629 }
630 
/*
 * One-time initialization of the TXM code signing subsystem: thread
 * stacks, locks, logging, code signing configuration and the shared
 * region base address.
 *
 * Ordering matters: thread stacks must exist before any txm_kernel_call,
 * and the log lock must exist before the first call drains TXM's logs.
 */
void
code_signing_init(void)
{
#if kTXMKernelAPIVersion >= 6
	printf("libTXM_KernelVersion: %u\n", libTrustedExecutionMonitor_KernelVersion);
	printf("libTXM_Image4Version: %u\n", libTrustedExecutionMonitor_Image4Version);
#endif

	/* Setup the thread stacks used by TXM */
	setup_thread_stacks();

	/* Setup the logging lock */
	lck_mtx_init(&log_lock, &txm_lck_grp, 0);

	/* Setup TXM logging information */
	get_logging_info();

	/* Setup code signing configuration */
	get_code_signing_info();

	/* Setup all the other locks we need */
	lck_mtx_init(&compilation_service_lock, &txm_lck_grp, 0);
	lck_mtx_init(&unregister_sync_lock, &txm_lck_grp, 0);

	/*
	 * We need to let TXM know what the shared region base address is going
	 * to be for this boot.
	 */
	set_shared_region_base_address();

	/* Require signed code when monitor is enabled */
	if (code_signing_enabled == true) {
		cs_debug_fail_on_unsigned_code = 1;
	}
}
666 
667 void
txm_enter_lockdown_mode(void)668 txm_enter_lockdown_mode(void)
669 {
670 	txm_call_t txm_call = {
671 		.selector = kTXMKernelSelectorEnterLockdownMode,
672 		.failure_fatal = true,
673 	};
674 	txm_kernel_call(&txm_call);
675 }
676 
/*
 * Query TXM for the secure channel shared page.
 *
 * On success, optionally returns the physical address and size through the
 * out parameters (either may be NULL). Returns KERN_NOT_SUPPORTED when the
 * platform or TXM API version lacks secure channel support; any other TXM
 * failure is fatal.
 */
kern_return_t
txm_secure_channel_shared_page(
	uint64_t *secure_channel_phys,
	size_t *secure_channel_size)
{
#if kTXMKernelAPIVersion >= 5
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGetSecureChannelAddr,
		.num_output_args = 2
	};

	kern_return_t ret = txm_kernel_call(&txm_call);
	if (ret == KERN_NOT_SUPPORTED) {
		return ret;
	} else if (ret != KERN_SUCCESS) {
		panic("unexpected failure for TXM secure channel: %d", ret);
	}

	/* Return the physical address */
	if (secure_channel_phys != NULL) {
		*secure_channel_phys = txm_call.return_words[0];
	}

	/* Return the size */
	if (secure_channel_size != NULL) {
		*secure_channel_size = txm_call.return_words[1];
	}

	return KERN_SUCCESS;
#else
	/* Selector not available before TXM kernel API version 5 */
	(void)secure_channel_phys;
	(void)secure_channel_size;
	return KERN_NOT_SUPPORTED;
#endif
}
712 
713 #pragma mark Developer Mode
714 
715 void
txm_toggle_developer_mode(bool state)716 txm_toggle_developer_mode(bool state)
717 {
718 	txm_call_t txm_call = {
719 		.selector = kTXMKernelSelectorDeveloperModeToggle,
720 		.failure_fatal = true,
721 		.num_input_args = 1
722 	};
723 
724 	txm_kernel_call(&txm_call, state);
725 }
726 
727 #pragma mark Restricted Execution Mode
728 
729 kern_return_t
txm_rem_enable(void)730 txm_rem_enable(void)
731 {
732 	txm_call_t txm_call = {
733 		.selector = kTXMKernelSelectorEnableRestrictedMode
734 	};
735 	return txm_kernel_call(&txm_call);
736 }
737 
738 kern_return_t
txm_rem_state(void)739 txm_rem_state(void)
740 {
741 	if (txm_restricted_mode_state == NULL) {
742 		return KERN_NOT_SUPPORTED;
743 	}
744 
745 	CSReturn_t cs_ret = restrictedModeStatus(txm_restricted_mode_state);
746 	if (cs_ret.error == kCSReturnSuccess) {
747 		return KERN_SUCCESS;
748 	}
749 	return KERN_DENIED;
750 }
751 
752 #pragma mark Device State
753 
/*
 * Notify TXM that device state has changed. A no-op on TXM kernel API
 * versions before 6, where the selector does not exist. Failure is fatal.
 */
void
txm_update_device_state(void)
{
#if kTXMKernelAPIVersion >= 6
	txm_call_t txm_call = {
		.selector = kTXMSelectorUpdateDeviceState,
		.failure_fatal = true
	};
	txm_kernel_call(&txm_call);
#endif
}
765 
/*
 * Report the final security boot mode to TXM. A no-op on TXM kernel API
 * versions before 6, where the selector does not exist. Failure is fatal.
 */
void
txm_complete_security_boot_mode(
	__unused uint32_t security_boot_mode)
{
#if kTXMKernelAPIVersion >= 6
	txm_call_t txm_call = {
		.selector = kTXMSelectorCompleteSecurityBootMode,
		.num_input_args = 1,
		.failure_fatal = true
	};
	txm_kernel_call(&txm_call, security_boot_mode);
#endif
}
779 
780 #pragma mark Code Signing and Provisioning Profiles
781 
/*
 * Report whether code signing enforcement is enabled, as derived from
 * TXM's read-only data during get_code_signing_info().
 */
bool
txm_code_signing_enabled(void)
{
	return code_signing_enabled;
}
787 
/*
 * Return the size of a TXM-managed code signature, as reported by TXM
 * during get_code_signing_info().
 */
vm_size_t
txm_managed_code_signature_size(void)
{
	return managed_signature_size;
}
793 
/*
 * Register a provisioning profile with TXM.
 *
 * The profile blob is copied into a fresh page-wise kernel allocation whose
 * ownership is transferred to TXM; on success the allocation stays with TXM
 * until txm_unregister_provisioning_profile() reclaims it, and *profile_obj
 * receives TXM's opaque profile handle. On failure the range is reclaimed
 * and freed here.
 *
 * NOTE(review): the exit path assumes a failed registration call leaves the
 * pages transferred back to the kernel (txm_reclaim_region would panic
 * otherwise) -- confirm against TXM's failure semantics.
 */
kern_return_t
txm_register_provisioning_profile(
	const void *profile_blob,
	const size_t profile_blob_size,
	void **profile_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorRegisterProvisioningProfile,
		.num_input_args = 2,
		.num_output_args = 1
	};
	vm_address_t payload_addr = 0;
	kern_return_t ret = KERN_DENIED;

	/* We need to allocate page-wise in order to transfer the range to TXM */
	ret = kmem_alloc(kernel_map, &payload_addr, profile_blob_size,
	    KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for profile payload: %d\n", ret);
		goto exit;
	}

	/* Copy the contents into the allocation */
	memcpy((void*)payload_addr, profile_blob, profile_blob_size);

	/* Transfer the memory range to TXM */
	txm_transfer_region(payload_addr, profile_blob_size);

	ret = txm_kernel_call(&txm_call, payload_addr, profile_blob_size);
	if (ret == KERN_SUCCESS) {
		*profile_obj = (void*)txm_call.return_words[0];
	}

exit:
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		/* Reclaim this memory range */
		txm_reclaim_region(payload_addr, profile_blob_size);

		/* Free the memory range */
		kmem_free(kernel_map, payload_addr, profile_blob_size);
		payload_addr = 0;
	}

	return ret;
}
839 
/*
 * Ask TXM to validate trust for a registered provisioning profile using
 * the supplied signature data. Returns KERN_SUCCESS unconditionally on
 * TXM kernel API versions before 7, where the selector does not exist.
 */
kern_return_t
txm_trust_provisioning_profile(
	__unused void *profile_obj,
	__unused const void *sig_data,
	__unused size_t sig_size)
{
#if kTXMKernelAPIVersion >= 7
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorTrustProvisioningProfile,
		.num_input_args = 3
	};

	return txm_kernel_call(&txm_call, profile_obj, sig_data, sig_size);
#else
	/* The TXM selector hasn't yet landed */
	return KERN_SUCCESS;
#endif
}
858 
859 kern_return_t
txm_unregister_provisioning_profile(void * profile_obj)860 txm_unregister_provisioning_profile(
861 	void *profile_obj)
862 {
863 	txm_call_t txm_call = {
864 		.selector = kTXMKernelSelectorUnregisterProvisioningProfile,
865 		.num_input_args = 1,
866 		.num_output_args = 2
867 	};
868 	vm_address_t profile_addr = 0;
869 	vm_size_t profile_size = 0;
870 	kern_return_t ret = KERN_DENIED;
871 
872 	ret = txm_kernel_call(&txm_call, profile_obj);
873 	if (ret != KERN_SUCCESS) {
874 		return ret;
875 	}
876 
877 	profile_addr = txm_call.return_words[0];
878 	profile_size = txm_call.return_words[1];
879 
880 	/* Reclaim this memory range */
881 	txm_reclaim_region(profile_addr, profile_size);
882 
883 	/* Free the memory range */
884 	kmem_free(kernel_map, profile_addr, profile_size);
885 
886 	return KERN_SUCCESS;
887 }
888 
889 kern_return_t
txm_associate_provisioning_profile(void * sig_obj,void * profile_obj)890 txm_associate_provisioning_profile(
891 	void *sig_obj,
892 	void *profile_obj)
893 {
894 	txm_call_t txm_call = {
895 		.selector = kTXMKernelSelectorAssociateProvisioningProfile,
896 		.num_input_args = 2,
897 	};
898 
899 	return txm_kernel_call(&txm_call, sig_obj, profile_obj);
900 }
901 
902 kern_return_t
txm_disassociate_provisioning_profile(void * sig_obj)903 txm_disassociate_provisioning_profile(
904 	void *sig_obj)
905 {
906 	txm_call_t txm_call = {
907 		.selector = kTXMKernelSelectorDisassociateProvisioningProfile,
908 		.num_input_args = 1,
909 	};
910 
911 	/*
912 	 * Take the unregistration sync lock.
913 	 * For more information: rdar://99205627.
914 	 */
915 	lck_mtx_lock(&unregister_sync_lock);
916 
917 	/* Disassociate the profile from the signature */
918 	kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);
919 
920 	/* Release the unregistration sync lock */
921 	lck_mtx_unlock(&unregister_sync_lock);
922 
923 	return ret;
924 }
925 
926 void
txm_set_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])927 txm_set_compilation_service_cdhash(
928 	const uint8_t cdhash[CS_CDHASH_LEN])
929 {
930 	txm_call_t txm_call = {
931 		.selector = kTXMKernelSelectorAuthorizeCompilationServiceCDHash,
932 		.num_input_args = 1,
933 	};
934 
935 	lck_mtx_lock(&compilation_service_lock);
936 	txm_kernel_call(&txm_call, cdhash);
937 	lck_mtx_unlock(&compilation_service_lock);
938 }
939 
940 bool
txm_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])941 txm_match_compilation_service_cdhash(
942 	const uint8_t cdhash[CS_CDHASH_LEN])
943 {
944 	txm_call_t txm_call = {
945 		.selector = kTXMKernelSelectorMatchCompilationServiceCDHash,
946 		.failure_silent = true,
947 		.num_input_args = 1,
948 		.num_output_args = 1,
949 	};
950 	kern_return_t ret = KERN_DENIED;
951 
952 	/* Be safe and take the lock (avoid thread collisions) */
953 	lck_mtx_lock(&compilation_service_lock);
954 	ret = txm_kernel_call(&txm_call, cdhash);
955 	lck_mtx_unlock(&compilation_service_lock);
956 
957 	if (ret == KERN_SUCCESS) {
958 		return true;
959 	}
960 	return false;
961 }
962 
963 void
txm_set_local_signing_public_key(const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])964 txm_set_local_signing_public_key(
965 	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
966 {
967 	txm_call_t txm_call = {
968 		.selector = kTXMKernelSelectorSetLocalSigningPublicKey,
969 		.num_input_args = 1,
970 	};
971 
972 	txm_kernel_call(&txm_call, public_key);
973 }
974 
975 uint8_t*
txm_get_local_signing_public_key(void)976 txm_get_local_signing_public_key(void)
977 {
978 	txm_call_t txm_call = {
979 		.selector = kTXMKernelSelectorGetLocalSigningPublicKey,
980 		.num_output_args = 1,
981 	};
982 	kern_return_t ret = KERN_DENIED;
983 
984 	ret = txm_kernel_call(&txm_call);
985 	if (ret != KERN_SUCCESS) {
986 		return NULL;
987 	}
988 
989 	return (uint8_t*)txm_call.return_words[0];
990 }
991 
992 void
txm_unrestrict_local_signing_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])993 txm_unrestrict_local_signing_cdhash(
994 	const uint8_t cdhash[CS_CDHASH_LEN])
995 {
996 	txm_call_t txm_call = {
997 		.selector = kTXMKernelSelectorAuthorizeLocalSigningCDHash,
998 		.num_input_args = 1,
999 	};
1000 
1001 	txm_kernel_call(&txm_call, cdhash);
1002 }
1003 
/*
 * Register a code signature blob with TXM.
 *
 * On success, returns a TXM signature object through sig_obj and the address
 * of the (possibly relocated) signature through txm_signature_addr. When the
 * blob is larger than TXM's managed signature size, ownership of the backing
 * memory is transferred to TXM first, and reclaimed again on failure so the
 * caller's memory is never leaked into the monitor.
 */
kern_return_t
txm_register_code_signature(
	const vm_address_t signature_addr,
	const vm_size_t signature_size,
	const vm_offset_t code_directory_offset,
	const char *signature_path,
	void **sig_obj,
	vm_address_t *txm_signature_addr)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorRegisterCodeSignature,
		.num_input_args = 3,
		.num_output_args = 2,
	};
	kern_return_t ret = KERN_DENIED;

	/*
	 * TXM performs more exhaustive validation of the code signature and figures
	 * out the best code directory to use on its own. As a result, this offset here
	 * is not used.
	 */
	(void)code_directory_offset;

	/*
	 * If the signature is large enough to not fit within TXM's managed signature
	 * size, then we need to transfer it over so it is owned by TXM.
	 */
	if (signature_size > txm_managed_code_signature_size()) {
		txm_transfer_region(signature_addr, signature_size);
	}

	ret = txm_kernel_call(
		&txm_call,
		signature_addr,
		signature_size,
		signature_path);

	if (ret != KERN_SUCCESS) {
		goto exit;
	}

	/* return_words[0]: signature object, return_words[1]: signature address */
	*sig_obj = (void*)txm_call.return_words[0];
	*txm_signature_addr = txm_call.return_words[1];

exit:
	/* On failure, take back any memory previously transferred to TXM */
	if ((ret != KERN_SUCCESS) && (signature_size > txm_managed_code_signature_size())) {
		txm_reclaim_region(signature_addr, signature_size);
	}

	return ret;
}
1055 
/*
 * Unregister a code signature object from TXM and, when the signature memory
 * was originally transferred from the kernel (i.e. not TXM-managed), reclaim
 * that memory back into kernel ownership. The monitor call itself is marked
 * failure_fatal, so it cannot return an error.
 */
kern_return_t
txm_unregister_code_signature(
	void *sig_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorUnregisterCodeSignature,
		.failure_fatal = true,
		.num_input_args = 1,
		.num_output_args = 2,
	};
	TXMCodeSignature_t *cs_obj = sig_obj;
	vm_address_t signature_addr = 0;
	vm_size_t signature_size = 0;
	bool txm_managed = false;

	/*
	 * Check if the signature memory is TXM managed. TXM_BULK_DATA marks
	 * memory that was transferred in from the kernel and must be reclaimed.
	 */
	txm_managed = cs_obj->sptmType != TXM_BULK_DATA;

	/*
	 * Take the unregistration sync lock.
	 * For more information: rdar://99205627.
	 */
	lck_mtx_lock(&unregister_sync_lock);

	/* Unregister the signature from TXM -- cannot fail */
	txm_kernel_call(&txm_call, sig_obj);

	/* Release the unregistration sync lock */
	lck_mtx_unlock(&unregister_sync_lock);

	/* return_words[0]: signature address, return_words[1]: signature size */
	signature_addr = txm_call.return_words[0];
	signature_size = txm_call.return_words[1];

	/* Reclaim the memory range in case we need to */
	if (txm_managed == false) {
		txm_reclaim_region(signature_addr, signature_size);
	}

	return KERN_SUCCESS;
}
1096 
1097 kern_return_t
txm_verify_code_signature(void * sig_obj)1098 txm_verify_code_signature(
1099 	void *sig_obj)
1100 {
1101 	txm_call_t txm_call = {
1102 		.selector = kTXMKernelSelectorValidateCodeSignature,
1103 		.num_input_args = 1,
1104 	};
1105 	kern_return_t ret = KERN_DENIED;
1106 
1107 	/*
1108 	 * Verification of the code signature may perform a trust cache look up.
1109 	 * In order to avoid any collisions with threads which may be loading a
1110 	 * trust cache, we take a reader lock on the trust cache runtime.
1111 	 */
1112 
1113 	lck_rw_lock_shared(&txm_trust_cache_lck);
1114 	ret = txm_kernel_call(&txm_call, sig_obj);
1115 	lck_rw_unlock_shared(&txm_trust_cache_lck);
1116 
1117 	return ret;
1118 }
1119 
1120 kern_return_t
txm_reconstitute_code_signature(void * sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)1121 txm_reconstitute_code_signature(
1122 	void *sig_obj,
1123 	vm_address_t *unneeded_addr,
1124 	vm_size_t *unneeded_size)
1125 {
1126 	txm_call_t txm_call = {
1127 		.selector = kTXMKernelSelectorReconstituteCodeSignature,
1128 		.failure_fatal = true,
1129 		.num_input_args = 1,
1130 		.num_output_args = 2,
1131 	};
1132 	vm_address_t return_addr = 0;
1133 	vm_size_t return_size = 0;
1134 
1135 	/* Reconstitute the code signature -- cannot fail */
1136 	txm_kernel_call(&txm_call, sig_obj);
1137 
1138 	return_addr = txm_call.return_words[0];
1139 	return_size = txm_call.return_words[1];
1140 
1141 	/* Reclaim the memory region if we need to */
1142 	if ((return_addr != 0) && (return_size != 0)) {
1143 		txm_reclaim_region(return_addr, return_size);
1144 	}
1145 
1146 	*unneeded_addr = return_addr;
1147 	*unneeded_size = return_size;
1148 
1149 	return KERN_SUCCESS;
1150 }
1151 
1152 #pragma mark Address Spaces
1153 
1154 kern_return_t
txm_register_address_space(pmap_t pmap,uint16_t addr_space_id,TXMAddressSpaceFlags_t flags)1155 txm_register_address_space(
1156 	pmap_t pmap,
1157 	uint16_t addr_space_id,
1158 	TXMAddressSpaceFlags_t flags)
1159 {
1160 	txm_call_t txm_call = {
1161 		.selector = kTXMKernelSelectorRegisterAddressSpace,
1162 		.failure_fatal = true,
1163 		.num_input_args = 2,
1164 		.num_output_args = 1,
1165 	};
1166 	TXMAddressSpace_t *txm_addr_space = NULL;
1167 
1168 	/* Register the address space -- cannot fail */
1169 	txm_kernel_call(&txm_call, addr_space_id, flags);
1170 
1171 	/* Set the address space object within the PMAP */
1172 	txm_addr_space = (TXMAddressSpace_t*)txm_call.return_words[0];
1173 	pmap_txm_set_addr_space(pmap, txm_addr_space);
1174 
1175 	return KERN_SUCCESS;
1176 }
1177 
1178 kern_return_t
txm_unregister_address_space(pmap_t pmap)1179 txm_unregister_address_space(
1180 	pmap_t pmap)
1181 {
1182 	txm_call_t txm_call = {
1183 		.selector = kTXMKernelSelectorUnregisterAddressSpace,
1184 		.failure_fatal = true,
1185 		.num_input_args = 1,
1186 	};
1187 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1188 
1189 	/*
1190 	 * Take the unregistration sync lock.
1191 	 * For more information: rdar://99205627.
1192 	 */
1193 	lck_mtx_lock(&unregister_sync_lock);
1194 
1195 	/* Unregister the address space -- cannot fail */
1196 	txm_kernel_call(&txm_call, txm_addr_space);
1197 
1198 	/* Release the unregistration sync lock */
1199 	lck_mtx_unlock(&unregister_sync_lock);
1200 
1201 	/* Remove the address space from the pmap */
1202 	pmap_txm_set_addr_space(pmap, NULL);
1203 
1204 	return KERN_SUCCESS;
1205 }
1206 
/*
 * Associate a registered code signature object with a region of an address
 * space. On success, if this association established the address space's
 * main region, the signature's trust level is propagated to the PMAP.
 *
 * Returns the result of the TXM association call.
 */
kern_return_t
txm_associate_code_signature(
	pmap_t pmap,
	void *sig_obj,
	const vm_address_t region_addr,
	const vm_size_t region_size,
	const vm_offset_t region_offset)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorAssociateCodeSignature,
		.num_input_args = 5,
	};
	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
	kern_return_t ret = KERN_DENIED;

	/*
	 * Associating a code signature may require exclusive access to the TXM address
	 * space lock within TXM.
	 */
	pmap_txm_acquire_exclusive_lock(pmap);

	/*
	 * If the address space in question is a nested address space, then all associations
	 * need to go into the shared region base range. The VM layer is inconsistent with
	 * how it makes associations with TXM vs. how it maps pages into the shared region.
	 *
	 * For TXM, the associations are made without taking the base range into account,
	 * but when mappings are entered into the shared region, the base range is taken
	 * into account. To normalize this, we add the base range address here.
	 */
	vm_address_t adjusted_region_addr = region_addr;
	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeSharedRegion) {
		adjusted_region_addr += SHARED_REGION_BASE;
	}

	/*
	 * The VM tries a bunch of weird mappings within launchd for some platform code
	 * which isn't mapped contiguously. These mappings don't succeed, but the failure
	 * is fairly harmless since everything seems to work. However, since the call to
	 * TXM fails, we make a series of logs. Hence, for launchd, we suppress failure
	 * logs.
	 */
	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeAddressSpace) {
		/* TXMTODO: Scope this to launchd better */
		txm_call.failure_code_silent = kTXMReturnPlatformCodeMapping;
	}

	/* Check if the main region has been set on the address space */
	bool main_region_set = txm_addr_space->mainRegion != NULL;
	bool main_region_set_after = false;

	ret = txm_kernel_call(
		&txm_call,
		txm_addr_space,
		sig_obj,
		adjusted_region_addr,
		region_size,
		region_offset);

	/* Retry (busy-wait) while TXM reports the operation timed out */
	while (ret == KERN_OPERATION_TIMED_OUT) {
		/*
		 * There is no easy method to sleep in the kernel. This operation has the
		 * potential to burn CPU cycles, but that is alright since we don't actually
		 * ever expect to enter this case on legitimately operating systems.
		 */
		ret = txm_kernel_call(
			&txm_call,
			txm_addr_space,
			sig_obj,
			adjusted_region_addr,
			region_size,
			region_offset);
	}

	/*
	 * If the main region wasn't set on the address space before hand, but this new
	 * call into TXM was successful and sets the main region, it means this signature
	 * object is associated with the main region on the address space. With this, we
	 * can now set the appropriate trust level on the PMAP.
	 */
	if (ret == KERN_SUCCESS) {
		main_region_set_after = txm_addr_space->mainRegion != NULL;
	}

	/* Unlock the TXM address space lock */
	pmap_txm_release_exclusive_lock(pmap);

	/* Check if we should set the trust level on the PMAP */
	if (!main_region_set && main_region_set_after) {
		const TXMCodeSignature_t *cs_obj = sig_obj;
		const SignatureValidation_t *sig = &cs_obj->sig;

		/*
		 * This is gross, as we're dereferencing into a private data structure type.
		 * There are 2 ways to clean this up in the future:
		 * 1. Import libCodeSignature, so we can use "codeSignatureGetTrustLevel".
		 * 2. Cache the trust level on the address space within TXM and then use it.
		 */
		pmap_txm_set_trust_level(pmap, sig->trustLevel);
	}

	return ret;
}
1310 
1311 kern_return_t
txm_allow_jit_region(pmap_t pmap)1312 txm_allow_jit_region(
1313 	pmap_t pmap)
1314 {
1315 	txm_call_t txm_call = {
1316 		.selector = kTXMKernelSelectorAllowJITRegion,
1317 		.num_input_args = 1,
1318 	};
1319 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1320 	kern_return_t ret = KERN_DENIED;
1321 
1322 	pmap_txm_acquire_shared_lock(pmap);
1323 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1324 	pmap_txm_release_shared_lock(pmap);
1325 
1326 	return ret;
1327 }
1328 
1329 kern_return_t
txm_associate_jit_region(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)1330 txm_associate_jit_region(
1331 	pmap_t pmap,
1332 	const vm_address_t region_addr,
1333 	const vm_size_t region_size)
1334 {
1335 	txm_call_t txm_call = {
1336 		.selector = kTXMKernelSelectorAssociateJITRegion,
1337 		.num_input_args = 3,
1338 	};
1339 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1340 	kern_return_t ret = KERN_DENIED;
1341 
1342 	/*
1343 	 * Associating a JIT region may require exclusive access to the TXM address
1344 	 * space lock within TXM.
1345 	 */
1346 	pmap_txm_acquire_exclusive_lock(pmap);
1347 
1348 	ret = txm_kernel_call(
1349 		&txm_call,
1350 		txm_addr_space,
1351 		region_addr,
1352 		region_size);
1353 
1354 	/* Unlock the TXM address space lock */
1355 	pmap_txm_release_exclusive_lock(pmap);
1356 
1357 	return ret;
1358 }
1359 
1360 kern_return_t
txm_address_space_debugged(pmap_t pmap)1361 txm_address_space_debugged(
1362 	pmap_t pmap)
1363 {
1364 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1365 	bool debug_regions_allowed = false;
1366 
1367 	/*
1368 	 * We do not actually need to trap into the monitor for this function for
1369 	 * now. It might be a tad bit more secure to actually trap into the monitor
1370 	 * as it implicitly verifies all of our pointers, but since this is a simple
1371 	 * state check against the address space, the real policy around it lies
1372 	 * within the kernel still, in which case entering the monitor doesn't
1373 	 * really provide much more security.
1374 	 */
1375 
1376 	pmap_txm_acquire_shared_lock(pmap);
1377 	debug_regions_allowed = os_atomic_load(&txm_addr_space->allowsInvalidCode, relaxed);
1378 	pmap_txm_release_shared_lock(pmap);
1379 
1380 	if (debug_regions_allowed == true) {
1381 		return KERN_SUCCESS;
1382 	}
1383 	return KERN_DENIED;
1384 }
1385 
1386 kern_return_t
txm_associate_debug_region(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)1387 txm_associate_debug_region(
1388 	pmap_t pmap,
1389 	const vm_address_t region_addr,
1390 	const vm_size_t region_size)
1391 {
1392 	/*
1393 	 * This function is an interesting one. There is no need for us to make
1394 	 * a call into TXM for this one and instead, all we need to do here is
1395 	 * to verify that the TXM address space actually allows debug regions to
1396 	 * be mapped in or not.
1397 	 */
1398 	(void)region_addr;
1399 	(void)region_size;
1400 
1401 	kern_return_t ret = txm_address_space_debugged(pmap);
1402 	if (ret != KERN_SUCCESS) {
1403 		printf("address space does not allow creating debug regions\n");
1404 	}
1405 
1406 	return ret;
1407 }
1408 
1409 kern_return_t
txm_allow_invalid_code(pmap_t pmap)1410 txm_allow_invalid_code(
1411 	pmap_t pmap)
1412 {
1413 	txm_call_t txm_call = {
1414 		.selector = kTXMKernelSelectorAllowInvalidCode,
1415 		.num_input_args = 1,
1416 	};
1417 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1418 	kern_return_t ret = KERN_DENIED;
1419 
1420 	/*
1421 	 * Allowing invalid code may require exclusive access to the TXM address
1422 	 * space lock within TXM.
1423 	 */
1424 
1425 	pmap_txm_acquire_exclusive_lock(pmap);
1426 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1427 	pmap_txm_release_exclusive_lock(pmap);
1428 
1429 	return ret;
1430 }
1431 
1432 kern_return_t
txm_get_trust_level_kdp(pmap_t pmap,uint32_t * trust_level)1433 txm_get_trust_level_kdp(
1434 	pmap_t pmap,
1435 	uint32_t *trust_level)
1436 {
1437 	CSTrust_t txm_trust_level = kCSTrustUntrusted;
1438 
1439 	kern_return_t ret = pmap_txm_get_trust_level_kdp(pmap, &txm_trust_level);
1440 	if (ret != KERN_SUCCESS) {
1441 		return ret;
1442 	}
1443 
1444 	if (trust_level != NULL) {
1445 		*trust_level = txm_trust_level;
1446 	}
1447 	return KERN_SUCCESS;
1448 }
1449 
1450 kern_return_t
txm_get_jit_address_range_kdp(pmap_t pmap,uintptr_t * jit_region_start,uintptr_t * jit_region_end)1451 txm_get_jit_address_range_kdp(
1452 	pmap_t pmap,
1453 	uintptr_t *jit_region_start,
1454 	uintptr_t *jit_region_end)
1455 {
1456 	return pmap_txm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
1457 }
1458 
1459 kern_return_t
txm_address_space_exempt(const pmap_t pmap)1460 txm_address_space_exempt(
1461 	const pmap_t pmap)
1462 {
1463 	if (pmap_performs_stage2_translations(pmap) == true) {
1464 		return KERN_SUCCESS;
1465 	}
1466 
1467 	return KERN_DENIED;
1468 }
1469 
1470 kern_return_t
txm_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)1471 txm_fork_prepare(
1472 	pmap_t old_pmap,
1473 	pmap_t new_pmap)
1474 {
1475 	/*
1476 	 * We'll add support for this as the need for it becomes more important.
1477 	 * TXMTODO: Complete this implementation.
1478 	 */
1479 	(void)old_pmap;
1480 	(void)new_pmap;
1481 
1482 	return KERN_SUCCESS;
1483 }
1484 
1485 kern_return_t
txm_acquire_signing_identifier(const void * sig_obj,const char ** signing_id)1486 txm_acquire_signing_identifier(
1487 	const void *sig_obj,
1488 	const char **signing_id)
1489 {
1490 	txm_call_t txm_call = {
1491 		.selector = kTXMKernelSelectorAcquireSigningIdentifier,
1492 		.num_input_args = 1,
1493 		.num_output_args = 1,
1494 		.failure_fatal = true,
1495 	};
1496 
1497 	/* Get the signing ID -- should not fail */
1498 	txm_kernel_call(&txm_call, sig_obj);
1499 
1500 	if (signing_id != NULL) {
1501 		*signing_id = (const char*)txm_call.return_words[0];
1502 	}
1503 	return KERN_SUCCESS;
1504 }
1505 
1506 #pragma mark Entitlements
1507 
1508 kern_return_t
txm_associate_kernel_entitlements(void * sig_obj,const void * kernel_entitlements)1509 txm_associate_kernel_entitlements(
1510 	void *sig_obj,
1511 	const void *kernel_entitlements)
1512 {
1513 	txm_call_t txm_call = {
1514 		.selector = kTXMKernelSelectorAssociateKernelEntitlements,
1515 		.num_input_args = 2,
1516 		.failure_fatal = true,
1517 	};
1518 
1519 	/* Associate the kernel entitlements -- should not fail */
1520 	txm_kernel_call(&txm_call, sig_obj, kernel_entitlements);
1521 
1522 	return KERN_SUCCESS;
1523 }
1524 
1525 kern_return_t
txm_resolve_kernel_entitlements(pmap_t pmap,const void ** kernel_entitlements)1526 txm_resolve_kernel_entitlements(
1527 	pmap_t pmap,
1528 	const void **kernel_entitlements)
1529 {
1530 	txm_call_t txm_call = {
1531 		.selector = kTXMKernelSelectorResolveKernelEntitlementsAddressSpace,
1532 		.skip_logs = true,
1533 		.num_input_args = 1,
1534 		.num_output_args = 1,
1535 		.failure_silent = true,
1536 	};
1537 	TXMAddressSpace_t *txm_addr_space = NULL;
1538 	kern_return_t ret = KERN_DENIED;
1539 
1540 	if (pmap == pmap_txm_kernel_pmap()) {
1541 		return KERN_NOT_FOUND;
1542 	}
1543 	txm_addr_space = pmap_txm_addr_space(pmap);
1544 
1545 	pmap_txm_acquire_shared_lock(pmap);
1546 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1547 	pmap_txm_release_shared_lock(pmap);
1548 
1549 	if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
1550 		*kernel_entitlements = (const void*)txm_call.return_words[0];
1551 	}
1552 	return ret;
1553 }
1554 
1555 kern_return_t
txm_accelerate_entitlements(void * sig_obj,CEQueryContext_t * ce_ctx)1556 txm_accelerate_entitlements(
1557 	void *sig_obj,
1558 	CEQueryContext_t *ce_ctx)
1559 {
1560 	txm_call_t txm_call = {
1561 		.selector = kTXMKernelSelectorAccelerateEntitlements,
1562 		.num_input_args = 1,
1563 		.num_output_args = 1,
1564 	};
1565 	kern_return_t ret = KERN_DENIED;
1566 
1567 	ret = txm_kernel_call(&txm_call, sig_obj);
1568 	if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
1569 		*ce_ctx = (CEQueryContext_t)txm_call.return_words[0];
1570 	}
1571 
1572 	return ret;
1573 }
1574 
1575 #pragma mark Image4
1576 
1577 void*
txm_image4_storage_data(__unused size_t * allocated_size)1578 txm_image4_storage_data(
1579 	__unused size_t *allocated_size)
1580 {
1581 	/*
1582 	 * AppleImage4 builds a variant of TXM which TXM should link against statically
1583 	 * thereby removing the need for the kernel to allocate some data on behalf of
1584 	 * the kernel extension.
1585 	 */
1586 	panic("unsupported AppleImage4 interface");
1587 }
1588 
1589 void
txm_image4_set_nonce(const img4_nonce_domain_index_t ndi,const img4_nonce_t * nonce)1590 txm_image4_set_nonce(
1591 	const img4_nonce_domain_index_t ndi,
1592 	const img4_nonce_t *nonce)
1593 {
1594 	txm_call_t txm_call = {
1595 		.selector = kTXMKernelSelectorImage4SetNonce,
1596 		.failure_fatal = true,
1597 		.num_input_args = 2,
1598 	};
1599 
1600 	txm_kernel_call(&txm_call, ndi, nonce);
1601 }
1602 
1603 void
txm_image4_roll_nonce(const img4_nonce_domain_index_t ndi)1604 txm_image4_roll_nonce(
1605 	const img4_nonce_domain_index_t ndi)
1606 {
1607 	txm_call_t txm_call = {
1608 		.selector = kTXMKernelSelectorImage4RollNonce,
1609 		.failure_fatal = true,
1610 		.num_input_args = 1,
1611 	};
1612 
1613 	txm_kernel_call(&txm_call, ndi);
1614 }
1615 
1616 errno_t
txm_image4_copy_nonce(const img4_nonce_domain_index_t ndi,img4_nonce_t * nonce_out)1617 txm_image4_copy_nonce(
1618 	const img4_nonce_domain_index_t ndi,
1619 	img4_nonce_t *nonce_out)
1620 {
1621 	txm_call_t txm_call = {
1622 		.selector = kTXMKernelSelectorImage4GetNonce,
1623 		.num_input_args = 1,
1624 		.num_output_args = 1,
1625 	};
1626 	const img4_nonce_t *nonce = NULL;
1627 	TXMReturn_t txm_ret = {0};
1628 	kern_return_t ret = KERN_DENIED;
1629 
1630 	ret = txm_kernel_call(&txm_call, ndi);
1631 	if (ret != KERN_SUCCESS) {
1632 		txm_ret = txm_call.txm_ret;
1633 		if (txm_ret.returnCode != kTXMReturnCodeErrno) {
1634 			return EPERM;
1635 		}
1636 		return txm_ret.errnoRet;
1637 	}
1638 
1639 	/* Acquire a pointer to the nonce from TXM */
1640 	nonce = (const img4_nonce_t*)txm_call.return_words[0];
1641 
1642 	if (nonce_out) {
1643 		*nonce_out = *nonce;
1644 	}
1645 	return 0;
1646 }
1647 
1648 errno_t
txm_image4_execute_object(img4_runtime_object_spec_index_t obj_spec_index,const img4_buff_t * payload,const img4_buff_t * manifest)1649 txm_image4_execute_object(
1650 	img4_runtime_object_spec_index_t obj_spec_index,
1651 	const img4_buff_t *payload,
1652 	const img4_buff_t *manifest)
1653 {
1654 	/* Not supported within TXM yet */
1655 	(void)obj_spec_index;
1656 	(void)payload;
1657 	(void)manifest;
1658 
1659 	printf("image4 object execution isn't supported by TXM\n");
1660 	return ENOSYS;
1661 }
1662 
1663 errno_t
txm_image4_copy_object(img4_runtime_object_spec_index_t obj_spec_index,vm_address_t object_out,size_t * object_length)1664 txm_image4_copy_object(
1665 	img4_runtime_object_spec_index_t obj_spec_index,
1666 	vm_address_t object_out,
1667 	size_t *object_length)
1668 {
1669 	/* Not supported within TXM yet */
1670 	(void)obj_spec_index;
1671 	(void)object_out;
1672 	(void)object_length;
1673 
1674 	printf("image4 object copying isn't supported by TXM\n");
1675 	return ENOSYS;
1676 }
1677 
1678 const void*
txm_image4_get_monitor_exports(void)1679 txm_image4_get_monitor_exports(void)
1680 {
1681 	txm_call_t txm_call = {
1682 		.selector = kTXMKernelSelectorImage4GetExports,
1683 		.failure_fatal = true,
1684 		.num_output_args = 1,
1685 	};
1686 
1687 	txm_kernel_call(&txm_call);
1688 	return (const void*)txm_call.return_words[0];
1689 }
1690 
1691 errno_t
txm_image4_set_release_type(const char * release_type)1692 txm_image4_set_release_type(
1693 	const char *release_type)
1694 {
1695 	txm_call_t txm_call = {
1696 		.selector = kTXMKernelSelectorImage4SetReleaseType,
1697 		.failure_fatal = true,
1698 		.num_input_args = 1,
1699 	};
1700 
1701 	/* Set the release type -- cannot fail */
1702 	txm_kernel_call(&txm_call, release_type);
1703 
1704 	return 0;
1705 }
1706 
errno_t
txm_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorImage4SetBootNonceShadow,
		.failure_fatal = true,
		.num_input_args = 1,
	};

	/* Set the boot nonce shadow for the given nonce domain -- cannot fail */
	txm_kernel_call(&txm_call, ndi);

	return 0;
}
1722 
1723 #pragma mark Image4 - New
1724 
1725 static inline bool
_txm_image4_monitor_trap_supported(image4_cs_trap_t selector)1726 _txm_image4_monitor_trap_supported(
1727 	image4_cs_trap_t selector)
1728 {
1729 	switch (selector) {
1730 #if kTXMImage4APIVersion >= 1
1731 	case IMAGE4_CS_TRAP_KMOD_SET_RELEASE_TYPE:
1732 	case IMAGE4_CS_TRAP_NONCE_SET:
1733 	case IMAGE4_CS_TRAP_NONCE_ROLL:
1734 	case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1735 		return true;
1736 #endif
1737 
1738 	default:
1739 		return false;
1740 	}
1741 }
1742 
1743 kern_return_t
txm_image4_transfer_region(image4_cs_trap_t selector,vm_address_t region_addr,vm_size_t region_size)1744 txm_image4_transfer_region(
1745 	image4_cs_trap_t selector,
1746 	vm_address_t region_addr,
1747 	vm_size_t region_size)
1748 {
1749 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1750 		txm_transfer_region(region_addr, region_size);
1751 	}
1752 	return KERN_SUCCESS;
1753 }
1754 
1755 kern_return_t
txm_image4_reclaim_region(image4_cs_trap_t selector,vm_address_t region_addr,vm_size_t region_size)1756 txm_image4_reclaim_region(
1757 	image4_cs_trap_t selector,
1758 	vm_address_t region_addr,
1759 	vm_size_t region_size)
1760 {
1761 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1762 		txm_reclaim_region(region_addr, region_size);
1763 	}
1764 	return KERN_SUCCESS;
1765 }
1766 
1767 errno_t
txm_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size)1768 txm_image4_monitor_trap(
1769 	image4_cs_trap_t selector,
1770 	const void *input_data,
1771 	size_t input_size)
1772 {
1773 	txm_call_t txm_call = {
1774 		.selector = kTXMKernelSelectorImage4Dispatch,
1775 		.num_input_args = 5,
1776 	};
1777 
1778 	kern_return_t ret = txm_kernel_call(
1779 		&txm_call, selector,
1780 		input_data, input_size,
1781 		NULL, NULL);
1782 
1783 	/* Return 0 for success */
1784 	if (ret == KERN_SUCCESS) {
1785 		return 0;
1786 	}
1787 
1788 	/* Check for an errno_t return */
1789 	if (txm_call.txm_ret.returnCode == kTXMReturnCodeErrno) {
1790 		if (txm_call.txm_ret.errnoRet == 0) {
1791 			panic("image4 dispatch: unexpected success errno_t: %llu", selector);
1792 		}
1793 		return txm_call.txm_ret.errnoRet;
1794 	}
1795 
1796 	/* Return a generic error */
1797 	return EPERM;
1798 }
1799 
1800 
1801 #endif /* CONFIG_SPTM */
1802