xref: /xnu-12377.81.4/bsd/kern/code_signing/txm.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <stdarg.h>
24 #include <stdatomic.h>
25 #include <os/overflow.h>
26 #include <os/atomic_private.h>
27 #include <machine/atomic.h>
28 #include <mach/vm_param.h>
29 #include <mach/vm_map.h>
30 #include <mach/shared_region.h>
31 #include <vm/vm_kern_xnu.h>
32 #include <kern/zalloc.h>
33 #include <kern/kalloc.h>
34 #include <kern/assert.h>
35 #include <kern/locks.h>
36 #include <kern/recount.h>
37 #include <kern/sched_prim.h>
38 #include <kern/lock_rw.h>
39 #include <libkern/libkern.h>
40 #include <libkern/section_keywords.h>
41 #include <libkern/coretrust/coretrust.h>
42 #include <libkern/amfi/amfi.h>
43 #include <pexpert/pexpert.h>
44 #include <sys/vm.h>
45 #include <sys/proc.h>
46 #include <sys/codesign.h>
47 #include <sys/code_signing.h>
48 #include <sys/trust_caches.h>
49 #include <sys/sysctl.h>
50 #include <sys/reboot.h>
51 #include <uuid/uuid.h>
52 #include <IOKit/IOLib.h>
53 #include <IOKit/IOBSD.h>
54 
55 #if CONFIG_SPTM
56 /*
57  * The TrustedExecutionMonitor environment works in tandem with the SPTM to provide code
58  * signing and memory isolation enforcement for data structures critical to ensuring that
59  * all code executed on the system is authorized to do so.
60  *
61  * Unless the data is managed by TXM itself, XNU needs to page-align everything, make the
62  * relevant type transfer, and then reference the memory as read-only.
63  *
64  * TXM enforces concurrency on its side, but through the use of try-locks. Upon a failure
65  * in acquiring the lock, TXM will panic. As a result, in order to ensure single-threaded
 * behavior, the kernel also has to take some locks on its side before calling into TXM.
67  */
68 #include <sys/trusted_execution_monitor.h>
69 #include <pexpert/arm64/board_config.h>
70 
/* Lock group used for all locks within the kernel for TXM */
LCK_GRP_DECLARE(txm_lck_grp, "txm_code_signing_lck_grp");

#pragma mark Utilities

/* Number of thread stacks is known at build-time */
#define NUM_TXM_THREAD_STACKS (MAX_CPUS)

/* Backing storage for the TXM thread stacks handed out at runtime */
txm_thread_stack_t thread_stacks[NUM_TXM_THREAD_STACKS] = {0};

/* Singly-linked-list head for thread stacks */
SLIST_HEAD(thread_stack_head, _txm_thread_stack) thread_stacks_head =
    SLIST_HEAD_INITIALIZER(thread_stacks_head);

/* Protects thread_stacks_head; initialized in setup_thread_stacks() */
static decl_lck_mtx_data(, thread_stacks_lock);

/* Event address threads sleep on when no free thread stack is available */
static void *thread_stack_event = NULL;
86 
/*
 * Initialize the pool of TXM thread stacks.
 *
 * Each stack's kernel virtual (PAPT) mapping is provided by the SPTM through
 * the boot args. For each stack we resolve the physical address and the
 * pointer to the per-stack data region, then push it onto the free list.
 * Called once from code_signing_init() before any call into TXM is made.
 */
static void
setup_thread_stacks(void)
{
	extern const sptm_bootstrap_args_xnu_t *SPTMArgs;
	txm_thread_stack_t *thread_stack = NULL;

	/* Initialize each thread stack and add it to the list */
	for (uint32_t i = 0; i < NUM_TXM_THREAD_STACKS; i++) {
		thread_stack = &thread_stacks[i];

		/* Acquire the thread stack virtual mapping */
		thread_stack->thread_stack_papt = SPTMArgs->txm_thread_stacks[i];

		/* Acquire the thread stack physical page */
		thread_stack->thread_stack_phys = (uintptr_t)kvtophys_nofail(
			thread_stack->thread_stack_papt);

		/*
		 * Resolve the pointer to the thread stack data, which lives in the
		 * last 1024 bytes of the stack page. NOTE(review): the 1024-byte
		 * reservation presumably matches TXM's layout of TXMThreadStack_t
		 * at the top of the page -- confirm against the TXM headers.
		 */
		thread_stack->thread_stack_data =
		    (TXMThreadStack_t*)(thread_stack->thread_stack_papt + (PAGE_SIZE - 1024));

		/* Add thread stack to the list head */
		SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
	}

	/* Initialize the thread stacks lock */
	lck_mtx_init(&thread_stacks_lock, &txm_lck_grp, 0);
}
115 
116 static txm_thread_stack_t*
acquire_thread_stack(void)117 acquire_thread_stack(void)
118 {
119 	txm_thread_stack_t *thread_stack = NULL;
120 
121 	/* Lock the thread stack list */
122 	lck_mtx_lock(&thread_stacks_lock);
123 
124 	while (SLIST_EMPTY(&thread_stacks_head) == true) {
125 		lck_mtx_sleep(
126 			&thread_stacks_lock,
127 			LCK_SLEEP_DEFAULT,
128 			&thread_stack_event,
129 			THREAD_UNINT);
130 	}
131 
132 	if (SLIST_EMPTY(&thread_stacks_head) == true) {
133 		panic("unable to acquire a thread stack for TXM");
134 	}
135 
136 	/* Use the first available thread stack */
137 	thread_stack = SLIST_FIRST(&thread_stacks_head);
138 
139 	/* Remove the thread stack from the list */
140 	SLIST_REMOVE_HEAD(&thread_stacks_head, link);
141 
142 	/* Unlock the thread stack list */
143 	lck_mtx_unlock(&thread_stacks_lock);
144 
145 	/* Associate the thread stack with the current thread */
146 	thread_associate_txm_thread_stack(thread_stack->thread_stack_phys);
147 
148 	return thread_stack;
149 }
150 
/*
 * Return a TXM thread stack to the free pool.
 *
 * Disassociates the stack from the current thread, pushes it back onto the
 * free list, and wakes any threads blocked in acquire_thread_stack().
 */
static void
release_thread_stack(
	txm_thread_stack_t* thread_stack)
{
	/* Remove the TXM thread stack association with the current thread */
	thread_disassociate_txm_thread_stack(thread_stack->thread_stack_phys);

	/* Lock the thread stack list */
	lck_mtx_lock(&thread_stacks_lock);

	/* Add the thread stack at the list head */
	SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);

	/* Unlock the thread stack list */
	lck_mtx_unlock(&thread_stacks_lock);

	/* Wake up any threads waiting to acquire a thread stack */
	thread_wakeup(&thread_stack_event);
}
170 
171 static kern_return_t
txm_parse_return(TXMReturn_t txm_ret)172 txm_parse_return(
173 	TXMReturn_t txm_ret)
174 {
175 	switch (txm_ret.returnCode) {
176 	case kTXMSuccess:
177 		return KERN_SUCCESS;
178 
179 	case kTXMReturnOutOfMemory:
180 		return KERN_RESOURCE_SHORTAGE;
181 
182 	case kTXMReturnNotFound:
183 		return KERN_NOT_FOUND;
184 
185 	case kTXMReturnNotSupported:
186 		return KERN_NOT_SUPPORTED;
187 
188 #if kTXMKernelAPIVersion >= 6
189 	case kTXMReturnTryAgain:
190 		return KERN_OPERATION_TIMED_OUT;
191 #endif
192 
193 	default:
194 		return KERN_FAILURE;
195 	}
196 }
197 
198 static void
txm_print_return(TXMKernelSelector_t selector,TXMReturn_t txm_ret)199 txm_print_return(
200 	TXMKernelSelector_t selector,
201 	TXMReturn_t txm_ret)
202 {
203 	/*
204 	 * We specifically use IOLog instead of printf since printf is compiled out on
205 	 * RELEASE kernels. We want to ensure that errors from TXM are captured within
206 	 * sysdiagnoses from the field.
207 	 */
208 
209 	if (txm_ret.returnCode == kTXMSuccess) {
210 		return;
211 	} else if (txm_ret.returnCode == kTXMReturnTrustCache) {
212 		IOLog("TXM [Error]: TrustCache: selector: %u | 0x%02X | 0x%02X | %u\n",
213 		    selector, txm_ret.tcRet.component, txm_ret.tcRet.error, txm_ret.tcRet.uniqueError);
214 	} else if (txm_ret.returnCode == kTXMReturnCodeSignature) {
215 		IOLog("TXM [Error]: CodeSignature: selector: %u | 0x%02X | 0x%02X | %u\n",
216 		    selector, txm_ret.csRet.component, txm_ret.csRet.error, txm_ret.csRet.uniqueError);
217 	} else if (txm_ret.returnCode == kTXMReturnCodeErrno) {
218 		IOLog("TXM [Error]: Errno: selector: %u | %d\n",
219 		    selector, txm_ret.errnoRet);
220 	} else {
221 		IOLog("TXM [Error]: selector: %u | %u\n",
222 		    selector, txm_ret.returnCode);
223 	}
224 }
225 
226 #pragma mark Page Allocation
227 
/*
 * Donate one physical page to TXM's internal free list.
 *
 * Used when a TXM call reports kTXMReturnOutOfMemory so the caller can retry.
 * The pmap layer transfers the page's type to TXM as part of the allocation;
 * the selector call then links it into TXM's free list. failure_fatal means
 * any error here panics inside txm_kernel_call().
 */
static void
txm_add_page(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorAddFreeListPage,
		.failure_fatal = true,
		.num_input_args = 1
	};

	/* Allocate a page from the VM -- transfers page to TXM internally */
	vm_map_address_t phys_addr = pmap_txm_allocate_page();

	/* Add this page to the TXM free list */
	txm_kernel_call(&txm_call, phys_addr);
}
243 
244 #pragma mark Calls
245 
246 static void
txm_kernel_call_registers_setup(txm_call_t * parameters,sptm_call_regs_t * registers,va_list args)247 txm_kernel_call_registers_setup(
248 	txm_call_t *parameters,
249 	sptm_call_regs_t *registers,
250 	va_list args)
251 {
252 	/*
253 	 * We are only ever allowed a maximum of 7 arguments for calling into TXM.
254 	 * This is because the SPTM dispatch only sets up registers x0-x7 for the
255 	 * call, and x0 is always reserved for passing in a thread stack for TXM
256 	 * to operate on.
257 	 */
258 
259 	switch (parameters->num_input_args) {
260 	case 7:
261 		registers->x1 = va_arg(args, uintptr_t);
262 		registers->x2 = va_arg(args, uintptr_t);
263 		registers->x3 = va_arg(args, uintptr_t);
264 		registers->x4 = va_arg(args, uintptr_t);
265 		registers->x5 = va_arg(args, uintptr_t);
266 		registers->x6 = va_arg(args, uintptr_t);
267 		registers->x7 = va_arg(args, uintptr_t);
268 		break;
269 
270 	case 6:
271 		registers->x1 = va_arg(args, uintptr_t);
272 		registers->x2 = va_arg(args, uintptr_t);
273 		registers->x3 = va_arg(args, uintptr_t);
274 		registers->x4 = va_arg(args, uintptr_t);
275 		registers->x5 = va_arg(args, uintptr_t);
276 		registers->x6 = va_arg(args, uintptr_t);
277 		break;
278 
279 	case 5:
280 		registers->x1 = va_arg(args, uintptr_t);
281 		registers->x2 = va_arg(args, uintptr_t);
282 		registers->x3 = va_arg(args, uintptr_t);
283 		registers->x4 = va_arg(args, uintptr_t);
284 		registers->x5 = va_arg(args, uintptr_t);
285 		break;
286 
287 	case 4:
288 		registers->x1 = va_arg(args, uintptr_t);
289 		registers->x2 = va_arg(args, uintptr_t);
290 		registers->x3 = va_arg(args, uintptr_t);
291 		registers->x4 = va_arg(args, uintptr_t);
292 		break;
293 
294 	case 3:
295 		registers->x1 = va_arg(args, uintptr_t);
296 		registers->x2 = va_arg(args, uintptr_t);
297 		registers->x3 = va_arg(args, uintptr_t);
298 		break;
299 
300 	case 2:
301 		registers->x1 = va_arg(args, uintptr_t);
302 		registers->x2 = va_arg(args, uintptr_t);
303 		break;
304 
305 	case 1:
306 		registers->x1 = va_arg(args, uintptr_t);
307 		break;
308 
309 	case 0:
310 		break;
311 
312 	default:
313 		panic("invalid number of arguments to TXM: selector: %u | %u",
314 		    parameters->selector, parameters->num_input_args);
315 	}
316 }
317 
/*
 * Perform a single call into TXM.
 *
 * Acquires a TXM thread stack, marshals the variadic arguments into the SPTM
 * call registers (x0 carries the thread stack's physical address), traps into
 * TXM, and copies the return code and any return words out of the shared
 * context data region before releasing the stack.
 *
 * NOTE: this consumes `args` via va_arg; per the C standard the caller's
 * va_list is indeterminate afterwards.
 */
static TXMReturn_t
txm_kernel_call_internal(
	txm_call_t *parameters,
	va_list args)
{
	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
	sptm_call_regs_t txm_registers = {0};
	txm_thread_stack_t *thread_stack = NULL;
	const TXMThreadStack_t *thread_stack_data = NULL;
	const TXMSharedContextData_t *shared_context_data = NULL;

	/* Obtain a stack for this call */
	thread_stack = acquire_thread_stack();
	thread_stack_data = thread_stack->thread_stack_data;
	shared_context_data = &thread_stack_data->sharedData;

	/* Setup argument registers */
	txm_registers.x0 = thread_stack->thread_stack_phys;
	txm_kernel_call_registers_setup(parameters, &txm_registers, args);

	/* Track resource usage */
	recount_enter_secure();

	/* Call into TXM */
	txm_enter(parameters->selector, &txm_registers);

	recount_leave_secure();

	txm_ret = (TXMReturn_t){.rawValue = shared_context_data->txmReturnCode};
	parameters->txm_ret = txm_ret;

	if (parameters->txm_ret.returnCode == kTXMSuccess) {
		/* Copy out return words, bounding against the shared-buffer size */
		parameters->num_return_words = shared_context_data->txmNumReturnWords;
		if (parameters->num_return_words > kTXMStackReturnWords) {
			panic("received excessive return words from TXM: selector: %u | %llu",
			    parameters->selector, parameters->num_return_words);
		}

		for (uint64_t i = 0; i < parameters->num_return_words; i++) {
			parameters->return_words[i] = shared_context_data->txmReturnWords[i];
		}
	}

	/* Release the thread stack as it is no longer needed */
	release_thread_stack(thread_stack);
	thread_stack_data = NULL;
	shared_context_data = NULL;

	return txm_ret;
}
368 
369 kern_return_t
txm_kernel_call(txm_call_t * parameters,...)370 txm_kernel_call(
371 	txm_call_t *parameters, ...)
372 {
373 	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
374 	kern_return_t ret = KERN_DENIED;
375 	va_list args;
376 
377 	/* Start the variadic arguments list */
378 	va_start(args, parameters);
379 
380 	do {
381 		txm_ret = txm_kernel_call_internal(parameters, args);
382 		if (txm_ret.returnCode == kTXMReturnOutOfMemory) {
383 			if (parameters->selector == kTXMKernelSelectorAddFreeListPage) {
384 				panic("received out-of-memory error when adding a free page to TXM");
385 			}
386 			txm_add_page();
387 		}
388 	} while (txm_ret.returnCode == kTXMReturnOutOfMemory);
389 
390 	/* Clean up the variadic arguments list */
391 	va_end(args);
392 
393 	/* Print all TXM logs from the log buffer */
394 	if (parameters->skip_logs == false) {
395 		txm_print_logs();
396 	}
397 
398 	/* Print the return code from TXM -- only prints for an error */
399 	if (parameters->failure_silent != true) {
400 		if (parameters->failure_code_silent != txm_ret.returnCode) {
401 			txm_print_return(parameters->selector, txm_ret);
402 		}
403 	}
404 
405 	/*
406 	 * To ease the process of calling into TXM, and to also reduce the number of
407 	 * lines of code for each call site, the txm_call_t offers some properties
408 	 * we can enforce over here. Go through these, and panic in case they aren't
409 	 * honored.
410 	 *
411 	 * NOTE: We check for "<" instead of "!=" for the number of return words we
412 	 * get back from TXM since this helps in forward development. If the kernel
413 	 * and TXM are proceeding at different project cadences, we do not want to
414 	 * gate adding more return words from TXM on the kernel first adopting the
415 	 * new number of return words.
416 	 */
417 	ret = txm_parse_return(txm_ret);
418 
419 	if (parameters->failure_fatal && (ret != KERN_SUCCESS)) {
420 		panic("received fatal error for a selector from TXM: selector: %u | 0x%0llX",
421 		    parameters->selector, txm_ret.rawValue);
422 	} else if (parameters->num_return_words < parameters->num_output_args) {
423 		/* Only panic if return was a success */
424 		if (ret == KERN_SUCCESS) {
425 			panic("received fewer than expected return words from TXM: selector: %u | %llu",
426 			    parameters->selector, parameters->num_return_words);
427 		}
428 	}
429 
430 	return ret;
431 }
432 
433 void
txm_transfer_region(vm_address_t addr,vm_size_t size)434 txm_transfer_region(
435 	vm_address_t addr,
436 	vm_size_t size)
437 {
438 	vm_address_t addr_end = 0;
439 	vm_size_t size_aligned = round_page(size);
440 
441 	if ((addr & PAGE_MASK) != 0) {
442 		panic("attempted to transfer non-page-aligned memory to TXM: %p", (void*)addr);
443 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
444 		panic("overflow on range to be transferred to TXM: %p | %lu",
445 		    (void*)addr, size);
446 	}
447 
448 	/* Make the memory read-only first (transfer will panic otherwise) */
449 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ);
450 
451 	/* Transfer each physical page to be TXM_DEFAULT */
452 	for (vm_address_t page = addr; page < addr_end; page += PAGE_SIZE) {
453 		pmap_txm_transfer_page(page);
454 	}
455 }
456 
457 void
txm_reclaim_region(vm_address_t addr,vm_size_t size)458 txm_reclaim_region(
459 	vm_address_t addr,
460 	vm_size_t size)
461 {
462 	vm_address_t addr_end = 0;
463 	vm_size_t size_aligned = round_page(size);
464 
465 	if ((addr & PAGE_MASK) != 0) {
466 		panic("attempted to reclaim non-page-aligned memory from TXM: %p", (void*)addr);
467 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
468 		panic("overflow on range to be reclaimed from TXM: %p | %lu",
469 		    (void*)addr, size);
470 	}
471 
472 	/*
473 	 * We can only reclaim once TXM has transferred the memory range back to the
474 	 * kernel. Hence, we simply try and switch permissions to read-write. If TXM
475 	 * hasn't transferred pages, this then should panic.
476 	 */
477 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ | VM_PROT_WRITE);
478 }
479 
/* Read-only views into TXM's log ring buffer; set once by get_logging_info() */
static SECURITY_READ_ONLY_LATE(const char*) txm_log_page = NULL;
static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_head = NULL;
static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_sync = NULL;

/* Protects log_head, the kernel-side read cursor into the log buffer */
static decl_lck_mtx_data(, log_lock);
static uint32_t log_head = 0;
486 
/*
 * Drain and print any new entries from TXM's log ring buffer.
 *
 * The lock only guards computation of the [start, end) slot range; the actual
 * printing is deliberately done unlocked (see the design notes below for why
 * this lossy-but-simple scheme is acceptable). No-op before logging is set up.
 */
void
txm_print_logs(void)
{
	uint32_t start_index = 0;
	uint32_t end_index = 0;

	/*
	 * The design here is very simple. TXM keeps adding slots to its circular buffer
	 * and the kernel attempts to read each one and print it, maintaining its own head
	 * for the log.
	 *
	 * This design is by nature lazy. TXM doesn't know or care if the kernel has gone
	 * through and printed any of the logs, so it'll just keep writing into its buffer
	 * and then circle around when it becomes full.
	 *
	 * This is fine most of the time since there are a decent amount of slots in the
	 * log buffer. We mostly have an issue when TXM is adding so many logs so quickly
	 * such that it wraps around and starts overwriting logs which haven't been seen
	 * by the kernel. If this were to happen, TXM's log head may circle around the
	 * head maintained by the kernel, causing a lot of logs to be missed, since the
	 * kernel only attempts the number of logs in-between the two heads.
	 *
	 * The fix for that is complicated, and until we see an actual impact, we're going
	 * to keep the simpler design in place.
	 */

	/* Return if the logging hasn't been setup yet */
	if (txm_log_sync == NULL) {
		return;
	}

	/*
	 * Holding the log lock and printing can cause lots of issues since printing can
	 * be rather slow. While we make it a point to keep the logging buffer quiet, some
	 * actions (such as loading trust caches) are still very chatty.
	 *
	 * As a result, we optimize this routine to ensure that the lock itself isn't held
	 * for very long. All we need to do within the critical section is calculate the
	 * starting and ending index of the log buffer. The actual printing doesn't need
	 * to be done with the lock held.
	 */
	lck_mtx_lock(&log_lock);

	start_index = log_head;
	end_index = os_atomic_load(txm_log_head, relaxed) % kTXMLogSlots;

	/* Update the log head with the new index */
	log_head = end_index;

	/* Release the log lock */
	lck_mtx_unlock(&log_lock);

	if (start_index != end_index) {
		/* Use load acquire here to sync up with all writes to the buffer */
		os_atomic_load(txm_log_sync, acquire);

		while (start_index != end_index) {
			const char *slot = txm_log_page + (start_index * kTXMLogSlotSize);

			/* We add newlines after each log statement since TXM does not */
			printf("%s\n", slot);

			start_index = (start_index + 1) % kTXMLogSlots;
		}
	}
}
553 
554 #pragma mark Initialization
555 
/* Pointers into TXM-owned data regions; populated by get_code_signing_info() */
SECURITY_READ_ONLY_LATE(const TXMReadWriteData_t*) txm_rw_data = NULL;
SECURITY_READ_ONLY_LATE(const TXMReadOnlyData_t*) txm_ro_data = NULL;
SECURITY_READ_ONLY_LATE(const CSConfig_t*) txm_cs_config = NULL;
SECURITY_READ_ONLY_LATE(CSRestrictedModeState_t*) txm_restricted_mode_state = NULL;
SECURITY_READ_ONLY_LATE(const TXMMetrics_t*) txm_metrics = NULL;

/* Points at TXM's live developer-mode flag */
SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = NULL;
static SECURITY_READ_ONLY_LATE(bool) code_signing_enabled = true;
static SECURITY_READ_ONLY_LATE(uint32_t) managed_signature_size = 0;

/* Serializes compilation-service cdhash operations and unregistration */
static decl_lck_mtx_data(, compilation_service_lock);
static decl_lck_mtx_data(, unregister_sync_lock);
568 
/*
 * Query TXM for the location of its log ring buffer.
 *
 * Populates txm_log_page/txm_log_head/txm_log_sync. Fatal on failure since
 * all subsequent TXM calls expect to be able to drain the log buffer.
 */
static void
get_logging_info(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGetLogInfo,
		.failure_fatal = true,
		.num_output_args = 3
	};
	txm_kernel_call(&txm_call);

	txm_log_page = (const char*)txm_call.return_words[0];
	txm_log_head = (const uint32_t*)txm_call.return_words[1];
	txm_log_sync = (const uint32_t*)txm_call.return_words[2];
}
583 
/*
 * Query TXM for its code signing configuration and wire up the global
 * pointers into TXM's read-only and read-write data regions.
 */
static void
get_code_signing_info(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGetCodeSigningInfo,
		.failure_fatal = true,
		.num_output_args = 6
	};
	txm_kernel_call(&txm_call);

	/*
	 * Several return slots have been repurposed over time:
	 *
	 * return_words[0] previously carried the code_signing_enabled flag; it
	 * now carries the pointer to TXM's read-write data (the enabled state
	 * is derived from TXM's read-only data below).
	 *
	 * return_words[2] is unused for now. This was previously the metrics
	 * field, but we've since switched to acquiring that value from TXM's
	 * read-write data.
	 *
	 * return_words[4] is unused for now. This was previously the
	 * txm_cs_config field, but we've since switched to acquiring that value
	 * from TXM's read-only data.
	 */
	txm_rw_data = (TXMReadWriteData_t*)txm_call.return_words[0];
	developer_mode_enabled = (bool*)txm_call.return_words[1];
	managed_signature_size = (uint32_t)txm_call.return_words[3];
	txm_ro_data = (TXMReadOnlyData_t*)txm_call.return_words[5];
	txm_metrics = &txm_rw_data->metrics;

	/* Derive code_signing_enabled from TXM's read-only data */
	code_signing_enabled = txm_ro_data->codeSigningDisabled == false;

	/* Set txm_cs_config based on read-only data */
	txm_cs_config = &txm_ro_data->CSConfiguration;

	/* Only setup when REM is supported on the platform */
	if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
		txm_restricted_mode_state = txm_ro_data->restrictedModeState;
	}

#if kTXMKernelAPIVersion >= 11
	research_mode_enabled = txm_ro_data->buildType.research;
	extended_research_mode_enabled = txm_ro_data->buildType.extendedResearch;
#endif

	/* Setup the number of boot trust caches */
	num_static_trust_caches = os_atomic_load(&txm_metrics->trustCaches.numStatic, relaxed);
	num_engineering_trust_caches = os_atomic_load(&txm_metrics->trustCaches.numEngineering, relaxed);
}
633 
/*
 * One-time initialization of the TXM code signing subsystem.
 *
 * Sets up the thread stack pool and all locks, queries TXM for its logging
 * and code signing configuration, and enforces signed code when code signing
 * is enabled. Must run before any other TXM interface in this file is used.
 */
void
code_signing_init(void)
{
	printf("libTXM_KernelVersion: %u\n", libTrustedExecutionMonitor_KernelVersion);
	printf("libTXM_Image4Version: %u\n", libTrustedExecutionMonitor_Image4Version);

	/* Setup the thread stacks used by TXM */
	setup_thread_stacks();

	/* Setup the logging lock */
	lck_mtx_init(&log_lock, &txm_lck_grp, 0);

	/* Setup TXM logging information */
	get_logging_info();

	/* Setup code signing configuration */
	get_code_signing_info();

	/* Setup all the other locks we need */
	lck_mtx_init(&compilation_service_lock, &txm_lck_grp, 0);
	lck_mtx_init(&unregister_sync_lock, &txm_lck_grp, 0);

	/* Require signed code when monitor is enabled */
	if (code_signing_enabled == true) {
		cs_debug_fail_on_unsigned_code = 1;
	}
}
661 
/*
 * Tell TXM the system is entering lockdown mode. Any failure is fatal.
 */
void
txm_enter_lockdown_mode(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorEnterLockdownMode,
		.failure_fatal = true,
	};
	txm_kernel_call(&txm_call);
}
671 
/*
 * Query TXM for the physical address and size of its secure channel page.
 *
 * Either out-parameter may be NULL if the caller is not interested in it.
 * Returns KERN_NOT_SUPPORTED when TXM (or this kernel's API version) does not
 * provide a secure channel; any other TXM failure is treated as fatal.
 */
kern_return_t
txm_secure_channel_shared_page(
	uint64_t *secure_channel_phys,
	size_t *secure_channel_size)
{
#if kTXMKernelAPIVersion >= 5
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGetSecureChannelAddr,
		.num_output_args = 2
	};

	kern_return_t ret = txm_kernel_call(&txm_call);
	if (ret == KERN_NOT_SUPPORTED) {
		return ret;
	} else if (ret != KERN_SUCCESS) {
		panic("unexpected failure for TXM secure channel: %d", ret);
	}

	/* Return the physical address */
	if (secure_channel_phys != NULL) {
		*secure_channel_phys = txm_call.return_words[0];
	}

	/* Return the size */
	if (secure_channel_size != NULL) {
		*secure_channel_size = txm_call.return_words[1];
	}

	return KERN_SUCCESS;
#else
	(void)secure_channel_phys;
	(void)secure_channel_size;
	return KERN_NOT_SUPPORTED;
#endif
}
707 
708 #pragma mark Developer Mode
709 
/*
 * Toggle developer mode in TXM to the given state. Failure is fatal.
 */
void
txm_toggle_developer_mode(bool state)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorDeveloperModeToggle,
		.failure_fatal = true,
		.num_input_args = 1
	};

	txm_kernel_call(&txm_call, state);
}
721 
722 #pragma mark Restricted Execution Mode
723 
/*
 * Ask TXM to enable restricted execution mode; returns TXM's verdict.
 */
kern_return_t
txm_rem_enable(void)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorEnableRestrictedMode
	};
	return txm_kernel_call(&txm_call);
}
732 
733 kern_return_t
txm_rem_state(void)734 txm_rem_state(void)
735 {
736 	if (txm_restricted_mode_state == NULL) {
737 		return KERN_NOT_SUPPORTED;
738 	}
739 
740 	CSReturn_t cs_ret = restrictedModeStatus(txm_restricted_mode_state);
741 	if (cs_ret.error == kCSReturnSuccess) {
742 		return KERN_SUCCESS;
743 	}
744 	return KERN_DENIED;
745 }
746 
747 #pragma mark Device State
748 
/*
 * Notify TXM of a device state change. No-op on older TXM API versions;
 * failure is fatal where supported.
 */
void
txm_update_device_state(void)
{
#if kTXMKernelAPIVersion >= 6
	txm_call_t txm_call = {
		.selector = kTXMSelectorUpdateDeviceState,
		.failure_fatal = true
	};
	txm_kernel_call(&txm_call);
#endif
}
760 
/*
 * Inform TXM that the security boot mode has been finalized. No-op on older
 * TXM API versions; failure is fatal where supported.
 */
void
txm_complete_security_boot_mode(
	__unused uint32_t security_boot_mode)
{
#if kTXMKernelAPIVersion >= 6
	txm_call_t txm_call = {
		.selector = kTXMSelectorCompleteSecurityBootMode,
		.num_input_args = 1,
		.failure_fatal = true
	};
	txm_kernel_call(&txm_call, security_boot_mode);
#endif
}
774 
775 #pragma mark Code Signing and Provisioning Profiles
776 
/* Report whether code signing enforcement is enabled (cached at init). */
bool
txm_code_signing_enabled(void)
{
	return code_signing_enabled;
}
782 
/* Report the size of a TXM-managed code signature (cached at init). */
vm_size_t
txm_managed_code_signature_size(void)
{
	return managed_signature_size;
}
788 
/*
 * Register a provisioning profile with TXM.
 *
 * Copies the profile blob into a fresh page-backed allocation, transfers that
 * range to TXM, and registers it. On success, *profile_obj receives TXM's
 * opaque profile handle and the transferred memory stays with TXM until
 * txm_unregister_provisioning_profile(). On failure the range is reclaimed
 * and freed. NOTE(review): the failure path assumes TXM relinquishes the
 * transferred pages when registration fails -- txm_reclaim_region() would
 * panic otherwise; confirm against the TXM registration contract.
 */
kern_return_t
txm_register_provisioning_profile(
	const void *profile_blob,
	const size_t profile_blob_size,
	void **profile_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorRegisterProvisioningProfile,
		.num_input_args = 2,
		.num_output_args = 1
	};
	vm_address_t payload_addr = 0;
	kern_return_t ret = KERN_DENIED;

	/* We need to allocate page-wise in order to transfer the range to TXM */
	ret = kmem_alloc(kernel_map, &payload_addr, profile_blob_size,
	    KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
	if (ret != KERN_SUCCESS) {
		printf("unable to allocate memory for profile payload: %d\n", ret);
		goto exit;
	}

	/* Copy the contents into the allocation */
	memcpy((void*)payload_addr, profile_blob, profile_blob_size);

	/* Transfer the memory range to TXM */
	txm_transfer_region(payload_addr, profile_blob_size);

	ret = txm_kernel_call(&txm_call, payload_addr, profile_blob_size);
	if (ret == KERN_SUCCESS) {
		*profile_obj = (void*)txm_call.return_words[0];
	}

exit:
	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
		/* Reclaim this memory range */
		txm_reclaim_region(payload_addr, profile_blob_size);

		/* Free the memory range */
		kmem_free(kernel_map, payload_addr, profile_blob_size);
		payload_addr = 0;
	}

	return ret;
}
834 
/*
 * Ask TXM to validate the trust signature over a registered profile.
 * Returns KERN_SUCCESS unconditionally on TXM API versions predating the
 * selector.
 */
kern_return_t
txm_trust_provisioning_profile(
	__unused void *profile_obj,
	__unused const void *sig_data,
	__unused size_t sig_size)
{
#if kTXMKernelAPIVersion >= 7
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorTrustProvisioningProfile,
		.num_input_args = 3
	};

	return txm_kernel_call(&txm_call, profile_obj, sig_data, sig_size);
#else
	/* The TXM selector hasn't yet landed */
	return KERN_SUCCESS;
#endif
}
853 
/*
 * Unregister a provisioning profile from TXM.
 *
 * On success TXM returns the address and size of the profile's backing
 * memory range, which is reclaimed from TXM and freed here. On failure the
 * memory stays with TXM and the TXM error is returned.
 */
kern_return_t
txm_unregister_provisioning_profile(
	void *profile_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorUnregisterProvisioningProfile,
		.num_input_args = 1,
		.num_output_args = 2
	};
	vm_address_t profile_addr = 0;
	vm_size_t profile_size = 0;
	kern_return_t ret = KERN_DENIED;

	ret = txm_kernel_call(&txm_call, profile_obj);
	if (ret != KERN_SUCCESS) {
		return ret;
	}

	profile_addr = txm_call.return_words[0];
	profile_size = txm_call.return_words[1];

	/* Reclaim this memory range */
	txm_reclaim_region(profile_addr, profile_size);

	/* Free the memory range */
	kmem_free(kernel_map, profile_addr, profile_size);

	return KERN_SUCCESS;
}
883 
/*
 * Associate a registered provisioning profile with a code signature object.
 */
kern_return_t
txm_associate_provisioning_profile(
	void *sig_obj,
	void *profile_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorAssociateProvisioningProfile,
		.num_input_args = 2,
	};

	return txm_kernel_call(&txm_call, sig_obj, profile_obj);
}
896 
/*
 * Disassociate a provisioning profile from a code signature object,
 * serialized against profile unregistration via unregister_sync_lock.
 */
kern_return_t
txm_disassociate_provisioning_profile(
	void *sig_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorDisassociateProvisioningProfile,
		.num_input_args = 1,
	};

	/*
	 * Take the unregistration sync lock.
	 * For more information: rdar://99205627.
	 */
	lck_mtx_lock(&unregister_sync_lock);

	/* Disassociate the profile from the signature */
	kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);

	/* Release the unregistration sync lock */
	lck_mtx_unlock(&unregister_sync_lock);

	return ret;
}
920 
/*
 * Authorize a CDHash for the compilation service. Serialized against
 * txm_match_compilation_service_cdhash() via compilation_service_lock.
 */
void
txm_set_compilation_service_cdhash(
	const uint8_t cdhash[CS_CDHASH_LEN])
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorAuthorizeCompilationServiceCDHash,
		.num_input_args = 1,
	};

	lck_mtx_lock(&compilation_service_lock);
	txm_kernel_call(&txm_call, cdhash);
	lck_mtx_unlock(&compilation_service_lock);
}
934 
935 bool
txm_match_compilation_service_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])936 txm_match_compilation_service_cdhash(
937 	const uint8_t cdhash[CS_CDHASH_LEN])
938 {
939 	txm_call_t txm_call = {
940 		.selector = kTXMKernelSelectorMatchCompilationServiceCDHash,
941 		.failure_silent = true,
942 		.num_input_args = 1,
943 		.num_output_args = 1,
944 	};
945 	kern_return_t ret = KERN_DENIED;
946 
947 	/* Be safe and take the lock (avoid thread collisions) */
948 	lck_mtx_lock(&compilation_service_lock);
949 	ret = txm_kernel_call(&txm_call, cdhash);
950 	lck_mtx_unlock(&compilation_service_lock);
951 
952 	if (ret == KERN_SUCCESS) {
953 		return true;
954 	}
955 	return false;
956 }
957 
958 void
txm_set_local_signing_public_key(const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])959 txm_set_local_signing_public_key(
960 	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
961 {
962 	txm_call_t txm_call = {
963 		.selector = kTXMKernelSelectorSetLocalSigningPublicKey,
964 		.num_input_args = 1,
965 	};
966 
967 	txm_kernel_call(&txm_call, public_key);
968 }
969 
970 uint8_t*
txm_get_local_signing_public_key(void)971 txm_get_local_signing_public_key(void)
972 {
973 	txm_call_t txm_call = {
974 		.selector = kTXMKernelSelectorGetLocalSigningPublicKey,
975 		.num_output_args = 1,
976 	};
977 	kern_return_t ret = KERN_DENIED;
978 
979 	ret = txm_kernel_call(&txm_call);
980 	if (ret != KERN_SUCCESS) {
981 		return NULL;
982 	}
983 
984 	return (uint8_t*)txm_call.return_words[0];
985 }
986 
987 void
txm_unrestrict_local_signing_cdhash(const uint8_t cdhash[CS_CDHASH_LEN])988 txm_unrestrict_local_signing_cdhash(
989 	const uint8_t cdhash[CS_CDHASH_LEN])
990 {
991 	txm_call_t txm_call = {
992 		.selector = kTXMKernelSelectorAuthorizeLocalSigningCDHash,
993 		.num_input_args = 1,
994 	};
995 
996 	txm_kernel_call(&txm_call, cdhash);
997 }
998 
999 kern_return_t
txm_register_code_signature(const vm_address_t signature_addr,const vm_size_t signature_size,const vm_offset_t code_directory_offset,const char * signature_path,void ** sig_obj,vm_address_t * txm_signature_addr)1000 txm_register_code_signature(
1001 	const vm_address_t signature_addr,
1002 	const vm_size_t signature_size,
1003 	const vm_offset_t code_directory_offset,
1004 	const char *signature_path,
1005 	void **sig_obj,
1006 	vm_address_t *txm_signature_addr)
1007 {
1008 	txm_call_t txm_call = {
1009 		.selector = kTXMKernelSelectorRegisterCodeSignature,
1010 		.num_input_args = 3,
1011 		.num_output_args = 2,
1012 	};
1013 	kern_return_t ret = KERN_DENIED;
1014 
1015 	/*
1016 	 * TXM performs more exhaustive validation of the code signature and figures
1017 	 * out the best code directory to use on its own. As a result, this offset here
1018 	 * is not used.
1019 	 */
1020 	(void)code_directory_offset;
1021 
1022 	/*
1023 	 * If the signature is large enough to not fit within TXM's managed signature
1024 	 * size, then we need to transfer it over so it is owned by TXM.
1025 	 */
1026 	if (signature_size > txm_managed_code_signature_size()) {
1027 		txm_transfer_region(signature_addr, signature_size);
1028 	}
1029 
1030 	ret = txm_kernel_call(
1031 		&txm_call,
1032 		signature_addr,
1033 		signature_size,
1034 		signature_path);
1035 
1036 	if (ret != KERN_SUCCESS) {
1037 		goto exit;
1038 	}
1039 
1040 	*sig_obj = (void*)txm_call.return_words[0];
1041 	*txm_signature_addr = txm_call.return_words[1];
1042 
1043 exit:
1044 	if ((ret != KERN_SUCCESS) && (signature_size > txm_managed_code_signature_size())) {
1045 		txm_reclaim_region(signature_addr, signature_size);
1046 	}
1047 
1048 	return ret;
1049 }
1050 
/*
 * Unregister a code signature object from TXM.
 *
 * On success, TXM returns the signature's backing memory range; if that
 * memory was transferred to TXM at registration time (i.e. it is not
 * TXM-managed slab memory), it is reclaimed back to the kernel here.
 * Returns KERN_ABORTED (and deliberately leaks) when skipping the
 * unregistration during system shutdown.
 */
kern_return_t
txm_unregister_code_signature(
	void *sig_obj)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorUnregisterCodeSignature,
		.failure_fatal = true,
		.num_input_args = 1,
		.num_output_args = 2,
	};
	TXMCodeSignature_t *cs_obj = sig_obj;
	vm_address_t signature_addr = 0;
	vm_size_t signature_size = 0;
	bool txm_managed = false;

	/*
	 * Unregistering a code signature can cause lock contention in TXM against a
	 * set of other functions. The unregistration operation is very common when the
	 * system is about to reboot because the VFS layer unmounts all volumes.
	 *
	 * In order to avoid this issue, we detect if the code signature in question
	 * has been mapped in other address spaces, and if so, we avoid unregistering
	 * the code signature when we're about to shut down. This leaks memory, but
	 * we're about to shut down.
	 */
	if ((cs_obj->referenceCount > 0) && (get_system_inshutdown() != 0)) {
		printf("TXM [XNU]: unregistration of signature skipped as system is in shutdown\n");
		return KERN_ABORTED;
	}

	/* Check if the signature memory is TXM managed */
	txm_managed = cs_obj->sptmType != TXM_BULK_DATA;

	/*
	 * Take the unregistration sync lock.
	 * For more information: rdar://99205627.
	 */
	lck_mtx_lock(&unregister_sync_lock);

	/* Unregister the signature from TXM -- cannot fail (failure_fatal) */
	txm_kernel_call(&txm_call, sig_obj);

	/* Release the unregistration sync lock */
	lck_mtx_unlock(&unregister_sync_lock);

	/* TXM hands back the signature's backing range */
	signature_addr = txm_call.return_words[0];
	signature_size = txm_call.return_words[1];

	/* Reclaim the memory range in case we need to */
	if (txm_managed == false) {
		txm_reclaim_region(signature_addr, signature_size);
	}

	return KERN_SUCCESS;
}
1106 
/*
 * Ask TXM to validate a registered code signature object. On success,
 * optionally reports the trust level TXM computed for the signature
 * through "trust_level".
 */
kern_return_t
txm_verify_code_signature(
	void *sig_obj,
	uint32_t *trust_level)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorValidateCodeSignature,
		.num_input_args = 1,
	};
	kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);

	if ((ret == KERN_SUCCESS) && (trust_level != NULL)) {
		/*
		 * Absolutely gross, but it's not worth linking all of libCodeSignature just for
		 * this simple change. We should either return the trust level from TXM, or when
		 * we adopt libCodeSignature more broadly, then use an accessor function.
		 */
		*trust_level = ((TXMCodeSignature_t*)sig_obj)->sig.trustLevel;
	}
	return ret;
}
1128 
1129 kern_return_t
txm_reconstitute_code_signature(void * sig_obj,vm_address_t * unneeded_addr,vm_size_t * unneeded_size)1130 txm_reconstitute_code_signature(
1131 	void *sig_obj,
1132 	vm_address_t *unneeded_addr,
1133 	vm_size_t *unneeded_size)
1134 {
1135 	txm_call_t txm_call = {
1136 		.selector = kTXMKernelSelectorReconstituteCodeSignature,
1137 		.failure_fatal = true,
1138 		.num_input_args = 1,
1139 		.num_output_args = 2,
1140 	};
1141 	vm_address_t return_addr = 0;
1142 	vm_size_t return_size = 0;
1143 
1144 	/* Reconstitute the code signature -- cannot fail */
1145 	txm_kernel_call(&txm_call, sig_obj);
1146 
1147 	return_addr = txm_call.return_words[0];
1148 	return_size = txm_call.return_words[1];
1149 
1150 	/* Reclaim the memory region if we need to */
1151 	if ((return_addr != 0) && (return_size != 0)) {
1152 		txm_reclaim_region(return_addr, return_size);
1153 	}
1154 
1155 	*unneeded_addr = return_addr;
1156 	*unneeded_size = return_size;
1157 
1158 	return KERN_SUCCESS;
1159 }
1160 
1161 #pragma mark Address Spaces
1162 
1163 kern_return_t
txm_register_address_space(pmap_t pmap,uint16_t addr_space_id,TXMAddressSpaceFlags_t flags)1164 txm_register_address_space(
1165 	pmap_t pmap,
1166 	uint16_t addr_space_id,
1167 	TXMAddressSpaceFlags_t flags)
1168 {
1169 	txm_call_t txm_call = {
1170 		.selector = kTXMKernelSelectorRegisterAddressSpace,
1171 		.failure_fatal = true,
1172 		.num_input_args = 2,
1173 		.num_output_args = 1,
1174 	};
1175 	TXMAddressSpace_t *txm_addr_space = NULL;
1176 
1177 	/* Register the address space -- cannot fail */
1178 	txm_kernel_call(&txm_call, addr_space_id, flags);
1179 
1180 	/* Set the address space object within the PMAP */
1181 	txm_addr_space = (TXMAddressSpace_t*)txm_call.return_words[0];
1182 	pmap_txm_set_addr_space(pmap, txm_addr_space);
1183 
1184 	return KERN_SUCCESS;
1185 }
1186 
1187 kern_return_t
txm_unregister_address_space(pmap_t pmap)1188 txm_unregister_address_space(
1189 	pmap_t pmap)
1190 {
1191 	txm_call_t txm_call = {
1192 		.selector = kTXMKernelSelectorUnregisterAddressSpace,
1193 		.failure_fatal = true,
1194 		.num_input_args = 1,
1195 	};
1196 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1197 
1198 	/*
1199 	 * Take the unregistration sync lock.
1200 	 * For more information: rdar://99205627.
1201 	 */
1202 	lck_mtx_lock(&unregister_sync_lock);
1203 
1204 	/* Unregister the address space -- cannot fail */
1205 	txm_kernel_call(&txm_call, txm_addr_space);
1206 
1207 	/* Release the unregistration sync lock */
1208 	lck_mtx_unlock(&unregister_sync_lock);
1209 
1210 	/* Remove the address space from the pmap */
1211 	pmap_txm_set_addr_space(pmap, NULL);
1212 
1213 	return KERN_SUCCESS;
1214 }
1215 
1216 kern_return_t
txm_setup_nested_address_space(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)1217 txm_setup_nested_address_space(
1218 	pmap_t pmap,
1219 	const vm_address_t region_addr,
1220 	const vm_size_t region_size)
1221 {
1222 	txm_call_t txm_call = {
1223 		.selector = kTXMKernelSelectorSetupNestedAddressSpace,
1224 		.num_input_args = 3
1225 	};
1226 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1227 	kern_return_t ret = KERN_DENIED;
1228 
1229 	pmap_txm_acquire_exclusive_lock(pmap);
1230 	ret = txm_kernel_call(&txm_call, txm_addr_space, region_addr, region_size);
1231 	pmap_txm_release_exclusive_lock(pmap);
1232 
1233 	return ret;
1234 }
1235 
/*
 * Associate a registered code signature object with a region of an address
 * space within TXM. Retries while TXM reports a timeout, and -- when this
 * association establishes the address space's main region -- propagates the
 * signature's trust level onto the PMAP.
 */
kern_return_t
txm_associate_code_signature(
	pmap_t pmap,
	void *sig_obj,
	const vm_address_t region_addr,
	const vm_size_t region_size,
	const vm_offset_t region_offset)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorAssociateCodeSignature,
		.num_input_args = 5,
	};
	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
	kern_return_t ret = KERN_DENIED;

	/*
	 * Associating a code signature may require exclusive access to the TXM address
	 * space lock within TXM.
	 */
	pmap_txm_acquire_exclusive_lock(pmap);

	/*
	 * If the address space in question is a nested address space, then all associations
	 * need to go into the shared region base range. The VM layer is inconsistent with
	 * how it makes associations with TXM vs. how it maps pages into the shared region.
	 *
	 * For TXM, the associations are made without taking the base range into account,
	 * but when mappings are entered into the shared region, the base range is taken
	 * into account. To normalize this, we add the base range address here.
	 */
	vm_address_t adjusted_region_addr = region_addr;
	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeSharedRegion) {
		adjusted_region_addr += txm_addr_space->baseAddr;
	}

	/*
	 * The VM tries a bunch of weird mappings within launchd for some platform code
	 * which isn't mapped contiguously. These mappings don't succeed, but the failure
	 * is fairly harmless since everything seems to work. However, since the call to
	 * TXM fails, we make a series of logs. Hence, for launchd, we suppress failure
	 * logs.
	 */
	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeAddressSpace) {
		/* TXMTODO: Scope this to launchd better */
		txm_call.failure_code_silent = kTXMReturnPlatformCodeMapping;
	}

	/* Check if the main region has been set on the address space */
	bool main_region_set = txm_addr_space->mainRegion != NULL;
	bool main_region_set_after = false;

	ret = txm_kernel_call(
		&txm_call,
		txm_addr_space,
		sig_obj,
		adjusted_region_addr,
		region_size,
		region_offset);

	/* Spin-retry while TXM reports contention via a timeout */
	while (ret == KERN_OPERATION_TIMED_OUT) {
		/*
		 * There is no easy method to sleep in the kernel. This operation has the
		 * potential to burn CPU cycles, but that is alright since we don't actually
		 * ever expect to enter this case on legitimately operating systems.
		 */
		ret = txm_kernel_call(
			&txm_call,
			txm_addr_space,
			sig_obj,
			adjusted_region_addr,
			region_size,
			region_offset);
	}

	/*
	 * If the main region wasn't set on the address space before hand, but this new
	 * call into TXM was successful and sets the main region, it means this signature
	 * object is associated with the main region on the address space. With this, we
	 * can now set the appropriate trust level on the PMAP.
	 */
	if (ret == KERN_SUCCESS) {
		main_region_set_after = txm_addr_space->mainRegion != NULL;
	}

	/* Unlock the TXM address space lock */
	pmap_txm_release_exclusive_lock(pmap);

	/* Check if we should set the trust level on the PMAP */
	if (!main_region_set && main_region_set_after) {
		const TXMCodeSignature_t *cs_obj = sig_obj;
		const SignatureValidation_t *sig = &cs_obj->sig;

		/*
		 * This is gross, as we're dereferencing into a private data structure type.
		 * There are 2 ways to clean this up in the future:
		 * 1. Import libCodeSignature, so we can use "codeSignatureGetTrustLevel".
		 * 2. Cache the trust level on the address space within TXM and then use it.
		 */
		pmap_txm_set_trust_level(pmap, sig->trustLevel);
	}

	return ret;
}
1339 
1340 kern_return_t
txm_allow_jit_region(pmap_t pmap)1341 txm_allow_jit_region(
1342 	pmap_t pmap)
1343 {
1344 	txm_call_t txm_call = {
1345 		.selector = kTXMKernelSelectorAllowJITRegion,
1346 		.num_input_args = 1,
1347 	};
1348 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1349 	kern_return_t ret = KERN_DENIED;
1350 
1351 	pmap_txm_acquire_shared_lock(pmap);
1352 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1353 	pmap_txm_release_shared_lock(pmap);
1354 
1355 	return ret;
1356 }
1357 
1358 kern_return_t
txm_associate_jit_region(pmap_t pmap,const vm_address_t region_addr,const vm_size_t region_size)1359 txm_associate_jit_region(
1360 	pmap_t pmap,
1361 	const vm_address_t region_addr,
1362 	const vm_size_t region_size)
1363 {
1364 	txm_call_t txm_call = {
1365 		.selector = kTXMKernelSelectorAssociateJITRegion,
1366 		.num_input_args = 3,
1367 	};
1368 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1369 	kern_return_t ret = KERN_DENIED;
1370 
1371 	/*
1372 	 * Associating a JIT region may require exclusive access to the TXM address
1373 	 * space lock within TXM.
1374 	 */
1375 	pmap_txm_acquire_exclusive_lock(pmap);
1376 
1377 	ret = txm_kernel_call(
1378 		&txm_call,
1379 		txm_addr_space,
1380 		region_addr,
1381 		region_size);
1382 
1383 	/* Unlock the TXM address space lock */
1384 	pmap_txm_release_exclusive_lock(pmap);
1385 
1386 	return ret;
1387 }
1388 
1389 kern_return_t
txm_address_space_debugged(pmap_t pmap)1390 txm_address_space_debugged(
1391 	pmap_t pmap)
1392 {
1393 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1394 	bool debug_regions_allowed = false;
1395 
1396 	/*
1397 	 * We do not actually need to trap into the monitor for this function for
1398 	 * now. It might be a tad bit more secure to actually trap into the monitor
1399 	 * as it implicitly verifies all of our pointers, but since this is a simple
1400 	 * state check against the address space, the real policy around it lies
1401 	 * within the kernel still, in which case entering the monitor doesn't
1402 	 * really provide much more security.
1403 	 */
1404 
1405 	pmap_txm_acquire_shared_lock(pmap);
1406 	debug_regions_allowed = os_atomic_load(&txm_addr_space->allowsInvalidCode, relaxed);
1407 	pmap_txm_release_shared_lock(pmap);
1408 
1409 	if (debug_regions_allowed == true) {
1410 		return KERN_SUCCESS;
1411 	}
1412 	return KERN_DENIED;
1413 }
1414 
/*
 * Associate a debug region with an address space. On newer TXM kernel API
 * versions this is a real monitor call; on older ones it reduces to a check
 * that the address space allows invalid (debug) code at all.
 */
kern_return_t
txm_associate_debug_region(
	pmap_t pmap,
	const vm_address_t region_addr,
	const vm_size_t region_size)
{
#if kTXMKernelAPIVersion >= 10
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorAssociateDebugRegion,
		.num_input_args = 3,
	};
	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
	kern_return_t ret = KERN_DENIED;

	/*
	 * Associating a debug region may require exclusive access to the TXM address
	 * space lock within TXM.
	 */
	pmap_txm_acquire_exclusive_lock(pmap);

	ret = txm_kernel_call(
		&txm_call,
		txm_addr_space,
		region_addr,
		region_size);

	/* Unlock the TXM address space lock */
	pmap_txm_release_exclusive_lock(pmap);

	return ret;
#else
	/*
	 * This function is an interesting one. There is no need for us to make
	 * a call into TXM for this one and instead, all we need to do here is
	 * to verify that the TXM address space actually allows debug regions to
	 * be mapped in or not.
	 */
	(void)region_addr;
	(void)region_size;

	kern_return_t ret = txm_address_space_debugged(pmap);
	if (ret != KERN_SUCCESS) {
		printf("address space does not allow creating debug regions\n");
	}

	return ret;
#endif
}
1463 
1464 kern_return_t
txm_allow_invalid_code(pmap_t pmap)1465 txm_allow_invalid_code(
1466 	pmap_t pmap)
1467 {
1468 	txm_call_t txm_call = {
1469 		.selector = kTXMKernelSelectorAllowInvalidCode,
1470 		.num_input_args = 1,
1471 	};
1472 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1473 	kern_return_t ret = KERN_DENIED;
1474 
1475 	/*
1476 	 * Allowing invalid code may require exclusive access to the TXM address
1477 	 * space lock within TXM.
1478 	 */
1479 
1480 	pmap_txm_acquire_exclusive_lock(pmap);
1481 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1482 	pmap_txm_release_exclusive_lock(pmap);
1483 
1484 	return ret;
1485 }
1486 
1487 kern_return_t
txm_get_trust_level_kdp(pmap_t pmap,uint32_t * trust_level)1488 txm_get_trust_level_kdp(
1489 	pmap_t pmap,
1490 	uint32_t *trust_level)
1491 {
1492 	CSTrust_t txm_trust_level = kCSTrustUntrusted;
1493 
1494 	kern_return_t ret = pmap_txm_get_trust_level_kdp(pmap, &txm_trust_level);
1495 	if (ret != KERN_SUCCESS) {
1496 		return ret;
1497 	}
1498 
1499 	if (trust_level != NULL) {
1500 		*trust_level = txm_trust_level;
1501 	}
1502 	return KERN_SUCCESS;
1503 }
1504 
/*
 * Thin pass-through to the pmap layer to obtain the JIT region's address
 * range. NOTE(review): the _kdp suffix suggests this may run in the kernel
 * debugger context -- confirm no additional locking is permissible here.
 */
kern_return_t
txm_get_jit_address_range_kdp(
	pmap_t pmap,
	uintptr_t *jit_region_start,
	uintptr_t *jit_region_end)
{
	return pmap_txm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
}
1513 
1514 kern_return_t
txm_address_space_exempt(const pmap_t pmap)1515 txm_address_space_exempt(
1516 	const pmap_t pmap)
1517 {
1518 	if (pmap_performs_stage2_translations(pmap) == true) {
1519 		return KERN_SUCCESS;
1520 	}
1521 
1522 	return KERN_DENIED;
1523 }
1524 
1525 kern_return_t
txm_fork_prepare(pmap_t old_pmap,pmap_t new_pmap)1526 txm_fork_prepare(
1527 	pmap_t old_pmap,
1528 	pmap_t new_pmap)
1529 {
1530 	/*
1531 	 * We'll add support for this as the need for it becomes more important.
1532 	 * TXMTODO: Complete this implementation.
1533 	 */
1534 	(void)old_pmap;
1535 	(void)new_pmap;
1536 
1537 	return KERN_SUCCESS;
1538 }
1539 
1540 kern_return_t
txm_acquire_signing_identifier(const void * sig_obj,const char ** signing_id)1541 txm_acquire_signing_identifier(
1542 	const void *sig_obj,
1543 	const char **signing_id)
1544 {
1545 	txm_call_t txm_call = {
1546 		.selector = kTXMKernelSelectorAcquireSigningIdentifier,
1547 		.num_input_args = 1,
1548 		.num_output_args = 1,
1549 		.failure_fatal = true,
1550 	};
1551 
1552 	/* Get the signing ID -- should not fail */
1553 	txm_kernel_call(&txm_call, sig_obj);
1554 
1555 	if (signing_id != NULL) {
1556 		*signing_id = (const char*)txm_call.return_words[0];
1557 	}
1558 	return KERN_SUCCESS;
1559 }
1560 
1561 #pragma mark Entitlements
1562 
1563 kern_return_t
txm_associate_kernel_entitlements(void * sig_obj,const void * kernel_entitlements)1564 txm_associate_kernel_entitlements(
1565 	void *sig_obj,
1566 	const void *kernel_entitlements)
1567 {
1568 	txm_call_t txm_call = {
1569 		.selector = kTXMKernelSelectorAssociateKernelEntitlements,
1570 		.num_input_args = 2,
1571 		.failure_fatal = true,
1572 	};
1573 
1574 	/* Associate the kernel entitlements -- should not fail */
1575 	txm_kernel_call(&txm_call, sig_obj, kernel_entitlements);
1576 
1577 	return KERN_SUCCESS;
1578 }
1579 
1580 kern_return_t
txm_resolve_kernel_entitlements(pmap_t pmap,const void ** kernel_entitlements)1581 txm_resolve_kernel_entitlements(
1582 	pmap_t pmap,
1583 	const void **kernel_entitlements)
1584 {
1585 	txm_call_t txm_call = {
1586 		.selector = kTXMKernelSelectorResolveKernelEntitlementsAddressSpace,
1587 		.skip_logs = true,
1588 		.num_input_args = 1,
1589 		.num_output_args = 1,
1590 		.failure_silent = true,
1591 	};
1592 	TXMAddressSpace_t *txm_addr_space = NULL;
1593 	kern_return_t ret = KERN_DENIED;
1594 
1595 	if (pmap == pmap_txm_kernel_pmap()) {
1596 		return KERN_NOT_FOUND;
1597 	}
1598 	txm_addr_space = pmap_txm_addr_space(pmap);
1599 
1600 	pmap_txm_acquire_shared_lock(pmap);
1601 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1602 	pmap_txm_release_shared_lock(pmap);
1603 
1604 	if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
1605 		*kernel_entitlements = (const void*)txm_call.return_words[0];
1606 	}
1607 	return ret;
1608 }
1609 
1610 kern_return_t
txm_accelerate_entitlements(void * sig_obj,CEQueryContext_t * ce_ctx)1611 txm_accelerate_entitlements(
1612 	void *sig_obj,
1613 	CEQueryContext_t *ce_ctx)
1614 {
1615 	txm_call_t txm_call = {
1616 		.selector = kTXMKernelSelectorAccelerateEntitlements,
1617 		.num_input_args = 1,
1618 		.num_output_args = 1,
1619 	};
1620 	kern_return_t ret = KERN_DENIED;
1621 
1622 	ret = txm_kernel_call(&txm_call, sig_obj);
1623 	if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
1624 		*ce_ctx = (CEQueryContext_t)txm_call.return_words[0];
1625 	}
1626 
1627 	return ret;
1628 }
1629 
1630 #pragma mark Image4
1631 
1632 void*
txm_image4_storage_data(__unused size_t * allocated_size)1633 txm_image4_storage_data(
1634 	__unused size_t *allocated_size)
1635 {
1636 	/*
1637 	 * AppleImage4 builds a variant of TXM which TXM should link against statically
1638 	 * thereby removing the need for the kernel to allocate some data on behalf of
1639 	 * the kernel extension.
1640 	 */
1641 	panic("unsupported AppleImage4 interface");
1642 }
1643 
1644 void
txm_image4_set_nonce(const img4_nonce_domain_index_t ndi,const img4_nonce_t * nonce)1645 txm_image4_set_nonce(
1646 	const img4_nonce_domain_index_t ndi,
1647 	const img4_nonce_t *nonce)
1648 {
1649 	txm_call_t txm_call = {
1650 		.selector = kTXMKernelSelectorImage4SetNonce,
1651 		.failure_fatal = true,
1652 		.num_input_args = 2,
1653 	};
1654 
1655 	txm_kernel_call(&txm_call, ndi, nonce);
1656 }
1657 
1658 void
txm_image4_roll_nonce(const img4_nonce_domain_index_t ndi)1659 txm_image4_roll_nonce(
1660 	const img4_nonce_domain_index_t ndi)
1661 {
1662 	txm_call_t txm_call = {
1663 		.selector = kTXMKernelSelectorImage4RollNonce,
1664 		.failure_fatal = true,
1665 		.num_input_args = 1,
1666 	};
1667 
1668 	txm_kernel_call(&txm_call, ndi);
1669 }
1670 
1671 errno_t
txm_image4_copy_nonce(const img4_nonce_domain_index_t ndi,img4_nonce_t * nonce_out)1672 txm_image4_copy_nonce(
1673 	const img4_nonce_domain_index_t ndi,
1674 	img4_nonce_t *nonce_out)
1675 {
1676 	txm_call_t txm_call = {
1677 		.selector = kTXMKernelSelectorImage4GetNonce,
1678 		.num_input_args = 1,
1679 		.num_output_args = 1,
1680 	};
1681 	const img4_nonce_t *nonce = NULL;
1682 	TXMReturn_t txm_ret = {0};
1683 	kern_return_t ret = KERN_DENIED;
1684 
1685 	ret = txm_kernel_call(&txm_call, ndi);
1686 	if (ret != KERN_SUCCESS) {
1687 		txm_ret = txm_call.txm_ret;
1688 		if (txm_ret.returnCode != kTXMReturnCodeErrno) {
1689 			return EPERM;
1690 		}
1691 		return txm_ret.errnoRet;
1692 	}
1693 
1694 	/* Acquire a pointer to the nonce from TXM */
1695 	nonce = (const img4_nonce_t*)txm_call.return_words[0];
1696 
1697 	if (nonce_out) {
1698 		*nonce_out = *nonce;
1699 	}
1700 	return 0;
1701 }
1702 
1703 errno_t
txm_image4_execute_object(img4_runtime_object_spec_index_t obj_spec_index,const img4_buff_t * payload,const img4_buff_t * manifest)1704 txm_image4_execute_object(
1705 	img4_runtime_object_spec_index_t obj_spec_index,
1706 	const img4_buff_t *payload,
1707 	const img4_buff_t *manifest)
1708 {
1709 	/* Not supported within TXM yet */
1710 	(void)obj_spec_index;
1711 	(void)payload;
1712 	(void)manifest;
1713 
1714 	printf("image4 object execution isn't supported by TXM\n");
1715 	return ENOSYS;
1716 }
1717 
1718 errno_t
txm_image4_copy_object(img4_runtime_object_spec_index_t obj_spec_index,vm_address_t object_out,size_t * object_length)1719 txm_image4_copy_object(
1720 	img4_runtime_object_spec_index_t obj_spec_index,
1721 	vm_address_t object_out,
1722 	size_t *object_length)
1723 {
1724 	/* Not supported within TXM yet */
1725 	(void)obj_spec_index;
1726 	(void)object_out;
1727 	(void)object_length;
1728 
1729 	printf("image4 object copying isn't supported by TXM\n");
1730 	return ENOSYS;
1731 }
1732 
1733 const void*
txm_image4_get_monitor_exports(void)1734 txm_image4_get_monitor_exports(void)
1735 {
1736 	txm_call_t txm_call = {
1737 		.selector = kTXMKernelSelectorImage4GetExports,
1738 		.failure_fatal = true,
1739 		.num_output_args = 1,
1740 	};
1741 
1742 	txm_kernel_call(&txm_call);
1743 	return (const void*)txm_call.return_words[0];
1744 }
1745 
1746 errno_t
txm_image4_set_release_type(const char * release_type)1747 txm_image4_set_release_type(
1748 	const char *release_type)
1749 {
1750 	txm_call_t txm_call = {
1751 		.selector = kTXMKernelSelectorImage4SetReleaseType,
1752 		.failure_fatal = true,
1753 		.num_input_args = 1,
1754 	};
1755 
1756 	/* Set the release type -- cannot fail */
1757 	txm_kernel_call(&txm_call, release_type);
1758 
1759 	return 0;
1760 }
1761 
errno_t
txm_image4_set_bnch_shadow(
	const img4_nonce_domain_index_t ndi)
{
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorImage4SetBootNonceShadow,
		.failure_fatal = true,
		.num_input_args = 1,
	};

	/* Set the boot nonce (BNCH) shadow for this domain -- cannot fail */
	txm_kernel_call(&txm_call, ndi);

	return 0;
}
1777 
1778 #pragma mark Image4 - New
1779 
/*
 * Report whether the given image4 coprocessor trap selector is one that TXM
 * handles itself (and therefore needs its buffers transferred/reclaimed
 * around the trap). Selectors only count when built against a sufficiently
 * new TXM image4 API.
 */
static inline bool
_txm_image4_monitor_trap_supported(
	image4_cs_trap_t selector)
{
	switch (selector) {
#if kTXMImage4APIVersion >= 1
	case IMAGE4_CS_TRAP_KMOD_SET_RELEASE_TYPE:
	case IMAGE4_CS_TRAP_NONCE_SET:
	case IMAGE4_CS_TRAP_NONCE_ROLL:
	case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
		return true;
#endif

	default:
		return false;
	}
}
1797 
1798 kern_return_t
txm_image4_transfer_region(image4_cs_trap_t selector,vm_address_t region_addr,vm_size_t region_size)1799 txm_image4_transfer_region(
1800 	image4_cs_trap_t selector,
1801 	vm_address_t region_addr,
1802 	vm_size_t region_size)
1803 {
1804 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1805 		txm_transfer_region(region_addr, region_size);
1806 	}
1807 	return KERN_SUCCESS;
1808 }
1809 
1810 kern_return_t
txm_image4_reclaim_region(image4_cs_trap_t selector,vm_address_t region_addr,vm_size_t region_size)1811 txm_image4_reclaim_region(
1812 	image4_cs_trap_t selector,
1813 	vm_address_t region_addr,
1814 	vm_size_t region_size)
1815 {
1816 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1817 		txm_reclaim_region(region_addr, region_size);
1818 	}
1819 	return KERN_SUCCESS;
1820 }
1821 
1822 errno_t
txm_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size)1823 txm_image4_monitor_trap(
1824 	image4_cs_trap_t selector,
1825 	const void *input_data,
1826 	size_t input_size)
1827 {
1828 	txm_call_t txm_call = {
1829 		.selector = kTXMKernelSelectorImage4Dispatch,
1830 		.num_input_args = 5,
1831 	};
1832 
1833 	kern_return_t ret = txm_kernel_call(
1834 		&txm_call, selector,
1835 		input_data, input_size,
1836 		NULL, NULL);
1837 
1838 	/* Return 0 for success */
1839 	if (ret == KERN_SUCCESS) {
1840 		return 0;
1841 	}
1842 
1843 	/* Check for an errno_t return */
1844 	if (txm_call.txm_ret.returnCode == kTXMReturnCodeErrno) {
1845 		if (txm_call.txm_ret.errnoRet == 0) {
1846 			panic("image4 dispatch: unexpected success errno_t: %llu", selector);
1847 		}
1848 		return txm_call.txm_ret.errnoRet;
1849 	}
1850 
1851 	/* Return a generic error */
1852 	return EPERM;
1853 }
1854 
1855 #pragma mark Metrics
1856 
1857 #if DEVELOPMENT || DEBUG
1858 
1859 SYSCTL_DECL(_txm);
1860 SYSCTL_NODE(, OID_AUTO, txm, CTLFLAG_RD, 0, "TXM");
1861 
1862 SYSCTL_DECL(_txm_metrics);
1863 SYSCTL_NODE(_txm, OID_AUTO, metrics, CTLFLAG_RD, 0, "TXM Metrics");
1864 
1865 #define TXM_METRIC(type, name, field)                                               \
1866 static int __txm_metric_ ## type ## _ ## name SYSCTL_HANDLER_ARGS;                  \
1867 SYSCTL_DECL(_txm_metrics_ ## type);                                                 \
1868 SYSCTL_PROC(                                                                        \
1869 	_txm_metrics_ ## type, OID_AUTO,                                                \
1870 	name, CTLTYPE_INT | CTLFLAG_RD,                                                 \
1871 	NULL, 0, __txm_metric_ ## type ## _ ## name,                                    \
1872 	"I", "collected data from \'" #type "\':\'" #field "\'");                       \
1873 static int __txm_metric_ ## type ## _ ## name SYSCTL_HANDLER_ARGS                   \
1874 {                                                                                   \
1875 	if (req->newptr) {                                                              \
1876 	    return EPERM;                                                               \
1877 	}                                                                               \
1878 	uint32_t value = os_atomic_load(&txm_metrics->field, relaxed);                  \
1879 	return SYSCTL_OUT(req, &value, sizeof(value));                                  \
1880 }
1881 
1882 SYSCTL_DECL(_txm_metrics_memory);
1883 SYSCTL_NODE(_txm_metrics, OID_AUTO, memory, CTLFLAG_RD, 0, "TXM Metrics - Memory");
1884 
1885 #define TXM_ALLOCATOR_METRIC(name, field)                                                   \
1886 SYSCTL_DECL(_txm_metrics_memory_ ## name);                                                  \
1887 SYSCTL_NODE(_txm_metrics_memory, OID_AUTO, name, CTLFLAG_RD, 0, "\'" #name "\' allocator"); \
1888 TXM_METRIC(memory_ ## name, bytes_allocated, field->allocated);                             \
1889 TXM_METRIC(memory_ ## name, bytes_unused, field->unused);                                   \
1890 TXM_METRIC(memory_ ## name, bytes_wasted, field->wasted);                                   \
1891 
/* Global TXM memory pools */
TXM_METRIC(memory, bootstrap, memory.bootstrap);
TXM_METRIC(memory, free_list, memory.freeList);
TXM_METRIC(memory, bulk_data, memory.bulkData);
/* Per-object-type slab allocators */
TXM_ALLOCATOR_METRIC(trust_cache, memory.slabs.trustCache);
TXM_ALLOCATOR_METRIC(provisioning_profile, memory.slabs.profile);
TXM_ALLOCATOR_METRIC(code_signature, memory.slabs.codeSignature);
TXM_ALLOCATOR_METRIC(code_region, memory.slabs.codeRegion);
TXM_ALLOCATOR_METRIC(address_space, memory.slabs.addressSpace);
/* Fixed-size bucket allocators (sized by the byte count in the name) */
TXM_ALLOCATOR_METRIC(bucket_1024, memory.buckets.b1024);
TXM_ALLOCATOR_METRIC(bucket_2048, memory.buckets.b2048);
TXM_ALLOCATOR_METRIC(bucket_4096, memory.buckets.b4096);
TXM_ALLOCATOR_METRIC(bucket_8192, memory.buckets.b8192);
1903 TXM_ALLOCATOR_METRIC(bucket_8192, memory.buckets.b8192);
1904 
/*
 * Acceleration counters. The values are read straight out of the TXM-shared
 * metrics structure; their exact semantics are defined on the TXM side.
 * NOTE(review): "unsupported" is backed by the "large" counter -- presumably
 * regions too large to accelerate; confirm against the TXM definition.
 */
SYSCTL_DECL(_txm_metrics_acceleration);
SYSCTL_NODE(_txm_metrics, OID_AUTO, acceleration, CTLFLAG_RD, 0, "TXM Metrics - Acceleration");
TXM_METRIC(acceleration, num_signature, acceleration.signature);
TXM_METRIC(acceleration, num_bucket, acceleration.bucket);
TXM_METRIC(acceleration, num_page, acceleration.page);
TXM_METRIC(acceleration, bucket_256, acceleration.bucket256);
TXM_METRIC(acceleration, unsupported, acceleration.large);
1912 
/* Trust-cache memory accounting, mirrored from the TXM metrics structure */
SYSCTL_DECL(_txm_metrics_trustcaches);
SYSCTL_NODE(_txm_metrics, OID_AUTO, trustcaches, CTLFLAG_RD, 0, "TXM Metrics - Trust Caches");
TXM_METRIC(trustcaches, bytes_needed, trustCaches.bytesNeeded);
TXM_METRIC(trustcaches, bytes_allocated, trustCaches.bytesAllocated);
TXM_METRIC(trustcaches, bytes_locked, trustCaches.bytesLocked);
TXM_METRIC(trustcaches, bytes_tombstoned, trustCaches.bytesTombstoned);
1919 
1920 #endif /* DEVELOPMENT || DEBUG */
1921 
1922 #if HAS_MTE && (DEVELOPMENT || DEBUG)
1923 
1924 /* Need ARM MTE built-ins */
1925 #include <arm_acle.h>
1926 
/*
 * MTE GL0 exception test driver, invoked through the debug.test sysctl
 * registered below. Allocates one page of tagged kernel memory, derives a
 * pointer flavor selected by test_case, and hands it to TXM so the GL0
 * exception path can be exercised (case 2 is expected to panic).
 *
 * test_case: selects the pointer flavor (see the switch below).
 * out:       unused.
 * Returns 0 once the selected case has run, -1 on allocation failure or an
 * unknown test case.
 */
static int
mte_test_gl0(
	int64_t test_case,
	__unused int64_t *out)
{
	kern_return_t ret = KERN_DENIED;
	vm_address_t address = 0;
	uintptr_t phys_addr = 0;
	uint8_t *untagged_ptr = NULL;
	uint8_t *tagged_ptr = NULL;
	uint8_t *txm_ptr = NULL;

	/*
	 * Test Cases (values of test_case):
	 * 0. Pass TXM a pointer with a valid tag --> success
	 * 1. Pass TXM a pointer from the physical aperture --> success
	 * 2. Pass TXM a pointer with an invalid tag --> panic
	 */
	/* KMA_TAG: presumably requests MTE-taggable memory (freed with KMF_TAG) */
	ret = kmem_alloc(
		kernel_map, &address, PAGE_SIZE,
		KMA_ZERO | KMA_TAG | KMA_KOBJECT, VM_KERN_MEMORY_DIAG);
	if ((ret != KERN_SUCCESS) || (address == 0)) {
		printf("%s: unable to allocate tagged memory: %d | 0x%0lX\n", __FUNCTION__, ret, address);
		return -1;
	}

	phys_addr = kvtophys_nofail(address);
	untagged_ptr = (uint8_t*)address;
	/* Stamp a random tag into the pointer's tag bits (ACLE intrinsic) */
	tagged_ptr = __arm_mte_create_random_tag(untagged_ptr, 0);

	/* Commit the random tag to memory */
	__arm_mte_set_tag(tagged_ptr);

	/* Ensure we can access the tagged_ptr */
	*tagged_ptr = 0xF7;

	switch (test_case) {
	case 0:
		txm_ptr = tagged_ptr;
		printf("%s: using valid memory tag\n", __FUNCTION__);
		break;

	case 1:
		/* Alias of the same page through the physical aperture */
		txm_ptr = (uint8_t*)phystokv(kvtophys_nofail((uintptr_t)tagged_ptr));
		printf("%s: using physical aperture mapping\n", __FUNCTION__);
		break;

	case 2:
		/* Bump the pointer's tag so it no longer matches the memory tag */
		txm_ptr = __arm_mte_increment_tag(tagged_ptr, 1);
		printf("%s: using invalid memory tag\n", __FUNCTION__);
		break;

	default:
		kmem_free_guard(kernel_map, address, PAGE_SIZE, KMF_TAG, KMEM_GUARD_NONE);
		printf("%s: invalid test case: %lld\n", __FUNCTION__, test_case);
		return -1;
	}

#if kTXMKernelAPIVersion >= 8
	txm_call_t txm_call = {
		.selector = kTXMKernelSelectorGL0ExceptionTest,
		.num_input_args = 3
	};
	txm_kernel_call(&txm_call, phys_addr, (uintptr_t)txm_ptr, test_case);
#else
	/* Older TXM API: nothing to call, but still free the page below */
	printf("%s: required selector not present\n", __FUNCTION__);
#endif

	/* Free the kernel allocation */
	kmem_free_guard(kernel_map, address, PAGE_SIZE, KMF_TAG, KMEM_GUARD_NONE);

	return 0;
}
2000 
2001 /*
2002  * The test can be invoked on the command line through the "sysctl" tool as
2003  * follows: $ sysctl debug.test.mte_gl0=<test-case>
2004  */
2005 SYSCTL_TEST_REGISTER(mte_gl0, mte_test_gl0);
2006 
2007 #endif /* HAS_MTE && (DEVELOPMENT || DEBUG) */
2008 
2009 #endif /* CONFIG_SPTM */
2010