1 /*
2  * Copyright (c) 2022 Apple Computer, Inc. All rights reserved.
3  *
4  * @APPLE_LICENSE_HEADER_START@
5  *
6  * The contents of this file constitute Original Code as defined in and
7  * are subject to the Apple Public Source License Version 1.1 (the
8  * "License").  You may not use this file except in compliance with the
9  * License.  Please obtain a copy of the License at
10  * http://www.apple.com/publicsource and read it before using this file.
11  *
12  * This Original Code and all software distributed under the License are
13  * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT.  Please see the
17  * License for the specific language governing rights and limitations
18  * under the License.
19  *
20  * @APPLE_LICENSE_HEADER_END@
21  */
22 
23 #include <stdarg.h>
24 #include <stdatomic.h>
25 #include <os/overflow.h>
26 #include <os/atomic_private.h>
27 #include <machine/atomic.h>
28 #include <mach/vm_param.h>
29 #include <mach/vm_map.h>
30 #include <mach/shared_region.h>
31 #include <vm/vm_kern_xnu.h>
32 #include <kern/zalloc.h>
33 #include <kern/kalloc.h>
34 #include <kern/assert.h>
35 #include <kern/locks.h>
36 #include <kern/recount.h>
37 #include <kern/sched_prim.h>
38 #include <kern/lock_rw.h>
39 #include <libkern/libkern.h>
40 #include <libkern/section_keywords.h>
41 #include <libkern/coretrust/coretrust.h>
42 #include <libkern/amfi/amfi.h>
43 #include <pexpert/pexpert.h>
44 #include <sys/vm.h>
45 #include <sys/proc.h>
46 #include <sys/codesign.h>
47 #include <sys/code_signing.h>
48 #include <sys/trust_caches.h>
49 #include <sys/sysctl.h>
50 #include <sys/reboot.h>
51 #include <uuid/uuid.h>
52 #include <IOKit/IOLib.h>
53 #include <IOKit/IOBSD.h>
54 
55 #if CONFIG_SPTM
56 /*
57  * The TrustedExecutionMonitor environment works in tandem with the SPTM to provide code
58  * signing and memory isolation enforcement for data structures critical to ensuring that
59  * all code executed on the system is authorized to do so.
60  *
61  * Unless the data is managed by TXM itself, XNU needs to page-align everything, make the
62  * relevant type transfer, and then reference the memory as read-only.
63  *
64  * TXM enforces concurrency on its side, but through the use of try-locks. Upon a failure
65  * in acquiring the lock, TXM will panic. As a result, in order to ensure single-threaded
66  * behavior, the kernel also has to take some locks on its side before calling into TXM.
67  */
68 #include <sys/trusted_execution_monitor.h>
69 #include <pexpert/arm64/board_config.h>
70 
71 /* Lock group used for all locks within the kernel for TXM */
72 LCK_GRP_DECLARE(txm_lck_grp, "txm_code_signing_lck_grp");
73 
74 #pragma mark Utilities
75 
76 /* Number of thread stacks is known at build-time */
77 #define NUM_TXM_THREAD_STACKS (MAX_CPUS)
78 txm_thread_stack_t thread_stacks[NUM_TXM_THREAD_STACKS] = {0};
79 
80 /* Singly-linked-list head for thread stacks */
81 SLIST_HEAD(thread_stack_head, _txm_thread_stack) thread_stacks_head =
82     SLIST_HEAD_INITIALIZER(thread_stacks_head);
83 
84 static decl_lck_mtx_data(, thread_stacks_lock);
85 static void *thread_stack_event = NULL;
86 
87 static void
88 setup_thread_stacks(void)
89 {
90 	extern const sptm_bootstrap_args_xnu_t *SPTMArgs;
91 	txm_thread_stack_t *thread_stack = NULL;
92 
93 	/* Initialize each thread stack and add it to the list */
94 	for (uint32_t i = 0; i < NUM_TXM_THREAD_STACKS; i++) {
95 		thread_stack = &thread_stacks[i];
96 
97 		/* Acquire the thread stack virtual mapping */
98 		thread_stack->thread_stack_papt = SPTMArgs->txm_thread_stacks[i];
99 
100 		/* Acquire the thread stack physical page */
101 		thread_stack->thread_stack_phys = (uintptr_t)kvtophys_nofail(
102 			thread_stack->thread_stack_papt);
103 
104 		/* Resolve the pointer to the thread stack data */
105 		thread_stack->thread_stack_data =
106 		    (TXMThreadStack_t*)(thread_stack->thread_stack_papt + (PAGE_SIZE - 1024));
107 
108 		/* Add thread stack to the list head */
109 		SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
110 	}
111 
112 	/* Initialize the thread stacks lock */
113 	lck_mtx_init(&thread_stacks_lock, &txm_lck_grp, 0);
114 }
115 
116 static txm_thread_stack_t*
117 acquire_thread_stack(void)
118 {
119 	txm_thread_stack_t *thread_stack = NULL;
120 
121 	/* Lock the thread stack list */
122 	lck_mtx_lock(&thread_stacks_lock);
123 
124 	while (SLIST_EMPTY(&thread_stacks_head) == true) {
125 		lck_mtx_sleep(
126 			&thread_stacks_lock,
127 			LCK_SLEEP_DEFAULT,
128 			&thread_stack_event,
129 			THREAD_UNINT);
130 	}
131 
132 	if (SLIST_EMPTY(&thread_stacks_head) == true) {
133 		panic("unable to acquire a thread stack for TXM");
134 	}
135 
136 	/* Use the first available thread stack */
137 	thread_stack = SLIST_FIRST(&thread_stacks_head);
138 
139 	/* Remove the thread stack from the list */
140 	SLIST_REMOVE_HEAD(&thread_stacks_head, link);
141 
142 	/* Unlock the thread stack list */
143 	lck_mtx_unlock(&thread_stacks_lock);
144 
145 	/* Associate the thread stack with the current thread */
146 	thread_associate_txm_thread_stack(thread_stack->thread_stack_phys);
147 
148 	return thread_stack;
149 }
150 
151 static void
152 release_thread_stack(
153 	txm_thread_stack_t* thread_stack)
154 {
155 	/* Remove the TXM thread stack association with the current thread */
156 	thread_disassociate_txm_thread_stack(thread_stack->thread_stack_phys);
157 
158 	/* Lock the thread stack list */
159 	lck_mtx_lock(&thread_stacks_lock);
160 
161 	/* Add the thread stack at the list head */
162 	SLIST_INSERT_HEAD(&thread_stacks_head, thread_stack, link);
163 
164 	/* Unlock the thread stack list */
165 	lck_mtx_unlock(&thread_stacks_lock);
166 
167 	/* Wake up any threads waiting to acquire a thread stack */
168 	thread_wakeup(&thread_stack_event);
169 }
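/*
 * Illustrative pairing of the two helpers above (a sketch, not compiled code);
 * this mirrors what txm_kernel_call_internal() below actually does:
 *
 *     txm_thread_stack_t *ts = acquire_thread_stack();
 *     regs.x0 = ts->thread_stack_phys;    // x0 always carries the TXM thread stack
 *     txm_enter(selector, &regs);         // dispatch into TXM through the SPTM
 *     release_thread_stack(ts);           // return the stack and wake any waiters
 */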
170 
171 static kern_return_t
172 txm_parse_return(
173 	TXMReturn_t txm_ret)
174 {
175 	switch (txm_ret.returnCode) {
176 	case kTXMSuccess:
177 		return KERN_SUCCESS;
178 
179 	case kTXMReturnOutOfMemory:
180 		return KERN_RESOURCE_SHORTAGE;
181 
182 	case kTXMReturnNotFound:
183 		return KERN_NOT_FOUND;
184 
185 	case kTXMReturnNotSupported:
186 		return KERN_NOT_SUPPORTED;
187 
188 #if kTXMKernelAPIVersion >= 6
189 	case kTXMReturnTryAgain:
190 		return KERN_OPERATION_TIMED_OUT;
191 #endif
192 
193 	default:
194 		return KERN_FAILURE;
195 	}
196 }
197 
198 static void
199 txm_print_return(
200 	TXMKernelSelector_t selector,
201 	TXMReturn_t txm_ret)
202 {
203 	/*
204 	 * We specifically use IOLog instead of printf since printf is compiled out on
205 	 * RELEASE kernels. We want to ensure that errors from TXM are captured within
206 	 * sysdiagnoses from the field.
207 	 */
208 
209 	if (txm_ret.returnCode == kTXMSuccess) {
210 		return;
211 	} else if (txm_ret.returnCode == kTXMReturnTrustCache) {
212 		IOLog("TXM [Error]: TrustCache: selector: %u | 0x%02X | 0x%02X | %u\n",
213 		    selector, txm_ret.tcRet.component, txm_ret.tcRet.error, txm_ret.tcRet.uniqueError);
214 	} else if (txm_ret.returnCode == kTXMReturnCodeSignature) {
215 		IOLog("TXM [Error]: CodeSignature: selector: %u | 0x%02X | 0x%02X | %u\n",
216 		    selector, txm_ret.csRet.component, txm_ret.csRet.error, txm_ret.csRet.uniqueError);
217 	} else if (txm_ret.returnCode == kTXMReturnCodeErrno) {
218 		IOLog("TXM [Error]: Errno: selector: %u | %d\n",
219 		    selector, txm_ret.errnoRet);
220 	} else {
221 		IOLog("TXM [Error]: selector: %u | %u\n",
222 		    selector, txm_ret.returnCode);
223 	}
224 }
225 
226 #pragma mark Page Allocation
227 
228 static void
229 txm_add_page(void)
230 {
231 	txm_call_t txm_call = {
232 		.selector = kTXMKernelSelectorAddFreeListPage,
233 		.failure_fatal = true,
234 		.num_input_args = 1
235 	};
236 
237 	/* Allocate a page from the VM -- transfers page to TXM internally */
238 	vm_map_address_t phys_addr = pmap_txm_allocate_page();
239 
240 	/* Add this page to the TXM free list */
241 	txm_kernel_call(&txm_call, phys_addr);
242 }
243 
244 #pragma mark Calls
245 
246 static void
247 txm_kernel_call_registers_setup(
248 	txm_call_t *parameters,
249 	sptm_call_regs_t *registers,
250 	va_list args)
251 {
252 	/*
253 	 * We are only ever allowed a maximum of 7 arguments for calling into TXM.
254 	 * This is because the SPTM dispatch only sets up registers x0-x7 for the
255 	 * call, and x0 is always reserved for passing in a thread stack for TXM
256 	 * to operate on.
257 	 */
258 
259 	switch (parameters->num_input_args) {
260 	case 7:
261 		registers->x1 = va_arg(args, uintptr_t);
262 		registers->x2 = va_arg(args, uintptr_t);
263 		registers->x3 = va_arg(args, uintptr_t);
264 		registers->x4 = va_arg(args, uintptr_t);
265 		registers->x5 = va_arg(args, uintptr_t);
266 		registers->x6 = va_arg(args, uintptr_t);
267 		registers->x7 = va_arg(args, uintptr_t);
268 		break;
269 
270 	case 6:
271 		registers->x1 = va_arg(args, uintptr_t);
272 		registers->x2 = va_arg(args, uintptr_t);
273 		registers->x3 = va_arg(args, uintptr_t);
274 		registers->x4 = va_arg(args, uintptr_t);
275 		registers->x5 = va_arg(args, uintptr_t);
276 		registers->x6 = va_arg(args, uintptr_t);
277 		break;
278 
279 	case 5:
280 		registers->x1 = va_arg(args, uintptr_t);
281 		registers->x2 = va_arg(args, uintptr_t);
282 		registers->x3 = va_arg(args, uintptr_t);
283 		registers->x4 = va_arg(args, uintptr_t);
284 		registers->x5 = va_arg(args, uintptr_t);
285 		break;
286 
287 	case 4:
288 		registers->x1 = va_arg(args, uintptr_t);
289 		registers->x2 = va_arg(args, uintptr_t);
290 		registers->x3 = va_arg(args, uintptr_t);
291 		registers->x4 = va_arg(args, uintptr_t);
292 		break;
293 
294 	case 3:
295 		registers->x1 = va_arg(args, uintptr_t);
296 		registers->x2 = va_arg(args, uintptr_t);
297 		registers->x3 = va_arg(args, uintptr_t);
298 		break;
299 
300 	case 2:
301 		registers->x1 = va_arg(args, uintptr_t);
302 		registers->x2 = va_arg(args, uintptr_t);
303 		break;
304 
305 	case 1:
306 		registers->x1 = va_arg(args, uintptr_t);
307 		break;
308 
309 	case 0:
310 		break;
311 
312 	default:
313 		panic("invalid number of arguments to TXM: selector: %u | %u",
314 		    parameters->selector, parameters->num_input_args);
315 	}
316 }
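/*
 * Resulting register layout for a call into TXM (summarized from the setup
 * above; a descriptive sketch, not additional code):
 *
 *     x0        physical address of the TXM thread stack (reserved, set by the caller)
 *     x1 - x7   up to seven input arguments, in the order they were passed
 */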
317 
318 static TXMReturn_t
319 txm_kernel_call_internal(
320 	txm_call_t *parameters,
321 	va_list args)
322 {
323 	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
324 	sptm_call_regs_t txm_registers = {0};
325 	txm_thread_stack_t *thread_stack = NULL;
326 	const TXMThreadStack_t *thread_stack_data = NULL;
327 	const TXMSharedContextData_t *shared_context_data = NULL;
328 
329 	/* Obtain a stack for this call */
330 	thread_stack = acquire_thread_stack();
331 	thread_stack_data = thread_stack->thread_stack_data;
332 	shared_context_data = &thread_stack_data->sharedData;
333 
334 	/* Setup argument registers */
335 	txm_registers.x0 = thread_stack->thread_stack_phys;
336 	txm_kernel_call_registers_setup(parameters, &txm_registers, args);
337 
338 	/* Track resource usage */
339 	recount_enter_secure();
340 
341 	/* Call into TXM */
342 	txm_enter(parameters->selector, &txm_registers);
343 
344 	recount_leave_secure();
345 
346 	txm_ret = (TXMReturn_t){.rawValue = shared_context_data->txmReturnCode};
347 	parameters->txm_ret = txm_ret;
348 
349 	if (parameters->txm_ret.returnCode == kTXMSuccess) {
350 		parameters->num_return_words = shared_context_data->txmNumReturnWords;
351 		if (parameters->num_return_words > kTXMStackReturnWords) {
352 			panic("received excessive return words from TXM: selector: %u | %llu",
353 			    parameters->selector, parameters->num_return_words);
354 		}
355 
356 		for (uint64_t i = 0; i < parameters->num_return_words; i++) {
357 			parameters->return_words[i] = shared_context_data->txmReturnWords[i];
358 		}
359 	}
360 
361 	/* Release the thread stack as it is no longer needed */
362 	release_thread_stack(thread_stack);
363 	thread_stack_data = NULL;
364 	shared_context_data = NULL;
365 
366 	return txm_ret;
367 }
368 
369 kern_return_t
370 txm_kernel_call(
371 	txm_call_t *parameters, ...)
372 {
373 	TXMReturn_t txm_ret = (TXMReturn_t){.returnCode = kTXMReturnGeneric};
374 	kern_return_t ret = KERN_DENIED;
375 	va_list args;
376 
377 	/* Start the variadic arguments list */
378 	va_start(args, parameters);
379 
380 	do {
381 		txm_ret = txm_kernel_call_internal(parameters, args);
382 		if (txm_ret.returnCode == kTXMReturnOutOfMemory) {
383 			if (parameters->selector == kTXMKernelSelectorAddFreeListPage) {
384 				panic("received out-of-memory error when adding a free page to TXM");
385 			}
386 			txm_add_page();
387 		}
388 	} while (txm_ret.returnCode == kTXMReturnOutOfMemory);
389 
390 	/* Clean up the variadic arguments list */
391 	va_end(args);
392 
393 	/* Print all TXM logs from the log buffer */
394 	if (parameters->skip_logs == false) {
395 		txm_print_logs();
396 	}
397 
398 	/* Print the return code from TXM -- only prints for an error */
399 	if (parameters->failure_silent != true) {
400 		if (parameters->failure_code_silent != txm_ret.returnCode) {
401 			txm_print_return(parameters->selector, txm_ret);
402 		}
403 	}
404 
405 	/*
406 	 * To ease the process of calling into TXM, and to also reduce the number of
407 	 * lines of code for each call site, the txm_call_t offers some properties
408 	 * we can enforce over here. Go through these, and panic in case they aren't
409 	 * honored.
410 	 *
411 	 * NOTE: We check for "<" instead of "!=" for the number of return words we
412 	 * get back from TXM since this helps in forward development. If the kernel
413 	 * and TXM are proceeding at different project cadences, we do not want to
414 	 * gate adding more return words from TXM on the kernel first adopting the
415 	 * new number of return words.
416 	 */
417 	ret = txm_parse_return(txm_ret);
418 
419 	if (parameters->failure_fatal && (ret != KERN_SUCCESS)) {
420 		panic("received fatal error for a selector from TXM: selector: %u | 0x%0llX",
421 		    parameters->selector, txm_ret.rawValue);
422 	} else if (parameters->num_return_words < parameters->num_output_args) {
423 		/* Only panic if return was a success */
424 		if (ret == KERN_SUCCESS) {
425 			panic("received fewer than expected return words from TXM: selector: %u | %llu",
426 			    parameters->selector, parameters->num_return_words);
427 		}
428 	}
429 
430 	return ret;
431 }
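/*
 * Illustrative call-site sketch (mirrors get_logging_info() further below; not
 * a new API):
 *
 *     txm_call_t txm_call = {
 *         .selector = kTXMKernelSelectorGetLogInfo,
 *         .failure_fatal = true,
 *         .num_output_args = 3
 *     };
 *     txm_kernel_call(&txm_call);
 *     // On success, txm_call.return_words[0..2] hold the values returned by TXM.
 */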
432 
433 void
434 txm_transfer_region(
435 	vm_address_t addr,
436 	vm_size_t size)
437 {
438 	vm_address_t addr_end = 0;
439 	vm_size_t size_aligned = round_page(size);
440 
441 	if ((addr & PAGE_MASK) != 0) {
442 		panic("attempted to transfer non-page-aligned memory to TXM: %p", (void*)addr);
443 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
444 		panic("overflow on range to be transferred to TXM: %p | %lu",
445 		    (void*)addr, size);
446 	}
447 
448 	/* Make the memory read-only first (transfer will panic otherwise) */
449 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ);
450 
451 	/* Transfer each physical page to be TXM_DEFAULT */
452 	for (vm_address_t page = addr; page < addr_end; page += PAGE_SIZE) {
453 		pmap_txm_transfer_page(page);
454 	}
455 }
456 
457 void
458 txm_reclaim_region(
459 	vm_address_t addr,
460 	vm_size_t size)
461 {
462 	vm_address_t addr_end = 0;
463 	vm_size_t size_aligned = round_page(size);
464 
465 	if ((addr & PAGE_MASK) != 0) {
466 		panic("attempted to reclaim non-page-aligned memory from TXM: %p", (void*)addr);
467 	} else if (os_add_overflow(addr, size_aligned, &addr_end)) {
468 		panic("overflow on range to be reclaimed from TXM: %p | %lu",
469 		    (void*)addr, size);
470 	}
471 
472 	/*
473 	 * We can only reclaim once TXM has transferred the memory range back to the
474 	 * kernel. Hence, we simply try and switch permissions to read-write. If TXM
475 	 * hasn't transferred pages, this then should panic.
476 	 */
477 	vm_protect(kernel_map, addr, size_aligned, false, VM_PROT_READ | VM_PROT_WRITE);
478 }
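/*
 * Illustrative transfer/reclaim lifecycle (a sketch based on
 * txm_register_provisioning_profile() below; not compiled code):
 *
 *     kmem_alloc(kernel_map, &addr, size, KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
 *     memcpy((void*)addr, blob, size);
 *     txm_transfer_region(addr, size);    // pages become read-only and TXM-owned
 *     ... TXM uses the memory ...
 *     txm_reclaim_region(addr, size);     // only once TXM has handed the pages back
 *     kmem_free(kernel_map, addr, size);
 */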
479 
480 static SECURITY_READ_ONLY_LATE(const char*) txm_log_page = NULL;
481 static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_head = NULL;
482 static SECURITY_READ_ONLY_LATE(const uint32_t*) txm_log_sync = NULL;
483 
484 static decl_lck_mtx_data(, log_lock);
485 static uint32_t log_head = 0;
486 
487 void
488 txm_print_logs(void)
489 {
490 	uint32_t start_index = 0;
491 	uint32_t end_index = 0;
492 
493 	/*
494 	 * The design here is very simple. TXM keeps adding slots to its circular buffer
495 	 * and the kernel attempts to read each one and print it, maintaining its own head
496 	 * for the log.
497 	 *
498 	 * This design is by nature lazy. TXM doesn't know or care if the kernel has gone
499 	 * through and printed any of the logs, so it'll just keep writing into its buffer
500 	 * and then circle around when it becomes full.
501 	 *
502 	 * This is fine most of the time since there are a decent number of slots in the
503 	 * log buffer. Problems mostly arise when TXM is adding logs so quickly that it
504 	 * wraps around and starts overwriting logs which haven't been seen by the
505 	 * kernel. If this were to happen, TXM's log head may lap the head maintained
506 	 * by the kernel, causing a lot of logs to be missed, since the kernel only
507 	 * prints the slots in between the two heads.
508 	 *
509 	 * The fix for that is complicated, and until we see an actual impact, we're going
510 	 * to keep the simpler design in place.
511 	 */
512 
513 	/* Return if the logging hasn't been setup yet */
514 	if (txm_log_sync == NULL) {
515 		return;
516 	}
517 
518 	/*
519 	 * Holding the log lock and printing can cause lots of issues since printing can
520 	 * be rather slow. While we make it a point to keep the logging buffer quiet, some
521 	 * actions (such as loading trust caches) are still very chatty.
522 	 *
523 	 * As a result, we optimize this routine to ensure that the lock itself isn't held
524 	 * for very long. All we need to do within the critical section is calculate the
525 	 * starting and ending index of the log buffer. The actual printing doesn't need
526 	 * to be done with the lock held.
527 	 */
528 	lck_mtx_lock(&log_lock);
529 
530 	start_index = log_head;
531 	end_index = os_atomic_load(txm_log_head, relaxed) % kTXMLogSlots;
532 
533 	/* Update the log head with the new index */
534 	log_head = end_index;
535 
536 	/* Release the log lock */
537 	lck_mtx_unlock(&log_lock);
538 
539 	if (start_index != end_index) {
540 		/* Use load acquire here to sync up with all writes to the buffer */
541 		os_atomic_load(txm_log_sync, acquire);
542 
543 		while (start_index != end_index) {
544 			const char *slot = txm_log_page + (start_index * kTXMLogSlotSize);
545 
546 			/* We add newlines after each log statement since TXM does not */
547 			printf("%s\n", slot);
548 
549 			start_index = (start_index + 1) % kTXMLogSlots;
550 		}
551 	}
552 }
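/*
 * Worked example of the index arithmetic above (assuming, for illustration,
 * kTXMLogSlots == 4): with log_head == 3 and TXM's head at 1, the kernel prints
 * slots 3 and 0 and stores 1 as the new log_head. Indices advance modulo
 * kTXMLogSlots, so a fast-writing TXM can lap the kernel and overwrite unread
 * slots, as described in the comment at the top of this function.
 */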
553 
554 #pragma mark Initialization
555 
556 SECURITY_READ_ONLY_LATE(const TXMReadWriteData_t*) txm_rw_data = NULL;
557 SECURITY_READ_ONLY_LATE(const TXMReadOnlyData_t*) txm_ro_data = NULL;
558 SECURITY_READ_ONLY_LATE(const CSConfig_t*) txm_cs_config = NULL;
559 SECURITY_READ_ONLY_LATE(CSRestrictedModeState_t*) txm_restricted_mode_state = NULL;
560 SECURITY_READ_ONLY_LATE(const TXMMetrics_t*) txm_metrics = NULL;
561 
562 SECURITY_READ_ONLY_LATE(bool*) developer_mode_enabled = NULL;
563 static SECURITY_READ_ONLY_LATE(bool) code_signing_enabled = true;
564 static SECURITY_READ_ONLY_LATE(uint32_t) managed_signature_size = 0;
565 
566 static decl_lck_mtx_data(, compilation_service_lock);
567 static decl_lck_mtx_data(, unregister_sync_lock);
568 
569 static void
570 get_logging_info(void)
571 {
572 	txm_call_t txm_call = {
573 		.selector = kTXMKernelSelectorGetLogInfo,
574 		.failure_fatal = true,
575 		.num_output_args = 3
576 	};
577 	txm_kernel_call(&txm_call);
578 
579 	txm_log_page = (const char*)txm_call.return_words[0];
580 	txm_log_head = (const uint32_t*)txm_call.return_words[1];
581 	txm_log_sync = (const uint32_t*)txm_call.return_words[2];
582 }
583 
584 static void
585 get_code_signing_info(void)
586 {
587 	txm_call_t txm_call = {
588 		.selector = kTXMKernelSelectorGetCodeSigningInfo,
589 		.failure_fatal = true,
590 		.num_output_args = 6
591 	};
592 	txm_kernel_call(&txm_call);
593 
594 	/*
595 	 * Not using txm_call.return_words[0] for now. This was previously the
596 	 * code_signing_enabled field, but we've since switched to acquiring that
597 	 * value from TXM's read-only data.
598 	 *
599 	 * Not using txm_call.return_words[2] for now. This was previously the
600 	 * metrics field, but we've since switched to acquiring that value from
601 	 * TXM's read-write data.
602 	 *
603 	 * Not using txm_call.return_words[4] for now. This was previously the
604 	 * txm_cs_config field, but we've since switched to acquiring that value
605 	 * from TXM's read-only data.
606 	 */
607 	txm_rw_data = (TXMReadWriteData_t*)txm_call.return_words[0];
608 	developer_mode_enabled = (bool*)txm_call.return_words[1];
609 	managed_signature_size = (uint32_t)txm_call.return_words[3];
610 	txm_ro_data = (TXMReadOnlyData_t*)txm_call.return_words[5];
611 	txm_metrics = &txm_rw_data->metrics;
612 
613 	/* Set code_signing_enabled based on read-only data */
614 	code_signing_enabled = txm_ro_data->codeSigningDisabled == false;
615 
616 	/* Set txm_cs_config based on read-only data */
617 	txm_cs_config = &txm_ro_data->CSConfiguration;
618 
619 	/* Only setup when REM is supported on the platform */
620 	if (txm_cs_config->systemPolicy->featureSet.restrictedExecutionMode == true) {
621 		txm_restricted_mode_state = txm_ro_data->restrictedModeState;
622 	}
623 
624 	/* Setup the number of boot trust caches */
625 	num_static_trust_caches = os_atomic_load(&txm_metrics->trustCaches.numStatic, relaxed);
626 	num_engineering_trust_caches = os_atomic_load(&txm_metrics->trustCaches.numEngineering, relaxed);
627 }
628 
629 static void
630 set_shared_region_base_address(void)
631 {
632 	txm_call_t txm_call = {
633 		.selector = kTXMKernelSelectorSetSharedRegionBaseAddress,
634 		.failure_fatal = true,
635 		.num_input_args = 2,
636 	};
637 
638 	txm_kernel_call(&txm_call,
639 	    SHARED_REGION_BASE,
640 	    SHARED_REGION_SIZE);
641 }
642 
643 void
644 code_signing_init(void)
645 {
646 	printf("libTXM_KernelVersion: %u\n", libTrustedExecutionMonitor_KernelVersion);
647 	printf("libTXM_Image4Version: %u\n", libTrustedExecutionMonitor_Image4Version);
648 
649 	/* Setup the thread stacks used by TXM */
650 	setup_thread_stacks();
651 
652 	/* Setup the logging lock */
653 	lck_mtx_init(&log_lock, &txm_lck_grp, 0);
654 
655 	/* Setup TXM logging information */
656 	get_logging_info();
657 
658 	/* Setup code signing configuration */
659 	get_code_signing_info();
660 
661 	/* Setup all the other locks we need */
662 	lck_mtx_init(&compilation_service_lock, &txm_lck_grp, 0);
663 	lck_mtx_init(&unregister_sync_lock, &txm_lck_grp, 0);
664 
665 	/*
666 	 * We need to let TXM know what the shared region base address is going
667 	 * to be for this boot.
668 	 */
669 	set_shared_region_base_address();
670 
671 	/* Require signed code when monitor is enabled */
672 	if (code_signing_enabled == true) {
673 		cs_debug_fail_on_unsigned_code = 1;
674 	}
675 }
676 
677 void
678 txm_enter_lockdown_mode(void)
679 {
680 	txm_call_t txm_call = {
681 		.selector = kTXMKernelSelectorEnterLockdownMode,
682 		.failure_fatal = true,
683 	};
684 	txm_kernel_call(&txm_call);
685 }
686 
687 kern_return_t
688 txm_secure_channel_shared_page(
689 	uint64_t *secure_channel_phys,
690 	size_t *secure_channel_size)
691 {
692 #if kTXMKernelAPIVersion >= 5
693 	txm_call_t txm_call = {
694 		.selector = kTXMKernelSelectorGetSecureChannelAddr,
695 		.num_output_args = 2
696 	};
697 
698 	kern_return_t ret = txm_kernel_call(&txm_call);
699 	if (ret == KERN_NOT_SUPPORTED) {
700 		return ret;
701 	} else if (ret != KERN_SUCCESS) {
702 		panic("unexpected failure for TXM secure channel: %d", ret);
703 	}
704 
705 	/* Return the physical address */
706 	if (secure_channel_phys != NULL) {
707 		*secure_channel_phys = txm_call.return_words[0];
708 	}
709 
710 	/* Return the size */
711 	if (secure_channel_size != NULL) {
712 		*secure_channel_size = txm_call.return_words[1];
713 	}
714 
715 	return KERN_SUCCESS;
716 #else
717 	(void)secure_channel_phys;
718 	(void)secure_channel_size;
719 	return KERN_NOT_SUPPORTED;
720 #endif
721 }
722 
723 #pragma mark Developer Mode
724 
725 void
726 txm_toggle_developer_mode(bool state)
727 {
728 	txm_call_t txm_call = {
729 		.selector = kTXMKernelSelectorDeveloperModeToggle,
730 		.failure_fatal = true,
731 		.num_input_args = 1
732 	};
733 
734 	txm_kernel_call(&txm_call, state);
735 }
736 
737 #pragma mark Restricted Execution Mode
738 
739 kern_return_t
740 txm_rem_enable(void)
741 {
742 	txm_call_t txm_call = {
743 		.selector = kTXMKernelSelectorEnableRestrictedMode
744 	};
745 	return txm_kernel_call(&txm_call);
746 }
747 
748 kern_return_t
749 txm_rem_state(void)
750 {
751 	if (txm_restricted_mode_state == NULL) {
752 		return KERN_NOT_SUPPORTED;
753 	}
754 
755 	CSReturn_t cs_ret = restrictedModeStatus(txm_restricted_mode_state);
756 	if (cs_ret.error == kCSReturnSuccess) {
757 		return KERN_SUCCESS;
758 	}
759 	return KERN_DENIED;
760 }
761 
762 #pragma mark Device State
763 
764 void
765 txm_update_device_state(void)
766 {
767 #if kTXMKernelAPIVersion >= 6
768 	txm_call_t txm_call = {
769 		.selector = kTXMSelectorUpdateDeviceState,
770 		.failure_fatal = true
771 	};
772 	txm_kernel_call(&txm_call);
773 #endif
774 }
775 
776 void
777 txm_complete_security_boot_mode(
778 	__unused uint32_t security_boot_mode)
779 {
780 #if kTXMKernelAPIVersion >= 6
781 	txm_call_t txm_call = {
782 		.selector = kTXMSelectorCompleteSecurityBootMode,
783 		.num_input_args = 1,
784 		.failure_fatal = true
785 	};
786 	txm_kernel_call(&txm_call, security_boot_mode);
787 #endif
788 }
789 
790 #pragma mark Code Signing and Provisioning Profiles
791 
792 bool
793 txm_code_signing_enabled(void)
794 {
795 	return code_signing_enabled;
796 }
797 
798 vm_size_t
799 txm_managed_code_signature_size(void)
800 {
801 	return managed_signature_size;
802 }
803 
804 kern_return_t
805 txm_register_provisioning_profile(
806 	const void *profile_blob,
807 	const size_t profile_blob_size,
808 	void **profile_obj)
809 {
810 	txm_call_t txm_call = {
811 		.selector = kTXMKernelSelectorRegisterProvisioningProfile,
812 		.num_input_args = 2,
813 		.num_output_args = 1
814 	};
815 	vm_address_t payload_addr = 0;
816 	kern_return_t ret = KERN_DENIED;
817 
818 	/* We need to allocate page-wise in order to transfer the range to TXM */
819 	ret = kmem_alloc(kernel_map, &payload_addr, profile_blob_size,
820 	    KMA_KOBJECT | KMA_DATA, VM_KERN_MEMORY_SECURITY);
821 	if (ret != KERN_SUCCESS) {
822 		printf("unable to allocate memory for profile payload: %d\n", ret);
823 		goto exit;
824 	}
825 
826 	/* Copy the contents into the allocation */
827 	memcpy((void*)payload_addr, profile_blob, profile_blob_size);
828 
829 	/* Transfer the memory range to TXM */
830 	txm_transfer_region(payload_addr, profile_blob_size);
831 
832 	ret = txm_kernel_call(&txm_call, payload_addr, profile_blob_size);
833 	if (ret == KERN_SUCCESS) {
834 		*profile_obj = (void*)txm_call.return_words[0];
835 	}
836 
837 exit:
838 	if ((ret != KERN_SUCCESS) && (payload_addr != 0)) {
839 		/* Reclaim this memory range */
840 		txm_reclaim_region(payload_addr, profile_blob_size);
841 
842 		/* Free the memory range */
843 		kmem_free(kernel_map, payload_addr, profile_blob_size);
844 		payload_addr = 0;
845 	}
846 
847 	return ret;
848 }
849 
850 kern_return_t
851 txm_trust_provisioning_profile(
852 	__unused void *profile_obj,
853 	__unused const void *sig_data,
854 	__unused size_t sig_size)
855 {
856 #if kTXMKernelAPIVersion >= 7
857 	txm_call_t txm_call = {
858 		.selector = kTXMKernelSelectorTrustProvisioningProfile,
859 		.num_input_args = 3
860 	};
861 
862 	return txm_kernel_call(&txm_call, profile_obj, sig_data, sig_size);
863 #else
864 	/* The TXM selector hasn't yet landed */
865 	return KERN_SUCCESS;
866 #endif
867 }
868 
869 kern_return_t
870 txm_unregister_provisioning_profile(
871 	void *profile_obj)
872 {
873 	txm_call_t txm_call = {
874 		.selector = kTXMKernelSelectorUnregisterProvisioningProfile,
875 		.num_input_args = 1,
876 		.num_output_args = 2
877 	};
878 	vm_address_t profile_addr = 0;
879 	vm_size_t profile_size = 0;
880 	kern_return_t ret = KERN_DENIED;
881 
882 	ret = txm_kernel_call(&txm_call, profile_obj);
883 	if (ret != KERN_SUCCESS) {
884 		return ret;
885 	}
886 
887 	profile_addr = txm_call.return_words[0];
888 	profile_size = txm_call.return_words[1];
889 
890 	/* Reclaim this memory range */
891 	txm_reclaim_region(profile_addr, profile_size);
892 
893 	/* Free the memory range */
894 	kmem_free(kernel_map, profile_addr, profile_size);
895 
896 	return KERN_SUCCESS;
897 }
898 
899 kern_return_t
900 txm_associate_provisioning_profile(
901 	void *sig_obj,
902 	void *profile_obj)
903 {
904 	txm_call_t txm_call = {
905 		.selector = kTXMKernelSelectorAssociateProvisioningProfile,
906 		.num_input_args = 2,
907 	};
908 
909 	return txm_kernel_call(&txm_call, sig_obj, profile_obj);
910 }
911 
912 kern_return_t
913 txm_disassociate_provisioning_profile(
914 	void *sig_obj)
915 {
916 	txm_call_t txm_call = {
917 		.selector = kTXMKernelSelectorDisassociateProvisioningProfile,
918 		.num_input_args = 1,
919 	};
920 
921 	/*
922 	 * Take the unregistration sync lock.
923 	 * For more information: rdar://99205627.
924 	 */
925 	lck_mtx_lock(&unregister_sync_lock);
926 
927 	/* Disassociate the profile from the signature */
928 	kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);
929 
930 	/* Release the unregistration sync lock */
931 	lck_mtx_unlock(&unregister_sync_lock);
932 
933 	return ret;
934 }
935 
936 void
937 txm_set_compilation_service_cdhash(
938 	const uint8_t cdhash[CS_CDHASH_LEN])
939 {
940 	txm_call_t txm_call = {
941 		.selector = kTXMKernelSelectorAuthorizeCompilationServiceCDHash,
942 		.num_input_args = 1,
943 	};
944 
945 	lck_mtx_lock(&compilation_service_lock);
946 	txm_kernel_call(&txm_call, cdhash);
947 	lck_mtx_unlock(&compilation_service_lock);
948 }
949 
950 bool
951 txm_match_compilation_service_cdhash(
952 	const uint8_t cdhash[CS_CDHASH_LEN])
953 {
954 	txm_call_t txm_call = {
955 		.selector = kTXMKernelSelectorMatchCompilationServiceCDHash,
956 		.failure_silent = true,
957 		.num_input_args = 1,
958 		.num_output_args = 1,
959 	};
960 	kern_return_t ret = KERN_DENIED;
961 
962 	/* Be safe and take the lock (avoid thread collisions) */
963 	lck_mtx_lock(&compilation_service_lock);
964 	ret = txm_kernel_call(&txm_call, cdhash);
965 	lck_mtx_unlock(&compilation_service_lock);
966 
967 	if (ret == KERN_SUCCESS) {
968 		return true;
969 	}
970 	return false;
971 }
972 
973 void
974 txm_set_local_signing_public_key(
975 	const uint8_t public_key[XNU_LOCAL_SIGNING_KEY_SIZE])
976 {
977 	txm_call_t txm_call = {
978 		.selector = kTXMKernelSelectorSetLocalSigningPublicKey,
979 		.num_input_args = 1,
980 	};
981 
982 	txm_kernel_call(&txm_call, public_key);
983 }
984 
985 uint8_t*
986 txm_get_local_signing_public_key(void)
987 {
988 	txm_call_t txm_call = {
989 		.selector = kTXMKernelSelectorGetLocalSigningPublicKey,
990 		.num_output_args = 1,
991 	};
992 	kern_return_t ret = KERN_DENIED;
993 
994 	ret = txm_kernel_call(&txm_call);
995 	if (ret != KERN_SUCCESS) {
996 		return NULL;
997 	}
998 
999 	return (uint8_t*)txm_call.return_words[0];
1000 }
1001 
1002 void
1003 txm_unrestrict_local_signing_cdhash(
1004 	const uint8_t cdhash[CS_CDHASH_LEN])
1005 {
1006 	txm_call_t txm_call = {
1007 		.selector = kTXMKernelSelectorAuthorizeLocalSigningCDHash,
1008 		.num_input_args = 1,
1009 	};
1010 
1011 	txm_kernel_call(&txm_call, cdhash);
1012 }
1013 
1014 kern_return_t
1015 txm_register_code_signature(
1016 	const vm_address_t signature_addr,
1017 	const vm_size_t signature_size,
1018 	const vm_offset_t code_directory_offset,
1019 	const char *signature_path,
1020 	void **sig_obj,
1021 	vm_address_t *txm_signature_addr)
1022 {
1023 	txm_call_t txm_call = {
1024 		.selector = kTXMKernelSelectorRegisterCodeSignature,
1025 		.num_input_args = 3,
1026 		.num_output_args = 2,
1027 	};
1028 	kern_return_t ret = KERN_DENIED;
1029 
1030 	/*
1031 	 * TXM performs more exhaustive validation of the code signature and figures
1032 	 * out the best code directory to use on its own. As a result, this offset here
1033 	 * is not used.
1034 	 */
1035 	(void)code_directory_offset;
1036 
1037 	/*
1038 	 * If the signature is large enough to not fit within TXM's managed signature
1039 	 * size, then we need to transfer it over so it is owned by TXM.
1040 	 */
1041 	if (signature_size > txm_managed_code_signature_size()) {
1042 		txm_transfer_region(signature_addr, signature_size);
1043 	}
1044 
1045 	ret = txm_kernel_call(
1046 		&txm_call,
1047 		signature_addr,
1048 		signature_size,
1049 		signature_path);
1050 
1051 	if (ret != KERN_SUCCESS) {
1052 		goto exit;
1053 	}
1054 
1055 	*sig_obj = (void*)txm_call.return_words[0];
1056 	*txm_signature_addr = txm_call.return_words[1];
1057 
1058 exit:
1059 	if ((ret != KERN_SUCCESS) && (signature_size > txm_managed_code_signature_size())) {
1060 		txm_reclaim_region(signature_addr, signature_size);
1061 	}
1062 
1063 	return ret;
1064 }
1065 
1066 kern_return_t
1067 txm_unregister_code_signature(
1068 	void *sig_obj)
1069 {
1070 	txm_call_t txm_call = {
1071 		.selector = kTXMKernelSelectorUnregisterCodeSignature,
1072 		.failure_fatal = true,
1073 		.num_input_args = 1,
1074 		.num_output_args = 2,
1075 	};
1076 	TXMCodeSignature_t *cs_obj = sig_obj;
1077 	vm_address_t signature_addr = 0;
1078 	vm_size_t signature_size = 0;
1079 	bool txm_managed = false;
1080 
1081 	/*
1082 	 * Unregistering a code signature can cause lock contention in TXM against a
1083 	 * set of other functions. The unregistration operation is very common when the
1084 	 * system is about to reboot because the VFS layer unmounts all volumes.
1085 	 *
1086 	 * In order to avoid this issue, we detect if the code signature in question
1087 	 * has been mapped in other address spaces, and if so, we avoid unregistering
1088 	 * the code signature when we're about to shut down. This leaks memory, but
1089 	 * we're about to shut down.
1090 	 */
1091 	if ((cs_obj->referenceCount > 0) && (get_system_inshutdown() != 0)) {
1092 		printf("TXM [XNU]: unregistration of signature skipped as system is in shutdown\n");
1093 		return KERN_ABORTED;
1094 	}
1095 
1096 	/* Check if the signature memory is TXM managed */
1097 	txm_managed = cs_obj->sptmType != TXM_BULK_DATA;
1098 
1099 	/*
1100 	 * Take the unregistration sync lock.
1101 	 * For more information: rdar://99205627.
1102 	 */
1103 	lck_mtx_lock(&unregister_sync_lock);
1104 
1105 	/* Unregister the signature from TXM -- cannot fail */
1106 	txm_kernel_call(&txm_call, sig_obj);
1107 
1108 	/* Release the unregistration sync lock */
1109 	lck_mtx_unlock(&unregister_sync_lock);
1110 
1111 	signature_addr = txm_call.return_words[0];
1112 	signature_size = txm_call.return_words[1];
1113 
1114 	/* Reclaim the memory range in case we need to */
1115 	if (txm_managed == false) {
1116 		txm_reclaim_region(signature_addr, signature_size);
1117 	}
1118 
1119 	return KERN_SUCCESS;
1120 }
1121 
1122 kern_return_t
1123 txm_verify_code_signature(
1124 	void *sig_obj,
1125 	uint32_t *trust_level)
1126 {
1127 	txm_call_t txm_call = {
1128 		.selector = kTXMKernelSelectorValidateCodeSignature,
1129 		.num_input_args = 1,
1130 	};
1131 	kern_return_t ret = txm_kernel_call(&txm_call, sig_obj);
1132 
1133 	if ((ret == KERN_SUCCESS) && (trust_level != NULL)) {
1134 		/*
1135 		 * Absolutely gross, but it's not worth linking all of libCodeSignature just for
1136 		 * this simple change. We should either return the trust level from TXM, or when
1137 		 * we adopt libCodeSignature more broadly, then use an accessor function.
1138 		 */
1139 		*trust_level = ((TXMCodeSignature_t*)sig_obj)->sig.trustLevel;
1140 	}
1141 	return ret;
1142 }
1143 
1144 kern_return_t
1145 txm_reconstitute_code_signature(
1146 	void *sig_obj,
1147 	vm_address_t *unneeded_addr,
1148 	vm_size_t *unneeded_size)
1149 {
1150 	txm_call_t txm_call = {
1151 		.selector = kTXMKernelSelectorReconstituteCodeSignature,
1152 		.failure_fatal = true,
1153 		.num_input_args = 1,
1154 		.num_output_args = 2,
1155 	};
1156 	vm_address_t return_addr = 0;
1157 	vm_size_t return_size = 0;
1158 
1159 	/* Reconstitute the code signature -- cannot fail */
1160 	txm_kernel_call(&txm_call, sig_obj);
1161 
1162 	return_addr = txm_call.return_words[0];
1163 	return_size = txm_call.return_words[1];
1164 
1165 	/* Reclaim the memory region if we need to */
1166 	if ((return_addr != 0) && (return_size != 0)) {
1167 		txm_reclaim_region(return_addr, return_size);
1168 	}
1169 
1170 	*unneeded_addr = return_addr;
1171 	*unneeded_size = return_size;
1172 
1173 	return KERN_SUCCESS;
1174 }
1175 
1176 #pragma mark Address Spaces
1177 
1178 kern_return_t
1179 txm_register_address_space(
1180 	pmap_t pmap,
1181 	uint16_t addr_space_id,
1182 	TXMAddressSpaceFlags_t flags)
1183 {
1184 	txm_call_t txm_call = {
1185 		.selector = kTXMKernelSelectorRegisterAddressSpace,
1186 		.failure_fatal = true,
1187 		.num_input_args = 2,
1188 		.num_output_args = 1,
1189 	};
1190 	TXMAddressSpace_t *txm_addr_space = NULL;
1191 
1192 	/* Register the address space -- cannot fail */
1193 	txm_kernel_call(&txm_call, addr_space_id, flags);
1194 
1195 	/* Set the address space object within the PMAP */
1196 	txm_addr_space = (TXMAddressSpace_t*)txm_call.return_words[0];
1197 	pmap_txm_set_addr_space(pmap, txm_addr_space);
1198 
1199 	return KERN_SUCCESS;
1200 }
1201 
1202 kern_return_t
1203 txm_unregister_address_space(
1204 	pmap_t pmap)
1205 {
1206 	txm_call_t txm_call = {
1207 		.selector = kTXMKernelSelectorUnregisterAddressSpace,
1208 		.failure_fatal = true,
1209 		.num_input_args = 1,
1210 	};
1211 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1212 
1213 	/*
1214 	 * Take the unregistration sync lock.
1215 	 * For more information: rdar://99205627.
1216 	 */
1217 	lck_mtx_lock(&unregister_sync_lock);
1218 
1219 	/* Unregister the address space -- cannot fail */
1220 	txm_kernel_call(&txm_call, txm_addr_space);
1221 
1222 	/* Release the unregistration sync lock */
1223 	lck_mtx_unlock(&unregister_sync_lock);
1224 
1225 	/* Remove the address space from the pmap */
1226 	pmap_txm_set_addr_space(pmap, NULL);
1227 
1228 	return KERN_SUCCESS;
1229 }
1230 
1231 kern_return_t
1232 txm_associate_code_signature(
1233 	pmap_t pmap,
1234 	void *sig_obj,
1235 	const vm_address_t region_addr,
1236 	const vm_size_t region_size,
1237 	const vm_offset_t region_offset)
1238 {
1239 	txm_call_t txm_call = {
1240 		.selector = kTXMKernelSelectorAssociateCodeSignature,
1241 		.num_input_args = 5,
1242 	};
1243 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1244 	kern_return_t ret = KERN_DENIED;
1245 
1246 	/*
1247 	 * Associating a code signature may require exclusive access to the TXM address
1248 	 * space lock within TXM.
1249 	 */
1250 	pmap_txm_acquire_exclusive_lock(pmap);
1251 
1252 	/*
1253 	 * If the address space in question is a nested address space, then all associations
1254 	 * need to go into the shared region base range. The VM layer is inconsistent with
1255 	 * how it makes associations with TXM vs. how it maps pages into the shared region.
1256 	 *
1257 	 * For TXM, the associations are made without taking the base range into account,
1258 	 * but when mappings are entered into the shared region, the base range is taken
1259 	 * into account. To normalize this, we add the base range address here.
1260 	 */
1261 	vm_address_t adjusted_region_addr = region_addr;
1262 	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeSharedRegion) {
1263 		adjusted_region_addr += SHARED_REGION_BASE;
1264 	}
1265 
1266 	/*
1267 	 * The VM tries a bunch of weird mappings within launchd for some platform code
1268 	 * which isn't mapped contiguously. These mappings don't succeed, but the failure
1269 	 * is fairly harmless since everything seems to work. However, since the call to
1270 	 * TXM fails, we make a series of logs. Hence, for launchd, we suppress failure
1271 	 * logs.
1272 	 */
1273 	if (txm_addr_space->addrSpaceID.type == kTXMAddressSpaceIDTypeAddressSpace) {
1274 		/* TXMTODO: Scope this to launchd better */
1275 		txm_call.failure_code_silent = kTXMReturnPlatformCodeMapping;
1276 	}
1277 
1278 	/* Check if the main region has been set on the address space */
1279 	bool main_region_set = txm_addr_space->mainRegion != NULL;
1280 	bool main_region_set_after = false;
1281 
1282 	ret = txm_kernel_call(
1283 		&txm_call,
1284 		txm_addr_space,
1285 		sig_obj,
1286 		adjusted_region_addr,
1287 		region_size,
1288 		region_offset);
1289 
1290 	while (ret == KERN_OPERATION_TIMED_OUT) {
1291 		/*
1292 		 * There is no easy method to sleep in the kernel. This operation has the
1293 		 * potential to burn CPU cycles, but that is alright since we don't actually
1294 		 * ever expect to enter this case on legitimately operating systems.
1295 		 */
1296 		ret = txm_kernel_call(
1297 			&txm_call,
1298 			txm_addr_space,
1299 			sig_obj,
1300 			adjusted_region_addr,
1301 			region_size,
1302 			region_offset);
1303 	}
1304 
1305 	/*
1306 	 * If the main region wasn't set on the address space beforehand, but this new
1307 	 * call into TXM was successful and sets the main region, it means this signature
1308 	 * object is associated with the main region on the address space. With this, we
1309 	 * can now set the appropriate trust level on the PMAP.
1310 	 */
1311 	if (ret == KERN_SUCCESS) {
1312 		main_region_set_after = txm_addr_space->mainRegion != NULL;
1313 	}
1314 
1315 	/* Unlock the TXM address space lock */
1316 	pmap_txm_release_exclusive_lock(pmap);
1317 
1318 	/* Check if we should set the trust level on the PMAP */
1319 	if (!main_region_set && main_region_set_after) {
1320 		const TXMCodeSignature_t *cs_obj = sig_obj;
1321 		const SignatureValidation_t *sig = &cs_obj->sig;
1322 
1323 		/*
1324 		 * This is gross, as we're dereferencing into a private data structure type.
1325 		 * There are 2 ways to clean this up in the future:
1326 		 * 1. Import libCodeSignature, so we can use "codeSignatureGetTrustLevel".
1327 		 * 2. Cache the trust level on the address space within TXM and then use it.
1328 		 */
1329 		pmap_txm_set_trust_level(pmap, sig->trustLevel);
1330 	}
1331 
1332 	return ret;
1333 }
1334 
1335 kern_return_t
1336 txm_allow_jit_region(
1337 	pmap_t pmap)
1338 {
1339 	txm_call_t txm_call = {
1340 		.selector = kTXMKernelSelectorAllowJITRegion,
1341 		.num_input_args = 1,
1342 	};
1343 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1344 	kern_return_t ret = KERN_DENIED;
1345 
1346 	pmap_txm_acquire_shared_lock(pmap);
1347 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1348 	pmap_txm_release_shared_lock(pmap);
1349 
1350 	return ret;
1351 }
1352 
1353 kern_return_t
1354 txm_associate_jit_region(
1355 	pmap_t pmap,
1356 	const vm_address_t region_addr,
1357 	const vm_size_t region_size)
1358 {
1359 	txm_call_t txm_call = {
1360 		.selector = kTXMKernelSelectorAssociateJITRegion,
1361 		.num_input_args = 3,
1362 	};
1363 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1364 	kern_return_t ret = KERN_DENIED;
1365 
1366 	/*
1367 	 * Associating a JIT region may require exclusive access to the TXM address
1368 	 * space lock within TXM.
1369 	 */
1370 	pmap_txm_acquire_exclusive_lock(pmap);
1371 
1372 	ret = txm_kernel_call(
1373 		&txm_call,
1374 		txm_addr_space,
1375 		region_addr,
1376 		region_size);
1377 
1378 	/* Unlock the TXM address space lock */
1379 	pmap_txm_release_exclusive_lock(pmap);
1380 
1381 	return ret;
1382 }
1383 
1384 kern_return_t
1385 txm_address_space_debugged(
1386 	pmap_t pmap)
1387 {
1388 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1389 	bool debug_regions_allowed = false;
1390 
1391 	/*
1392 	 * We do not actually need to trap into the monitor for this function for
1393 	 * now. It might be a tad bit more secure to actually trap into the monitor
1394 	 * as it implicitly verifies all of our pointers, but since this is a simple
1395 	 * state check against the address space, the real policy around it lies
1396 	 * within the kernel still, in which case entering the monitor doesn't
1397 	 * really provide much more security.
1398 	 */
1399 
1400 	pmap_txm_acquire_shared_lock(pmap);
1401 	debug_regions_allowed = os_atomic_load(&txm_addr_space->allowsInvalidCode, relaxed);
1402 	pmap_txm_release_shared_lock(pmap);
1403 
1404 	if (debug_regions_allowed == true) {
1405 		return KERN_SUCCESS;
1406 	}
1407 	return KERN_DENIED;
1408 }
1409 
1410 kern_return_t
1411 txm_associate_debug_region(
1412 	pmap_t pmap,
1413 	const vm_address_t region_addr,
1414 	const vm_size_t region_size)
1415 {
1416 #if kTXMKernelAPIVersion >= 10
1417 	txm_call_t txm_call = {
1418 		.selector = kTXMKernelSelectorAssociateDebugRegion,
1419 		.num_input_args = 3,
1420 	};
1421 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1422 	kern_return_t ret = KERN_DENIED;
1423 
1424 	/*
1425 	 * Associating a debug region may require exclusive access to the TXM address
1426 	 * space lock within TXM.
1427 	 */
1428 	pmap_txm_acquire_exclusive_lock(pmap);
1429 
1430 	ret = txm_kernel_call(
1431 		&txm_call,
1432 		txm_addr_space,
1433 		region_addr,
1434 		region_size);
1435 
1436 	/* Unlock the TXM address space lock */
1437 	pmap_txm_release_exclusive_lock(pmap);
1438 
1439 	return ret;
1440 #else
1441 	/*
1442 	 * This function is an interesting one. There is no need for us to make
1443 	 * a call into TXM for this one; instead, all we need to do here is
1444 	 * verify whether the TXM address space actually allows debug regions to
1445 	 * be mapped in.
1446 	 */
1447 	(void)region_addr;
1448 	(void)region_size;
1449 
1450 	kern_return_t ret = txm_address_space_debugged(pmap);
1451 	if (ret != KERN_SUCCESS) {
1452 		printf("address space does not allow creating debug regions\n");
1453 	}
1454 
1455 	return ret;
1456 #endif
1457 }
1458 
1459 kern_return_t
1460 txm_allow_invalid_code(
1461 	pmap_t pmap)
1462 {
1463 	txm_call_t txm_call = {
1464 		.selector = kTXMKernelSelectorAllowInvalidCode,
1465 		.num_input_args = 1,
1466 	};
1467 	TXMAddressSpace_t *txm_addr_space = pmap_txm_addr_space(pmap);
1468 	kern_return_t ret = KERN_DENIED;
1469 
1470 	/*
1471 	 * Allowing invalid code may require exclusive access to the TXM address
1472 	 * space lock within TXM.
1473 	 */
1474 
1475 	pmap_txm_acquire_exclusive_lock(pmap);
1476 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1477 	pmap_txm_release_exclusive_lock(pmap);
1478 
1479 	return ret;
1480 }
1481 
1482 kern_return_t
1483 txm_get_trust_level_kdp(
1484 	pmap_t pmap,
1485 	uint32_t *trust_level)
1486 {
1487 	CSTrust_t txm_trust_level = kCSTrustUntrusted;
1488 
1489 	kern_return_t ret = pmap_txm_get_trust_level_kdp(pmap, &txm_trust_level);
1490 	if (ret != KERN_SUCCESS) {
1491 		return ret;
1492 	}
1493 
1494 	if (trust_level != NULL) {
1495 		*trust_level = txm_trust_level;
1496 	}
1497 	return KERN_SUCCESS;
1498 }
1499 
1500 kern_return_t
1501 txm_get_jit_address_range_kdp(
1502 	pmap_t pmap,
1503 	uintptr_t *jit_region_start,
1504 	uintptr_t *jit_region_end)
1505 {
1506 	return pmap_txm_get_jit_address_range_kdp(pmap, jit_region_start, jit_region_end);
1507 }
1508 
1509 kern_return_t
1510 txm_address_space_exempt(
1511 	const pmap_t pmap)
1512 {
1513 	if (pmap_performs_stage2_translations(pmap) == true) {
1514 		return KERN_SUCCESS;
1515 	}
1516 
1517 	return KERN_DENIED;
1518 }
1519 
1520 kern_return_t
1521 txm_fork_prepare(
1522 	pmap_t old_pmap,
1523 	pmap_t new_pmap)
1524 {
1525 	/*
1526 	 * We'll add support for this as the need for it becomes more important.
1527 	 * TXMTODO: Complete this implementation.
1528 	 */
1529 	(void)old_pmap;
1530 	(void)new_pmap;
1531 
1532 	return KERN_SUCCESS;
1533 }
1534 
1535 kern_return_t
1536 txm_acquire_signing_identifier(
1537 	const void *sig_obj,
1538 	const char **signing_id)
1539 {
1540 	txm_call_t txm_call = {
1541 		.selector = kTXMKernelSelectorAcquireSigningIdentifier,
1542 		.num_input_args = 1,
1543 		.num_output_args = 1,
1544 		.failure_fatal = true,
1545 	};
1546 
1547 	/* Get the signing ID -- should not fail */
1548 	txm_kernel_call(&txm_call, sig_obj);
1549 
1550 	if (signing_id != NULL) {
1551 		*signing_id = (const char*)txm_call.return_words[0];
1552 	}
1553 	return KERN_SUCCESS;
1554 }
1555 
1556 #pragma mark Entitlements
1557 
1558 kern_return_t
1559 txm_associate_kernel_entitlements(
1560 	void *sig_obj,
1561 	const void *kernel_entitlements)
1562 {
1563 	txm_call_t txm_call = {
1564 		.selector = kTXMKernelSelectorAssociateKernelEntitlements,
1565 		.num_input_args = 2,
1566 		.failure_fatal = true,
1567 	};
1568 
1569 	/* Associate the kernel entitlements -- should not fail */
1570 	txm_kernel_call(&txm_call, sig_obj, kernel_entitlements);
1571 
1572 	return KERN_SUCCESS;
1573 }
1574 
1575 kern_return_t
1576 txm_resolve_kernel_entitlements(
1577 	pmap_t pmap,
1578 	const void **kernel_entitlements)
1579 {
1580 	txm_call_t txm_call = {
1581 		.selector = kTXMKernelSelectorResolveKernelEntitlementsAddressSpace,
1582 		.skip_logs = true,
1583 		.num_input_args = 1,
1584 		.num_output_args = 1,
1585 		.failure_silent = true,
1586 	};
1587 	TXMAddressSpace_t *txm_addr_space = NULL;
1588 	kern_return_t ret = KERN_DENIED;
1589 
1590 	if (pmap == pmap_txm_kernel_pmap()) {
1591 		return KERN_NOT_FOUND;
1592 	}
1593 	txm_addr_space = pmap_txm_addr_space(pmap);
1594 
1595 	pmap_txm_acquire_shared_lock(pmap);
1596 	ret = txm_kernel_call(&txm_call, txm_addr_space);
1597 	pmap_txm_release_shared_lock(pmap);
1598 
1599 	if ((ret == KERN_SUCCESS) && (kernel_entitlements != NULL)) {
1600 		*kernel_entitlements = (const void*)txm_call.return_words[0];
1601 	}
1602 	return ret;
1603 }
1604 
1605 kern_return_t
1606 txm_accelerate_entitlements(
1607 	void *sig_obj,
1608 	CEQueryContext_t *ce_ctx)
1609 {
1610 	txm_call_t txm_call = {
1611 		.selector = kTXMKernelSelectorAccelerateEntitlements,
1612 		.num_input_args = 1,
1613 		.num_output_args = 1,
1614 	};
1615 	kern_return_t ret = KERN_DENIED;
1616 
1617 	ret = txm_kernel_call(&txm_call, sig_obj);
1618 	if ((ret == KERN_SUCCESS) && (ce_ctx != NULL)) {
1619 		*ce_ctx = (CEQueryContext_t)txm_call.return_words[0];
1620 	}
1621 
1622 	return ret;
1623 }
1624 
1625 #pragma mark Image4
1626 
1627 void*
1628 txm_image4_storage_data(
1629 	__unused size_t *allocated_size)
1630 {
1631 	/*
1632 	 * AppleImage4 builds a variant for TXM, which TXM links against statically,
1633 	 * thereby removing the need for the kernel to allocate some data on behalf of
1634 	 * the kernel extension.
1635 	 */
1636 	panic("unsupported AppleImage4 interface");
1637 }
1638 
1639 void
1640 txm_image4_set_nonce(
1641 	const img4_nonce_domain_index_t ndi,
1642 	const img4_nonce_t *nonce)
1643 {
1644 	txm_call_t txm_call = {
1645 		.selector = kTXMKernelSelectorImage4SetNonce,
1646 		.failure_fatal = true,
1647 		.num_input_args = 2,
1648 	};
1649 
1650 	txm_kernel_call(&txm_call, ndi, nonce);
1651 }
1652 
1653 void
1654 txm_image4_roll_nonce(
1655 	const img4_nonce_domain_index_t ndi)
1656 {
1657 	txm_call_t txm_call = {
1658 		.selector = kTXMKernelSelectorImage4RollNonce,
1659 		.failure_fatal = true,
1660 		.num_input_args = 1,
1661 	};
1662 
1663 	txm_kernel_call(&txm_call, ndi);
1664 }
1665 
1666 errno_t
1667 txm_image4_copy_nonce(
1668 	const img4_nonce_domain_index_t ndi,
1669 	img4_nonce_t *nonce_out)
1670 {
1671 	txm_call_t txm_call = {
1672 		.selector = kTXMKernelSelectorImage4GetNonce,
1673 		.num_input_args = 1,
1674 		.num_output_args = 1,
1675 	};
1676 	const img4_nonce_t *nonce = NULL;
1677 	TXMReturn_t txm_ret = {0};
1678 	kern_return_t ret = KERN_DENIED;
1679 
1680 	ret = txm_kernel_call(&txm_call, ndi);
1681 	if (ret != KERN_SUCCESS) {
1682 		txm_ret = txm_call.txm_ret;
1683 		if (txm_ret.returnCode != kTXMReturnCodeErrno) {
1684 			return EPERM;
1685 		}
1686 		return txm_ret.errnoRet;
1687 	}
1688 
1689 	/* Acquire a pointer to the nonce from TXM */
1690 	nonce = (const img4_nonce_t*)txm_call.return_words[0];
1691 
1692 	if (nonce_out) {
1693 		*nonce_out = *nonce;
1694 	}
1695 	return 0;
1696 }
1697 
1698 errno_t
1699 txm_image4_execute_object(
1700 	img4_runtime_object_spec_index_t obj_spec_index,
1701 	const img4_buff_t *payload,
1702 	const img4_buff_t *manifest)
1703 {
1704 	/* Not supported within TXM yet */
1705 	(void)obj_spec_index;
1706 	(void)payload;
1707 	(void)manifest;
1708 
1709 	printf("image4 object execution isn't supported by TXM\n");
1710 	return ENOSYS;
1711 }
1712 
1713 errno_t
txm_image4_copy_object(img4_runtime_object_spec_index_t obj_spec_index,vm_address_t object_out,size_t * object_length)1714 txm_image4_copy_object(
1715 	img4_runtime_object_spec_index_t obj_spec_index,
1716 	vm_address_t object_out,
1717 	size_t *object_length)
1718 {
1719 	/* Not supported within TXM yet */
1720 	(void)obj_spec_index;
1721 	(void)object_out;
1722 	(void)object_length;
1723 
1724 	printf("image4 object copying isn't supported by TXM\n");
1725 	return ENOSYS;
1726 }
1727 
1728 const void*
txm_image4_get_monitor_exports(void)1729 txm_image4_get_monitor_exports(void)
1730 {
1731 	txm_call_t txm_call = {
1732 		.selector = kTXMKernelSelectorImage4GetExports,
1733 		.failure_fatal = true,
1734 		.num_output_args = 1,
1735 	};
1736 
1737 	txm_kernel_call(&txm_call);
1738 	return (const void*)txm_call.return_words[0];
1739 }
1740 
1741 errno_t
txm_image4_set_release_type(const char * release_type)1742 txm_image4_set_release_type(
1743 	const char *release_type)
1744 {
1745 	txm_call_t txm_call = {
1746 		.selector = kTXMKernelSelectorImage4SetReleaseType,
1747 		.failure_fatal = true,
1748 		.num_input_args = 1,
1749 	};
1750 
1751 	/* Set the release type -- cannot fail */
1752 	txm_kernel_call(&txm_call, release_type);
1753 
1754 	return 0;
1755 }
1756 
1757 errno_t
txm_image4_set_bnch_shadow(const img4_nonce_domain_index_t ndi)1758 txm_image4_set_bnch_shadow(
1759 	const img4_nonce_domain_index_t ndi)
1760 {
1761 	txm_call_t txm_call = {
1762 		.selector = kTXMKernelSelectorImage4SetBootNonceShadow,
1763 		.failure_fatal = true,
1764 		.num_input_args = 1,
1765 	};
1766 
1767 	/* Set the boot nonce shadow -- cannot fail */
1768 	txm_kernel_call(&txm_call, ndi);
1769 
1770 	return 0;
1771 }
1772 
1773 #pragma mark Image4 - New
1774 
1775 static inline bool
_txm_image4_monitor_trap_supported(image4_cs_trap_t selector)1776 _txm_image4_monitor_trap_supported(
1777 	image4_cs_trap_t selector)
1778 {
1779 	switch (selector) {
1780 #if kTXMImage4APIVersion >= 1
1781 	case IMAGE4_CS_TRAP_KMOD_SET_RELEASE_TYPE:
1782 	case IMAGE4_CS_TRAP_NONCE_SET:
1783 	case IMAGE4_CS_TRAP_NONCE_ROLL:
1784 	case IMAGE4_CS_TRAP_IMAGE_ACTIVATE:
1785 		return true;
1786 #endif
1787 
1788 	default:
1789 		return false;
1790 	}
1791 }
1792 
1793 kern_return_t
txm_image4_transfer_region(image4_cs_trap_t selector,vm_address_t region_addr,vm_size_t region_size)1794 txm_image4_transfer_region(
1795 	image4_cs_trap_t selector,
1796 	vm_address_t region_addr,
1797 	vm_size_t region_size)
1798 {
1799 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1800 		txm_transfer_region(region_addr, region_size);
1801 	}
1802 	return KERN_SUCCESS;
1803 }
1804 
1805 kern_return_t
txm_image4_reclaim_region(image4_cs_trap_t selector,vm_address_t region_addr,vm_size_t region_size)1806 txm_image4_reclaim_region(
1807 	image4_cs_trap_t selector,
1808 	vm_address_t region_addr,
1809 	vm_size_t region_size)
1810 {
1811 	if (_txm_image4_monitor_trap_supported(selector) == true) {
1812 		txm_reclaim_region(region_addr, region_size);
1813 	}
1814 	return KERN_SUCCESS;
1815 }
1816 
1817 errno_t
txm_image4_monitor_trap(image4_cs_trap_t selector,const void * input_data,size_t input_size)1818 txm_image4_monitor_trap(
1819 	image4_cs_trap_t selector,
1820 	const void *input_data,
1821 	size_t input_size)
1822 {
1823 	txm_call_t txm_call = {
1824 		.selector = kTXMKernelSelectorImage4Dispatch,
1825 		.num_input_args = 5,
1826 	};
1827 
1828 	kern_return_t ret = txm_kernel_call(
1829 		&txm_call, selector,
1830 		input_data, input_size,
1831 		NULL, NULL);
1832 
1833 	/* Return 0 for success */
1834 	if (ret == KERN_SUCCESS) {
1835 		return 0;
1836 	}
1837 
1838 	/* Check for an errno_t return */
1839 	if (txm_call.txm_ret.returnCode == kTXMReturnCodeErrno) {
1840 		if (txm_call.txm_ret.errnoRet == 0) {
1841 			panic("image4 dispatch: unexpected success errno_t: %llu", selector);
1842 		}
1843 		return txm_call.txm_ret.errnoRet;
1844 	}
1845 
1846 	/* Return a generic error */
1847 	return EPERM;
1848 }
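
/*
 * Illustrative sketch (not part of this file): the presumed flow for
 * dispatching an Image4 trap whose selector requires its input region to be
 * transferred to TXM first (see _txm_image4_monitor_trap_supported()). The
 * names selector, buf and buf_size are hypothetical, and the region is
 * assumed to be page-aligned as described at the top of this file.
 *
 *	txm_image4_transfer_region(selector, (vm_address_t)buf, buf_size);
 *	errno_t err = txm_image4_monitor_trap(selector, buf, buf_size);
 *	txm_image4_reclaim_region(selector, (vm_address_t)buf, buf_size);
 */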
1849 
1850 #pragma mark Metrics
1851 
1852 #if DEVELOPMENT || DEBUG
1853 
1854 SYSCTL_DECL(_txm);
1855 SYSCTL_NODE(, OID_AUTO, txm, CTLFLAG_RD, 0, "TXM");
1856 
1857 SYSCTL_DECL(_txm_metrics);
1858 SYSCTL_NODE(_txm, OID_AUTO, metrics, CTLFLAG_RD, 0, "TXM Metrics");
1859 
1860 #define TXM_METRIC(type, name, field)                                               \
1861 static int __txm_metric_ ## type ## _ ## name SYSCTL_HANDLER_ARGS;                  \
1862 SYSCTL_DECL(_txm_metrics_ ## type);                                                 \
1863 SYSCTL_PROC(                                                                        \
1864 	_txm_metrics_ ## type, OID_AUTO,                                                \
1865 	name, CTLTYPE_INT | CTLFLAG_RD,                                                 \
1866 	NULL, 0, __txm_metric_ ## type ## _ ## name,                                    \
1867 	"I", "collected data from \'" #type "\':\'" #field "\'");                       \
1868 static int __txm_metric_ ## type ## _ ## name SYSCTL_HANDLER_ARGS                   \
1869 {                                                                                   \
1870 	if (req->newptr) {                                                              \
1871 	    return EPERM;                                                               \
1872 	}                                                                               \
1873 	uint32_t value = os_atomic_load(&txm_metrics->field, relaxed);                  \
1874 	return SYSCTL_OUT(req, &value, sizeof(value));                                  \
1875 }
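
/*
 * For reference (an inference from how these macros compose, not a statement
 * from the source): TXM_METRIC(memory, bootstrap, memory.bootstrap) registers
 * a read-only handler under the _txm_metrics_memory node, so on a
 * DEVELOPMENT/DEBUG kernel the value would be read roughly as:
 *
 *	sysctl txm.metrics.memory.bootstrap
 *
 * The handler loads the value with os_atomic_load(), presumably because the
 * backing metrics structure can be updated by TXM concurrently with reads.
 */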
1876 
1877 SYSCTL_DECL(_txm_metrics_memory);
1878 SYSCTL_NODE(_txm_metrics, OID_AUTO, memory, CTLFLAG_RD, 0, "TXM Metrics - Memory");
1879 
1880 #define TXM_ALLOCATOR_METRIC(name, field)                                                   \
1881 SYSCTL_DECL(_txm_metrics_memory_ ## name);                                                  \
1882 SYSCTL_NODE(_txm_metrics_memory, OID_AUTO, name, CTLFLAG_RD, 0, "\'" #name "\' allocator"); \
1883 TXM_METRIC(memory_ ## name, bytes_allocated, field->allocated);                             \
1884 TXM_METRIC(memory_ ## name, bytes_unused, field->unused);                                   \
1885 TXM_METRIC(memory_ ## name, bytes_wasted, field->wasted);
1886 
1887 TXM_METRIC(memory, bootstrap, memory.bootstrap);
1888 TXM_METRIC(memory, free_list, memory.freeList);
1889 TXM_METRIC(memory, bulk_data, memory.bulkData);
1890 TXM_ALLOCATOR_METRIC(trust_cache, memory.slabs.trustCache);
1891 TXM_ALLOCATOR_METRIC(provisioning_profile, memory.slabs.profile);
1892 TXM_ALLOCATOR_METRIC(code_signature, memory.slabs.codeSignature);
1893 TXM_ALLOCATOR_METRIC(code_region, memory.slabs.codeRegion);
1894 TXM_ALLOCATOR_METRIC(address_space, memory.slabs.addressSpace);
1895 TXM_ALLOCATOR_METRIC(bucket_1024, memory.buckets.b1024);
1896 TXM_ALLOCATOR_METRIC(bucket_2048, memory.buckets.b2048);
1897 TXM_ALLOCATOR_METRIC(bucket_4096, memory.buckets.b4096);
1898 TXM_ALLOCATOR_METRIC(bucket_8192, memory.buckets.b8192);
1899 
1900 SYSCTL_DECL(_txm_metrics_acceleration);
1901 SYSCTL_NODE(_txm_metrics, OID_AUTO, acceleration, CTLFLAG_RD, 0, "TXM Metrics - Acceleration");
1902 TXM_METRIC(acceleration, num_signature, acceleration.signature);
1903 TXM_METRIC(acceleration, num_bucket, acceleration.bucket);
1904 TXM_METRIC(acceleration, num_page, acceleration.page);
1905 TXM_METRIC(acceleration, bucket_256, acceleration.bucket256);
1906 TXM_METRIC(acceleration, unsupported, acceleration.large);
1907 
1908 SYSCTL_DECL(_txm_metrics_trustcaches);
1909 SYSCTL_NODE(_txm_metrics, OID_AUTO, trustcaches, CTLFLAG_RD, 0, "TXM Metrics - Trust Caches");
1910 TXM_METRIC(trustcaches, bytes_needed, trustCaches.bytesNeeded);
1911 TXM_METRIC(trustcaches, bytes_allocated, trustCaches.bytesAllocated);
1912 TXM_METRIC(trustcaches, bytes_locked, trustCaches.bytesLocked);
1913 TXM_METRIC(trustcaches, bytes_tombstoned, trustCaches.bytesTombstoned);
1914 
1915 #endif /* DEVELOPMENT || DEBUG */
1916 
1917 
1918 #endif /* CONFIG_SPTM */
1919