xref: /xnu-10063.101.15/osfmk/kern/exclaves.c (revision 94d3b452840153a99b38a3a9659680b2a006908e)
1 /*
2  * Copyright (c) 2022 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <mach/exclaves.h>
30 #include <mach/mach_traps.h>
31 #include <kern/misc_protos.h>
32 #include <kern/assert.h>
33 #include <kern/recount.h>
34 #include <kern/startup.h>
35 
36 #if CONFIG_EXCLAVES
37 
38 #if CONFIG_SPTM
39 #include <arm64/sptm/sptm.h>
40 #else
41 #error Invalid configuration
42 #endif /* CONFIG_SPTM */
43 
44 #include <arm/cpu_data_internal.h>
45 #include <kern/epoch_sync.h>
46 #include <kern/ipc_kobject.h>
47 #include <kern/kalloc.h>
48 #include <kern/locks.h>
49 #include <kern/percpu.h>
50 #include <kern/thread.h>
51 #include <kern/zalloc.h>
52 #include <kern/exclaves_stackshot.h>
53 #include <kern/exclaves_test_stackshot.h>
54 #include <vm/pmap.h>
55 #include <pexpert/pexpert.h>
56 #include <pexpert/device_tree.h>
57 
58 #include <mach/exclaves_l4.h>
59 #include <mach/mach_port.h>
60 
61 #include <Exclaves/Exclaves.h>
62 
63 #include "exclaves_debug.h"
64 #include "exclaves_panic.h"
65 
66 /* External & generated headers */
67 #include <xrt_hosted_types/types.h>
68 #include <xnuproxy/messages.h>
69 
70 /* Use the new version of xnuproxy_msg_t. */
71 #define xnuproxy_msg_t xnuproxy_msg_new_t
72 
73 #if __has_include(<Tightbeam/tightbeam.h>)
74 #include <Tightbeam/tightbeam.h>
75 #include <Tightbeam/tightbeam_private.h>
76 #endif
77 
78 #include "exclaves_resource.h"
79 #include "exclaves_upcalls.h"
80 #include "exclaves_boot.h"
81 #include "exclaves_inspection.h"
82 
83 
/* Unslid pointers defining the range of code which switches threads into
 * secure world */
uintptr_t exclaves_enter_range_start;
uintptr_t exclaves_enter_range_end;

/* Unslid pointers defining the range of code which triggers upcall handlers */
uintptr_t exclaves_upcall_range_start;
uintptr_t exclaves_upcall_range_end;

/* Number of allocated ipcb buffers, estimate of active exclave threads */
static _Atomic size_t exclaves_ipcb_cnt;

/* Lock group shared by all exclaves locks declared in this file. */
LCK_GRP_DECLARE(exclaves_lck_grp, "exclaves");

/* Lock around communication with singleton xnu proxy server thread */
LCK_MTX_DECLARE(exclaves_xnu_proxy_lock, &exclaves_lck_grp);

/* Boot lock - only used here for assertions. */
extern lck_mtx_t exclaves_boot_lock;

/*
 * Control access to exclaves. Multicore support is learned at runtime.
 */
static LCK_MTX_DECLARE(exclaves_scheduler_lock, &exclaves_lck_grp);
static bool exclaves_multicore;
#if DEVELOPMENT || DEBUG
/* boot-arg to control use of the exclaves_scheduler_lock independently of
 * whether exclaves multicore support is enabled */
static TUNABLE(bool, exclaves_smp_enabled, "exclaves_smp", true);
#else
#define exclaves_smp_enabled true
#endif

/* Forward declarations: per-thread IPC buffer acquisition/release and
 * endpoint call plumbing (definitions further down in this file). */
static kern_return_t
exclaves_acquire_ipc_buffer(Exclaves_L4_IpcBuffer_t **ipcb_out,
    Exclaves_L4_Word_t *scid_out);
static kern_return_t
exclaves_relinquish_ipc_buffer(Exclaves_L4_IpcBuffer_t *ipcb,
    Exclaves_L4_Word_t scid);
static kern_return_t
exclaves_endpoint_call_internal(ipc_port_t port, exclaves_id_t endpoint_id);

/* Forward declarations: secure-world entry and boot-info retrieval. */
static kern_return_t
exclaves_enter(void);
static kern_return_t
exclaves_bootinfo(uint64_t *out_boot_info);

static kern_return_t
exclaves_scheduler_init(uint64_t boot_info);

kern_return_t
exclaves_scheduler_resume_scheduling_context(Exclaves_L4_Word_t scid,
    Exclaves_L4_Word_t *spawned_scid, bool interrupted);

/* Forward declarations: xnu proxy communication. */
static kern_return_t
exclaves_xnu_proxy_init(uint64_t xnu_proxy_boot_info);
static kern_return_t
exclaves_xnu_proxy_allocate_context(Exclaves_L4_Word_t *out_scid,
    Exclaves_L4_IpcBuffer_t **out_ipcb);
static kern_return_t
exclaves_xnu_proxy_free_context(Exclaves_L4_Word_t scid);
static kern_return_t
exclaves_xnu_proxy_endpoint_call(Exclaves_L4_Word_t endpoint_id);
static kern_return_t
exclaves_hosted_error(bool success, XrtHosted_Error_t *error);

/*
 * A static set of exclave epoch counters.
 */
static os_atomic(uint64_t) epoch_counter[XrtHosted_Counter_limit] = {};
154 
os_atomic(uint64_t)155 static inline os_atomic(uint64_t) *
156 exclaves_get_queue_counter(const uint64_t id)
157 {
158 	return &epoch_counter[XrtHosted_Counter_fromQueueId(id)];
159 }
160 
os_atomic(uint64_t)161 static inline os_atomic(uint64_t) *
162 exclaves_get_thread_counter(const uint64_t id)
163 {
164 	return &epoch_counter[XrtHosted_Counter_fromThreadId(id)];
165 }
166 
/*
 * A (simple, for now...) cache of IPC buffers for communicating with XNU-Proxy.
 * Limited in size by the same value as XNU-Proxy's EC limit.
 * Must be realtime-safe.
 */

static kern_return_t
exclaves_ipc_buffer_cache_init(void);

/* Intrusive linked list within the unused IPC buffer */
struct exclaves_ipc_buffer_cache_item {
	/* Next free buffer in the cache; NULL terminates the list. */
	struct exclaves_ipc_buffer_cache_item *next;
	/* Scheduling context id that was paired with this buffer. */
	Exclaves_L4_Word_t scid;
} __attribute__((__packed__));

/* The free-list bookkeeping must fit inside the buffer it describes. */
_Static_assert(Exclaves_L4_IpcBuffer_Size >= sizeof(struct exclaves_ipc_buffer_cache_item),
    "Invalid Exclaves_L4_IpcBuffer_Size");

LCK_SPIN_DECLARE(exclaves_ipc_buffer_cache_lock, &exclaves_lck_grp);
/* Head of the buffer free-list; protected by the spin lock above. */
static struct exclaves_ipc_buffer_cache_item *exclaves_ipc_buffer_cache;

/* -------------------------------------------------------------------------- */
#pragma mark exclaves debug configuration

#if DEVELOPMENT || DEBUG
TUNABLE_WRITEABLE(unsigned int, exclaves_debug, "exclaves_debug", 0);
#endif /* DEVELOPMENT || DEBUG */

#if DEVELOPMENT || DEBUG
/* boot-arg to enable/disable the IPC buffer cache (enabled by default). */
TUNABLE_WRITEABLE(unsigned int, exclaves_ipc_buffer_cache_enabled, "exclaves_ipcb_cache", 1);
#else
#define exclaves_ipc_buffer_cache_enabled 1
#endif
#endif /* CONFIG_EXCLAVES */
201 
202 /* -------------------------------------------------------------------------- */
203 #pragma mark userspace entry point
204 
/*
 * Userspace entry point for the exclaves control trap.
 *
 * Dispatches on the operation encoded in uap->operation_and_flags and
 * implements the userspace-visible exclaves interface: endpoint calls,
 * named/shared/audio buffer create+copyin/copyout, boot, conclave launch
 * and resource lookup, sensors, and notification resource lookup.
 *
 * Returns a kern_return_t.  NOTE(review): several paths return the raw
 * copyin/copyout errno-style int as a kern_return_t — pre-existing
 * convention in this trap; confirm callers treat nonzero as failure only.
 */
kern_return_t
_exclaves_ctl_trap(struct exclaves_ctl_trap_args *uap)
{
#if CONFIG_EXCLAVES
	kern_return_t kr = KERN_SUCCESS;
	int error = 0;

	/* Unpack trap arguments. */
	mach_port_name_t name = uap->name;
	exclaves_id_t identifier = uap->identifier;
	mach_vm_address_t ubuffer = uap->buffer;
	mach_vm_size_t usize = uap->size;
	/* Copyin/copyout style ops reuse the identifier slot as an offset. */
	mach_vm_size_t uoffset = (mach_vm_size_t)uap->identifier;
	mach_vm_size_t usize2 = uap->size2;
	mach_vm_size_t uoffset2 = uap->offset;
	bool start_conclave = false;

	/*
	 * EXCLAVES_XNU_PROXY_CR_RETVAL comes from ExclavePlatform and is shared
	 * with xnu. That header is not shared with userspace. Make sure that
	 * the retval userspace picks up is the same as the one
	 * xnu/ExclavePlatform thinks it is.
	 */
	assert3p(&EXCLAVES_XNU_PROXY_CR_RETVAL((Exclaves_L4_IpcBuffer_t *)0), ==,
	    &XNUPROXY_CR_RETVAL((Exclaves_L4_IpcBuffer_t *)0));

	uint8_t operation = EXCLAVES_CTL_OP(uap->operation_and_flags);
	uint32_t flags = EXCLAVES_CTL_FLAGS(uap->operation_and_flags);
	if (flags != 0) {
		/* No flag bits are currently defined. */
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * The only valid operation if exclaves are not booted to
	 * EXCLAVES_BOOT_STAGE_EXCLAVEKIT, is the BOOT op.
	 */
	if (operation != EXCLAVES_CTL_OP_BOOT) {
		/*
		 * Make this EXCLAVES_BOOT_STAGE_2 until userspace is actually
		 * triggering the EXCLAVESKIT boot stage.
		 */
		kr = exclaves_boot_wait(EXCLAVES_BOOT_STAGE_2);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	}

	switch (operation) {
	case EXCLAVES_CTL_OP_ENDPOINT_CALL:
		if (name != MACH_PORT_NULL) {
			/* Only accept MACH_PORT_NULL for now */
			return KERN_INVALID_CAPABILITY;
		}
		/* User buffer must be exactly one IPC buffer in size. */
		if (ubuffer == USER_ADDR_NULL || usize == 0 ||
		    usize != Exclaves_L4_IpcBuffer_Size) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Lazily binds an IPC buffer to the calling thread. */
		Exclaves_L4_IpcBuffer_t *ipcb;
		if ((error = exclaves_allocate_ipc_buffer((void**)&ipcb))) {
			return error;
		}
		assert(ipcb != NULL);
		if ((error = copyin(ubuffer, ipcb, usize))) {
			return error;
		}
		kr = exclaves_endpoint_call_internal(IPC_PORT_NULL, identifier);
		/* Reflect the (possibly updated) IPC buffer back to user. */
		if ((error = copyout(ipcb, ubuffer, usize))) {
			return error;
		}
		break;

	case EXCLAVES_CTL_OP_NAMED_BUFFER_CREATE: {
		if (name != MACH_PORT_NULL) {
			/* Only accept MACH_PORT_NULL for now */
			return KERN_INVALID_CAPABILITY;
		}

		/* identifier is a user pointer to the buffer's name string. */
		size_t len = 0;
		char id_name[XNUPROXY_RESOURCE_NAME_MAX] = "";
		if (copyinstr(identifier, id_name, XNUPROXY_RESOURCE_NAME_MAX,
		    &len) != 0 || id_name[0] == '\0') {
			return KERN_INVALID_ARGUMENT;
		}

		/* Exactly one (or both, legacy) of READ/WRITE must be set. */
		exclaves_buffer_perm_t perm = (exclaves_buffer_perm_t)usize2;
		const exclaves_buffer_perm_t supported =
		    EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE;
		if ((perm & supported) == 0 || (perm & ~supported) != 0) {
			return KERN_INVALID_ARGUMENT;
		}

		/*
		 * Fallback to the kernel domain for now if the conclave can't
		 * be found.
		 */
		exclaves_resource_t *resource = task_get_conclave(current_task());
		const char *domain = resource != NULL ?
		    resource->r_name : EXCLAVES_DOMAIN_KERNEL;
		/* Single-permission requests use the newer shared-memory API
		 * when a shared-memory resource by this name exists. */
		const bool new_api =
		    (perm == EXCLAVES_BUFFER_PERM_READ) ||
		    (perm == EXCLAVES_BUFFER_PERM_WRITE);
		const bool shared_mem_available =
		    exclaves_resource_lookup_by_name(domain, id_name,
		    XNUPROXY_RESOURCE_SHARED_MEMORY) != NULL;
		const bool use_shared_mem = new_api && shared_mem_available;

		kr = use_shared_mem ?
		    exclaves_resource_shared_memory_map(domain, id_name, usize, perm, &resource) :
		    exclaves_named_buffer_map(domain, id_name, usize, perm, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* Hand the resource back to userspace as a port name. */
		kr = exclaves_resource_create_port_name(resource,
		    current_space(), &name);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = copyout(&name, ubuffer, sizeof(mach_port_name_t));
		if (kr != KERN_SUCCESS) {
			/* Userspace never saw the name; drop it. */
			mach_port_deallocate(current_space(), name);
			return kr;
		}

		break;
	}

	case EXCLAVES_CTL_OP_NAMED_BUFFER_COPYIN: {
		exclaves_resource_t *resource = NULL;
		kr = exclaves_resource_from_port_name(current_space(), name,
		    &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* Dispatch on resource type; legacy named buffer vs newer
		 * shared memory resource. */
		switch (resource->r_type) {
		case XNUPROXY_RESOURCE_NAMED_BUFFER:
			kr = exclaves_named_buffer_copyin(resource, ubuffer,
			    usize, uoffset, usize2, uoffset2);
			break;

		case XNUPROXY_RESOURCE_SHARED_MEMORY:
			kr = exclaves_resource_shared_memory_copyin(resource,
			    ubuffer, usize, uoffset, usize2, uoffset2);
			break;

		default:
			exclaves_resource_release(resource);
			return KERN_INVALID_CAPABILITY;
		}

		/* Release the reference taken by the port-name lookup. */
		exclaves_resource_release(resource);

		if (kr != KERN_SUCCESS) {
			return kr;
		}
		break;
	}

	case EXCLAVES_CTL_OP_NAMED_BUFFER_COPYOUT: {
		exclaves_resource_t *resource = NULL;
		kr = exclaves_resource_from_port_name(current_space(), name,
		    &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* Mirror image of NAMED_BUFFER_COPYIN above. */
		switch (resource->r_type) {
		case XNUPROXY_RESOURCE_NAMED_BUFFER:
			kr = exclaves_named_buffer_copyout(resource, ubuffer,
			    usize, uoffset, usize2, uoffset2);
			break;

		case XNUPROXY_RESOURCE_SHARED_MEMORY:
			kr = exclaves_resource_shared_memory_copyout(resource,
			    ubuffer, usize, uoffset, usize2, uoffset2);
			break;

		default:
			exclaves_resource_release(resource);
			return KERN_INVALID_CAPABILITY;
		}

		exclaves_resource_release(resource);

		if (kr != KERN_SUCCESS) {
			return kr;
		}
		break;
	}

	case EXCLAVES_CTL_OP_BOOT:
		if (name != MACH_PORT_NULL) {
			/* Only accept MACH_PORT_NULL for now */
			return KERN_INVALID_CAPABILITY;
		}
		/* identifier carries the requested boot stage. */
		kr = exclaves_boot((uint32_t)identifier);
		break;

	case EXCLAVES_CTL_OP_LAUNCH_CONCLAVE:
		start_conclave = true;
		OS_FALLTHROUGH;
	case EXCLAVES_CTL_OP_LOOKUP_RESOURCES:
		if (name != MACH_PORT_NULL) {
			/* Only accept MACH_PORT_NULL for now */
			return KERN_INVALID_CAPABILITY;
		}
		struct exclaves_resource_user *conclave_resource_user = NULL;
		int resource_count = 0;

		/* usize must be a whole, bounded number of user records. */
		if (usize > (MAX_CONCLAVE_RESOURCE_NUM * sizeof(struct exclaves_resource_user)) ||
		    (usize % sizeof(struct exclaves_resource_user) != 0)) {
			return KERN_INVALID_ARGUMENT;
		}

		/* Buffer and size must both be present or both absent. */
		if ((ubuffer == USER_ADDR_NULL && usize != 0) ||
		    (usize == 0 && ubuffer != USER_ADDR_NULL)) {
			return KERN_INVALID_ARGUMENT;
		}

		if (ubuffer != USER_ADDR_NULL) {
			conclave_resource_user = (struct exclaves_resource_user *)
			    kalloc_data(usize, Z_WAITOK | Z_ZERO | Z_NOFAIL);
			error = copyin(ubuffer, conclave_resource_user, usize);
			if (error) {
				kfree_data(conclave_resource_user, usize);
				return KERN_INVALID_ARGUMENT;
			}
			resource_count = (int)usize / sizeof(struct exclaves_resource_user);

			/* Reject any resource name that is not NUL-terminated
			 * within its buffer. */
			const size_t name_buf_len = sizeof(conclave_resource_user->r_name);
			for (int i = 0; i < resource_count; i++) {
				if (strnlen(conclave_resource_user[i].r_name,
				    name_buf_len) == name_buf_len) {
					kfree_data(conclave_resource_user, usize);
					return KERN_INVALID_ARGUMENT;
				}
			}
		}
		kr = task_start_conclave_and_lookup_resources(MACH_PORT_NULL,
		    start_conclave, conclave_resource_user, resource_count);
		if (kr == KERN_SUCCESS && conclave_resource_user != NULL) {
			/* Return the filled-in resource records. */
			error = copyout(conclave_resource_user, ubuffer, usize);
			if (error) {
				kr = KERN_INVALID_ADDRESS;
			}
		}

		if (conclave_resource_user) {
			kfree_data(conclave_resource_user, usize);
		}
		break;

	case EXCLAVES_CTL_OP_AUDIO_BUFFER_CREATE: {
		if (identifier == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		/* copy in string name */
		char id_name[XNUPROXY_RESOURCE_NAME_MAX] = "";
		size_t done = 0;
		if (copyinstr(identifier, id_name, XNUPROXY_RESOURCE_NAME_MAX, &done) != 0) {
			return KERN_INVALID_ARGUMENT;
		}

		/*
		 * Fallback to the kernel domain for now if the conclave can't
		 * be found.
		 */
		exclaves_resource_t *resource = task_get_conclave(current_task());
		const char *domain = resource != NULL ?
		    resource->r_name : EXCLAVES_DOMAIN_KERNEL;
		/* Prefer the arbitrated-audio-memory resource when one with
		 * this name exists; otherwise fall back to the legacy API. */
		const bool use_audio_memory =
		    exclaves_resource_lookup_by_name(domain, id_name,
		    XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY) != NULL;

		kr = use_audio_memory ?
		    exclaves_resource_audio_memory_map(domain, id_name, usize, &resource) :
		    exclaves_audio_buffer_map(domain, id_name, usize, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = exclaves_resource_create_port_name(resource, current_space(),
		    &name);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = copyout(&name, ubuffer, sizeof(mach_port_name_t));
		if (kr != KERN_SUCCESS) {
			/* Userspace never saw the name; drop it. */
			mach_port_deallocate(current_space(), name);
			return kr;
		}

		break;
	}

	case EXCLAVES_CTL_OP_AUDIO_BUFFER_COPYOUT: {
		exclaves_resource_t *resource;

		kr = exclaves_resource_from_port_name(current_space(), name, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* Dispatch on legacy audio buffer vs arbitrated audio memory. */
		switch (resource->r_type) {
		case XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER:
			kr = exclaves_audio_buffer_copyout(resource, ubuffer,
			    usize, uoffset, usize2, uoffset2);
			break;

		case XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY:
			kr = exclaves_resource_audio_memory_copyout(resource,
			    ubuffer, usize, uoffset, usize2, uoffset2);
			break;

		default:
			exclaves_resource_release(resource);
			return KERN_INVALID_CAPABILITY;
		}

		exclaves_resource_release(resource);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		break;
	}

	case EXCLAVES_CTL_OP_SENSOR_CREATE: {
		if (identifier == 0) {
			return KERN_INVALID_ARGUMENT;
		}

		/* copy in string name */
		char id_name[XNUPROXY_RESOURCE_NAME_MAX] = "";
		size_t done = 0;
		if (copyinstr(identifier, id_name, XNUPROXY_RESOURCE_NAME_MAX, &done) != 0) {
			return KERN_INVALID_ARGUMENT;
		}

		/*
		 * Fallback to the kernel domain for now if the conclave can't
		 * be found.
		 */
		exclaves_resource_t *resource = task_get_conclave(current_task());
		const char *domain = resource != NULL ?
		    resource->r_name : EXCLAVES_DOMAIN_KERNEL;

		kr = exclaves_resource_sensor_open(domain, id_name, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = exclaves_resource_create_port_name(resource, current_space(),
		    &name);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = copyout(&name, ubuffer, sizeof(mach_port_name_t));
		if (kr != KERN_SUCCESS) {
			/* No senders drops the reference. */
			mach_port_deallocate(current_space(), name);
			return kr;
		}

		break;
	}

	case EXCLAVES_CTL_OP_SENSOR_START: {
		exclaves_resource_t *resource;
		kr = exclaves_resource_from_port_name(current_space(), name, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (resource->r_type != XNUPROXY_RESOURCE_SENSOR) {
			exclaves_resource_release(resource);
			return KERN_FAILURE;
		}

		exclaves_sensor_status_t status;
		kr = exclaves_resource_sensor_start(resource, identifier, &status);

		exclaves_resource_release(resource);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* Report the post-start sensor status to userspace. */
		kr = copyout(&status, ubuffer, sizeof(exclaves_sensor_status_t));

		break;
	}
	case EXCLAVES_CTL_OP_SENSOR_STOP: {
		exclaves_resource_t *resource;
		kr = exclaves_resource_from_port_name(current_space(), name, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (resource->r_type != XNUPROXY_RESOURCE_SENSOR) {
			exclaves_resource_release(resource);
			return KERN_FAILURE;
		}

		exclaves_sensor_status_t status;
		kr = exclaves_resource_sensor_stop(resource, identifier, &status);

		exclaves_resource_release(resource);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/* Report the post-stop sensor status to userspace. */
		kr = copyout(&status, ubuffer, sizeof(exclaves_sensor_status_t));

		break;
	}
	case EXCLAVES_CTL_OP_SENSOR_STATUS: {
		exclaves_resource_t *resource;
		kr = exclaves_resource_from_port_name(current_space(), name, &resource);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		if (resource->r_type != XNUPROXY_RESOURCE_SENSOR) {
			exclaves_resource_release(resource);
			return KERN_FAILURE;
		}


		exclaves_sensor_status_t status;
		kr = exclaves_resource_sensor_status(resource, identifier, &status);

		exclaves_resource_release(resource);

		if (kr != KERN_SUCCESS) {
			return kr;
		}

		kr = copyout(&status, ubuffer, sizeof(exclaves_sensor_status_t));
		break;
	}
	case EXCLAVES_CTL_OP_NOTIFICATION_RESOURCE_LOOKUP: {
		exclaves_resource_t *notification_resource = NULL;
		mach_port_name_t port_name = MACH_PORT_NULL;

		/* In/out record: caller supplies r_name, kernel fills in
		 * r_type/r_id/r_port on success. */
		struct exclaves_resource_user *notification_resource_user = NULL;
		if (usize != sizeof(struct exclaves_resource_user)) {
			return KERN_INVALID_ARGUMENT;
		}

		if (ubuffer == USER_ADDR_NULL) {
			return KERN_INVALID_ARGUMENT;
		}

		notification_resource_user = (struct exclaves_resource_user *)
		    kalloc_data(usize, Z_WAITOK | Z_ZERO | Z_NOFAIL);

		error = copyin(ubuffer, notification_resource_user, usize);
		if (error) {
			kr = KERN_INVALID_ARGUMENT;
			goto notification_resource_lookup_out;
		}

		/* The requested name must be NUL-terminated in-buffer. */
		const size_t name_buf_len = sizeof(notification_resource_user->r_name);
		if (strnlen(notification_resource_user->r_name, name_buf_len)
		    == name_buf_len) {
			kr = KERN_INVALID_ARGUMENT;
			goto notification_resource_lookup_out;
		}

		/*
		 * Fallback to the kernel domain for now if the conclave can't
		 * be found.
		 */
		exclaves_resource_t *resource = task_get_conclave(current_task());
		const char *domain = resource != NULL ?
		    resource->r_name : EXCLAVES_DOMAIN_KERNEL;

		kr = exclaves_notification_create(domain,
		    notification_resource_user->r_name, &notification_resource);
		if (kr != KERN_SUCCESS) {
			goto notification_resource_lookup_out;
		}

		kr = exclaves_resource_create_port_name(notification_resource,
		    current_space(), &port_name);
		if (kr != KERN_SUCCESS) {
			goto notification_resource_lookup_out;
		}
		notification_resource_user->r_type = notification_resource->r_type;
		notification_resource_user->r_id = notification_resource->r_id;
		notification_resource_user->r_port = port_name;
		error = copyout(notification_resource_user, ubuffer, usize);
		if (error) {
			kr = KERN_INVALID_ADDRESS;
			goto notification_resource_lookup_out;
		}

		/* Common exit: free the temp record and, on failure, drop the
		 * port name that userspace will never receive. */
notification_resource_lookup_out:
		if (notification_resource_user != NULL) {
			kfree_data(notification_resource_user, usize);
		}
		if (kr != KERN_SUCCESS && port_name != MACH_PORT_NULL) {
			mach_port_deallocate(current_space(), port_name);
		}
		break;
	}

	default:
		kr = KERN_INVALID_ARGUMENT;
		break;
	}

	return kr;
#else /* CONFIG_EXCLAVES */
#pragma unused(uap)
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_EXCLAVES */
}
731 
732 /* -------------------------------------------------------------------------- */
733 #pragma mark kernel entry points
734 
/*
 * Kernel entry point for an exclaves endpoint call.
 *
 * The message payload travels via the calling thread's IPC buffer (which
 * must already be bound — see exclaves_allocate_ipc_buffer()).  The tag is
 * passed in through the tag message register and updated in place on
 * return; *error receives the call's return value from the buffer.
 * Only port == IPC_PORT_NULL is supported for now.
 */
kern_return_t
exclaves_endpoint_call(ipc_port_t port, exclaves_id_t endpoint_id,
    exclaves_tag_t *tag, exclaves_error_t *error)
{
#if CONFIG_EXCLAVES
	kern_return_t kr = KERN_SUCCESS;
	assert(port == IPC_PORT_NULL);

	Exclaves_L4_IpcBuffer_t *ipcb = Exclaves_L4_IpcBuffer();
	assert(ipcb != NULL);

	exclaves_debug_printf(show_progress,
	    "exclaves: endpoint call:\tendpoint id %lld tag 0x%llx\n",
	    endpoint_id, *tag);

	/* Stash the tag, make the call, then read back the updated tag and
	 * the call's return value — order matters here. */
	ipcb->mr[Exclaves_L4_Ipc_Mr_Tag] = *tag;
	kr = exclaves_endpoint_call_internal(port, endpoint_id);
	*tag = ipcb->mr[Exclaves_L4_Ipc_Mr_Tag];
	*error = XNUPROXY_CR_RETVAL(ipcb);

	exclaves_debug_printf(show_progress,
	    "exclaves: endpoint call return:\tendpoint id %lld tag 0x%llx "
	    "error 0x%llx\n", endpoint_id, *tag, *error);

	return kr;
#else /* CONFIG_EXCLAVES */
#pragma unused(port, endpoint_id, tag, error)
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_EXCLAVES */
}
765 
/* Realtime-safe acquisition of an IPC buffer */
/*
 * Bind an IPC buffer (and its scheduling context id) to the calling thread
 * on first use; subsequent calls return the already-bound buffer.
 * On success, *out_ipc_buffer (if non-NULL) receives the buffer pointer.
 */
kern_return_t
exclaves_allocate_ipc_buffer(void **out_ipc_buffer)
{
#if CONFIG_EXCLAVES
	kern_return_t kr = KERN_SUCCESS;
	thread_t thread = current_thread();
	Exclaves_L4_IpcBuffer_t *ipcb = thread->th_exclaves_ipc_buffer;
	Exclaves_L4_Word_t scid = thread->th_exclaves_scheduling_context_id;

	if (ipcb == NULL) {
		/* No buffer bound yet: scid must also be unset. */
		assert(scid == 0);
		if ((kr = exclaves_acquire_ipc_buffer(&ipcb, &scid))) {
			return kr;
		}
		thread->th_exclaves_ipc_buffer = ipcb;
		thread->th_exclaves_scheduling_context_id = scid;
	}
	if (out_ipc_buffer) {
		*out_ipc_buffer = (void*)ipcb;
	}

	return kr;
#else /* CONFIG_EXCLAVES */
#pragma unused(out_ipc_buffer)
	return KERN_NOT_SUPPORTED;
#endif /* CONFIG_EXCLAVES */
}
794 
#if CONFIG_EXCLAVES
/*
 * Detach and release the thread's bound IPC buffer/scheduling context, if
 * any.  The thread fields are cleared before the buffer is relinquished so
 * the thread never points at a buffer that is back on the free-list.
 */
static kern_return_t
exclaves_thread_free_ipc_buffer(thread_t thread)
{
	kern_return_t kr = KERN_SUCCESS;
	Exclaves_L4_IpcBuffer_t *ipcb = thread->th_exclaves_ipc_buffer;
	Exclaves_L4_Word_t scid = thread->th_exclaves_scheduling_context_id;

	if (ipcb != NULL) {
		/* A bound buffer always has a non-zero context id. */
		assert(scid != 0);
		thread->th_exclaves_ipc_buffer = NULL;
		thread->th_exclaves_scheduling_context_id = 0;

		kr = exclaves_relinquish_ipc_buffer(ipcb, scid);
	} else {
		assert(scid == 0);
	}

	return kr;
}
#endif /* CONFIG_EXCLAVES */
816 
817 kern_return_t
exclaves_free_ipc_buffer(void)818 exclaves_free_ipc_buffer(void)
819 {
820 #if CONFIG_EXCLAVES
821 	return exclaves_thread_free_ipc_buffer(current_thread());
822 #else /* CONFIG_EXCLAVES */
823 	return KERN_NOT_SUPPORTED;
824 #endif /* CONFIG_EXCLAVES */
825 }
826 
/*
 * Thread-termination hook: free any IPC buffer the thread abandoned.
 * Must run on the terminating thread itself, outside any exclaves
 * call/upcall (enforced by the state asserts below).
 */
kern_return_t
exclaves_thread_terminate(__unused thread_t thread)
{
	kern_return_t kr = KERN_SUCCESS;

#if CONFIG_EXCLAVES
	assert(thread == current_thread());
	assert(thread->th_exclaves_intstate == 0);
	assert(thread->th_exclaves_state == 0);
	if (thread->th_exclaves_ipc_buffer) {
		exclaves_debug_printf(show_progress,
		    "exclaves: thread_terminate freeing abandoned exclaves "
		    "ipc buffer\n");
		kr = exclaves_thread_free_ipc_buffer(thread);
		assert(kr == KERN_SUCCESS);
	}
#else
#pragma unused(thread)
#endif /* CONFIG_EXCLAVES */

	return kr;
}
849 
850 OS_CONST
851 void*
exclaves_get_ipc_buffer(void)852 exclaves_get_ipc_buffer(void)
853 {
854 #if CONFIG_EXCLAVES
855 	thread_t thread = current_thread();
856 	Exclaves_L4_IpcBuffer_t *ipcb = thread->th_exclaves_ipc_buffer;
857 	assert(ipcb != NULL);
858 
859 	return ipcb;
860 #else /* CONFIG_EXCLAVES */
861 	return NULL;
862 #endif /* CONFIG_EXCLAVES */
863 }
864 
#if CONFIG_EXCLAVES

/*
 * Capture the unslid bounds of the secure-world enter and upcall code
 * ranges (see the globals' comments at the top of this file) from linker
 * labels, at early boot.
 */
__startup_func
static void
initialize_exclaves_call_range(void)
{
	exclaves_enter_range_start = VM_KERNEL_UNSLIDE(&exclaves_enter_start_label);
	assert3u(exclaves_enter_range_start, !=, 0);
	exclaves_enter_range_end = VM_KERNEL_UNSLIDE(&exclaves_enter_end_label);
	assert3u(exclaves_enter_range_end, !=, 0);
	exclaves_upcall_range_start = VM_KERNEL_UNSLIDE(&exclaves_upcall_start_label);
	assert3u(exclaves_upcall_range_start, !=, 0);
	exclaves_upcall_range_end = VM_KERNEL_UNSLIDE(&exclaves_upcall_end_label);
	assert3u(exclaves_upcall_range_end, !=, 0);
}
STARTUP(EARLY_BOOT, STARTUP_RANK_MIDDLE, initialize_exclaves_call_range);
881 
extern kern_return_t exclaves_boot_early(void);
/*
 * Early exclaves boot sequence: fetch boot info, then initialize the
 * scheduler, the IPC buffer cache and the resource subsystem, in that
 * order.  Stops and returns at the first failure.  Caller must hold the
 * exclaves boot lock.
 */
kern_return_t
exclaves_boot_early(void)
{
	kern_return_t kr = KERN_FAILURE;
	uint64_t boot_info = 0;

	lck_mtx_assert(&exclaves_boot_lock, LCK_MTX_ASSERT_OWNED);

	kr = exclaves_bootinfo(&boot_info);
	if (kr != KERN_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "exclaves: Get bootinfo failed\n");
		return kr;
	}

	kr = exclaves_scheduler_init(boot_info);
	if (kr != KERN_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "exclaves: Init scheduler failed\n");
		return kr;
	}

	kr = exclaves_ipc_buffer_cache_init();
	if (kr != KERN_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "exclaves: failed to initialize IPC buffer cache\n");
		return kr;
	}

	kr = exclaves_resource_init();
	if (kr != KERN_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "exclaves: failed to initialize resources\n");
		return kr;
	}

	return KERN_SUCCESS;
}
#endif /* CONFIG_EXCLAVES */
922 
#if CONFIG_EXCLAVES
/* Hosted xrt callback table; set once via
 * exclaves_register_xrt_hosted_callbacks() (first registration wins). */
static struct XrtHosted_Callbacks *exclaves_callbacks = NULL;
#endif /* CONFIG_EXCLAVES */
926 
/*
 * Register the hosted xrt callback table.  Only the first registration
 * takes effect; later calls are silently ignored.
 */
void
exclaves_register_xrt_hosted_callbacks(struct XrtHosted_Callbacks *callbacks)
{
#if CONFIG_EXCLAVES
	if (exclaves_callbacks != NULL) {
		return;
	}
	exclaves_callbacks = callbacks;
#else /* CONFIG_EXCLAVES */
#pragma unused(callbacks)
#endif /* CONFIG_EXCLAVES */
}
938 
939 /* -------------------------------------------------------------------------- */
940 
941 #pragma mark exclaves ipc internals
942 
943 #if CONFIG_EXCLAVES
944 
/*
 * Acquire an IPC buffer and matching scheduling context id, preferring the
 * realtime-safe free-list cache and falling back to allocating a fresh
 * context from the xnu proxy.
 */
static kern_return_t
exclaves_acquire_ipc_buffer(Exclaves_L4_IpcBuffer_t **out_ipcb,
    Exclaves_L4_Word_t *out_scid)
{
	kern_return_t kr = KERN_SUCCESS;
	Exclaves_L4_IpcBuffer_t *ipcb = NULL;
	Exclaves_L4_Word_t scid = 0;
	struct exclaves_ipc_buffer_cache_item *cached_buffer = NULL;


	_Static_assert(Exclaves_L4_IpcBuffer_Size < PAGE_SIZE,
	    "Invalid Exclaves_L4_IpcBuffer_Size");

	if (exclaves_ipc_buffer_cache_enabled) {
		/* Pop a buffer off the free-list, if one is available. */
		lck_spin_lock(&exclaves_ipc_buffer_cache_lock);
		if (exclaves_ipc_buffer_cache != NULL) {
			cached_buffer = exclaves_ipc_buffer_cache;
			exclaves_ipc_buffer_cache = cached_buffer->next;
		}
		lck_spin_unlock(&exclaves_ipc_buffer_cache_lock);
	}

	if (cached_buffer) {
		scid = cached_buffer->scid;

		/* zero out this usage of the buffer to avoid any confusion in xnuproxy */
		cached_buffer->next = NULL;
		cached_buffer->scid = 0;

		ipcb = (Exclaves_L4_IpcBuffer_t*)cached_buffer;
	} else {
		kr = exclaves_xnu_proxy_allocate_context(&scid, &ipcb);
		if (kr == KERN_NO_SPACE) {
			/* Exhausting the context space is fatal. */
			panic("Exclaves IPC buffer allocation failed");
		}
	}

	*out_ipcb = ipcb;
	*out_scid = scid;

	return kr;
}
987 
988 size_t
exclaves_ipc_buffer_count(void)989 exclaves_ipc_buffer_count(void)
990 {
991 	return os_atomic_load(&exclaves_ipcb_cnt, relaxed);
992 }
993 
994 static kern_return_t
exclaves_relinquish_ipc_buffer(Exclaves_L4_IpcBuffer_t * ipcb,Exclaves_L4_Word_t scid)995 exclaves_relinquish_ipc_buffer(Exclaves_L4_IpcBuffer_t *ipcb,
996     Exclaves_L4_Word_t scid)
997 {
998 	kern_return_t kr = KERN_SUCCESS;
999 	struct exclaves_ipc_buffer_cache_item *cached_buffer;
1000 
1001 	if (!exclaves_ipc_buffer_cache_enabled) {
1002 		kr = exclaves_xnu_proxy_free_context(scid);
1003 	} else {
1004 		cached_buffer = (struct exclaves_ipc_buffer_cache_item*)ipcb;
1005 		cached_buffer->scid = scid;
1006 
1007 		lck_spin_lock(&exclaves_ipc_buffer_cache_lock);
1008 		cached_buffer->next = exclaves_ipc_buffer_cache;
1009 		exclaves_ipc_buffer_cache = cached_buffer;
1010 		lck_spin_unlock(&exclaves_ipc_buffer_cache_lock);
1011 	}
1012 
1013 	return kr;
1014 }
1015 
1016 static kern_return_t
exclaves_endpoint_call_internal(__unused ipc_port_t port,exclaves_id_t endpoint_id)1017 exclaves_endpoint_call_internal(__unused ipc_port_t port,
1018     exclaves_id_t endpoint_id)
1019 {
1020 	kern_return_t kr = KERN_SUCCESS;
1021 
1022 	assert(port == IPC_PORT_NULL);
1023 
1024 	kr = exclaves_xnu_proxy_endpoint_call(endpoint_id);
1025 
1026 	return kr;
1027 }
1028 
1029 /* -------------------------------------------------------------------------- */
1030 #pragma mark secure kernel communication
1031 
/* ringgate entry endpoints */
enum {
	RINGGATE_EP_ENTER,      /* enter/resume the exclaves scheduler */
	RINGGATE_EP_INFO        /* query the boot-info physical address */
};

/* ringgate entry status codes */
enum {
	RINGGATE_STATUS_SUCCESS,
	RINGGATE_STATUS_ERROR   /* sk_enter() returns this value on failure */
};
1043 
/*
 * Enter the exclaves scheduler through the SPTM ringgate
 * (RINGGATE_EP_ENTER). Secure-world CPU time is attributed via
 * recount_enter_secure()/recount_leave_secure(), and the entry is
 * bracketed by KDBG tracepoints and by the EXCLAVES_ENTRY_START/END asm
 * labels so stackshot can recognise frames that entered exclaves.
 *
 * OS_NOINLINE keeps the asm labels at a stable, unique location.
 *
 * @return KERN_FAILURE if the ringgate reported RINGGATE_STATUS_ERROR,
 *         KERN_SUCCESS otherwise.
 */
OS_NOINLINE
static kern_return_t
exclaves_enter(void)
{
	uint32_t endpoint = RINGGATE_EP_ENTER;
	uint64_t result = RINGGATE_STATUS_ERROR;

	sptm_call_regs_t regs = { };

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_SWITCH)
	    | DBG_FUNC_START);
	recount_enter_secure();
	/*
	 * Bracket with labels so stackshot can determine where exclaves are
	 * entered from xnu.
	 */
	__asm__ volatile (
            "EXCLAVES_ENTRY_START: nop\n\t"
        );
	result = sk_enter(endpoint, &regs);
	__asm__ volatile (
            "EXCLAVES_ENTRY_END: nop\n\t"
        );
	recount_leave_secure();
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_SWITCH)
	    | DBG_FUNC_END);


	if (result == RINGGATE_STATUS_ERROR) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
1078 
1079 OS_NOINLINE
1080 static kern_return_t
exclaves_bootinfo(uint64_t * out_boot_info)1081 exclaves_bootinfo(uint64_t *out_boot_info)
1082 {
1083 	uint32_t endpoint = RINGGATE_EP_INFO;
1084 	uint64_t result = RINGGATE_STATUS_ERROR;
1085 
1086 	sptm_call_regs_t regs = { };
1087 
1088 	recount_enter_secure();
1089 	result = sk_enter(endpoint, &regs);
1090 	recount_leave_secure();
1091 	if (result == RINGGATE_STATUS_ERROR) {
1092 		return KERN_FAILURE;
1093 	}
1094 	*out_boot_info = result;
1095 
1096 	return KERN_SUCCESS;
1097 }
1098 
1099 /* -------------------------------------------------------------------------- */
1100 
1101 #pragma mark exclaves scheduler communication
1102 
/*
 * Per-CPU pointers to this CPU's exclaves scheduler request/response
 * buffers. Populated once by exclaves_init_multicore() or
 * exclaves_init_unicore(); read on every scheduler request.
 */
static XrtHosted_Buffer_t * PERCPU_DATA(exclaves_request);
static XrtHosted_Buffer_t * PERCPU_DATA(exclaves_response);
1105 
/*
 * Multicore setup: for every scheduler core slot, translate its
 * hardwareId (an index into the DeviceTree "/cpus" array) to a physical
 * CPU id via the "reg" property, then stash that core's request/response
 * buffer pointers into the matching CPU's per-cpu slots.
 *
 * Scheduler core slots with no matching DT entry are skipped.
 */
static void
exclaves_init_multicore(void)
{
	assert(exclaves_multicore);

	XrtHosted_Buffer_t **req, **res;

	exclaves_wait_for_cpu_init();

	DTEntry entry, child;
	OpaqueDTEntryIterator iter;
	int err = SecureDTLookupEntry(NULL, "/cpus", &entry);
	assert(err == kSuccess);
	err = SecureDTInitEntryIterator(entry, &iter);
	assert(err == kSuccess);

	/*
	 * Match the hardwareID to the physical ID and stash the pointers to the
	 * request/response buffers in per-cpu data for quick access.
	 */
	size_t core_count = exclaves_callbacks->v1.cores();
	for (size_t i = 0; i < core_count; i++) {
		const XrtHosted_Core_t *core = exclaves_callbacks->v1.core(i);

		/* Find the physical ID of the entry at position hardwareId in the
		 * DeviceTree "cpus" array */
		uint32_t dt_phys_id = 0, dt_index = 0;
		bool dt_entry_found = false;
		/* Restart iteration for each core; the DT walk is cheap at boot. */
		err = SecureDTRestartEntryIteration(&iter);
		assert(err == kSuccess);
		while (kSuccess == SecureDTIterateEntries(&iter, &child)) {
			if (core->v2.hardwareId == dt_index) {
				void const *dt_prop;
				unsigned int dt_prop_sz;
				err = SecureDTGetProperty(child, "reg", &dt_prop, &dt_prop_sz);
				assert(err == kSuccess);
				assert(dt_prop_sz == sizeof(uint32_t));
				dt_phys_id = *((uint32_t const *)dt_prop);
				dt_entry_found = true;
				break;
			}
			dt_index++;
		}
		if (!dt_entry_found) {
			continue;
		}

		/* Find the CPU with that physical id and wire up its buffers. */
		percpu_foreach(cpu_data, cpu_data) {
			if (cpu_data->cpu_phys_id != dt_phys_id) {
				continue;
			}
			req = PERCPU_GET_RELATIVE(exclaves_request, cpu_data, cpu_data);
			*req = exclaves_callbacks->v1.Core.request(i);

			res = PERCPU_GET_RELATIVE(exclaves_response, cpu_data, cpu_data);
			*res = exclaves_callbacks->v1.Core.response(i);

			break;
		}
	}
}
1167 
1168 static void
exclaves_init_unicore(void)1169 exclaves_init_unicore(void)
1170 {
1171 	assert(!exclaves_multicore);
1172 
1173 	XrtHosted_Buffer_t *breq, *bres, **req, **res;
1174 
1175 	exclaves_wait_for_cpu_init();
1176 
1177 	breq = exclaves_callbacks->v1.Core.request(XrtHosted_Core_bootIndex);
1178 	bres = exclaves_callbacks->v1.Core.response(XrtHosted_Core_bootIndex);
1179 
1180 	/* Always use the boot request/response buffers. */
1181 	percpu_foreach(cpu_data, cpu_data) {
1182 		req = PERCPU_GET_RELATIVE(exclaves_request, cpu_data, cpu_data);
1183 		*req = breq;
1184 
1185 		res = PERCPU_GET_RELATIVE(exclaves_response, cpu_data, cpu_data);
1186 		*res = bres;
1187 	}
1188 }
1189 
/*
 * Bring up the exclaves scheduler from the boot-info physical address
 * obtained via the ringgate: initialise the XrtHostedXnu kext, map the
 * physically scattered shared region, select the multicore/unicore
 * per-cpu buffer layout from the scheduler's reported SMP status, and
 * finally initialise the xnu proxy.
 *
 * Must be called with exclaves_boot_lock held.
 *
 * @return KERN_FAILURE for invalid physical addresses or missing
 *         callbacks, otherwise the result of the init calls.
 */
static kern_return_t
exclaves_scheduler_init(uint64_t boot_info)
{
	kern_return_t kr = KERN_SUCCESS;
	XrtHosted_Error_t hosted_error;

	lck_mtx_assert(&exclaves_boot_lock, LCK_MTX_ASSERT_OWNED);

	if (!pmap_valid_address(boot_info)) {
		exclaves_debug_printf(show_errors,
		    "exclaves: %s: 0x%012llx\n",
		    "Invalid root physical address",
		    boot_info);
		return KERN_FAILURE;
	}

	/* The scheduler kext must have registered its callback table by now. */
	if (exclaves_callbacks == NULL) {
		exclaves_debug_printf(show_errors,
		    "exclaves: Callbacks not registered\n");
		return KERN_FAILURE;
	}

	/* Initialise XrtHostedXnu kext */
	kr = exclaves_hosted_error(
		exclaves_callbacks->v1.init(
			XrtHosted_Version_current,
			phystokv(boot_info),
			&hosted_error),
		&hosted_error);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Record aperture addresses in buffer */
	size_t frames = exclaves_callbacks->v1.frames();
	/* zalloc_permanent: this table lives for the lifetime of the system. */
	XrtHosted_Mapped_t **pages = zalloc_permanent(
		frames * sizeof(XrtHosted_Mapped_t *),
		ZALIGN(XrtHosted_Mapped_t *));
	size_t index = 0;
	uint64_t phys = boot_info;
	/* Walk the scattered frame chain; nextPhys() advances index/phys. */
	while (index < frames) {
		if (!pmap_valid_address(phys)) {
			exclaves_debug_printf(show_errors,
			    "exclaves: %s: 0x%012llx\n",
			    "Invalid shared physical address",
			    phys);
			return KERN_FAILURE;
		}
		pages[index] = (XrtHosted_Mapped_t *)phystokv(phys);
		kr = exclaves_hosted_error(
			exclaves_callbacks->v1.nextPhys(
				pages[index],
				&index,
				&phys,
				&hosted_error),
			&hosted_error);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	}

	/* Initialise the mapped region */
	exclaves_callbacks->v1.setMapping(
		XrtHosted_Region_scattered(frames, pages));

	/* Initialise the XNU proxy */
	XrtHosted_Global_t *global = exclaves_callbacks->v1.global();

	/* Pick the per-cpu buffer layout based on the scheduler's SMP status. */
	exclaves_multicore = global->v2.smpStatus == XrtHosted_SmpStatus_Multicore;
	exclaves_multicore ?
	exclaves_init_multicore() :
	exclaves_init_unicore();

	uint64_t xnu_proxy_boot_info = global->v1.proxyInit;
	kr = exclaves_xnu_proxy_init(xnu_proxy_boot_info);

	return kr;
}
1268 
/*
 * Debug helpers for dumping scheduler request/response buffers.
 * save_buffer snapshots a buffer into a local copy (named <buf>_copy)
 * before exclaves can overwrite it; show_request_response prints the
 * saved copies when the show_scheduler_request_response debug flag is
 * set. Both compile to nothing when the feature is disabled.
 */
#if EXCLAVES_ENABLE_SHOW_SCHEDULER_REQUEST_RESPONSE
#define exclaves_scheduler_debug_save_buffer(_buf) \
	XrtHosted_Buffer_t _buf##_copy = *(_buf)
#define exclaves_scheduler_debug_show_request_response(_request_buf, \
	    _response_buf) ({ \
	if (exclaves_debug_enabled(show_scheduler_request_response)) { \
	        printf("exclaves: Scheduler request = %p\n", _request_buf); \
	        printf("exclaves: Scheduler request.tag = 0x%04llx\n", \
	            _request_buf##_copy.tag); \
	        for (size_t arg = 0; arg < XrtHosted_Buffer_args; arg += 1) { \
	                printf("exclaves: Scheduler request.arguments[%02zu] = " \
	                    "0x%04llx\n", arg, \
	                    _request_buf##_copy.arguments[arg]); \
	        } \
	        printf("exclaves: Scheduler response = %p\n", _response_buf); \
	        printf("exclaves: Scheduler response.tag = 0x%04llx\n", \
	                _response_buf##_copy.tag); \
	        for (size_t arg = 0; arg < XrtHosted_Buffer_args; arg += 1) { \
	                printf("exclaves: Scheduler response.arguments[%02zu] = " \
	                    "0x%04llx\n", arg, \
	                    _response_buf##_copy.arguments[arg]); \
	        } \
	}})
#else // EXCLAVES_ENABLE_SHOW_SCHEDULER_REQUEST_RESPONSE
#define exclaves_scheduler_debug_save_buffer(_buf) ({ })
#define exclaves_scheduler_debug_show_request_response(_request_buf, \
	    _response_buf) ({ })
#endif // EXCLAVES_ENABLE_SHOW_SCHEDULER_REQUEST_RESPONSE
1297 
/*
 * Send one request to the exclaves scheduler and decode its response.
 *
 * The per-cpu request/response buffers are only valid while the thread
 * cannot migrate, so the encode/enter/decode sequence runs with
 * interrupts and preemption disabled. On unicore (or SMP-disabled)
 * systems all CPUs share one buffer pair, serialised by
 * exclaves_scheduler_lock. Note the response may be decoded on a
 * different CPU than the request was encoded on.
 *
 * @param request   scheduler request (tag must be a valid request tag).
 * @param response  receives the decoded scheduler response.
 * @return result of exclaves_enter().
 */
__attribute__((always_inline))
static kern_return_t
exclaves_scheduler_request(const XrtHosted_Request_t *request, XrtHosted_Response_t *response)
{
	assert3u(request->tag, >, XrtHosted_Request_Invalid);
	assert3u(request->tag, <, XrtHosted_Request_Limit);

	kern_return_t kr = KERN_FAILURE;
	bool istate;
	thread_t thread = current_thread();

	if (!exclaves_multicore || !exclaves_smp_enabled) {
		lck_mtx_lock(&exclaves_scheduler_lock);
	}

	/*
	 * Disable preemption and interrupts as the xrt hosted scheduler data
	 * structures are per-core.
	 * Preemption disabled and interrupt disabled timeouts are disabled for
	 * now until we can co-ordinate the measurements with the exclaves side of
	 * things.
	 */
	istate = ml_set_interrupts_enabled_with_debug(false, false);

	/*
	 * This needs to be done with interrupts disabled, otherwise stackshot could
	 * mark the thread blocked just after this function exits and a thread marked
	 * as AST blocked would go into exclaves.
	 */

	while ((os_atomic_load(&thread->th_exclaves_inspection_state, relaxed) & ~TH_EXCLAVES_INSPECTION_NOINSPECT) != 0) {
		/* Enable interrupts */
		(void) ml_set_interrupts_enabled_with_debug(true, false);

		/* Drop the scheduler lock while blocking on the inspection AST. */
		if (!exclaves_multicore || !exclaves_smp_enabled) {
			lck_mtx_unlock(&exclaves_scheduler_lock);
		}

		/* Wait until the thread is collected on exclaves side */
		exclaves_inspection_check_ast();

		if (!exclaves_multicore || !exclaves_smp_enabled) {
			lck_mtx_lock(&exclaves_scheduler_lock);
		}

		/* Disable interrupts and preemption before next AST check */
		ml_set_interrupts_enabled_with_debug(false, false);
	}
	/* Interrupts are disabled and exclaves_stackshot_ast is clean */

	disable_preemption_without_measurements();

	/* Mark the thread as executing in exclaves for the duration. */
	assert((thread->th_exclaves_intstate & TH_EXCLAVES_EXECUTION) == 0);
	thread->th_exclaves_intstate |= TH_EXCLAVES_EXECUTION;

	XrtHosted_Buffer_t *request_buf = *PERCPU_GET(exclaves_request);
	assert3p(request_buf, !=, NULL);

	exclaves_callbacks->v1.Request.encode(request_buf, request);
	exclaves_scheduler_debug_save_buffer(request_buf);

	kr = exclaves_enter();

	/* The response may have come back on a different core. */
	XrtHosted_Buffer_t *response_buf = *PERCPU_GET(exclaves_response);
	assert3p(response_buf, !=, NULL);

	exclaves_scheduler_debug_save_buffer(response_buf);
	exclaves_callbacks->v1.Response.decode(response_buf, response);

	thread->th_exclaves_intstate &= ~TH_EXCLAVES_EXECUTION;
	enable_preemption();
	(void) ml_set_interrupts_enabled_with_debug(istate, false);

	/* Dump the saved copies once it is safe to take a printf path. */
	exclaves_scheduler_debug_show_request_response(request_buf, response_buf);

	if (!exclaves_multicore || !exclaves_smp_enabled) {
		lck_mtx_unlock(&exclaves_scheduler_lock);
	}

	return kr;
}
1380 
/*
 * Handle a Yield response: the responding context yielded to another
 * scheduling context. Currently only logs/traces the hand-off; see the
 * TODO for planned resume/handoff handling.
 *
 * @param scid   expected responding scheduling context id (assert only).
 * @param yield  decoded yield message.
 * @return KERN_SUCCESS always.
 */
static kern_return_t
handle_response_yield(__assert_only Exclaves_L4_Word_t scid,
    const XrtHosted_Yield_t *yield)
{
	Exclaves_L4_Word_t responding_scid = yield->thread;
	Exclaves_L4_Word_t yielded_to_scid = yield->yieldTo;
	__assert_only ctid_t ctid = thread_get_ctid(current_thread());

	exclaves_debug_printf(show_progress,
	    "exclaves: Scheduler: scid 0x%lx yielded to scid 0x%lx\n",
	    responding_scid, yielded_to_scid);
	/* TODO: 1. remember yielding scid if it isn't the xnu proxy's
	 * th_exclaves_scheduling_context_id so we know to resume it later
	 * 2. translate yield_to to thread_switch()-style handoff.
	 */
	assert3u(responding_scid, ==, scid);
	assert3u(yield->threadHostId, ==, ctid);

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_YIELD), yielded_to_scid);

	return KERN_SUCCESS;
}
1404 
/*
 * Handle a Spawned response: the scheduler created a new scheduling
 * context. The new scid is returned through spawned_scid; a spawn when
 * the caller did not pass an out-pointer is treated as an error.
 *
 * @param scid          expected responding scid (assert only, currently
 *                      disabled below).
 * @param spawned       decoded spawn message.
 * @param spawned_scid  out: the newly spawned scid; may be NULL if no
 *                      spawn was expected.
 * @return KERN_SUCCESS, or KERN_FAILURE for an unexpected spawn.
 */
static kern_return_t
handle_response_spawned(__assert_only Exclaves_L4_Word_t scid,
    const XrtHosted_Spawned_t *spawned, Exclaves_L4_Word_t *spawned_scid)
{
	Exclaves_L4_Word_t responding_scid = spawned->thread;
	__assert_only ctid_t ctid = thread_get_ctid(current_thread());

	if (spawned_scid == NULL) {
		exclaves_debug_printf(show_errors,
		    "exclaves: Scheduler: Unexpected thread spawn: "
		    "scid 0x%lx spawned scid 0x%llx\n",
		    responding_scid, spawned->spawned);
		return KERN_FAILURE;
	}

	*spawned_scid = spawned->spawned;
	exclaves_debug_printf(show_progress,
	    "exclaves: Scheduler: scid 0x%lx spawned scid 0x%lx\n",
	    responding_scid, *spawned_scid);
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_SPAWNED), *spawned_scid);

	/* TODO: remember yielding scid if it isn't the xnu proxy's
	 * th_exclaves_scheduling_context_id so we know to resume it later
	 */
	/* Asserts deliberately compiled out until the scheduler is fixed. */
	if (0) {
		// FIXME: reenable when exclaves scheduler is fixed
		assert3u(responding_scid, ==, scid);
		assert3u(spawned->threadHostId, ==, ctid);
	}

	return KERN_SUCCESS;
}
1438 
1439 static kern_return_t
handle_response_terminated(const XrtHosted_Terminated_t * terminated)1440 handle_response_terminated(const XrtHosted_Terminated_t *terminated)
1441 {
1442 	Exclaves_L4_Word_t responding_scid = terminated->thread;
1443 	__assert_only ctid_t ctid = thread_get_ctid(current_thread());
1444 
1445 	exclaves_debug_printf(show_errors,
1446 	    "exclaves: Scheduler: Unexpected thread terminate: "
1447 	    "scid 0x%lx terminated scid 0x%llx\n", responding_scid,
1448 	    terminated->terminated);
1449 	assert3u(terminated->threadHostId, ==, ctid);
1450 
1451 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
1452 	    MACH_EXCLAVES_SCHEDULER_TERMINATED),
1453 	    terminated->terminated);
1454 
1455 	return KERN_TERMINATED;
1456 }
1457 
/*
 * Handle a Wait response: block the calling thread on the specified
 * epoch sync queue until woken (or interrupted, depending on the
 * scheduler-specified interruptibility).
 *
 * @return KERN_ABORTED if the wait was interrupted, KERN_SUCCESS if
 *         woken or the epoch was stale; panics on any other wait result.
 */
static kern_return_t
handle_response_wait(const XrtHosted_Wait_t *wait)
{
	Exclaves_L4_Word_t responding_scid = wait->waiter;
	__assert_only ctid_t ctid = thread_get_ctid(current_thread());

	exclaves_debug_printf(show_progress,
	    "exclaves: Scheduler: Wait: "
	    "scid 0x%lx wait on owner scid 0x%llx, queue id 0x%llx, "
	    "epoch 0x%llx\n", responding_scid, wait->owner,
	    wait->queueId, wait->epoch);
	assert3u(wait->waiterHostId, ==, ctid);

	/*
	 * Note, "owner" may not be safe to access directly, for example
	 * the thread may have exited and been freed. esync_wait will
	 * only access it under a lock if the epoch is fresh thus
	 * ensuring safety.
	 */
	const ctid_t owner = (ctid_t)wait->ownerHostId;
	const XrtHosted_Word_t id = wait->queueId;
	const uint64_t epoch = wait->epoch;

	wait_interrupt_t interruptible;
	esync_policy_t policy;

	/* Map the scheduler's interruptibility onto xnu wait/esync modes. */
	switch (wait->interruptible) {
	case XrtHosted_Interruptibility_None:
		interruptible = THREAD_UNINT;
		policy = ESYNC_POLICY_KERNEL;
		break;

	case XrtHosted_Interruptibility_Voluntary:
		interruptible = THREAD_INTERRUPTIBLE;
		policy = ESYNC_POLICY_KERNEL;
		break;

	case XrtHosted_Interruptibility_DynamicQueue:
		interruptible = THREAD_INTERRUPTIBLE;
		policy = ESYNC_POLICY_USER;
		break;

	default:
		panic("Unknown exclaves interruptibility: %llu",
		    wait->interruptible);
	}

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_WAIT) | DBG_FUNC_START, id, epoch, owner,
	    wait->interruptible);
	const wait_result_t wr = esync_wait(&esync_queue_ht, id, epoch,
	    exclaves_get_queue_counter(id), owner, policy, interruptible);
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_WAIT) | DBG_FUNC_END, wr);

	switch (wr) {
	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_NOT_WAITING:
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	default:
		panic("Unexpected wait result from esync_wait: %d", wr);
	}
}
1525 
1526 static kern_return_t
handle_response_wake(const XrtHosted_Wake_t * wake)1527 handle_response_wake(const XrtHosted_Wake_t *wake)
1528 {
1529 	Exclaves_L4_Word_t responding_scid = wake->waker;
1530 	__assert_only ctid_t ctid = thread_get_ctid(current_thread());
1531 
1532 	exclaves_debug_printf(show_progress,
1533 	    "exclaves: Scheduler: Wake: "
1534 	    "scid 0x%lx wake of queue id 0x%llx, "
1535 	    "epoch 0x%llx, all 0x%llx\n", responding_scid,
1536 	    wake->queueId, wake->epoch, wake->all);
1537 	assert3u(wake->wakerHostId, ==, ctid);
1538 
1539 	const XrtHosted_Word_t id = wake->queueId;
1540 	const uint64_t epoch = wake->epoch;
1541 	const esync_wake_mode_t mode = wake->all != 0 ?
1542 	    ESYNC_WAKE_ALL : ESYNC_WAKE_ONE;
1543 
1544 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
1545 	    MACH_EXCLAVES_SCHEDULER_WAKE) | DBG_FUNC_START, id, epoch, 0, mode);
1546 
1547 	kern_return_t kr = esync_wake(&esync_queue_ht, id, epoch,
1548 	    exclaves_get_queue_counter(id), mode, 0);
1549 
1550 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
1551 	    MACH_EXCLAVES_SCHEDULER_WAKE) | DBG_FUNC_END,
1552 	    kr == KERN_SUCCESS ? THREAD_AWAKENED : THREAD_NOT_WAITING);
1553 
1554 	return KERN_SUCCESS;
1555 }
1556 
1557 static kern_return_t
handle_response_wake_with_owner(const XrtHosted_WakeWithOwner_t * wake)1558 handle_response_wake_with_owner(const XrtHosted_WakeWithOwner_t *wake)
1559 {
1560 	Exclaves_L4_Word_t responding_scid = wake->waker;
1561 	__assert_only ctid_t ctid = thread_get_ctid(current_thread());
1562 
1563 	exclaves_debug_printf(show_progress,
1564 	    "exclaves: Scheduler: WakeWithOwner: "
1565 	    "scid 0x%lx wake of queue id 0x%llx, "
1566 	    "epoch 0x%llx, owner 0x%llx\n", responding_scid,
1567 	    wake->queueId, wake->epoch,
1568 	    wake->owner);
1569 
1570 	assert3u(wake->wakerHostId, ==, ctid);
1571 
1572 	const ctid_t owner = (ctid_t)wake->ownerHostId;
1573 	const XrtHosted_Word_t id = wake->queueId;
1574 	const uint64_t epoch = wake->epoch;
1575 
1576 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
1577 	    MACH_EXCLAVES_SCHEDULER_WAKE) | DBG_FUNC_START, id, epoch, owner,
1578 	    ESYNC_WAKE_ONE);
1579 
1580 	kern_return_t kr = esync_wake(&esync_queue_ht, id, epoch,
1581 	    exclaves_get_queue_counter(id), ESYNC_WAKE_ONE_WITH_OWNER, owner);
1582 
1583 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES,
1584 	    MACH_EXCLAVES_SCHEDULER_WAKE) | DBG_FUNC_END,
1585 	    kr == KERN_SUCCESS ? THREAD_AWAKENED : THREAD_NOT_WAITING);
1586 
1587 	return KERN_SUCCESS;
1588 }
1589 
1590 static kern_return_t
handle_response_panic_wait(const XrtHosted_PanicWait_t * panic_wait)1591 handle_response_panic_wait(const XrtHosted_PanicWait_t *panic_wait)
1592 {
1593 	Exclaves_L4_Word_t panic_thread_scid = panic_wait->handler;
1594 	__assert_only thread_t thread = current_thread();
1595 
1596 	exclaves_debug_printf(show_progress,
1597 	    "exclaves: Scheduler: PanicWait: "
1598 	    "Panic thread SCID %lx\n",
1599 	    panic_thread_scid);
1600 
1601 	assert3u(panic_thread_scid, ==, thread->th_exclaves_scheduling_context_id);
1602 
1603 	exclaves_panic_thread_wait();
1604 
1605 	/* NOT REACHABLE */
1606 	return KERN_SUCCESS;
1607 }
1608 
/*
 * Handle a Suspended response: block the calling thread uninterruptibly
 * on the per-thread epoch sync table until it is resumed.
 *
 * @return KERN_ABORTED if interrupted, KERN_SUCCESS if woken or the
 *         epoch was stale; panics on any other wait result.
 */
static kern_return_t
handle_response_suspended(const XrtHosted_Suspended_t *suspended)
{
	Exclaves_L4_Word_t responding_scid = suspended->suspended;
	__assert_only ctid_t ctid = thread_get_ctid(current_thread());

	exclaves_debug_printf(show_progress,
	    "exclaves: Scheduler: Suspended: "
	    "scid 0x%lx epoch 0x%llx\n", responding_scid, suspended->epoch);
	assert3u(suspended->suspendedHostId, ==, ctid);

	/* The suspended scid itself keys the thread sync table. */
	const uint64_t id = suspended->suspended;
	const uint64_t epoch = suspended->epoch;

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_SUSPENDED) | DBG_FUNC_START, id, epoch);

	const wait_result_t wr = esync_wait(&esync_thread_ht, id, epoch,
	    exclaves_get_thread_counter(id), 0, ESYNC_POLICY_KERNEL, THREAD_UNINT);

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_SUSPENDED) | DBG_FUNC_END, wr);

	switch (wr) {
	case THREAD_INTERRUPTED:
		return KERN_ABORTED;

	case THREAD_NOT_WAITING:
	case THREAD_AWAKENED:
		return KERN_SUCCESS;

	default:
		panic("Unexpected wait result from esync_wait: %d", wr);
	}
}
1644 
/*
 * Handle a Resumed response: wake the specific suspended thread
 * identified by resumedHostId on the per-thread epoch sync table.
 * Always returns KERN_SUCCESS; the esync_wake result only affects the
 * trace event payload.
 */
static kern_return_t
handle_response_resumed(const XrtHosted_Resumed_t *resumed)
{
	Exclaves_L4_Word_t responding_scid = resumed->thread;
	__assert_only ctid_t ctid = thread_get_ctid(current_thread());

	exclaves_debug_printf(show_progress,
	    "exclaves: Scheduler: Resumed: scid 0x%lx resume of scid 0x%llx "
	    "(ctid: 0x%llx), epoch 0x%llx\n", responding_scid, resumed->resumed,
	    resumed->resumedHostId, resumed->epoch);
	assert3u(resumed->threadHostId, ==, ctid);

	const ctid_t target = (ctid_t)resumed->resumedHostId;
	const XrtHosted_Word_t id = resumed->resumed;
	const uint64_t epoch = resumed->epoch;

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_RESUMED) | DBG_FUNC_START, id, epoch,
	    target);

	kern_return_t kr = esync_wake(&esync_thread_ht, id, epoch,
	    exclaves_get_thread_counter(id), ESYNC_WAKE_THREAD, target);

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
	    MACH_EXCLAVES_SCHEDULER_RESUMED) | DBG_FUNC_END,
	    kr == KERN_SUCCESS ? THREAD_AWAKENED : THREAD_NOT_WAITING);

	return KERN_SUCCESS;
}
1674 
1675 static kern_return_t
handle_response_interrupted(const XrtHosted_Interrupted_t * interrupted)1676 handle_response_interrupted(const XrtHosted_Interrupted_t *interrupted)
1677 {
1678 	Exclaves_L4_Word_t responding_scid = interrupted->thread;
1679 	__assert_only ctid_t ctid = thread_get_ctid(current_thread());
1680 
1681 	exclaves_debug_printf(show_progress,
1682 	    "exclaves: Scheduler: Interrupted: "
1683 	    "scid 0x%lx interrupt on queue id 0x%llx, "
1684 	    "epoch 0x%llx, target 0x%llx\n", responding_scid,
1685 	    interrupted->queueId, interrupted->epoch,
1686 	    interrupted->interruptedHostId);
1687 	assert3u(interrupted->threadHostId, ==, ctid);
1688 
1689 	const ctid_t target = (ctid_t)interrupted->interruptedHostId;
1690 	const XrtHosted_Word_t id = interrupted->queueId;
1691 	const uint64_t epoch = interrupted->epoch;
1692 
1693 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES_SCHEDULER,
1694 	    MACH_EXCLAVES_SCHEDULER_INTERRUPTED) | DBG_FUNC_START, id, epoch,
1695 	    target);
1696 
1697 	kern_return_t kr = esync_wake(&esync_queue_ht, id, epoch,
1698 	    exclaves_get_queue_counter(id), ESYNC_WAKE_THREAD, target);
1699 
1700 	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES,
1701 	    MACH_EXCLAVES_SCHEDULER_INTERRUPTED) | DBG_FUNC_END,
1702 	    kr == KERN_SUCCESS ? THREAD_AWAKENED : THREAD_NOT_WAITING);
1703 
1704 	return KERN_SUCCESS;
1705 }
1706 
/*
 * Ask the exclaves scheduler to resume (or re-enter after an interrupted
 * wait) the given scheduling context, then dispatch on the scheduler's
 * response to the matching handle_response_* helper.
 *
 * @param scid          scheduling context to resume.
 * @param spawned_scid  out (may be NULL): set if the scheduler reports a
 *                      newly spawned context.
 * @param interrupted   true to send an Interrupt request instead of a
 *                      Resume request (a prior wait was interrupted).
 * @return result of the response handler, or KERN_FAILURE on an
 *         unexpected response tag / failed scheduler entry.
 */
kern_return_t
exclaves_scheduler_resume_scheduling_context(Exclaves_L4_Word_t scid,
    Exclaves_L4_Word_t *spawned_scid, bool interrupted)
{
	kern_return_t kr = KERN_SUCCESS;
	thread_t thread = current_thread();
	const ctid_t ctid = thread_get_ctid(thread);

	exclaves_debug_printf(show_progress,
	    "exclaves: Scheduler: Request to resume scid 0x%lx\n", scid);

	XrtHosted_Response_t response = {};
	const XrtHosted_Request_t request = interrupted ?
	    XrtHosted_Request_InterruptWithHostIdMsg(
		.thread = scid,
		.hostId = ctid,
		) :
	    XrtHosted_Request_ResumeWithHostIdMsg(
		.thread = scid,
		.hostId = ctid,
		);
	kr = exclaves_scheduler_request(&request, &response);

	if (kr) {
		exclaves_debug_printf(show_errors, "exclaves: Enter failed\n");
		return kr;
	}

	/* Dispatch on the scheduler's response tag. */
	switch (response.tag) {
	case XrtHosted_Response_Wait:
		return handle_response_wait(&response.Wait);

	case XrtHosted_Response_Wake:
		return handle_response_wake(&response.Wake);

	case XrtHosted_Response_Yield:
		return handle_response_yield(scid, &response.Yield);

	case XrtHosted_Response_Spawned:
		return handle_response_spawned(scid, &response.Spawned, spawned_scid);

	case XrtHosted_Response_Terminated:
		return handle_response_terminated(&response.Terminated);

	case XrtHosted_Response_WakeWithOwner:
		return handle_response_wake_with_owner(&response.WakeWithOwner);

	case XrtHosted_Response_PanicWait:
		return handle_response_panic_wait(&response.PanicWait);

	case XrtHosted_Response_Suspended:
		return handle_response_suspended(&response.Suspended);

	case XrtHosted_Response_Resumed:
		return handle_response_resumed(&response.Resumed);

	case XrtHosted_Response_Interrupted:
		return handle_response_interrupted(&response.Interrupted);

	case XrtHosted_Response_Invalid:
	case XrtHosted_Response_Failure:
	case XrtHosted_Response_Pong:
	case XrtHosted_Response_SleepUntil:
	case XrtHosted_Response_Awaken:
	default:
		exclaves_debug_printf(show_errors,
		    "exclaves: Scheduler: Unexpected response: tag 0x%x\n",
		    response.tag);
		return KERN_FAILURE;
	}
}
1778 
1779 /* -------------------------------------------------------------------------- */
1780 
1781 #pragma mark exclaves xnu proxy communication
1782 static const char *
cmd_to_str(xnuproxy_cmd_t cmd)1783 cmd_to_str(xnuproxy_cmd_t cmd)
1784 {
1785 	switch (cmd) {
1786 	case XNUPROXY_CMD_UNDEFINED:           return "undefined";
1787 	case XNUPROXY_CMD_SETUP:               return "setup";
1788 	case XNUPROXY_CMD_CONTEXT_ALLOCATE:    return "allocate context";
1789 	case XNUPROXY_CMD_CONTEXT_FREE:        return "free context";
1790 	case XNUPROXY_CMD_NAMED_BUFFER_CREATE: return "named buffer create";
1791 	case XNUPROXY_CMD_NAMED_BUFFER_DELETE: return "named buffer delete";
1792 	case XNUPROXY_CMD_RESOURCE_INFO:       return "resource info";
1793 	case XNUPROXY_CMD_AUDIO_BUFFER_CREATE: return "audio buffer create";
1794 	case XNUPROXY_CMD_AUDIO_BUFFER_COPYOUT: return "audio buffer copyout";
1795 	case XNUPROXY_CMD_AUDIO_BUFFER_DELETE: return "audio buffer delete";
1796 	case XNUPROXY_CMD_SENSOR_START:        return "sensor start";
1797 	case XNUPROXY_CMD_SENSOR_STOP:         return "sensor stop";
1798 	case XNUPROXY_CMD_SENSOR_STATUS:       return "sensor status";
1799 	case XNUPROXY_CMD_DISPLAY_HEALTHCHECK_RATE: return "display healthcheck rate";
1800 	case XNUPROXY_CMD_NAMED_BUFFER_MAP:    return "named buffer map";
1801 	case XNUPROXY_CMD_NAMED_BUFFER_LAYOUT: return "named buffer layout";
1802 	case XNUPROXY_CMD_AUDIO_BUFFER_MAP:    return "audio buffer map";
1803 	case XNUPROXY_CMD_AUDIO_BUFFER_LAYOUT: return "audio buffer layout";
1804 	default:                               return "<unknown>";
1805 	}
1806 }
/*
 * Debug logging for xnu proxy messages. `msg` is an xnuproxy_msg_t *;
 * its status field is read with a relaxed atomic load because it is
 * updated concurrently from the exclaves side.
 */
#define exclaves_xnu_proxy_debug(flag, step, msg) \
	exclaves_debug_printf(flag, \
	    "exclaves: xnu proxy %s " #step ":\t" \
	    "msg %p server_id 0x%lx cmd %u status %u\n", \
	    cmd_to_str((msg)->cmd), (msg), (msg)->server_id, (msg)->cmd, \
	    os_atomic_load(&(msg)->status, relaxed))
#define exclaves_xnu_proxy_show_progress(step, msg) \
	exclaves_xnu_proxy_debug(show_progress, step, msg)
#define exclaves_xnu_proxy_show_error(msg) \
	exclaves_xnu_proxy_debug(show_errors, failed, msg)
/* Progress logging for proxy endpoint calls (endpoint id/scid/status). */
#define exclaves_xnu_proxy_endpoint_call_show_progress(operation, step, \
	    eid, scid, status) \
	exclaves_debug_printf(show_progress, \
	    "exclaves: xnu proxy endpoint " #operation " " #step ":\t" \
	    "endpoint id %ld scid 0x%lx status %u\n", \
	    (eid), (scid), (status))

/*
 * Shared message buffer with the xnu proxy and the proxy's scheduling
 * context id; both are set at boot by exclaves_xnu_proxy_init() and
 * used under exclaves_xnu_proxy_lock by exclaves_xnu_proxy_send().
 */
static xnuproxy_msg_t *exclaves_xnu_proxy_msg_buffer;
static uint64_t exclaves_xnu_proxy_scid;
1826 
/*
 * Send a message to the xnu proxy and wait for it to complete.
 *
 * Serialised by exclaves_xnu_proxy_lock: the caller's message is copied
 * into the single shared buffer, its status is set to PROCESSING
 * (release ordering publishes the message body), and the proxy's
 * scheduling context is resumed until the proxy moves the status out of
 * PROCESSING. The final message state is copied back into *_msg.
 *
 * @param _msg     message to send; overwritten with the proxy's reply.
 * @param spawned  out (may be NULL): newly spawned scid, if any.
 * @return KERN_SUCCESS when the proxy reports STATUS_NONE, KERN_FAILURE
 *         otherwise (including when the proxy is not yet initialised).
 */
extern kern_return_t exclaves_xnu_proxy_send(xnuproxy_msg_t *, Exclaves_L4_Word_t *);
kern_return_t
exclaves_xnu_proxy_send(xnuproxy_msg_t *_msg, Exclaves_L4_Word_t *spawned)
{
	assert3p(_msg, !=, NULL);

	/* Proxy not booted yet. */
	if (exclaves_xnu_proxy_msg_buffer == NULL) {
		return KERN_FAILURE;
	}

	kern_return_t kr = KERN_SUCCESS;
	xnuproxy_msg_t *msg = exclaves_xnu_proxy_msg_buffer;
	bool interrupted = false;

	lck_mtx_lock(&exclaves_xnu_proxy_lock);
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_XNUPROXY)
	    | DBG_FUNC_START, exclaves_xnu_proxy_scid, _msg->cmd);

	*msg = *_msg;
	msg->server_id = exclaves_xnu_proxy_scid;

	/* Release: publish the message body before flagging it PROCESSING. */
	os_atomic_store(&msg->status, XNUPROXY_MSG_STATUS_PROCESSING,
	    release);

	/* Keep resuming the proxy until it finishes processing the message. */
	while (os_atomic_load(&msg->status, relaxed) ==
	    XNUPROXY_MSG_STATUS_PROCESSING) {
		exclaves_xnu_proxy_show_progress(in progress, msg);
		kr = exclaves_scheduler_resume_scheduling_context(msg->server_id,
		    spawned, interrupted);
		assert(kr == KERN_SUCCESS || kr == KERN_ABORTED);

		/* A wait was interrupted. */
		interrupted = kr == KERN_ABORTED;
	}
	/* Acquire: pair with the proxy's status update to see the reply. */
	if (os_atomic_load(&msg->status, acquire) ==
	    XNUPROXY_MSG_STATUS_NONE) {
		exclaves_xnu_proxy_show_progress(complete, msg);
	} else {
		kr = KERN_FAILURE;
		exclaves_xnu_proxy_show_error(msg);
	}

	*_msg = *msg;

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_XNUPROXY)
	    | DBG_FUNC_END);
	lck_mtx_unlock(&exclaves_xnu_proxy_lock);

	return kr;
}
1877 
1878 static kern_return_t
exclaves_xnu_proxy_init(uint64_t xnu_proxy_boot_info)1879 exclaves_xnu_proxy_init(uint64_t xnu_proxy_boot_info)
1880 {
1881 	kern_return_t kr = KERN_SUCCESS;
1882 	pmap_paddr_t msg_buffer_paddr = xnu_proxy_boot_info;
1883 
1884 	lck_mtx_assert(&exclaves_boot_lock, LCK_MTX_ASSERT_OWNED);
1885 
1886 	if (msg_buffer_paddr && pmap_valid_address(msg_buffer_paddr)) {
1887 		lck_mtx_lock(&exclaves_xnu_proxy_lock);
1888 		exclaves_xnu_proxy_msg_buffer =
1889 		    (xnuproxy_msg_t*)phystokv(msg_buffer_paddr);
1890 		exclaves_xnu_proxy_scid =
1891 		    exclaves_xnu_proxy_msg_buffer->server_id;
1892 		lck_mtx_unlock(&exclaves_xnu_proxy_lock);
1893 	} else {
1894 		exclaves_debug_printf(show_errors,
1895 		    "exclaves: %s: 0x%012llx\n",
1896 		    "Invalid xnu proxy boot info physical address",
1897 		    xnu_proxy_boot_info);
1898 		return KERN_FAILURE;
1899 	}
1900 
1901 	xnuproxy_msg_t msg = {
1902 		.cmd = XNUPROXY_CMD_SETUP,
1903 	};
1904 
1905 	kr = exclaves_xnu_proxy_send(&msg, NULL);
1906 	if (kr != KERN_SUCCESS) {
1907 		return kr;
1908 	}
1909 
1910 	if (msg.cmd_setup.response.version != XNUPROXY_MSG_VERSION) {
1911 		exclaves_debug_printf(show_errors,
1912 		    "exclaves: mismatched xnuproxy message version, "
1913 		    "xnuproxy: %u, xnu: %u  ", msg.cmd_setup.response.version,
1914 		    XNUPROXY_MSG_VERSION);
1915 		return KERN_FAILURE;
1916 	}
1917 
1918 	exclaves_debug_printf(show_progress,
1919 	    "exclaves: xnuproxy message version: 0x%u\n", XNUPROXY_MSG_VERSION);
1920 
1921 	kr = exclaves_panic_thread_setup();
1922 	if (kr != KERN_SUCCESS) {
1923 		exclaves_debug_printf(show_errors,
1924 		    "XNU proxy panic thread setup failed\n");
1925 		return KERN_FAILURE;
1926 	}
1927 
1928 	return KERN_SUCCESS;
1929 }
1930 
1931 static kern_return_t
exclaves_xnu_proxy_allocate_context(Exclaves_L4_Word_t * scid,Exclaves_L4_IpcBuffer_t ** ipcb)1932 exclaves_xnu_proxy_allocate_context(Exclaves_L4_Word_t *scid,
1933     Exclaves_L4_IpcBuffer_t **ipcb)
1934 {
1935 	kern_return_t kr = KERN_FAILURE;
1936 	Exclaves_L4_Word_t spawned_scid = 0;
1937 
1938 	xnuproxy_msg_t msg = {
1939 		.cmd = XNUPROXY_CMD_CONTEXT_ALLOCATE,
1940 	};
1941 
1942 	kr = exclaves_xnu_proxy_send(&msg, &spawned_scid);
1943 	if (kr != KERN_SUCCESS) {
1944 		return kr;
1945 	}
1946 
1947 	if (msg.cmd_ctx_alloc.response.ipc_paddr == 0) {
1948 		return KERN_NO_SPACE;
1949 	}
1950 
1951 	if (spawned_scid != 0) {
1952 		assert3u(msg.cmd_ctx_alloc.response.sched_id, ==, spawned_scid);
1953 	}
1954 
1955 	*scid = msg.cmd_ctx_alloc.response.sched_id;
1956 	*ipcb = (Exclaves_L4_IpcBuffer_t *)
1957 	    phystokv(msg.cmd_ctx_alloc.response.ipc_paddr);
1958 	os_atomic_inc(&exclaves_ipcb_cnt, relaxed);
1959 
1960 	return KERN_SUCCESS;
1961 }
1962 
1963 static kern_return_t
exclaves_xnu_proxy_free_context(Exclaves_L4_Word_t scid)1964 exclaves_xnu_proxy_free_context(Exclaves_L4_Word_t scid)
1965 {
1966 	kern_return_t kr = KERN_FAILURE;
1967 	xnuproxy_msg_t msg = {
1968 		.cmd = XNUPROXY_CMD_CONTEXT_FREE,
1969 		.cmd_ctx_free = (xnuproxy_cmd_ctx_free_t) {
1970 			.request.sched_id = scid,
1971 			.request.destroy = false,
1972 		},
1973 	};
1974 
1975 	kr = exclaves_xnu_proxy_send(&msg, NULL);
1976 	if (kr == KERN_SUCCESS) {
1977 		size_t orig_ipcb_cnt = os_atomic_dec_orig(&exclaves_ipcb_cnt, relaxed);
1978 		assert3u(orig_ipcb_cnt, >=, 1);
1979 		if (orig_ipcb_cnt == 0) { /* This is just to avoid unused variable warning */
1980 			kr = KERN_FAILURE;
1981 		}
1982 	}
1983 	return kr;
1984 }
1985 
OS_NOINLINE
/*
 * Perform an RPC to an exclaves endpoint on the current thread's
 * scheduling context.
 *
 * Stages the call in the thread's IPC buffer control region, then resumes
 * the scheduling context in a loop, handling three outcomes per resume:
 * still PROCESSING (keep going), REPLY (done), or UPCALL (dispatch the
 * upcall handler back into xnu, write its result into the buffer, and
 * continue the original call).  Returns KERN_SUCCESS on a reply,
 * KERN_FAILURE on an unrecognized status.
 */
static kern_return_t
exclaves_xnu_proxy_endpoint_call(Exclaves_L4_Word_t endpoint_id)
{
	kern_return_t kr = KERN_SUCCESS;
	thread_t thread = current_thread();
	bool interrupted = false;

	/* Per-thread context/buffer; assumed set up before this is called. */
	Exclaves_L4_Word_t scid = thread->th_exclaves_scheduling_context_id;
	Exclaves_L4_IpcBuffer_t *ipcb = thread->th_exclaves_ipc_buffer;
	xnuproxy_msg_status_t status =
	    XNUPROXY_MSG_STATUS_PROCESSING;

	/* Stage the call into the IPC buffer control region. */
	XNUPROXY_CR_ENDPOINT_ID(ipcb) = endpoint_id;
	XNUPROXY_CR_STATUS(ipcb) = status;

	exclaves_xnu_proxy_endpoint_call_show_progress(call, entry,
	    endpoint_id, scid, status);

	/* Mark the thread as inside an exclaves RPC (no nesting allowed). */
	assert((thread->th_exclaves_state &
	    (TH_EXCLAVES_RPC | TH_EXCLAVES_UPCALL)) == 0);
	thread->th_exclaves_state |= TH_EXCLAVES_RPC;
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_RPC)
	    | DBG_FUNC_START, scid, endpoint_id);

	while (1) {
		kr = exclaves_scheduler_resume_scheduling_context(scid, NULL,
		    interrupted);
		assert(kr == KERN_SUCCESS || kr == KERN_ABORTED);

		/* A wait was interrupted. */
		interrupted = kr == KERN_ABORTED;

		status = (xnuproxy_msg_status_t)
		    XNUPROXY_CR_STATUS(ipcb);

		switch (status) {
		/* Endpoint yielded without finishing: resume it again. */
		case XNUPROXY_MSG_STATUS_PROCESSING:
			exclaves_xnu_proxy_endpoint_call_show_progress(call, yielded,
			    endpoint_id, scid, status);
			continue;

		/* Call completed; the reply is in the IPC buffer. */
		case XNUPROXY_MSG_STATUS_REPLY:
			exclaves_xnu_proxy_endpoint_call_show_progress(call, returned,
			    endpoint_id, scid, status);
			kr = KERN_SUCCESS;
			break;

		/*
		 * Exclaves side requests an upcall into xnu.  The endpoint
		 * id in the buffer is re-read: it now names the upcall
		 * handler, not the original callee.
		 */
		case XNUPROXY_MSG_STATUS_UPCALL:
			thread->th_exclaves_state |= TH_EXCLAVES_UPCALL;
			endpoint_id = XNUPROXY_CR_ENDPOINT_ID(ipcb);
			KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_UPCALL)
			    | DBG_FUNC_START, scid, endpoint_id);
			exclaves_xnu_proxy_endpoint_call_show_progress(upcall, entry,
			    endpoint_id, scid, status);
			/* Labels bracket the upcall for external tooling. */
			__asm__ volatile (
                                "EXCLAVES_UPCALL_START: nop\n\t"
                        );
			kr = exclaves_call_upcall_handler(endpoint_id);
			__asm__ volatile (
                                "EXCLAVES_UPCALL_END: nop\n\t"
                        );
			/* Hand the upcall result back and resume the RPC. */
			XNUPROXY_CR_STATUS(ipcb) =
			    XNUPROXY_MSG_STATUS_PROCESSING;
			/* TODO: More state returned than Success or OperationInvalid? */
			XNUPROXY_CR_RETVAL(ipcb) =
			    (kr == KERN_SUCCESS) ? Exclaves_L4_Success :
			    Exclaves_L4_ErrorOperationInvalid;
			KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_UPCALL)
			    | DBG_FUNC_END);
			thread->th_exclaves_state &= ~TH_EXCLAVES_UPCALL;
			exclaves_xnu_proxy_endpoint_call_show_progress(upcall, returned,
			    endpoint_id, scid,
			    (unsigned int)XNUPROXY_CR_RETVAL(ipcb));
			continue;

		default:
			// Should we have an assert(valid return) here?
			exclaves_xnu_proxy_endpoint_call_show_progress(call, failed,
			    endpoint_id, scid, status);
			kr = KERN_FAILURE;
			break;
		}
		break;
	}

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_RPC)
	    | DBG_FUNC_END);
	thread->th_exclaves_state &= ~TH_EXCLAVES_RPC;

	return kr;
}
2078 
2079 static kern_return_t
exclaves_hosted_error(bool success,XrtHosted_Error_t * error)2080 exclaves_hosted_error(bool success, XrtHosted_Error_t *error)
2081 {
2082 	if (success) {
2083 		return KERN_SUCCESS;
2084 	} else {
2085 		exclaves_debug_printf(show_errors,
2086 		    "exclaves: XrtHosted: %s[%d] (%s): %s\n",
2087 		    error->file,
2088 		    error->line,
2089 		    error->function,
2090 		    error->expression
2091 		    );
2092 		return KERN_FAILURE;
2093 	}
2094 }
2095 
2096 kern_return_t
exclaves_ipc_buffer_cache_init(void)2097 exclaves_ipc_buffer_cache_init(void)
2098 {
2099 	kern_return_t kr = KERN_SUCCESS;
2100 	Exclaves_L4_IpcBuffer_t *ipcb = NULL;
2101 	Exclaves_L4_Word_t scid = 0;
2102 
2103 	LCK_MTX_ASSERT(&exclaves_boot_lock, LCK_MTX_ASSERT_OWNED);
2104 	assert(exclaves_ipc_buffer_cache == NULL);
2105 
2106 	if (exclaves_ipc_buffer_cache_enabled) {
2107 		if ((kr = exclaves_xnu_proxy_allocate_context(&scid, &ipcb))) {
2108 			return kr;
2109 		}
2110 
2111 		/* relinquish the new buffer into the cache */
2112 		exclaves_relinquish_ipc_buffer(ipcb, scid);
2113 	}
2114 	return kr;
2115 }
2116 
2117 exclaves_id_t
exclaves_endpoint_lookup(const char * name)2118 exclaves_endpoint_lookup(const char *name)
2119 {
2120 	exclaves_id_t id;
2121 
2122 	/*
2123 	 * First look-up in the kernel domain, fallback to the darwin domain if
2124 	 * not found.
2125 	 */
2126 	id = exclaves_service_lookup(EXCLAVES_DOMAIN_KERNEL, name);
2127 	if (id == UINT64_C(~0)) {
2128 		id = exclaves_service_lookup(EXCLAVES_DOMAIN_DARWIN, name);
2129 	}
2130 
2131 	assert3u(id, !=, UINT64_C(~0));
2132 
2133 	return id;
2134 }
2135 
2136 #endif /* CONFIG_EXCLAVES */
2137 
2138 #ifndef CONFIG_EXCLAVES
/* Stubs for the sensor functions that exclaves.c does not compile in when
 * CONFIG_EXCLAVES is disabled. */
2141 
2142 kern_return_t
exclaves_sensor_start(exclaves_sensor_type_t sensor_type,uint64_t flags,exclaves_sensor_status_t * status)2143 exclaves_sensor_start(exclaves_sensor_type_t sensor_type, uint64_t flags,
2144     exclaves_sensor_status_t *status)
2145 {
2146 #pragma unused(sensor_type, flags, status)
2147 	return KERN_NOT_SUPPORTED;
2148 }
2149 
2150 kern_return_t
exclaves_sensor_stop(exclaves_sensor_type_t sensor_type,uint64_t flags,exclaves_sensor_status_t * status)2151 exclaves_sensor_stop(exclaves_sensor_type_t sensor_type, uint64_t flags,
2152     exclaves_sensor_status_t *status)
2153 {
2154 #pragma unused(sensor_type, flags, status)
2155 	return KERN_NOT_SUPPORTED;
2156 }
2157 
2158 kern_return_t
exclaves_sensor_status(exclaves_sensor_type_t sensor_type,uint64_t flags,exclaves_sensor_status_t * status)2159 exclaves_sensor_status(exclaves_sensor_type_t sensor_type, uint64_t flags,
2160     exclaves_sensor_status_t *status)
2161 {
2162 #pragma unused(sensor_type, flags, status)
2163 	return KERN_NOT_SUPPORTED;
2164 }
2165 
2166 #endif /* ! CONFIG_EXCLAVES */
2167