xref: /xnu-11215.81.4/osfmk/kern/exclaves_xnuproxy.c (revision d4514f0bc1d3f944c22d92e68b646ac3fb40d452)
1 /*
2  * Copyright (c) 2024 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #if CONFIG_EXCLAVES
30 
31 #include <stdint.h>
32 #include <mach/kern_return.h>
33 #include <kern/assert.h>
34 #include <kern/misc_protos.h>
35 #include <kern/locks.h>
36 #include <kern/thread.h>
37 #include <vm/pmap.h>
38 #include <mach/exclaves_l4.h>
39 
40 #include "exclaves_debug.h"
41 #include "exclaves_xnuproxy.h"
42 #include "exclaves_resource.h"
43 #include "exclaves_upcalls.h"
44 #include "exclaves_internal.h"
45 #include "exclaves_inspection.h"
46 
47 #include "kern/exclaves.tightbeam.h"
48 
49 #include <xnuproxy/messages.h>
50 
51 /* -------------------------------------------------------------------------- */
52 #pragma mark IPC bootstrap
53 
/*
 * Lock protecting the use of the bootstrap scheduling context. Held for the
 * duration of any downcall made on the bootstrap context (see
 * exclaves_bootstrap_context_acquire/release).
 */
static LCK_MTX_DECLARE(exclaves_xnuproxy_lock, &exclaves_lck_grp);

/*
 * Bootstrap context. Used for context allocate/free. Initialized in
 * exclaves_xnuproxy_init(). Its scid is never freed or cached.
 */
static exclaves_ctx_t exclaves_bootstrap_ctx = {};
62 
63 /*
64  * Switch the current thread to use the bootstrap context. Stash the old context
65  * into the supplied arguments.
66  * Returns with exclaves_xnuproxy_lock held.
67  */
68 static void
exclaves_bootstrap_context_acquire(exclaves_ctx_t * save_ctx)69 exclaves_bootstrap_context_acquire(exclaves_ctx_t *save_ctx)
70 {
71 	assert3p(exclaves_bootstrap_ctx.ipcb, !=, NULL);
72 	assert3u(save_ctx->scid, !=, exclaves_bootstrap_ctx.scid);
73 
74 	lck_mtx_lock(&exclaves_xnuproxy_lock);
75 
76 	thread_t thread = current_thread();
77 
78 	*save_ctx = thread->th_exclaves_ipc_ctx;
79 
80 	thread->th_exclaves_ipc_ctx = exclaves_bootstrap_ctx;
81 
82 	LCK_MTX_ASSERT(&exclaves_xnuproxy_lock, LCK_MTX_ASSERT_OWNED);
83 }
84 
85 /*
86  * Restore the scheduling context of the current thread.
87  * Returns with exclaves_xnuproxy_lock released.
88  */
89 static void
exclaves_bootstrap_context_release(const exclaves_ctx_t * restore_ctx)90 exclaves_bootstrap_context_release(const exclaves_ctx_t *restore_ctx)
91 {
92 	assert3u(restore_ctx->scid, !=, exclaves_bootstrap_ctx.scid);
93 
94 	LCK_MTX_ASSERT(&exclaves_xnuproxy_lock, LCK_MTX_ASSERT_OWNED);
95 
96 	thread_t thread = current_thread();
97 	assert3p(thread->th_exclaves_ipc_ctx.ipcb, ==, exclaves_bootstrap_ctx.ipcb);
98 
99 	/* Reset */
100 	thread->th_exclaves_ipc_ctx = *restore_ctx;
101 
102 	lck_mtx_unlock(&exclaves_xnuproxy_lock);
103 }
104 
105 /* -------------------------------------------------------------------------- */
106 #pragma mark IPC buffer count
107 
/*
 * Number of allocated ipcb buffers. Estimates the number of active exclave
 * threads. Updated with relaxed atomics only -- readers get an unsynchronized
 * snapshot, not a fenced value.
 */
static _Atomic size_t exclaves_ipcb_cnt;
113 
114 size_t
exclaves_ipc_buffer_count(void)115 exclaves_ipc_buffer_count(void)
116 {
117 	return os_atomic_load(&exclaves_ipcb_cnt, relaxed);
118 }
119 
120 static void
exclaves_ipc_buffer_count_inc(void)121 exclaves_ipc_buffer_count_inc(void)
122 {
123 	os_atomic_inc(&exclaves_ipcb_cnt, relaxed);
124 }
125 
126 static void
exclaves_ipc_buffer_count_dec(void)127 exclaves_ipc_buffer_count_dec(void)
128 {
129 	__assert_only size_t orig_ipcb_cnt =
130 	    os_atomic_dec_orig(&exclaves_ipcb_cnt, relaxed);
131 	assert3u(orig_ipcb_cnt, >=, 1);
132 }
133 
134 /* -------------------------------------------------------------------------- */
135 #pragma mark IPC buffer cache
136 
137 /*
138  * A (simple, for now...) cache of IPC buffers for communicating with XNU-Proxy.
139  * The cache itself is realtime safe and relies on a spin lock for
140  * synchronization. However, if there's no cached buffer available, the calling
141  * code will fallback to doing a full IPC buffer allocation with xnu-proxy. This
142  * involves taking a mutex and is not realtime safe.
143  */
144 
/*
 * Determines the maximum size of the buffer cache. Can be overriden via an EDT
 * entry or boot-arg.
 */
TUNABLE_DEV_WRITEABLE(unsigned int, exclaves_ipc_buffer_cache_max,
    "exclaves_ipcb_cache", 16);

/* Current count of entries in the buffer cache. Protected by the spin lock. */
static unsigned int exclaves_ipc_buffer_cache_count = 0;

/*
 * Intrusive linked list within the unused IPC buffer. The first bytes of a
 * free buffer are reused to hold the list linkage plus the buffer's scid.
 */
typedef struct exclaves_ipc_buffer_cache_item {
	struct exclaves_ipc_buffer_cache_item *next;
	Exclaves_L4_Word_t scid;
}__attribute__((__packed__)) exclaves_ipc_buffer_cache_item_t;

/* The intrusive item must fit inside the IPC buffer it describes. */
static_assert(Exclaves_L4_IpcBuffer_Size >=
    sizeof(exclaves_ipc_buffer_cache_item_t),
    "Invalid Exclaves_L4_IpcBuffer_Size");

/* Spin lock protecting the free list and its count (realtime safe). */
static LCK_SPIN_DECLARE(exclaves_ipc_buffer_cache_lock, &exclaves_lck_grp);
/* Head of the free-buffer list. */
static exclaves_ipc_buffer_cache_item_t *exclaves_ipc_buffer_cache;
167 
168 static bool
exclaves_ipc_buffer_cache_alloc(exclaves_ctx_t * ctx)169 exclaves_ipc_buffer_cache_alloc(exclaves_ctx_t *ctx)
170 {
171 	lck_spin_lock(&exclaves_ipc_buffer_cache_lock);
172 
173 	if (exclaves_ipc_buffer_cache_count == 0) {
174 		lck_spin_unlock(&exclaves_ipc_buffer_cache_lock);
175 		return false;
176 	}
177 
178 	assert3p(exclaves_ipc_buffer_cache, !=, NULL);
179 
180 	exclaves_ipc_buffer_cache_item_t *cached_buffer = exclaves_ipc_buffer_cache;
181 	exclaves_ipc_buffer_cache = cached_buffer->next;
182 
183 	exclaves_ipc_buffer_cache_count--;
184 
185 	lck_spin_unlock(&exclaves_ipc_buffer_cache_lock);
186 
187 	ctx->ipcb = (void *)cached_buffer;
188 	ctx->scid = cached_buffer->scid;
189 	ctx->usecnt = 0;
190 
191 	/*
192 	 * Zero out this usage of the buffer to avoid any confusion in
193 	 * xnu-proxy.
194 	 */
195 	cached_buffer->next = NULL;
196 	cached_buffer->scid = 0;
197 
198 	return true;
199 }
200 
/*
 * Return the IPC buffer owned by ctx to the free-buffer cache. On return ctx
 * no longer owns a buffer. Currently always succeeds (returns true) -- see
 * the note at the end.
 */
static bool
exclaves_ipc_buffer_cache_free(exclaves_ctx_t *ctx)
{
	/* The bootstrap context must never be returned to the cache. */
	assert3u(ctx->scid, !=, exclaves_bootstrap_ctx.scid);

	/* Zero out the IPC buffer to avoid having old IPC data lying around. */
	bzero(ctx->ipcb, Exclaves_L4_IpcBuffer_Size);

	lck_spin_lock(&exclaves_ipc_buffer_cache_lock);

#if 0 /* Removed with the fix for rdar://126257712 */
	/* Don't free into the cache if the cache has hit its limit. */
	if (exclaves_ipc_buffer_cache_count == exclaves_ipc_buffer_cache_max) {
		lck_spin_unlock(&exclaves_ipc_buffer_cache_lock);
		return false;
	}
#endif

	exclaves_ipc_buffer_cache_item_t *cached_buffer = NULL;

	/*
	 * Reuse the first bytes of the (now zeroed) buffer for the cache
	 * linkage, remembering the buffer's scid for the next allocation.
	 */
	cached_buffer = (void *)ctx->ipcb;
	cached_buffer->scid = ctx->scid;

	/* The caller's context no longer owns the buffer. */
	ctx->ipcb = NULL;
	ctx->scid = 0;
	ctx->usecnt = 0;

	/* Push onto the head of the free list. */
	cached_buffer->next = exclaves_ipc_buffer_cache;
	exclaves_ipc_buffer_cache = cached_buffer;

	exclaves_ipc_buffer_cache_count++;

	lck_spin_unlock(&exclaves_ipc_buffer_cache_lock);

	/*
	 * NOTE(review): with the limit check above disabled, this path always
	 * returns true, so the cache may grow beyond
	 * exclaves_ipc_buffer_cache_max (intentional per rdar://126257712).
	 */
	return true;
}
237 
238 static kern_return_t
exclaves_ipc_buffer_cache_init(void)239 exclaves_ipc_buffer_cache_init(void)
240 {
241 	if (exclaves_ipc_buffer_cache_max == 0) {
242 		return KERN_SUCCESS;
243 	}
244 
245 	kern_return_t kr = KERN_FAILURE;
246 
247 	assert3p(exclaves_ipc_buffer_cache, ==, NULL);
248 
249 	exclaves_ctx_t *ctx = kalloc_type(exclaves_ctx_t,
250 	    exclaves_ipc_buffer_cache_max, Z_WAITOK | Z_ZERO | Z_NOFAIL);
251 
252 	/*
253 	 * Pre-warm the cache by allocating up to cache_max and then releasing
254 	 * the allocated contexts back into the cache.
255 	 */
256 	for (unsigned int i = 0; i < exclaves_ipc_buffer_cache_max; i++) {
257 		kr = exclaves_xnuproxy_ctx_alloc(&ctx[i]);
258 		if (kr != KERN_SUCCESS) {
259 			kfree_type(exclaves_ctx_t,
260 			    exclaves_ipc_buffer_cache_max, ctx);
261 			return kr;
262 		}
263 	}
264 
265 	/*
266 	 * Release the newly allocated contexts so they ends up in the cache. We
267 	 * know this will succeed because the only failure modes of
268 	 * exclaves_xnuproxy_ctx_free are if the downcall fails. The downcall
269 	 * won't be used here as we *know* that the buffer cache is active.
270 	 */
271 	for (unsigned int i = 0; i < exclaves_ipc_buffer_cache_max; i++) {
272 		kr = exclaves_xnuproxy_ctx_free(&ctx[i]);
273 		assert3u(kr, ==, KERN_SUCCESS);
274 	}
275 
276 	kfree_type(exclaves_ctx_t, exclaves_ipc_buffer_cache_max, ctx);
277 
278 	return KERN_SUCCESS;
279 }
280 
281 
282 /* -------------------------------------------------------------------------- */
283 #pragma mark xnu-proxy calls
284 
/* Tightbeam client for command downcalls; set up in exclaves_xnuproxy_init(). */
static xnuproxy_cmd_s xnuproxy_cmd_client = {0};
286 
/*
 * Allocate an IPC buffer and scheduling-context id (scid) into ctx. Tries
 * the realtime-safe buffer cache first, then falls back to a full xnu-proxy
 * downcall (takes a mutex; not realtime safe).
 */
kern_return_t
exclaves_xnuproxy_ctx_alloc(exclaves_ctx_t *ctx)
{
	assert3p(ctx, !=, NULL);

	/* Try to allocate it from the cache. */
	if (exclaves_ipc_buffer_cache_alloc(ctx)) {
		assert(ctx->usecnt == 0);
		return KERN_SUCCESS;
	}

	/*
	 * Fallback to a full allocation with xnuproxy. This must be done in the
	 * context of the bootstrap scheduling context.
	 */
	exclaves_ctx_t stash_ctx = {};
	__block exclaves_ctx_t local_ctx = {};

	/* Returns with exclaves_xnuproxy_lock held. */
	exclaves_bootstrap_context_acquire(&stash_ctx);

	/* This may spawn a new exclaves thread. */
	thread_exclaves_state_flags_t state = current_thread()->th_exclaves_state;
	current_thread()->th_exclaves_state |= TH_EXCLAVES_SPAWN_EXPECTED;

	/* Downcall; the completion block fills in local_ctx. */
	tb_error_t ret = xnuproxy_cmd_ipccontextallocate(&xnuproxy_cmd_client,
	    ^(xnuproxy_ipccontext_s c) {
		local_ctx.ipcb = (Exclaves_L4_IpcBuffer_t *)phystokv(c.buffer);
		local_ctx.scid = c.scid;
	});

	/* Restore the old state (which itself may have set the SPAWN flag).  */
	current_thread()->th_exclaves_state = state;

	/* Drops exclaves_xnuproxy_lock. */
	exclaves_bootstrap_context_release(&stash_ctx);

	if (ret != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "allocate context: failure %u\n", ret);
		return KERN_FAILURE;
	}

	/* Update count. */
	exclaves_ipc_buffer_count_inc();

	/* Hand the freshly allocated context to the caller. */
	*ctx = local_ctx;

	assert(ctx->usecnt == 0);
	return KERN_SUCCESS;
}
336 
/*
 * Free the IPC buffer/scid owned by ctx. Prefers returning the buffer to the
 * cache; otherwise performs a full xnu-proxy downcall. The bootstrap context
 * is never freed. ctx is zeroed on success.
 */
kern_return_t
exclaves_xnuproxy_ctx_free(exclaves_ctx_t *ctx)
{
	assert3p(ctx, !=, NULL);

	/* exclaves_bootstrap_ctx.scid should never be freed. */
	if (ctx->scid == exclaves_bootstrap_ctx.scid) {
		return KERN_SUCCESS;
	}

	assert(ctx->usecnt == 0);
	/* Try to free it back to the cache. */
	if (exclaves_ipc_buffer_cache_free(ctx)) {
		return KERN_SUCCESS;
	}

	/*
	 * Fallback to a full free with xnuproxy. This must be done in the
	 * context of the bootstrap scheduling context.
	 */
	exclaves_ctx_t stash_ctx = {};
	__block exclaves_ctx_t local_ctx = *ctx;

	/* Returns with exclaves_xnuproxy_lock held. */
	exclaves_bootstrap_context_acquire(&stash_ctx);

	/* Only the scid is needed to identify the context being freed. */
	xnuproxy_ipccontext_s c = {
		.scid = local_ctx.scid,
	};

	tb_error_t ret = xnuproxy_cmd_ipccontextfree(&xnuproxy_cmd_client, &c);

	/* Drops exclaves_xnuproxy_lock. */
	exclaves_bootstrap_context_release(&stash_ctx);

	if (ret != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "free context: failure %u\n", ret);
		return KERN_FAILURE;
	}

	/* The context no longer owns a buffer. */
	ctx->ipcb = NULL;
	ctx->scid = 0;
	ctx->usecnt = 0;

	/* Update count. */
	exclaves_ipc_buffer_count_dec();

	return KERN_SUCCESS;
}
385 
386 static size_t
countof_char_v(const char_v_s * cv)387 countof_char_v(const char_v_s *cv)
388 {
389 	assert3p(cv, !=, NULL);
390 
391 	__block size_t count = 0;
392 
393 	char__v_visit(cv,
394 	    ^( __unused size_t i, __unused const xnuproxy_char_s item) {
395 		count++;
396 	});
397 
398 	return count;
399 }
400 
401 static void
copy_char_v(const char_v_s * src,char * dst)402 copy_char_v(const char_v_s *src, char *dst)
403 {
404 	assert3p(src, !=, NULL);
405 	assert3p(dst, !=, NULL);
406 
407 	char__v_visit(src,
408 	    ^(size_t i, const xnuproxy_char_s item) {
409 		dst[i] = item;
410 	});
411 }
412 
413 /*
414  * Iterate over all the resources calling cb for each one.
415  */
/*
 * Iterate over all the resources calling cb for each one. Returns
 * KERN_FAILURE if the underlying tightbeam downcall fails.
 */
kern_return_t
exclaves_xnuproxy_resource_info(void (^cb)(const char *name, const char *domain,
    xnuproxy_resourcetype_s, uint64_t id, bool))
{
	/* BEGIN IGNORE CODESTYLE */
	tb_error_t ret = xnuproxy_cmd_resourceinfo(&xnuproxy_cmd_client,
	    ^(xnuproxy_resourceinfo_v_s ri) {
		xnuproxy_resourceinfo__v_visit(&ri,
		    ^(__unused size_t i, const xnuproxy_resourceinfo_s *item) {
			/*
			 * NOTE(review): name_copy/domain_copy are VLAs sized
			 * to the element count of the char vector. This
			 * assumes the vector includes the NUL terminator --
			 * otherwise the strings handed to cb would be
			 * unterminated. Confirm against the tightbeam
			 * marshalling of item->name / item->domain.
			 */
			char name_copy[countof_char_v(&item->name)];
			copy_char_v(&item->name, name_copy);

			char domain_copy[countof_char_v(&item->domain)];
			copy_char_v(&item->domain, domain_copy);

			cb(name_copy, domain_copy,
			    (xnuproxy_resourcetype_s)item->type, item->id,
			    item->connected);
		});
	});
	/* END IGNORE CODESTYLE */

	if (ret != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "resource info: failure %u\n", ret);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
446 
447 kern_return_t
exclaves_xnuproxy_pmm_usage(void)448 exclaves_xnuproxy_pmm_usage(void)
449 {
450 	tb_error_t ret = xnuproxy_cmd_pmmmemusage(&xnuproxy_cmd_client);
451 	if (ret != TB_ERROR_SUCCESS) {
452 		exclaves_debug_printf(show_errors,
453 		    "pmm usage: failure %u\n", ret);
454 		return KERN_FAILURE;
455 	}
456 
457 	return KERN_SUCCESS;
458 }
459 
460 /* -------------------------------------------------------------------------- */
461 #pragma mark exclaves xnu-proxy downcall
462 
/* Debug-progress helper used by exclaves_xnuproxy_endpoint_call below. */
#define exclaves_xnuproxy_endpoint_call_show_progress(operation, step, \
	    eid, scid, status) \
	exclaves_debug_printf(show_progress, \
	    "exclaves: xnu proxy endpoint " #operation " " #step ":\t" \
	    "endpoint id %ld scid 0x%lx status %u\n", \
	    (eid), (scid), (status))
/*
 * Perform an RPC to the given xnu-proxy endpoint on the current thread's IPC
 * context. Loops running the exclave until the message status in the IPC
 * buffer reports a reply, servicing any upcalls in between. Returns
 * KERN_SUCCESS on a reply, KERN_FAILURE on an unrecognized status.
 */
OS_NOINLINE
kern_return_t
exclaves_xnuproxy_endpoint_call(Exclaves_L4_Word_t endpoint_id)
{
	kern_return_t kr = KERN_SUCCESS;
	thread_t thread = current_thread();
	bool interrupted = false;

	/* The thread's IPC context carries both the scid and the buffer. */
	Exclaves_L4_Word_t scid = thread->th_exclaves_ipc_ctx.scid;
	Exclaves_L4_IpcBuffer_t *ipcb = thread->th_exclaves_ipc_ctx.ipcb;
	xnuproxy_msg_status_t status =
	    XNUPROXY_MSG_STATUS_PROCESSING;

	/* Publish the request into the IPC buffer. */
	XNUPROXY_CR_ENDPOINT_ID(ipcb) = endpoint_id;
	XNUPROXY_CR_STATUS(ipcb) = status;

	exclaves_xnuproxy_endpoint_call_show_progress(call, entry,
	    endpoint_id, scid, status);

	/* Mark the thread as being in an exclaves RPC for its duration. */
	assert3u(thread->th_exclaves_state & TH_EXCLAVES_STATE_ANY, ==, 0);
	thread->th_exclaves_state |= TH_EXCLAVES_RPC;
	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_RPC)
	    | DBG_FUNC_START, scid, endpoint_id);

	while (1) {
		kr = exclaves_run(thread, interrupted);
		assert(kr == KERN_SUCCESS || kr == KERN_ABORTED);

		/* A wait was interrupted. */
		interrupted = kr == KERN_ABORTED;

		/* Re-read the status the exclave left in the buffer. */
		status = (xnuproxy_msg_status_t)
		    XNUPROXY_CR_STATUS(ipcb);

		switch (status) {
		case XNUPROXY_MSG_STATUS_PROCESSING:
			/* Not done yet -- run the exclave again. */
			exclaves_xnuproxy_endpoint_call_show_progress(call, yielded,
			    endpoint_id, scid, status);
			continue;

		case XNUPROXY_MSG_STATUS_REPLY:
			/* The RPC completed. */
			exclaves_xnuproxy_endpoint_call_show_progress(call, returned,
			    endpoint_id, scid, status);
			kr = KERN_SUCCESS;
			break;

		case XNUPROXY_MSG_STATUS_UPCALL:
			/*
			 * The exclave is calling back into xnu: dispatch the
			 * upcall, write the result into the buffer and resume.
			 */
			thread->th_exclaves_state |= TH_EXCLAVES_UPCALL;
			endpoint_id = XNUPROXY_CR_ENDPOINT_ID(ipcb);
			KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_UPCALL)
			    | DBG_FUNC_START, scid, endpoint_id);
			exclaves_xnuproxy_endpoint_call_show_progress(upcall, entry,
			    endpoint_id, scid, status);
			kr = exclaves_call_upcall_handler(endpoint_id);
			XNUPROXY_CR_STATUS(ipcb) =
			    XNUPROXY_MSG_STATUS_PROCESSING;
			/* TODO: More state returned than Success or OperationInvalid? */
			XNUPROXY_CR_RETVAL(ipcb) =
			    (kr == KERN_SUCCESS) ? Exclaves_L4_Success :
			    Exclaves_L4_ErrorOperationInvalid;
			KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_UPCALL)
			    | DBG_FUNC_END);
			thread->th_exclaves_state &= ~TH_EXCLAVES_UPCALL;
			exclaves_xnuproxy_endpoint_call_show_progress(upcall, returned,
			    endpoint_id, scid,
			    (unsigned int)XNUPROXY_CR_RETVAL(ipcb));
			continue;

		default:
			// Should we have an assert(valid return) here?
			exclaves_xnuproxy_endpoint_call_show_progress(call, failed,
			    endpoint_id, scid, status);
			kr = KERN_FAILURE;
			break;
		}
		break;
	}

	KDBG_RELEASE(MACHDBG_CODE(DBG_MACH_EXCLAVES, MACH_EXCLAVES_RPC)
	    | DBG_FUNC_END);
	thread->th_exclaves_state &= ~TH_EXCLAVES_RPC;

	/* This condition provides fast path and also ensures that collection
	 * thread will never block on AST (it does have only
	 * TH_EXCLAVES_INSPECTION_NOINSPECT flag).
	 *
	 * The th_exclaves_inspection_state condition below has to be done after
	 * cleanup of TH_EXCLAVES_RPC. Compiler must not reorder it.
	 * With opposite order, Stackshot could put the thread with RPC flag on collection
	 * list but the thread would be free to continue and to release its SCID.
	 */
	os_compiler_barrier();
	if ((os_atomic_load(&thread->th_exclaves_inspection_state,
	    relaxed) & ~TH_EXCLAVES_INSPECTION_NOINSPECT) != 0) {
		exclaves_inspection_check_ast();
	}

	return kr;
}
568 
569 
570 /* -------------------------------------------------------------------------- */
571 #pragma mark exclaves xnu-proxy initialisation
572 
/*
 * Initialise the xnu-proxy link from the boot-info blob at bootinfo_pa
 * (physical address). Unmarshals the boot info, validates the message
 * version, records the bootstrap context, creates the tightbeam command
 * endpoint and pre-warms the IPC buffer cache.
 */
kern_return_t
exclaves_xnuproxy_init(uint64_t bootinfo_pa)
{
	assert3u(bootinfo_pa, !=, 0);

	kern_return_t kr = KERN_FAILURE;

	void *bootinfo_va = (void *)phystokv(bootinfo_pa);
	assert3p(bootinfo_va, !=, NULL);
	const size_t bootinfo_size =
	    xnuproxy_bootinfo__marshal_sizeof(&(xnuproxy_bootinfo_s){});

	/* Stays 0 if the unmarshal block bails out early (see below). */
	__block uint64_t endpoint = 0;

	/* BEGIN IGNORE CODESTYLE */
	tb_error_t ret = xnuproxy_bootinfo__unmarshal(bootinfo_va,
	    bootinfo_size, ^(xnuproxy_bootinfo_s bootinfo) {

		/* Do the version check. */
		if (bootinfo.version != XNUPROXY_VERSION_CURRENT) {
			exclaves_debug_printf(show_errors,
			    "exclaves: mismatched xnuproxy message version, "
			    "xnuproxy: %u, xnu: %u\n", bootinfo.version,
			    XNUPROXY_VERSION_CURRENT);
			return;
		}

		exclaves_debug_printf(show_progress,
		    "exclaves: xnuproxy message version: 0x%u\n",
		    XNUPROXY_VERSION_CURRENT);


		/* The bootstrap IPC buffer must be a valid physical address. */
		if (!pmap_valid_address(bootinfo.buffer)) {
			exclaves_debug_printf(show_errors,
			    "exclaves: invalid bootstrap IPC buffer address: "
			    "0x%llx\n", bootinfo.buffer);
			return;
		}

		/* Record the bootstrap scheduling context. */
		exclaves_bootstrap_ctx.scid = bootinfo.scid;
		exclaves_bootstrap_ctx.ipcb =
		    (Exclaves_L4_IpcBuffer_t *)phystokv(bootinfo.buffer);
		assert3p(exclaves_bootstrap_ctx.ipcb, !=, NULL);
		exclaves_bootstrap_ctx.usecnt = 1;

		endpoint = bootinfo.endpointid;
	});

	/* END IGNORE CODESTYLE */
	if (ret != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "failed to unmarshal bootinfo\n");
		return KERN_FAILURE;
	}

	/*
	 * Check to see if we bailed out of the unmarshal block early which
	 * would indicate a failure (for example the version check may have
	 * failed).
	 */
	if (endpoint == 0) {
		return KERN_FAILURE;
	}

	if (exclaves_bootstrap_ctx.ipcb == NULL) {
		return KERN_FAILURE;
	}

	/* Create the tightbeam endpoint used for all command downcalls. */
	/* BEGIN IGNORE CODESTYLE */
	tb_endpoint_t ep = tb_endpoint_create_with_value(
	    TB_TRANSPORT_TYPE_XNU, endpoint, TB_ENDPOINT_OPTIONS_NONE);
	/* END IGNORE CODESTYLE */
	ret = xnuproxy_cmd__init(&xnuproxy_cmd_client, ep);
	if (ret != TB_ERROR_SUCCESS) {
		exclaves_debug_printf(show_errors,
		    "failed to create xnuproxy endpoint\n");
		return KERN_FAILURE;
	}
	/* Downcalls to xnu-proxy now supported. */

	/* Pre-warm the IPC buffer cache (needs working downcalls). */
	kr = exclaves_ipc_buffer_cache_init();
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return KERN_SUCCESS;
}
660 
661 #endif /* CONFIG_EXCLAVES */
662