xref: /xnu-10063.101.15/osfmk/kern/exclaves_resource.c (revision 94d3b452840153a99b38a3a9659680b2a006908e)
1 /*
2  * Copyright (c) 2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #if CONFIG_EXCLAVES
30 
31 #include <stdint.h>
32 
33 #include <mach/exclaves.h>
34 #include <mach/kern_return.h>
35 
36 #include <string.h>
37 
38 #include <kern/assert.h>
39 #include <kern/queue.h>
40 #include <kern/kalloc.h>
41 #include <kern/locks.h>
42 #include <kern/task.h>
43 #include <kern/thread_call.h>
44 
45 #include <vm/pmap.h>
46 
47 
48 #include <kern/ipc_kobject.h>
49 
50 #include <os/hash.h>
51 
52 #include <libxnuproxy/messages.h>
53 
54 #include <mach/mach_traps.h>
55 #include <mach/mach_port.h>
56 
57 #include <sys/event.h>
58 
59 #include "exclaves_resource.h"
60 #include "exclaves_shared_memory.h"
61 #include "exclaves_sensor.h"
62 #include "exclaves_conclave.h"
63 
64 /* Use the new version of xnuproxy_msg_t. */
65 #define xnuproxy_msg_t xnuproxy_msg_new_t
66 
67 static LCK_GRP_DECLARE(resource_lck_grp, "exclaves_resource");
68 
69 extern kern_return_t exclaves_xnu_proxy_send(xnuproxy_msg_t *, void *);
70 
71 /*
72  * Exclave Resources
73  *
74  * Exclaves provide a fixed static set of resources available to XNU. Some
75  * examples of types of resources:
76  *     - Conclave managers
77  *     - Services
78  *     - Named buffers
79  *     - Audio buffers
80  *     ...
81  *
82  * Each resource has a name, a type and a corresponding identifier which is
83  * shared between XNU and Exclaves. Resources are scoped by what entities are
84  * allowed to access them.
85  * Resources are discovered during boot and made available in a two-level table
86  * scheme. The root table collects resources by their scope, with the
87  * second-level tables listing the actual resources.
88  *
89  *
90  *           Root Table
91  * ┌────────────────────────────┐
92  * │ ┌────────────────────────┐ │
93  * │ │  "com.apple.kernel"    │─┼─────┐
94  * │ └────────────────────────┘ │     │
95  * │ ┌────────────────────────┐ │     │
96  * │ │"com.apple.conclave.a"  │─┼─┐   │
97  * │ └────────────────────────┘ │ │   │
98  * │ ┌────────────────────────┐ │ │   │
99  * │ │"com.apple.conclave.b"  │ │ │   │
100  * │ └────────────────────────┘ │ │   │
101  * │ ┌────────────────────────┐ │ │   │
102  * │ │ "com.apple.driver.a"   │ │ │   │
103  * │ └────────────────────────┘ │ │   │
104  * │  ...                       │ │   │
105  * │                            │ │   │
106  * └────────────────────────────┘ │   │
107  *      ┌─────────────────────────┘   │
108  *      │                             │
109  *      │   ┌─────────────────────────┘
110  *      │   │
111  *      │   │
112  *      │   │
113  *      │   └──▶  "com.apple.kernel"
114  *      │        ┌─────────────────────────────────────────────────────┐
115  *      │        │┌───────────────────────┬──────────────────┬────────┐│
116  *      │        ││"com.apple.conclave.a" │ CONCLAVE_MANAGER │ 0x1234 ││
117  *      │        │└───────────────────────┴──────────────────┴────────┘│
118  *      │        │┌───────────────────────┬──────────────────┬────────┐│
119  *      │        ││"com.apple.conclave.b" │ CONCLAVE_MANAGER │ 0x7654 ││
120  *      │        │└───────────────────────┴──────────────────┴────────┘│
121  *      │        │                                                     │
122  *      │        │  ...                                                │
123  *      │        └─────────────────────────────────────────────────────┘
124  *      │
125  *      └─────▶   "com.apple.conclave.a"
126  *               ┌─────────────────────────────────────────────────────┐
127  *               │┌───────────────────────┬──────────────────┬────────┐│
128  *               ││      "audio_buf"      │   AUDIO_BUFFER   │ 0x9999 ││
129  *               │└───────────────────────┴──────────────────┴────────┘│
130  *               │┌───────────────────────┬──────────────────┬────────┐│
131  *               ││      "service_x"      │     SERVICE      │ 0x1111 ││
132  *               │└───────────────────────┴──────────────────┴────────┘│
133  *               │┌───────────────────────┬──────────────────┬────────┐│
134  *               ││   "named_buffer_x"    │   NAMED_BUFFER   │0x66565 ││
135  *               │└───────────────────────┴──────────────────┴────────┘│
136  *               │  ...                                                │
137  *               └─────────────────────────────────────────────────────┘
138  *
139  *                 ...
140  *
141  *
142  * Resources can be looked up by first finding the root table entry (the
143  * "domain") and then searching for the identifier in that domain.
144  * For example to lookup the conclave manager ID for "com.apple.conclave.a",
145  * the "com.apple.kernel" domain would be found and then within that domain, the
146  * search would continue using the conclave name and the CONCLAVE_MANAGER type.
147  * Every conclave domain has a corresponding CONCLAVE_MANAGER resource in the
148  * "com.apple.kernel" domain.
149  */
150 
151 /* -------------------------------------------------------------------------- */
152 #pragma mark Hash Table
153 
154 #define TABLE_LEN 64
155 
156 /*
157  * A table item is what ends up being stored in the hash table. It has a key and
158  * a value.
159  */
typedef struct {
	const void    *i_key;     /* key bytes; points at caller-owned storage, not copied */
	size_t         i_key_len; /* length of i_key in bytes */
	void          *i_value;   /* payload; must be non-NULL when inserted (see table_put) */

	queue_chain_t  i_chain;   /* linkage within a single hash bucket */
} table_item_t;
167 
168 /*
169  * The hash table consists of an array of buckets (queues). The hashing function
170  * will choose in which bucket a particular item belongs.
171  */
typedef struct {
	queue_head_t *t_buckets;       /* array of bucket queues */
	size_t        t_buckets_count; /* number of buckets; must be a power of two */
} table_t;
176 
177 /*
178  * Given a key, return the corresponding bucket.
179  */
180 static queue_head_t *
get_bucket(table_t * table,const void * key,size_t key_len)181 get_bucket(table_t *table, const void *key, size_t key_len)
182 {
183 	const uint32_t idx = os_hash_jenkins(key, key_len) &
184 	    (table->t_buckets_count - 1);
185 	return &table->t_buckets[idx];
186 }
187 
188 /*
189  * Insert a new table item associated with 'key' into a table.
190  */
191 static void
table_put(table_t * table,const void * key,size_t key_len,table_item_t * item)192 table_put(table_t *table, const void *key, size_t key_len, table_item_t *item)
193 {
194 	assert3p(item->i_chain.next, ==, NULL);
195 	assert3p(item->i_chain.prev, ==, NULL);
196 	assert3p(item->i_value, !=, NULL);
197 
198 	queue_head_t *head = get_bucket(table, key, key_len);
199 	enqueue(head, &item->i_chain);
200 }
201 
202 /*
203  * Iterate through all items matching 'key' calling cb for each.
204  */
205 static void
206 table_get(table_t *table, const void *key, size_t key_len, bool (^cb)(void *))
207 {
208 	const queue_head_t *head = get_bucket(table, key, key_len);
209 	table_item_t *elem = NULL;
210 
211 	assert3p(head, !=, NULL);
212 
qe_foreach_element(elem,head,i_chain)213 	qe_foreach_element(elem, head, i_chain) {
214 		if (elem->i_key_len == key_len &&
215 		    memcmp(elem->i_key, key, elem->i_key_len) == 0) {
216 			if (cb(elem->i_value)) {
217 				return;
218 			}
219 		}
220 	}
221 
222 	return;
223 }
224 
225 /*
226  * Initialize the queues.
227  */
228 static void
table_init(table_t * table)229 table_init(table_t *table)
230 {
231 	assert3u(table->t_buckets_count & (table->t_buckets_count - 1), ==, 0);
232 
233 	/* Initialise each bucket. */
234 	for (size_t i = 0; i < table->t_buckets_count; i++) {
235 		queue_init(&table->t_buckets[i]);
236 	}
237 }
238 
239 /*
240  * Allocate a new table with the specified number of buckets.
241  */
242 static table_t *
table_alloc(size_t nbuckets)243 table_alloc(size_t nbuckets)
244 {
245 	assert3u(nbuckets, >, 0);
246 	assert3u(nbuckets & (nbuckets - 1), ==, 0);
247 
248 	table_t *table = kalloc_type(table_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
249 
250 	table->t_buckets_count = nbuckets;
251 	table->t_buckets = kalloc_type(queue_head_t, nbuckets,
252 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
253 
254 	return table;
255 }
256 
257 
258 /* -------------------------------------------------------------------------- */
259 #pragma mark Root Table
260 
261 /*
262  * The root table is a hash table which contains an entry for every top-level
263  * domain.
264  * Domains scope resources. For example a conclave domain will contain a list of
265  * services available in that conclave. The kernel itself gets its own domain
266  * which holds conclave managers and other resources the kernel communicates
267  * with directly.
268  */
table_t root_table = {
	/*
	 * Statically-allocated bucket storage via a compound literal.
	 * NOTE(review): the literal is queue_chain_t[] but the field is
	 * queue_head_t * — this relies on queue_head_t and queue_chain_t
	 * being the same underlying type (kern/queue.h); confirm.
	 */
	.t_buckets = (queue_chain_t *)(queue_chain_t[TABLE_LEN]){},
	.t_buckets_count = TABLE_LEN,
};
273 
274 /*
275  * Entries in the root table. Each itself a table containing resources available
276  * in that domain.
277  */
typedef struct {
	char     d_name[XNUPROXY_RESOURCE_NAME_MAX]; /* domain (scope) name, NUL-terminated */
	table_t *d_table_name; /* resources in this domain, keyed by name (+type filter) */
	table_t *d_table_id;   /* resources keyed by 64-bit ID; only populated for some types */
} exclaves_resource_domain_t;
283 
284 static exclaves_resource_domain_t *
lookup_domain(const char * domain_name)285 lookup_domain(const char *domain_name)
286 {
287 	__block exclaves_resource_domain_t *domain = NULL;
288 	table_get(&root_table, domain_name, strlen(domain_name), ^bool (void *data) {
289 		domain = data;
290 		return true;
291 	});
292 
293 	return domain;
294 }
295 
296 static exclaves_resource_t *
lookup_resource_by_name(exclaves_resource_domain_t * domain,const char * name,xnuproxy_resource_t type)297 lookup_resource_by_name(exclaves_resource_domain_t *domain, const char *name,
298     xnuproxy_resource_t type)
299 {
300 	__block exclaves_resource_t *resource = NULL;
301 	table_get(domain->d_table_name, name, strlen(name), ^bool (void *data) {
302 		exclaves_resource_t *tmp = data;
303 		if (tmp->r_type == type) {
304 		        resource = data;
305 		        return true;
306 		}
307 		return false;
308 	});
309 
310 	return resource;
311 }
312 
313 static exclaves_resource_t *
lookup_resource_by_id(exclaves_resource_domain_t * domain,uint64_t id,xnuproxy_resource_t type)314 lookup_resource_by_id(exclaves_resource_domain_t *domain, uint64_t id,
315     xnuproxy_resource_t type)
316 {
317 	__block exclaves_resource_t *resource = NULL;
318 	table_get(domain->d_table_id, &id, sizeof(id), ^bool (void *data) {
319 		exclaves_resource_t *tmp = data;
320 		if (tmp->r_type == type) {
321 		        resource = data;
322 		        return true;
323 		}
324 		return false;
325 	});
326 
327 	return resource;
328 }
329 
330 static exclaves_resource_domain_t *
exclaves_resource_domain_alloc(const char * scope)331 exclaves_resource_domain_alloc(const char *scope)
332 {
333 	assert3u(strlen(scope), >, 0);
334 	assert3u(strlen(scope), <=, XNUPROXY_RESOURCE_NAME_MAX);
335 
336 	exclaves_resource_domain_t *domain = kalloc_type(
337 		exclaves_resource_domain_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
338 	(void) strlcpy(domain->d_name, scope,
339 	    sizeof(domain->d_name));
340 
341 	domain->d_table_name = table_alloc(TABLE_LEN);
342 	table_init(domain->d_table_name);
343 
344 	domain->d_table_id = table_alloc(TABLE_LEN);
345 	table_init(domain->d_table_id);
346 
347 	table_item_t *item = kalloc_type(table_item_t,
348 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
349 	item->i_key = domain->d_name;
350 	item->i_key_len = strlen(domain->d_name);
351 	item->i_value = domain;
352 
353 	table_put(&root_table, scope, strlen(scope), item);
354 
355 	return domain;
356 }
357 
358 static exclaves_resource_t *
exclaves_resource_alloc(xnuproxy_resource_t type,const char * name,uint64_t id,exclaves_resource_domain_t * domain)359 exclaves_resource_alloc(xnuproxy_resource_t type, const char *name, uint64_t id,
360     exclaves_resource_domain_t *domain)
361 {
362 	exclaves_resource_t *resource = kalloc_type(exclaves_resource_t,
363 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
364 
365 	resource->r_type = type;
366 	resource->r_id = id;
367 	resource->r_active = false;
368 	os_atomic_store(&resource->r_usecnt, 0, relaxed);
369 
370 	/*
371 	 * Each resource has an associated kobject of type
372 	 * IKOT_EXCLAVES_RESOURCE.
373 	 */
374 	ipc_port_t port = ipc_kobject_alloc_port((ipc_kobject_t)resource,
375 	    IKOT_EXCLAVES_RESOURCE, IPC_KOBJECT_ALLOC_NSREQUEST);
376 	resource->r_port = port;
377 
378 	lck_mtx_init(&resource->r_mutex, &resource_lck_grp, NULL);
379 
380 	(void) strlcpy(resource->r_name, name, sizeof(resource->r_name));
381 
382 
383 	/* Stick the newly created resource into the name table. */
384 	table_item_t *name_item = kalloc_type(table_item_t,
385 	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
386 
387 	name_item->i_key = resource->r_name;
388 	name_item->i_key_len = strlen(resource->r_name);
389 	name_item->i_value = resource;
390 
391 	assert(lookup_resource_by_name(domain, name, type) == NULL);
392 	table_put(domain->d_table_name, name, strlen(name), name_item);
393 
394 	/*
395 	 * Some types also need to lookup by id in addition to looking up by
396 	 * name.
397 	 */
398 	switch (type) {
399 	case XNUPROXY_RESOURCE_NOTIFICATION: {
400 		/* Stick the newly created resource into the ID table. */
401 		table_item_t *id_item = kalloc_type(table_item_t,
402 		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
403 		id_item->i_key = &resource->r_id;
404 		id_item->i_key_len = sizeof(resource->r_id);
405 		id_item->i_value = resource;
406 
407 		assert(lookup_resource_by_id(domain, id, type) == NULL);
408 		table_put(domain->d_table_id, &id, sizeof(id), id_item);
409 		break;
410 	}
411 
412 	default:
413 		break;
414 	}
415 
416 	return resource;
417 }
418 
419 /* -------------------------------------------------------------------------- */
420 #pragma mark Exclaves Resources
421 
422 static void exclaves_resource_no_senders(ipc_port_t port,
423     mach_port_mscount_t mscount);
424 
425 IPC_KOBJECT_DEFINE(IKOT_EXCLAVES_RESOURCE,
426     .iko_op_stable = true,
427     .iko_op_no_senders = exclaves_resource_no_senders);
428 
429 static void exclaves_conclave_init(exclaves_resource_t *resource);
430 static void exclaves_notification_init(exclaves_resource_t *resource);
431 static void exclaves_named_buffer_unmap(exclaves_resource_t *resource);
432 static void exclaves_audio_buffer_delete(exclaves_resource_t *resource);
433 static void exclaves_resource_sensor_reset(exclaves_resource_t *resource);
434 static void exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource);
435 static void exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource);
436 
437 /*
438  * Discover all the static exclaves resources populating the resource tables as
439  * we go.
440  */
kern_return_t
exclaves_resource_init(void)
{
	/* Initialize the root table. */
	table_init(&root_table);

	/*
	 * Enumerate resources by index via the xnu proxy until it reports no
	 * more (signalled by an empty name in the response).
	 */
	for (uint32_t i = 0;; i++) {
		/* Get info about the 'i'th resource. */
		xnuproxy_msg_t msg = {
			.cmd = XNUPROXY_CMD_RESOURCE_INFO,
			.cmd_resource_info = (xnuproxy_cmd_resource_info_t) {
				.request.index = i,
			},
		};

		kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL);
		if (kr != KERN_SUCCESS) {
			return kr;
		}

		/*
		 * An empty name indicates there are no resources left to
		 * enumerate.
		 */
		if (msg.cmd_resource_info.response.name[0] == '\0') {
			break;
		}

		xnuproxy_resource_t type = msg.cmd_resource_info.response.type;
		const char *name =
		    (const char *)&msg.cmd_resource_info.response.name;
		const uint64_t id = msg.cmd_resource_info.response.id;
		const char *scope =
		    (const char *)&msg.cmd_resource_info.response.domain;

		/*
		 * Every resource is scoped to a specific domain, find the
		 * domain (or create one if it doesn't exist).
		 */
		exclaves_resource_domain_t *domain = lookup_domain(scope);
		if (domain == NULL) {
			domain = exclaves_resource_domain_alloc(scope);
		}

		/* Allocate a new resource in the domain. */
		exclaves_resource_t *resource = exclaves_resource_alloc(type,
		    name, id, domain);

		/*
		 * Type specific initialization (conclave managers and
		 * notifications need extra per-type setup).
		 */
		switch (type) {
		case XNUPROXY_RESOURCE_CONCLAVE_MANAGER:
			exclaves_conclave_init(resource);
			break;

		case XNUPROXY_RESOURCE_NOTIFICATION:
			exclaves_notification_init(resource);
			break;

		default:
			break;
		}
	}

	return KERN_SUCCESS;
}
508 
509 exclaves_resource_t *
exclaves_resource_lookup_by_name(const char * domain_name,const char * name,xnuproxy_resource_t type)510 exclaves_resource_lookup_by_name(const char *domain_name, const char *name,
511     xnuproxy_resource_t type)
512 {
513 	assert3u(strlen(domain_name), >, 0);
514 	assert3u(strlen(name), >, 0);
515 
516 	exclaves_resource_domain_t *domain = lookup_domain(domain_name);
517 	if (domain == NULL) {
518 		return NULL;
519 	}
520 
521 	return lookup_resource_by_name(domain, name, type);
522 }
523 
524 static exclaves_resource_t *
exclaves_resource_lookup_by_id(const char * domain_name,uint64_t id,xnuproxy_resource_t type)525 exclaves_resource_lookup_by_id(const char *domain_name, uint64_t id,
526     xnuproxy_resource_t type)
527 {
528 	assert3u(strlen(domain_name), >, 0);
529 
530 	exclaves_resource_domain_t *domain = lookup_domain(domain_name);
531 	if (domain == NULL) {
532 		return NULL;
533 	}
534 
535 	return lookup_resource_by_id(domain, id, type);
536 }
537 
/*
 * Return the NUL-terminated name of the resource. The returned pointer is
 * storage embedded in the resource itself; callers must not free it.
 */
const char *
exclaves_resource_name(const exclaves_resource_t *resource)
{
	return resource->r_name;
}
543 
544 /*
545  * Notes on use-count management
546  * For the most part everything is done under the resource lock.
547  * In some cases, it's necessary to grab/release a use count without
548  * holding the lock - for example the realtime audio paths doing copyin/copyout
549  * of named buffers/audio buffers.
550  * To prevent against races, initialization/de-initialization should always
551  * recheck the use-count under the lock.
552  */
553 uint32_t
exclaves_resource_retain(exclaves_resource_t * resource)554 exclaves_resource_retain(exclaves_resource_t *resource)
555 {
556 	uint32_t orig =
557 	    os_atomic_inc_orig(&resource->r_usecnt, relaxed);
558 	assert3u(orig, <, UINT32_MAX);
559 
560 	return orig;
561 }
562 
/*
 * Drop a use-count on 'resource'. On the 1 -> 0 transition, take the
 * resource mutex, re-validate the count, and run type-specific teardown.
 */
void
exclaves_resource_release(exclaves_resource_t *resource)
{
	/*
	 * Drop the use count without holding the lock (this path may be called
	 * by RT threads and should be RT-safe).
	 */
	uint32_t orig = os_atomic_dec_orig(&resource->r_usecnt, relaxed);
	assert3u(orig, !=, 0);
	if (orig != 1) {
		return;
	}

	/*
	 * Now grab the lock. The RT-safe paths calling this function shouldn't
	 * end up here unless there's a bug or mis-behaving user code (like
	 * deallocating an in-use mach port).
	 */
	lck_mtx_lock(&resource->r_mutex);

	/*
	 * Re-check the use count - as a second user of the resource
	 * may have snuck in in the meantime.
	 */
	if (os_atomic_load(&resource->r_usecnt, relaxed) > 0) {
		lck_mtx_unlock(&resource->r_mutex);
		return;
	}

	/* Last use is gone and we hold the lock: run per-type teardown. */
	switch (resource->r_type) {
	case XNUPROXY_RESOURCE_NAMED_BUFFER:
		exclaves_named_buffer_unmap(resource);
		break;

	case XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER:
		exclaves_audio_buffer_delete(resource);
		break;

	case XNUPROXY_RESOURCE_SENSOR:
		exclaves_resource_sensor_reset(resource);
		break;

	case XNUPROXY_RESOURCE_SHARED_MEMORY:
		exclaves_resource_shared_memory_unmap(resource);
		break;

	case XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY:
		exclaves_resource_audio_memory_unmap(resource);
		break;

	default:
		/* Other types need no teardown on last release. */
		break;
	}

	lck_mtx_unlock(&resource->r_mutex);
}
619 
/*
 * Translate a port name in 'space' into the exclaves resource it denotes.
 * On success, *out holds the resource with a freshly-taken use-count;
 * the caller is responsible for releasing it.
 */
kern_return_t
exclaves_resource_from_port_name(ipc_space_t space, mach_port_name_t name,
    exclaves_resource_t **out)
{
	kern_return_t kr = KERN_SUCCESS;
	ipc_port_t port = IPC_PORT_NULL;

	if (!MACH_PORT_VALID(name)) {
		return KERN_INVALID_NAME;
	}

	kr = ipc_port_translate_send(space, name, &port);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* port is locked */
	assert(IP_VALID(port));

	exclaves_resource_t *resource = ipc_kobject_get_stable(port,
	    IKOT_EXCLAVES_RESOURCE);

	/* The port is valid, but doesn't denote an exclaves resource. */
	if (resource == NULL) {
		ip_mq_unlock(port);
		return KERN_INVALID_CAPABILITY;
	}

	/* Grab a reference while the port is good and the ipc lock is held. */
	/* The outstanding send right itself holds a use-count, so the
	 * pre-increment count must already be non-zero. */
	__assert_only uint32_t orig = exclaves_resource_retain(resource);
	assert3u(orig, >, 0);

	ip_mq_unlock(port);
	*out = resource;

	return KERN_SUCCESS;
}
657 
658 /*
659  * Consumes a reference to the resource. On success the resource is reference is
660  * associated with the lifetime of the port.
661  */
kern_return_t
exclaves_resource_create_port_name(exclaves_resource_t *resource, ipc_space_t space,
    mach_port_name_t *name)
{
	/* Caller must pass in a use-count; it is consumed by this function. */
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);

	ipc_port_t port = resource->r_port;

	ip_mq_lock(port);

	/* Create an armed send right. */
	kern_return_t ret = ipc_kobject_make_send_nsrequest_locked(port,
	    resource, IKOT_EXCLAVES_RESOURCE);
	if (ret != KERN_SUCCESS &&
	    ret != KERN_ALREADY_WAITING) {
		ip_mq_unlock(port);
		/* Arming failed; give back the reference we were passed. */
		exclaves_resource_release(resource);
		return ret;
	}

	/*
	 * If there was already a send right, then the port already has an
	 * associated use count so drop this one.
	 */
	if (port->ip_srights > 1) {
		assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 1);
		exclaves_resource_release(resource);
	}

	ip_mq_unlock(port);

	*name = ipc_port_copyout_send(port, space);
	if (!MACH_PORT_VALID(*name)) {
		/*
		 * ipc_port_copyout_send() releases the send right on failure
		 * (possibly calling exclaves_resource_no_senders() in the
		 * process).
		 */
		return KERN_RESOURCE_SHORTAGE;
	}

	return KERN_SUCCESS;
}
705 
706 static void
exclaves_resource_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)707 exclaves_resource_no_senders(ipc_port_t port,
708     __unused mach_port_mscount_t mscount)
709 {
710 	exclaves_resource_t *resource = ipc_kobject_get_stable(port,
711 	    IKOT_EXCLAVES_RESOURCE);
712 
713 	exclaves_resource_release(resource);
714 }
715 
716 /* -------------------------------------------------------------------------- */
717 #pragma mark Named Buffers
718 
/*
 * Walk the buffer's backing ranges, invoking 'cb' once per contiguous
 * kernel-mapped chunk covering [offset, offset + len). The buffer may span
 * several discontiguous ranges (nb_range[]), so a single request can be
 * split across multiple callbacks. A non-zero return from 'cb' aborts the
 * walk and is propagated verbatim. Caller must hold a use-count and have
 * validated that offset + len fits within the buffer (asserted).
 */
int
exclaves_named_buffer_io(exclaves_resource_t *resource, off_t offset,
    size_t len, int (^cb)(char *, size_t))
{
	assert(resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER ||
	    resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER);
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);

	named_buffer_resource_t *nb = &resource->r_named_buffer;
	assert3u(nb->nb_nranges, >, 0);
	assert3u(nb->nb_size, !=, 0);
	assert3u(offset + len, <=, nb->nb_size);

	for (int i = 0; i < nb->nb_nranges; i++) {
		/* Skip forward to the starting range. */
		if (offset >= nb->nb_range[i].npages * PAGE_SIZE) {
			offset -= nb->nb_range[i].npages * PAGE_SIZE;
			continue;
		}

		/* Chunk is bounded by both the range end and remaining len. */
		size_t size = MIN((nb->nb_range[i].npages * PAGE_SIZE) - offset, len);
		int ret = cb(nb->nb_range[i].address + offset, size);
		if (ret != 0) {
			return ret;
		}

		/* Subsequent ranges are consumed from their beginning. */
		offset = 0;
		len -= size;

		if (len == 0) {
			break;
		}
	}
	assert3u(len, ==, 0);

	return 0;
}
756 
/*
 * Copy 'len' bytes from user address '_src' into the named buffer starting
 * at 'offset'. Bounds must already be validated by the caller; the buffer
 * must be mapped writable (asserted).
 */
static kern_return_t
exclaves_named_buffer_io_copyin(exclaves_resource_t *resource,
    user_addr_t _src, off_t offset, size_t len)
{
	assert3u(resource->r_named_buffer.nb_perm & EXCLAVES_BUFFER_PERM_WRITE,
	    !=, 0);

	/* __block: the per-chunk callback advances the user source cursor. */
	__block user_addr_t src = _src;
	return exclaves_named_buffer_io(resource, offset, len,
	           ^(char *buffer, size_t size) {
		if (copyin(src, buffer, size) != 0) {
		        return KERN_FAILURE;
		}

		src += size;
		return KERN_SUCCESS;
	});
}
775 
776 kern_return_t
exclaves_named_buffer_copyin(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)777 exclaves_named_buffer_copyin(exclaves_resource_t *resource,
778     user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
779     mach_vm_size_t size2, mach_vm_size_t offset2)
780 {
781 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
782 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NAMED_BUFFER);
783 
784 	mach_vm_size_t umax = 0;
785 	kern_return_t kr = KERN_FAILURE;
786 
787 	if (buffer == USER_ADDR_NULL || size1 == 0) {
788 		return KERN_INVALID_ARGUMENT;
789 	}
790 
791 	named_buffer_resource_t *nb = &resource->r_named_buffer;
792 	assert3u(nb->nb_nranges, >, 0);
793 	assert3u(nb->nb_size, !=, 0);
794 
795 	if (os_add_overflow(offset1, size1, &umax) || umax > nb->nb_size) {
796 		return KERN_INVALID_ARGUMENT;
797 	}
798 
799 	if (os_add_overflow(offset2, size2, &umax) || umax > nb->nb_size) {
800 		return KERN_INVALID_ARGUMENT;
801 	}
802 
803 	if ((nb->nb_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) {
804 		return KERN_PROTECTION_FAILURE;
805 	}
806 
807 	kr = exclaves_named_buffer_io_copyin(resource, buffer, offset1, size1);
808 	if (kr != KERN_SUCCESS) {
809 		return kr;
810 	}
811 
812 	kr = exclaves_named_buffer_io_copyin(resource, buffer + size1, offset2,
813 	    size2);
814 	if (kr != KERN_SUCCESS) {
815 		return kr;
816 	}
817 
818 	return KERN_SUCCESS;
819 }
820 
/*
 * Copy 'len' bytes out of the named buffer starting at 'offset' to user
 * address '_dst'. Bounds must already be validated by the caller; the
 * buffer must be mapped readable (asserted).
 */
static kern_return_t
exclaves_named_buffer_io_copyout(exclaves_resource_t *resource,
    user_addr_t _dst, off_t offset, size_t len)
{
	assert3u(resource->r_named_buffer.nb_perm & EXCLAVES_BUFFER_PERM_READ,
	    !=, 0);

	/* __block: the per-chunk callback advances the user dest cursor. */
	__block user_addr_t dst = _dst;
	return exclaves_named_buffer_io(resource, offset, len,
	           ^(char *buffer, size_t size) {
		if (copyout(buffer, dst, size) != 0) {
		        return KERN_FAILURE;
		}

		dst += size;
		return KERN_SUCCESS;
	});
}
839 
/*
 * Copy data out of a named/audio buffer to user space in up to two
 * segments: (offset1, size1) written at 'buffer', then (offset2, size2)
 * written at 'buffer + size1'. Requires a held use-count and read
 * permission on the buffer.
 */
kern_return_t
exclaves_named_buffer_copyout(exclaves_resource_t *resource,
    user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
    mach_vm_size_t size2, mach_vm_size_t offset2)
{
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
	assert(resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER ||
	    resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER);

	mach_vm_size_t umax = 0;
	kern_return_t kr = KERN_FAILURE;

	if (buffer == USER_ADDR_NULL || size1 == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	named_buffer_resource_t *nb = &resource->r_named_buffer;
	assert3u(nb->nb_nranges, >, 0);
	assert3u(nb->nb_size, !=, 0);

	/* Both (offset, size) pairs must fit in the buffer, sans overflow. */
	if (os_add_overflow(offset1, size1, &umax) || umax > nb->nb_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if (os_add_overflow(offset2, size2, &umax) || umax > nb->nb_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((nb->nb_perm & EXCLAVES_BUFFER_PERM_READ) == 0) {
		return KERN_PROTECTION_FAILURE;
	}

	kr = exclaves_named_buffer_io_copyout(resource, buffer, offset1, size1);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = exclaves_named_buffer_io_copyout(resource, buffer + size1,
	    offset2, size2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return KERN_SUCCESS;
}
885 
886 static void
named_buffer_unmap(exclaves_resource_t * resource)887 named_buffer_unmap(exclaves_resource_t *resource)
888 {
889 	assert(resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER ||
890 	    resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER);
891 	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
892 
893 	/* BEGIN IGNORE CODESTYLE */
894 	resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER ?
895 	    exclaves_named_buffer_unmap(resource) :
896 	    exclaves_audio_buffer_delete(resource);
897 	/* END IGNORE CODESTYLE */
898 }
899 
900 static kern_return_t
named_buffer_map(exclaves_resource_t * resource,size_t size,exclaves_buffer_perm_t perm)901 named_buffer_map(exclaves_resource_t *resource, size_t size,
902     exclaves_buffer_perm_t perm)
903 {
904 	assert(resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER ||
905 	    resource->r_type == XNUPROXY_RESOURCE_NAMED_BUFFER);
906 	assert3u(perm & ~(EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE), ==, 0);
907 
908 	xnuproxy_cmd_t cmd = 0;
909 	kern_return_t kr = KERN_FAILURE;
910 	uint32_t status = 0;
911 
912 	if (size == 0) {
913 		return KERN_INVALID_ARGUMENT;
914 	}
915 
916 	/* round size up to nearest page */
917 	mach_vm_offset_t rounded_size = 0;
918 	if (mach_vm_round_page_overflow(size, &rounded_size)) {
919 		return KERN_INVALID_ARGUMENT;
920 	}
921 
922 	lck_mtx_lock(&resource->r_mutex);
923 
924 	/*
925 	 * If already active, bump the use count, check that the perms and size
926 	 * are compatible and return. Checking the use count is insufficient
927 	 * here as this can race with with a non-locked use count release.
928 	 */
929 	if (resource->r_active) {
930 		const named_buffer_resource_t *nb = &resource->r_named_buffer;
931 
932 		/*
933 		 * When only inbound and outbound buffers are supported, the
934 		 * perm check should be updated to ensure that the perms match
935 		 * (rather than being a subset). */
936 		if (nb->nb_size < rounded_size ||
937 		    (nb->nb_perm & perm) == 0) {
938 			lck_mtx_unlock(&resource->r_mutex);
939 			return KERN_INVALID_ARGUMENT;
940 		}
941 
942 		exclaves_resource_retain(resource);
943 		lck_mtx_unlock(&resource->r_mutex);
944 		return KERN_SUCCESS;
945 	}
946 
947 	cmd = resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER ?
948 	    XNUPROXY_CMD_AUDIO_BUFFER_MAP:
949 	    XNUPROXY_CMD_NAMED_BUFFER_MAP;
950 	xnuproxy_msg_t msg = {
951 		.cmd = cmd,
952 		.cmd_named_buf_map = (xnuproxy_cmd_named_buf_map_t) {
953 			.request.buffer_id = resource->r_id,
954 			.request.buffer_size = rounded_size,
955 		}
956 	};
957 
958 	kr = exclaves_xnu_proxy_send(&msg, NULL);
959 	if (kr != KERN_SUCCESS) {
960 		lck_mtx_unlock(&resource->r_mutex);
961 		return kr;
962 	}
963 	status = msg.cmd_named_buf_map.response.status;
964 	if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) {
965 		lck_mtx_unlock(&resource->r_mutex);
966 		return status == XNUPROXY_NAMED_BUFFER_EINVAL ?
967 		       KERN_INVALID_ARGUMENT : KERN_FAILURE;
968 	}
969 
970 	/*
971 	 * From this point on named_buffer_unmap() must be called if
972 	 * something goes wrong so that the buffer will be properly unmapped.
973 	 */
974 	const bool ro = msg.cmd_named_buf_map.response.readonly != 0;
975 	switch (perm) {
976 	case EXCLAVES_BUFFER_PERM_READ:
977 		if (!ro) {
978 			named_buffer_unmap(resource);
979 			lck_mtx_unlock(&resource->r_mutex);
980 			return KERN_PROTECTION_FAILURE;
981 		}
982 		break;
983 	case EXCLAVES_BUFFER_PERM_WRITE:
984 		if (ro) {
985 			named_buffer_unmap(resource);
986 			lck_mtx_unlock(&resource->r_mutex);
987 			return KERN_PROTECTION_FAILURE;
988 		}
989 		break;
990 	/* Maintain backwards compatibility for named buffers (READ|WRITE) */
991 	case EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE:
992 		if (ro) {
993 			perm &= ~EXCLAVES_BUFFER_PERM_WRITE;
994 		}
995 		break;
996 	}
997 
998 	named_buffer_resource_t *nb = &resource->r_named_buffer;
999 	nb->nb_size = rounded_size;
1000 	nb->nb_perm = perm;
1001 
1002 	/*
1003 	 * The named buffer is now accessible by xnu. Discover the
1004 	 * layout of the memory.
1005 	 */
1006 	const uint64_t count = rounded_size / PAGE_SIZE;
1007 	uint32_t page = 0;
1008 	cmd = resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER ?
1009 	    XNUPROXY_CMD_AUDIO_BUFFER_LAYOUT:
1010 	    XNUPROXY_CMD_NAMED_BUFFER_LAYOUT;
1011 	while (page < count) {
1012 		xnuproxy_msg_t layout_msg = {
1013 			.cmd = cmd,
1014 			.cmd_named_buf_layout = (xnuproxy_cmd_named_buf_layout_t) {
1015 				.request.buffer_id = resource->r_id,
1016 				.request.start = page,
1017 				.request.npages = (uint32_t)count - page,
1018 			}
1019 		};
1020 
1021 		kr = exclaves_xnu_proxy_send(&layout_msg, NULL);
1022 		if (kr != KERN_SUCCESS) {
1023 			named_buffer_unmap(resource);
1024 			lck_mtx_unlock(&resource->r_mutex);
1025 			return kr;
1026 		}
1027 
1028 		status = layout_msg.cmd_named_buf_layout.response.status;
1029 		switch (status) {
1030 		case XNUPROXY_NAMED_BUFFER_SUCCESS:
1031 		case XNUPROXY_NAMED_BUFFER_ENOSPC:
1032 			break;
1033 
1034 		case XNUPROXY_NAMED_BUFFER_EINVAL:
1035 			named_buffer_unmap(resource);
1036 			lck_mtx_unlock(&resource->r_mutex);
1037 			return KERN_INVALID_ARGUMENT;
1038 
1039 		default:
1040 			named_buffer_unmap(resource);
1041 			lck_mtx_unlock(&resource->r_mutex);
1042 			return KERN_FAILURE;
1043 		}
1044 
1045 		xnuproxy_named_buf_range_t *range =
1046 		    layout_msg.cmd_named_buf_layout.response.range;
1047 		uint32_t nranges =
1048 		    layout_msg.cmd_named_buf_layout.response.nranges;
1049 
1050 		if (nb->nb_nranges + nranges > EXCLAVES_SHARED_BUFFER_MAX_RANGES) {
1051 			named_buffer_unmap(resource);
1052 			lck_mtx_unlock(&resource->r_mutex);
1053 			printf("exclaves: "
1054 			    "fragmented named buffer can't fit\n");
1055 			return KERN_FAILURE;
1056 		}
1057 
1058 		for (uint32_t i = 0; i < nranges; i++) {
1059 			nb->nb_range[nb->nb_nranges].address =
1060 			    (char *)phystokv(range[i].address);
1061 			nb->nb_range[nb->nb_nranges].npages = range[i].npages;
1062 
1063 			assert3p(nb->nb_range[nb->nb_nranges].address, !=,
1064 			    NULL);
1065 
1066 			nb->nb_nranges++;
1067 			page += range[i].npages;
1068 			assert3u(page, <=, count);
1069 		}
1070 	}
1071 
1072 	exclaves_resource_retain(resource);
1073 	resource->r_active = true;
1074 
1075 	lck_mtx_unlock(&resource->r_mutex);
1076 
1077 	return KERN_SUCCESS;
1078 }
1079 
1080 kern_return_t
exclaves_named_buffer_map(const char * domain,const char * name,size_t size,exclaves_buffer_perm_t perm,exclaves_resource_t ** out)1081 exclaves_named_buffer_map(const char *domain, const char *name, size_t size,
1082     exclaves_buffer_perm_t perm, exclaves_resource_t **out)
1083 {
1084 	assert3p(out, !=, NULL);
1085 
1086 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1087 	    name, XNUPROXY_RESOURCE_NAMED_BUFFER);
1088 	if (resource == NULL) {
1089 		return KERN_NOT_FOUND;
1090 	}
1091 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NAMED_BUFFER);
1092 
1093 	kern_return_t kr = named_buffer_map(resource, size, perm);
1094 	if (kr != KERN_SUCCESS) {
1095 		return kr;
1096 	}
1097 
1098 	*out = resource;
1099 	return KERN_SUCCESS;
1100 }
1101 
1102 static void
exclaves_named_buffer_unmap(exclaves_resource_t * resource)1103 exclaves_named_buffer_unmap(exclaves_resource_t *resource)
1104 {
1105 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NAMED_BUFFER);
1106 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1107 	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1108 
1109 	xnuproxy_msg_t msg = {
1110 		.cmd = XNUPROXY_CMD_NAMED_BUFFER_DELETE,
1111 		.cmd_named_buf_delete.request.buffer_id = resource->r_id,
1112 	};
1113 
1114 	kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL);
1115 	if (kr != KERN_SUCCESS) {
1116 		printf("exclaves: failed to delete named buffer: %s\n",
1117 		    resource->r_name);
1118 		return;
1119 	}
1120 	uint8_t status = msg.cmd_named_buf_delete.response.status;
1121 
1122 	if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) {
1123 		printf("exclaves: failed to delete named buffer: %s, "
1124 		    "status: %d\n", resource->r_name, status);
1125 		return;
1126 	}
1127 
1128 	bzero(&resource->r_named_buffer, sizeof(resource->r_named_buffer));
1129 
1130 	resource->r_active = false;
1131 }
1132 
1133 /* -------------------------------------------------------------------------- */
1134 #pragma mark Audio buffers
1135 
1136 kern_return_t
exclaves_audio_buffer_map(const char * domain,const char * name,size_t size,exclaves_resource_t ** out)1137 exclaves_audio_buffer_map(const char *domain, const char *name, size_t size,
1138     exclaves_resource_t **out)
1139 {
1140 	assert3p(out, !=, NULL);
1141 
1142 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1143 	    name, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER);
1144 	if (resource == NULL) {
1145 		return KERN_NOT_FOUND;
1146 	}
1147 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER);
1148 
1149 	kern_return_t kr = named_buffer_map(resource, size,
1150 	    EXCLAVES_BUFFER_PERM_READ);
1151 	if (kr != KERN_SUCCESS) {
1152 		return kr;
1153 	}
1154 
1155 	*out = resource;
1156 	return KERN_SUCCESS;
1157 }
1158 
1159 static void
exclaves_audio_buffer_delete(exclaves_resource_t * resource)1160 exclaves_audio_buffer_delete(exclaves_resource_t *resource)
1161 {
1162 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER);
1163 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1164 	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1165 
1166 	xnuproxy_msg_t msg = {
1167 		.cmd = XNUPROXY_CMD_AUDIO_BUFFER_DELETE,
1168 		.cmd_audio_buf_delete.request.buffer_id = resource->r_id,
1169 	};
1170 
1171 	kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL);
1172 	if (kr != KERN_SUCCESS) {
1173 		printf("exclaves: failed to delete audio buffer: %s\n",
1174 		    resource->r_name);
1175 		return;
1176 	}
1177 	uint8_t status = msg.cmd_audio_buf_delete.response.status;
1178 
1179 	if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) {
1180 		printf("exclaves: failed to delete audio buffer: %s, "
1181 		    "status: %d\n", resource->r_name, status);
1182 		return;
1183 	}
1184 
1185 	bzero(&resource->r_named_buffer, sizeof(resource->r_named_buffer));
1186 	resource->r_active = false;
1187 }
1188 
1189 kern_return_t
exclaves_audio_buffer_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)1190 exclaves_audio_buffer_copyout(exclaves_resource_t *resource,
1191     user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1192     mach_vm_size_t size2, mach_vm_size_t offset2)
1193 {
1194 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1195 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_BUFFER);
1196 
1197 	xnuproxy_msg_t msg = {
1198 		.cmd = XNUPROXY_CMD_AUDIO_BUFFER_COPYOUT,
1199 		.cmd_audio_buf_copyout.request.buffer_id = resource->r_id,
1200 		.cmd_audio_buf_copyout.request.size1 = size1,
1201 		.cmd_audio_buf_copyout.request.offset1 = offset1,
1202 		.cmd_audio_buf_copyout.request.size2 = size2,
1203 		.cmd_audio_buf_copyout.request.offset2 = offset2,
1204 	};
1205 
1206 	kern_return_t kr = exclaves_xnu_proxy_send(&msg, NULL);
1207 	if (kr != KERN_SUCCESS) {
1208 		return kr;
1209 	}
1210 	uint8_t status = msg.cmd_audio_buf_copyout.response.status;
1211 
1212 	if (status != XNUPROXY_NAMED_BUFFER_SUCCESS) {
1213 		if (status == XNUPROXY_NAMED_BUFFER_EINVAL) {
1214 			return KERN_INVALID_ARGUMENT;
1215 		}
1216 		return KERN_FAILURE;
1217 	}
1218 
1219 	return exclaves_named_buffer_copyout(resource, buffer, size1, offset1,
1220 	           size2, offset2);
1221 }
1222 
1223 /* -------------------------------------------------------------------------- */
1224 #pragma mark Conclave Manager
1225 
1226 static void
exclaves_conclave_init(exclaves_resource_t * resource)1227 exclaves_conclave_init(exclaves_resource_t *resource)
1228 {
1229 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);
1230 
1231 	tb_client_connection_t connection = NULL;
1232 	__assert_only kern_return_t kr = exclaves_conclave_launcher_init(resource->r_id,
1233 	    &connection);
1234 	assert3u(kr, ==, KERN_SUCCESS);
1235 
1236 	conclave_resource_t *conclave = &resource->r_conclave;
1237 
1238 	conclave->c_control = connection;
1239 	conclave->c_state = CONCLAVE_S_NONE;
1240 	conclave->c_task = TASK_NULL;
1241 }
1242 
1243 kern_return_t
exclaves_conclave_attach(const char * domain,const char * name,task_t task)1244 exclaves_conclave_attach(const char *domain, const char *name, task_t task)
1245 {
1246 	assert3p(task, !=, TASK_NULL);
1247 
1248 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1249 	    name, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);
1250 	if (resource == NULL) {
1251 		return KERN_INVALID_ARGUMENT;
1252 	}
1253 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);
1254 
1255 	conclave_resource_t *conclave = &resource->r_conclave;
1256 
1257 	lck_mtx_lock(&resource->r_mutex);
1258 
1259 	if (conclave->c_state != CONCLAVE_S_NONE) {
1260 		lck_mtx_unlock(&resource->r_mutex);
1261 		return KERN_INVALID_ARGUMENT;
1262 	}
1263 
1264 	task_reference(task);
1265 
1266 	task->conclave = resource;
1267 
1268 	conclave->c_task = task;
1269 	conclave->c_state = CONCLAVE_S_ATTACHED;
1270 
1271 	lck_mtx_unlock(&resource->r_mutex);
1272 
1273 	return KERN_SUCCESS;
1274 }
1275 
1276 kern_return_t
exclaves_conclave_detach(exclaves_resource_t * resource,task_t task)1277 exclaves_conclave_detach(exclaves_resource_t *resource, task_t task)
1278 {
1279 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);
1280 
1281 	conclave_resource_t *conclave = &resource->r_conclave;
1282 
1283 	lck_mtx_lock(&resource->r_mutex);
1284 
1285 	if (conclave->c_state != CONCLAVE_S_ATTACHED &&
1286 	    conclave->c_state != CONCLAVE_S_STOPPED) {
1287 		panic("Task %p trying to detach a conclave %p but it is in a "
1288 		    "weird state", task, conclave);
1289 	}
1290 
1291 	assert3p(task->conclave, !=, NULL);
1292 	assert3p(resource, ==, task->conclave);
1293 
1294 	task->conclave = NULL;
1295 	conclave->c_task = TASK_NULL;
1296 
1297 	conclave->c_state = CONCLAVE_S_NONE;
1298 
1299 	lck_mtx_unlock(&resource->r_mutex);
1300 
1301 	task_deallocate(task);
1302 
1303 	return KERN_SUCCESS;
1304 }
1305 
/*
 * Hand a conclave over from old_task to new_task.
 *
 * Only permitted while the conclave is attached but not yet launched. On
 * success the conclave's task reference moves from old_task to new_task:
 * a reference is taken on new_task under the lock and old_task's
 * reference is dropped after the lock is released.
 *
 * Returns KERN_FAILURE if the conclave has already been launched (or is
 * stopping/stopped), KERN_SUCCESS otherwise.
 */
kern_return_t
exclaves_conclave_inherit(exclaves_resource_t *resource, task_t old_task,
    task_t new_task)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	lck_mtx_lock(&resource->r_mutex);

	assert3u(conclave->c_state, !=, CONCLAVE_S_NONE);

	/* new_task must not already own a conclave; old_task must own this one. */
	assert3p(new_task->conclave, ==, NULL);
	assert3p(old_task->conclave, !=, NULL);
	assert3p(resource, ==, old_task->conclave);

	/* Only allow inheriting the conclave if it has not yet started. */
	if (conclave->c_state != CONCLAVE_S_ATTACHED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	old_task->conclave = NULL;

	task_reference(new_task);
	new_task->conclave = resource;

	conclave->c_task = new_task;

	/* Drop old_task's reference only after releasing the mutex. */
	lck_mtx_unlock(&resource->r_mutex);
	task_deallocate(old_task);

	return KERN_SUCCESS;
}
1340 
/*
 * Launch an attached conclave.
 *
 * The resource mutex is dropped around the launcher calls; the LAUNCHING
 * and STOP_REQUESTED states coordinate with a concurrent
 * exclaves_conclave_stop(): if a stop was requested while this thread was
 * launching, the stop is performed here before returning.
 *
 * Returns KERN_FAILURE unless the conclave was in the ATTACHED state.
 */
kern_return_t
exclaves_conclave_launch(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	lck_mtx_lock(&resource->r_mutex);

	/* Only an attached (never launched) conclave can be launched. */
	if (conclave->c_state != CONCLAVE_S_ATTACHED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	conclave->c_state = CONCLAVE_S_LAUNCHING;
	lck_mtx_unlock(&resource->r_mutex);

	/* c_control is set at init time and immutable thereafter. */
	__assert_only kern_return_t ret =
	    exclaves_conclave_launcher_launch(conclave->c_control);
	assert3u(ret, ==, KERN_SUCCESS);

	lck_mtx_lock(&resource->r_mutex);
	/* Check if conclave stop is requested */
	if (conclave->c_state == CONCLAVE_S_STOP_REQUESTED) {
		conclave->c_state = CONCLAVE_S_STOPPING;
		lck_mtx_unlock(&resource->r_mutex);

		ret = exclaves_conclave_launcher_stop(conclave->c_control,
		    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT);
		assert3u(ret, ==, KERN_SUCCESS);

		lck_mtx_lock(&resource->r_mutex);
		conclave->c_state = CONCLAVE_S_STOPPED;
	} else {
		conclave->c_state = CONCLAVE_S_LAUNCHED;
	}
	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1381 
1382 kern_return_t
exclaves_conclave_lookup_resources(exclaves_resource_t * resource,struct exclaves_resource_user * conclave_resource_user,int resource_count)1383 exclaves_conclave_lookup_resources(exclaves_resource_t *resource,
1384     struct exclaves_resource_user *conclave_resource_user, int resource_count)
1385 {
1386 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);
1387 	conclave_resource_t *conclave = &resource->r_conclave;
1388 	lck_mtx_lock(&resource->r_mutex);
1389 
1390 	if (conclave->c_state != CONCLAVE_S_LAUNCHED) {
1391 		lck_mtx_unlock(&resource->r_mutex);
1392 		return KERN_FAILURE;
1393 	}
1394 
1395 	for (int i = 0; i < resource_count; i++) {
1396 		exclaves_resource_t *service_resource =
1397 		    exclaves_resource_lookup_by_name(resource->r_name,
1398 		    conclave_resource_user[i].r_name,
1399 		    XNUPROXY_RESOURCE_SERVICE);
1400 		if (service_resource == NULL) {
1401 			/*
1402 			 * Fall back to checking the Darwin domain. This should
1403 			 * be removed once conclaves are properly defined.
1404 			 */
1405 			service_resource = exclaves_resource_lookup_by_name(
1406 				EXCLAVES_DOMAIN_DARWIN,
1407 				conclave_resource_user[i].r_name,
1408 				XNUPROXY_RESOURCE_SERVICE);
1409 		}
1410 		if (service_resource == NULL) {
1411 			conclave_resource_user[i].r_id = 0;
1412 			conclave_resource_user[i].r_port = MACH_PORT_NULL;
1413 			continue;
1414 		}
1415 
1416 		conclave_resource_user[i].r_id = service_resource->r_id;
1417 		conclave_resource_user[i].r_port = MACH_PORT_NULL;
1418 	}
1419 
1420 	lck_mtx_unlock(&resource->r_mutex);
1421 	return KERN_SUCCESS;
1422 }
1423 
/*
 * Stop a conclave via the conclave launcher.
 *
 * The state machine tolerates concurrent launch/stop callers:
 *  - LAUNCHING: another thread is mid-launch; just record the request
 *    (that thread performs the stop once its launch completes).
 *  - ATTACHED: never launched; move straight to STOPPED.
 *  - STOPPING/STOPPED: a stop is already in progress or done; no-op.
 *  - LAUNCHED: perform the stop here, with the mutex dropped around the
 *    launcher call.
 *
 * gather_crash_bt selects the stop reason passed to the launcher
 * (KILLED vs EXIT).
 */
kern_return_t
exclaves_conclave_stop(exclaves_resource_t *resource, bool gather_crash_bt)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	uint32_t conclave_stop_reason = gather_crash_bt ?
	    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_KILLED :
	    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT;

	lck_mtx_lock(&resource->r_mutex);

	/* TBD Call stop on the conclave manager endpoint. */
	if (conclave->c_state == CONCLAVE_S_LAUNCHING) {
		/* If another thread is launching, just request a stop */
		conclave->c_state = CONCLAVE_S_STOP_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	} else if (conclave->c_state == CONCLAVE_S_ATTACHED) {
		/* Change the state to stopped if the conclave was never started */
		conclave->c_state = CONCLAVE_S_STOPPED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	} else if (conclave->c_state == CONCLAVE_S_STOPPING ||
	    conclave->c_state == CONCLAVE_S_STOPPED) {
		/* Upcall to stop the conclave might be in progress, bail out */
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Any remaining state other than LAUNCHED cannot be stopped. */
	if (conclave->c_state != CONCLAVE_S_LAUNCHED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	conclave->c_state = CONCLAVE_S_STOPPING;
	lck_mtx_unlock(&resource->r_mutex);

	/* c_control is immutable after init, safe to use unlocked. */
	__assert_only kern_return_t kr =
	    exclaves_conclave_launcher_stop(conclave->c_control,
	    conclave_stop_reason);
	assert3u(kr, ==, KERN_SUCCESS);

	lck_mtx_lock(&resource->r_mutex);
	conclave->c_state = CONCLAVE_S_STOPPED;
	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1474 
1475 extern int exit_with_exclave_exception(void *p);
1476 
/*
 * Handle a conclave stop upcall: transition the conclave to STOPPED and
 * terminate the attached task with an exclave exception.
 *
 * No-op (KERN_SUCCESS) if a stop is already in progress or complete;
 * KERN_FAILURE if the conclave is in a state from which it cannot be
 * stopped (e.g. NONE).
 */
kern_return_t
exclaves_conclave_stop_upcall(exclaves_resource_t *resource, task_t task)
{
	assert3p(resource, !=, NULL);
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_CONCLAVE_MANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	lck_mtx_lock(&resource->r_mutex);

	if (conclave->c_state == CONCLAVE_S_STOPPING || conclave->c_state == CONCLAVE_S_STOPPED) {
		/* Upcall to stop the conclave might be in progress, bail out */
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	if (conclave->c_state != CONCLAVE_S_LAUNCHED && conclave->c_state != CONCLAVE_S_LAUNCHING
	    && conclave->c_state != CONCLAVE_S_ATTACHED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	conclave->c_state = CONCLAVE_S_STOPPING;
	lck_mtx_unlock(&resource->r_mutex);

	/* Kill the task's BSD process; must be done without holding r_mutex. */
	exit_with_exclave_exception(get_bsdtask_info(task));

	lck_mtx_lock(&resource->r_mutex);
	conclave->c_state = CONCLAVE_S_STOPPED;
	lck_mtx_unlock(&resource->r_mutex);
	return KERN_SUCCESS;
}
1509 
1510 
1511 /* -------------------------------------------------------------------------- */
1512 #pragma mark Sensors
1513 
1514 static void
exclaves_resource_sensor_reset(exclaves_resource_t * resource)1515 exclaves_resource_sensor_reset(exclaves_resource_t *resource)
1516 {
1517 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR);
1518 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1519 	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1520 
1521 	exclaves_sensor_status_t status;
1522 
1523 	for (int i = 0; i < resource->r_sensor.s_startcount; i++) {
1524 		__assert_only kern_return_t kr = exclaves_sensor_stop(
1525 			(exclaves_sensor_type_t)resource->r_id, 0, &status);
1526 		assert3u(kr, !=, KERN_INVALID_ARGUMENT);
1527 	}
1528 
1529 	resource->r_sensor.s_startcount = 0;
1530 }
1531 
1532 kern_return_t
exclaves_resource_sensor_open(const char * domain,const char * id_name,exclaves_resource_t ** out)1533 exclaves_resource_sensor_open(const char *domain, const char *id_name,
1534     exclaves_resource_t **out)
1535 {
1536 	assert3p(out, !=, NULL);
1537 
1538 	exclaves_resource_t *sensor = exclaves_resource_lookup_by_name(domain,
1539 	    id_name, XNUPROXY_RESOURCE_SENSOR);
1540 
1541 	if (sensor == NULL) {
1542 		return KERN_NOT_FOUND;
1543 	}
1544 
1545 	assert3u(sensor->r_type, ==, XNUPROXY_RESOURCE_SENSOR);
1546 
1547 	lck_mtx_lock(&sensor->r_mutex);
1548 	exclaves_resource_retain(sensor);
1549 	lck_mtx_unlock(&sensor->r_mutex);
1550 
1551 	*out = sensor;
1552 
1553 	return KERN_SUCCESS;
1554 }
1555 
1556 kern_return_t
exclaves_resource_sensor_start(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1557 exclaves_resource_sensor_start(exclaves_resource_t *resource, uint64_t flags,
1558     exclaves_sensor_status_t *status)
1559 {
1560 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR);
1561 
1562 	lck_mtx_lock(&resource->r_mutex);
1563 	if (resource->r_sensor.s_startcount == UINT64_MAX) {
1564 		lck_mtx_unlock(&resource->r_mutex);
1565 		return KERN_INVALID_ARGUMENT;
1566 	}
1567 
1568 	kern_return_t kr = exclaves_sensor_start(
1569 		(exclaves_sensor_type_t)resource->r_id, flags, status);
1570 	if (kr == KERN_SUCCESS) {
1571 		resource->r_sensor.s_startcount += 1;
1572 	}
1573 	lck_mtx_unlock(&resource->r_mutex);
1574 	return kr;
1575 }
1576 
1577 kern_return_t
exclaves_resource_sensor_status(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1578 exclaves_resource_sensor_status(exclaves_resource_t *resource, uint64_t flags,
1579     exclaves_sensor_status_t *status)
1580 {
1581 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR);
1582 
1583 	lck_mtx_lock(&resource->r_mutex);
1584 	kern_return_t kr = exclaves_sensor_status(
1585 		(exclaves_sensor_type_t)resource->r_id, flags, status);
1586 	lck_mtx_unlock(&resource->r_mutex);
1587 
1588 	return kr;
1589 }
1590 
1591 kern_return_t
exclaves_resource_sensor_stop(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1592 exclaves_resource_sensor_stop(exclaves_resource_t *resource, uint64_t flags,
1593     exclaves_sensor_status_t *status)
1594 {
1595 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SENSOR);
1596 
1597 	lck_mtx_lock(&resource->r_mutex);
1598 	if (resource->r_sensor.s_startcount == 0) {
1599 		lck_mtx_unlock(&resource->r_mutex);
1600 		return KERN_INVALID_ARGUMENT;
1601 	}
1602 
1603 	kern_return_t kr = exclaves_sensor_stop(
1604 		(exclaves_sensor_type_t)resource->r_id, flags, status);
1605 	if (kr == KERN_SUCCESS) {
1606 		resource->r_sensor.s_startcount -= 1;
1607 	}
1608 	lck_mtx_unlock(&resource->r_mutex);
1609 
1610 	return kr;
1611 }
1612 
1613 /* -------------------------------------------------------------------------- */
1614 #pragma mark Notifications
1615 
1616 static void
exclaves_notification_init(exclaves_resource_t * resource)1617 exclaves_notification_init(exclaves_resource_t *resource)
1618 {
1619 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION);
1620 	exclaves_notification_t *notification = &resource->r_notification;
1621 	klist_init(&notification->notification_klist);
1622 }
1623 
1624 static int
filt_exclaves_notification_attach(struct knote * kn,__unused struct kevent_qos_s * kev)1625 filt_exclaves_notification_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
1626 {
1627 	int error = 0;
1628 	exclaves_resource_t *exclaves_resource = NULL;
1629 	kern_return_t kr = exclaves_resource_from_port_name(current_space(), (mach_port_name_t)kn->kn_id, &exclaves_resource);
1630 	if (kr != KERN_SUCCESS) {
1631 		error = ENOENT;
1632 		goto out;
1633 	}
1634 	assert3p(exclaves_resource, !=, NULL);
1635 	if (exclaves_resource->r_type != XNUPROXY_RESOURCE_NOTIFICATION) {
1636 		exclaves_resource_release(exclaves_resource);
1637 		error = EINVAL;
1638 		goto out;
1639 	}
1640 
1641 	lck_mtx_lock(&exclaves_resource->r_mutex);
1642 
1643 	if (kn->kn_exclaves_resource != NULL) {
1644 		lck_mtx_unlock(&exclaves_resource->r_mutex);
1645 		exclaves_resource_release(exclaves_resource);
1646 		error = EBUSY;
1647 		goto out;
1648 	}
1649 
1650 	/* kn_exclaves_resource consumes the ref. */
1651 	kn->kn_exclaves_resource = exclaves_resource;
1652 	KNOTE_ATTACH(&exclaves_resource->r_notification.notification_klist, kn);
1653 	lck_mtx_unlock(&exclaves_resource->r_mutex);
1654 
1655 	error = 0;
1656 out:
1657 	return error;
1658 }
1659 
1660 static void
filt_exclaves_notification_detach(struct knote * kn)1661 filt_exclaves_notification_detach(struct knote *kn)
1662 {
1663 	exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1664 
1665 	if (exclaves_resource != NULL) {
1666 		assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION);
1667 		lck_mtx_lock(&exclaves_resource->r_mutex);
1668 		kn->kn_exclaves_resource = NULL;
1669 		KNOTE_DETACH(&exclaves_resource->r_notification.notification_klist, kn);
1670 		lck_mtx_unlock(&exclaves_resource->r_mutex);
1671 
1672 		exclaves_resource_release(exclaves_resource);
1673 	}
1674 }
1675 
1676 static int
filt_exclaves_notification_event(struct knote * kn,long hint)1677 filt_exclaves_notification_event(struct knote *kn, long hint)
1678 {
1679 	/* ALWAYS CALLED WITH exclaves_resource mutex held */
1680 	exclaves_resource_t *exclaves_resource __assert_only = kn->kn_exclaves_resource;
1681 	LCK_MTX_ASSERT(&exclaves_resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1682 
1683 	/*
1684 	 * if the user is interested in this event, record it.
1685 	 */
1686 	if (kn->kn_sfflags & hint) {
1687 		kn->kn_fflags |= hint;
1688 	}
1689 
1690 	/* if we have any matching state, activate the knote */
1691 	if (kn->kn_fflags != 0) {
1692 		return FILTER_ACTIVE;
1693 	} else {
1694 		return 0;
1695 	}
1696 }
1697 
1698 static int
filt_exclaves_notification_touch(struct knote * kn,struct kevent_qos_s * kev)1699 filt_exclaves_notification_touch(struct knote *kn, struct kevent_qos_s *kev)
1700 {
1701 	int result;
1702 	exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1703 	assert3p(exclaves_resource, !=, NULL);
1704 	assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION);
1705 
1706 	lck_mtx_lock(&exclaves_resource->r_mutex);
1707 	/* accept new mask and mask off output events no long interesting */
1708 	kn->kn_sfflags = kev->fflags;
1709 	kn->kn_fflags &= kn->kn_sfflags;
1710 	if (kn->kn_fflags != 0) {
1711 		result = FILTER_ACTIVE;
1712 	} else {
1713 		result = 0;
1714 	}
1715 	lck_mtx_unlock(&exclaves_resource->r_mutex);
1716 
1717 	return result;
1718 }
1719 
1720 static int
filt_exclaves_notification_process(struct knote * kn,struct kevent_qos_s * kev)1721 filt_exclaves_notification_process(struct knote *kn, struct kevent_qos_s *kev)
1722 {
1723 	int result = 0;
1724 	exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1725 	assert3p(exclaves_resource, !=, NULL);
1726 	assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION);
1727 
1728 	lck_mtx_lock(&exclaves_resource->r_mutex);
1729 	if (kn->kn_fflags) {
1730 		knote_fill_kevent(kn, kev, 0);
1731 		result = FILTER_ACTIVE;
1732 	}
1733 	lck_mtx_unlock(&exclaves_resource->r_mutex);
1734 	return result;
1735 }
1736 
/*
 * kevent filter operations for exclave notification resources. Knotes are
 * linked on the resource's notification_klist; all callbacks synchronize
 * on the resource's r_mutex (f_event is called with it already held).
 */
SECURITY_READ_ONLY_EARLY(struct filterops) exclaves_notification_filtops = {
	.f_attach  = filt_exclaves_notification_attach,
	.f_detach  = filt_exclaves_notification_detach,
	.f_event   = filt_exclaves_notification_event,
	.f_touch   = filt_exclaves_notification_touch,
	.f_process = filt_exclaves_notification_process,
};
1744 
1745 kern_return_t
exclaves_notification_create(const char * domain,const char * name,exclaves_resource_t ** out)1746 exclaves_notification_create(const char *domain, const char *name,
1747     exclaves_resource_t **out)
1748 {
1749 	assert3p(out, !=, NULL);
1750 
1751 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1752 	    name, XNUPROXY_RESOURCE_NOTIFICATION);
1753 
1754 	if (resource == NULL) {
1755 		return KERN_NOT_FOUND;
1756 	}
1757 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION);
1758 
1759 	lck_mtx_lock(&resource->r_mutex);
1760 	exclaves_resource_retain(resource);
1761 	lck_mtx_unlock(&resource->r_mutex);
1762 
1763 	*out = resource;
1764 
1765 	return KERN_SUCCESS;
1766 }
1767 
1768 kern_return_t
exclaves_notification_signal(exclaves_resource_t * exclaves_resource,long event_mask)1769 exclaves_notification_signal(exclaves_resource_t *exclaves_resource, long event_mask)
1770 {
1771 	assert3p(exclaves_resource, !=, NULL);
1772 	assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCE_NOTIFICATION);
1773 
1774 	lck_mtx_lock(&exclaves_resource->r_mutex);
1775 	KNOTE(&exclaves_resource->r_notification.notification_klist, event_mask);
1776 	lck_mtx_unlock(&exclaves_resource->r_mutex);
1777 
1778 	return KERN_SUCCESS;
1779 }
1780 
/*
 * Thin wrapper around exclaves_resource_lookup_by_id() restricted to
 * notification resources.
 */
exclaves_resource_t *
exclaves_notification_lookup_by_id(const char *domain, uint64_t id)
{
	return exclaves_resource_lookup_by_id(domain, id,
	           XNUPROXY_RESOURCE_NOTIFICATION);
}
1787 
1788 uint64_t
exclaves_service_lookup(const char * domain,const char * name)1789 exclaves_service_lookup(const char *domain, const char *name)
1790 {
1791 	assert3p(domain, !=, NULL);
1792 	assert3p(name, !=, NULL);
1793 
1794 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1795 	    name, XNUPROXY_RESOURCE_SERVICE);
1796 	if (resource == NULL) {
1797 		return UINT64_C(~0);
1798 	}
1799 
1800 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SERVICE);
1801 	return resource->r_id;
1802 }
1803 
1804 kern_return_t
exclaves_xnu_proxy_check_mem_usage(void)1805 exclaves_xnu_proxy_check_mem_usage(void)
1806 {
1807 	xnuproxy_msg_t msg = {
1808 		.cmd = XNUPROXY_CMD_REPORT_MEMORY_USAGE,
1809 	};
1810 
1811 	return exclaves_xnu_proxy_send(&msg, NULL);
1812 }
1813 
1814 /* -------------------------------------------------------------------------- */
1815 #pragma mark Shared Memory
1816 
/*
 * Walk the byte range [offset, offset + len) of a shared-memory resource,
 * invoking cb once per physically-contiguous chunk with a kernel virtual
 * pointer and the chunk size. Stops early and returns cb's value if it is
 * non-zero; returns 0 once all len bytes have been visited.
 *
 * The caller must hold a use-count reference and guarantee that
 * offset + len lies within the buffer (asserted below).
 */
int
exclaves_resource_shared_memory_io(exclaves_resource_t *resource, off_t offset,
    size_t len, int (^cb)(char *, size_t))
{
	assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY ||
	    resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);

	shared_memory_resource_t *sm = &resource->r_shared_memory;
	assert3u(sm->sm_nranges, >, 0);
	assert3u(sm->sm_size, !=, 0);
	assert3u(offset + len, <=, sm->sm_size);

	for (int i = 0; i < sm->sm_nranges; i++) {
		/* Skip forward to the starting range. */
		if (offset >= sm->sm_range[i].npages * PAGE_SIZE) {
			offset -= sm->sm_range[i].npages * PAGE_SIZE;
			continue;
		}

		/* Visit what's left of this range, capped at len. */
		size_t size = MIN((sm->sm_range[i].npages * PAGE_SIZE) - offset, len);
		int ret = cb(sm->sm_range[i].address + offset, size);
		if (ret != 0) {
			return ret;
		}

		/* Subsequent ranges are visited from their start. */
		offset = 0;
		len -= size;

		if (len == 0) {
			break;
		}
	}
	assert3u(len, ==, 0);

	return 0;
}
1854 
1855 static kern_return_t
exclaves_resource_shared_memory_io_copyin(exclaves_resource_t * resource,user_addr_t _src,off_t offset,size_t len)1856 exclaves_resource_shared_memory_io_copyin(exclaves_resource_t *resource,
1857     user_addr_t _src, off_t offset, size_t len)
1858 {
1859 	assert3u(resource->r_shared_memory.sm_perm & EXCLAVES_BUFFER_PERM_WRITE,
1860 	    !=, 0);
1861 
1862 	__block user_addr_t src = _src;
1863 	return exclaves_resource_shared_memory_io(resource, offset, len,
1864 	           ^(char *buffer, size_t size) {
1865 		if (copyin(src, buffer, size) != 0) {
1866 		        return KERN_FAILURE;
1867 		}
1868 
1869 		src += size;
1870 		return KERN_SUCCESS;
1871 	});
1872 }
1873 
1874 kern_return_t
exclaves_resource_shared_memory_copyin(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)1875 exclaves_resource_shared_memory_copyin(exclaves_resource_t *resource,
1876     user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1877     mach_vm_size_t size2, mach_vm_size_t offset2)
1878 {
1879 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1880 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SHARED_MEMORY);
1881 
1882 	mach_vm_size_t umax = 0;
1883 	kern_return_t kr = KERN_FAILURE;
1884 
1885 	if (buffer == USER_ADDR_NULL || size1 == 0) {
1886 		return KERN_INVALID_ARGUMENT;
1887 	}
1888 
1889 	shared_memory_resource_t *sm = &resource->r_shared_memory;
1890 	assert3u(sm->sm_nranges, >, 0);
1891 	assert3u(sm->sm_size, !=, 0);
1892 
1893 	if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) {
1894 		return KERN_INVALID_ARGUMENT;
1895 	}
1896 
1897 	if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) {
1898 		return KERN_INVALID_ARGUMENT;
1899 	}
1900 
1901 	if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) {
1902 		return KERN_PROTECTION_FAILURE;
1903 	}
1904 
1905 	kr = exclaves_resource_shared_memory_io_copyin(resource, buffer, offset1, size1);
1906 	if (kr != KERN_SUCCESS) {
1907 		return kr;
1908 	}
1909 
1910 	kr = exclaves_resource_shared_memory_io_copyin(resource, buffer + size1, offset2,
1911 	    size2);
1912 	if (kr != KERN_SUCCESS) {
1913 		return kr;
1914 	}
1915 
1916 	return KERN_SUCCESS;
1917 }
1918 
1919 static kern_return_t
exclaves_resource_shared_memory_io_copyout(exclaves_resource_t * resource,user_addr_t _dst,off_t offset,size_t len)1920 exclaves_resource_shared_memory_io_copyout(exclaves_resource_t *resource,
1921     user_addr_t _dst, off_t offset, size_t len)
1922 {
1923 	assert3u(resource->r_shared_memory.sm_perm & EXCLAVES_BUFFER_PERM_READ,
1924 	    !=, 0);
1925 
1926 	__block user_addr_t dst = _dst;
1927 	return exclaves_resource_shared_memory_io(resource, offset, len,
1928 	           ^(char *buffer, size_t size) {
1929 		if (copyout(buffer, dst, size) != 0) {
1930 		        return KERN_FAILURE;
1931 		}
1932 
1933 		dst += size;
1934 		return KERN_SUCCESS;
1935 	});
1936 }
1937 
1938 kern_return_t
exclaves_resource_shared_memory_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)1939 exclaves_resource_shared_memory_copyout(exclaves_resource_t *resource,
1940     user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1941     mach_vm_size_t size2, mach_vm_size_t offset2)
1942 {
1943 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1944 	assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY ||
1945 	    resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
1946 
1947 	mach_vm_size_t umax = 0;
1948 	kern_return_t kr = KERN_FAILURE;
1949 
1950 	if (buffer == USER_ADDR_NULL || size1 == 0) {
1951 		return KERN_INVALID_ARGUMENT;
1952 	}
1953 
1954 	shared_memory_resource_t *sm = &resource->r_shared_memory;
1955 	assert3u(sm->sm_nranges, >, 0);
1956 	assert3u(sm->sm_size, !=, 0);
1957 
1958 	if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) {
1959 		return KERN_INVALID_ARGUMENT;
1960 	}
1961 
1962 	if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) {
1963 		return KERN_INVALID_ARGUMENT;
1964 	}
1965 
1966 	if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_READ) == 0) {
1967 		return KERN_PROTECTION_FAILURE;
1968 	}
1969 
1970 	kr = exclaves_resource_shared_memory_io_copyout(resource, buffer, offset1, size1);
1971 	if (kr != KERN_SUCCESS) {
1972 		return kr;
1973 	}
1974 
1975 	kr = exclaves_resource_shared_memory_io_copyout(resource, buffer + size1,
1976 	    offset2, size2);
1977 	if (kr != KERN_SUCCESS) {
1978 		return kr;
1979 	}
1980 
1981 	return KERN_SUCCESS;
1982 }
1983 
1984 /* The lower 32bits contain the endpoint id. */
1985 static uint32_t
audio_memory_get_endpoint(exclaves_resource_t * resource)1986 audio_memory_get_endpoint(exclaves_resource_t *resource)
1987 {
1988 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
1989 	return resource->r_id << 32 >> 32;
1990 }
1991 
1992 /* The upper 32bits of the id contain the buffer id. */
1993 static uint32_t
audio_memory_get_buffer_id(exclaves_resource_t * resource)1994 audio_memory_get_buffer_id(exclaves_resource_t *resource)
1995 {
1996 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
1997 	return resource->r_id >> 32;
1998 }
1999 
/*
 * Map a shared memory (or arbitrated audio memory) resource into xnu.
 *
 * First mapping: lazily initialises the shared memory client connection,
 * asks the exclave side to set up a mapping of the requested (page-rounded)
 * size/permission, then walks the physical pages to build the sm_range[]
 * table of virtually-contiguous chunks. Subsequent calls while the resource
 * is active just take a reference, provided the requested size/perms are
 * compatible with the existing mapping.
 *
 * Returns KERN_SUCCESS with a use-count reference held on the resource,
 * KERN_INVALID_ARGUMENT for a zero/overflowing size or an incompatible
 * re-map, or a failure code from the underlying setup/iterate calls.
 */
static kern_return_t
shared_memory_map(exclaves_resource_t *resource, size_t size,
    exclaves_buffer_perm_t perm)
{
	assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY ||
	    resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
	/* Only READ/WRITE bits are meaningful here. */
	assert3u(perm & ~(EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE), ==, 0);

	kern_return_t kr = KERN_FAILURE;

	/* round size up to nearest page */
	mach_vm_offset_t rounded_size = 0;
	if (size == 0 || mach_vm_round_page_overflow(size, &rounded_size)) {
		return KERN_INVALID_ARGUMENT;
	}

	lck_mtx_lock(&resource->r_mutex);

	__block shared_memory_resource_t *sm = &resource->r_shared_memory;

	/*
	 * If already active, bump the use count, check that the perms and size
	 * are compatible and return. Checking the use count is insufficient
	 * here as this can race with a non-locked use count release.
	 */
	if (resource->r_active) {
		/*
		 * Both the permissions and size must match.
		 */
		if (sm->sm_size < rounded_size || sm->sm_perm != perm) {
			lck_mtx_unlock(&resource->r_mutex);
			return KERN_INVALID_ARGUMENT;
		}

		exclaves_resource_retain(resource);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* This is lazily initialised and never de-initialised. */
	if (sm->sm_client.connection == NULL) {
		/* Audio memory packs the endpoint into the low 32 bits of the id. */
		uint64_t endpoint = resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY ?
		    resource->r_id :
		    audio_memory_get_endpoint(resource);

		kr = exclaves_shared_memory_init(endpoint, &sm->sm_client);
		if (kr != KERN_SUCCESS) {
			lck_mtx_unlock(&resource->r_mutex);
			return kr;
		}
	}

	const sharedmemorybase_perms_s sm_perm = perm == EXCLAVES_BUFFER_PERM_WRITE ?
	    SHAREDMEMORYBASE_PERMS_READWRITE : SHAREDMEMORYBASE_PERMS_READONLY;
	sharedmemorybase_mapping_s mapping = 0;
	kr = exclaves_shared_memory_setup(&sm->sm_client, sm_perm, 0,
	    rounded_size / PAGE_SIZE, &mapping);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&resource->r_mutex);
		return kr;
	}

	/*
	 * From this point on exclaves_shared_memory_teardown() must be called
	 * if something goes wrong so that the buffer will be properly unmapped.
	 */
	sm->sm_size = rounded_size;
	sm->sm_perm = perm;
	sm->sm_nranges = 0;

	/*
	 * The shared buffer is now accessible by xnu. Discover the layout of
	 * the memory.
	 */
	__block bool success = true;
	kr = exclaves_shared_memory_iterate(&sm->sm_client, &mapping, 0,
	    rounded_size / PAGE_SIZE, ^(uint64_t pa) {
		char *vaddr = (char *)phystokv(pa);
		assert3p(vaddr, !=, NULL);

		/*
		 * If this virtual address is adjacent to the previous
		 * one, just extend the current range.
		 */
		if (sm->sm_nranges > 0) {
		        const size_t len = sm->sm_range[sm->sm_nranges - 1].npages * PAGE_SIZE;
		        const char *addr = sm->sm_range[sm->sm_nranges - 1].address + len;

		        if (vaddr == addr) {
		                sm->sm_range[sm->sm_nranges - 1].npages++;
		                return;
			}

		        /*
		         * NOTE(review): this bails once nranges would reach
		         * MAX - 1, leaving the last sm_range[] slot unused —
		         * confirm whether "== EXCLAVES_SHARED_BUFFER_MAX_RANGES"
		         * was intended.
		         */
		        if (sm->sm_nranges == EXCLAVES_SHARED_BUFFER_MAX_RANGES - 1) {
		                (void) printf("exclaves: too many ranges, can't fit\n");
		                success = false;
		                return;
			}
		}

		/*
		 * Page is not virtually contiguous with the previous one -
		 * stick it in a new range.
		 */
		sm->sm_range[sm->sm_nranges].npages = 1;
		sm->sm_range[sm->sm_nranges].address = vaddr;
		sm->sm_nranges++;
	});
	if (kr != KERN_SUCCESS || !success) {
		/* Undo the setup so the exclave-side mapping is released. */
		exclaves_shared_memory_teardown(&sm->sm_client, &mapping);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	sm->sm_mapping = mapping;

	exclaves_resource_retain(resource);
	resource->r_active = true;

	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
2123 
2124 kern_return_t
exclaves_resource_shared_memory_map(const char * domain,const char * name,size_t size,exclaves_buffer_perm_t perm,exclaves_resource_t ** out)2125 exclaves_resource_shared_memory_map(const char *domain, const char *name, size_t size,
2126     exclaves_buffer_perm_t perm, exclaves_resource_t **out)
2127 {
2128 	assert3p(out, !=, NULL);
2129 
2130 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
2131 	    name, XNUPROXY_RESOURCE_SHARED_MEMORY);
2132 	if (resource == NULL) {
2133 		return KERN_NOT_FOUND;
2134 	}
2135 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_SHARED_MEMORY);
2136 
2137 	kern_return_t kr = shared_memory_map(resource, size, perm);
2138 	if (kr != KERN_SUCCESS) {
2139 		return kr;
2140 	}
2141 
2142 	*out = resource;
2143 	return KERN_SUCCESS;
2144 }
2145 
2146 
2147 static void
exclaves_resource_shared_memory_unmap(exclaves_resource_t * resource)2148 exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource)
2149 {
2150 	assert(resource->r_type == XNUPROXY_RESOURCE_SHARED_MEMORY ||
2151 	    resource->r_type == XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
2152 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
2153 	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
2154 
2155 	shared_memory_resource_t *sm = &resource->r_shared_memory;
2156 
2157 	kern_return_t kr = exclaves_shared_memory_teardown(&sm->sm_client,
2158 	    &sm->sm_mapping);
2159 	if (kr != KERN_SUCCESS) {
2160 		printf("exclaves: failed to teardown shared memory: %s, \n",
2161 		    resource->r_name);
2162 		return;
2163 	}
2164 
2165 	bzero(&resource->r_shared_memory, sizeof(resource->r_shared_memory));
2166 
2167 	resource->r_active = false;
2168 }
2169 
2170 
2171 /* -------------------------------------------------------------------------- */
2172 #pragma mark Arbitrated Audio Memory
2173 
2174 kern_return_t
exclaves_resource_audio_memory_map(const char * domain,const char * name,size_t size,exclaves_resource_t ** out)2175 exclaves_resource_audio_memory_map(const char *domain, const char *name,
2176     size_t size, exclaves_resource_t **out)
2177 {
2178 	assert3p(out, !=, NULL);
2179 
2180 	exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
2181 	    name, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
2182 	if (resource == NULL) {
2183 		return KERN_NOT_FOUND;
2184 	}
2185 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
2186 
2187 	kern_return_t kr = shared_memory_map(resource, size,
2188 	    EXCLAVES_BUFFER_PERM_READ);
2189 	if (kr != KERN_SUCCESS) {
2190 		return kr;
2191 	}
2192 
2193 	*out = resource;
2194 	return KERN_SUCCESS;
2195 }
2196 
/*
 * Unmap an arbitrated audio memory resource. Audio memory is backed by the
 * same machinery as plain shared memory, so this simply delegates after
 * checking the stricter type invariant. Called with the resource mutex held
 * and the use count at zero.
 */
static void
exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);

	exclaves_resource_shared_memory_unmap(resource);
}
2206 
2207 static kern_return_t
copyout_zero(user_addr_t buffer,mach_vm_size_t size,mach_vm_size_t offset)2208 copyout_zero(user_addr_t buffer, mach_vm_size_t size, mach_vm_size_t offset)
2209 {
2210 	static const char zero[PAGE_SIZE] = {0};
2211 
2212 	while (size > 0) {
2213 		size_t copy_size = MIN(size, sizeof(zero));
2214 		if (copyout(zero, buffer + offset, copy_size) != 0) {
2215 			return KERN_FAILURE;
2216 		}
2217 
2218 		offset += copy_size;
2219 		size -= copy_size;
2220 	}
2221 
2222 	return KERN_SUCCESS;
2223 }
2224 
2225 kern_return_t
exclaves_resource_audio_memory_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)2226 exclaves_resource_audio_memory_copyout(exclaves_resource_t *resource,
2227     user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
2228     mach_vm_size_t size2, mach_vm_size_t offset2)
2229 {
2230 	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
2231 	assert3u(resource->r_type, ==, XNUPROXY_RESOURCE_ARBITRATED_AUDIO_MEMORY);
2232 
2233 	kern_return_t kr = KERN_FAILURE;
2234 	exclaves_sensor_status_t status;
2235 	const uint32_t id = audio_memory_get_buffer_id(resource);
2236 
2237 	kr = exclaves_sensor_copy(id, size1, offset1, size2, offset2, &status);
2238 	if (kr != KERN_SUCCESS) {
2239 		return kr;
2240 	}
2241 
2242 	if (status == EXCLAVES_SENSOR_STATUS_ALLOWED) {
2243 		kr = exclaves_resource_shared_memory_copyout(resource, buffer,
2244 		    size1, offset1, size2, offset2);
2245 		if (kr != KERN_SUCCESS) {
2246 			return kr;
2247 		}
2248 	} else {
2249 		/*
2250 		 * This should be removed once the audio arbiter is properly
2251 		 * switching buffers and instead we should always rely on the
2252 		 * audio arbiter to do its job and make the data available or
2253 		 * not.
2254 		 */
2255 		kr = copyout_zero(buffer, size1, offset1);
2256 		if (kr != KERN_SUCCESS) {
2257 			return kr;
2258 		}
2259 
2260 		kr = copyout_zero(buffer, size2, offset2);
2261 		if (kr != KERN_SUCCESS) {
2262 			return kr;
2263 		}
2264 	}
2265 
2266 	return KERN_SUCCESS;
2267 }
2268 
2269 #endif /* CONFIG_EXCLAVES */
2270