1 /*
2 * Copyright (c) 2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if CONFIG_EXCLAVES
30
31 #include <stdint.h>
32 #include <stdbool.h>
33
34 #include <mach/exclaves.h>
35 #include <mach/kern_return.h>
36
37 #include <string.h>
38
39 #include <kern/assert.h>
40 #include <kern/bits.h>
41 #include <kern/queue.h>
42 #include <kern/kalloc.h>
43 #include <kern/locks.h>
44 #include <kern/task.h>
45 #include <kern/thread_call.h>
46
47 #include <vm/pmap.h>
48
49 #include <kern/ipc_kobject.h>
50
51 #include <os/hash.h>
52
53 #include <mach/mach_traps.h>
54 #include <mach/mach_port.h>
55
56 #include <sys/event.h>
57 #include <sys/reason.h>
58
59 #include "exclaves_conclave.h"
60 #include "exclaves_debug.h"
61 #include "exclaves_resource.h"
62 #include "exclaves_sensor.h"
63 #include "exclaves_shared_memory.h"
64 #include "exclaves_xnuproxy.h"
65 #include "exclaves_memory.h"
66
67 #include "kern/exclaves.tightbeam.h"
68
/* Lock group used for every per-resource mutex in this file. */
static LCK_GRP_DECLARE(resource_lck_grp, "exclaves_resource");
/* Forward declaration: drains pending conclave requests; called with r_mutex held. */
static kern_return_t
exclaves_update_state_machine_locked(exclaves_resource_t *resource);

/*
 * A cache of service ids in the kernel domain
 */
static bitmap_t
kernel_service_bitmap[BITMAP_LEN(CONCLAVE_SERVICE_MAX)] = {0};
78
79 /*
80 * Exclave Resources
81 *
82 * Exclaves provide a fixed static set of resources available to XNU. Some
83 * examples of types of resources:
84 * - Conclave managers
85 * - Services
86 * - Named buffers
87 * - Audio buffers
88 * ...
89 *
90 * Each resource has a name, a type and a corresponding identifier which is
91 * shared between XNU and Exclaves. Resources are scoped by what entities are
92 * allowed to access them.
93 * Resources are discovered during boot and made available in a two-level table
94 * scheme. The root table collects resources by their scope, with the
95 * second-level tables listing the actual resources.
96 *
97 *
98 * Root Table
99 * ┌────────────────────────────┐
100 * │ ┌────────────────────────┐ │
101 * │ │ "com.apple.kernel" │─┼─────┐
102 * │ └────────────────────────┘ │ │
103 * │ ┌────────────────────────┐ │ │
104 * │ │"com.apple.conclave.a" │─┼─┐ │
105 * │ └────────────────────────┘ │ │ │
106 * │ ┌────────────────────────┐ │ │ │
107 * │ │"com.apple.conclave.b" │ │ │ │
108 * │ └────────────────────────┘ │ │ │
109 * │ ┌────────────────────────┐ │ │ │
110 * │ │ "com.apple.driver.a" │ │ │ │
111 * │ └────────────────────────┘ │ │ │
112 * │ ... │ │ │
113 * │ │ │ │
114 * └────────────────────────────┘ │ │
115 * ┌─────────────────────────┘ │
116 * │ │
117 * │ ┌─────────────────────────┘
118 * │ │
119 * │ │
120 * │ │
121 * │ └──▶ "com.apple.kernel"
122 * │ ┌─────────────────────────────────────────────────────┐
123 * │ │┌───────────────────────┬──────────────────┬────────┐│
124 * │ ││"com.apple.conclave.a" │ CONCLAVE_MANAGER │ 0x1234 ││
125 * │ │└───────────────────────┴──────────────────┴────────┘│
126 * │ │┌───────────────────────┬──────────────────┬────────┐│
127 * │ ││"com.apple.conclave.b" │ CONCLAVE_MANAGER │ 0x7654 ││
128 * │ │└───────────────────────┴──────────────────┴────────┘│
129 * │ │ │
130 * │ │ ... │
131 * │ └─────────────────────────────────────────────────────┘
132 * │
133 * └─────▶ "com.apple.conclave.a"
134 * ┌─────────────────────────────────────────────────────┐
135 * │┌───────────────────────┬──────────────────┬────────┐│
136 * ││ "audio_buf" │ AUDIO_BUFFER │ 0x9999 ││
137 * │└───────────────────────┴──────────────────┴────────┘│
138 * │┌───────────────────────┬──────────────────┬────────┐│
139 * ││ "service_x" │ SERVICE │ 0x1111 ││
140 * │└───────────────────────┴──────────────────┴────────┘│
141 * │┌───────────────────────┬──────────────────┬────────┐│
142 * ││ "named_buffer_x" │ NAMED_BUFFER │0x66565 ││
143 * │└───────────────────────┴──────────────────┴────────┘│
144 * │ ... │
145 * └─────────────────────────────────────────────────────┘
146 *
147 * ...
148 *
149 *
150 * Resources can be looked up by first finding the root table entry (the
151 * "domain") and then searching for the identifier in that domain.
152 * For example to lookup the conclave manager ID for "com.apple.conclave.a",
153 * the "com.apple.kernel" domain would be found and then within that domain, the
154 * search would continue using the conclave name and the CONCLAVE_MANAGER type.
155 * Every conclave domain has a corresponding CONCLAVE_MANAGER resource in the
156 * "com.apple.kernel" domain.
157 */
158
159 /* -------------------------------------------------------------------------- */
160 #pragma mark Hash Table
161
/* Buckets per hash table; must stay a power of two (get_bucket masks the hash). */
#define TABLE_LEN 64
163
164 /*
165 * A table item is what ends up being stored in the hash table. It has a key and
166 * a value.
167 */
168 typedef struct {
169 const void *i_key;
170 size_t i_key_len;
171 void *i_value;
172
173 queue_chain_t i_chain;
174 } table_item_t;
175
176 /*
177 * The hash table consists of an array of buckets (queues). The hashing function
178 * will choose in which bucket a particular item belongs.
179 */
180 typedef struct {
181 queue_head_t *t_buckets;
182 size_t t_buckets_count;
183 } table_t;
184
185 /*
186 * Given a key, return the corresponding bucket.
187 */
188 static queue_head_t *
get_bucket(table_t * table,const void * key,size_t key_len)189 get_bucket(table_t *table, const void *key, size_t key_len)
190 {
191 const uint32_t idx = os_hash_jenkins(key, key_len) &
192 (table->t_buckets_count - 1);
193 return &table->t_buckets[idx];
194 }
195
196 /*
197 * Insert a new table item associated with 'key' into a table.
198 */
199 static void
table_put(table_t * table,const void * key,size_t key_len,table_item_t * item)200 table_put(table_t *table, const void *key, size_t key_len, table_item_t *item)
201 {
202 assert3p(item->i_chain.next, ==, NULL);
203 assert3p(item->i_chain.prev, ==, NULL);
204 assert3p(item->i_value, !=, NULL);
205
206 queue_head_t *head = get_bucket(table, key, key_len);
207 enqueue(head, &item->i_chain);
208 }
209
210 /*
211 * Iterate through all items matching 'key' calling cb for each.
212 */
213 static void
214 table_get(table_t *table, const void *key, size_t key_len, bool (^cb)(void *))
215 {
216 const queue_head_t *head = get_bucket(table, key, key_len);
217 table_item_t *elem = NULL;
218
219 assert3p(head, !=, NULL);
220
qe_foreach_element(elem,head,i_chain)221 qe_foreach_element(elem, head, i_chain) {
222 if (elem->i_key_len == key_len &&
223 memcmp(elem->i_key, key, elem->i_key_len) == 0) {
224 if (cb(elem->i_value)) {
225 return;
226 }
227 }
228 }
229
230 return;
231 }
232
233 /*
234 * Initialize the queues.
235 */
236 static void
table_init(table_t * table)237 table_init(table_t *table)
238 {
239 assert3u(table->t_buckets_count & (table->t_buckets_count - 1), ==, 0);
240
241 /* Initialise each bucket. */
242 for (size_t i = 0; i < table->t_buckets_count; i++) {
243 queue_init(&table->t_buckets[i]);
244 }
245 }
246
247 /*
248 * Allocate a new table with the specified number of buckets.
249 */
250 static table_t *
table_alloc(size_t nbuckets)251 table_alloc(size_t nbuckets)
252 {
253 assert3u(nbuckets, >, 0);
254 assert3u(nbuckets & (nbuckets - 1), ==, 0);
255
256 table_t *table = kalloc_type(table_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
257
258 table->t_buckets_count = nbuckets;
259 table->t_buckets = kalloc_type(queue_head_t, nbuckets,
260 Z_WAITOK | Z_ZERO | Z_NOFAIL);
261
262 return table;
263 }
264
265 static void
266 table_iterate(table_t *table,
267 bool (^cb)(const void *key, size_t key_len, void *value))
268 {
269 for (size_t i = 0; i < table->t_buckets_count; i++) {
270 const queue_head_t *head = &table->t_buckets[i];
271 table_item_t *elem = NULL;
272
qe_foreach_element(elem,head,i_chain)273 qe_foreach_element(elem, head, i_chain) {
274 if (cb(elem->i_key, elem->i_key_len, elem->i_value)) {
275 return;
276 }
277 }
278 }
279 }
280
281
282 /* -------------------------------------------------------------------------- */
283 #pragma mark Root Table
284
285 /*
286 * The root table is a hash table which contains an entry for every top-level
287 * domain.
288 * Domains scope resources. For example a conclave domain will contain a list of
289 * services available in that conclave. The kernel itself gets its own domain
290 * which holds conclave managers and other resources the kernel communicates
291 * with directly.
292 */
table_t root_table = {
	/*
	 * NOTE(review): the compound literal is queue_chain_t[] but t_buckets
	 * is queue_head_t * — presumably the two queue types are
	 * layout-compatible here; confirm against kern/queue.h.
	 */
	.t_buckets = (queue_chain_t *)(queue_chain_t[TABLE_LEN]){},
	.t_buckets_count = TABLE_LEN,
};
297
298 /*
299 * Entries in the root table. Each itself a table containing resources available
300 * in that domain.
301 */
302 typedef struct {
303 char d_name[EXCLAVES_RESOURCE_NAME_MAX];
304 table_t *d_table_name;
305 table_t *d_table_id;
306 } exclaves_resource_domain_t;
307
308 static exclaves_resource_domain_t *
lookup_domain(const char * domain_name)309 lookup_domain(const char *domain_name)
310 {
311 __block exclaves_resource_domain_t *domain = NULL;
312 table_get(&root_table, domain_name, strlen(domain_name), ^bool (void *data) {
313 domain = data;
314 return true;
315 });
316
317 return domain;
318 }
319
320 static void
321 iterate_domains(bool (^cb)(exclaves_resource_domain_t *))
322 {
323 table_iterate(&root_table,
324 ^(__unused const void *key, __unused size_t key_len, void *value) {
325 exclaves_resource_domain_t *domain = value;
326 return cb(domain);
327 });
328 }
329
330 static void
331 iterate_resources(exclaves_resource_domain_t *domain,
332 bool (^cb)(exclaves_resource_t *))
333 {
334 table_iterate(domain->d_table_name,
335 ^(__unused const void *key, __unused size_t key_len, void *value) {
336 exclaves_resource_t *resource = value;
337 return cb(resource);
338 });
339 }
340
341 static exclaves_resource_t *
lookup_resource_by_name(exclaves_resource_domain_t * domain,const char * name,xnuproxy_resourcetype_s type)342 lookup_resource_by_name(exclaves_resource_domain_t *domain, const char *name,
343 xnuproxy_resourcetype_s type)
344 {
345 __block exclaves_resource_t *resource = NULL;
346 table_get(domain->d_table_name, name, strlen(name), ^bool (void *data) {
347 exclaves_resource_t *tmp = data;
348 if (tmp->r_type == type) {
349 resource = data;
350 return true;
351 }
352 return false;
353 });
354
355 return resource;
356 }
357
358 static exclaves_resource_t *
lookup_resource_by_id(exclaves_resource_domain_t * domain,uint64_t id,xnuproxy_resourcetype_s type)359 lookup_resource_by_id(exclaves_resource_domain_t *domain, uint64_t id,
360 xnuproxy_resourcetype_s type)
361 {
362 __block exclaves_resource_t *resource = NULL;
363 table_get(domain->d_table_id, &id, sizeof(id), ^bool (void *data) {
364 exclaves_resource_t *tmp = data;
365 if (tmp->r_type == type) {
366 resource = data;
367 return true;
368 }
369 return false;
370 });
371
372 return resource;
373 }
374
375 static exclaves_resource_domain_t *
exclaves_resource_domain_alloc(const char * scope)376 exclaves_resource_domain_alloc(const char *scope)
377 {
378 assert3u(strlen(scope), >, 0);
379 assert3u(strlen(scope), <=, EXCLAVES_RESOURCE_NAME_MAX);
380
381 exclaves_resource_domain_t *domain = kalloc_type(
382 exclaves_resource_domain_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
383 (void) strlcpy(domain->d_name, scope,
384 sizeof(domain->d_name));
385
386 domain->d_table_name = table_alloc(TABLE_LEN);
387 table_init(domain->d_table_name);
388
389 domain->d_table_id = table_alloc(TABLE_LEN);
390 table_init(domain->d_table_id);
391
392 table_item_t *item = kalloc_type(table_item_t,
393 Z_WAITOK | Z_ZERO | Z_NOFAIL);
394 item->i_key = domain->d_name;
395 item->i_key_len = strlen(domain->d_name);
396 item->i_value = domain;
397
398 table_put(&root_table, scope, strlen(scope), item);
399
400 return domain;
401 }
402
403 static exclaves_resource_t *
exclaves_resource_alloc(xnuproxy_resourcetype_s type,const char * name,uint64_t id,exclaves_resource_domain_t * domain,bool connected)404 exclaves_resource_alloc(xnuproxy_resourcetype_s type, const char *name, uint64_t id,
405 exclaves_resource_domain_t *domain, bool connected)
406 {
407 exclaves_resource_t *resource = kalloc_type(exclaves_resource_t,
408 Z_WAITOK | Z_ZERO | Z_NOFAIL);
409
410 resource->r_type = type;
411 resource->r_id = id;
412 resource->r_active = false;
413 resource->r_connected = connected;
414 os_atomic_store(&resource->r_usecnt, 0, relaxed);
415
416 /*
417 * Each resource has an associated kobject of type
418 * IKOT_EXCLAVES_RESOURCE.
419 */
420 ipc_port_t port = ipc_kobject_alloc_port((ipc_kobject_t)resource,
421 IKOT_EXCLAVES_RESOURCE, IPC_KOBJECT_ALLOC_NSREQUEST);
422 resource->r_port = port;
423
424 lck_mtx_init(&resource->r_mutex, &resource_lck_grp, NULL);
425
426 (void) strlcpy(resource->r_name, name, sizeof(resource->r_name));
427
428
429 /* Stick the newly created resource into the name table. */
430 table_item_t *name_item = kalloc_type(table_item_t,
431 Z_WAITOK | Z_ZERO | Z_NOFAIL);
432
433 name_item->i_key = resource->r_name;
434 name_item->i_key_len = strlen(resource->r_name);
435 name_item->i_value = resource;
436
437 assert(lookup_resource_by_name(domain, name, type) == NULL);
438 table_put(domain->d_table_name, name, strlen(name), name_item);
439
440 /*
441 * Some types also need to lookup by id in addition to looking up by
442 * name.
443 */
444 switch (type) {
445 case XNUPROXY_RESOURCETYPE_NOTIFICATION: {
446 /* Stick the newly created resource into the ID table. */
447 table_item_t *id_item = kalloc_type(table_item_t,
448 Z_WAITOK | Z_ZERO | Z_NOFAIL);
449 id_item->i_key = &resource->r_id;
450 id_item->i_key_len = sizeof(resource->r_id);
451 id_item->i_value = resource;
452
453 assert(lookup_resource_by_id(domain, id, type) == NULL);
454 table_put(domain->d_table_id, &id, sizeof(id), id_item);
455 break;
456 }
457
458 default:
459 break;
460 }
461
462 return resource;
463 }
464
465 /* -------------------------------------------------------------------------- */
466 #pragma mark Exclaves Resources
467
/* No-senders notification handler for resource ports (defined below). */
static void exclaves_resource_no_senders(ipc_port_t port,
    mach_port_mscount_t mscount);

/* Register the kobject type backing every exclaves resource port. */
IPC_KOBJECT_DEFINE(IKOT_EXCLAVES_RESOURCE,
    .iko_op_stable = true,
    .iko_op_no_senders = exclaves_resource_no_senders);

/* Type-specific init/teardown helpers (defined later in this file). */
static void exclaves_conclave_init(exclaves_resource_t *resource);
static void exclaves_notification_init(exclaves_resource_t *resource);
static void exclaves_resource_sensor_reset(exclaves_resource_t *resource);
static void exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource);
static void exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource);
480
481 static void
populate_conclave_services(void)482 populate_conclave_services(void)
483 {
484 /* BEGIN IGNORE CODESTYLE */
485 iterate_domains(^(exclaves_resource_domain_t *domain) {
486
487 const bool is_kernel_domain =
488 (strcmp(domain->d_name, EXCLAVES_DOMAIN_KERNEL) == 0 ||
489 strcmp(domain->d_name, EXCLAVES_DOMAIN_DARWIN) == 0);
490
491 exclaves_resource_t *cm = exclaves_resource_lookup_by_name(
492 EXCLAVES_DOMAIN_KERNEL, domain->d_name,
493 XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
494
495 iterate_resources(domain, ^(exclaves_resource_t *resource) {
496 if (resource->r_type != XNUPROXY_RESOURCETYPE_SERVICE) {
497 return (bool)false;
498 }
499
500 if (cm != NULL) {
501 conclave_resource_t *c = &cm->r_conclave;
502 bitmap_set(c->c_service_bitmap,
503 (uint32_t)resource->r_id);
504 return (bool)false;
505 }
506
507 if (is_kernel_domain) {
508 bitmap_set(kernel_service_bitmap,
509 (uint32_t)resource->r_id);
510 return (bool)false;
511
512 }
513
514 /*
515 * Ignore services that are in unknown domains. This can
516 * happen if a conclave manager doesn't have a populated
517 * endpoint (for example during bringup).
518 */
519 return (bool)false;
520 });
521
522 return (bool)false;
523 });
524 /* END IGNORE CODESTYLE */
525 }
526
527 /*
528 * Discover all the static exclaves resources populating the resource tables as
529 * we go.
530 */
531 kern_return_t
exclaves_resource_init(void)532 exclaves_resource_init(void)
533 {
534 /* Initialize the root table. */
535 table_init(&root_table);
536
537 /* BEGIN IGNORE CODESTYLE */
538 kern_return_t kr = exclaves_xnuproxy_resource_info(
539 ^(const char *name, const char *scope,
540 xnuproxy_resourcetype_s type, uint64_t id, bool connected) {
541 /*
542 * Every resource is scoped to a specific domain, find the
543 * domain (or create one if it doesn't exist).
544 */
545 exclaves_resource_domain_t *domain = lookup_domain(scope);
546 if (domain == NULL) {
547 domain = exclaves_resource_domain_alloc(scope);
548 }
549
550 /* Allocate a new resource in the domain. */
551 exclaves_resource_t *resource = exclaves_resource_alloc(type,
552 name, id, domain, connected);
553
554 /*
555 * Type specific initialization.
556 */
557 switch (type) {
558 case XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER:
559 exclaves_conclave_init(resource);
560 break;
561
562 case XNUPROXY_RESOURCETYPE_NOTIFICATION:
563 exclaves_notification_init(resource);
564 break;
565
566 case XNUPROXY_RESOURCETYPE_SERVICE:
567 assert3u(resource->r_id, <, CONCLAVE_SERVICE_MAX);
568 break;
569
570 default:
571 break;
572 }
573 });
574 /* END IGNORE CODESTYLE */
575
576 if (kr != KERN_SUCCESS) {
577 return kr;
578 }
579
580 /* Populate the conclave service ID bitmaps. */
581 populate_conclave_services();
582
583 return KERN_SUCCESS;
584 }
585
586 exclaves_resource_t *
exclaves_resource_lookup_by_name(const char * domain_name,const char * name,xnuproxy_resourcetype_s type)587 exclaves_resource_lookup_by_name(const char *domain_name, const char *name,
588 xnuproxy_resourcetype_s type)
589 {
590 assert3u(strlen(domain_name), >, 0);
591 assert3u(strlen(name), >, 0);
592
593 exclaves_resource_domain_t *domain = lookup_domain(domain_name);
594 if (domain == NULL) {
595 return NULL;
596 }
597
598 exclaves_resource_t *r = lookup_resource_by_name(domain, name, type);
599
600 /* Ignore entries not marked connected. */
601 if (r == NULL || !r->r_connected) {
602 return NULL;
603 }
604
605 return r;
606 }
607
608 static exclaves_resource_t *
exclaves_resource_lookup_by_id(const char * domain_name,uint64_t id,xnuproxy_resourcetype_s type)609 exclaves_resource_lookup_by_id(const char *domain_name, uint64_t id,
610 xnuproxy_resourcetype_s type)
611 {
612 assert3u(strlen(domain_name), >, 0);
613
614 exclaves_resource_domain_t *domain = lookup_domain(domain_name);
615 if (domain == NULL) {
616 return NULL;
617 }
618
619 exclaves_resource_t *r = lookup_resource_by_id(domain, id, type);
620
621 /* Ignore entries not marked connected. */
622 if (r == NULL || !r->r_connected) {
623 return NULL;
624 }
625
626 return r;
627 }
628
629 const char *
exclaves_resource_name(const exclaves_resource_t * resource)630 exclaves_resource_name(const exclaves_resource_t *resource)
631 {
632 return resource->r_name;
633 }
634
635 /*
636 * Notes on use-count management
637 * For the most part everything is done under the resource lock.
638 * In some cases, it's necessary to grab/release a use count without
639 * holding the lock - for example the realtime audio paths doing copyin/copyout
640 * of named buffers/audio buffers.
641 * To prevent against races, initialization/de-initialization should always
642 * recheck the use-count under the lock.
643 */
644 uint32_t
exclaves_resource_retain(exclaves_resource_t * resource)645 exclaves_resource_retain(exclaves_resource_t *resource)
646 {
647 uint32_t orig =
648 os_atomic_inc_orig(&resource->r_usecnt, relaxed);
649 assert3u(orig, <, UINT32_MAX);
650
651 return orig;
652 }
653
654 void
exclaves_resource_release(exclaves_resource_t * resource)655 exclaves_resource_release(exclaves_resource_t *resource)
656 {
657 /*
658 * Drop the use count without holding the lock (this path may be called
659 * by RT threads and should be RT-safe).
660 */
661 uint32_t orig = os_atomic_dec_orig(&resource->r_usecnt, release);
662 assert3u(orig, !=, 0);
663 if (orig != 1) {
664 return;
665 }
666
667 /*
668 * Now grab the lock. The RT-safe paths calling this function shouldn't
669 * end up here unless there's a bug or mis-behaving user code (like
670 * deallocating an in-use mach port).
671 */
672 lck_mtx_lock(&resource->r_mutex);
673
674 /*
675 * Re-check the use count - as a second user of the resource
676 * may have snuck in in the meantime.
677 */
678 if (os_atomic_load(&resource->r_usecnt, acquire) > 0) {
679 lck_mtx_unlock(&resource->r_mutex);
680 return;
681 }
682
683 switch (resource->r_type) {
684 case XNUPROXY_RESOURCETYPE_SENSOR:
685 exclaves_resource_sensor_reset(resource);
686 break;
687
688 case XNUPROXY_RESOURCETYPE_SHAREDMEMORY:
689 exclaves_resource_shared_memory_unmap(resource);
690 break;
691
692 case XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY:
693 exclaves_resource_audio_memory_unmap(resource);
694 break;
695
696 default:
697 break;
698 }
699
700 lck_mtx_unlock(&resource->r_mutex);
701 }
702
703 kern_return_t
exclaves_resource_from_port_name(ipc_space_t space,mach_port_name_t name,exclaves_resource_t ** out)704 exclaves_resource_from_port_name(ipc_space_t space, mach_port_name_t name,
705 exclaves_resource_t **out)
706 {
707 kern_return_t kr = KERN_SUCCESS;
708 ipc_port_t port = IPC_PORT_NULL;
709
710 if (!MACH_PORT_VALID(name)) {
711 return KERN_INVALID_NAME;
712 }
713
714 kr = ipc_port_translate_send(space, name, &port);
715 if (kr != KERN_SUCCESS) {
716 return kr;
717 }
718
719 /* port is locked */
720 assert(IP_VALID(port));
721
722 exclaves_resource_t *resource = ipc_kobject_get_stable(port,
723 IKOT_EXCLAVES_RESOURCE);
724
725 /* The port is valid, but doesn't denote an exclaves resource. */
726 if (resource == NULL) {
727 ip_mq_unlock(port);
728 return KERN_INVALID_CAPABILITY;
729 }
730
731 /* Grab a reference while the port is good and the ipc lock is held. */
732 __assert_only uint32_t orig = exclaves_resource_retain(resource);
733 assert3u(orig, >, 0);
734
735 ip_mq_unlock(port);
736 *out = resource;
737
738 return KERN_SUCCESS;
739 }
740
741 /*
742 * Consumes a reference to the resource. On success the resource is reference is
743 * associated with the lifetime of the port.
744 */
745 kern_return_t
exclaves_resource_create_port_name(exclaves_resource_t * resource,ipc_space_t space,mach_port_name_t * name)746 exclaves_resource_create_port_name(exclaves_resource_t *resource, ipc_space_t space,
747 mach_port_name_t *name)
748 {
749 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
750
751 ipc_port_t port = resource->r_port;
752
753 ip_mq_lock(port);
754
755 /* Create an armed send right. */
756 kern_return_t ret = ipc_kobject_make_send_nsrequest_locked(port,
757 resource, IKOT_EXCLAVES_RESOURCE);
758 if (ret != KERN_SUCCESS &&
759 ret != KERN_ALREADY_WAITING) {
760 ip_mq_unlock(port);
761 exclaves_resource_release(resource);
762 return ret;
763 }
764
765 /*
766 * If there was already a send right, then the port already has an
767 * associated use count so drop this one.
768 */
769 if (port->ip_srights > 1) {
770 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 1);
771 exclaves_resource_release(resource);
772 }
773
774 ip_mq_unlock(port);
775
776 *name = ipc_port_copyout_send(port, space);
777 if (!MACH_PORT_VALID(*name)) {
778 /*
779 * ipc_port_copyout_send() releases the send right on failure
780 * (possibly calling exclaves_resource_no_senders() in the
781 * process).
782 */
783 return KERN_RESOURCE_SHORTAGE;
784 }
785
786 return KERN_SUCCESS;
787 }
788
789 static void
exclaves_resource_no_senders(ipc_port_t port,__unused mach_port_mscount_t mscount)790 exclaves_resource_no_senders(ipc_port_t port,
791 __unused mach_port_mscount_t mscount)
792 {
793 exclaves_resource_t *resource = ipc_kobject_get_stable(port,
794 IKOT_EXCLAVES_RESOURCE);
795
796 exclaves_resource_release(resource);
797 }
798
799 /* -------------------------------------------------------------------------- */
800 #pragma mark Conclave Manager
801
802 static void
exclaves_conclave_init(exclaves_resource_t * resource)803 exclaves_conclave_init(exclaves_resource_t *resource)
804 {
805 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
806
807 tb_client_connection_t connection = NULL;
808 __assert_only kern_return_t kr = exclaves_conclave_launcher_init(resource->r_id,
809 &connection);
810 assert3u(kr, ==, KERN_SUCCESS);
811
812 conclave_resource_t *conclave = &resource->r_conclave;
813
814 conclave->c_control = connection;
815 conclave->c_state = CONCLAVE_S_NONE;
816 conclave->c_request = CONCLAVE_R_NONE;
817 conclave->c_active_downcall = false;
818 conclave->c_active_stopcall = false;
819 conclave->c_downcall_thread = THREAD_NULL;
820 conclave->c_task = TASK_NULL;
821 }
822
823 kern_return_t
exclaves_conclave_attach(const char * name,task_t task)824 exclaves_conclave_attach(const char *name, task_t task)
825 {
826 assert3p(task, !=, TASK_NULL);
827
828 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(
829 EXCLAVES_DOMAIN_KERNEL, name, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
830 if (resource == NULL) {
831 /* Just return success here. The conclave launch will fail. */
832 return KERN_SUCCESS;
833 }
834 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
835
836 conclave_resource_t *conclave = &resource->r_conclave;
837
838 lck_mtx_lock(&resource->r_mutex);
839
840 if (conclave->c_state != CONCLAVE_S_NONE) {
841 lck_mtx_unlock(&resource->r_mutex);
842 return KERN_INVALID_ARGUMENT;
843 }
844
845 task_reference(task);
846
847 task->conclave = resource;
848
849 conclave->c_task = task;
850 conclave->c_state = CONCLAVE_S_ATTACHED;
851
852 lck_mtx_unlock(&resource->r_mutex);
853
854 return KERN_SUCCESS;
855 }
856
857 kern_return_t
exclaves_conclave_detach(exclaves_resource_t * resource,task_t task)858 exclaves_conclave_detach(exclaves_resource_t *resource, task_t task)
859 {
860 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
861
862 conclave_resource_t *conclave = &resource->r_conclave;
863
864 lck_mtx_lock(&resource->r_mutex);
865
866 while (conclave->c_active_downcall) {
867 conclave->c_active_detach = true;
868 assert3p(conclave->c_downcall_thread, !=, THREAD_NULL);
869 lck_mtx_sleep_with_inheritor(&resource->r_mutex,
870 LCK_SLEEP_DEFAULT,
871 (event_t)&conclave->c_active_downcall,
872 conclave->c_downcall_thread,
873 THREAD_UNINT,
874 TIMEOUT_WAIT_FOREVER);
875 conclave->c_active_detach = false;
876 }
877
878 if (conclave->c_state != CONCLAVE_S_ATTACHED &&
879 conclave->c_state != CONCLAVE_S_STOPPED) {
880 panic("Task %p trying to detach a conclave %p but it is in a "
881 "weird state", task, conclave);
882 }
883
884 assert3u(conclave->c_active_downcall, ==, 0);
885 assert3u(conclave->c_active_stopcall, ==, 0);
886 assert3p(conclave->c_downcall_thread, ==, THREAD_NULL);
887 assert3u(conclave->c_request, ==, CONCLAVE_R_NONE);
888 assert3p(task->conclave, !=, NULL);
889 assert3p(resource, ==, task->conclave);
890
891 task->conclave = NULL;
892 conclave->c_task = TASK_NULL;
893
894 conclave->c_state = CONCLAVE_S_NONE;
895
896 lck_mtx_unlock(&resource->r_mutex);
897
898 task_deallocate(task);
899
900 return KERN_SUCCESS;
901 }
902
903 kern_return_t
exclaves_conclave_inherit(exclaves_resource_t * resource,task_t old_task,task_t new_task)904 exclaves_conclave_inherit(exclaves_resource_t *resource, task_t old_task,
905 task_t new_task)
906 {
907 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
908
909 conclave_resource_t *conclave = &resource->r_conclave;
910
911 lck_mtx_lock(&resource->r_mutex);
912
913 assert3u(conclave->c_state, !=, CONCLAVE_S_NONE);
914
915 assert3p(new_task->conclave, ==, NULL);
916 assert3p(old_task->conclave, !=, NULL);
917 assert3p(resource, ==, old_task->conclave);
918
919 /* Only allow inheriting the conclave if it has not yet started. */
920 if (conclave->c_state != CONCLAVE_S_ATTACHED ||
921 conclave->c_active_downcall ||
922 conclave->c_active_stopcall) {
923 lck_mtx_unlock(&resource->r_mutex);
924 return KERN_FAILURE;
925 }
926
927 old_task->conclave = NULL;
928
929 task_reference(new_task);
930 new_task->conclave = resource;
931
932 conclave->c_task = new_task;
933
934 lck_mtx_unlock(&resource->r_mutex);
935 task_deallocate(old_task);
936
937 return KERN_SUCCESS;
938 }
939
940 bool
exclaves_conclave_is_attached(const exclaves_resource_t * resource)941 exclaves_conclave_is_attached(const exclaves_resource_t *resource)
942 {
943 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
944 const conclave_resource_t *conclave = &resource->r_conclave;
945
946 return conclave->c_state == CONCLAVE_S_ATTACHED;
947 }
948
949 kern_return_t
exclaves_conclave_launch(exclaves_resource_t * resource)950 exclaves_conclave_launch(exclaves_resource_t *resource)
951 {
952 kern_return_t kr;
953 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
954
955 conclave_resource_t *conclave = &resource->r_conclave;
956
957 if (exclaves_boot_wait(EXCLAVES_BOOT_STAGE_EXCLAVEKIT) != KERN_SUCCESS) {
958 /*
959 * This should only ever happen if the EXCLAVEKIT requirement was
960 * relaxed.
961 */
962 exclaves_requirement_assert(EXCLAVES_R_EXCLAVEKIT,
963 "failed to boot to exclavekit");
964 return KERN_NOT_SUPPORTED;
965 }
966
967 lck_mtx_lock(&resource->r_mutex);
968
969 if (conclave->c_state != CONCLAVE_S_ATTACHED ||
970 conclave->c_active_downcall ||
971 conclave->c_active_stopcall) {
972 lck_mtx_unlock(&resource->r_mutex);
973 return KERN_FAILURE;
974 }
975
976 conclave->c_request |= CONCLAVE_R_LAUNCH_REQUESTED;
977 kr = exclaves_update_state_machine_locked(resource);
978 return kr;
979 }
980
/*
 * Drain pending conclave requests (launch / suspend-resume / stop),
 * performing the corresponding downcall into the conclave launcher for
 * each one.
 *
 * Must be called with resource->r_mutex held.  The mutex is dropped
 * around every downcall and is NOT held on return.  Always returns
 * KERN_SUCCESS.
 */
static kern_return_t
exclaves_update_state_machine_locked(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;
	conclave_state_t pending_state = CONCLAVE_S_NONE;
	kern_return_t ret;

	while (1) {
		bool stop_call = false;
		/* Check if there are pending requests */
		if (conclave->c_request & CONCLAVE_R_LAUNCH_REQUESTED) {
			conclave->c_request &= ~CONCLAVE_R_LAUNCH_REQUESTED;
			assert3u(conclave->c_active_downcall, ==, 0);
			conclave->c_active_downcall = true;
			conclave->c_downcall_thread = current_thread();
			pending_state = CONCLAVE_S_RUNNING;
			/* Drop the lock across the launch downcall. */
			lck_mtx_unlock(&resource->r_mutex);

			ret = exclaves_conclave_launcher_launch(conclave->c_control);
			assert3u(ret, ==, KERN_SUCCESS);
		} else if (conclave->c_request & CONCLAVE_R_SUSPEND_REQUESTED) {
			task_t task = conclave->c_task;
			int suspend_count;
			bool suspend;

			/*
			 * Re-derive the direction (suspend vs resume) from
			 * the task's current suspend count rather than from
			 * the request flag itself.
			 */
			task_lock(task);
			suspend_count = task->suspend_count;
			task_unlock(task);

			suspend = (suspend_count > 0) ? true : false;
			conclave->c_request &= ~CONCLAVE_R_SUSPEND_REQUESTED;

			/* Check the state to see if downcall is needed */
			if (suspend && conclave->c_state != CONCLAVE_S_RUNNING) {
				continue;
			}

			if (!suspend && conclave->c_state != CONCLAVE_S_SUSPENDED) {
				continue;
			}

			assert3u(conclave->c_active_downcall, ==, 0);
			conclave->c_active_downcall = true;
			conclave->c_downcall_thread = current_thread();
			pending_state = suspend ? CONCLAVE_S_SUSPENDED : CONCLAVE_S_RUNNING;
			lck_mtx_unlock(&resource->r_mutex);

			ret = exclaves_conclave_launcher_suspend(conclave->c_control,
			    suspend);
		} else if (conclave->c_request & CONCLAVE_R_STOP_REQUESTED) {
			conclave->c_request &= ~CONCLAVE_R_STOP_REQUESTED;

			/* Check the state to see if downcall is needed */
			if (conclave->c_state != CONCLAVE_S_RUNNING &&
			    conclave->c_state != CONCLAVE_S_SUSPENDED) {
				continue;
			}
			assert3u(conclave->c_active_downcall, ==, 0);
			conclave->c_active_downcall = true;
			conclave->c_downcall_thread = current_thread();
			conclave->c_active_stopcall = true;
			stop_call = true;
			pending_state = CONCLAVE_S_STOPPED;
			lck_mtx_unlock(&resource->r_mutex);

			ret = exclaves_conclave_launcher_stop(conclave->c_control,
			    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT);
			assert3u(ret, ==, KERN_SUCCESS);
		} else {
			/* Nothing pending: exit with the mutex dropped. */
			lck_mtx_unlock(&resource->r_mutex);
			break;
		}

		/* Re-acquire the lock and retire the completed downcall. */
		lck_mtx_lock(&resource->r_mutex);
		assert3u(conclave->c_active_downcall, ==, 1);
		assert3p(conclave->c_downcall_thread, ==, current_thread());
		conclave->c_active_downcall = false;
		conclave->c_downcall_thread = THREAD_NULL;
		if (stop_call) {
			conclave->c_active_stopcall = false;
		}
		/* Wake any detach waiting for the downcall to complete. */
		if (conclave->c_active_detach) {
			wakeup_all_with_inheritor((event_t)&conclave->c_active_downcall, THREAD_AWAKENED);
		}

		/* Bail out if active stopcall is going on */
		if (conclave->c_active_stopcall || conclave->c_state == CONCLAVE_S_STOPPED) {
			lck_mtx_unlock(&resource->r_mutex);
			break;
		}

		conclave->c_state = pending_state;
	}
	return KERN_SUCCESS;
}
1078
1079 /*
1080 * Return the domain associated with the current conclave.
1081 * If not joined to a conclave, return the KERNEL domain. This implies that the
1082 * calling task is sufficiently privileged.
1083 */
1084 const char *
exclaves_conclave_get_domain(exclaves_resource_t * resource)1085 exclaves_conclave_get_domain(exclaves_resource_t *resource)
1086 {
1087 if (resource != NULL) {
1088 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1089 return resource->r_name;
1090 }
1091
1092 if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_KERNEL_DOMAIN)) {
1093 exclaves_requirement_assert(EXCLAVES_R_CONCLAVE_RESOURCES,
1094 "no conclave manager present");
1095 }
1096
1097 return EXCLAVES_DOMAIN_KERNEL;
1098 }
1099
/*
 * Request that the conclave managed by `resource` be stopped.
 *
 * A no-op if a stop is already in flight or the conclave is stopped.
 * If another downcall is in progress the stop is deferred by arming
 * CONCLAVE_R_STOP_REQUESTED for the state machine to pick up.
 */
kern_return_t
exclaves_conclave_stop(exclaves_resource_t *resource, bool gather_crash_bt __unused)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	lck_mtx_lock(&resource->r_mutex);

	/* Bailout if active stopcall in progress */
	if (conclave->c_active_stopcall || conclave->c_state == CONCLAVE_S_STOPPED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Arm stop requested if downcall in progress */
	if (conclave->c_active_downcall) {
		conclave->c_request |= CONCLAVE_R_STOP_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	if (conclave->c_state == CONCLAVE_S_ATTACHED) {
		/* Change the state to stopped if the conclave was never started */
		conclave->c_state = CONCLAVE_S_STOPPED;

		/* Suspend might be requested, clear it as well */
		conclave->c_request = CONCLAVE_R_NONE;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/*
	 * Drive the state machine ourselves.  Note: it drops the mutex
	 * before returning.
	 */
	conclave->c_request |= CONCLAVE_R_STOP_REQUESTED;
	kern_return_t kr = exclaves_update_state_machine_locked(resource);

	return kr;
}
1137
/*
 * Request that the conclave be suspended.
 *
 * The suspend is expressed via CONCLAVE_R_SUSPEND_REQUESTED; the state
 * machine re-derives the actual direction (suspend vs resume) from the
 * task's suspend count when the request is processed.
 */
kern_return_t
exclaves_conclave_suspend(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	lck_mtx_lock(&resource->r_mutex);

	/* Bailout if active stopcall in progress */
	if (conclave->c_active_stopcall || conclave->c_state == CONCLAVE_S_STOPPED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Arm suspend requested if downcall in progress */
	if (conclave->c_active_downcall) {
		conclave->c_request |= CONCLAVE_R_SUSPEND_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	if (conclave->c_state == CONCLAVE_S_ATTACHED) {
		/* Conclave is not yet launched, just arm suspend requested and bailout */
		conclave->c_request |= CONCLAVE_R_SUSPEND_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	} else if (conclave->c_state == CONCLAVE_S_SUSPENDED) {
		/* Already suspended: nothing to do. */
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Note: the state machine drops the mutex before returning. */
	conclave->c_request |= CONCLAVE_R_SUSPEND_REQUESTED;
	kern_return_t kr = exclaves_update_state_machine_locked(resource);

	return kr;
}
1175
/*
 * Request that the conclave be resumed.
 *
 * Resume is expressed via the same CONCLAVE_R_SUSPEND_REQUESTED flag as
 * suspend: the state machine re-derives the direction from the task's
 * suspend count, so a request made while the task is no longer suspended
 * results in a resume downcall.
 */
kern_return_t
exclaves_conclave_resume(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	lck_mtx_lock(&resource->r_mutex);

	/* Bailout if active stopcall in progress */
	if (conclave->c_active_stopcall || conclave->c_state == CONCLAVE_S_STOPPED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Arm suspend requested if downcall in progress */
	if (conclave->c_active_downcall) {
		conclave->c_request |= CONCLAVE_R_SUSPEND_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	if (conclave->c_state == CONCLAVE_S_ATTACHED) {
		/* Conclave is not yet launched, just arm suspend requested and bailout */
		conclave->c_request |= CONCLAVE_R_SUSPEND_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	} else if (conclave->c_state == CONCLAVE_S_RUNNING) {
		/* Already running: nothing to do. */
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Note: the state machine drops the mutex before returning. */
	conclave->c_request |= CONCLAVE_R_SUSPEND_REQUESTED;
	kern_return_t kr = exclaves_update_state_machine_locked(resource);

	return kr;
}
1213
1214 kern_return_t
exclaves_conclave_stop_upcall(exclaves_resource_t * resource)1215 exclaves_conclave_stop_upcall(exclaves_resource_t *resource)
1216 {
1217 assert3p(resource, !=, NULL);
1218 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1219
1220 conclave_resource_t *conclave = &resource->r_conclave;
1221 thread_t thread = current_thread();
1222
1223 lck_mtx_lock(&resource->r_mutex);
1224
1225 if (conclave->c_state == CONCLAVE_S_STOPPED || conclave->c_active_stopcall) {
1226 lck_mtx_unlock(&resource->r_mutex);
1227 return KERN_SUCCESS;
1228 }
1229
1230 conclave->c_active_stopcall = true;
1231 thread->th_exclaves_state |= TH_EXCLAVES_STOP_UPCALL_PENDING;
1232 lck_mtx_unlock(&resource->r_mutex);
1233
1234 return KERN_SUCCESS;
1235 }
1236
/*
 * Complete a previously armed stop upcall: deliver an exclaves guard
 * exception to the task and transition the conclave to STOPPED.
 */
kern_return_t
exclaves_conclave_stop_upcall_complete(exclaves_resource_t *resource, task_t task)
{
	assert3p(resource, !=, NULL);
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;
	thread_t thread = current_thread();

	/* The stop upcall is no longer pending on this thread. */
	thread->th_exclaves_state &= ~TH_EXCLAVES_STOP_UPCALL_PENDING;

	int flags = PX_DEBUG_NO_HONOR | PX_NO_EXCEPTION_UTHREAD;
	exception_info_t info = {
		.os_reason = OS_REASON_GUARD,
		.exception_type = EXC_GUARD,
		.mx_code = GUARD_REASON_EXCLAVES,
		.mx_subcode = 0
	};

	/* Terminate the task with an exclaves guard exception. */
	exit_with_exclave_exception(get_bsdtask_info(task), info, flags);

	lck_mtx_lock(&resource->r_mutex);

	/* Mark the conclave stopped and discard any pending requests. */
	conclave->c_active_stopcall = false;
	conclave->c_state = CONCLAVE_S_STOPPED;
	conclave->c_request = CONCLAVE_R_NONE;

	lck_mtx_unlock(&resource->r_mutex);
	return KERN_SUCCESS;
}
1267
1268 bool
exclaves_conclave_has_service(exclaves_resource_t * resource,uint64_t id)1269 exclaves_conclave_has_service(exclaves_resource_t *resource, uint64_t id)
1270 {
1271 assert3u(id, <, CONCLAVE_SERVICE_MAX);
1272
1273 if (resource == NULL) {
1274 /* There's no conclave, fallback to the kernel domain. */
1275 if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_KERNEL_DOMAIN)) {
1276 exclaves_requirement_assert(EXCLAVES_R_CONCLAVE_RESOURCES,
1277 "no conclave manager present");
1278 }
1279 return bitmap_test(kernel_service_bitmap, (uint32_t)id);
1280 }
1281
1282 assert3p(resource, !=, NULL);
1283 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1284
1285 conclave_resource_t *conclave = &resource->r_conclave;
1286
1287 return bitmap_test(conclave->c_service_bitmap, (uint32_t)id);
1288 }
1289
1290 /* -------------------------------------------------------------------------- */
1291 #pragma mark Sensors
1292
1293 static void
exclaves_resource_sensor_reset(exclaves_resource_t * resource)1294 exclaves_resource_sensor_reset(exclaves_resource_t *resource)
1295 {
1296 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1297 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1298 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1299
1300 exclaves_sensor_status_t status;
1301
1302 for (int i = 0; i < resource->r_sensor.s_startcount; i++) {
1303 __assert_only kern_return_t kr = exclaves_sensor_stop(
1304 (exclaves_sensor_type_t)resource->r_id, 0, &status);
1305 assert3u(kr, !=, KERN_INVALID_ARGUMENT);
1306 }
1307
1308 resource->r_sensor.s_startcount = 0;
1309 }
1310
1311 kern_return_t
exclaves_resource_sensor_open(const char * domain,const char * id_name,exclaves_resource_t ** out)1312 exclaves_resource_sensor_open(const char *domain, const char *id_name,
1313 exclaves_resource_t **out)
1314 {
1315 assert3p(out, !=, NULL);
1316
1317 exclaves_resource_t *sensor = exclaves_resource_lookup_by_name(domain,
1318 id_name, XNUPROXY_RESOURCETYPE_SENSOR);
1319
1320 if (sensor == NULL) {
1321 return KERN_NOT_FOUND;
1322 }
1323
1324 assert3u(sensor->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1325
1326 lck_mtx_lock(&sensor->r_mutex);
1327 exclaves_resource_retain(sensor);
1328 lck_mtx_unlock(&sensor->r_mutex);
1329
1330 *out = sensor;
1331
1332 return KERN_SUCCESS;
1333 }
1334
1335 kern_return_t
exclaves_resource_sensor_start(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1336 exclaves_resource_sensor_start(exclaves_resource_t *resource, uint64_t flags,
1337 exclaves_sensor_status_t *status)
1338 {
1339 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1340
1341 lck_mtx_lock(&resource->r_mutex);
1342 if (resource->r_sensor.s_startcount == UINT64_MAX) {
1343 lck_mtx_unlock(&resource->r_mutex);
1344 return KERN_INVALID_ARGUMENT;
1345 }
1346
1347 kern_return_t kr = exclaves_sensor_start(
1348 (exclaves_sensor_type_t)resource->r_id, flags, status);
1349 if (kr == KERN_SUCCESS) {
1350 resource->r_sensor.s_startcount += 1;
1351 }
1352 lck_mtx_unlock(&resource->r_mutex);
1353 return kr;
1354 }
1355
1356 kern_return_t
exclaves_resource_sensor_status(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1357 exclaves_resource_sensor_status(exclaves_resource_t *resource, uint64_t flags,
1358 exclaves_sensor_status_t *status)
1359 {
1360 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1361
1362 lck_mtx_lock(&resource->r_mutex);
1363 kern_return_t kr = exclaves_sensor_status(
1364 (exclaves_sensor_type_t)resource->r_id, flags, status);
1365 lck_mtx_unlock(&resource->r_mutex);
1366
1367 return kr;
1368 }
1369
1370 kern_return_t
exclaves_resource_sensor_stop(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1371 exclaves_resource_sensor_stop(exclaves_resource_t *resource, uint64_t flags,
1372 exclaves_sensor_status_t *status)
1373 {
1374 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1375
1376 lck_mtx_lock(&resource->r_mutex);
1377 if (resource->r_sensor.s_startcount == 0) {
1378 lck_mtx_unlock(&resource->r_mutex);
1379 return KERN_INVALID_ARGUMENT;
1380 }
1381
1382 kern_return_t kr = exclaves_sensor_stop(
1383 (exclaves_sensor_type_t)resource->r_id, flags, status);
1384 if (kr == KERN_SUCCESS) {
1385 resource->r_sensor.s_startcount -= 1;
1386 }
1387 lck_mtx_unlock(&resource->r_mutex);
1388
1389 return kr;
1390 }
1391
1392 /* -------------------------------------------------------------------------- */
1393 #pragma mark Notifications
1394
/* One-time initialisation of a notification resource's knote list. */
static void
exclaves_notification_init(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
	exclaves_notification_t *notification = &resource->r_notification;
	klist_init(&notification->notification_klist);
}
1402
/*
 * kevent filter attach: resolve the knote's id (a mach port name) to a
 * notification resource and attach the knote to its klist.  On success
 * the resource reference is consumed by kn_exclaves_resource and released
 * again on detach.
 */
static int
filt_exclaves_notification_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
{
	int error = 0;
	exclaves_resource_t *exclaves_resource = NULL;
	kern_return_t kr = exclaves_resource_from_port_name(current_space(), (mach_port_name_t)kn->kn_id, &exclaves_resource);
	if (kr != KERN_SUCCESS) {
		error = ENOENT;
		goto out;
	}
	assert3p(exclaves_resource, !=, NULL);
	/* Only notification resources can be attached to. */
	if (exclaves_resource->r_type != XNUPROXY_RESOURCETYPE_NOTIFICATION) {
		exclaves_resource_release(exclaves_resource);
		error = EINVAL;
		goto out;
	}

	lck_mtx_lock(&exclaves_resource->r_mutex);

	/* A knote may be attached to at most one resource. */
	if (kn->kn_exclaves_resource != NULL) {
		lck_mtx_unlock(&exclaves_resource->r_mutex);
		exclaves_resource_release(exclaves_resource);
		error = EBUSY;
		goto out;
	}

	/* kn_exclaves_resource consumes the ref. */
	kn->kn_exclaves_resource = exclaves_resource;
	KNOTE_ATTACH(&exclaves_resource->r_notification.notification_klist, kn);
	lck_mtx_unlock(&exclaves_resource->r_mutex);

	error = 0;
out:
	return error;
}
1438
1439 static void
filt_exclaves_notification_detach(struct knote * kn)1440 filt_exclaves_notification_detach(struct knote *kn)
1441 {
1442 exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1443
1444 if (exclaves_resource != NULL) {
1445 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1446 lck_mtx_lock(&exclaves_resource->r_mutex);
1447 kn->kn_exclaves_resource = NULL;
1448 KNOTE_DETACH(&exclaves_resource->r_notification.notification_klist, kn);
1449 lck_mtx_unlock(&exclaves_resource->r_mutex);
1450
1451 exclaves_resource_release(exclaves_resource);
1452 }
1453 }
1454
1455 static int
filt_exclaves_notification_event(struct knote * kn,long hint)1456 filt_exclaves_notification_event(struct knote *kn, long hint)
1457 {
1458 /* ALWAYS CALLED WITH exclaves_resource mutex held */
1459 exclaves_resource_t *exclaves_resource __assert_only = kn->kn_exclaves_resource;
1460 LCK_MTX_ASSERT(&exclaves_resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1461
1462 /*
1463 * if the user is interested in this event, record it.
1464 */
1465 if (kn->kn_sfflags & hint) {
1466 kn->kn_fflags |= hint;
1467 }
1468
1469 /* if we have any matching state, activate the knote */
1470 if (kn->kn_fflags != 0) {
1471 return FILTER_ACTIVE;
1472 } else {
1473 return 0;
1474 }
1475 }
1476
1477 static int
filt_exclaves_notification_touch(struct knote * kn,struct kevent_qos_s * kev)1478 filt_exclaves_notification_touch(struct knote *kn, struct kevent_qos_s *kev)
1479 {
1480 int result;
1481 exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1482 assert3p(exclaves_resource, !=, NULL);
1483 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1484
1485 lck_mtx_lock(&exclaves_resource->r_mutex);
1486 /* accept new mask and mask off output events no long interesting */
1487 kn->kn_sfflags = kev->fflags;
1488 kn->kn_fflags &= kn->kn_sfflags;
1489 if (kn->kn_fflags != 0) {
1490 result = FILTER_ACTIVE;
1491 } else {
1492 result = 0;
1493 }
1494 lck_mtx_unlock(&exclaves_resource->r_mutex);
1495
1496 return result;
1497 }
1498
1499 static int
filt_exclaves_notification_process(struct knote * kn,struct kevent_qos_s * kev)1500 filt_exclaves_notification_process(struct knote *kn, struct kevent_qos_s *kev)
1501 {
1502 int result = 0;
1503 exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1504 assert3p(exclaves_resource, !=, NULL);
1505 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1506
1507 lck_mtx_lock(&exclaves_resource->r_mutex);
1508 if (kn->kn_fflags) {
1509 knote_fill_kevent(kn, kev, 0);
1510 result = FILTER_ACTIVE;
1511 }
1512 lck_mtx_unlock(&exclaves_resource->r_mutex);
1513 return result;
1514 }
1515
/* kevent filter operations for exclaves notification resources. */
SECURITY_READ_ONLY_EARLY(struct filterops) exclaves_notification_filtops = {
	.f_attach = filt_exclaves_notification_attach,
	.f_detach = filt_exclaves_notification_detach,
	.f_event = filt_exclaves_notification_event,
	.f_touch = filt_exclaves_notification_touch,
	.f_process = filt_exclaves_notification_process,
};
1523
1524 kern_return_t
exclaves_notification_create(const char * domain,const char * name,exclaves_resource_t ** out)1525 exclaves_notification_create(const char *domain, const char *name,
1526 exclaves_resource_t **out)
1527 {
1528 assert3p(out, !=, NULL);
1529
1530 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1531 name, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1532
1533 if (resource == NULL) {
1534 return KERN_NOT_FOUND;
1535 }
1536 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1537
1538 lck_mtx_lock(&resource->r_mutex);
1539 exclaves_resource_retain(resource);
1540 lck_mtx_unlock(&resource->r_mutex);
1541
1542 *out = resource;
1543
1544 return KERN_SUCCESS;
1545 }
1546
1547 kern_return_t
exclaves_notification_signal(exclaves_resource_t * exclaves_resource,long event_mask)1548 exclaves_notification_signal(exclaves_resource_t *exclaves_resource, long event_mask)
1549 {
1550 assert3p(exclaves_resource, !=, NULL);
1551 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1552
1553 lck_mtx_lock(&exclaves_resource->r_mutex);
1554 KNOTE(&exclaves_resource->r_notification.notification_klist, event_mask);
1555 lck_mtx_unlock(&exclaves_resource->r_mutex);
1556
1557 return KERN_SUCCESS;
1558 }
1559
/*
 * Look up a notification resource by numeric id within the given domain.
 * Returns NULL when no matching resource exists.
 */
exclaves_resource_t *
exclaves_notification_lookup_by_id(const char *domain, uint64_t id)
{
	return exclaves_resource_lookup_by_id(domain, id,
	    XNUPROXY_RESOURCETYPE_NOTIFICATION);
}
1566
1567 uint64_t
exclaves_service_lookup(const char * domain,const char * name)1568 exclaves_service_lookup(const char *domain, const char *name)
1569 {
1570 assert3p(domain, !=, NULL);
1571 assert3p(name, !=, NULL);
1572
1573 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1574 name, XNUPROXY_RESOURCETYPE_SERVICE);
1575 if (resource == NULL) {
1576 return EXCLAVES_INVALID_ID;
1577 }
1578
1579 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SERVICE);
1580 return resource->r_id;
1581 }
1582
1583 /* -------------------------------------------------------------------------- */
1584 #pragma mark Shared Memory
1585
/*
 * Copy user data into the shared memory backing `resource`.
 *
 * Two destination ranges are supported: size1 bytes at offset1 and size2
 * bytes at offset2.  The source data is read contiguously from `buffer`
 * (the second range's source begins at buffer + size1).  The region must
 * have been mapped with write permission.
 */
kern_return_t
exclaves_resource_shared_memory_copyin(exclaves_resource_t *resource,
    user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
    mach_vm_size_t size2, mach_vm_size_t offset2)
{
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SHAREDMEMORY);

	mach_vm_size_t umax = 0;

	if (buffer == USER_ADDR_NULL || size1 == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	shared_memory_resource_t *sm = &resource->r_shared_memory;
	assert3p(sm->sm_addr, !=, NULL);
	assert3u(sm->sm_size, !=, 0);

	/* Both destination ranges must lie within the shared region. */
	if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Copying in requires a writable mapping. */
	if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) {
		return KERN_PROTECTION_FAILURE;
	}

	if (copyin(buffer, sm->sm_addr + offset1, size1) != 0) {
		return KERN_FAILURE;
	}

	/* Second range: its source follows the first range in userspace. */
	if (copyin(buffer + size1, sm->sm_addr + offset2, size2) != 0) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
1626
/*
 * Copy data out of the shared memory backing `resource` to userspace.
 *
 * Two source ranges are supported: size1 bytes at offset1 and size2 bytes
 * at offset2.  The data is written contiguously to `buffer` (the second
 * range's destination begins at buffer + size1).  The region must have
 * been mapped with read permission.
 */
kern_return_t
exclaves_resource_shared_memory_copyout(exclaves_resource_t *resource,
    user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
    mach_vm_size_t size2, mach_vm_size_t offset2)
{
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
	/* Audio memory copyout also goes through this path. */
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);

	mach_vm_size_t umax = 0;

	if (buffer == USER_ADDR_NULL || size1 == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	shared_memory_resource_t *sm = &resource->r_shared_memory;
	assert3p(sm->sm_addr, !=, NULL);
	assert3u(sm->sm_size, !=, 0);

	/* Both source ranges must lie within the shared region. */
	if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Copying out requires a readable mapping. */
	if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_READ) == 0) {
		return KERN_PROTECTION_FAILURE;
	}

	if (copyout(sm->sm_addr + offset1, buffer, size1) != 0) {
		return KERN_FAILURE;
	}

	/* Second range: its destination follows the first in userspace. */
	if (copyout(sm->sm_addr + offset2, buffer + size1, size2) != 0) {
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}
1668
1669 /* The lower 32bits contain the endpoint id. */
1670 static uint32_t
audio_memory_get_endpoint(exclaves_resource_t * resource)1671 audio_memory_get_endpoint(exclaves_resource_t *resource)
1672 {
1673 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1674 return resource->r_id << 32 >> 32;
1675 }
1676
1677 /* The upper 32bits of the id contain the buffer id. */
1678 static uint32_t
audio_memory_get_buffer_id(exclaves_resource_t * resource)1679 audio_memory_get_buffer_id(exclaves_resource_t *resource)
1680 {
1681 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1682 return resource->r_id >> 32;
1683 }
1684
/*
 * Map the exclaves shared memory region backing `resource` into the
 * kernel, taking a use-count reference on success.
 *
 * If the resource is already active, only a compatibility check is done
 * (existing size must cover the request and permissions must match) and
 * the use count is bumped.  Otherwise the exclaves-side mapping is set
 * up, its physical pages are discovered and mapped into the kernel.
 */
static kern_return_t
shared_memory_map(exclaves_resource_t *resource, size_t size,
    exclaves_buffer_perm_t perm)
{
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);

	/*
	 * It is expected that shared memory is either write-only or read-only.
	 * This is enforced through the userspace APIs (inbound or outbound buffers
	 * respectively).
	 */
	assert(perm == EXCLAVES_BUFFER_PERM_READ ||
	    perm == EXCLAVES_BUFFER_PERM_WRITE);

	kern_return_t kr = KERN_FAILURE;

	/* round size up to nearest page */
	mach_vm_offset_t rounded_size = 0;
	if (size == 0 || mach_vm_round_page_overflow(size, &rounded_size)) {
		return KERN_INVALID_ARGUMENT;
	}
	const size_t page_count = rounded_size / PAGE_SIZE;

	lck_mtx_lock(&resource->r_mutex);

	__block shared_memory_resource_t *sm = &resource->r_shared_memory;

	/*
	 * If already active, bump the use count, check that the perms and size
	 * are compatible and return. Checking the use count is insufficient
	 * here as this can race with a non-locked use count release.
	 */
	if (resource->r_active) {
		/*
		 * Both the permissions and size must match.
		 */
		if (sm->sm_size < rounded_size || sm->sm_perm != perm) {
			lck_mtx_unlock(&resource->r_mutex);
			return KERN_INVALID_ARGUMENT;
		}

		exclaves_resource_retain(resource);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* This is lazily initialised and never de-initialised. */
	if (sm->sm_client.connection == NULL) {
		/* Audio memory packs the endpoint into the low 32 bits of r_id. */
		uint64_t endpoint = resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ?
		    resource->r_id :
		    audio_memory_get_endpoint(resource);

		kr = exclaves_shared_memory_init(endpoint, &sm->sm_client);
		if (kr != KERN_SUCCESS) {
			lck_mtx_unlock(&resource->r_mutex);
			return kr;
		}
	}

	const sharedmemorybase_perms_s sm_perm = perm == EXCLAVES_BUFFER_PERM_WRITE ?
	    SHAREDMEMORYBASE_PERMS_READWRITE : SHAREDMEMORYBASE_PERMS_READONLY;
	sharedmemorybase_mapping_s mapping = 0;
	kr = exclaves_shared_memory_setup(&sm->sm_client, sm_perm, 0,
	    page_count, &mapping);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&resource->r_mutex);
		return kr;
	}

	/*
	 * From this point on exclaves_shared_memory_teardown() must be called
	 * if something goes wrong so that the buffer will be properly unmapped.
	 */
	sm->sm_size = rounded_size;
	sm->sm_perm = perm;
	sm->sm_addr = NULL;

	/*
	 * The shared buffer is now accessible by xnu. Discover the layout of
	 * the memory and map it into the kernel.
	 */
	uint32_t *pages = kalloc_type(uint32_t, page_count,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);
	__block uint32_t idx = 0;
	/* BEGIN IGNORE CODESTYLE */
	kr = exclaves_shared_memory_iterate(&sm->sm_client, &mapping, 0,
	    page_count, ^(uint64_t pa) {
		/* Each discovered physical address must be page-aligned. */
		assert3u(pa & PAGE_MASK, ==, 0);
		assert3u(idx, <, page_count);

		pages[idx++] = (uint32_t)atop(pa);
	});
	/* END IGNORE CODESTYLE */

	if (kr != KERN_SUCCESS) {
		kfree_type(uint32_t, page_count, pages);
		exclaves_shared_memory_teardown(&sm->sm_client, &mapping);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	assert3u(idx, ==, page_count);

	/* Map the discovered pages into the kernel with matching protection. */
	const vm_prot_t prot = (perm & EXCLAVES_BUFFER_PERM_WRITE) != 0 ?
	    VM_PROT_READ | VM_PROT_WRITE :
	    VM_PROT_READ;
	kr = exclaves_memory_map((uint32_t)page_count, pages, prot, &sm->sm_addr);
	kfree_type(uint32_t, page_count, pages);
	if (kr != KERN_SUCCESS) {
		exclaves_shared_memory_teardown(&sm->sm_client, &mapping);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	sm->sm_mapping = mapping;

	exclaves_resource_retain(resource);
	resource->r_active = true;

	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1809
1810 kern_return_t
exclaves_resource_shared_memory_map(const char * domain,const char * name,size_t size,exclaves_buffer_perm_t perm,exclaves_resource_t ** out)1811 exclaves_resource_shared_memory_map(const char *domain, const char *name, size_t size,
1812 exclaves_buffer_perm_t perm, exclaves_resource_t **out)
1813 {
1814 assert3p(out, !=, NULL);
1815
1816 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1817 name, XNUPROXY_RESOURCETYPE_SHAREDMEMORY);
1818 if (resource == NULL) {
1819 return KERN_NOT_FOUND;
1820 }
1821 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SHAREDMEMORY);
1822
1823 kern_return_t kr = shared_memory_map(resource, size, perm);
1824 if (kr != KERN_SUCCESS) {
1825 return kr;
1826 }
1827
1828 *out = resource;
1829 return KERN_SUCCESS;
1830 }
1831
1832
/*
 * Unmap and tear down the shared memory backing `resource`.
 * Called with the resource mutex held and a zero use count.
 */
static void
exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource)
{
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
	LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);

	shared_memory_resource_t *sm = &resource->r_shared_memory;

	/* Remove the kernel-side mapping first. */
	if (sm->sm_addr != NULL) {
		__assert_only kern_return_t kr =
		    exclaves_memory_unmap(sm->sm_addr, sm->sm_size);
		assert3u(kr, ==, KERN_SUCCESS);
		sm->sm_addr = NULL;
		sm->sm_size = 0;
	}

	/* Then tear down the exclaves-side mapping. */
	kern_return_t kr = exclaves_shared_memory_teardown(&sm->sm_client,
	    &sm->sm_mapping);
	if (kr != KERN_SUCCESS) {
		/*
		 * On teardown failure, return without clearing the state
		 * (r_active stays set); just log the error.
		 */
		exclaves_debug_printf(show_errors,
		    "exclaves: failed to teardown shared memory: %s, \n",
		    resource->r_name);
		return;
	}

	bzero(&resource->r_shared_memory, sizeof(resource->r_shared_memory));

	resource->r_active = false;
}
1864
1865 char *
exclaves_resource_shared_memory_get_buffer(exclaves_resource_t * resource,size_t * buffer_len)1866 exclaves_resource_shared_memory_get_buffer(exclaves_resource_t *resource,
1867 size_t *buffer_len)
1868 {
1869 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1870 assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
1871 resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1872
1873 shared_memory_resource_t *sm = &resource->r_shared_memory;
1874 assert3p(sm->sm_addr, !=, NULL);
1875 assert3u(sm->sm_size, !=, 0);
1876
1877 if (buffer_len != NULL) {
1878 *buffer_len = sm->sm_size;
1879 }
1880
1881 return sm->sm_addr;
1882 }
1883
1884 /* -------------------------------------------------------------------------- */
1885 #pragma mark Arbitrated Audio Memory
1886
1887 kern_return_t
exclaves_resource_audio_memory_map(const char * domain,const char * name,size_t size,exclaves_resource_t ** out)1888 exclaves_resource_audio_memory_map(const char *domain, const char *name,
1889 size_t size, exclaves_resource_t **out)
1890 {
1891 assert3p(out, !=, NULL);
1892
1893 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1894 name, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1895 if (resource == NULL) {
1896 return KERN_NOT_FOUND;
1897 }
1898 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1899
1900 kern_return_t kr = shared_memory_map(resource, size,
1901 EXCLAVES_BUFFER_PERM_READ);
1902 if (kr != KERN_SUCCESS) {
1903 return kr;
1904 }
1905
1906 *out = resource;
1907 return KERN_SUCCESS;
1908 }
1909
1910 static void
exclaves_resource_audio_memory_unmap(exclaves_resource_t * resource)1911 exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource)
1912 {
1913 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1914 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1915 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1916
1917 exclaves_resource_shared_memory_unmap(resource);
1918 }
1919
1920 kern_return_t
exclaves_resource_audio_memory_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2,user_addr_t ustatus)1921 exclaves_resource_audio_memory_copyout(exclaves_resource_t *resource,
1922 user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1923 mach_vm_size_t size2, mach_vm_size_t offset2, user_addr_t ustatus)
1924 {
1925 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1926 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1927
1928 kern_return_t kr = KERN_FAILURE;
1929 exclaves_sensor_status_t status;
1930 const uint32_t id = audio_memory_get_buffer_id(resource);
1931
1932 kr = exclaves_sensor_copy(id, size1, offset1, size2, offset2, &status);
1933 if (kr != KERN_SUCCESS) {
1934 return kr;
1935 }
1936
1937 kr = exclaves_resource_shared_memory_copyout(resource, buffer,
1938 size1, offset1, size2, offset2);
1939 if (kr != KERN_SUCCESS) {
1940 return kr;
1941 }
1942
1943 if (ustatus != 0 &&
1944 copyout(&status, ustatus, sizeof(status)) != 0) {
1945 return KERN_FAILURE;
1946 }
1947
1948 return KERN_SUCCESS;
1949 }
1950
1951 #endif /* CONFIG_EXCLAVES */
1952