1 /*
2 * Copyright (c) 2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #if CONFIG_EXCLAVES
30
31 #include <stdint.h>
32 #include <stdbool.h>
33
34 #include <mach/exclaves.h>
35 #include <mach/kern_return.h>
36
37 #include <string.h>
38
39 #include <kern/assert.h>
40 #include <kern/bits.h>
41 #include <kern/queue.h>
42 #include <kern/kalloc.h>
43 #include <kern/locks.h>
44 #include <kern/task.h>
45 #include <kern/thread_call.h>
46
47 #include <vm/pmap.h>
48
49 #include <kern/ipc_kobject.h>
50
51 #include <os/hash.h>
52
53 #include <mach/mach_traps.h>
54 #include <mach/mach_port.h>
55
56 #include <sys/event.h>
57 #include <sys/reason.h>
58
59 #include "exclaves_conclave.h"
60 #include "exclaves_debug.h"
61 #include "exclaves_resource.h"
62 #include "exclaves_sensor.h"
63 #include "exclaves_shared_memory.h"
64 #include "exclaves_xnuproxy.h"
65
66 #include "kern/exclaves.tightbeam.h"
67
68 static LCK_GRP_DECLARE(resource_lck_grp, "exclaves_resource");
69
70 /*
71 * A cache of service ids in the kernel domain
72 */
73 static bitmap_t
74 kernel_service_bitmap[BITMAP_LEN(CONCLAVE_SERVICE_MAX)] = {0};
75
76 /*
77 * Exclave Resources
78 *
79 * Exclaves provide a fixed static set of resources available to XNU. Some
80 * examples of types of resources:
81 * - Conclave managers
82 * - Services
83 * - Named buffers
84 * - Audio buffers
85 * ...
86 *
87 * Each resource has a name, a type and a corresponding identifier which is
88 * shared between XNU and Exclaves. Resources are scoped by what entities are
89 * allowed to access them.
90 * Resources are discovered during boot and made available in a two-level table
91 * scheme. The root table collects resources by their scope, with the
92 * second-level tables listing the actual resources.
93 *
94 *
95 * Root Table
96 * ┌────────────────────────────┐
97 * │ ┌────────────────────────┐ │
98 * │ │ "com.apple.kernel" │─┼─────┐
99 * │ └────────────────────────┘ │ │
100 * │ ┌────────────────────────┐ │ │
101 * │ │"com.apple.conclave.a" │─┼─┐ │
102 * │ └────────────────────────┘ │ │ │
103 * │ ┌────────────────────────┐ │ │ │
104 * │ │"com.apple.conclave.b" │ │ │ │
105 * │ └────────────────────────┘ │ │ │
106 * │ ┌────────────────────────┐ │ │ │
107 * │ │ "com.apple.driver.a" │ │ │ │
108 * │ └────────────────────────┘ │ │ │
109 * │ ... │ │ │
110 * │ │ │ │
111 * └────────────────────────────┘ │ │
112 * ┌─────────────────────────┘ │
113 * │ │
114 * │ ┌─────────────────────────┘
115 * │ │
116 * │ │
117 * │ │
118 * │ └──▶ "com.apple.kernel"
119 * │ ┌─────────────────────────────────────────────────────┐
120 * │ │┌───────────────────────┬──────────────────┬────────┐│
121 * │ ││"com.apple.conclave.a" │ CONCLAVE_MANAGER │ 0x1234 ││
122 * │ │└───────────────────────┴──────────────────┴────────┘│
123 * │ │┌───────────────────────┬──────────────────┬────────┐│
124 * │ ││"com.apple.conclave.b" │ CONCLAVE_MANAGER │ 0x7654 ││
125 * │ │└───────────────────────┴──────────────────┴────────┘│
126 * │ │ │
127 * │ │ ... │
128 * │ └─────────────────────────────────────────────────────┘
129 * │
130 * └─────▶ "com.apple.conclave.a"
131 * ┌─────────────────────────────────────────────────────┐
132 * │┌───────────────────────┬──────────────────┬────────┐│
133 * ││ "audio_buf" │ AUDIO_BUFFER │ 0x9999 ││
134 * │└───────────────────────┴──────────────────┴────────┘│
135 * │┌───────────────────────┬──────────────────┬────────┐│
136 * ││ "service_x" │ SERVICE │ 0x1111 ││
137 * │└───────────────────────┴──────────────────┴────────┘│
138 * │┌───────────────────────┬──────────────────┬────────┐│
139 * ││ "named_buffer_x" │ NAMED_BUFFER │0x66565 ││
140 * │└───────────────────────┴──────────────────┴────────┘│
141 * │ ... │
142 * └─────────────────────────────────────────────────────┘
143 *
144 * ...
145 *
146 *
147 * Resources can be looked up by first finding the root table entry (the
148 * "domain") and then searching for the identifier in that domain.
149 * For example to lookup the conclave manager ID for "com.apple.conclave.a",
150 * the "com.apple.kernel" domain would be found and then within that domain, the
151 * search would continue using the conclave name and the CONCLAVE_MANAGER type.
152 * Every conclave domain has a corresponding CONCLAVE_MANAGER resource in the
153 * "com.apple.kernel" domain.
154 */
155
156 /* -------------------------------------------------------------------------- */
157 #pragma mark Hash Table
158
159 #define TABLE_LEN 64
160
161 /*
162 * A table item is what ends up being stored in the hash table. It has a key and
163 * a value.
164 */
165 typedef struct {
166 const void *i_key;
167 size_t i_key_len;
168 void *i_value;
169
170 queue_chain_t i_chain;
171 } table_item_t;
172
173 /*
174 * The hash table consists of an array of buckets (queues). The hashing function
175 * will choose in which bucket a particular item belongs.
176 */
177 typedef struct {
178 queue_head_t *t_buckets;
179 size_t t_buckets_count;
180 } table_t;
181
182 /*
183 * Given a key, return the corresponding bucket.
184 */
185 static queue_head_t *
get_bucket(table_t * table,const void * key,size_t key_len)186 get_bucket(table_t *table, const void *key, size_t key_len)
187 {
188 const uint32_t idx = os_hash_jenkins(key, key_len) &
189 (table->t_buckets_count - 1);
190 return &table->t_buckets[idx];
191 }
192
193 /*
194 * Insert a new table item associated with 'key' into a table.
195 */
196 static void
table_put(table_t * table,const void * key,size_t key_len,table_item_t * item)197 table_put(table_t *table, const void *key, size_t key_len, table_item_t *item)
198 {
199 assert3p(item->i_chain.next, ==, NULL);
200 assert3p(item->i_chain.prev, ==, NULL);
201 assert3p(item->i_value, !=, NULL);
202
203 queue_head_t *head = get_bucket(table, key, key_len);
204 enqueue(head, &item->i_chain);
205 }
206
207 /*
208 * Iterate through all items matching 'key' calling cb for each.
209 */
210 static void
211 table_get(table_t *table, const void *key, size_t key_len, bool (^cb)(void *))
212 {
213 const queue_head_t *head = get_bucket(table, key, key_len);
214 table_item_t *elem = NULL;
215
216 assert3p(head, !=, NULL);
217
qe_foreach_element(elem,head,i_chain)218 qe_foreach_element(elem, head, i_chain) {
219 if (elem->i_key_len == key_len &&
220 memcmp(elem->i_key, key, elem->i_key_len) == 0) {
221 if (cb(elem->i_value)) {
222 return;
223 }
224 }
225 }
226
227 return;
228 }
229
230 /*
231 * Initialize the queues.
232 */
233 static void
table_init(table_t * table)234 table_init(table_t *table)
235 {
236 assert3u(table->t_buckets_count & (table->t_buckets_count - 1), ==, 0);
237
238 /* Initialise each bucket. */
239 for (size_t i = 0; i < table->t_buckets_count; i++) {
240 queue_init(&table->t_buckets[i]);
241 }
242 }
243
244 /*
245 * Allocate a new table with the specified number of buckets.
246 */
247 static table_t *
table_alloc(size_t nbuckets)248 table_alloc(size_t nbuckets)
249 {
250 assert3u(nbuckets, >, 0);
251 assert3u(nbuckets & (nbuckets - 1), ==, 0);
252
253 table_t *table = kalloc_type(table_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
254
255 table->t_buckets_count = nbuckets;
256 table->t_buckets = kalloc_type(queue_head_t, nbuckets,
257 Z_WAITOK | Z_ZERO | Z_NOFAIL);
258
259 return table;
260 }
261
262 static void
263 table_iterate(table_t *table,
264 bool (^cb)(const void *key, size_t key_len, void *value))
265 {
266 for (size_t i = 0; i < table->t_buckets_count; i++) {
267 const queue_head_t *head = &table->t_buckets[i];
268 table_item_t *elem = NULL;
269
qe_foreach_element(elem,head,i_chain)270 qe_foreach_element(elem, head, i_chain) {
271 if (cb(elem->i_key, elem->i_key_len, elem->i_value)) {
272 return;
273 }
274 }
275 }
276 }
277
278
279 /* -------------------------------------------------------------------------- */
280 #pragma mark Root Table
281
282 /*
283 * The root table is a hash table which contains an entry for every top-level
284 * domain.
285 * Domains scope resources. For example a conclave domain will contain a list of
286 * services available in that conclave. The kernel itself gets its own domain
287 * which holds conclave managers and other resources the kernel communicates
288 * with directly.
289 */
/*
 * NOTE(review): t_buckets points at a statically-allocated unnamed
 * compound literal, so no boot-time allocation is needed. The cast
 * assumes queue_chain_t and queue_head_t are layout-compatible - confirm
 * against <kern/queue.h>. The bucket queues themselves are initialized by
 * table_init(&root_table) in exclaves_resource_init().
 */
table_t root_table = {
	.t_buckets = (queue_chain_t *)(queue_chain_t[TABLE_LEN]){},
	.t_buckets_count = TABLE_LEN,
};
294
295 /*
296 * Entries in the root table. Each itself a table containing resources available
297 * in that domain.
298 */
typedef struct {
	/* Domain name; also serves as this entry's key in the root table. */
	char d_name[EXCLAVES_RESOURCE_NAME_MAX];
	/* Resources in this domain, keyed by name. */
	table_t *d_table_name;
	/*
	 * Resources which additionally support lookup by ID - only some
	 * types are inserted here (see exclaves_resource_alloc()).
	 */
	table_t *d_table_id;
} exclaves_resource_domain_t;
304
305 static exclaves_resource_domain_t *
lookup_domain(const char * domain_name)306 lookup_domain(const char *domain_name)
307 {
308 __block exclaves_resource_domain_t *domain = NULL;
309 table_get(&root_table, domain_name, strlen(domain_name), ^bool (void *data) {
310 domain = data;
311 return true;
312 });
313
314 return domain;
315 }
316
317 static void
318 iterate_domains(bool (^cb)(exclaves_resource_domain_t *))
319 {
320 table_iterate(&root_table,
321 ^(__unused const void *key, __unused size_t key_len, void *value) {
322 exclaves_resource_domain_t *domain = value;
323 return cb(domain);
324 });
325 }
326
327 static void
328 iterate_resources(exclaves_resource_domain_t *domain,
329 bool (^cb)(exclaves_resource_t *))
330 {
331 table_iterate(domain->d_table_name,
332 ^(__unused const void *key, __unused size_t key_len, void *value) {
333 exclaves_resource_t *resource = value;
334 return cb(resource);
335 });
336 }
337
338 static exclaves_resource_t *
lookup_resource_by_name(exclaves_resource_domain_t * domain,const char * name,xnuproxy_resourcetype_s type)339 lookup_resource_by_name(exclaves_resource_domain_t *domain, const char *name,
340 xnuproxy_resourcetype_s type)
341 {
342 __block exclaves_resource_t *resource = NULL;
343 table_get(domain->d_table_name, name, strlen(name), ^bool (void *data) {
344 exclaves_resource_t *tmp = data;
345 if (tmp->r_type == type) {
346 resource = data;
347 return true;
348 }
349 return false;
350 });
351
352 return resource;
353 }
354
355 static exclaves_resource_t *
lookup_resource_by_id(exclaves_resource_domain_t * domain,uint64_t id,xnuproxy_resourcetype_s type)356 lookup_resource_by_id(exclaves_resource_domain_t *domain, uint64_t id,
357 xnuproxy_resourcetype_s type)
358 {
359 __block exclaves_resource_t *resource = NULL;
360 table_get(domain->d_table_id, &id, sizeof(id), ^bool (void *data) {
361 exclaves_resource_t *tmp = data;
362 if (tmp->r_type == type) {
363 resource = data;
364 return true;
365 }
366 return false;
367 });
368
369 return resource;
370 }
371
372 static exclaves_resource_domain_t *
exclaves_resource_domain_alloc(const char * scope)373 exclaves_resource_domain_alloc(const char *scope)
374 {
375 assert3u(strlen(scope), >, 0);
376 assert3u(strlen(scope), <=, EXCLAVES_RESOURCE_NAME_MAX);
377
378 exclaves_resource_domain_t *domain = kalloc_type(
379 exclaves_resource_domain_t, Z_WAITOK | Z_ZERO | Z_NOFAIL);
380 (void) strlcpy(domain->d_name, scope,
381 sizeof(domain->d_name));
382
383 domain->d_table_name = table_alloc(TABLE_LEN);
384 table_init(domain->d_table_name);
385
386 domain->d_table_id = table_alloc(TABLE_LEN);
387 table_init(domain->d_table_id);
388
389 table_item_t *item = kalloc_type(table_item_t,
390 Z_WAITOK | Z_ZERO | Z_NOFAIL);
391 item->i_key = domain->d_name;
392 item->i_key_len = strlen(domain->d_name);
393 item->i_value = domain;
394
395 table_put(&root_table, scope, strlen(scope), item);
396
397 return domain;
398 }
399
/*
 * Allocate a new resource of the given type/name/id, create its backing
 * IKOT_EXCLAVES_RESOURCE kobject port, and insert it into the domain's
 * name table (and, for some types, the ID table too). Returns the new
 * resource; allocation cannot fail (Z_NOFAIL).
 */
static exclaves_resource_t *
exclaves_resource_alloc(xnuproxy_resourcetype_s type, const char *name, uint64_t id,
    exclaves_resource_domain_t *domain, bool connected)
{
	exclaves_resource_t *resource = kalloc_type(exclaves_resource_t,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	resource->r_type = type;
	resource->r_id = id;
	resource->r_active = false;
	resource->r_connected = connected;
	os_atomic_store(&resource->r_usecnt, 0, relaxed);

	/*
	 * Each resource has an associated kobject of type
	 * IKOT_EXCLAVES_RESOURCE.
	 */
	ipc_port_t port = ipc_kobject_alloc_port((ipc_kobject_t)resource,
	    IKOT_EXCLAVES_RESOURCE, IPC_KOBJECT_ALLOC_NSREQUEST);
	resource->r_port = port;

	lck_mtx_init(&resource->r_mutex, &resource_lck_grp, NULL);

	(void) strlcpy(resource->r_name, name, sizeof(resource->r_name));


	/* Stick the newly created resource into the name table. */
	table_item_t *name_item = kalloc_type(table_item_t,
	    Z_WAITOK | Z_ZERO | Z_NOFAIL);

	/* Key by the resource's own copy of the name so key and item share a lifetime. */
	name_item->i_key = resource->r_name;
	name_item->i_key_len = strlen(resource->r_name);
	name_item->i_value = resource;

	/* Names must be unique within a (domain, type) pair. */
	assert(lookup_resource_by_name(domain, name, type) == NULL);
	table_put(domain->d_table_name, name, strlen(name), name_item);

	/*
	 * Some types also need to lookup by id in addition to looking up by
	 * name.
	 */
	switch (type) {
	case XNUPROXY_RESOURCETYPE_NOTIFICATION: {
		/* Stick the newly created resource into the ID table. */
		table_item_t *id_item = kalloc_type(table_item_t,
		    Z_WAITOK | Z_ZERO | Z_NOFAIL);
		/* Key by the resource's embedded ID for the same lifetime reason as above. */
		id_item->i_key = &resource->r_id;
		id_item->i_key_len = sizeof(resource->r_id);
		id_item->i_value = resource;

		/* IDs must be unique within a (domain, type) pair. */
		assert(lookup_resource_by_id(domain, id, type) == NULL);
		table_put(domain->d_table_id, &id, sizeof(id), id_item);
		break;
	}

	default:
		break;
	}

	return resource;
}
461
462 /* -------------------------------------------------------------------------- */
463 #pragma mark Exclaves Resources
464
/* No-senders handler for resource kobject ports (defined below). */
static void exclaves_resource_no_senders(ipc_port_t port,
    mach_port_mscount_t mscount);

/*
 * Register the IKOT_EXCLAVES_RESOURCE kobject type: the kobject pointer is
 * stable for the port's lifetime and no-senders notifications are
 * delivered to exclaves_resource_no_senders().
 */
IPC_KOBJECT_DEFINE(IKOT_EXCLAVES_RESOURCE,
    .iko_op_stable = true,
    .iko_op_no_senders = exclaves_resource_no_senders);

/* Forward declarations of type-specific init/teardown routines. */
static void exclaves_conclave_init(exclaves_resource_t *resource);
static void exclaves_notification_init(exclaves_resource_t *resource);
static void exclaves_named_buffer_unmap(exclaves_resource_t *resource);
static void exclaves_audio_buffer_delete(exclaves_resource_t *resource);
static void exclaves_resource_sensor_reset(exclaves_resource_t *resource);
static void exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource);
static void exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource);
479
/*
 * Walk every domain and record each SERVICE resource's ID in the
 * appropriate bitmap: the owning conclave manager's bitmap when the
 * domain has one, otherwise (for the kernel/darwin domains) the global
 * kernel_service_bitmap. Services in domains with neither are skipped.
 * Called once at init, after all resources have been discovered.
 */
static void
populate_conclave_services(void)
{
	/* BEGIN IGNORE CODESTYLE */
	iterate_domains(^(exclaves_resource_domain_t *domain) {

		const bool is_kernel_domain =
		    (strcmp(domain->d_name, EXCLAVES_DOMAIN_KERNEL) == 0 ||
		    strcmp(domain->d_name, EXCLAVES_DOMAIN_DARWIN) == 0);

		/* The conclave manager for this domain, if any. */
		exclaves_resource_t *cm = exclaves_resource_lookup_by_name(
		    EXCLAVES_DOMAIN_KERNEL, domain->d_name,
		    XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

		iterate_resources(domain, ^(exclaves_resource_t *resource) {
			if (resource->r_type != XNUPROXY_RESOURCETYPE_SERVICE) {
				return (bool)false;
			}

			if (cm != NULL) {
				conclave_resource_t *c = &cm->r_conclave;
				bitmap_set(c->c_service_bitmap,
				    (uint32_t)resource->r_id);
				return (bool)false;
			}

			if (is_kernel_domain) {
				bitmap_set(kernel_service_bitmap,
				    (uint32_t)resource->r_id);
				return (bool)false;

			}

			/*
			 * Ignore services that are in unknown domains. This can
			 * happen if a conclave manager doesn't have a populated
			 * endpoint (for example during bringup).
			 */
			return (bool)false;
		});

		return (bool)false;
	});
	/* END IGNORE CODESTYLE */
}
525
526 /*
527 * Discover all the static exclaves resources populating the resource tables as
528 * we go.
529 */
kern_return_t
exclaves_resource_init(void)
{
	/* Initialize the root table. */
	table_init(&root_table);

	/*
	 * Ask xnuproxy to enumerate every statically-configured resource;
	 * the block runs once per resource.
	 */
	/* BEGIN IGNORE CODESTYLE */
	kern_return_t kr = exclaves_xnuproxy_resource_info(
	    ^(const char *name, const char *scope,
	    xnuproxy_resourcetype_s type, uint64_t id, bool connected) {
		/*
		 * Every resource is scoped to a specific domain, find the
		 * domain (or create one if it doesn't exist).
		 */
		exclaves_resource_domain_t *domain = lookup_domain(scope);
		if (domain == NULL) {
			domain = exclaves_resource_domain_alloc(scope);
		}

		/* Allocate a new resource in the domain. */
		exclaves_resource_t *resource = exclaves_resource_alloc(type,
		    name, id, domain, connected);

		/*
		 * Type specific initialization.
		 */
		switch (type) {
		case XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER:
			exclaves_conclave_init(resource);
			break;

		case XNUPROXY_RESOURCETYPE_NOTIFICATION:
			exclaves_notification_init(resource);
			break;

		case XNUPROXY_RESOURCETYPE_SERVICE:
			/* Service IDs index fixed-size bitmaps (see populate_conclave_services()). */
			assert3u(resource->r_id, <, CONCLAVE_SERVICE_MAX);
			break;

		default:
			break;
		}
	});
	/* END IGNORE CODESTYLE */

	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Populate the conclave service ID bitmaps. */
	populate_conclave_services();

	return KERN_SUCCESS;
}
584
585 exclaves_resource_t *
exclaves_resource_lookup_by_name(const char * domain_name,const char * name,xnuproxy_resourcetype_s type)586 exclaves_resource_lookup_by_name(const char *domain_name, const char *name,
587 xnuproxy_resourcetype_s type)
588 {
589 assert3u(strlen(domain_name), >, 0);
590 assert3u(strlen(name), >, 0);
591
592 exclaves_resource_domain_t *domain = lookup_domain(domain_name);
593 if (domain == NULL) {
594 return NULL;
595 }
596
597 exclaves_resource_t *r = lookup_resource_by_name(domain, name, type);
598
599 /* Ignore entries not marked connected. */
600 if (r == NULL || !r->r_connected) {
601 return NULL;
602 }
603
604 return r;
605 }
606
607 static exclaves_resource_t *
exclaves_resource_lookup_by_id(const char * domain_name,uint64_t id,xnuproxy_resourcetype_s type)608 exclaves_resource_lookup_by_id(const char *domain_name, uint64_t id,
609 xnuproxy_resourcetype_s type)
610 {
611 assert3u(strlen(domain_name), >, 0);
612
613 exclaves_resource_domain_t *domain = lookup_domain(domain_name);
614 if (domain == NULL) {
615 return NULL;
616 }
617
618 exclaves_resource_t *r = lookup_resource_by_id(domain, id, type);
619
620 /* Ignore entries not marked connected. */
621 if (r == NULL || !r->r_connected) {
622 return NULL;
623 }
624
625 return r;
626 }
627
628 const char *
exclaves_resource_name(const exclaves_resource_t * resource)629 exclaves_resource_name(const exclaves_resource_t *resource)
630 {
631 return resource->r_name;
632 }
633
634 /*
635 * Notes on use-count management
636 * For the most part everything is done under the resource lock.
637 * In some cases, it's necessary to grab/release a use count without
638 * holding the lock - for example the realtime audio paths doing copyin/copyout
639 * of named buffers/audio buffers.
640 * To prevent against races, initialization/de-initialization should always
641 * recheck the use-count under the lock.
642 */
643 uint32_t
exclaves_resource_retain(exclaves_resource_t * resource)644 exclaves_resource_retain(exclaves_resource_t *resource)
645 {
646 uint32_t orig =
647 os_atomic_inc_orig(&resource->r_usecnt, relaxed);
648 assert3u(orig, <, UINT32_MAX);
649
650 return orig;
651 }
652
/*
 * Drop a use count on the resource. When the last count is dropped,
 * type-specific teardown (buffer unmap, sensor reset, ...) runs under the
 * resource lock. The common path (count stays non-zero) is lock-free.
 */
void
exclaves_resource_release(exclaves_resource_t *resource)
{
	/*
	 * Drop the use count without holding the lock (this path may be called
	 * by RT threads and should be RT-safe).
	 */
	uint32_t orig = os_atomic_dec_orig(&resource->r_usecnt, release);
	assert3u(orig, !=, 0);
	if (orig != 1) {
		return;
	}

	/*
	 * Now grab the lock. The RT-safe paths calling this function shouldn't
	 * end up here unless there's a bug or mis-behaving user code (like
	 * deallocating an in-use mach port).
	 */
	lck_mtx_lock(&resource->r_mutex);

	/*
	 * Re-check the use count - as a second user of the resource
	 * may have snuck in in the meantime.
	 */
	if (os_atomic_load(&resource->r_usecnt, acquire) > 0) {
		lck_mtx_unlock(&resource->r_mutex);
		return;
	}

	/* Last use is gone - run type-specific teardown under the lock. */
	switch (resource->r_type) {
	case XNUPROXY_RESOURCETYPE_NAMEDBUFFER:
		exclaves_named_buffer_unmap(resource);
		break;

	case XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER:
		exclaves_audio_buffer_delete(resource);
		break;

	case XNUPROXY_RESOURCETYPE_SENSOR:
		exclaves_resource_sensor_reset(resource);
		break;

	case XNUPROXY_RESOURCETYPE_SHAREDMEMORY:
		exclaves_resource_shared_memory_unmap(resource);
		break;

	case XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY:
		exclaves_resource_audio_memory_unmap(resource);
		break;

	default:
		/* Other types need no teardown. */
		break;
	}

	lck_mtx_unlock(&resource->r_mutex);
}
709
/*
 * Translate a port name in 'space' into its exclaves resource.
 * On success, a use count has been taken on the resource and it is
 * returned via 'out'; the caller must drop it with
 * exclaves_resource_release().
 * Returns KERN_INVALID_NAME for an invalid name, KERN_INVALID_CAPABILITY
 * when the port is not an exclaves resource port, or a translation error.
 */
kern_return_t
exclaves_resource_from_port_name(ipc_space_t space, mach_port_name_t name,
    exclaves_resource_t **out)
{
	kern_return_t kr = KERN_SUCCESS;
	ipc_port_t port = IPC_PORT_NULL;

	if (!MACH_PORT_VALID(name)) {
		return KERN_INVALID_NAME;
	}

	kr = ipc_port_translate_send(space, name, &port);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* port is locked */
	assert(IP_VALID(port));

	exclaves_resource_t *resource = ipc_kobject_get_stable(port,
	    IKOT_EXCLAVES_RESOURCE);

	/* The port is valid, but doesn't denote an exclaves resource. */
	if (resource == NULL) {
		ip_mq_unlock(port);
		return KERN_INVALID_CAPABILITY;
	}

	/* Grab a reference while the port is good and the ipc lock is held. */
	__assert_only uint32_t orig = exclaves_resource_retain(resource);
	/* A send right exists, so the port must already hold a use count. */
	assert3u(orig, >, 0);

	ip_mq_unlock(port);
	*out = resource;

	return KERN_SUCCESS;
}
747
748 /*
749 * Consumes a reference to the resource. On success the resource is reference is
750 * associated with the lifetime of the port.
751 */
752 kern_return_t
exclaves_resource_create_port_name(exclaves_resource_t * resource,ipc_space_t space,mach_port_name_t * name)753 exclaves_resource_create_port_name(exclaves_resource_t *resource, ipc_space_t space,
754 mach_port_name_t *name)
755 {
756 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
757
758 ipc_port_t port = resource->r_port;
759
760 ip_mq_lock(port);
761
762 /* Create an armed send right. */
763 kern_return_t ret = ipc_kobject_make_send_nsrequest_locked(port,
764 resource, IKOT_EXCLAVES_RESOURCE);
765 if (ret != KERN_SUCCESS &&
766 ret != KERN_ALREADY_WAITING) {
767 ip_mq_unlock(port);
768 exclaves_resource_release(resource);
769 return ret;
770 }
771
772 /*
773 * If there was already a send right, then the port already has an
774 * associated use count so drop this one.
775 */
776 if (port->ip_srights > 1) {
777 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 1);
778 exclaves_resource_release(resource);
779 }
780
781 ip_mq_unlock(port);
782
783 *name = ipc_port_copyout_send(port, space);
784 if (!MACH_PORT_VALID(*name)) {
785 /*
786 * ipc_port_copyout_send() releases the send right on failure
787 * (possibly calling exclaves_resource_no_senders() in the
788 * process).
789 */
790 return KERN_RESOURCE_SHORTAGE;
791 }
792
793 return KERN_SUCCESS;
794 }
795
/*
 * No-senders notification for IKOT_EXCLAVES_RESOURCE ports.
 * Drops the use count that was associated with the port's send rights in
 * exclaves_resource_create_port_name().
 */
static void
exclaves_resource_no_senders(ipc_port_t port,
    __unused mach_port_mscount_t mscount)
{
	exclaves_resource_t *resource = ipc_kobject_get_stable(port,
	    IKOT_EXCLAVES_RESOURCE);

	exclaves_resource_release(resource);
}
805
806 /* -------------------------------------------------------------------------- */
807 #pragma mark Named Buffers
808
/*
 * Walk the kernel mapping of a named/audio buffer, chunk by chunk,
 * covering [offset, offset + len). The buffer is backed by nb_nranges
 * contiguous page ranges; 'cb' is invoked with a pointer into the current
 * range and the number of bytes to process there.
 * Returns 0 on success, or the first non-zero value returned by 'cb'
 * (which aborts the walk). The caller must hold a use count and have
 * validated that offset + len fits in the buffer.
 */
int
exclaves_named_buffer_io(exclaves_resource_t *resource, off_t offset,
    size_t len, int (^cb)(char *, size_t))
{
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_NAMEDBUFFER ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER);
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);

	named_buffer_resource_t *nb = &resource->r_named_buffer;
	assert3u(nb->nb_nranges, >, 0);
	assert3u(nb->nb_size, !=, 0);
	assert3u(offset + len, <=, nb->nb_size);

	for (int i = 0; i < nb->nb_nranges; i++) {
		/* Skip forward to the starting range. */
		if (offset >= nb->nb_range[i].npages * PAGE_SIZE) {
			offset -= nb->nb_range[i].npages * PAGE_SIZE;
			continue;
		}

		/* Bytes available in this range from 'offset', capped at the remaining 'len'. */
		size_t size = MIN((nb->nb_range[i].npages * PAGE_SIZE) - offset, len);
		int ret = cb(nb->nb_range[i].address + offset, size);
		if (ret != 0) {
			return ret;
		}

		/* Subsequent ranges are consumed from their start. */
		offset = 0;
		len -= size;

		if (len == 0) {
			break;
		}
	}
	/* The ranges must have covered the whole request. */
	assert3u(len, ==, 0);

	return 0;
}
846
/*
 * Copy 'len' bytes in from user address '_src' to buffer offset 'offset'.
 * The caller must have validated the range and hold write permission on
 * the buffer (asserted). Returns KERN_FAILURE if any copyin fails.
 */
static kern_return_t
exclaves_named_buffer_io_copyin(exclaves_resource_t *resource,
    user_addr_t _src, off_t offset, size_t len)
{
	assert3u(resource->r_named_buffer.nb_perm & EXCLAVES_BUFFER_PERM_WRITE,
	    !=, 0);

	__block user_addr_t src = _src;
	return exclaves_named_buffer_io(resource, offset, len,
	    ^(char *buffer, size_t size) {
		if (copyin(src, buffer, size) != 0) {
			return KERN_FAILURE;
		}

		/* Advance past the chunk just copied. */
		src += size;
		return KERN_SUCCESS;
	});
}
865
/*
 * Copy user data into a named buffer from two user ranges: 'size1' bytes
 * at 'buffer' to buffer offset 'offset1', then 'size2' bytes at
 * 'buffer + size1' to offset 'offset2' (size2 may be 0).
 * Both destination ranges are validated against the buffer size and the
 * buffer must be writable. The caller must hold a use count (asserted).
 */
kern_return_t
exclaves_named_buffer_copyin(exclaves_resource_t *resource,
    user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
    mach_vm_size_t size2, mach_vm_size_t offset2)
{
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NAMEDBUFFER);

	mach_vm_size_t umax = 0;
	kern_return_t kr = KERN_FAILURE;

	if (buffer == USER_ADDR_NULL || size1 == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	named_buffer_resource_t *nb = &resource->r_named_buffer;
	assert3u(nb->nb_nranges, >, 0);
	assert3u(nb->nb_size, !=, 0);

	/* Overflow-safe bounds checks: offset + size must fit in the buffer. */
	if (os_add_overflow(offset1, size1, &umax) || umax > nb->nb_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if (os_add_overflow(offset2, size2, &umax) || umax > nb->nb_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((nb->nb_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) {
		return KERN_PROTECTION_FAILURE;
	}

	kr = exclaves_named_buffer_io_copyin(resource, buffer, offset1, size1);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Second range follows the first in user space. */
	kr = exclaves_named_buffer_io_copyin(resource, buffer + size1, offset2,
	    size2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return KERN_SUCCESS;
}
910
/*
 * Copy 'len' bytes out from buffer offset 'offset' to user address
 * '_dst'. The caller must have validated the range and hold read
 * permission on the buffer (asserted). Returns KERN_FAILURE if any
 * copyout fails.
 */
static kern_return_t
exclaves_named_buffer_io_copyout(exclaves_resource_t *resource,
    user_addr_t _dst, off_t offset, size_t len)
{
	assert3u(resource->r_named_buffer.nb_perm & EXCLAVES_BUFFER_PERM_READ,
	    !=, 0);

	__block user_addr_t dst = _dst;
	return exclaves_named_buffer_io(resource, offset, len,
	    ^(char *buffer, size_t size) {
		if (copyout(buffer, dst, size) != 0) {
			return KERN_FAILURE;
		}

		/* Advance past the chunk just copied. */
		dst += size;
		return KERN_SUCCESS;
	});
}
929
/*
 * Copy data out of a named/audio buffer into two user ranges: 'size1'
 * bytes from buffer offset 'offset1' to 'buffer', then 'size2' bytes
 * from offset 'offset2' to 'buffer + size1' (size2 may be 0).
 * Both source ranges are validated against the buffer size and the
 * buffer must be readable. The caller must hold a use count (asserted).
 */
kern_return_t
exclaves_named_buffer_copyout(exclaves_resource_t *resource,
    user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
    mach_vm_size_t size2, mach_vm_size_t offset2)
{
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_NAMEDBUFFER ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER);

	mach_vm_size_t umax = 0;
	kern_return_t kr = KERN_FAILURE;

	if (buffer == USER_ADDR_NULL || size1 == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	named_buffer_resource_t *nb = &resource->r_named_buffer;
	assert3u(nb->nb_nranges, >, 0);
	assert3u(nb->nb_size, !=, 0);

	/* Overflow-safe bounds checks: offset + size must fit in the buffer. */
	if (os_add_overflow(offset1, size1, &umax) || umax > nb->nb_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if (os_add_overflow(offset2, size2, &umax) || umax > nb->nb_size) {
		return KERN_INVALID_ARGUMENT;
	}

	if ((nb->nb_perm & EXCLAVES_BUFFER_PERM_READ) == 0) {
		return KERN_PROTECTION_FAILURE;
	}

	kr = exclaves_named_buffer_io_copyout(resource, buffer, offset1, size1);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* Second range follows the first in user space. */
	kr = exclaves_named_buffer_io_copyout(resource, buffer + size1,
	    offset2, size2);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return KERN_SUCCESS;
}
975
976 static void
named_buffer_unmap(exclaves_resource_t * resource)977 named_buffer_unmap(exclaves_resource_t *resource)
978 {
979 assert(resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER ||
980 resource->r_type == XNUPROXY_RESOURCETYPE_NAMEDBUFFER);
981 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
982
983 /* BEGIN IGNORE CODESTYLE */
984 resource->r_type == XNUPROXY_RESOURCETYPE_NAMEDBUFFER ?
985 exclaves_named_buffer_unmap(resource) :
986 exclaves_audio_buffer_delete(resource);
987 /* END IGNORE CODESTYLE */
988 }
989
/*
 * Map a named buffer or arbitrated audio buffer of at least `size` bytes
 * (rounded up to a page multiple) with the requested permissions.
 *
 * If the resource is already active, the existing mapping is reused: the
 * use count is bumped after checking the requested size/perms are
 * compatible with the existing mapping. Otherwise the buffer is mapped
 * via xnuproxy and its (possibly fragmented) physical layout is
 * discovered and recorded so it can be accessed from xnu.
 *
 * Returns KERN_SUCCESS with a use-count reference held on success.
 */
static kern_return_t
named_buffer_map(exclaves_resource_t *resource, size_t size,
    exclaves_buffer_perm_t perm)
{
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_NAMEDBUFFER);
	assert3u(perm & ~(EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE), ==, 0);

	kern_return_t kr = KERN_FAILURE;

	if (size == 0) {
		return KERN_INVALID_ARGUMENT;
	}

	/* round size up to nearest page */
	mach_vm_offset_t rounded_size = 0;
	if (mach_vm_round_page_overflow(size, &rounded_size)) {
		return KERN_INVALID_ARGUMENT;
	}

	lck_mtx_lock(&resource->r_mutex);

	/*
	 * If already active, bump the use count, check that the perms and size
	 * are compatible and return. Checking the use count is insufficient
	 * here as this can race with with a non-locked use count release.
	 */
	if (resource->r_active) {
		const named_buffer_resource_t *nb = &resource->r_named_buffer;

		/*
		 * When only inbound and outbound buffers are supported, the
		 * perm check should be updated to ensure that the perms match
		 * (rather than being a subset). */
		if (nb->nb_size < rounded_size ||
		    (nb->nb_perm & perm) == 0) {
			lck_mtx_unlock(&resource->r_mutex);
			return KERN_INVALID_ARGUMENT;
		}

		exclaves_resource_retain(resource);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Not yet mapped: ask xnuproxy to map it. `ro` reports whether the
	 * exclave side granted a read-only mapping. */
	bool ro = true;
	if (resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER) {
		kr = exclaves_xnuproxy_audio_buffer_map(resource->r_id, rounded_size, &ro);
	} else {
		assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NAMEDBUFFER);
		kr = exclaves_xnuproxy_named_buffer_map(resource->r_id, rounded_size, &ro);
	}

	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&resource->r_mutex);
		return kr;
	}

	/*
	 * From this point on named_buffer_unmap() must be called if
	 * something goes wrong so that the buffer will be properly unmapped.
	 */
	switch (perm) {
	case EXCLAVES_BUFFER_PERM_READ:
		/* A writable mapping cannot back a read-only request. */
		if (!ro) {
			named_buffer_unmap(resource);
			lck_mtx_unlock(&resource->r_mutex);
			return KERN_PROTECTION_FAILURE;
		}
		break;
	case EXCLAVES_BUFFER_PERM_WRITE:
		if (ro) {
			named_buffer_unmap(resource);
			lck_mtx_unlock(&resource->r_mutex);
			return KERN_PROTECTION_FAILURE;
		}
		break;
	/* Maintain backwards compatibility for named buffers (READ|WRITE) */
	case EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE:
		if (ro) {
			perm &= ~EXCLAVES_BUFFER_PERM_WRITE;
		}
		break;
	}

	named_buffer_resource_t *nb = &resource->r_named_buffer;
	nb->nb_size = rounded_size;
	nb->nb_perm = perm;
	nb->nb_nranges = 0;

	/*
	 * The named buffer is now accessible by xnu. Discover the
	 * layout of the memory.
	 */
	kern_return_t (*layout)(uint64_t, uint32_t, uint32_t, kern_return_t (^)(uint64_t, uint32_t)) =
	    resource->r_type == XNUPROXY_RESOURCETYPE_NAMEDBUFFER ?
	    exclaves_xnuproxy_named_buffer_layout :
	    exclaves_xnuproxy_audio_buffer_layout;
	/* The block is called once per physically contiguous run of pages. */
	kr = layout(resource->r_id, 0, (uint32_t) (rounded_size / PAGE_SIZE),
	    ^(uint64_t base, uint32_t npages) {
		if (nb->nb_nranges >= EXCLAVES_SHARED_BUFFER_MAX_RANGES) {
			exclaves_debug_printf(show_errors, "exclaves: "
			    "fragmented named buffer can't fit\n");
			return KERN_NO_SPACE;
		}

		nb->nb_range[nb->nb_nranges].address = (char *)phystokv(base);
		assert3p(nb->nb_range[nb->nb_nranges].address, !=, NULL);

		nb->nb_range[nb->nb_nranges].npages = npages;
		assert3u(nb->nb_range[nb->nb_nranges].npages, !=, 0);

		nb->nb_nranges++;

		return KERN_SUCCESS;
	});

	if (kr != KERN_SUCCESS) {
		named_buffer_unmap(resource);
		lck_mtx_unlock(&resource->r_mutex);
		return kr;
	}

	exclaves_resource_retain(resource);
	resource->r_active = true;

	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1120
1121 kern_return_t
exclaves_named_buffer_map(const char * domain,const char * name,size_t size,exclaves_buffer_perm_t perm,exclaves_resource_t ** out)1122 exclaves_named_buffer_map(const char *domain, const char *name, size_t size,
1123 exclaves_buffer_perm_t perm, exclaves_resource_t **out)
1124 {
1125 assert3p(out, !=, NULL);
1126
1127 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1128 name, XNUPROXY_RESOURCETYPE_NAMEDBUFFER);
1129 if (resource == NULL) {
1130 return KERN_NOT_FOUND;
1131 }
1132 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NAMEDBUFFER);
1133
1134 kern_return_t kr = named_buffer_map(resource, size, perm);
1135 if (kr != KERN_SUCCESS) {
1136 return kr;
1137 }
1138
1139 *out = resource;
1140 return KERN_SUCCESS;
1141 }
1142
1143 static void
exclaves_named_buffer_unmap(exclaves_resource_t * resource)1144 exclaves_named_buffer_unmap(exclaves_resource_t *resource)
1145 {
1146 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NAMEDBUFFER);
1147 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1148 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1149
1150 kern_return_t kr = exclaves_xnuproxy_named_buffer_delete(resource->r_id);
1151 if (kr != KERN_SUCCESS) {
1152 exclaves_debug_printf(show_errors,
1153 "exclaves: failed to delete named buffer: %s\n",
1154 resource->r_name);
1155 return;
1156 }
1157
1158 bzero(&resource->r_named_buffer, sizeof(resource->r_named_buffer));
1159
1160 resource->r_active = false;
1161 }
1162
1163 /* -------------------------------------------------------------------------- */
1164 #pragma mark Audio buffers
1165
1166 kern_return_t
exclaves_audio_buffer_map(const char * domain,const char * name,size_t size,exclaves_resource_t ** out)1167 exclaves_audio_buffer_map(const char *domain, const char *name, size_t size,
1168 exclaves_resource_t **out)
1169 {
1170 assert3p(out, !=, NULL);
1171
1172 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1173 name, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER);
1174 if (resource == NULL) {
1175 return KERN_NOT_FOUND;
1176 }
1177 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER);
1178
1179 kern_return_t kr = named_buffer_map(resource, size,
1180 EXCLAVES_BUFFER_PERM_READ);
1181 if (kr != KERN_SUCCESS) {
1182 return kr;
1183 }
1184
1185 *out = resource;
1186 return KERN_SUCCESS;
1187 }
1188
1189 static void
exclaves_audio_buffer_delete(exclaves_resource_t * resource)1190 exclaves_audio_buffer_delete(exclaves_resource_t *resource)
1191 {
1192 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER);
1193 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1194 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1195
1196 kern_return_t kr = exclaves_xnuproxy_audio_buffer_delete(resource->r_id);
1197 if (kr != KERN_SUCCESS) {
1198 exclaves_debug_printf(show_errors,
1199 "exclaves: failed to delete audio buffer: %s\n",
1200 resource->r_name);
1201 return;
1202 }
1203
1204 bzero(&resource->r_named_buffer, sizeof(resource->r_named_buffer));
1205 resource->r_active = false;
1206 }
1207
1208 kern_return_t
exclaves_audio_buffer_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)1209 exclaves_audio_buffer_copyout(exclaves_resource_t *resource,
1210 user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1211 mach_vm_size_t size2, mach_vm_size_t offset2)
1212 {
1213 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1214 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOBUFFER);
1215
1216 kern_return_t kr = exclaves_xnuproxy_audio_buffer_copyout(resource->r_id, size1, offset1, size2, offset2);
1217 if (kr != KERN_SUCCESS) {
1218 return kr;
1219 }
1220
1221 return exclaves_named_buffer_copyout(resource, buffer, size1, offset1,
1222 size2, offset2);
1223 }
1224
1225 /* -------------------------------------------------------------------------- */
1226 #pragma mark Conclave Manager
1227
1228 static void
exclaves_conclave_init(exclaves_resource_t * resource)1229 exclaves_conclave_init(exclaves_resource_t *resource)
1230 {
1231 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1232
1233 tb_client_connection_t connection = NULL;
1234 __assert_only kern_return_t kr = exclaves_conclave_launcher_init(resource->r_id,
1235 &connection);
1236 assert3u(kr, ==, KERN_SUCCESS);
1237
1238 conclave_resource_t *conclave = &resource->r_conclave;
1239
1240 conclave->c_control = connection;
1241 conclave->c_state = CONCLAVE_S_NONE;
1242 conclave->c_task = TASK_NULL;
1243 }
1244
1245 kern_return_t
exclaves_conclave_attach(const char * name,task_t task)1246 exclaves_conclave_attach(const char *name, task_t task)
1247 {
1248 assert3p(task, !=, TASK_NULL);
1249
1250 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(
1251 EXCLAVES_DOMAIN_KERNEL, name, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1252 if (resource == NULL) {
1253 exclaves_requirement_assert(EXCLAVES_R_CONCLAVE,
1254 "failed to find conclave manager (%s)", name);
1255 return KERN_SUCCESS;
1256 }
1257 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1258
1259 conclave_resource_t *conclave = &resource->r_conclave;
1260
1261 lck_mtx_lock(&resource->r_mutex);
1262
1263 if (conclave->c_state != CONCLAVE_S_NONE) {
1264 lck_mtx_unlock(&resource->r_mutex);
1265 return KERN_INVALID_ARGUMENT;
1266 }
1267
1268 task_reference(task);
1269
1270 task->conclave = resource;
1271
1272 conclave->c_task = task;
1273 conclave->c_state = CONCLAVE_S_ATTACHED;
1274
1275 lck_mtx_unlock(&resource->r_mutex);
1276
1277 return KERN_SUCCESS;
1278 }
1279
1280 kern_return_t
exclaves_conclave_detach(exclaves_resource_t * resource,task_t task)1281 exclaves_conclave_detach(exclaves_resource_t *resource, task_t task)
1282 {
1283 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1284
1285 conclave_resource_t *conclave = &resource->r_conclave;
1286
1287 lck_mtx_lock(&resource->r_mutex);
1288
1289 if (conclave->c_state != CONCLAVE_S_ATTACHED &&
1290 conclave->c_state != CONCLAVE_S_STOPPED) {
1291 panic("Task %p trying to detach a conclave %p but it is in a "
1292 "weird state", task, conclave);
1293 }
1294
1295 assert3p(task->conclave, !=, NULL);
1296 assert3p(resource, ==, task->conclave);
1297
1298 task->conclave = NULL;
1299 conclave->c_task = TASK_NULL;
1300
1301 conclave->c_state = CONCLAVE_S_NONE;
1302
1303 lck_mtx_unlock(&resource->r_mutex);
1304
1305 task_deallocate(task);
1306
1307 return KERN_SUCCESS;
1308 }
1309
1310 kern_return_t
exclaves_conclave_inherit(exclaves_resource_t * resource,task_t old_task,task_t new_task)1311 exclaves_conclave_inherit(exclaves_resource_t *resource, task_t old_task,
1312 task_t new_task)
1313 {
1314 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1315
1316 conclave_resource_t *conclave = &resource->r_conclave;
1317
1318 lck_mtx_lock(&resource->r_mutex);
1319
1320 assert3u(conclave->c_state, !=, CONCLAVE_S_NONE);
1321
1322 assert3p(new_task->conclave, ==, NULL);
1323 assert3p(old_task->conclave, !=, NULL);
1324 assert3p(resource, ==, old_task->conclave);
1325
1326 /* Only allow inheriting the conclave if it has not yet started. */
1327 if (conclave->c_state != CONCLAVE_S_ATTACHED) {
1328 lck_mtx_unlock(&resource->r_mutex);
1329 return KERN_FAILURE;
1330 }
1331
1332 old_task->conclave = NULL;
1333
1334 task_reference(new_task);
1335 new_task->conclave = resource;
1336
1337 conclave->c_task = new_task;
1338
1339 lck_mtx_unlock(&resource->r_mutex);
1340 task_deallocate(old_task);
1341
1342 return KERN_SUCCESS;
1343 }
1344
1345 bool
exclaves_conclave_is_attached(const exclaves_resource_t * resource)1346 exclaves_conclave_is_attached(const exclaves_resource_t *resource)
1347 {
1348 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1349 const conclave_resource_t *conclave = &resource->r_conclave;
1350
1351 return conclave->c_state == CONCLAVE_S_ATTACHED;
1352 }
1353
/*
 * Launch an attached conclave. The launch upcall is made with r_mutex
 * dropped, so a concurrent exclaves_conclave_stop() may request a stop
 * while launching; that request is honored here after the launch
 * completes.
 */
kern_return_t
exclaves_conclave_launch(exclaves_resource_t *resource)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	if (exclaves_boot_wait(EXCLAVES_BOOT_STAGE_EXCLAVEKIT) != KERN_SUCCESS) {
		/*
		 * This should only ever happen if the EXCLAVEKIT requirement was
		 * relaxed.
		 */
		exclaves_requirement_assert(EXCLAVES_R_EXCLAVEKIT,
		    "failed to boot to exclavekit");
		return KERN_NOT_SUPPORTED;
	}

	lck_mtx_lock(&resource->r_mutex);

	/* Only an attached (never launched) conclave may be launched. */
	if (conclave->c_state != CONCLAVE_S_ATTACHED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	conclave->c_state = CONCLAVE_S_LAUNCHING;
	lck_mtx_unlock(&resource->r_mutex);

	/* Launch with the lock dropped; may block in the exclave. */
	__assert_only kern_return_t ret =
	    exclaves_conclave_launcher_launch(conclave->c_control);
	assert3u(ret, ==, KERN_SUCCESS);

	lck_mtx_lock(&resource->r_mutex);
	/* Check if conclave stop is requested */
	if (conclave->c_state == CONCLAVE_S_STOP_REQUESTED) {
		conclave->c_state = CONCLAVE_S_STOPPING;
		lck_mtx_unlock(&resource->r_mutex);

		/* Stop upcall also made with the lock dropped. */
		ret = exclaves_conclave_launcher_stop(conclave->c_control,
		    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT);
		assert3u(ret, ==, KERN_SUCCESS);

		lck_mtx_lock(&resource->r_mutex);
		conclave->c_state = CONCLAVE_S_STOPPED;
	} else if (conclave->c_state == CONCLAVE_S_LAUNCHING) {
		conclave->c_state = CONCLAVE_S_LAUNCHED;
	}
	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1404
1405 /*
1406 * Return the domain associated with the current conclave.
1407 * If not joined to a conclave, return the KERNEL domain. This implies that the
1408 * calling task is sufficiently privileged.
1409 */
1410 const char *
exclaves_conclave_get_domain(exclaves_resource_t * resource)1411 exclaves_conclave_get_domain(exclaves_resource_t *resource)
1412 {
1413 if (resource != NULL) {
1414 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1415 return resource->r_name;
1416 }
1417
1418 if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_KERNEL_DOMAIN)) {
1419 exclaves_requirement_assert(EXCLAVES_R_CONCLAVE_RESOURCES,
1420 "no conclave manager present");
1421 }
1422
1423 return EXCLAVES_DOMAIN_KERNEL;
1424 }
1425
/*
 * Stop a conclave. Handles every state the conclave may be in:
 * concurrent launches are asked to stop (via STOP_REQUESTED), a
 * never-launched conclave transitions straight to STOPPED, and an
 * in-progress stop is treated as success. The stop upcall itself is made
 * with r_mutex dropped. `gather_crash_bt` selects the stop reason
 * reported to the launcher (KILLED vs EXIT).
 */
kern_return_t
exclaves_conclave_stop(exclaves_resource_t *resource, bool gather_crash_bt)
{
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;

	uint32_t conclave_stop_reason = gather_crash_bt ?
	    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_KILLED :
	    CONCLAVE_LAUNCHER_CONCLAVESTOPREASON_EXIT;

	lck_mtx_lock(&resource->r_mutex);

	/* TBD Call stop on the conclave manager endpoint. */
	if (conclave->c_state == CONCLAVE_S_LAUNCHING) {
		/* If another thread is launching, just request a stop */
		conclave->c_state = CONCLAVE_S_STOP_REQUESTED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	} else if (conclave->c_state == CONCLAVE_S_ATTACHED) {
		/* Change the state to stopped if the conclave was never started */
		conclave->c_state = CONCLAVE_S_STOPPED;
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	} else if (conclave->c_state == CONCLAVE_S_STOPPING ||
	    conclave->c_state == CONCLAVE_S_STOPPED) {
		/* Upcall to stop the conclave might be in progress, bail out */
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Any remaining state other than LAUNCHED cannot be stopped. */
	if (conclave->c_state != CONCLAVE_S_LAUNCHED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	conclave->c_state = CONCLAVE_S_STOPPING;
	lck_mtx_unlock(&resource->r_mutex);

	/* Stop upcall is made with the lock dropped; may block. */
	__assert_only kern_return_t kr =
	    exclaves_conclave_launcher_stop(conclave->c_control,
	    conclave_stop_reason);
	assert3u(kr, ==, KERN_SUCCESS);

	lck_mtx_lock(&resource->r_mutex);
	conclave->c_state = CONCLAVE_S_STOPPED;
	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1476
/*
 * Handle a stop upcall from the exclave side: move the conclave to
 * STOPPING and mark the current thread as having a stop-upcall pending
 * (completed later by exclaves_conclave_stop_upcall_complete()). An
 * already-stopping/stopped conclave is treated as success.
 */
kern_return_t
exclaves_conclave_stop_upcall(exclaves_resource_t *resource)
{
	assert3p(resource, !=, NULL);
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;
	thread_t thread = current_thread();

	lck_mtx_lock(&resource->r_mutex);

	if (conclave->c_state == CONCLAVE_S_STOPPING || conclave->c_state == CONCLAVE_S_STOPPED) {
		/* Upcall to stop the conclave might be in progress, bail out */
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* Only these states can legally transition to STOPPING here. */
	if (conclave->c_state != CONCLAVE_S_LAUNCHED && conclave->c_state != CONCLAVE_S_LAUNCHING
	    && conclave->c_state != CONCLAVE_S_ATTACHED
	    && conclave->c_state != CONCLAVE_S_STOP_REQUESTED) {
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	conclave->c_state = CONCLAVE_S_STOPPING;
	/* The completion path clears this flag. */
	thread->th_exclaves_state |= TH_EXCLAVES_STOP_UPCALL_PENDING;
	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
1507
/*
 * Complete a previously-registered stop upcall: clear the per-thread
 * pending flag, deliver an EXC_GUARD exclave exception to the attached
 * task, and move the conclave from STOPPING to STOPPED.
 */
kern_return_t
exclaves_conclave_stop_upcall_complete(exclaves_resource_t *resource, task_t task)
{
	assert3p(resource, !=, NULL);
	assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);

	conclave_resource_t *conclave = &resource->r_conclave;
	thread_t thread = current_thread();

	thread->th_exclaves_state &= ~TH_EXCLAVES_STOP_UPCALL_PENDING;

	int flags = PX_DEBUG_NO_HONOR | PX_NO_EXCEPTION_UTHREAD;
	exception_info_t info = {
		.os_reason = OS_REASON_GUARD,
		.exception_type = EXC_GUARD,
		.mx_code = GUARD_REASON_EXCLAVES,
		.mx_subcode = 0
	};

	/* Exception is delivered before taking r_mutex — NOTE(review):
	 * presumably to avoid holding the lock across exception delivery;
	 * confirm before reordering. */
	exit_with_exclave_exception(get_bsdtask_info(task), info, flags);

	lck_mtx_lock(&resource->r_mutex);

	assert3u(conclave->c_state, ==, CONCLAVE_S_STOPPING);
	conclave->c_state = CONCLAVE_S_STOPPED;

	lck_mtx_unlock(&resource->r_mutex);
	return KERN_SUCCESS;
}
1537
1538 bool
exclaves_conclave_has_service(exclaves_resource_t * resource,uint64_t id)1539 exclaves_conclave_has_service(exclaves_resource_t *resource, uint64_t id)
1540 {
1541 assert3u(id, <, CONCLAVE_SERVICE_MAX);
1542
1543 if (resource == NULL) {
1544 /* There's no conclave, fallback to the kernel domain. */
1545 if (!exclaves_has_priv(current_task(), EXCLAVES_PRIV_KERNEL_DOMAIN)) {
1546 exclaves_requirement_assert(EXCLAVES_R_CONCLAVE_RESOURCES,
1547 "no conclave manager present");
1548 }
1549 return bitmap_test(kernel_service_bitmap, (uint32_t)id);
1550 }
1551
1552 assert3p(resource, !=, NULL);
1553 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_CONCLAVEMANAGER);
1554
1555 conclave_resource_t *conclave = &resource->r_conclave;
1556
1557 return bitmap_test(conclave->c_service_bitmap, (uint32_t)id);
1558 }
1559
1560
1561 /* -------------------------------------------------------------------------- */
1562 #pragma mark Sensors
1563
1564 static void
exclaves_resource_sensor_reset(exclaves_resource_t * resource)1565 exclaves_resource_sensor_reset(exclaves_resource_t *resource)
1566 {
1567 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1568 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
1569 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1570
1571 exclaves_sensor_status_t status;
1572
1573 for (int i = 0; i < resource->r_sensor.s_startcount; i++) {
1574 __assert_only kern_return_t kr = exclaves_sensor_stop(
1575 (exclaves_sensor_type_t)resource->r_id, 0, &status);
1576 assert3u(kr, !=, KERN_INVALID_ARGUMENT);
1577 }
1578
1579 resource->r_sensor.s_startcount = 0;
1580 }
1581
1582 kern_return_t
exclaves_resource_sensor_open(const char * domain,const char * id_name,exclaves_resource_t ** out)1583 exclaves_resource_sensor_open(const char *domain, const char *id_name,
1584 exclaves_resource_t **out)
1585 {
1586 assert3p(out, !=, NULL);
1587
1588 exclaves_resource_t *sensor = exclaves_resource_lookup_by_name(domain,
1589 id_name, XNUPROXY_RESOURCETYPE_SENSOR);
1590
1591 if (sensor == NULL) {
1592 return KERN_NOT_FOUND;
1593 }
1594
1595 assert3u(sensor->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1596
1597 lck_mtx_lock(&sensor->r_mutex);
1598 exclaves_resource_retain(sensor);
1599 lck_mtx_unlock(&sensor->r_mutex);
1600
1601 *out = sensor;
1602
1603 return KERN_SUCCESS;
1604 }
1605
1606 kern_return_t
exclaves_resource_sensor_start(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1607 exclaves_resource_sensor_start(exclaves_resource_t *resource, uint64_t flags,
1608 exclaves_sensor_status_t *status)
1609 {
1610 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1611
1612 lck_mtx_lock(&resource->r_mutex);
1613 if (resource->r_sensor.s_startcount == UINT64_MAX) {
1614 lck_mtx_unlock(&resource->r_mutex);
1615 return KERN_INVALID_ARGUMENT;
1616 }
1617
1618 kern_return_t kr = exclaves_sensor_start(
1619 (exclaves_sensor_type_t)resource->r_id, flags, status);
1620 if (kr == KERN_SUCCESS) {
1621 resource->r_sensor.s_startcount += 1;
1622 }
1623 lck_mtx_unlock(&resource->r_mutex);
1624 return kr;
1625 }
1626
1627 kern_return_t
exclaves_resource_sensor_status(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1628 exclaves_resource_sensor_status(exclaves_resource_t *resource, uint64_t flags,
1629 exclaves_sensor_status_t *status)
1630 {
1631 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1632
1633 lck_mtx_lock(&resource->r_mutex);
1634 kern_return_t kr = exclaves_sensor_status(
1635 (exclaves_sensor_type_t)resource->r_id, flags, status);
1636 lck_mtx_unlock(&resource->r_mutex);
1637
1638 return kr;
1639 }
1640
1641 kern_return_t
exclaves_resource_sensor_stop(exclaves_resource_t * resource,uint64_t flags,exclaves_sensor_status_t * status)1642 exclaves_resource_sensor_stop(exclaves_resource_t *resource, uint64_t flags,
1643 exclaves_sensor_status_t *status)
1644 {
1645 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SENSOR);
1646
1647 lck_mtx_lock(&resource->r_mutex);
1648 if (resource->r_sensor.s_startcount == 0) {
1649 lck_mtx_unlock(&resource->r_mutex);
1650 return KERN_INVALID_ARGUMENT;
1651 }
1652
1653 kern_return_t kr = exclaves_sensor_stop(
1654 (exclaves_sensor_type_t)resource->r_id, flags, status);
1655 if (kr == KERN_SUCCESS) {
1656 resource->r_sensor.s_startcount -= 1;
1657 }
1658 lck_mtx_unlock(&resource->r_mutex);
1659
1660 return kr;
1661 }
1662
1663 /* -------------------------------------------------------------------------- */
1664 #pragma mark Notifications
1665
1666 static void
exclaves_notification_init(exclaves_resource_t * resource)1667 exclaves_notification_init(exclaves_resource_t *resource)
1668 {
1669 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1670 exclaves_notification_t *notification = &resource->r_notification;
1671 klist_init(¬ification->notification_klist);
1672 }
1673
1674 static int
filt_exclaves_notification_attach(struct knote * kn,__unused struct kevent_qos_s * kev)1675 filt_exclaves_notification_attach(struct knote *kn, __unused struct kevent_qos_s *kev)
1676 {
1677 int error = 0;
1678 exclaves_resource_t *exclaves_resource = NULL;
1679 kern_return_t kr = exclaves_resource_from_port_name(current_space(), (mach_port_name_t)kn->kn_id, &exclaves_resource);
1680 if (kr != KERN_SUCCESS) {
1681 error = ENOENT;
1682 goto out;
1683 }
1684 assert3p(exclaves_resource, !=, NULL);
1685 if (exclaves_resource->r_type != XNUPROXY_RESOURCETYPE_NOTIFICATION) {
1686 exclaves_resource_release(exclaves_resource);
1687 error = EINVAL;
1688 goto out;
1689 }
1690
1691 lck_mtx_lock(&exclaves_resource->r_mutex);
1692
1693 if (kn->kn_exclaves_resource != NULL) {
1694 lck_mtx_unlock(&exclaves_resource->r_mutex);
1695 exclaves_resource_release(exclaves_resource);
1696 error = EBUSY;
1697 goto out;
1698 }
1699
1700 /* kn_exclaves_resource consumes the ref. */
1701 kn->kn_exclaves_resource = exclaves_resource;
1702 KNOTE_ATTACH(&exclaves_resource->r_notification.notification_klist, kn);
1703 lck_mtx_unlock(&exclaves_resource->r_mutex);
1704
1705 error = 0;
1706 out:
1707 return error;
1708 }
1709
1710 static void
filt_exclaves_notification_detach(struct knote * kn)1711 filt_exclaves_notification_detach(struct knote *kn)
1712 {
1713 exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1714
1715 if (exclaves_resource != NULL) {
1716 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1717 lck_mtx_lock(&exclaves_resource->r_mutex);
1718 kn->kn_exclaves_resource = NULL;
1719 KNOTE_DETACH(&exclaves_resource->r_notification.notification_klist, kn);
1720 lck_mtx_unlock(&exclaves_resource->r_mutex);
1721
1722 exclaves_resource_release(exclaves_resource);
1723 }
1724 }
1725
1726 static int
filt_exclaves_notification_event(struct knote * kn,long hint)1727 filt_exclaves_notification_event(struct knote *kn, long hint)
1728 {
1729 /* ALWAYS CALLED WITH exclaves_resource mutex held */
1730 exclaves_resource_t *exclaves_resource __assert_only = kn->kn_exclaves_resource;
1731 LCK_MTX_ASSERT(&exclaves_resource->r_mutex, LCK_MTX_ASSERT_OWNED);
1732
1733 /*
1734 * if the user is interested in this event, record it.
1735 */
1736 if (kn->kn_sfflags & hint) {
1737 kn->kn_fflags |= hint;
1738 }
1739
1740 /* if we have any matching state, activate the knote */
1741 if (kn->kn_fflags != 0) {
1742 return FILTER_ACTIVE;
1743 } else {
1744 return 0;
1745 }
1746 }
1747
1748 static int
filt_exclaves_notification_touch(struct knote * kn,struct kevent_qos_s * kev)1749 filt_exclaves_notification_touch(struct knote *kn, struct kevent_qos_s *kev)
1750 {
1751 int result;
1752 exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1753 assert3p(exclaves_resource, !=, NULL);
1754 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1755
1756 lck_mtx_lock(&exclaves_resource->r_mutex);
1757 /* accept new mask and mask off output events no long interesting */
1758 kn->kn_sfflags = kev->fflags;
1759 kn->kn_fflags &= kn->kn_sfflags;
1760 if (kn->kn_fflags != 0) {
1761 result = FILTER_ACTIVE;
1762 } else {
1763 result = 0;
1764 }
1765 lck_mtx_unlock(&exclaves_resource->r_mutex);
1766
1767 return result;
1768 }
1769
1770 static int
filt_exclaves_notification_process(struct knote * kn,struct kevent_qos_s * kev)1771 filt_exclaves_notification_process(struct knote *kn, struct kevent_qos_s *kev)
1772 {
1773 int result = 0;
1774 exclaves_resource_t *exclaves_resource = kn->kn_exclaves_resource;
1775 assert3p(exclaves_resource, !=, NULL);
1776 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1777
1778 lck_mtx_lock(&exclaves_resource->r_mutex);
1779 if (kn->kn_fflags) {
1780 knote_fill_kevent(kn, kev, 0);
1781 result = FILTER_ACTIVE;
1782 }
1783 lck_mtx_unlock(&exclaves_resource->r_mutex);
1784 return result;
1785 }
1786
/* Filter operations backing kqueue knotes for exclave notifications. */
SECURITY_READ_ONLY_EARLY(struct filterops) exclaves_notification_filtops = {
	.f_attach = filt_exclaves_notification_attach,
	.f_detach = filt_exclaves_notification_detach,
	.f_event = filt_exclaves_notification_event,
	.f_touch = filt_exclaves_notification_touch,
	.f_process = filt_exclaves_notification_process,
};
1794
1795 kern_return_t
exclaves_notification_create(const char * domain,const char * name,exclaves_resource_t ** out)1796 exclaves_notification_create(const char *domain, const char *name,
1797 exclaves_resource_t **out)
1798 {
1799 assert3p(out, !=, NULL);
1800
1801 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1802 name, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1803
1804 if (resource == NULL) {
1805 return KERN_NOT_FOUND;
1806 }
1807 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1808
1809 lck_mtx_lock(&resource->r_mutex);
1810 exclaves_resource_retain(resource);
1811 lck_mtx_unlock(&resource->r_mutex);
1812
1813 *out = resource;
1814
1815 return KERN_SUCCESS;
1816 }
1817
1818 kern_return_t
exclaves_notification_signal(exclaves_resource_t * exclaves_resource,long event_mask)1819 exclaves_notification_signal(exclaves_resource_t *exclaves_resource, long event_mask)
1820 {
1821 assert3p(exclaves_resource, !=, NULL);
1822 assert3u(exclaves_resource->r_type, ==, XNUPROXY_RESOURCETYPE_NOTIFICATION);
1823
1824 lck_mtx_lock(&exclaves_resource->r_mutex);
1825 KNOTE(&exclaves_resource->r_notification.notification_klist, event_mask);
1826 lck_mtx_unlock(&exclaves_resource->r_mutex);
1827
1828 return KERN_SUCCESS;
1829 }
1830
/* Look up a notification resource by numeric id within a domain. */
exclaves_resource_t *
exclaves_notification_lookup_by_id(const char *domain, uint64_t id)
{
	return exclaves_resource_lookup_by_id(domain, id,
	    XNUPROXY_RESOURCETYPE_NOTIFICATION);
}
1837
1838 uint64_t
exclaves_service_lookup(const char * domain,const char * name)1839 exclaves_service_lookup(const char *domain, const char *name)
1840 {
1841 assert3p(domain, !=, NULL);
1842 assert3p(name, !=, NULL);
1843
1844 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
1845 name, XNUPROXY_RESOURCETYPE_SERVICE);
1846 if (resource == NULL) {
1847 return EXCLAVES_INVALID_ID;
1848 }
1849
1850 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SERVICE);
1851 return resource->r_id;
1852 }
1853
1854 /* -------------------------------------------------------------------------- */
1855 #pragma mark Shared Memory
1856
/*
 * Apply `cb` to the region [offset, offset + len) of a shared-memory
 * resource. The backing memory may be physically fragmented into several
 * contiguous ranges; the callback is invoked once per chunk, in order.
 * Iteration stops early (and that value is returned) if the callback
 * returns non-zero; otherwise returns 0.
 */
int
exclaves_resource_shared_memory_io(exclaves_resource_t *resource, off_t offset,
    size_t len, int (^cb)(char *, size_t))
{
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
	assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);

	shared_memory_resource_t *sm = &resource->r_shared_memory;
	assert3u(sm->sm_nranges, >, 0);
	assert3u(sm->sm_size, !=, 0);
	/* Caller must have validated the region against sm_size. */
	assert3u(offset + len, <=, sm->sm_size);

	for (int i = 0; i < sm->sm_nranges; i++) {
		/* Skip forward to the starting range. */
		if (offset >= sm->sm_range[i].npages * PAGE_SIZE) {
			offset -= sm->sm_range[i].npages * PAGE_SIZE;
			continue;
		}

		/* Clamp this chunk to the end of the current range. */
		size_t size = MIN((sm->sm_range[i].npages * PAGE_SIZE) - offset, len);
		int ret = cb(sm->sm_range[i].address + offset, size);
		if (ret != 0) {
			return ret;
		}

		/* Subsequent ranges are consumed from their start. */
		offset = 0;
		len -= size;

		if (len == 0) {
			break;
		}
	}
	assert3u(len, ==, 0);

	return 0;
}
1894
1895 static kern_return_t
exclaves_resource_shared_memory_io_copyin(exclaves_resource_t * resource,user_addr_t _src,off_t offset,size_t len)1896 exclaves_resource_shared_memory_io_copyin(exclaves_resource_t *resource,
1897 user_addr_t _src, off_t offset, size_t len)
1898 {
1899 assert3u(resource->r_shared_memory.sm_perm & EXCLAVES_BUFFER_PERM_WRITE,
1900 !=, 0);
1901
1902 __block user_addr_t src = _src;
1903 return exclaves_resource_shared_memory_io(resource, offset, len,
1904 ^(char *buffer, size_t size) {
1905 if (copyin(src, buffer, size) != 0) {
1906 return KERN_FAILURE;
1907 }
1908
1909 src += size;
1910 return KERN_SUCCESS;
1911 });
1912 }
1913
1914 kern_return_t
exclaves_resource_shared_memory_copyin(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)1915 exclaves_resource_shared_memory_copyin(exclaves_resource_t *resource,
1916 user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1917 mach_vm_size_t size2, mach_vm_size_t offset2)
1918 {
1919 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1920 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SHAREDMEMORY);
1921
1922 mach_vm_size_t umax = 0;
1923 kern_return_t kr = KERN_FAILURE;
1924
1925 if (buffer == USER_ADDR_NULL || size1 == 0) {
1926 return KERN_INVALID_ARGUMENT;
1927 }
1928
1929 shared_memory_resource_t *sm = &resource->r_shared_memory;
1930 assert3u(sm->sm_nranges, >, 0);
1931 assert3u(sm->sm_size, !=, 0);
1932
1933 if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) {
1934 return KERN_INVALID_ARGUMENT;
1935 }
1936
1937 if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) {
1938 return KERN_INVALID_ARGUMENT;
1939 }
1940
1941 if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_WRITE) == 0) {
1942 return KERN_PROTECTION_FAILURE;
1943 }
1944
1945 kr = exclaves_resource_shared_memory_io_copyin(resource, buffer, offset1, size1);
1946 if (kr != KERN_SUCCESS) {
1947 return kr;
1948 }
1949
1950 kr = exclaves_resource_shared_memory_io_copyin(resource, buffer + size1, offset2,
1951 size2);
1952 if (kr != KERN_SUCCESS) {
1953 return kr;
1954 }
1955
1956 return KERN_SUCCESS;
1957 }
1958
1959 static kern_return_t
exclaves_resource_shared_memory_io_copyout(exclaves_resource_t * resource,user_addr_t _dst,off_t offset,size_t len)1960 exclaves_resource_shared_memory_io_copyout(exclaves_resource_t *resource,
1961 user_addr_t _dst, off_t offset, size_t len)
1962 {
1963 assert3u(resource->r_shared_memory.sm_perm & EXCLAVES_BUFFER_PERM_READ,
1964 !=, 0);
1965
1966 __block user_addr_t dst = _dst;
1967 return exclaves_resource_shared_memory_io(resource, offset, len,
1968 ^(char *buffer, size_t size) {
1969 if (copyout(buffer, dst, size) != 0) {
1970 return KERN_FAILURE;
1971 }
1972
1973 dst += size;
1974 return KERN_SUCCESS;
1975 });
1976 }
1977
1978 kern_return_t
exclaves_resource_shared_memory_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)1979 exclaves_resource_shared_memory_copyout(exclaves_resource_t *resource,
1980 user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
1981 mach_vm_size_t size2, mach_vm_size_t offset2)
1982 {
1983 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
1984 assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
1985 resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
1986
1987 mach_vm_size_t umax = 0;
1988 kern_return_t kr = KERN_FAILURE;
1989
1990 if (buffer == USER_ADDR_NULL || size1 == 0) {
1991 return KERN_INVALID_ARGUMENT;
1992 }
1993
1994 shared_memory_resource_t *sm = &resource->r_shared_memory;
1995 assert3u(sm->sm_nranges, >, 0);
1996 assert3u(sm->sm_size, !=, 0);
1997
1998 if (os_add_overflow(offset1, size1, &umax) || umax > sm->sm_size) {
1999 return KERN_INVALID_ARGUMENT;
2000 }
2001
2002 if (os_add_overflow(offset2, size2, &umax) || umax > sm->sm_size) {
2003 return KERN_INVALID_ARGUMENT;
2004 }
2005
2006 if ((sm->sm_perm & EXCLAVES_BUFFER_PERM_READ) == 0) {
2007 return KERN_PROTECTION_FAILURE;
2008 }
2009
2010 kr = exclaves_resource_shared_memory_io_copyout(resource, buffer, offset1, size1);
2011 if (kr != KERN_SUCCESS) {
2012 return kr;
2013 }
2014
2015 kr = exclaves_resource_shared_memory_io_copyout(resource, buffer + size1,
2016 offset2, size2);
2017 if (kr != KERN_SUCCESS) {
2018 return kr;
2019 }
2020
2021 return KERN_SUCCESS;
2022 }
2023
2024 /* The lower 32bits contain the endpoint id. */
2025 static uint32_t
audio_memory_get_endpoint(exclaves_resource_t * resource)2026 audio_memory_get_endpoint(exclaves_resource_t *resource)
2027 {
2028 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2029 return resource->r_id << 32 >> 32;
2030 }
2031
2032 /* The upper 32bits of the id contain the buffer id. */
2033 static uint32_t
audio_memory_get_buffer_id(exclaves_resource_t * resource)2034 audio_memory_get_buffer_id(exclaves_resource_t *resource)
2035 {
2036 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2037 return resource->r_id >> 32;
2038 }
2039
/*
 * Map the memory backing a shared memory (or arbitrated audio memory)
 * resource and record its layout.
 *
 * size is rounded up to a whole number of pages. If the resource is
 * already mapped, the existing mapping is reused: the use count is bumped
 * provided the requested size fits and the permissions match exactly.
 * Otherwise the exclaves shared memory client is (lazily) initialised,
 * the buffer is set up with the requested permissions, and the physical
 * pages are walked to build a compact list of virtually-contiguous
 * ranges in sm->sm_range[] for later I/O.
 *
 * Returns KERN_SUCCESS with a use-count reference held on the resource;
 * KERN_INVALID_ARGUMENT for a zero/overflowing size or an incompatible
 * existing mapping; otherwise an error from the underlying setup calls.
 */
static kern_return_t
shared_memory_map(exclaves_resource_t *resource, size_t size,
    exclaves_buffer_perm_t perm)
{
	assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
	    resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
	assert3u(perm & ~(EXCLAVES_BUFFER_PERM_READ | EXCLAVES_BUFFER_PERM_WRITE), ==, 0);

	kern_return_t kr = KERN_FAILURE;

	/* round size up to nearest page */
	mach_vm_offset_t rounded_size = 0;
	if (size == 0 || mach_vm_round_page_overflow(size, &rounded_size)) {
		return KERN_INVALID_ARGUMENT;
	}

	lck_mtx_lock(&resource->r_mutex);

	__block shared_memory_resource_t *sm = &resource->r_shared_memory;

	/*
	 * If already active, bump the use count, check that the perms and size
	 * are compatible and return. Checking the use count is insufficient
	 * here as this can race with a non-locked use count release.
	 */
	if (resource->r_active) {
		/*
		 * Both the permissions and size must match.
		 */
		if (sm->sm_size < rounded_size || sm->sm_perm != perm) {
			lck_mtx_unlock(&resource->r_mutex);
			return KERN_INVALID_ARGUMENT;
		}

		exclaves_resource_retain(resource);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_SUCCESS;
	}

	/* This is lazily initialised and never de-initialised. */
	if (sm->sm_client.connection == NULL) {
		/*
		 * For audio memory the endpoint lives in the low 32 bits of
		 * the id; for plain shared memory the id is the endpoint.
		 */
		uint64_t endpoint = resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ?
		    resource->r_id :
		    audio_memory_get_endpoint(resource);

		kr = exclaves_shared_memory_init(endpoint, &sm->sm_client);
		if (kr != KERN_SUCCESS) {
			lck_mtx_unlock(&resource->r_mutex);
			return kr;
		}
	}

	/* WRITE implies READWRITE on the exclaves side; otherwise read-only. */
	const sharedmemorybase_perms_s sm_perm = perm == EXCLAVES_BUFFER_PERM_WRITE ?
	    SHAREDMEMORYBASE_PERMS_READWRITE : SHAREDMEMORYBASE_PERMS_READONLY;
	sharedmemorybase_mapping_s mapping = 0;
	kr = exclaves_shared_memory_setup(&sm->sm_client, sm_perm, 0,
	    rounded_size / PAGE_SIZE, &mapping);
	if (kr != KERN_SUCCESS) {
		lck_mtx_unlock(&resource->r_mutex);
		return kr;
	}

	/*
	 * From this point on exclaves_shared_memory_teardown() must be called
	 * if something goes wrong so that the buffer will be properly unmapped.
	 */
	sm->sm_size = rounded_size;
	sm->sm_perm = perm;
	sm->sm_nranges = 0;

	/*
	 * The shared buffer is now accessible by xnu. Discover the layout of
	 * the memory.
	 */
	__block bool success = true;
	/* BEGIN IGNORE CODESTYLE */
	kr = exclaves_shared_memory_iterate(&sm->sm_client, &mapping, 0,
	    rounded_size / PAGE_SIZE, ^(uint64_t pa) {
		char *vaddr = (char *)phystokv(pa);
		assert3p(vaddr, !=, NULL);

		/*
		 * If this virtual address is adjacent to the previous
		 * one, just extend the current range.
		 */
		if (sm->sm_nranges > 0) {
			const size_t len = sm->sm_range[sm->sm_nranges - 1].npages * PAGE_SIZE;
			const char *addr = sm->sm_range[sm->sm_nranges - 1].address + len;

			if (vaddr == addr) {
				sm->sm_range[sm->sm_nranges - 1].npages++;
				return;
			}

			if (sm->sm_nranges >= EXCLAVES_SHARED_BUFFER_MAX_RANGES) {
				exclaves_debug_printf(show_errors,
				    "exclaves: too many ranges, can't fit\n");
				success = false;
				return;
			}
		}

		/*
		 * Page is not virtually contiguous with the previous one -
		 * stick it in a new range.
		 */
		sm->sm_range[sm->sm_nranges].npages = 1;
		sm->sm_range[sm->sm_nranges].address = vaddr;
		sm->sm_nranges++;
	});
	/* END IGNORE CODESTYLE */


	if (kr != KERN_SUCCESS || !success) {
		exclaves_shared_memory_teardown(&sm->sm_client, &mapping);
		lck_mtx_unlock(&resource->r_mutex);
		return KERN_FAILURE;
	}

	sm->sm_mapping = mapping;

	exclaves_resource_retain(resource);
	resource->r_active = true;

	lck_mtx_unlock(&resource->r_mutex);

	return KERN_SUCCESS;
}
2168
2169 kern_return_t
exclaves_resource_shared_memory_map(const char * domain,const char * name,size_t size,exclaves_buffer_perm_t perm,exclaves_resource_t ** out)2170 exclaves_resource_shared_memory_map(const char *domain, const char *name, size_t size,
2171 exclaves_buffer_perm_t perm, exclaves_resource_t **out)
2172 {
2173 assert3p(out, !=, NULL);
2174
2175 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
2176 name, XNUPROXY_RESOURCETYPE_SHAREDMEMORY);
2177 if (resource == NULL) {
2178 return KERN_NOT_FOUND;
2179 }
2180 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_SHAREDMEMORY);
2181
2182 kern_return_t kr = shared_memory_map(resource, size, perm);
2183 if (kr != KERN_SUCCESS) {
2184 return kr;
2185 }
2186
2187 *out = resource;
2188 return KERN_SUCCESS;
2189 }
2190
2191
2192 static void
exclaves_resource_shared_memory_unmap(exclaves_resource_t * resource)2193 exclaves_resource_shared_memory_unmap(exclaves_resource_t *resource)
2194 {
2195 assert(resource->r_type == XNUPROXY_RESOURCETYPE_SHAREDMEMORY ||
2196 resource->r_type == XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2197 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
2198 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
2199
2200 shared_memory_resource_t *sm = &resource->r_shared_memory;
2201
2202 kern_return_t kr = exclaves_shared_memory_teardown(&sm->sm_client,
2203 &sm->sm_mapping);
2204 if (kr != KERN_SUCCESS) {
2205 exclaves_debug_printf(show_errors,
2206 "exclaves: failed to teardown shared memory: %s, \n",
2207 resource->r_name);
2208 return;
2209 }
2210
2211 bzero(&resource->r_shared_memory, sizeof(resource->r_shared_memory));
2212
2213 resource->r_active = false;
2214 }
2215
2216
2217 /* -------------------------------------------------------------------------- */
2218 #pragma mark Arbitrated Audio Memory
2219
2220 kern_return_t
exclaves_resource_audio_memory_map(const char * domain,const char * name,size_t size,exclaves_resource_t ** out)2221 exclaves_resource_audio_memory_map(const char *domain, const char *name,
2222 size_t size, exclaves_resource_t **out)
2223 {
2224 assert3p(out, !=, NULL);
2225
2226 exclaves_resource_t *resource = exclaves_resource_lookup_by_name(domain,
2227 name, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2228 if (resource == NULL) {
2229 return KERN_NOT_FOUND;
2230 }
2231 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2232
2233 kern_return_t kr = shared_memory_map(resource, size,
2234 EXCLAVES_BUFFER_PERM_READ);
2235 if (kr != KERN_SUCCESS) {
2236 return kr;
2237 }
2238
2239 *out = resource;
2240 return KERN_SUCCESS;
2241 }
2242
2243 static void
exclaves_resource_audio_memory_unmap(exclaves_resource_t * resource)2244 exclaves_resource_audio_memory_unmap(exclaves_resource_t *resource)
2245 {
2246 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2247 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), ==, 0);
2248 LCK_MTX_ASSERT(&resource->r_mutex, LCK_MTX_ASSERT_OWNED);
2249
2250 exclaves_resource_shared_memory_unmap(resource);
2251 }
2252
2253 static kern_return_t
copyout_zero(user_addr_t buffer,mach_vm_size_t size,mach_vm_size_t offset)2254 copyout_zero(user_addr_t buffer, mach_vm_size_t size, mach_vm_size_t offset)
2255 {
2256 static const char zero[PAGE_SIZE] = {0};
2257
2258 while (size > 0) {
2259 size_t copy_size = MIN(size, sizeof(zero));
2260 if (copyout(zero, buffer + offset, copy_size) != 0) {
2261 return KERN_FAILURE;
2262 }
2263
2264 offset += copy_size;
2265 size -= copy_size;
2266 }
2267
2268 return KERN_SUCCESS;
2269 }
2270
2271 kern_return_t
exclaves_resource_audio_memory_copyout(exclaves_resource_t * resource,user_addr_t buffer,mach_vm_size_t size1,mach_vm_size_t offset1,mach_vm_size_t size2,mach_vm_size_t offset2)2272 exclaves_resource_audio_memory_copyout(exclaves_resource_t *resource,
2273 user_addr_t buffer, mach_vm_size_t size1, mach_vm_size_t offset1,
2274 mach_vm_size_t size2, mach_vm_size_t offset2)
2275 {
2276 assert3u(os_atomic_load(&resource->r_usecnt, relaxed), >, 0);
2277 assert3u(resource->r_type, ==, XNUPROXY_RESOURCETYPE_ARBITRATEDAUDIOMEMORY);
2278
2279 kern_return_t kr = KERN_FAILURE;
2280 exclaves_sensor_status_t status;
2281 const uint32_t id = audio_memory_get_buffer_id(resource);
2282
2283 kr = exclaves_sensor_copy(id, size1, offset1, size2, offset2, &status);
2284 if (kr != KERN_SUCCESS) {
2285 return kr;
2286 }
2287
2288 if (status == EXCLAVES_SENSOR_STATUS_ALLOWED) {
2289 kr = exclaves_resource_shared_memory_copyout(resource, buffer,
2290 size1, offset1, size2, offset2);
2291 if (kr != KERN_SUCCESS) {
2292 return kr;
2293 }
2294 } else {
2295 /*
2296 * This should be removed once the audio arbiter is properly
2297 * switching buffers and instead we should always rely on the
2298 * audio arbiter to do its job and make the data available or
2299 * not.
2300 */
2301 kr = copyout_zero(buffer, size1, offset1);
2302 if (kr != KERN_SUCCESS) {
2303 return kr;
2304 }
2305
2306 kr = copyout_zero(buffer, size2, offset2);
2307 if (kr != KERN_SUCCESS) {
2308 return kr;
2309 }
2310 }
2311
2312 return KERN_SUCCESS;
2313 }
2314
2315 #endif /* CONFIG_EXCLAVES */
2316