/*
 * Copyright (c) 2024 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#if CONFIG_EXCLAVES

#include <vm/pmap.h>

#include <vm/vm_page_internal.h>
#include <vm/vm_object_xnu.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_kern_xnu.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_memory_entry_xnu.h>
#include <vm/vm_protos.h>

#include <mach/mach_vm.h>
#include <mach/mach_host.h>

#include <device/device_port.h>

#include <kern/ipc_kobject.h>

#include <libkern/coreanalytics/coreanalytics.h>
#include <kern/ledger.h>

#include <pexpert/device_tree.h>

#include "exclaves_memory.h"

/* -------------------------------------------------------------------------- */
#pragma mark Accounting

typedef struct {
	_Atomic uint64_t pages_alloced;
	_Atomic uint64_t pages_freed;
	_Atomic uint64_t time_allocating;
	_Atomic uint64_t max_alloc_latency;
	_Atomic uint64_t alloc_latency_byhighbit[16]; /* highbit(MCT end - MCT start) / 4 */
} exclaves_allocation_statistics_t;
65
66 exclaves_allocation_statistics_t exclaves_allocation_statistics;
67
68 CA_EVENT(ca_exclaves_allocation_statistics,
69 CA_INT, pages_alloced,
70 CA_INT, pages_freed,
71 CA_INT, time_allocating,
72 CA_INT, max_alloc_latency,
73 CA_INT, alloc_latency_highbit0,
74 CA_INT, alloc_latency_highbit1,
75 CA_INT, alloc_latency_highbit2,
76 CA_INT, alloc_latency_highbit3,
77 CA_INT, alloc_latency_highbit4,
78 CA_INT, alloc_latency_highbit5,
79 CA_INT, alloc_latency_highbit6,
80 CA_INT, alloc_latency_highbit7,
81 CA_INT, alloc_latency_highbit8,
82 CA_INT, alloc_latency_highbit9,
83 CA_INT, alloc_latency_highbit10,
84 CA_INT, alloc_latency_highbit11,
85 CA_INT, alloc_latency_highbit12,
86 CA_INT, alloc_latency_highbit13,
87 CA_INT, alloc_latency_highbit14,
88 CA_INT, alloc_latency_highbit15);
89
void
exclaves_memory_report_accounting(void)
{
	ca_event_t event = CA_EVENT_ALLOCATE(ca_exclaves_allocation_statistics);
	CA_EVENT_TYPE(ca_exclaves_allocation_statistics) * e = event->data;

	e->pages_alloced = os_atomic_load(&exclaves_allocation_statistics.pages_alloced, relaxed);
	e->pages_freed = os_atomic_load(&exclaves_allocation_statistics.pages_freed, relaxed);
	e->time_allocating = os_atomic_load(&exclaves_allocation_statistics.time_allocating, relaxed);
	e->max_alloc_latency = os_atomic_load(&exclaves_allocation_statistics.max_alloc_latency, relaxed);
	e->alloc_latency_highbit0 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[0], relaxed);
	e->alloc_latency_highbit1 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[1], relaxed);
	e->alloc_latency_highbit2 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[2], relaxed);
	e->alloc_latency_highbit3 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[3], relaxed);
	e->alloc_latency_highbit4 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[4], relaxed);
	e->alloc_latency_highbit5 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[5], relaxed);
	e->alloc_latency_highbit6 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[6], relaxed);
	e->alloc_latency_highbit7 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[7], relaxed);
	e->alloc_latency_highbit8 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[8], relaxed);
	e->alloc_latency_highbit9 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[9], relaxed);
	e->alloc_latency_highbit10 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[10], relaxed);
	e->alloc_latency_highbit11 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[11], relaxed);
	e->alloc_latency_highbit12 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[12], relaxed);
	e->alloc_latency_highbit13 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[13], relaxed);
	e->alloc_latency_highbit14 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[14], relaxed);
	e->alloc_latency_highbit15 = os_atomic_load(&exclaves_allocation_statistics.alloc_latency_byhighbit[15], relaxed);

	CA_EVENT_SEND(event);
}

static_assert(
    (EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN == XNUUPCALLS_PAGEKIND_ROOTDOMAIN) &&
    (EXCLAVES_MEMORY_PAGEKIND_CONCLAVE == XNUUPCALLS_PAGEKIND_CONCLAVE),
    "xnuupcalls_pagekind_s mismatch");
static_assert(
    (EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN == XNUUPCALLSV2_PAGEKIND_ROOTDOMAIN) &&
    (EXCLAVES_MEMORY_PAGEKIND_CONCLAVE == XNUUPCALLSV2_PAGEKIND_CONCLAVE),
    "xnuupcallsv2_pagekind_s mismatch");

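/*
 * Choose the ledger to charge for exclaves memory: root-domain pages are
 * billed to the kernel task, while conclave pages are billed to the task
 * whose conclave this thread is stopping, if any, and otherwise to the
 * current task.
 */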
static ledger_t
get_conclave_mem_ledger(exclaves_memory_pagekind_t kind)
{
	ledger_t ledger;
	switch (kind) {
	case EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN:
		ledger = kernel_task->ledger;
		break;
	case EXCLAVES_MEMORY_PAGEKIND_CONCLAVE:
		if (current_thread()->conclave_stop_task != NULL) {
			ledger = current_thread()->conclave_stop_task->ledger;
		} else {
			ledger = current_task()->ledger;
		}
		break;
	default:
		panic("unrecognized exclaves pagekind %u", (unsigned)kind);
		break;
	}
	return ledger;
}


/* -------------------------------------------------------------------------- */
#pragma mark Allocation/Free

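/*
 * Allocate npages physical pages on behalf of exclaves, returning their
 * physical page numbers in pages[]. Each page is wired, inserted into the
 * exclaves VM object and retyped via the SPTM from xnu-owned to SK-owned.
 * Allocation uses KMA_NOFAIL, so this routine cannot fail; it also updates
 * the allocation statistics and credits the conclave_mem ledger.
 */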
void
exclaves_memory_alloc(const uint32_t npages, uint32_t *pages, const exclaves_memory_pagekind_t kind, const exclaves_memory_page_flags_t flags)
{
	uint32_t pages_left = npages;
	vm_page_t page_list = NULL;
	vm_page_t sequestered = NULL;
	unsigned p = 0;

	uint64_t start_time = mach_continuous_approximate_time();
	kma_flags_t kma_flags = KMA_NOFAIL;
	vm_object_t vm_obj = exclaves_object;

#if HAS_MTE
	/**
	 * Avoid specifying KMA_TAG if MTE has been disabled by boot arg.
	 * Otherwise, sptm_retype() will panic if asked to produce a tagged SK page
	 * without tag storage space to back it.
	 */
	if ((flags & EXCLAVES_MEMORY_PAGE_FLAGS_MTE_TAGGED) && is_mte_enabled) {
		kma_flags |= KMA_TAG;
		vm_obj = exclaves_object_tagged;
	}
#else /* !HAS_MTE */
	(void)flags;
#endif /* HAS_MTE */

	while (pages_left) {
		vm_page_t next;
		vm_page_alloc_list(pages_left, kma_flags, &page_list);

		vm_object_lock(vm_obj);
		for (vm_page_t mem = page_list; mem != VM_PAGE_NULL; mem = next) {
			next = mem->vmp_snext;
			if (!vm_page_in_array(mem)) {
				// avoid ml_static_mfree() pages due to 117505258
				mem->vmp_snext = sequestered;
				sequestered = mem;
				continue;
			}
			mem->vmp_snext = NULL;

			vm_page_lock_queues();
			vm_page_wire(mem, VM_KERN_MEMORY_EXCLAVES, FALSE);
			vm_page_unlock_queues();
			/* Insert the page into the exclaves object */
			vm_page_insert_wired(mem, vm_obj,
			    ptoa(VM_PAGE_GET_PHYS_PAGE(mem)),
			    VM_KERN_MEMORY_EXCLAVES);

			/* Retype via SPTM to SK owned */
			sptm_retype_params_t retype_params = {
				.raw = SPTM_RETYPE_PARAMS_NULL
			};
#if HAS_MTE
			if (kma_flags & KMA_TAG) {
				retype_params.sk_flags |= SPTM_SK_PAGE_FLAGS_TAGGABLE;
				pmap_unmake_tagged_page(VM_PAGE_GET_PHYS_PAGE(mem));
			}
#endif /* HAS_MTE */
			sptm_retype(ptoa(VM_PAGE_GET_PHYS_PAGE(mem)),
			    XNU_DEFAULT, SK_DEFAULT, retype_params);

			pages[p++] = VM_PAGE_GET_PHYS_PAGE(mem);
			pages_left--;
		}
		vm_object_unlock(vm_obj);
	}

	vm_page_free_list(sequestered, FALSE);

	uint64_t elapsed_time = mach_continuous_approximate_time() - start_time;

	os_atomic_add(&exclaves_allocation_statistics.pages_alloced, npages, relaxed);
	os_atomic_add(&exclaves_allocation_statistics.time_allocating, elapsed_time, relaxed);
	os_atomic_max(&exclaves_allocation_statistics.max_alloc_latency, elapsed_time, relaxed);
	/* Bucket the latency by highbit(elapsed_time) / 4 (buckets 0..15). */
	const uint64_t highbit = (elapsed_time == 0) ? 0 : (uint64_t)(63 - __builtin_clzll(elapsed_time));
	os_atomic_add(&exclaves_allocation_statistics.alloc_latency_byhighbit[highbit / 4], elapsed_time, relaxed);

	ledger_t ledger = get_conclave_mem_ledger(kind);
	kern_return_t ledger_ret = ledger_credit(ledger,
	    task_ledgers.conclave_mem,
	    (ledger_amount_t) (npages * PAGE_SIZE));
	if (ledger_ret != KERN_SUCCESS) {
		panic("Ledger credit failed. count %u error code %d",
		    npages,
		    ledger_ret);
	}
}

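/*
 * Give npages physical pages (identified by page number in pages[]) back
 * to xnu. Each page must already have been retyped back to XNU_DEFAULT;
 * it is looked up in the exclaves object, re-tagged if it belongs to the
 * MTE-tagged object, and freed. The free count and the conclave_mem
 * ledger are updated accordingly.
 */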
void
exclaves_memory_free(const uint32_t npages, const uint32_t *pages, const exclaves_memory_pagekind_t kind, const exclaves_memory_page_flags_t flags)
{
	vm_object_t vm_obj = exclaves_object;
#if HAS_MTE
	if (flags & EXCLAVES_MEMORY_PAGE_FLAGS_MTE_TAGGED) {
		vm_obj = exclaves_object_tagged;
	}
#else /* !HAS_MTE */
	(void)flags;
#endif /* HAS_MTE */

	vm_object_lock(vm_obj);
	for (size_t p = 0; p < npages; p++) {
		/* Find the page in the exclaves object. */
		vm_page_t m;
		m = vm_page_lookup(vm_obj, ptoa(pages[p]));

		/* Assert we found the page */
		assert(m != VM_PAGE_NULL);

		/* Via SPTM, verify the page type is something ownable by xnu. */
		assert3u(sptm_get_frame_type(ptoa(VM_PAGE_GET_PHYS_PAGE(m))),
		    ==, XNU_DEFAULT);

#if HAS_MTE
		if (vm_obj == exclaves_object_tagged) {
			/*
			 * pmap_make_tagged_page() works lazily, so clear
			 * vmp_using_mte while re-tagging the page and set it
			 * again once the page is tagged.
			 */
			m->vmp_using_mte = false;
			pmap_make_tagged_page(VM_PAGE_GET_PHYS_PAGE(m));
			m->vmp_using_mte = true;
		}
#endif /* HAS_MTE */

		/* Free the page */
		vm_page_lock_queues();
		vm_page_free(m);
		vm_page_unlock_queues();
	}
	vm_object_unlock(vm_obj);

	os_atomic_add(&exclaves_allocation_statistics.pages_freed, npages, relaxed);

	ledger_t ledger = get_conclave_mem_ledger(kind);
	kern_return_t ledger_ret = ledger_debit(ledger,
	    task_ledgers.conclave_mem,
	    (ledger_amount_t) (npages * PAGE_SIZE));
	if (ledger_ret != KERN_SUCCESS) {
		panic("Ledger debit failed. count %u error code %d",
		    npages,
		    ledger_ret);
	}
}

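/*
 * Panic unless the SPTM frame type of the page permits a mapping with the
 * requested protection: writable mappings require SK_SHARED_RW, while
 * read-only mappings accept either SK_SHARED_RW or SK_SHARED_RO.
 */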
static void
validate_for_mapping(uint32_t page, vm_prot_t prot)
{
	const sptm_frame_type_t type = sptm_get_frame_type(ptoa(page));

	// Mapping RW and type is SK_SHARED_RW.
	if (type == SK_SHARED_RW && (prot & VM_PROT_WRITE) != 0) {
		return;
	}

	// Mapping RO and type is SK_SHARED_RW or SK_SHARED_RO.
	if ((type == SK_SHARED_RW || type == SK_SHARED_RO) &&
	    (prot & VM_PROT_WRITE) == 0) {
		return;
	}

	// Mismatch of type and prot.
	panic("trying to map exclaves memory (prot: %u) "
	    "but memory is of the wrong type (%u)", prot, type);
}

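/*
 * Map npages exclaves-shared pages contiguously into the kernel map with
 * the given protection, backed by a coherent device pager, and wire the
 * resulting range so that it is faulted in up-front. On success, *address
 * holds the base of the new mapping.
 *
 * Minimal usage sketch (assumes pages[] already holds the page numbers of
 * SK-shared memory):
 *
 *	char *addr = NULL;
 *	kern_return_t kr = exclaves_memory_map(npages, pages, VM_PROT_READ, &addr);
 *	if (kr == KERN_SUCCESS) {
 *		// ... access the shared memory ...
 *		kr = exclaves_memory_unmap(addr, npages * PAGE_SIZE);
 *	}
 */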
kern_return_t
exclaves_memory_map(uint32_t npages, const uint32_t *pages, vm_prot_t prot,
    char **address)
{
	assert3u(npages, >, 0);

	kern_return_t kr = KERN_FAILURE;
	const vm_map_kernel_flags_t vmk_flags = {
		.vmf_fixed = false,
		.vm_tag = VM_KERN_MEMORY_EXCLAVES_SHARED,
	};
	const vm_size_t size = npages * PAGE_SIZE;

	memory_object_t pager = device_pager_setup((memory_object_t)NULL,
	    (uintptr_t)NULL, size, DEVICE_PAGER_COHERENT);
	assert3p(pager, !=, NULL);

	for (uint32_t i = 0; i < npages; i++) {
		validate_for_mapping(pages[i], prot);

		kr = device_pager_populate_object(pager, ptoa(i), pages[i],
		    PAGE_SIZE);
		if (kr != KERN_SUCCESS) {
			device_pager_deallocate(pager);
			return kr;
		}
	}

	ipc_port_t entry = IPC_PORT_NULL;
	kr = mach_memory_object_memory_entry_64((host_t)1, false, size,
	    prot, pager, &entry);
	if (kr != KERN_SUCCESS) {
		device_pager_deallocate(pager);
		return kr;
	}

	kr = mach_vm_map_kernel(kernel_map, (mach_vm_offset_ut *)address, size, 0, vmk_flags, entry,
	    0, FALSE, prot, prot, VM_INHERIT_DEFAULT);

	mach_memory_entry_port_release(entry);

	if (kr != KERN_SUCCESS) {
		device_pager_deallocate(pager);
		return kr;
	}

	device_pager_deallocate(pager);

	/*
	 * Wire the mapping so that it's paged in up-front. The underlying
	 * pages are already wired via exclaves_memory_alloc().
	 */
	const vm_map_offset_ut start = *(vm_map_offset_ut *)address;
	kr = vm_map_wire_kernel(kernel_map, start, start + size, prot,
	    VM_KERN_MEMORY_EXCLAVES_SHARED, false);
	if (kr != KERN_SUCCESS) {
		mach_vm_deallocate(kernel_map, start, size);
		return kr;
	}

	return KERN_SUCCESS;
}

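/*
 * Tear down a mapping created by exclaves_memory_map(): unwire the range,
 * then deallocate it from the kernel map.
 */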
kern_return_t
exclaves_memory_unmap(char *address, size_t size)
{
	kern_return_t kr = KERN_FAILURE;

	const vm_map_offset_ut start = (vm_map_offset_ut)address;
	kr = vm_map_unwire(kernel_map, start, start + size, false);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	kr = mach_vm_deallocate(kernel_map, (mach_vm_address_t)address, size);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	return KERN_SUCCESS;
}

/* -------------------------------------------------------------------------- */
#pragma mark Upcalls

/* Legacy upcall handlers */

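/*
 * These wrappers adapt the original fixed-size Tightbeam upcall types to
 * exclaves_memory_alloc()/exclaves_memory_free(). The static_asserts above
 * ensure the legacy pagekind values can be cast straight through.
 */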
tb_error_t
exclaves_memory_upcall_legacy_alloc(uint32_t npages, xnuupcalls_pagekind_s kind,
    tb_error_t (^completion)(xnuupcalls_pagelist_s))
{
	xnuupcalls_pagelist_s pagelist = {};

	assert3u(npages, <=, ARRAY_COUNT(pagelist.pages));
	if (npages > ARRAY_COUNT(pagelist.pages)) {
		panic("npages");
	}

	exclaves_memory_alloc(npages, pagelist.pages,
	    (exclaves_memory_pagekind_t) kind,
	    EXCLAVES_MEMORY_PAGE_FLAGS_NONE);
	return completion(pagelist);
}

tb_error_t
exclaves_memory_upcall_legacy_alloc_ext(uint32_t npages, xnuupcalls_pageallocflags_s flags,
    tb_error_t (^completion)(xnuupcalls_pagelist_s))
{
	xnuupcalls_pagelist_s pagelist = {};
	exclaves_memory_pagekind_t kind = EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN;
	exclaves_memory_page_flags_t alloc_flags = EXCLAVES_MEMORY_PAGE_FLAGS_NONE;

	assert3u(npages, <=, ARRAY_COUNT(pagelist.pages));
	if (npages > ARRAY_COUNT(pagelist.pages)) {
		panic("npages");
	}

	if (flags & XNUUPCALLS_PAGEALLOCFLAGS_CONCLAVE) {
		kind = EXCLAVES_MEMORY_PAGEKIND_CONCLAVE;
	}
#if HAS_MTE
	if (flags & XNUUPCALLS_PAGEALLOCFLAGS_SEC_TRANSITION) {
		alloc_flags |= EXCLAVES_MEMORY_PAGE_FLAGS_MTE_TAGGED;
	}
#endif /* HAS_MTE */
	exclaves_memory_alloc(npages, pagelist.pages, kind, alloc_flags);
	return completion(pagelist);
}

tb_error_t
exclaves_memory_upcall_legacy_free(const uint32_t pages[EXCLAVES_MEMORY_MAX_REQUEST],
    uint32_t npages, const xnuupcalls_pagekind_s kind,
    tb_error_t (^completion)(void))
{
	/* Bounds-check the requested page count. */
	assert(npages <= EXCLAVES_MEMORY_MAX_REQUEST);
	if (npages > EXCLAVES_MEMORY_MAX_REQUEST) {
		panic("npages");
	}

	exclaves_memory_free(npages, pages, (exclaves_memory_pagekind_t) kind, EXCLAVES_MEMORY_PAGE_FLAGS_NONE);

	return completion();
}

tb_error_t
exclaves_memory_upcall_legacy_free_ext(const uint32_t pages[EXCLAVES_MEMORY_MAX_REQUEST],
    uint32_t npages, const xnuupcalls_pagefreeflags_s flags,
    tb_error_t (^completion)(void))
{
	exclaves_memory_pagekind_t kind = EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN;
	exclaves_memory_page_flags_t free_flags = EXCLAVES_MEMORY_PAGE_FLAGS_NONE;

	/* Bounds-check the requested page count. */
	assert(npages <= EXCLAVES_MEMORY_MAX_REQUEST);
	if (npages > EXCLAVES_MEMORY_MAX_REQUEST) {
		panic("npages");
	}
	if (flags & XNUUPCALLS_PAGEFREEFLAGS_CONCLAVE) {
		kind = EXCLAVES_MEMORY_PAGEKIND_CONCLAVE;
	}
#if HAS_MTE
	if (flags & XNUUPCALLS_PAGEFREEFLAGS_SEC_TRANSITION) {
		free_flags |= EXCLAVES_MEMORY_PAGE_FLAGS_MTE_TAGGED;
	}
#endif /* HAS_MTE */

	exclaves_memory_free(npages, pages, kind, free_flags);

	return completion();
}


/* Upcall handlers */

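/*
 * V2 upcall handlers. The v2 pagelist is a variable-length vector rather
 * than a fixed-size array, so page numbers are staged through a local
 * buffer bounded by EXCLAVES_MEMORY_MAX_REQUEST.
 */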
tb_error_t
exclaves_memory_upcall_alloc(uint32_t npages, xnuupcallsv2_pagekind_s kind,
    tb_error_t (^completion)(xnuupcallsv2_pagelist_s))
{
	uint32_t pages[EXCLAVES_MEMORY_MAX_REQUEST];
	xnuupcallsv2_pagelist_s pagelist = {};

	assert3u(npages, <=, EXCLAVES_MEMORY_MAX_REQUEST);
	if (npages > EXCLAVES_MEMORY_MAX_REQUEST) {
		panic("npages");
	}

	exclaves_memory_alloc(npages, pages,
	    (exclaves_memory_pagekind_t) kind,
	    EXCLAVES_MEMORY_PAGE_FLAGS_NONE);

	u32__v_assign_unowned(&pagelist, pages, npages);

	return completion(pagelist);
}

tb_error_t
exclaves_memory_upcall_alloc_ext(uint32_t npages, xnuupcallsv2_pageallocflagsv2_s flags,
    tb_error_t (^completion)(xnuupcallsv2_pagelist_s))
{
	uint32_t pages[EXCLAVES_MEMORY_MAX_REQUEST];
	xnuupcallsv2_pagelist_s pagelist = {};
	exclaves_memory_pagekind_t kind = EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN;
	exclaves_memory_page_flags_t alloc_flags = EXCLAVES_MEMORY_PAGE_FLAGS_NONE;

	assert3u(npages, <=, EXCLAVES_MEMORY_MAX_REQUEST);
	if (npages > EXCLAVES_MEMORY_MAX_REQUEST) {
		panic("npages");
	}

	if (flags & XNUUPCALLSV2_PAGEALLOCFLAGSV2_CONCLAVE) {
		kind = EXCLAVES_MEMORY_PAGEKIND_CONCLAVE;
	}
#if HAS_MTE
	if (flags & XNUUPCALLSV2_PAGEALLOCFLAGSV2_SEC_TRANSITION) {
		alloc_flags |= EXCLAVES_MEMORY_PAGE_FLAGS_MTE_TAGGED;
	}
#endif /* HAS_MTE */

	exclaves_memory_alloc(npages, pages, kind, alloc_flags);

	u32__v_assign_unowned(&pagelist, pages, npages);

	return completion(pagelist);
}


tb_error_t
exclaves_memory_upcall_free(const xnuupcallsv2_pagelist_s pages,
    const xnuupcallsv2_pagekind_s kind, tb_error_t (^completion)(void))
{
	uint32_t _pages[EXCLAVES_MEMORY_MAX_REQUEST];
	uint32_t *pages_ptr = _pages;
	uint32_t __block npages = 0;

	u32__v_visit(&pages, ^(size_t i, const uint32_t page) {
		if (++npages > EXCLAVES_MEMORY_MAX_REQUEST) {
			panic("npages");
		}
		pages_ptr[i] = page;
	});

	exclaves_memory_free(npages, _pages, (exclaves_memory_pagekind_t) kind, EXCLAVES_MEMORY_PAGE_FLAGS_NONE);

	return completion();
}

tb_error_t
exclaves_memory_upcall_free_ext(const xnuupcallsv2_pagelist_s pages,
    const xnuupcallsv2_pagefreeflagsv2_s flags, tb_error_t (^completion)(void))
{
	uint32_t _pages[EXCLAVES_MEMORY_MAX_REQUEST];
	uint32_t *pages_ptr = _pages;
	uint32_t __block npages = 0;
	exclaves_memory_pagekind_t kind = EXCLAVES_MEMORY_PAGEKIND_ROOTDOMAIN;
	exclaves_memory_page_flags_t free_flags = EXCLAVES_MEMORY_PAGE_FLAGS_NONE;

	u32__v_visit(&pages, ^(size_t i, const uint32_t page) {
		if (++npages > EXCLAVES_MEMORY_MAX_REQUEST) {
			panic("npages");
		}
		pages_ptr[i] = page;
	});

	if (flags & XNUUPCALLSV2_PAGEFREEFLAGSV2_CONCLAVE) {
		kind = EXCLAVES_MEMORY_PAGEKIND_CONCLAVE;
	}
#if HAS_MTE
	if (flags & XNUUPCALLSV2_PAGEFREEFLAGSV2_SEC_TRANSITION) {
		free_flags |= EXCLAVES_MEMORY_PAGE_FLAGS_MTE_TAGGED;
	}
#endif /* HAS_MTE */

	exclaves_memory_free(npages, _pages, kind, free_flags);

	return completion();
}


/* -------------------------------------------------------------------------- */
#pragma mark Carveout memory accounting

// Size of the iBoot-loaded ExclaveCoreBundle in bytes.
// This is also part of VM_KERN_COUNT_BOOT_STOLEN / ml_get_booter_memory_size.
uint64_t exclaves_bundle_size = 0;
// Size of the SPTM-managed Exclaves carveout in bytes.
uint64_t exclaves_carveout_size = 0;

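/*
 * Sum the sizes of the CL4 memory-map ranges published by iBoot in the
 * device tree to compute the bundle size, record the SPTM carveout size,
 * and credit the carveout to the kernel task's conclave_mem ledger.
 */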
__startup_func
static void
initialize_exclaves_bundle_bytes(void)
{
	int err;
	DTEntry memory_map;

	err = SecureDTLookupEntry(NULL, "chosen/memory-map", &memory_map);

	const char *CL4_Properties[] = {
		"CL4-ro", "CL4-rx", "CL4-bx", "CL4-rw", "CL4-le"
	};

	/*
	 * Only walk the CL4 ranges if the memory-map lookup succeeded;
	 * otherwise leave exclaves_bundle_size at 0.
	 */
	if (err == kSuccess) {
		for (size_t i = 0; i < ARRAY_COUNT(CL4_Properties); i++) {
			unsigned int range_size;
			DTMemoryMapRange const *range;

			err = SecureDTGetProperty(memory_map, CL4_Properties[i],
			    (void const **)&range, &range_size);
			if (err == kSuccess && range_size == sizeof(DTMemoryMapRange)) {
				if (range->length != SIZE_MAX) {
					exclaves_bundle_size += range->length;
				}
			}
		}
	}

	exclaves_carveout_size = SPTMArgs->sk_carveout_size;

	/*
	 * Credit the carveout size to kernel_task's conclave_mem ledger so that
	 * exclaves memory accounting includes the initial carveout allocation.
	 */
	kern_return_t ledger_ret = ledger_credit(kernel_task->ledger,
	    task_ledgers.conclave_mem,
	    (ledger_amount_t)exclaves_carveout_size);
	if (ledger_ret != KERN_SUCCESS) {
		panic("Ledger credit failed for exclaves carveout, error code %d",
		    ledger_ret);
	}
}

STARTUP(EXCLAVES, STARTUP_RANK_MIDDLE, initialize_exclaves_bundle_bytes);

#endif /* CONFIG_EXCLAVES */