1 /*
2 * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/queue.h>
46 #include <kern/thread.h>
47 #include <kern/ipc_kobject.h>
48
49 #include <vm/memory_object_internal.h>
50 #include <vm/vm_kern.h>
51 #include <vm/vm_fault_internal.h>
52 #include <vm/vm_map.h>
53 #include <vm/vm_pageout_xnu.h>
54 #include <vm/vm_protos_internal.h>
55 #include <vm/vm_shared_region_internal.h>
56 #include <vm/vm_ubc.h>
57 #include <vm/vm_page_internal.h>
58 #include <vm/vm_object_internal.h>
59
60 #include <sys/kdebug_triage.h>
61 #include <sys/random.h>
62
63 #if __has_feature(ptrauth_calls)
64 #include <ptrauth.h>
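/*
 * When diversify_user_jop is FALSE, shared_region_key_alloc() below uses
 * ml_default_jop_pid() rather than a per-shared-region signing key.
 */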
65 extern boolean_t diversify_user_jop;
66 #endif /* __has_feature(ptrauth_calls) */
67
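/*
 * When set, failures in this pager (UPL setup, slide errors) panic rather
 * than returning an error to the faulting thread.
 */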
68 extern int panic_on_dyld_issue;
69
70 /*
71 * SHARED REGION MEMORY PAGER
72 *
73 * This external memory manager (EMM) handles mappings of a dyld shared cache
74 * in shared regions, applying any necessary modifications (sliding,
75 * pointer signing, ...).
76 *
77 * It mostly handles page-in requests (from memory_object_data_request()) by
78 * getting the original data from its backing VM object, itself backed by
79 * the dyld shared cache file, modifying it if needed and providing it to VM.
80 *
81 * The modified pages will never be dirtied, so the memory manager doesn't
82 * need to handle page-out requests (from memory_object_data_return()). The
83 * pages need to be mapped copy-on-write, so that the originals stay clean.
84 *
85 * We don't expect to have to handle a large number of shared cache files,
86  * so the data structures are very simple (a simple linked list) for now.
87 */
88
89 /* forward declarations */
90 void shared_region_pager_reference(memory_object_t mem_obj);
91 void shared_region_pager_deallocate(memory_object_t mem_obj);
92 kern_return_t shared_region_pager_init(memory_object_t mem_obj,
93 memory_object_control_t control,
94 memory_object_cluster_size_t pg_size);
95 kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
96 kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
97 memory_object_offset_t offset,
98 memory_object_cluster_size_t length,
99 vm_prot_t protection_required,
100 memory_object_fault_info_t fault_info);
101 kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
102 memory_object_offset_t offset,
103 memory_object_cluster_size_t data_cnt,
104 memory_object_offset_t *resid_offset,
105 int *io_error,
106 boolean_t dirty,
107 boolean_t kernel_copy,
108 int upl_flags);
109 kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
110 memory_object_offset_t offset,
111 memory_object_cluster_size_t data_cnt);
112 kern_return_t shared_region_pager_map(memory_object_t mem_obj,
113 vm_prot_t prot);
114 kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
115 boolean_t shared_region_pager_backing_object(
116 memory_object_t mem_obj,
117 memory_object_offset_t mem_obj_offset,
118 vm_object_t *backing_object,
119 vm_object_offset_t *backing_offset);
120
121 /*
122 * Vector of VM operations for this EMM.
123 * These routines are invoked by VM via the memory_object_*() interfaces.
124 */
125 const struct memory_object_pager_ops shared_region_pager_ops = {
126 .memory_object_reference = shared_region_pager_reference,
127 .memory_object_deallocate = shared_region_pager_deallocate,
128 .memory_object_init = shared_region_pager_init,
129 .memory_object_terminate = shared_region_pager_terminate,
130 .memory_object_data_request = shared_region_pager_data_request,
131 .memory_object_data_return = shared_region_pager_data_return,
132 .memory_object_data_initialize = shared_region_pager_data_initialize,
133 .memory_object_map = shared_region_pager_map,
134 .memory_object_last_unmap = shared_region_pager_last_unmap,
135 .memory_object_backing_object = shared_region_pager_backing_object,
136 .memory_object_pager_name = "shared_region"
137 };
138
139 #if __has_feature(ptrauth_calls)
140 /*
141 * Track mappings between shared_region_id and the key used to sign
142 * authenticated pointers.
143 */
144 typedef struct shared_region_jop_key_map {
145 queue_chain_t srk_queue;
146 char *srk_shared_region_id;
147 uint64_t srk_jop_key;
148 os_refcnt_t srk_ref_count; /* count of tasks active with this shared_region_id */
149 } *shared_region_jop_key_map_t;
150
151 os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);
152
153 /*
154 * The list is protected by the "shared_region_key_map" lock.
155 */
156 int shared_region_key_count = 0; /* number of active shared_region_id keys */
157 queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
158 LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
159 LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);
160
161 #if __has_feature(ptrauth_calls)
162 /*
163 * Generate a random pointer signing key that isn't 0.
164 */
165 uint64_t
166 generate_jop_key(void)
167 {
168 uint64_t key;
169
170 do {
171 read_random(&key, sizeof key);
172 } while (key == 0);
173 return key;
174 }
175 #endif /* __has_feature(ptrauth_calls) */
176
177 /*
178  * Find the pointer signing key for the given shared_region_id.
179 */
180 uint64_t
181 shared_region_find_key(char *shared_region_id)
182 {
183 shared_region_jop_key_map_t region;
184 uint64_t key;
185
186 lck_mtx_lock(&shared_region_jop_key_lock);
187 queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
188 if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
189 goto found;
190 }
191 }
192 panic("shared_region_find_key() no key for region '%s'", shared_region_id);
193
194 found:
195 key = region->srk_jop_key;
196 lck_mtx_unlock(&shared_region_jop_key_lock);
197 return key;
198 }
199
200 /*
201  * Find or allocate the authentication key to use for the given shared_region_id.
202 * If inherit is TRUE, then the key must match inherited_key.
203 * Creates an additional reference when successful.
204 */
205 void
206 shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
207 {
208 shared_region_jop_key_map_t region;
209 shared_region_jop_key_map_t new = NULL;
210
211 assert(shared_region_id != NULL);
212 again:
213 lck_mtx_lock(&shared_region_jop_key_lock);
214 queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
215 if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
216 os_ref_retain_locked(&region->srk_ref_count);
217 goto done;
218 }
219 }
220
221 /*
222  * The ID was not found. On the first pass, allocate a new entry and redo the lookup.
223 */
224 if (new == NULL) {
225 lck_mtx_unlock(&shared_region_jop_key_lock);
226 new = kalloc_type(struct shared_region_jop_key_map, Z_WAITOK);
227 uint_t len = strlen(shared_region_id) + 1;
228 new->srk_shared_region_id = kalloc_data(len, Z_WAITOK);
229 strlcpy(new->srk_shared_region_id, shared_region_id, len);
230 os_ref_init(&new->srk_ref_count, &srk_refgrp);
231
232 if (diversify_user_jop && inherit) {
233 new->srk_jop_key = inherited_key;
234 } else if (diversify_user_jop && strlen(shared_region_id) > 0) {
235 new->srk_jop_key = generate_jop_key();
236 } else {
237 new->srk_jop_key = ml_default_jop_pid();
238 }
239
240 goto again;
241 }
242
243 /*
244 * Use the newly allocated entry
245 */
246 ++shared_region_key_count;
247 queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
248 region = new;
249 new = NULL;
250
251 done:
252 if (inherit && inherited_key != region->srk_jop_key) {
253 panic("shared_region_key_alloc() inherited key mismatch");
254 }
255 lck_mtx_unlock(&shared_region_jop_key_lock);
256
257 /*
258 * free any unused new entry
259 */
260 if (new != NULL) {
261 kfree_data(new->srk_shared_region_id,
262 strlen(new->srk_shared_region_id) + 1);
263 kfree_type(struct shared_region_jop_key_map, new);
264 }
265 }
266
267 /*
268 * Mark the end of using a shared_region_id's key
269 */
270 extern void
271 shared_region_key_dealloc(char *shared_region_id)
272 {
273 shared_region_jop_key_map_t region;
274
275 assert(shared_region_id != NULL);
276 lck_mtx_lock(&shared_region_jop_key_lock);
277 queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
278 if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
279 goto done;
280 }
281 }
282 panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);
283
284 done:
285 if (os_ref_release_locked(&region->srk_ref_count) == 0) {
286 queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
287 --shared_region_key_count;
288 } else {
289 region = NULL;
290 }
291 lck_mtx_unlock(&shared_region_jop_key_lock);
292
293 if (region != NULL) {
294 kfree_data(region->srk_shared_region_id,
295 strlen(region->srk_shared_region_id) + 1);
296 kfree_type(struct shared_region_jop_key_map, region);
297 }
298 }
299 #endif /* __has_feature(ptrauth_calls) */
300
301 /*
302 * The "shared_region_pager" describes a memory object backed by
303 * the "shared_region" EMM.
304 */
305 typedef struct shared_region_pager {
306 struct memory_object srp_header; /* mandatory generic header */
307
308 /* pager-specific data */
309 queue_chain_t srp_queue; /* next & prev pagers */
310 #if MEMORY_OBJECT_HAS_REFCOUNT
311 #define srp_ref_count srp_header.mo_ref
312 #else
313 os_ref_atomic_t srp_ref_count; /* active uses */
314 #endif
315 bool srp_is_mapped; /* has active mappings */
316 bool srp_is_ready; /* is this pager ready? */
317 vm_object_t srp_backing_object; /* VM object for shared cache */
318 vm_object_offset_t srp_backing_offset;
319 vm_shared_region_slide_info_t srp_slide_info;
320 #if __has_feature(ptrauth_calls)
321 uint64_t srp_jop_key; /* zero if used for arm64 */
322 #endif /* __has_feature(ptrauth_calls) */
323 } *shared_region_pager_t;
324 #define SHARED_REGION_PAGER_NULL ((shared_region_pager_t) NULL)
325
326 /*
327 * List of memory objects managed by this EMM.
328 * The list is protected by the "shared_region_pager_lock" lock.
329 */
330 int shared_region_pager_count = 0; /* number of pagers */
331 int shared_region_pager_count_mapped = 0;  /* number of mapped pagers */
332 queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
333 LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
334 LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);
335
336 /*
337 * Maximum number of unmapped pagers we're willing to keep around.
338 */
339 int shared_region_pager_cache_limit = 0;
340
341 /*
342 * Statistics & counters.
343 */
344 int shared_region_pager_count_max = 0;
345 int shared_region_pager_count_unmapped_max = 0;
346 int shared_region_pager_num_trim_max = 0;
347 int shared_region_pager_num_trim_total = 0;
348
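/* page-in statistics: chunks copied as-is, chunks slid, slide failures (see data_request()) */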
349 uint64_t shared_region_pager_copied = 0;
350 uint64_t shared_region_pager_slid = 0;
351 uint64_t shared_region_pager_slid_error = 0;
352 uint64_t shared_region_pager_reclaimed = 0;
353
354 /* internal prototypes */
355 shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
356 void shared_region_pager_dequeue(shared_region_pager_t pager);
357 void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
358 boolean_t locked);
359 void shared_region_pager_terminate_internal(shared_region_pager_t pager);
360 void shared_region_pager_trim(void);
361
362
363 #if DEBUG
364 int shared_region_pagerdebug = 0;
365 #define PAGER_ALL 0xffffffff
366 #define PAGER_INIT 0x00000001
367 #define PAGER_PAGEIN 0x00000002
368
369 #define PAGER_DEBUG(LEVEL, A) \
370 MACRO_BEGIN \
371 if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) { \
372 printf A; \
373 } \
374 MACRO_END
375 #else
376 #define PAGER_DEBUG(LEVEL, A)
377 #endif
378
379 /*
380 * shared_region_pager_init()
381 *
382  * Initializes the memory object and makes it ready to be used and mapped.
383 */
384 kern_return_t
385 shared_region_pager_init(
386 memory_object_t mem_obj,
387 memory_object_control_t control,
388 #if !DEBUG
389 __unused
390 #endif
391 memory_object_cluster_size_t pg_size)
392 {
393 shared_region_pager_t pager;
394 kern_return_t kr;
395 memory_object_attr_info_data_t attributes;
396
397 PAGER_DEBUG(PAGER_ALL,
398 ("shared_region_pager_init: %p, %p, %x\n",
399 mem_obj, control, pg_size));
400
401 if (control == MEMORY_OBJECT_CONTROL_NULL) {
402 return KERN_INVALID_ARGUMENT;
403 }
404
405 pager = shared_region_pager_lookup(mem_obj);
406
407 memory_object_control_reference(control);
408
409 pager->srp_header.mo_control = control;
410
411 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
412 /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
413 attributes.cluster_size = (1 << (PAGE_SHIFT));
414 attributes.may_cache_object = FALSE;
415 attributes.temporary = TRUE;
416
417 kr = memory_object_change_attributes(
418 control,
419 MEMORY_OBJECT_ATTRIBUTE_INFO,
420 (memory_object_info_t) &attributes,
421 MEMORY_OBJECT_ATTR_INFO_COUNT);
422 if (kr != KERN_SUCCESS) {
423 panic("shared_region_pager_init: "
424 "memory_object_change_attributes() failed");
425 }
426
427 #if CONFIG_SECLUDED_MEMORY
428 if (secluded_for_filecache) {
429 #if 00
430 /*
431 * XXX FBDP do we want this in the secluded pool?
432 * Ideally, we'd want the shared region used by Camera to
433 * NOT be in the secluded pool, but all other shared regions
434 * in the secluded pool...
435 */
436 memory_object_mark_eligible_for_secluded(control, TRUE);
437 #endif /* 00 */
438 }
439 #endif /* CONFIG_SECLUDED_MEMORY */
440
441 return KERN_SUCCESS;
442 }
443
444 /*
445  * shared_region_pager_data_return()
446 *
447 * Handles page-out requests from VM. This should never happen since
448 * the pages provided by this EMM are not supposed to be dirty or dirtied
449 * and VM should simply discard the contents and reclaim the pages if it
450 * needs to.
451 */
452 kern_return_t
453 shared_region_pager_data_return(
454 __unused memory_object_t mem_obj,
455 __unused memory_object_offset_t offset,
456 __unused memory_object_cluster_size_t data_cnt,
457 __unused memory_object_offset_t *resid_offset,
458 __unused int *io_error,
459 __unused boolean_t dirty,
460 __unused boolean_t kernel_copy,
461 __unused int upl_flags)
462 {
463 panic("shared_region_pager_data_return: should never get called");
464 return KERN_FAILURE;
465 }
466
467 kern_return_t
468 shared_region_pager_data_initialize(
469 __unused memory_object_t mem_obj,
470 __unused memory_object_offset_t offset,
471 __unused memory_object_cluster_size_t data_cnt)
472 {
473 panic("shared_region_pager_data_initialize: should never get called");
474 return KERN_FAILURE;
475 }
476
477 /*
478 * shared_region_pager_data_request()
479 *
480 * Handles page-in requests from VM.
481 */
482 int shared_region_pager_data_request_debug = 0;
483 kern_return_t
484 shared_region_pager_data_request(
485 memory_object_t mem_obj,
486 memory_object_offset_t offset,
487 memory_object_cluster_size_t length,
488 #if !DEBUG
489 __unused
490 #endif
491 vm_prot_t protection_required,
492 memory_object_fault_info_t mo_fault_info)
493 {
494 shared_region_pager_t pager;
495 memory_object_control_t mo_control;
496 upl_t upl;
497 int upl_flags;
498 upl_size_t upl_size;
499 upl_page_info_t *upl_pl;
500 unsigned int pl_count;
501 vm_object_t src_top_object, src_page_object, dst_object;
502 kern_return_t kr, retval;
503 vm_fault_return_t vmfr;
504 vm_offset_t src_vaddr, dst_vaddr;
505 vm_offset_t cur_offset;
506 vm_offset_t offset_in_page;
507 kern_return_t error_code;
508 vm_prot_t prot;
509 vm_page_t src_page, top_page;
510 int interruptible;
511 struct vm_object_fault_info fault_info;
512 mach_vm_offset_t slide_start_address;
513 u_int32_t slide_info_page_size;
514
515 PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
516
517 retval = KERN_SUCCESS;
518 src_top_object = VM_OBJECT_NULL;
519 src_page_object = VM_OBJECT_NULL;
520 upl = NULL;
521 upl_pl = NULL;
522 fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
523 fault_info.stealth = TRUE;
524 fault_info.io_sync = FALSE;
525 fault_info.mark_zf_absent = FALSE;
526 fault_info.batch_pmap_op = FALSE;
527 interruptible = fault_info.interruptible;
528
529 pager = shared_region_pager_lookup(mem_obj);
530 assert(pager->srp_is_ready);
531 assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */
532 assert(pager->srp_is_mapped); /* pager is mapped */
533
534 PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
535
536 /*
537 * Gather in a UPL all the VM pages requested by VM.
538 */
539 mo_control = pager->srp_header.mo_control;
540
541 upl_size = length;
542 upl_flags =
543 UPL_RET_ONLY_ABSENT |
544 UPL_SET_LITE |
545 UPL_NO_SYNC |
546 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
547 UPL_SET_INTERNAL;
548 pl_count = 0;
549 kr = memory_object_upl_request(mo_control,
550 offset, upl_size,
551 &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
552 if (kr != KERN_SUCCESS) {
553 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_NO_UPL), kr /* arg */);
554 if (panic_on_dyld_issue) {
555 panic("%s(): upl_request(%p, 0x%llx, 0x%llx) ret %d", __func__,
556 mo_control, offset, (uint64_t)upl_size, kr);
557 }
558 retval = kr;
559 goto done;
560 }
561 dst_object = memory_object_control_to_vm_object(mo_control);
562 assert(dst_object != VM_OBJECT_NULL);
563
564 /*
565 * We'll map the original data in the kernel address space from the
566 * backing VM object (itself backed by the shared cache file via
567 * the vnode pager).
568 */
569 src_top_object = pager->srp_backing_object;
570 assert(src_top_object != VM_OBJECT_NULL);
571 vm_object_reference(src_top_object); /* keep the source object alive */
572
573 slide_start_address = pager->srp_slide_info->si_slid_address;
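/*
 * Version 1 slide info predates per-entry page sizes and always uses
 * PAGE_SIZE_FOR_SR_SLIDE; later versions record the page size in the
 * slide info entry itself.
 */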
574 slide_info_page_size = pager->srp_slide_info->si_slide_info_entry->version == 1 ? PAGE_SIZE_FOR_SR_SLIDE : pager->srp_slide_info->si_slide_info_entry->page_size;
575
576 fault_info.lo_offset += pager->srp_backing_offset;
577 fault_info.hi_offset += pager->srp_backing_offset;
578
579 /*
580 * Fill in the contents of the pages requested by VM.
581 */
582 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
583 pl_count = length / PAGE_SIZE;
584 for (cur_offset = 0;
585 retval == KERN_SUCCESS && cur_offset < length;
586 cur_offset += PAGE_SIZE) {
587 ppnum_t dst_pnum;
588
589 if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
590 /* this page is not in the UPL: skip it */
591 continue;
592 }
593
594 /*
595 * Map the source (dyld shared cache) page in the kernel's
596 * virtual address space.
597 * We already hold a reference on the src_top_object.
598 */
599 retry_src_fault:
600 vm_object_lock(src_top_object);
601 vm_object_paging_begin(src_top_object);
602 error_code = 0;
603 prot = VM_PROT_READ;
604 src_page = VM_PAGE_NULL;
605 vmfr = vm_fault_page(src_top_object,
606 pager->srp_backing_offset + offset + cur_offset,
607 VM_PROT_READ,
608 FALSE,
609 FALSE, /* src_page not looked up */
610 &prot,
611 &src_page,
612 &top_page,
613 NULL,
614 &error_code,
615 FALSE,
616 &fault_info);
617 switch (vmfr) {
618 case VM_FAULT_SUCCESS:
619 break;
620 case VM_FAULT_RETRY:
621 goto retry_src_fault;
622 case VM_FAULT_MEMORY_SHORTAGE:
623 if (vm_page_wait(interruptible)) {
624 goto retry_src_fault;
625 }
626 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
627 OS_FALLTHROUGH;
628 case VM_FAULT_INTERRUPTED:
629 retval = MACH_SEND_INTERRUPTED;
630 goto done;
631 case VM_FAULT_SUCCESS_NO_VM_PAGE:
632 /* success but no VM page: fail */
633 vm_object_paging_end(src_top_object);
634 vm_object_unlock(src_top_object);
635 OS_FALLTHROUGH;
636 case VM_FAULT_MEMORY_ERROR:
637 /* the page is not there ! */
638 if (error_code) {
639 retval = error_code;
640 } else {
641 retval = KERN_MEMORY_ERROR;
642 }
643 goto done;
644 case VM_FAULT_BUSY:
645 retval = KERN_ALREADY_WAITING;
646 goto done;
647 }
648 assert(src_page != VM_PAGE_NULL);
649 assert(src_page->vmp_busy);
650
651 if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
652 vm_page_lockspin_queues();
653 if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
654 vm_page_speculate(src_page, FALSE);
655 }
656 vm_page_unlock_queues();
657 }
658
659 /*
660 * Establish pointers to the source
661 * and destination physical pages.
662 */
663 dst_pnum = (ppnum_t)
664 upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
665 assert(dst_pnum != 0);
666
667 src_vaddr = (vm_map_offset_t)
668 phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
669 << PAGE_SHIFT);
670 dst_vaddr = (vm_map_offset_t)
671 phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
672 src_page_object = VM_PAGE_OBJECT(src_page);
673
674 /*
675 * Validate the original page...
676 */
677 if (src_page_object->code_signed) {
678 vm_page_validate_cs_mapped(
679 src_page, PAGE_SIZE, 0,
680 (const void *) src_vaddr);
681 }
682 /*
683 * ... and transfer the results to the destination page.
684 */
685 UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
686 src_page->vmp_cs_validated);
687 UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
688 src_page->vmp_cs_tainted);
689 UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
690 src_page->vmp_cs_nx);
691
692 /*
693 * The page provider might access a mapped file, so let's
694 * release the object lock for the source page to avoid a
695 * potential deadlock.
696 * The source page is kept busy and we have a
697 * "paging_in_progress" reference on its object, so it's safe
698 * to unlock the object here.
699 */
700 assert(src_page->vmp_busy);
701 assert(src_page_object->paging_in_progress > 0);
702 vm_object_unlock(src_page_object);
703
704 /*
705 * Process the original contents of the source page
706 * into the destination page.
707 */
708 for (offset_in_page = 0;
709 offset_in_page < PAGE_SIZE;
710 offset_in_page += slide_info_page_size) {
711 vm_object_offset_t chunk_offset;
712 vm_object_offset_t offset_in_backing_object;
713 vm_object_offset_t offset_in_sliding_range;
714
715 chunk_offset = offset + cur_offset + offset_in_page;
716
717 bcopy((const char *)(src_vaddr +
718 offset_in_page),
719 (char *)(dst_vaddr + offset_in_page),
720 slide_info_page_size);
721
722 offset_in_backing_object = (chunk_offset +
723 pager->srp_backing_offset);
724 if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
725 (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
726 /* chunk is outside of sliding range: done */
727 shared_region_pager_copied++;
728 continue;
729 }
730
731 offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
732 kr = vm_shared_region_slide_page(pager->srp_slide_info,
733 dst_vaddr + offset_in_page,
734 (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
735 (uint32_t) (offset_in_sliding_range / slide_info_page_size),
736 #if __has_feature(ptrauth_calls)
737 pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
738 #else /* __has_feature(ptrauth_calls) */
739 0
740 #endif /* __has_feature(ptrauth_calls) */
741 );
742 if (shared_region_pager_data_request_debug) {
743 printf("shared_region_data_request"
744 "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
745 "in sliding range [0x%llx:0x%llx]: "
746 "SLIDE offset 0x%llx="
747 "(0x%llx+0x%llx+0x%llx+0x%04llx)"
748 "[0x%016llx 0x%016llx] "
749 "code_signed=%d "
750 "cs_validated=%d "
751 "cs_tainted=%d "
752 "cs_nx=%d "
753 "kr=0x%x\n",
754 pager,
755 offset,
756 (uint64_t) cur_offset,
757 (uint64_t) offset_in_page,
758 chunk_offset,
759 pager->srp_slide_info->si_start,
760 pager->srp_slide_info->si_end,
761 (pager->srp_backing_offset +
762 offset +
763 cur_offset +
764 offset_in_page),
765 pager->srp_backing_offset,
766 offset,
767 (uint64_t) cur_offset,
768 (uint64_t) offset_in_page,
769 *(uint64_t *)(dst_vaddr + offset_in_page),
770 *(uint64_t *)(dst_vaddr + offset_in_page + 8),
771 src_page_object->code_signed,
772 src_page->vmp_cs_validated,
773 src_page->vmp_cs_tainted,
774 src_page->vmp_cs_nx,
775 kr);
776 }
777 if (kr != KERN_SUCCESS) {
778 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_SLIDE_ERROR), kr /* arg */);
779 if (panic_on_dyld_issue) {
780 panic("%s(): shared region slide error %d",
781 __func__, kr);
782 }
783 shared_region_pager_slid_error++;
784 retval = KERN_MEMORY_ERROR;
785 break;
786 }
787 shared_region_pager_slid++;
788 }
789
790 assert(VM_PAGE_OBJECT(src_page) == src_page_object);
791 assert(src_page->vmp_busy);
792 assert(src_page_object->paging_in_progress > 0);
793 vm_object_lock(src_page_object);
794
795 /*
796 * Cleanup the result of vm_fault_page() of the source page.
797 */
798 vm_page_wakeup_done(src_page_object, src_page);
799 src_page = VM_PAGE_NULL;
800 vm_object_paging_end(src_page_object);
801 vm_object_unlock(src_page_object);
802
803 if (top_page != VM_PAGE_NULL) {
804 assert(VM_PAGE_OBJECT(top_page) == src_top_object);
805 vm_object_lock(src_top_object);
806 VM_PAGE_FREE(top_page);
807 vm_object_paging_end(src_top_object);
808 vm_object_unlock(src_top_object);
809 }
810 }
811
812 done:
813 if (upl != NULL) {
814 /* clean up the UPL */
815
816 /*
817 * The pages are currently dirty because we've just been
818 * writing on them, but as far as we're concerned, they're
819 * clean since they contain their "original" contents as
820 * provided by us, the pager.
821 * Tell the UPL to mark them "clean".
822 */
823 upl_clear_dirty(upl, TRUE);
824
825 /* abort or commit the UPL */
826 if (retval != KERN_SUCCESS) {
827 upl_abort(upl, 0);
828 } else {
829 boolean_t empty;
830 assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
831 "upl %p offset 0x%llx size 0x%x\n",
832 upl, upl->u_offset, upl->u_size);
833 upl_commit_range(upl, 0, upl->u_size,
834 UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
835 upl_pl, pl_count, &empty);
836 }
837
838 /* and deallocate the UPL */
839 upl_deallocate(upl);
840 upl = NULL;
841 }
842 if (src_top_object != VM_OBJECT_NULL) {
843 vm_object_deallocate(src_top_object);
844 }
845 return retval;
846 }
847
848 /*
849 * shared_region_pager_reference()
850 *
851 * Get a reference on this memory object.
852 * For external usage only. Assumes that the initial reference count is not 0,
853  * i.e. one should not "revive" a dead pager this way.
854 */
855 void
856 shared_region_pager_reference(
857 memory_object_t mem_obj)
858 {
859 shared_region_pager_t pager;
860
861 pager = shared_region_pager_lookup(mem_obj);
862
863 lck_mtx_lock(&shared_region_pager_lock);
864 os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
865 lck_mtx_unlock(&shared_region_pager_lock);
866 }
867
868
869 /*
870 * shared_region_pager_dequeue:
871 *
872 * Removes a pager from the list of pagers.
873 *
874 * The caller must hold "shared_region_pager_lock".
875 */
876 void
877 shared_region_pager_dequeue(
878 shared_region_pager_t pager)
879 {
880 assert(!pager->srp_is_mapped);
881
882 queue_remove(&shared_region_pager_queue,
883 pager,
884 shared_region_pager_t,
885 srp_queue);
886 pager->srp_queue.next = NULL;
887 pager->srp_queue.prev = NULL;
888
889 shared_region_pager_count--;
890 }
891
892 /*
893 * shared_region_pager_terminate_internal:
894 *
895 * Trigger the asynchronous termination of the memory object associated
896 * with this pager.
897 * When the memory object is terminated, there will be one more call
898 * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
899 * to finish the clean up.
900 *
901 * "shared_region_pager_lock" should not be held by the caller.
902 * We don't need the lock because the pager has already been removed from
903 * the pagers' list and is now ours exclusively.
904 */
905 void
906 shared_region_pager_terminate_internal(
907 shared_region_pager_t pager)
908 {
909 assert(pager->srp_is_ready);
910 assert(!pager->srp_is_mapped);
911 assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);
912
913 if (pager->srp_backing_object != VM_OBJECT_NULL) {
914 vm_object_deallocate(pager->srp_backing_object);
915 pager->srp_backing_object = VM_OBJECT_NULL;
916 }
917 /* trigger the destruction of the memory object */
918 memory_object_destroy(pager->srp_header.mo_control, VM_OBJECT_DESTROY_PAGER);
919 }
920
921 /*
922 * shared_region_pager_deallocate_internal()
923 *
924 * Release a reference on this pager and free it when the last reference goes away.
925 * Can be called with shared_region_pager_lock held or not, but always returns
926 * with it unlocked.
927 */
928 void
929 shared_region_pager_deallocate_internal(
930 shared_region_pager_t pager,
931 boolean_t locked)
932 {
933 boolean_t needs_trimming;
934 int count_unmapped;
935 os_ref_count_t ref_count;
936
937 if (!locked) {
938 lck_mtx_lock(&shared_region_pager_lock);
939 }
940
941 /* if we have too many unmapped pagers, trim some */
942 count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
943 needs_trimming = (count_unmapped > shared_region_pager_cache_limit);
944
945 /* drop a reference on this pager */
946 ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
947
948 if (ref_count == 1) {
949 /*
950 * Only the "named" reference is left, which means that
951 * no one is really holding on to this pager anymore.
952 * Terminate it.
953 */
954 shared_region_pager_dequeue(pager);
955 /* the pager is all ours: no need for the lock now */
956 lck_mtx_unlock(&shared_region_pager_lock);
957 shared_region_pager_terminate_internal(pager);
958 } else if (ref_count == 0) {
959 /*
960 * Dropped the existence reference; the memory object has
961 * been terminated. Do some final cleanup and release the
962 * pager structure.
963 */
964 lck_mtx_unlock(&shared_region_pager_lock);
965
966 vm_shared_region_slide_info_t si = pager->srp_slide_info;
967 #if __has_feature(ptrauth_calls)
968 /*
969 * The slide_info for auth sections lives in the shared region.
970 * Just deallocate() on the shared region and clear the field.
971 */
972 if (si != NULL) {
973 if (si->si_shared_region != NULL) {
974 assert(si->si_ptrauth);
975 vm_shared_region_deallocate(si->si_shared_region);
976 pager->srp_slide_info = NULL;
977 si = NULL;
978 }
979 }
980 #endif /* __has_feature(ptrauth_calls) */
981 if (si != NULL) {
982 vm_object_deallocate(si->si_slide_object);
983 /* free the slide_info_entry */
984 kfree_data(si->si_slide_info_entry,
985 si->si_slide_info_size);
986 kfree_type(struct vm_shared_region_slide_info, si);
987 pager->srp_slide_info = NULL;
988 }
989
990 if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
991 memory_object_control_deallocate(pager->srp_header.mo_control);
992 pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
993 }
994 kfree_type(struct shared_region_pager, pager);
995 pager = SHARED_REGION_PAGER_NULL;
996 } else {
997 /* there are still plenty of references: keep going... */
998 lck_mtx_unlock(&shared_region_pager_lock);
999 }
1000
1001 if (needs_trimming) {
1002 shared_region_pager_trim();
1003 }
1004 /* caution: lock is not held on return... */
1005 }
1006
1007 /*
1008 * shared_region_pager_deallocate()
1009 *
1010 * Release a reference on this pager and free it when the last
1011 * reference goes away.
1012 */
1013 void
1014 shared_region_pager_deallocate(
1015 memory_object_t mem_obj)
1016 {
1017 shared_region_pager_t pager;
1018
1019 PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
1020 pager = shared_region_pager_lookup(mem_obj);
1021 shared_region_pager_deallocate_internal(pager, FALSE);
1022 }
1023
1024 /*
1025  * shared_region_pager_terminate(): no cleanup needed here; teardown happens in shared_region_pager_deallocate().
1026  */
1027 kern_return_t
1028 shared_region_pager_terminate(
1029 #if !DEBUG
1030 __unused
1031 #endif
1032 memory_object_t mem_obj)
1033 {
1034 PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));
1035
1036 return KERN_SUCCESS;
1037 }
1038
1039 /*
1040 * shared_region_pager_map()
1041 *
1042 * This allows VM to let us, the EMM, know that this memory object
1043 * is currently mapped one or more times. This is called by VM each time
1044 * the memory object gets mapped, but we only take one extra reference the
1045 * first time it is called.
1046 */
1047 kern_return_t
1048 shared_region_pager_map(
1049 memory_object_t mem_obj,
1050 __unused vm_prot_t prot)
1051 {
1052 shared_region_pager_t pager;
1053
1054 PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));
1055
1056 pager = shared_region_pager_lookup(mem_obj);
1057
1058 lck_mtx_lock(&shared_region_pager_lock);
1059 assert(pager->srp_is_ready);
1060 assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */
1061 if (!pager->srp_is_mapped) {
1062 pager->srp_is_mapped = TRUE;
1063 os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
1064 shared_region_pager_count_mapped++;
1065 }
1066 lck_mtx_unlock(&shared_region_pager_lock);
1067
1068 return KERN_SUCCESS;
1069 }
1070
1071 /*
1072 * shared_region_pager_last_unmap()
1073 *
1074 * This is called by VM when this memory object is no longer mapped anywhere.
1075 */
1076 kern_return_t
1077 shared_region_pager_last_unmap(
1078 memory_object_t mem_obj)
1079 {
1080 shared_region_pager_t pager;
1081 int count_unmapped;
1082
1083 PAGER_DEBUG(PAGER_ALL,
1084 ("shared_region_pager_last_unmap: %p\n", mem_obj));
1085
1086 pager = shared_region_pager_lookup(mem_obj);
1087
1088 lck_mtx_lock(&shared_region_pager_lock);
1089 if (pager->srp_is_mapped) {
1090 /*
1091 * All the mappings are gone, so let go of the one extra
1092 * reference that represents all the mappings of this pager.
1093 */
1094 shared_region_pager_count_mapped--;
1095 count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
1096 if (count_unmapped > shared_region_pager_count_unmapped_max) {
1097 shared_region_pager_count_unmapped_max = count_unmapped;
1098 }
1099 pager->srp_is_mapped = FALSE;
1100 shared_region_pager_deallocate_internal(pager, TRUE);
1101 /* caution: deallocate_internal() released the lock ! */
1102 } else {
1103 lck_mtx_unlock(&shared_region_pager_lock);
1104 }
1105
1106 return KERN_SUCCESS;
1107 }
1108
1109 boolean_t
1110 shared_region_pager_backing_object(
1111 memory_object_t mem_obj,
1112 memory_object_offset_t offset,
1113 vm_object_t *backing_object,
1114 vm_object_offset_t *backing_offset)
1115 {
1116 shared_region_pager_t pager;
1117
1118 PAGER_DEBUG(PAGER_ALL,
1119 ("shared_region_pager_backing_object: %p\n", mem_obj));
1120
1121 pager = shared_region_pager_lookup(mem_obj);
1122
1123 *backing_object = pager->srp_backing_object;
1124 *backing_offset = pager->srp_backing_offset + offset;
1125
1126 return TRUE;
1127 }
1128
1129
1130 /*
1131  * shared_region_pager_lookup(): convert a memory_object_t back to the shared_region_pager_t that embeds it.
1132  */
1133 shared_region_pager_t
1134 shared_region_pager_lookup(
1135 memory_object_t mem_obj)
1136 {
1137 shared_region_pager_t pager;
1138
1139 assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
1140 pager = (shared_region_pager_t)(uintptr_t) mem_obj;
1141 assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0);
1142 return pager;
1143 }
1144
1145 /*
1146 * Create and return a pager for the given object with the
1147 * given slide information.
1148 */
1149 static shared_region_pager_t
1150 shared_region_pager_create(
1151 vm_object_t backing_object,
1152 vm_object_offset_t backing_offset,
1153 struct vm_shared_region_slide_info *slide_info,
1154 #if !__has_feature(ptrauth_calls)
1155 __unused
1156 #endif /* !__has_feature(ptrauth_calls) */
1157 uint64_t jop_key)
1158 {
1159 shared_region_pager_t pager;
1160 memory_object_control_t control;
1161 kern_return_t kr;
1162 vm_object_t object;
1163
1164 pager = kalloc_type(struct shared_region_pager, Z_WAITOK);
1165 if (pager == SHARED_REGION_PAGER_NULL) {
1166 return SHARED_REGION_PAGER_NULL;
1167 }
1168
1169 /*
1170 * The vm_map call takes both named entry ports and raw memory
1171 * objects in the same parameter. We need to make sure that
1172 * vm_map does not see this object as a named entry port. So,
1173 * we reserve the first word in the object for a fake object type
1174 * setting - that will tell vm_map to use it as a memory object.
1175 */
1176 pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
1177 pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
1178 pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1179 pager->srp_header.mo_last_unmap_ctid = 0;
1180
1181 pager->srp_is_ready = FALSE;/* not ready until it has a "name" */
1182 /* existence reference (for the cache) + 1 for the caller */
1183 os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2);
1184 pager->srp_is_mapped = FALSE;
1185 pager->srp_backing_object = backing_object;
1186 pager->srp_backing_offset = backing_offset;
1187 pager->srp_slide_info = slide_info;
1188 #if __has_feature(ptrauth_calls)
1189 pager->srp_jop_key = jop_key;
1190 /*
1191 * If we're getting slide_info from the shared_region,
1192 * take a reference, so it can't disappear from under us.
1193 */
1194 if (slide_info->si_shared_region) {
1195 assert(slide_info->si_ptrauth);
1196 vm_shared_region_reference(slide_info->si_shared_region);
1197 }
1198 #endif /* __has_feature(ptrauth_calls) */
1199
1200 vm_object_reference(backing_object);
1201
1202 lck_mtx_lock(&shared_region_pager_lock);
1203 /* enter new pager at the head of our list of pagers */
1204 queue_enter_first(&shared_region_pager_queue,
1205 pager,
1206 shared_region_pager_t,
1207 srp_queue);
1208 shared_region_pager_count++;
1209 if (shared_region_pager_count > shared_region_pager_count_max) {
1210 shared_region_pager_count_max = shared_region_pager_count;
1211 }
1212 lck_mtx_unlock(&shared_region_pager_lock);
1213
1214 kr = memory_object_create_named((memory_object_t) pager,
1215 0,
1216 &control);
1217 assert(kr == KERN_SUCCESS);
1218
1219 memory_object_mark_trusted(control);
1220
1221 lck_mtx_lock(&shared_region_pager_lock);
1222 /* the new pager is now ready to be used */
1223 pager->srp_is_ready = TRUE;
1224 object = memory_object_to_vm_object((memory_object_t) pager);
1225 assert(object);
1226 /*
1227 * No one knows about this object and so we get away without the object lock.
1228 * This object is _eventually_ backed by the dyld shared cache and so we want
1229 * to benefit from the lock priority boosting.
1230 */
1231 object->object_is_shared_cache = TRUE;
1232 lck_mtx_unlock(&shared_region_pager_lock);
1233
1234 /* wakeup anyone waiting for this pager to be ready */
1235 thread_wakeup(&pager->srp_is_ready);
1236
1237 return pager;
1238 }
1239
1240 /*
1241 * shared_region_pager_setup()
1242 *
1243 * Provide the caller with a memory object backed by the provided
1244 * "backing_object" VM object.
1245 */
1246 memory_object_t
1247 shared_region_pager_setup(
1248 vm_object_t backing_object,
1249 vm_object_offset_t backing_offset,
1250 struct vm_shared_region_slide_info *slide_info,
1251 uint64_t jop_key)
1252 {
1253 shared_region_pager_t pager;
1254
1255 /* create new pager */
1256 pager = shared_region_pager_create(backing_object,
1257 backing_offset, slide_info, jop_key);
1258 if (pager == SHARED_REGION_PAGER_NULL) {
1259 /* could not create a new pager */
1260 return MEMORY_OBJECT_NULL;
1261 }
1262
1263 lck_mtx_lock(&shared_region_pager_lock);
1264 while (!pager->srp_is_ready) {
1265 lck_mtx_sleep(&shared_region_pager_lock,
1266 LCK_SLEEP_DEFAULT,
1267 &pager->srp_is_ready,
1268 THREAD_UNINT);
1269 }
1270 lck_mtx_unlock(&shared_region_pager_lock);
1271
1272 return (memory_object_t) pager;
1273 }
1274
1275 #if __has_feature(ptrauth_calls)
1276 /*
1277 * shared_region_pager_match()
1278 *
1279  * Find an existing pager that matches the given "backing_object", offset,
1280  * slide info and JOP key, or create a new one if none matches.
1281 */
1282 memory_object_t
1283 shared_region_pager_match(
1284 vm_object_t backing_object,
1285 vm_object_offset_t backing_offset,
1286 vm_shared_region_slide_info_t slide_info,
1287 uint64_t jop_key)
1288 {
1289 shared_region_pager_t pager;
1290 vm_shared_region_slide_info_t si;
1291
1292 lck_mtx_lock(&shared_region_pager_lock);
1293 queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
1294 if (pager->srp_backing_object != backing_object->vo_copy) {
1295 continue;
1296 }
1297 if (pager->srp_backing_offset != backing_offset) {
1298 continue;
1299 }
1300 si = pager->srp_slide_info;
1301
1302 /* If there's no AUTH section then it can't match (slide_info is always !NULL) */
1303 if (!si->si_ptrauth) {
1304 continue;
1305 }
1306 if (pager->srp_jop_key != jop_key) {
1307 continue;
1308 }
1309 if (si->si_slide != slide_info->si_slide) {
1310 continue;
1311 }
1312 if (si->si_start != slide_info->si_start) {
1313 continue;
1314 }
1315 if (si->si_end != slide_info->si_end) {
1316 continue;
1317 }
1318 if (si->si_slide_object != slide_info->si_slide_object) {
1319 continue;
1320 }
1321 if (si->si_slide_info_size != slide_info->si_slide_info_size) {
1322 continue;
1323 }
1324 if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
1325 continue;
1326 }
1327 /* the caller expects a reference on this */
1328 os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
1329 lck_mtx_unlock(&shared_region_pager_lock);
1330 return (memory_object_t)pager;
1331 }
1332
1333 /*
1334 * We didn't find a pre-existing pager, so create one.
1335 *
1336 * Note slight race condition here since we drop the lock. This could lead to more than one
1337 * thread calling setup with the same arguments here. That shouldn't break anything, just
1338 * waste a little memory.
1339 */
1340 lck_mtx_unlock(&shared_region_pager_lock);
1341 return shared_region_pager_setup(backing_object->vo_copy, backing_offset, slide_info, jop_key);
1342 }
1343
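/*
 * shared_region_pager_match_task_key()
 *
 * Sanity check that the pager chosen for this task was created with the
 * task's JOP key.
 */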
1344 void
1345 shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
1346 {
1347 __unused shared_region_pager_t pager = (shared_region_pager_t)memobj;
1348
1349 assert(pager->srp_jop_key == task->jop_pid);
1350 }
1351 #endif /* __has_feature(ptrauth_calls) */
1352
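/*
 * shared_region_pager_trim()
 *
 * Terminate the oldest unmapped pagers until the number of unmapped pagers
 * is back under shared_region_pager_cache_limit.
 */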
1353 void
1354 shared_region_pager_trim(void)
1355 {
1356 shared_region_pager_t pager, prev_pager;
1357 queue_head_t trim_queue;
1358 int num_trim;
1359 int count_unmapped;
1360
1361 lck_mtx_lock(&shared_region_pager_lock);
1362
1363 /*
1364 * We have too many pagers, try and trim some unused ones,
1365 * starting with the oldest pager at the end of the queue.
1366 */
1367 queue_init(&trim_queue);
1368 num_trim = 0;
1369
1370 for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
1371 !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
1372 pager = prev_pager) {
1373 /* get prev elt before we dequeue */
1374 prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);
1375
1376 if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
1377 pager->srp_is_ready &&
1378 !pager->srp_is_mapped) {
1379 /* this pager can be trimmed */
1380 num_trim++;
1381 /* remove this pager from the main list ... */
1382 shared_region_pager_dequeue(pager);
1383 /* ... and add it to our trim queue */
1384 queue_enter_first(&trim_queue,
1385 pager,
1386 shared_region_pager_t,
1387 srp_queue);
1388
1389 /* do we have enough pagers to trim? */
1390 count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
1391 if (count_unmapped <= shared_region_pager_cache_limit) {
1392 break;
1393 }
1394 }
1395 }
1396 if (num_trim > shared_region_pager_num_trim_max) {
1397 shared_region_pager_num_trim_max = num_trim;
1398 }
1399 shared_region_pager_num_trim_total += num_trim;
1400
1401 lck_mtx_unlock(&shared_region_pager_lock);
1402
1403 /* terminate the trimmed pagers */
1404 while (!queue_empty(&trim_queue)) {
1405 queue_remove_first(&trim_queue,
1406 pager,
1407 shared_region_pager_t,
1408 srp_queue);
1409 pager->srp_queue.next = NULL;
1410 pager->srp_queue.prev = NULL;
1411 assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2);
1412 /*
1413 * We can't call deallocate_internal() because the pager
1414 * has already been dequeued, but we still need to remove
1415 * a reference.
1416 */
1417 (void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
1418 shared_region_pager_terminate_internal(pager);
1419 }
1420 }
1421
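/*
 * shared_region_pager_purge()
 *
 * Reclaim the clean resident pages of this pager's VM object and return
 * the number of pages purged.
 */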
1422 static uint64_t
1423 shared_region_pager_purge(
1424 shared_region_pager_t pager)
1425 {
1426 uint64_t pages_purged;
1427 vm_object_t object;
1428
1429 pages_purged = 0;
1430 object = memory_object_to_vm_object((memory_object_t) pager);
1431 assert(object != VM_OBJECT_NULL);
1432 vm_object_lock(object);
1433 pages_purged = object->resident_page_count;
1434 vm_object_reap_pages(object, REAP_DATA_FLUSH_CLEAN);
1435 pages_purged -= object->resident_page_count;
1436 // printf(" %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1437 vm_object_unlock(object);
1438 return pages_purged;
1439 }
1440
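/*
 * shared_region_pager_purge_all()
 *
 * Purge the resident pages of every shared region pager and return the
 * total number of pages reclaimed.
 */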
1441 uint64_t
1442 shared_region_pager_purge_all(void)
1443 {
1444 uint64_t pages_purged;
1445 shared_region_pager_t pager;
1446
1447 pages_purged = 0;
1448 lck_mtx_lock(&shared_region_pager_lock);
1449 queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
1450 pages_purged += shared_region_pager_purge(pager);
1451 }
1452 lck_mtx_unlock(&shared_region_pager_lock);
1453 #if DEVELOPMENT || DEBUG
1454 printf(" %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1455 #endif /* DEVELOPMENT || DEBUG */
1456 return pages_purged;
1457 }
1458