/*
 * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_shared_region.h>

#include <sys/kdebug_triage.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
extern boolean_t diversify_user_jop;
#endif /* __has_feature(ptrauth_calls) */

/*
 * SHARED REGION MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of a dyld shared cache
 * in shared regions, applying any necessary modifications (sliding,
 * pointer signing, ...).
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the original data from its backing VM object (itself backed by
 * the dyld shared cache file), modifying it if needed, and providing it to VM.
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()). The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of shared cache files,
 * so the data structures are kept very simple (a linked list) for now.
 */
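
/*
 * Typical lifecycle, sketched with this file's own entry points (the caller
 * shown here is hypothetical; the real driver lives elsewhere, e.g. in the
 * shared region setup code):
 *
 *	memory_object_t mo;
 *
 *	mo = shared_region_pager_setup(backing_object, backing_offset,
 *	    slide_info, jop_key);	// returned with a reference held
 *	// map "mo" copy-on-write; VM then calls shared_region_pager_map()
 *	// and shared_region_pager_data_request() for each absent page
 *	memory_object_deallocate(mo);	// drop the caller's reference
 *	// once the last mapping goes away, VM calls
 *	// shared_region_pager_last_unmap() and the pager becomes trimmable
 */
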
85
86 /* forward declarations */
87 void shared_region_pager_reference(memory_object_t mem_obj);
88 void shared_region_pager_deallocate(memory_object_t mem_obj);
89 kern_return_t shared_region_pager_init(memory_object_t mem_obj,
90 memory_object_control_t control,
91 memory_object_cluster_size_t pg_size);
92 kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
93 kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t length,
96 vm_prot_t protection_required,
97 memory_object_fault_info_t fault_info);
98 kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
99 memory_object_offset_t offset,
100 memory_object_cluster_size_t data_cnt,
101 memory_object_offset_t *resid_offset,
102 int *io_error,
103 boolean_t dirty,
104 boolean_t kernel_copy,
105 int upl_flags);
106 kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
107 memory_object_offset_t offset,
108 memory_object_cluster_size_t data_cnt);
109 kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj,
110 memory_object_offset_t offset,
111 memory_object_size_t size,
112 vm_prot_t desired_access);
113 kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
114 memory_object_offset_t offset,
115 memory_object_size_t length,
116 vm_sync_t sync_flags);
117 kern_return_t shared_region_pager_map(memory_object_t mem_obj,
118 vm_prot_t prot);
119 kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
120 boolean_t shared_region_pager_backing_object(
121 memory_object_t mem_obj,
122 memory_object_offset_t mem_obj_offset,
123 vm_object_t *backing_object,
124 vm_object_offset_t *backing_offset);
125
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops shared_region_pager_ops = {
	.memory_object_reference = shared_region_pager_reference,
	.memory_object_deallocate = shared_region_pager_deallocate,
	.memory_object_init = shared_region_pager_init,
	.memory_object_terminate = shared_region_pager_terminate,
	.memory_object_data_request = shared_region_pager_data_request,
	.memory_object_data_return = shared_region_pager_data_return,
	.memory_object_data_initialize = shared_region_pager_data_initialize,
	.memory_object_data_unlock = shared_region_pager_data_unlock,
	.memory_object_synchronize = shared_region_pager_synchronize,
	.memory_object_map = shared_region_pager_map,
	.memory_object_last_unmap = shared_region_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = shared_region_pager_backing_object,
	.memory_object_pager_name = "shared_region"
};
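
/*
 * Dispatch sketch (illustrative): VM does not call these routines directly.
 * A call such as memory_object_data_request(mem_obj, ...) is routed through
 * mem_obj->mo_pager_ops and lands in shared_region_pager_data_request() for
 * memory objects owned by this EMM.
 */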

#if __has_feature(ptrauth_calls)
/*
 * Track mappings between shared_region_id and the key used to sign
 * authenticated pointers.
 */
typedef struct shared_region_jop_key_map {
	queue_chain_t   srk_queue;
	char            *srk_shared_region_id;
	uint64_t        srk_jop_key;
	os_refcnt_t     srk_ref_count; /* count of tasks active with this shared_region_id */
} *shared_region_jop_key_map_t;

os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);

/*
 * The list is protected by the "shared_region_key_map" lock.
 */
int shared_region_key_count = 0;        /* number of active shared_region_id keys */
queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);

/*
 * Find the pointer signing key for the given shared_region_id.
 */
uint64_t
shared_region_find_key(char *shared_region_id)
{
	shared_region_jop_key_map_t region;
	uint64_t key;

	lck_mtx_lock(&shared_region_jop_key_lock);
	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
			goto found;
		}
	}
	panic("shared_region_find_key() no key for region '%s'", shared_region_id);

found:
	key = region->srk_jop_key;
	lck_mtx_unlock(&shared_region_jop_key_lock);
	return key;
}

/*
 * Return an authentication key to use for the given shared_region_id.
 * If inherit is TRUE, then the key must match inherited_key.
 * Creates an additional reference when successful.
 */
void
shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
{
	shared_region_jop_key_map_t region;
	shared_region_jop_key_map_t new = NULL;

	assert(shared_region_id != NULL);
again:
	lck_mtx_lock(&shared_region_jop_key_lock);
	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
			os_ref_retain_locked(&region->srk_ref_count);
			goto done;
		}
	}

	/*
	 * The ID was not found. On the first pass, drop the lock, allocate
	 * a new entry and redo the lookup in case someone else raced us.
	 */
	if (new == NULL) {
		lck_mtx_unlock(&shared_region_jop_key_lock);
		new = kalloc_type(struct shared_region_jop_key_map, Z_WAITOK);
		uint_t len = strlen(shared_region_id) + 1;
		new->srk_shared_region_id = kalloc_data(len, Z_WAITOK);
		strlcpy(new->srk_shared_region_id, shared_region_id, len);
		os_ref_init(&new->srk_ref_count, &srk_refgrp);

		if (diversify_user_jop && inherit) {
			new->srk_jop_key = inherited_key;
		} else if (diversify_user_jop && strlen(shared_region_id) > 0) {
			new->srk_jop_key = generate_jop_key();
		} else {
			new->srk_jop_key = ml_default_jop_pid();
		}

		goto again;
	}

	/*
	 * Use the newly allocated entry.
	 */
	++shared_region_key_count;
	queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
	region = new;
	new = NULL;

done:
	if (inherit && inherited_key != region->srk_jop_key) {
		panic("shared_region_key_alloc() inherited key mismatch");
	}
	lck_mtx_unlock(&shared_region_jop_key_lock);

	/*
	 * Free any unused new entry.
	 */
	if (new != NULL) {
		kfree_data(new->srk_shared_region_id,
		    strlen(new->srk_shared_region_id) + 1);
		kfree_type(struct shared_region_jop_key_map, new);
	}
}
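
/*
 * Expected pairing, sketched (hypothetical caller): a task activating a
 * shared_region_id takes a key reference and drops it on deactivation:
 *
 *	shared_region_key_alloc(id, false, 0);	// +1 ref, key created on first use
 *	key = shared_region_find_key(id);	// valid while the ref is held
 *	...
 *	shared_region_key_dealloc(id);		// -1 ref, entry freed at zero
 */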

/*
 * Mark the end of using a shared_region_id's key.
 */
extern void
shared_region_key_dealloc(char *shared_region_id)
{
	shared_region_jop_key_map_t region;

	assert(shared_region_id != NULL);
	lck_mtx_lock(&shared_region_jop_key_lock);
	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
			goto done;
		}
	}
	panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);

done:
	if (os_ref_release_locked(&region->srk_ref_count) == 0) {
		queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
		--shared_region_key_count;
	} else {
		region = NULL;
	}
	lck_mtx_unlock(&shared_region_jop_key_lock);

	if (region != NULL) {
		kfree_data(region->srk_shared_region_id,
		    strlen(region->srk_shared_region_id) + 1);
		kfree_type(struct shared_region_jop_key_map, region);
	}
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * The "shared_region_pager" describes a memory object backed by
 * the "shared_region" EMM.
 */
typedef struct shared_region_pager {
	struct memory_object    srp_header;     /* mandatory generic header */

	/* pager-specific data */
	queue_chain_t           srp_queue;      /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define srp_ref_count           srp_header.mo_ref
#else
	os_ref_atomic_t         srp_ref_count;  /* active uses */
#endif
	bool                    srp_is_mapped;  /* has active mappings */
	bool                    srp_is_ready;   /* is this pager ready? */
	vm_object_t             srp_backing_object; /* VM object for shared cache */
	vm_object_offset_t      srp_backing_offset;
	vm_shared_region_slide_info_t srp_slide_info;
#if __has_feature(ptrauth_calls)
	uint64_t                srp_jop_key;    /* zero if used for arm64 */
#endif /* __has_feature(ptrauth_calls) */
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL        ((shared_region_pager_t) NULL)
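
/*
 * Reference counting note (derived from the code below): a live pager holds
 * one "existence" reference for the pager cache, one reference representing
 * all of its mappings while srp_is_mapped is set, plus transient references
 * from callers. Dropping to 1 triggers termination of the memory object;
 * dropping to 0 frees the pager structure itself.
 */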

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "shared_region_pager_lock" lock.
 */
int shared_region_pager_count = 0;              /* number of pagers */
int shared_region_pager_count_mapped = 0;       /* number of mapped pagers */
queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int shared_region_pager_cache_limit = 0;

/*
 * Statistics & counters.
 */
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;

uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;

/* internal prototypes */
shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
    boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
void shared_region_pager_trim(void);


#if DEBUG
int shared_region_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {          \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
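
/*
 * Usage sketch (DEBUG kernels only, hypothetical call site): set
 * shared_region_pagerdebug to a mask of the PAGER_* bits, e.g. from a
 * debugger, and calls like
 *
 *	PAGER_DEBUG(PAGER_PAGEIN,
 *	    ("page-in %p offset 0x%llx\n", pager, offset));
 *
 * print only when the corresponding bit is set. On non-DEBUG kernels the
 * macro compiles away entirely.
 */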

/*
 * shared_region_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
shared_region_pager_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	shared_region_pager_t   pager;
	kern_return_t           kr;
	memory_object_attr_info_data_t attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("shared_region_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = shared_region_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->srp_header.mo_control = control;

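	/*
	 * A delayed copy strategy fits the copy-on-write mappings described
	 * in the header comment: the original pages are only copied if and
	 * when someone actually writes to them.
	 */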
	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("shared_region_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
#if 00
		/*
		 * XXX FBDP do we want this in the secluded pool?
		 * Ideally, we'd want the shared region used by Camera to
		 * NOT be in the secluded pool, but all other shared regions
		 * in the secluded pool...
		 */
		memory_object_mark_eligible_for_secluded(control, TRUE);
#endif /* 00 */
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}

/*
 * shared_region_pager_data_return()
 *
 * Handles page-out requests from VM. This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
shared_region_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("shared_region_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("shared_region_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}

/*
 * shared_region_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t length,
#if !DEBUG
	__unused
#endif
	vm_prot_t               protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	shared_region_pager_t   pager;
	memory_object_control_t mo_control;
	upl_t                   upl;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl;
	unsigned int            pl_count;
	vm_object_t             src_top_object, src_page_object, dst_object;
	kern_return_t           kr, retval;
	vm_offset_t             src_vaddr, dst_vaddr;
	vm_offset_t             cur_offset;
	vm_offset_t             offset_in_page;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info;
	mach_vm_offset_t        slide_start_address;

	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = shared_region_pager_lookup(mem_obj);
	assert(pager->srp_is_ready);
	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */
	assert(pager->srp_is_mapped); /* pager is mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->srp_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_NO_UPL), 0 /* arg */);
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object (itself backed by the shared cache file via
	 * the vnode pager).
	 */
	src_top_object = pager->srp_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	slide_start_address = pager->srp_slide_info->si_slid_address;

	fault_info.lo_offset += pager->srp_backing_offset;
	fault_info.hi_offset += pager->srp_backing_offset;

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (dyld shared cache) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    pager->srp_backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("shared_region_pager_data_request: "
			    "vm_fault_page() unexpected error 0x%x\n",
			    kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		    << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
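		/*
		 * PAGE_SIZE_FOR_SR_SLIDE is the granule the slide info is
		 * expressed in (4K); on configurations with a larger kernel
		 * PAGE_SIZE, this inner loop handles each 4K chunk of the
		 * page separately.
		 */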
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) {
			vm_object_offset_t chunk_offset;
			vm_object_offset_t offset_in_backing_object;
			vm_object_offset_t offset_in_sliding_range;

			chunk_offset = offset + cur_offset + offset_in_page;

			bcopy((const char *)(src_vaddr +
			    offset_in_page),
			    (char *)(dst_vaddr + offset_in_page),
			    PAGE_SIZE_FOR_SR_SLIDE);

			offset_in_backing_object = (chunk_offset +
			    pager->srp_backing_offset);
			if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
			    (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
				/* chunk is outside of sliding range: done */
				shared_region_pager_copied++;
				continue;
			}

			offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
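			/*
			 * Worked example with illustrative numbers: with
			 * srp_backing_offset = 0x100000, si_start = 0x180000
			 * and si_slid_address = 0x7ff800000000, a chunk at
			 * chunk_offset = 0x81000 has
			 * offset_in_backing_object = 0x181000, so
			 * offset_in_sliding_range = 0x1000 and the chunk is
			 * slid as if it lived at address 0x7ff800001000.
			 */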
			kr = vm_shared_region_slide_page(pager->srp_slide_info,
			    dst_vaddr + offset_in_page,
			    (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
			    (uint32_t) (offset_in_sliding_range / PAGE_SIZE_FOR_SR_SLIDE),
#if __has_feature(ptrauth_calls)
			    pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
#else /* __has_feature(ptrauth_calls) */
			    0
#endif /* __has_feature(ptrauth_calls) */
			    );
			if (shared_region_pager_data_request_debug) {
				printf("shared_region_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
				    "in sliding range [0x%llx:0x%llx]: "
				    "SLIDE offset 0x%llx="
				    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "kr=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    chunk_offset,
				    pager->srp_slide_info->si_start,
				    pager->srp_slide_info->si_end,
				    (pager->srp_backing_offset +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->srp_backing_offset,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    kr);
			}
			if (kr != KERN_SUCCESS) {
				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_SLIDE_ERROR), 0 /* arg */);
				shared_region_pager_slid_error++;
				break;
			}
			shared_region_pager_slid++;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * shared_region_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only. Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
shared_region_pager_reference(
	memory_object_t         mem_obj)
{
	shared_region_pager_t   pager;

	pager = shared_region_pager_lookup(mem_obj);

	lck_mtx_lock(&shared_region_pager_lock);
	os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
	lck_mtx_unlock(&shared_region_pager_lock);
}


/*
 * shared_region_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "shared_region_pager_lock".
 */
void
shared_region_pager_dequeue(
	shared_region_pager_t pager)
{
	assert(!pager->srp_is_mapped);

	queue_remove(&shared_region_pager_queue,
	    pager,
	    shared_region_pager_t,
	    srp_queue);
	pager->srp_queue.next = NULL;
	pager->srp_queue.prev = NULL;

	shared_region_pager_count--;
}

/*
 * shared_region_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
 * to finish the clean up.
 *
 * "shared_region_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
shared_region_pager_terminate_internal(
	shared_region_pager_t pager)
{
	assert(pager->srp_is_ready);
	assert(!pager->srp_is_mapped);
	assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);

	if (pager->srp_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->srp_backing_object);
		pager->srp_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->srp_header.mo_control, 0);
}

/*
 * shared_region_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with shared_region_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
shared_region_pager_deallocate_internal(
	shared_region_pager_t   pager,
	boolean_t               locked)
{
	boolean_t       needs_trimming;
	int             count_unmapped;
	os_ref_count_t  ref_count;

	if (!locked) {
		lck_mtx_lock(&shared_region_pager_lock);
	}

	/* if we have too many unmapped pagers, trim some */
	count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
	needs_trimming = (count_unmapped > shared_region_pager_cache_limit);

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		shared_region_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&shared_region_pager_lock);
		shared_region_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference; the memory object has
		 * been terminated. Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&shared_region_pager_lock);

		vm_shared_region_slide_info_t si = pager->srp_slide_info;
#if __has_feature(ptrauth_calls)
		/*
		 * The slide_info for auth sections lives in the shared region.
		 * Just deallocate() on the shared region and clear the field.
		 */
		if (si != NULL) {
			if (si->si_shared_region != NULL) {
				assert(si->si_ptrauth);
				vm_shared_region_deallocate(si->si_shared_region);
				pager->srp_slide_info = NULL;
				si = NULL;
			}
		}
#endif /* __has_feature(ptrauth_calls) */
		if (si != NULL) {
			vm_object_deallocate(si->si_slide_object);
			/* free the slide_info_entry */
			kfree_data(si->si_slide_info_entry,
			    si->si_slide_info_size);
			kfree_type(struct vm_shared_region_slide_info, si);
			pager->srp_slide_info = NULL;
		}

		if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->srp_header.mo_control);
			pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct shared_region_pager, pager);
		pager = SHARED_REGION_PAGER_NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&shared_region_pager_lock);
	}

	if (needs_trimming) {
		shared_region_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * shared_region_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
shared_region_pager_deallocate(
	memory_object_t         mem_obj)
{
	shared_region_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
	pager = shared_region_pager_lookup(mem_obj);
	shared_region_pager_deallocate_internal(pager, FALSE);
}

/*
 * shared_region_pager_terminate()
 *
 * Nothing to do here: the actual termination work is done in
 * shared_region_pager_deallocate_internal().
 */
kern_return_t
shared_region_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * shared_region_pager_synchronize()
 *
 * memory_object_synchronize() is no longer supported, so this should
 * never be called.
 */
kern_return_t
shared_region_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported");
	return KERN_FAILURE;
}

/*
 * shared_region_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times. This is called by VM each time
 * the memory object gets mapped, but we only take one extra reference the
 * first time it is called.
 */
kern_return_t
shared_region_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	shared_region_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));

	pager = shared_region_pager_lookup(mem_obj);

	lck_mtx_lock(&shared_region_pager_lock);
	assert(pager->srp_is_ready);
	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */
	if (!pager->srp_is_mapped) {
		pager->srp_is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
		shared_region_pager_count_mapped++;
	}
	lck_mtx_unlock(&shared_region_pager_lock);

	return KERN_SUCCESS;
}

/*
 * shared_region_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
shared_region_pager_last_unmap(
	memory_object_t         mem_obj)
{
	shared_region_pager_t   pager;
	int                     count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
	    ("shared_region_pager_last_unmap: %p\n", mem_obj));

	pager = shared_region_pager_lookup(mem_obj);

	lck_mtx_lock(&shared_region_pager_lock);
	if (pager->srp_is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		shared_region_pager_count_mapped--;
		count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
		if (count_unmapped > shared_region_pager_count_unmapped_max) {
			shared_region_pager_count_unmapped_max = count_unmapped;
		}
		pager->srp_is_mapped = FALSE;
		shared_region_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock! */
	} else {
		lck_mtx_unlock(&shared_region_pager_lock);
	}

	return KERN_SUCCESS;
}

boolean_t
shared_region_pager_backing_object(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	vm_object_t             *backing_object,
	vm_object_offset_t      *backing_offset)
{
	shared_region_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL,
	    ("shared_region_pager_backing_object: %p\n", mem_obj));

	pager = shared_region_pager_lookup(mem_obj);

	*backing_object = pager->srp_backing_object;
	*backing_offset = pager->srp_backing_offset + offset;

	return TRUE;
}


/*
 * shared_region_pager_lookup()
 *
 * Convert a memory_object_t back to its shared_region_pager_t, verifying
 * that it is indeed one of ours.
 */
shared_region_pager_t
shared_region_pager_lookup(
	memory_object_t  mem_obj)
{
	shared_region_pager_t   pager;

	assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
	pager = (shared_region_pager_t)(uintptr_t) mem_obj;
	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0);
	return pager;
}

/*
 * Create and return a pager for the given object with the
 * given slide information.
 */
static shared_region_pager_t
shared_region_pager_create(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	struct vm_shared_region_slide_info *slide_info,
#if !__has_feature(ptrauth_calls)
	__unused
#endif /* !__has_feature(ptrauth_calls) */
	uint64_t                jop_key)
{
	shared_region_pager_t   pager;
	memory_object_control_t control;
	kern_return_t           kr;
	vm_object_t             object;

	pager = kalloc_type(struct shared_region_pager, Z_WAITOK);
	if (pager == SHARED_REGION_PAGER_NULL) {
		return SHARED_REGION_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
	pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->srp_is_ready = FALSE; /* not ready until it has a "name" */
	/* existence reference (for the cache) + 1 for the caller */
	os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2);
	pager->srp_is_mapped = FALSE;
	pager->srp_backing_object = backing_object;
	pager->srp_backing_offset = backing_offset;
	pager->srp_slide_info = slide_info;
#if __has_feature(ptrauth_calls)
	pager->srp_jop_key = jop_key;
	/*
	 * If we're getting slide_info from the shared_region,
	 * take a reference, so it can't disappear from under us.
	 */
	if (slide_info->si_shared_region) {
		assert(slide_info->si_ptrauth);
		vm_shared_region_reference(slide_info->si_shared_region);
	}
#endif /* __has_feature(ptrauth_calls) */

	vm_object_reference(backing_object);

	lck_mtx_lock(&shared_region_pager_lock);
	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&shared_region_pager_queue,
	    pager,
	    shared_region_pager_t,
	    srp_queue);
	shared_region_pager_count++;
	if (shared_region_pager_count > shared_region_pager_count_max) {
		shared_region_pager_count_max = shared_region_pager_count;
	}
	lck_mtx_unlock(&shared_region_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&shared_region_pager_lock);
	/* the new pager is now ready to be used */
	pager->srp_is_ready = TRUE;
	object = memory_object_to_vm_object((memory_object_t) pager);
	assert(object);
	/*
	 * No one knows about this object and so we get away without the object lock.
	 * This object is _eventually_ backed by the dyld shared cache and so we want
	 * to benefit from the lock priority boosting.
	 */
	object->object_is_shared_cache = TRUE;
	lck_mtx_unlock(&shared_region_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->srp_is_ready);

	return pager;
}

/*
 * shared_region_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
memory_object_t
shared_region_pager_setup(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t                jop_key)
{
	shared_region_pager_t   pager;

	/* create new pager */
	pager = shared_region_pager_create(backing_object,
	    backing_offset, slide_info, jop_key);
	if (pager == SHARED_REGION_PAGER_NULL) {
		/* could not create a new pager */
		return MEMORY_OBJECT_NULL;
	}

	lck_mtx_lock(&shared_region_pager_lock);
	while (!pager->srp_is_ready) {
		lck_mtx_sleep(&shared_region_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->srp_is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&shared_region_pager_lock);

	return (memory_object_t) pager;
}
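
/*
 * Note on references (see shared_region_pager_create()): the returned
 * memory object comes with two references, the cache's existence reference
 * plus one for the caller; the caller is expected to drop its reference,
 * presumably via memory_object_deallocate(), once it is done with the
 * object.
 */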

#if __has_feature(ptrauth_calls)
/*
 * shared_region_pager_match()
 *
 * Look for an existing pager matching the given backing object, offset,
 * slide info and JOP key, and return it with an extra reference; otherwise
 * create a new one via shared_region_pager_setup().
 */
memory_object_t
shared_region_pager_match(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	vm_shared_region_slide_info_t slide_info,
	uint64_t                jop_key)
{
	shared_region_pager_t   pager;
	vm_shared_region_slide_info_t si;

	lck_mtx_lock(&shared_region_pager_lock);
	queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
		if (pager->srp_backing_object != backing_object->copy) {
			continue;
		}
		if (pager->srp_backing_offset != backing_offset) {
			continue;
		}
		si = pager->srp_slide_info;

		/* If there's no AUTH section then it can't match (slide_info is always !NULL) */
		if (!si->si_ptrauth) {
			continue;
		}
		if (pager->srp_jop_key != jop_key) {
			continue;
		}
		if (si->si_slide != slide_info->si_slide) {
			continue;
		}
		if (si->si_start != slide_info->si_start) {
			continue;
		}
		if (si->si_end != slide_info->si_end) {
			continue;
		}
		if (si->si_slide_object != slide_info->si_slide_object) {
			continue;
		}
		if (si->si_slide_info_size != slide_info->si_slide_info_size) {
			continue;
		}
		if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
			continue;
		}
		/* the caller expects a reference on this */
		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
		lck_mtx_unlock(&shared_region_pager_lock);
		return (memory_object_t)pager;
	}

	/*
	 * We didn't find a pre-existing pager, so create one.
	 *
	 * Note the slight race condition here, since we drop the lock: it
	 * could lead to more than one thread calling setup with the same
	 * arguments. That shouldn't break anything, just waste a little
	 * memory.
	 */
	lck_mtx_unlock(&shared_region_pager_lock);
	return shared_region_pager_setup(backing_object->copy, backing_offset, slide_info, jop_key);
}

void
shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
{
	__unused shared_region_pager_t pager = (shared_region_pager_t)memobj;

	assert(pager->srp_jop_key == task->jop_pid);
}
#endif /* __has_feature(ptrauth_calls) */

void
shared_region_pager_trim(void)
{
	shared_region_pager_t   pager, prev_pager;
	queue_head_t            trim_queue;
	int                     num_trim;
	int                     count_unmapped;

	lck_mtx_lock(&shared_region_pager_lock);

	/*
	 * We have too many pagers, so try to trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
	    !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);

		if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
		    pager->srp_is_ready &&
		    !pager->srp_is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			shared_region_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    shared_region_pager_t,
			    srp_queue);

			/* are we back under the cache limit? */
			count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
			if (count_unmapped <= shared_region_pager_cache_limit) {
				break;
			}
		}
	}
	if (num_trim > shared_region_pager_num_trim_max) {
		shared_region_pager_num_trim_max = num_trim;
	}
	shared_region_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&shared_region_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    shared_region_pager_t,
		    srp_queue);
		pager->srp_queue.next = NULL;
		pager->srp_queue.prev = NULL;
		assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		(void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
		shared_region_pager_terminate_internal(pager);
	}
}