xref: /xnu-8020.140.41/osfmk/vm/vm_shared_region_pager.c (revision 27b03b360a988dfd3dfdf34262bb0042026747cc)
1 /*
2  * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/queue.h>
46 #include <kern/thread.h>
47 #include <kern/ipc_kobject.h>
48 
49 #include <ipc/ipc_port.h>
50 #include <ipc/ipc_space.h>
51 
52 #include <vm/memory_object.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_fault.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_pageout.h>
57 #include <vm/vm_protos.h>
58 #include <vm/vm_shared_region.h>
59 
60 #include <sys/kdebug_triage.h>
61 
62 #if __has_feature(ptrauth_calls)
63 #include <ptrauth.h>
64 extern boolean_t diversify_user_jop;
65 #endif /* __has_feature(ptrauth_calls) */
66 
67 /*
68  * SHARED REGION MEMORY PAGER
69  *
70  * This external memory manager (EMM) handles mappings of a dyld shared cache
71  * in shared regions, applying any necessary modifications (sliding,
72  * pointer signing, ...).
73  *
74  * It mostly handles page-in requests (from memory_object_data_request()) by
75  * getting the original data from its backing VM object, itself backed by
76  * the dyld shared cache file, modifying it if needed and providing it to VM.
77  *
78  * The modified pages will never be dirtied, so the memory manager doesn't
79  * need to handle page-out requests (from memory_object_data_return()).  The
80  * pages need to be mapped copy-on-write, so that the originals stay clean.
81  *
82  * We don't expect to have to handle a large number of shared cache files,
83  * so the data structures are very simple (simple linked list) for now.
84  */
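/*
 * Editorial sketch (added; not part of the original source): the data flow
 * this pager implements, in terms of the fields and routines defined below.
 *
 *   dyld shared cache file
 *        |   (vnode pager)
 *        v
 *   srp_backing_object  --- vm_fault_page() --->  original page
 *        |
 *        |   copy, then slide / re-sign via vm_shared_region_slide_page()
 *        v
 *   destination VM object (srp_header.mo_control), provided to VM and
 *   mapped copy-on-write into each task's shared region
 */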
85 
86 /* forward declarations */
87 void shared_region_pager_reference(memory_object_t mem_obj);
88 void shared_region_pager_deallocate(memory_object_t mem_obj);
89 kern_return_t shared_region_pager_init(memory_object_t mem_obj,
90     memory_object_control_t control,
91     memory_object_cluster_size_t pg_size);
92 kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
93 kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
94     memory_object_offset_t offset,
95     memory_object_cluster_size_t length,
96     vm_prot_t protection_required,
97     memory_object_fault_info_t fault_info);
98 kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
99     memory_object_offset_t offset,
100     memory_object_cluster_size_t      data_cnt,
101     memory_object_offset_t *resid_offset,
102     int *io_error,
103     boolean_t dirty,
104     boolean_t kernel_copy,
105     int upl_flags);
106 kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
107     memory_object_offset_t offset,
108     memory_object_cluster_size_t data_cnt);
109 kern_return_t shared_region_pager_data_unlock(memory_object_t mem_obj,
110     memory_object_offset_t offset,
111     memory_object_size_t size,
112     vm_prot_t desired_access);
113 kern_return_t shared_region_pager_synchronize(memory_object_t mem_obj,
114     memory_object_offset_t offset,
115     memory_object_size_t length,
116     vm_sync_t sync_flags);
117 kern_return_t shared_region_pager_map(memory_object_t mem_obj,
118     vm_prot_t prot);
119 kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
120 boolean_t shared_region_pager_backing_object(
121 	memory_object_t mem_obj,
122 	memory_object_offset_t mem_obj_offset,
123 	vm_object_t *backing_object,
124 	vm_object_offset_t *backing_offset);
125 
126 /*
127  * Vector of VM operations for this EMM.
128  * These routines are invoked by VM via the memory_object_*() interfaces.
129  */
130 const struct memory_object_pager_ops shared_region_pager_ops = {
131 	.memory_object_reference = shared_region_pager_reference,
132 	.memory_object_deallocate = shared_region_pager_deallocate,
133 	.memory_object_init = shared_region_pager_init,
134 	.memory_object_terminate = shared_region_pager_terminate,
135 	.memory_object_data_request = shared_region_pager_data_request,
136 	.memory_object_data_return = shared_region_pager_data_return,
137 	.memory_object_data_initialize = shared_region_pager_data_initialize,
138 	.memory_object_data_unlock = shared_region_pager_data_unlock,
139 	.memory_object_synchronize = shared_region_pager_synchronize,
140 	.memory_object_map = shared_region_pager_map,
141 	.memory_object_last_unmap = shared_region_pager_last_unmap,
142 	.memory_object_data_reclaim = NULL,
143 	.memory_object_backing_object = shared_region_pager_backing_object,
144 	.memory_object_pager_name = "shared_region"
145 };
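/*
 * Editorial sketch (added; not part of the original source): VM reaches the
 * routines above through the generic memory_object_*() entry points, which
 * dispatch through this ops vector.  Roughly (simplified, assumed shape of
 * the generic dispatcher):
 *
 *	kr = mem_obj->mo_pager_ops->memory_object_data_request(mem_obj,
 *	        offset, length, desired_access, fault_info);
 */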
146 
147 #if __has_feature(ptrauth_calls)
148 /*
149  * Track mappings between shared_region_id and the key used to sign
150  * authenticated pointers.
151  */
152 typedef struct shared_region_jop_key_map {
153 	queue_chain_t  srk_queue;
154 	char           *srk_shared_region_id;
155 	uint64_t       srk_jop_key;
156 	os_refcnt_t    srk_ref_count;         /* count of tasks active with this shared_region_id */
157 } *shared_region_jop_key_map_t;
158 
159 os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);
160 
161 /*
162  * The list is protected by the "shared_region_jop_key_lock" lock.
163  */
164 int shared_region_key_count = 0;              /* number of active shared_region_id keys */
165 queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
166 LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
167 LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);
168 
169 /*
170  * Find the pointer signing key for the given shared_region_id.
171  */
172 uint64_t
173 shared_region_find_key(char *shared_region_id)
174 {
175 	shared_region_jop_key_map_t region;
176 	uint64_t key;
177 
178 	lck_mtx_lock(&shared_region_jop_key_lock);
179 	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
180 		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
181 			goto found;
182 		}
183 	}
184 	panic("shared_region_find_key() no key for region '%s'", shared_region_id);
185 
186 found:
187 	key = region->srk_jop_key;
188 	lck_mtx_unlock(&shared_region_jop_key_lock);
189 	return key;
190 }
191 
192 /*
193  * Establish the authentication key to use for the given shared_region_id.
194  * If inherit is TRUE, then the key must match inherited_key.
195  * Creates an additional reference when successful.
196  */
197 void
198 shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
199 {
200 	shared_region_jop_key_map_t region;
201 	shared_region_jop_key_map_t new = NULL;
202 
203 	assert(shared_region_id != NULL);
204 again:
205 	lck_mtx_lock(&shared_region_jop_key_lock);
206 	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
207 		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
208 			os_ref_retain_locked(&region->srk_ref_count);
209 			goto done;
210 		}
211 	}
212 
213 	/*
214 	 * ID was not found. On the first pass, allocate a new entry and redo the lookup.
215 	 */
216 	if (new == NULL) {
217 		lck_mtx_unlock(&shared_region_jop_key_lock);
218 		new = kalloc_type(struct shared_region_jop_key_map, Z_WAITOK);
219 		uint_t len = strlen(shared_region_id) + 1;
220 		new->srk_shared_region_id = kalloc_data(len, Z_WAITOK);
221 		strlcpy(new->srk_shared_region_id, shared_region_id, len);
222 		os_ref_init(&new->srk_ref_count, &srk_refgrp);
223 
224 		if (diversify_user_jop && inherit) {
225 			new->srk_jop_key = inherited_key;
226 		} else if (diversify_user_jop && strlen(shared_region_id) > 0) {
227 			new->srk_jop_key = generate_jop_key();
228 		} else {
229 			new->srk_jop_key = ml_default_jop_pid();
230 		}
231 
232 		goto again;
233 	}
234 
235 	/*
236 	 * Use the newly allocated entry
237 	 */
238 	++shared_region_key_count;
239 	queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
240 	region = new;
241 	new = NULL;
242 
243 done:
244 	if (inherit && inherited_key != region->srk_jop_key) {
245 		panic("shared_region_key_alloc() inherited key mismatch");
246 	}
247 	lck_mtx_unlock(&shared_region_jop_key_lock);
248 
249 	/*
250 	 * free any unused new entry
251 	 */
252 	if (new != NULL) {
253 		kfree_data(new->srk_shared_region_id,
254 		    strlen(new->srk_shared_region_id) + 1);
255 		kfree_type(struct shared_region_jop_key_map, new);
256 	}
257 }
258 
259 /*
260  * Mark the end of using a shared_region_id's key
261  */
262 extern void
263 shared_region_key_dealloc(char *shared_region_id)
264 {
265 	shared_region_jop_key_map_t region;
266 
267 	assert(shared_region_id != NULL);
268 	lck_mtx_lock(&shared_region_jop_key_lock);
269 	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
270 		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
271 			goto done;
272 		}
273 	}
274 	panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);
275 
276 done:
277 	if (os_ref_release_locked(&region->srk_ref_count) == 0) {
278 		queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
279 		--shared_region_key_count;
280 	} else {
281 		region = NULL;
282 	}
283 	lck_mtx_unlock(&shared_region_jop_key_lock);
284 
285 	if (region != NULL) {
286 		kfree_data(region->srk_shared_region_id,
287 		    strlen(region->srk_shared_region_id) + 1);
288 		kfree_type(struct shared_region_jop_key_map, region);
289 	}
290 }
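/*
 * Editorial sketch (added; not part of the original source): expected
 * lifecycle of a shared_region_id's signing key, using only the routines
 * defined above.  The identifier string is made up for illustration.
 */
#if 0   /* illustrative only, never compiled */
	char *id = "com.example.region";        /* hypothetical ID */
	uint64_t key;

	/* register the ID, creating (or reusing) its signing key */
	shared_region_key_alloc(id, /* inherit */ false, /* inherited_key */ 0);

	/* lookups return the same key while a reference is held */
	key = shared_region_find_key(id);

	/* drop the reference when the task stops using this shared region */
	shared_region_key_dealloc(id);
#endif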
291 #endif /* __has_feature(ptrauth_calls) */
292 
293 /*
294  * The "shared_region_pager" describes a memory object backed by
295  * the "shared_region" EMM.
296  */
297 typedef struct shared_region_pager {
298 	struct memory_object    srp_header;          /* mandatory generic header */
299 
300 	/* pager-specific data */
301 	queue_chain_t           srp_queue;          /* next & prev pagers */
302 #if MEMORY_OBJECT_HAS_REFCOUNT
303 #define srp_ref_count           srp_header.mo_ref
304 #else
305 	os_ref_atomic_t         srp_ref_count;      /* active uses */
306 #endif
307 	bool                    srp_is_mapped;      /* has active mappings */
308 	bool                    srp_is_ready;       /* is this pager ready? */
309 	vm_object_t             srp_backing_object; /* VM object for shared cache */
310 	vm_object_offset_t      srp_backing_offset;
311 	vm_shared_region_slide_info_t srp_slide_info;
312 #if __has_feature(ptrauth_calls)
313 	uint64_t                srp_jop_key;        /* zero if used for (non-ptrauth) arm64 processes */
314 #endif /* __has_feature(ptrauth_calls) */
315 } *shared_region_pager_t;
316 #define SHARED_REGION_PAGER_NULL        ((shared_region_pager_t) NULL)
317 
318 /*
319  * List of memory objects managed by this EMM.
320  * The list is protected by the "shared_region_pager_lock" lock.
321  */
322 int shared_region_pager_count = 0;              /* number of pagers */
323 int shared_region_pager_count_mapped = 0;       /* number of mapped pagers */
324 queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
325 LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
326 LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);
327 
328 /*
329  * Maximum number of unmapped pagers we're willing to keep around.
330  */
331 int shared_region_pager_cache_limit = 0;
332 
333 /*
334  * Statistics & counters.
335  */
336 int shared_region_pager_count_max = 0;
337 int shared_region_pager_count_unmapped_max = 0;
338 int shared_region_pager_num_trim_max = 0;
339 int shared_region_pager_num_trim_total = 0;
340 
341 uint64_t shared_region_pager_copied = 0;
342 uint64_t shared_region_pager_slid = 0;
343 uint64_t shared_region_pager_slid_error = 0;
344 uint64_t shared_region_pager_reclaimed = 0;
345 
346 /* internal prototypes */
347 shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
348 void shared_region_pager_dequeue(shared_region_pager_t pager);
349 void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
350     boolean_t locked);
351 void shared_region_pager_terminate_internal(shared_region_pager_t pager);
352 void shared_region_pager_trim(void);
353 
354 
355 #if DEBUG
356 int shared_region_pagerdebug = 0;
357 #define PAGER_ALL               0xffffffff
358 #define PAGER_INIT              0x00000001
359 #define PAGER_PAGEIN            0x00000002
360 
361 #define PAGER_DEBUG(LEVEL, A)                                           \
362 	MACRO_BEGIN                                                     \
363 	if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {          \
364 	        printf A;                                               \
365 	}                                                               \
366 	MACRO_END
367 #else
368 #define PAGER_DEBUG(LEVEL, A)
369 #endif
370 
371 /*
372  * shared_region_pager_init()
373  *
374  * Initializes the memory object and makes it ready to be used and mapped.
375  */
376 kern_return_t
377 shared_region_pager_init(
378 	memory_object_t         mem_obj,
379 	memory_object_control_t control,
380 #if !DEBUG
381 	__unused
382 #endif
383 	memory_object_cluster_size_t pg_size)
384 {
385 	shared_region_pager_t   pager;
386 	kern_return_t           kr;
387 	memory_object_attr_info_data_t  attributes;
388 
389 	PAGER_DEBUG(PAGER_ALL,
390 	    ("shared_region_pager_init: %p, %p, %x\n",
391 	    mem_obj, control, pg_size));
392 
393 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
394 		return KERN_INVALID_ARGUMENT;
395 	}
396 
397 	pager = shared_region_pager_lookup(mem_obj);
398 
399 	memory_object_control_reference(control);
400 
401 	pager->srp_header.mo_control = control;
402 
403 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
404 	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
405 	attributes.cluster_size = (1 << (PAGE_SHIFT));
406 	attributes.may_cache_object = FALSE;
407 	attributes.temporary = TRUE;
408 
409 	kr = memory_object_change_attributes(
410 		control,
411 		MEMORY_OBJECT_ATTRIBUTE_INFO,
412 		(memory_object_info_t) &attributes,
413 		MEMORY_OBJECT_ATTR_INFO_COUNT);
414 	if (kr != KERN_SUCCESS) {
415 		panic("shared_region_pager_init: "
416 		    "memory_object_change_attributes() failed");
417 	}
418 
419 #if CONFIG_SECLUDED_MEMORY
420 	if (secluded_for_filecache) {
421 #if 00
422 		/*
423 		 * XXX FBDP do we want this in the secluded pool?
424 		 * Ideally, we'd want the shared region used by Camera to
425 		 * NOT be in the secluded pool, but all other shared regions
426 		 * in the secluded pool...
427 		 */
428 		memory_object_mark_eligible_for_secluded(control, TRUE);
429 #endif /* 00 */
430 	}
431 #endif /* CONFIG_SECLUDED_MEMORY */
432 
433 	return KERN_SUCCESS;
434 }
435 
436 /*
437  * shared_region_pager_data_return()
438  *
439  * Handles page-out requests from VM.  This should never happen since
440  * the pages provided by this EMM are not supposed to be dirty or dirtied
441  * and VM should simply discard the contents and reclaim the pages if it
442  * needs to.
443  */
444 kern_return_t
445 shared_region_pager_data_return(
446 	__unused memory_object_t        mem_obj,
447 	__unused memory_object_offset_t offset,
448 	__unused memory_object_cluster_size_t           data_cnt,
449 	__unused memory_object_offset_t *resid_offset,
450 	__unused int                    *io_error,
451 	__unused boolean_t              dirty,
452 	__unused boolean_t              kernel_copy,
453 	__unused int                    upl_flags)
454 {
455 	panic("shared_region_pager_data_return: should never get called");
456 	return KERN_FAILURE;
457 }
458 
459 kern_return_t
460 shared_region_pager_data_initialize(
461 	__unused memory_object_t        mem_obj,
462 	__unused memory_object_offset_t offset,
463 	__unused memory_object_cluster_size_t           data_cnt)
464 {
465 	panic("shared_region_pager_data_initialize: should never get called");
466 	return KERN_FAILURE;
467 }
468 
469 kern_return_t
470 shared_region_pager_data_unlock(
471 	__unused memory_object_t        mem_obj,
472 	__unused memory_object_offset_t offset,
473 	__unused memory_object_size_t           size,
474 	__unused vm_prot_t              desired_access)
475 {
476 	return KERN_FAILURE;
477 }
478 
479 /*
480  * shared_region_pager_data_request()
481  *
482  * Handles page-in requests from VM.
483  */
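/*
 * Editorial outline (added comment; not part of the original source) of the
 * steps performed by shared_region_pager_data_request() below:
 *   1. gather the requested pages in a UPL (memory_object_upl_request());
 *   2. for each page, fault in the original page from the backing object
 *      (vm_fault_page() on srp_backing_object);
 *   3. validate code signing on the source page and propagate the
 *      cs_validated / cs_tainted / cs_nx bits to the UPL;
 *   4. copy each PAGE_SIZE_FOR_SR_SLIDE chunk into the destination page and,
 *      for chunks inside the sliding range, apply sliding / pointer signing
 *      with vm_shared_region_slide_page();
 *   5. mark the UPL pages clean and commit them (or abort on error).
 */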
484 int shared_region_pager_data_request_debug = 0;
485 kern_return_t
486 shared_region_pager_data_request(
487 	memory_object_t         mem_obj,
488 	memory_object_offset_t  offset,
489 	memory_object_cluster_size_t            length,
490 #if !DEBUG
491 	__unused
492 #endif
493 	vm_prot_t               protection_required,
494 	memory_object_fault_info_t mo_fault_info)
495 {
496 	shared_region_pager_t   pager;
497 	memory_object_control_t mo_control;
498 	upl_t                   upl;
499 	int                     upl_flags;
500 	upl_size_t              upl_size;
501 	upl_page_info_t         *upl_pl;
502 	unsigned int            pl_count;
503 	vm_object_t             src_top_object, src_page_object, dst_object;
504 	kern_return_t           kr, retval;
505 	vm_offset_t             src_vaddr, dst_vaddr;
506 	vm_offset_t             cur_offset;
507 	vm_offset_t             offset_in_page;
508 	kern_return_t           error_code;
509 	vm_prot_t               prot;
510 	vm_page_t               src_page, top_page;
511 	int                     interruptible;
512 	struct vm_object_fault_info     fault_info;
513 	mach_vm_offset_t        slide_start_address;
514 
515 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
516 
517 	retval = KERN_SUCCESS;
518 	src_top_object = VM_OBJECT_NULL;
519 	src_page_object = VM_OBJECT_NULL;
520 	upl = NULL;
521 	upl_pl = NULL;
522 	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
523 	fault_info.stealth = TRUE;
524 	fault_info.io_sync = FALSE;
525 	fault_info.mark_zf_absent = FALSE;
526 	fault_info.batch_pmap_op = FALSE;
527 	interruptible = fault_info.interruptible;
528 
529 	pager = shared_region_pager_lookup(mem_obj);
530 	assert(pager->srp_is_ready);
531 	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */
532 	assert(pager->srp_is_mapped); /* pager is mapped */
533 
534 	PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
535 
536 	/*
537 	 * Gather in a UPL all the VM pages requested by VM.
538 	 */
539 	mo_control = pager->srp_header.mo_control;
540 
541 	upl_size = length;
542 	upl_flags =
543 	    UPL_RET_ONLY_ABSENT |
544 	    UPL_SET_LITE |
545 	    UPL_NO_SYNC |
546 	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
547 	    UPL_SET_INTERNAL;
548 	pl_count = 0;
549 	kr = memory_object_upl_request(mo_control,
550 	    offset, upl_size,
551 	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
552 	if (kr != KERN_SUCCESS) {
553 		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_NO_UPL), 0 /* arg */);
554 		retval = kr;
555 		goto done;
556 	}
557 	dst_object = memory_object_control_to_vm_object(mo_control);
558 	assert(dst_object != VM_OBJECT_NULL);
559 
560 	/*
561 	 * We'll map the original data in the kernel address space from the
562 	 * backing VM object (itself backed by the shared cache file via
563 	 * the vnode pager).
564 	 */
565 	src_top_object = pager->srp_backing_object;
566 	assert(src_top_object != VM_OBJECT_NULL);
567 	vm_object_reference(src_top_object); /* keep the source object alive */
568 
569 	slide_start_address = pager->srp_slide_info->si_slid_address;
570 
571 	fault_info.lo_offset += pager->srp_backing_offset;
572 	fault_info.hi_offset += pager->srp_backing_offset;
573 
574 	/*
575 	 * Fill in the contents of the pages requested by VM.
576 	 */
577 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
578 	pl_count = length / PAGE_SIZE;
579 	for (cur_offset = 0;
580 	    retval == KERN_SUCCESS && cur_offset < length;
581 	    cur_offset += PAGE_SIZE) {
582 		ppnum_t dst_pnum;
583 
584 		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
585 			/* this page is not in the UPL: skip it */
586 			continue;
587 		}
588 
589 		/*
590 		 * Map the source (dyld shared cache) page in the kernel's
591 		 * virtual address space.
592 		 * We already hold a reference on the src_top_object.
593 		 */
594 retry_src_fault:
595 		vm_object_lock(src_top_object);
596 		vm_object_paging_begin(src_top_object);
597 		error_code = 0;
598 		prot = VM_PROT_READ;
599 		src_page = VM_PAGE_NULL;
600 		kr = vm_fault_page(src_top_object,
601 		    pager->srp_backing_offset + offset + cur_offset,
602 		    VM_PROT_READ,
603 		    FALSE,
604 		    FALSE,                /* src_page not looked up */
605 		    &prot,
606 		    &src_page,
607 		    &top_page,
608 		    NULL,
609 		    &error_code,
610 		    FALSE,
611 		    &fault_info);
612 		switch (kr) {
613 		case VM_FAULT_SUCCESS:
614 			break;
615 		case VM_FAULT_RETRY:
616 			goto retry_src_fault;
617 		case VM_FAULT_MEMORY_SHORTAGE:
618 			if (vm_page_wait(interruptible)) {
619 				goto retry_src_fault;
620 			}
621 			OS_FALLTHROUGH;
622 		case VM_FAULT_INTERRUPTED:
623 			retval = MACH_SEND_INTERRUPTED;
624 			goto done;
625 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
626 			/* success but no VM page: fail */
627 			vm_object_paging_end(src_top_object);
628 			vm_object_unlock(src_top_object);
629 			OS_FALLTHROUGH;
630 		case VM_FAULT_MEMORY_ERROR:
631 			/* the page is not there ! */
632 			if (error_code) {
633 				retval = error_code;
634 			} else {
635 				retval = KERN_MEMORY_ERROR;
636 			}
637 			goto done;
638 		default:
639 			panic("shared_region_pager_data_request: "
640 			    "vm_fault_page() unexpected error 0x%x\n",
641 			    kr);
642 		}
643 		assert(src_page != VM_PAGE_NULL);
644 		assert(src_page->vmp_busy);
645 
646 		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
647 			vm_page_lockspin_queues();
648 			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
649 				vm_page_speculate(src_page, FALSE);
650 			}
651 			vm_page_unlock_queues();
652 		}
653 
654 		/*
655 		 * Establish pointers to the source
656 		 * and destination physical pages.
657 		 */
658 		dst_pnum = (ppnum_t)
659 		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
660 		assert(dst_pnum != 0);
661 
662 		src_vaddr = (vm_map_offset_t)
663 		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
664 		        << PAGE_SHIFT);
665 		dst_vaddr = (vm_map_offset_t)
666 		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
667 		src_page_object = VM_PAGE_OBJECT(src_page);
668 
669 		/*
670 		 * Validate the original page...
671 		 */
672 		if (src_page_object->code_signed) {
673 			vm_page_validate_cs_mapped(
674 				src_page, PAGE_SIZE, 0,
675 				(const void *) src_vaddr);
676 		}
677 		/*
678 		 * ... and transfer the results to the destination page.
679 		 */
680 		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
681 		    src_page->vmp_cs_validated);
682 		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
683 		    src_page->vmp_cs_tainted);
684 		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
685 		    src_page->vmp_cs_nx);
686 
687 		/*
688 		 * The page provider might access a mapped file, so let's
689 		 * release the object lock for the source page to avoid a
690 		 * potential deadlock.
691 		 * The source page is kept busy and we have a
692 		 * "paging_in_progress" reference on its object, so it's safe
693 		 * to unlock the object here.
694 		 */
695 		assert(src_page->vmp_busy);
696 		assert(src_page_object->paging_in_progress > 0);
697 		vm_object_unlock(src_page_object);
698 
699 		/*
700 		 * Process the original contents of the source page
701 		 * into the destination page.
702 		 */
703 		for (offset_in_page = 0;
704 		    offset_in_page < PAGE_SIZE;
705 		    offset_in_page += PAGE_SIZE_FOR_SR_SLIDE) {
706 			vm_object_offset_t chunk_offset;
707 			vm_object_offset_t offset_in_backing_object;
708 			vm_object_offset_t offset_in_sliding_range;
709 
710 			chunk_offset = offset + cur_offset + offset_in_page;
711 
712 			bcopy((const char *)(src_vaddr +
713 			    offset_in_page),
714 			    (char *)(dst_vaddr + offset_in_page),
715 			    PAGE_SIZE_FOR_SR_SLIDE);
716 
717 			offset_in_backing_object = (chunk_offset +
718 			    pager->srp_backing_offset);
719 			if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
720 			    (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
721 				/* chunk is outside of sliding range: done */
722 				shared_region_pager_copied++;
723 				continue;
724 			}
725 
726 			offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
727 			kr = vm_shared_region_slide_page(pager->srp_slide_info,
728 			    dst_vaddr + offset_in_page,
729 			    (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
730 			    (uint32_t) (offset_in_sliding_range / PAGE_SIZE_FOR_SR_SLIDE),
731 #if __has_feature(ptrauth_calls)
732 			    pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
733 #else /* __has_feature(ptrauth_calls) */
734 			    0
735 #endif /* __has_feature(ptrauth_calls) */
736 			    );
737 			if (shared_region_pager_data_request_debug) {
738 				printf("shared_region_data_request"
739 				    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
740 				    "in sliding range [0x%llx:0x%llx]: "
741 				    "SLIDE offset 0x%llx="
742 				    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
743 				    "[0x%016llx 0x%016llx] "
744 				    "code_signed=%d "
745 				    "cs_validated=%d "
746 				    "cs_tainted=%d "
747 				    "cs_nx=%d "
748 				    "kr=0x%x\n",
749 				    pager,
750 				    offset,
751 				    (uint64_t) cur_offset,
752 				    (uint64_t) offset_in_page,
753 				    chunk_offset,
754 				    pager->srp_slide_info->si_start,
755 				    pager->srp_slide_info->si_end,
756 				    (pager->srp_backing_offset +
757 				    offset +
758 				    cur_offset +
759 				    offset_in_page),
760 				    pager->srp_backing_offset,
761 				    offset,
762 				    (uint64_t) cur_offset,
763 				    (uint64_t) offset_in_page,
764 				    *(uint64_t *)(dst_vaddr + offset_in_page),
765 				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
766 				    src_page_object->code_signed,
767 				    src_page->vmp_cs_validated,
768 				    src_page->vmp_cs_tainted,
769 				    src_page->vmp_cs_nx,
770 				    kr);
771 			}
772 			if (kr != KERN_SUCCESS) {
773 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_SLIDE_ERROR), 0 /* arg */);
774 				shared_region_pager_slid_error++;
775 				break;
776 			}
777 			shared_region_pager_slid++;
778 		}
779 
780 		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
781 		assert(src_page->vmp_busy);
782 		assert(src_page_object->paging_in_progress > 0);
783 		vm_object_lock(src_page_object);
784 
785 		/*
786 		 * Cleanup the result of vm_fault_page() of the source page.
787 		 */
788 		PAGE_WAKEUP_DONE(src_page);
789 		src_page = VM_PAGE_NULL;
790 		vm_object_paging_end(src_page_object);
791 		vm_object_unlock(src_page_object);
792 
793 		if (top_page != VM_PAGE_NULL) {
794 			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
795 			vm_object_lock(src_top_object);
796 			VM_PAGE_FREE(top_page);
797 			vm_object_paging_end(src_top_object);
798 			vm_object_unlock(src_top_object);
799 		}
800 	}
801 
802 done:
803 	if (upl != NULL) {
804 		/* clean up the UPL */
805 
806 		/*
807 		 * The pages are currently dirty because we've just been
808 		 * writing on them, but as far as we're concerned, they're
809 		 * clean since they contain their "original" contents as
810 		 * provided by us, the pager.
811 		 * Tell the UPL to mark them "clean".
812 		 */
813 		upl_clear_dirty(upl, TRUE);
814 
815 		/* abort or commit the UPL */
816 		if (retval != KERN_SUCCESS) {
817 			upl_abort(upl, 0);
818 		} else {
819 			boolean_t empty;
820 			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
821 			    "upl %p offset 0x%llx size 0x%x\n",
822 			    upl, upl->u_offset, upl->u_size);
823 			upl_commit_range(upl, 0, upl->u_size,
824 			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
825 			    upl_pl, pl_count, &empty);
826 		}
827 
828 		/* and deallocate the UPL */
829 		upl_deallocate(upl);
830 		upl = NULL;
831 	}
832 	if (src_top_object != VM_OBJECT_NULL) {
833 		vm_object_deallocate(src_top_object);
834 	}
835 	return retval;
836 }
837 
838 /*
839  * shared_region_pager_reference()
840  *
841  * Get a reference on this memory object.
842  * For external usage only.  Assumes that the initial reference count is not 0,
843  * i.e. one should not "revive" a dead pager this way.
844  */
845 void
846 shared_region_pager_reference(
847 	memory_object_t         mem_obj)
848 {
849 	shared_region_pager_t   pager;
850 
851 	pager = shared_region_pager_lookup(mem_obj);
852 
853 	lck_mtx_lock(&shared_region_pager_lock);
854 	os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
855 	lck_mtx_unlock(&shared_region_pager_lock);
856 }
857 
858 
859 /*
860  * shared_region_pager_dequeue:
861  *
862  * Removes a pager from the list of pagers.
863  *
864  * The caller must hold "shared_region_pager_lock".
865  */
866 void
867 shared_region_pager_dequeue(
868 	shared_region_pager_t pager)
869 {
870 	assert(!pager->srp_is_mapped);
871 
872 	queue_remove(&shared_region_pager_queue,
873 	    pager,
874 	    shared_region_pager_t,
875 	    srp_queue);
876 	pager->srp_queue.next = NULL;
877 	pager->srp_queue.prev = NULL;
878 
879 	shared_region_pager_count--;
880 }
881 
882 /*
883  * shared_region_pager_terminate_internal:
884  *
885  * Trigger the asynchronous termination of the memory object associated
886  * with this pager.
887  * When the memory object is terminated, there will be one more call
888  * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
889  * to finish the clean up.
890  *
891  * "shared_region_pager_lock" should not be held by the caller.
892  * We don't need the lock because the pager has already been removed from
893  * the pagers' list and is now ours exclusively.
894  */
895 void
896 shared_region_pager_terminate_internal(
897 	shared_region_pager_t pager)
898 {
899 	assert(pager->srp_is_ready);
900 	assert(!pager->srp_is_mapped);
901 	assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);
902 
903 	if (pager->srp_backing_object != VM_OBJECT_NULL) {
904 		vm_object_deallocate(pager->srp_backing_object);
905 		pager->srp_backing_object = VM_OBJECT_NULL;
906 	}
907 	/* trigger the destruction of the memory object */
908 	memory_object_destroy(pager->srp_header.mo_control, 0);
909 }
910 
911 /*
912  * shared_region_pager_deallocate_internal()
913  *
914  * Release a reference on this pager and free it when the last reference goes away.
915  * Can be called with shared_region_pager_lock held or not, but always returns
916  * with it unlocked.
917  */
918 void
919 shared_region_pager_deallocate_internal(
920 	shared_region_pager_t   pager,
921 	boolean_t               locked)
922 {
923 	boolean_t       needs_trimming;
924 	int             count_unmapped;
925 	os_ref_count_t  ref_count;
926 
927 	if (!locked) {
928 		lck_mtx_lock(&shared_region_pager_lock);
929 	}
930 
931 	/* if we have too many unmapped pagers, trim some */
932 	count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
933 	needs_trimming = (count_unmapped > shared_region_pager_cache_limit);
934 
935 	/* drop a reference on this pager */
936 	ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
937 
938 	if (ref_count == 1) {
939 		/*
940 		 * Only the "named" reference is left, which means that
941 		 * no one is really holding on to this pager anymore.
942 		 * Terminate it.
943 		 */
944 		shared_region_pager_dequeue(pager);
945 		/* the pager is all ours: no need for the lock now */
946 		lck_mtx_unlock(&shared_region_pager_lock);
947 		shared_region_pager_terminate_internal(pager);
948 	} else if (ref_count == 0) {
949 		/*
950 		 * Dropped the existence reference;  the memory object has
951 		 * been terminated.  Do some final cleanup and release the
952 		 * pager structure.
953 		 */
954 		lck_mtx_unlock(&shared_region_pager_lock);
955 
956 		vm_shared_region_slide_info_t si = pager->srp_slide_info;
957 #if __has_feature(ptrauth_calls)
958 		/*
959 		 * The slide_info for auth sections lives in the shared region.
960 		 * Just deallocate() on the shared region and clear the field.
961 		 */
962 		if (si != NULL) {
963 			if (si->si_shared_region != NULL) {
964 				assert(si->si_ptrauth);
965 				vm_shared_region_deallocate(si->si_shared_region);
966 				pager->srp_slide_info = NULL;
967 				si = NULL;
968 			}
969 		}
970 #endif /* __has_feature(ptrauth_calls) */
971 		if (si != NULL) {
972 			vm_object_deallocate(si->si_slide_object);
973 			/* free the slide_info_entry */
974 			kfree_data(si->si_slide_info_entry,
975 			    si->si_slide_info_size);
976 			kfree_type(struct vm_shared_region_slide_info, si);
977 			pager->srp_slide_info = NULL;
978 		}
979 
980 		if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
981 			memory_object_control_deallocate(pager->srp_header.mo_control);
982 			pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
983 		}
984 		kfree_type(struct shared_region_pager, pager);
985 		pager = SHARED_REGION_PAGER_NULL;
986 	} else {
987 		/* there are still plenty of references:  keep going... */
988 		lck_mtx_unlock(&shared_region_pager_lock);
989 	}
990 
991 	if (needs_trimming) {
992 		shared_region_pager_trim();
993 	}
994 	/* caution: lock is not held on return... */
995 }
996 
997 /*
998  * shared_region_pager_deallocate()
999  *
1000  * Release a reference on this pager and free it when the last
1001  * reference goes away.
1002  */
1003 void
1004 shared_region_pager_deallocate(
1005 	memory_object_t         mem_obj)
1006 {
1007 	shared_region_pager_t   pager;
1008 
1009 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
1010 	pager = shared_region_pager_lookup(mem_obj);
1011 	shared_region_pager_deallocate_internal(pager, FALSE);
1012 }
1013 
1014 /*
1015  *
1016  */
1017 kern_return_t
1018 shared_region_pager_terminate(
1019 #if !DEBUG
1020 	__unused
1021 #endif
1022 	memory_object_t mem_obj)
1023 {
1024 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));
1025 
1026 	return KERN_SUCCESS;
1027 }
1028 
1029 /*
1030  *
1031  */
1032 kern_return_t
1033 shared_region_pager_synchronize(
1034 	__unused memory_object_t        mem_obj,
1035 	__unused memory_object_offset_t offset,
1036 	__unused memory_object_size_t   length,
1037 	__unused vm_sync_t              sync_flags)
1038 {
1039 	panic("shared_region_pager_synchronize: memory_object_synchronize no longer supported");
1040 	return KERN_FAILURE;
1041 }
1042 
1043 /*
1044  * shared_region_pager_map()
1045  *
1046  * This allows VM to let us, the EMM, know that this memory object
1047  * is currently mapped one or more times.  This is called by VM each time
1048  * the memory object gets mapped, but we only take one extra reference the
1049  * first time it is called.
1050  */
1051 kern_return_t
1052 shared_region_pager_map(
1053 	memory_object_t         mem_obj,
1054 	__unused vm_prot_t      prot)
1055 {
1056 	shared_region_pager_t   pager;
1057 
1058 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));
1059 
1060 	pager = shared_region_pager_lookup(mem_obj);
1061 
1062 	lck_mtx_lock(&shared_region_pager_lock);
1063 	assert(pager->srp_is_ready);
1064 	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */
1065 	if (!pager->srp_is_mapped) {
1066 		pager->srp_is_mapped = TRUE;
1067 		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
1068 		shared_region_pager_count_mapped++;
1069 	}
1070 	lck_mtx_unlock(&shared_region_pager_lock);
1071 
1072 	return KERN_SUCCESS;
1073 }
1074 
1075 /*
1076  * shared_region_pager_last_unmap()
1077  *
1078  * This is called by VM when this memory object is no longer mapped anywhere.
1079  */
1080 kern_return_t
1081 shared_region_pager_last_unmap(
1082 	memory_object_t         mem_obj)
1083 {
1084 	shared_region_pager_t   pager;
1085 	int                     count_unmapped;
1086 
1087 	PAGER_DEBUG(PAGER_ALL,
1088 	    ("shared_region_pager_last_unmap: %p\n", mem_obj));
1089 
1090 	pager = shared_region_pager_lookup(mem_obj);
1091 
1092 	lck_mtx_lock(&shared_region_pager_lock);
1093 	if (pager->srp_is_mapped) {
1094 		/*
1095 		 * All the mappings are gone, so let go of the one extra
1096 		 * reference that represents all the mappings of this pager.
1097 		 */
1098 		shared_region_pager_count_mapped--;
1099 		count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
1100 		if (count_unmapped > shared_region_pager_count_unmapped_max) {
1101 			shared_region_pager_count_unmapped_max = count_unmapped;
1102 		}
1103 		pager->srp_is_mapped = FALSE;
1104 		shared_region_pager_deallocate_internal(pager, TRUE);
1105 		/* caution: deallocate_internal() released the lock ! */
1106 	} else {
1107 		lck_mtx_unlock(&shared_region_pager_lock);
1108 	}
1109 
1110 	return KERN_SUCCESS;
1111 }
1112 
1113 boolean_t
1114 shared_region_pager_backing_object(
1115 	memory_object_t         mem_obj,
1116 	memory_object_offset_t  offset,
1117 	vm_object_t             *backing_object,
1118 	vm_object_offset_t      *backing_offset)
1119 {
1120 	shared_region_pager_t   pager;
1121 
1122 	PAGER_DEBUG(PAGER_ALL,
1123 	    ("shared_region_pager_backing_object: %p\n", mem_obj));
1124 
1125 	pager = shared_region_pager_lookup(mem_obj);
1126 
1127 	*backing_object = pager->srp_backing_object;
1128 	*backing_offset = pager->srp_backing_offset + offset;
1129 
1130 	return TRUE;
1131 }
1132 
1133 
1134 /*
1135  *
1136  */
1137 shared_region_pager_t
1138 shared_region_pager_lookup(
1139 	memory_object_t  mem_obj)
1140 {
1141 	shared_region_pager_t   pager;
1142 
1143 	assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
1144 	pager = (shared_region_pager_t)(uintptr_t) mem_obj;
1145 	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0);
1146 	return pager;
1147 }
1148 
1149 /*
1150  * Create and return a pager for the given object with the
1151  * given slide information.
1152  */
1153 static shared_region_pager_t
1154 shared_region_pager_create(
1155 	vm_object_t             backing_object,
1156 	vm_object_offset_t      backing_offset,
1157 	struct vm_shared_region_slide_info *slide_info,
1158 #if !__has_feature(ptrauth_calls)
1159 	__unused
1160 #endif /* !__has_feature(ptrauth_calls) */
1161 	uint64_t                jop_key)
1162 {
1163 	shared_region_pager_t   pager;
1164 	memory_object_control_t control;
1165 	kern_return_t           kr;
1166 	vm_object_t             object;
1167 
1168 	pager = kalloc_type(struct shared_region_pager, Z_WAITOK);
1169 	if (pager == SHARED_REGION_PAGER_NULL) {
1170 		return SHARED_REGION_PAGER_NULL;
1171 	}
1172 
1173 	/*
1174 	 * The vm_map call takes both named entry ports and raw memory
1175 	 * objects in the same parameter.  We need to make sure that
1176 	 * vm_map does not see this object as a named entry port.  So,
1177 	 * we reserve the first word in the object for a fake ip_kotype
1178 	 * setting - that will tell vm_map to use it as a memory object.
1179 	 */
1180 	pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
1181 	pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
1182 	pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1183 
1184 	pager->srp_is_ready = FALSE;/* not ready until it has a "name" */
1185 	/* existence reference (for the cache) + 1 for the caller */
1186 	os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2);
1187 	pager->srp_is_mapped = FALSE;
1188 	pager->srp_backing_object = backing_object;
1189 	pager->srp_backing_offset = backing_offset;
1190 	pager->srp_slide_info = slide_info;
1191 #if __has_feature(ptrauth_calls)
1192 	pager->srp_jop_key = jop_key;
1193 	/*
1194 	 * If we're getting slide_info from the shared_region,
1195 	 * take a reference, so it can't disappear from under us.
1196 	 */
1197 	if (slide_info->si_shared_region) {
1198 		assert(slide_info->si_ptrauth);
1199 		vm_shared_region_reference(slide_info->si_shared_region);
1200 	}
1201 #endif /* __has_feature(ptrauth_calls) */
1202 
1203 	vm_object_reference(backing_object);
1204 
1205 	lck_mtx_lock(&shared_region_pager_lock);
1206 	/* enter new pager at the head of our list of pagers */
1207 	queue_enter_first(&shared_region_pager_queue,
1208 	    pager,
1209 	    shared_region_pager_t,
1210 	    srp_queue);
1211 	shared_region_pager_count++;
1212 	if (shared_region_pager_count > shared_region_pager_count_max) {
1213 		shared_region_pager_count_max = shared_region_pager_count;
1214 	}
1215 	lck_mtx_unlock(&shared_region_pager_lock);
1216 
1217 	kr = memory_object_create_named((memory_object_t) pager,
1218 	    0,
1219 	    &control);
1220 	assert(kr == KERN_SUCCESS);
1221 
1222 	memory_object_mark_trusted(control);
1223 
1224 	lck_mtx_lock(&shared_region_pager_lock);
1225 	/* the new pager is now ready to be used */
1226 	pager->srp_is_ready = TRUE;
1227 	object = memory_object_to_vm_object((memory_object_t) pager);
1228 	assert(object);
1229 	/*
1230 	 * No one knows about this object and so we get away without the object lock.
1231 	 * This object is _eventually_ backed by the dyld shared cache and so we want
1232 	 * to benefit from the lock priority boosting.
1233 	 */
1234 	object->object_is_shared_cache = TRUE;
1235 	lck_mtx_unlock(&shared_region_pager_lock);
1236 
1237 	/* wakeup anyone waiting for this pager to be ready */
1238 	thread_wakeup(&pager->srp_is_ready);
1239 
1240 	return pager;
1241 }
1242 
1243 /*
1244  * shared_region_pager_setup()
1245  *
1246  * Provide the caller with a memory object backed by the provided
1247  * "backing_object" VM object.
1248  */
1249 memory_object_t
1250 shared_region_pager_setup(
1251 	vm_object_t             backing_object,
1252 	vm_object_offset_t      backing_offset,
1253 	struct vm_shared_region_slide_info *slide_info,
1254 	uint64_t                jop_key)
1255 {
1256 	shared_region_pager_t   pager;
1257 
1258 	/* create new pager */
1259 	pager = shared_region_pager_create(backing_object,
1260 	    backing_offset, slide_info, jop_key);
1261 	if (pager == SHARED_REGION_PAGER_NULL) {
1262 		/* could not create a new pager */
1263 		return MEMORY_OBJECT_NULL;
1264 	}
1265 
1266 	lck_mtx_lock(&shared_region_pager_lock);
1267 	while (!pager->srp_is_ready) {
1268 		lck_mtx_sleep(&shared_region_pager_lock,
1269 		    LCK_SLEEP_DEFAULT,
1270 		    &pager->srp_is_ready,
1271 		    THREAD_UNINT);
1272 	}
1273 	lck_mtx_unlock(&shared_region_pager_lock);
1274 
1275 	return (memory_object_t) pager;
1276 }
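/*
 * Editorial sketch (added; not part of the original source): how a caller,
 * such as the shared region mapping code, might be expected to use
 * shared_region_pager_setup().  The actual vm_map entry point used to
 * establish the copy-on-write mapping is not shown here.
 */
#if 0   /* illustrative only, never compiled */
	memory_object_t sr_pager;

	sr_pager = shared_region_pager_setup(backing_object, backing_offset,
	    slide_info, jop_key);
	if (sr_pager == MEMORY_OBJECT_NULL) {
		return KERN_RESOURCE_SHORTAGE;
	}

	/* ... enter "sr_pager" copy-on-write in the target task's map ... */

	/* release the caller's reference once the mapping holds its own */
	memory_object_deallocate(sr_pager);
#endif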
1277 
1278 #if __has_feature(ptrauth_calls)
1279 /*
1280  * shared_region_pager_match()
1281  *
1282  * Provide the caller with a memory object backed by the provided
1283  * "backing_object" VM object, reusing an existing matching pager if possible.
1284  */
1285 memory_object_t
1286 shared_region_pager_match(
1287 	vm_object_t                   backing_object,
1288 	vm_object_offset_t            backing_offset,
1289 	vm_shared_region_slide_info_t slide_info,
1290 	uint64_t                      jop_key)
1291 {
1292 	shared_region_pager_t         pager;
1293 	vm_shared_region_slide_info_t si;
1294 
1295 	lck_mtx_lock(&shared_region_pager_lock);
1296 	queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
1297 		if (pager->srp_backing_object != backing_object->copy) {
1298 			continue;
1299 		}
1300 		if (pager->srp_backing_offset != backing_offset) {
1301 			continue;
1302 		}
1303 		si = pager->srp_slide_info;
1304 
1305 		/* If there's no AUTH section then it can't match (slide_info is always !NULL) */
1306 		if (!si->si_ptrauth) {
1307 			continue;
1308 		}
1309 		if (pager->srp_jop_key != jop_key) {
1310 			continue;
1311 		}
1312 		if (si->si_slide != slide_info->si_slide) {
1313 			continue;
1314 		}
1315 		if (si->si_start != slide_info->si_start) {
1316 			continue;
1317 		}
1318 		if (si->si_end != slide_info->si_end) {
1319 			continue;
1320 		}
1321 		if (si->si_slide_object != slide_info->si_slide_object) {
1322 			continue;
1323 		}
1324 		if (si->si_slide_info_size != slide_info->si_slide_info_size) {
1325 			continue;
1326 		}
1327 		if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
1328 			continue;
1329 		}
1330 		/* the caller expects a reference on this */
1331 		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
1332 		lck_mtx_unlock(&shared_region_pager_lock);
1333 		return (memory_object_t)pager;
1334 	}
1335 
1336 	/*
1337 	 * We didn't find a pre-existing pager, so create one.
1338 	 *
1339 	 * Note slight race condition here since we drop the lock. This could lead to more than one
1340 	 * thread calling setup with the same arguments here. That shouldn't break anything, just
1341 	 * waste a little memory.
1342 	 */
1343 	lck_mtx_unlock(&shared_region_pager_lock);
1344 	return shared_region_pager_setup(backing_object->copy, backing_offset, slide_info, jop_key);
1345 }
1346 
1347 void
1348 shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
1349 {
1350 	__unused shared_region_pager_t  pager = (shared_region_pager_t)memobj;
1351 
1352 	assert(pager->srp_jop_key == task->jop_pid);
1353 }
1354 #endif /* __has_feature(ptrauth_calls) */
1355 
1356 void
1357 shared_region_pager_trim(void)
1358 {
1359 	shared_region_pager_t   pager, prev_pager;
1360 	queue_head_t            trim_queue;
1361 	int                     num_trim;
1362 	int                     count_unmapped;
1363 
1364 	lck_mtx_lock(&shared_region_pager_lock);
1365 
1366 	/*
1367 	 * We have too many pagers; try to trim some unused ones,
1368 	 * starting with the oldest pager at the end of the queue.
1369 	 */
1370 	queue_init(&trim_queue);
1371 	num_trim = 0;
1372 
1373 	for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
1374 	    !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
1375 	    pager = prev_pager) {
1376 		/* get prev elt before we dequeue */
1377 		prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);
1378 
1379 		if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
1380 		    pager->srp_is_ready &&
1381 		    !pager->srp_is_mapped) {
1382 			/* this pager can be trimmed */
1383 			num_trim++;
1384 			/* remove this pager from the main list ... */
1385 			shared_region_pager_dequeue(pager);
1386 			/* ... and add it to our trim queue */
1387 			queue_enter_first(&trim_queue,
1388 			    pager,
1389 			    shared_region_pager_t,
1390 			    srp_queue);
1391 
1392 			/* have we trimmed enough pagers? */
1393 			count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
1394 			if (count_unmapped <= shared_region_pager_cache_limit) {
1395 				break;
1396 			}
1397 		}
1398 	}
1399 	if (num_trim > shared_region_pager_num_trim_max) {
1400 		shared_region_pager_num_trim_max = num_trim;
1401 	}
1402 	shared_region_pager_num_trim_total += num_trim;
1403 
1404 	lck_mtx_unlock(&shared_region_pager_lock);
1405 
1406 	/* terminate the trimmed pagers */
1407 	while (!queue_empty(&trim_queue)) {
1408 		queue_remove_first(&trim_queue,
1409 		    pager,
1410 		    shared_region_pager_t,
1411 		    srp_queue);
1412 		pager->srp_queue.next = NULL;
1413 		pager->srp_queue.prev = NULL;
1414 		assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2);
1415 		/*
1416 		 * We can't call deallocate_internal() because the pager
1417 		 * has already been dequeued, but we still need to remove
1418 		 * a reference.
1419 		 */
1420 		(void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
1421 		shared_region_pager_terminate_internal(pager);
1422 	}
1423 }
1424