xref: /xnu-11417.121.6/osfmk/vm/vm_shared_region_pager.c (revision a1e26a70f38d1d7daa7b49b258e2f8538ad81650)
1 /*
2  * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/queue.h>
46 #include <kern/thread.h>
47 #include <kern/ipc_kobject.h>
48 
49 #include <ipc/ipc_port.h>
50 #include <ipc/ipc_space.h>
51 
52 #include <vm/memory_object_internal.h>
53 #include <vm/vm_kern.h>
54 #include <vm/vm_fault_internal.h>
55 #include <vm/vm_map.h>
56 #include <vm/vm_pageout_xnu.h>
57 #include <vm/vm_protos_internal.h>
58 #include <vm/vm_shared_region_internal.h>
59 #include <vm/vm_ubc.h>
60 #include <vm/vm_page_internal.h>
61 #include <vm/vm_object_internal.h>
62 
63 #include <sys/kdebug_triage.h>
64 #include <sys/random.h>
65 
66 #if __has_feature(ptrauth_calls)
67 #include <ptrauth.h>
68 extern boolean_t diversify_user_jop;
69 #endif /* __has_feature(ptrauth_calls) */
70 
71 extern int panic_on_dyld_issue;
72 
73 /*
74  * SHARED REGION MEMORY PAGER
75  *
76  * This external memory manager (EMM) handles mappings of a dyld shared cache
77  * in shared regions, applying any necessary modifications (sliding,
78  * pointer signing, ...).
79  *
80  * It mostly handles page-in requests (from memory_object_data_request()) by
81  * getting the original data from its backing VM object, itself backed by
82  * the dyld shared cache file, modifying it if needed and providing it to VM.
83  *
84  * The modified pages will never be dirtied, so the memory manager doesn't
85  * need to handle page-out requests (from memory_object_data_return()).  The
86  * pages need to be mapped copy-on-write, so that the originals stay clean.
87  *
88  * We don't expect to have to handle a large number of shared cache files,
89  * so the data structures are very simple (simple linked list) for now.
90  */
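/*
 * Illustrative sketch (added by the editor, not part of the original file;
 * wrapped in "#if 0" so it is never compiled).  The typical consumer of this
 * EMM is the shared region mapping code, which creates a pager for a slid
 * sub-range of the shared cache and then maps the resulting memory object
 * copy-on-write.  The helper name and its parameters are hypothetical; the
 * calls it makes are the ones declared further down in this file.
 */
#if 0 /* example only */
static memory_object_t
example_create_slid_pager(
	vm_object_t                          cache_object,  /* VM object backing the cache file */
	vm_object_offset_t                   cache_offset,  /* offset of the slid range in that object */
	struct vm_shared_region_slide_info  *si,            /* slide info covering that range */
	uint64_t                             jop_key)       /* per-shared-region signing key (ptrauth) */
{
	memory_object_t pager;

	/* returns a memory object holding one reference for the caller */
	pager = shared_region_pager_setup(cache_object, cache_offset, si, jop_key);
	if (pager == MEMORY_OBJECT_NULL) {
		return MEMORY_OBJECT_NULL;
	}
	/*
	 * The caller is expected to map "pager" copy-on-write (so the
	 * originals stay clean) and, once the mapping holds its own
	 * reference, drop this one with memory_object_deallocate().
	 */
	return pager;
}
#endif /* example only */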
91 
92 /* forward declarations */
93 void shared_region_pager_reference(memory_object_t mem_obj);
94 void shared_region_pager_deallocate(memory_object_t mem_obj);
95 kern_return_t shared_region_pager_init(memory_object_t mem_obj,
96     memory_object_control_t control,
97     memory_object_cluster_size_t pg_size);
98 kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
99 kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
100     memory_object_offset_t offset,
101     memory_object_cluster_size_t length,
102     vm_prot_t protection_required,
103     memory_object_fault_info_t fault_info);
104 kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
105     memory_object_offset_t offset,
106     memory_object_cluster_size_t      data_cnt,
107     memory_object_offset_t *resid_offset,
108     int *io_error,
109     boolean_t dirty,
110     boolean_t kernel_copy,
111     int upl_flags);
112 kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
113     memory_object_offset_t offset,
114     memory_object_cluster_size_t data_cnt);
115 kern_return_t shared_region_pager_map(memory_object_t mem_obj,
116     vm_prot_t prot);
117 kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
118 boolean_t shared_region_pager_backing_object(
119 	memory_object_t mem_obj,
120 	memory_object_offset_t mem_obj_offset,
121 	vm_object_t *backing_object,
122 	vm_object_offset_t *backing_offset);
123 
124 /*
125  * Vector of VM operations for this EMM.
126  * These routines are invoked by VM via the memory_object_*() interfaces.
127  */
128 const struct memory_object_pager_ops shared_region_pager_ops = {
129 	.memory_object_reference = shared_region_pager_reference,
130 	.memory_object_deallocate = shared_region_pager_deallocate,
131 	.memory_object_init = shared_region_pager_init,
132 	.memory_object_terminate = shared_region_pager_terminate,
133 	.memory_object_data_request = shared_region_pager_data_request,
134 	.memory_object_data_return = shared_region_pager_data_return,
135 	.memory_object_data_initialize = shared_region_pager_data_initialize,
136 	.memory_object_map = shared_region_pager_map,
137 	.memory_object_last_unmap = shared_region_pager_last_unmap,
138 	.memory_object_backing_object = shared_region_pager_backing_object,
139 	.memory_object_pager_name = "shared_region"
140 };
141 
142 #if __has_feature(ptrauth_calls)
143 /*
144  * Track mappings between shared_region_id and the key used to sign
145  * authenticated pointers.
146  */
147 typedef struct shared_region_jop_key_map {
148 	queue_chain_t  srk_queue;
149 	char           *srk_shared_region_id;
150 	uint64_t       srk_jop_key;
151 	os_refcnt_t    srk_ref_count;         /* count of tasks active with this shared_region_id */
152 } *shared_region_jop_key_map_t;
153 
154 os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);
155 
156 /*
157  * The list is protected by the "shared_region_jop_key" lock.
158  */
159 int shared_region_key_count = 0;              /* number of active shared_region_id keys */
160 queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
161 LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
162 LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);
163 
164 #if __has_feature(ptrauth_calls)
165 /*
166  * Generate a random pointer signing key that isn't 0.
167  */
168 uint64_t
169 generate_jop_key(void)
170 {
171 	uint64_t key;
172 
173 	do {
174 		read_random(&key, sizeof key);
175 	} while (key == 0);
176 	return key;
177 }
178 #endif /* __has_feature(ptrauth_calls) */
179 
180 /*
181  * Find the pointer signing key for the given shared_region_id.
182  */
183 uint64_t
184 shared_region_find_key(char *shared_region_id)
185 {
186 	shared_region_jop_key_map_t region;
187 	uint64_t key;
188 
189 	lck_mtx_lock(&shared_region_jop_key_lock);
190 	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
191 		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
192 			goto found;
193 		}
194 	}
195 	panic("shared_region_find_key() no key for region '%s'", shared_region_id);
196 
197 found:
198 	key = region->srk_jop_key;
199 	lck_mtx_unlock(&shared_region_jop_key_lock);
200 	return key;
201 }
202 
203 /*
204  * Return an authentication key to use for the given shared_region_id.
205  * If inherit is TRUE, then the key must match inherited_key.
206  * Creates an additional reference when successful.
207  */
208 void
209 shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
210 {
211 	shared_region_jop_key_map_t region;
212 	shared_region_jop_key_map_t new = NULL;
213 
214 	assert(shared_region_id != NULL);
215 again:
216 	lck_mtx_lock(&shared_region_jop_key_lock);
217 	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
218 		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
219 			os_ref_retain_locked(&region->srk_ref_count);
220 			goto done;
221 		}
222 	}
223 
224 	/*
225 	 * ID was not found; on the first pass, allocate a new entry and redo the lookup.
226 	 */
227 	if (new == NULL) {
228 		lck_mtx_unlock(&shared_region_jop_key_lock);
229 		new = kalloc_type(struct shared_region_jop_key_map, Z_WAITOK);
230 		uint_t len = strlen(shared_region_id) + 1;
231 		new->srk_shared_region_id = kalloc_data(len, Z_WAITOK);
232 		strlcpy(new->srk_shared_region_id, shared_region_id, len);
233 		os_ref_init(&new->srk_ref_count, &srk_refgrp);
234 
235 		if (diversify_user_jop && inherit) {
236 			new->srk_jop_key = inherited_key;
237 		} else if (diversify_user_jop && strlen(shared_region_id) > 0) {
238 			new->srk_jop_key = generate_jop_key();
239 		} else {
240 			new->srk_jop_key = ml_default_jop_pid();
241 		}
242 
243 		goto again;
244 	}
245 
246 	/*
247 	 * Use the newly allocated entry
248 	 */
249 	++shared_region_key_count;
250 	queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
251 	region = new;
252 	new = NULL;
253 
254 done:
255 	if (inherit && inherited_key != region->srk_jop_key) {
256 		panic("shared_region_key_alloc() inherited key mismatch");
257 	}
258 	lck_mtx_unlock(&shared_region_jop_key_lock);
259 
260 	/*
261 	 * free any unused new entry
262 	 */
263 	if (new != NULL) {
264 		kfree_data(new->srk_shared_region_id,
265 		    strlen(new->srk_shared_region_id) + 1);
266 		kfree_type(struct shared_region_jop_key_map, new);
267 	}
268 }
269 
270 /*
271  * Mark the end of using a shared_region_id's key
272  */
273 extern void
274 shared_region_key_dealloc(char *shared_region_id)
275 {
276 	shared_region_jop_key_map_t region;
277 
278 	assert(shared_region_id != NULL);
279 	lck_mtx_lock(&shared_region_jop_key_lock);
280 	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
281 		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
282 			goto done;
283 		}
284 	}
285 	panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);
286 
287 done:
288 	if (os_ref_release_locked(&region->srk_ref_count) == 0) {
289 		queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
290 		--shared_region_key_count;
291 	} else {
292 		region = NULL;
293 	}
294 	lck_mtx_unlock(&shared_region_jop_key_lock);
295 
296 	if (region != NULL) {
297 		kfree_data(region->srk_shared_region_id,
298 		    strlen(region->srk_shared_region_id) + 1);
299 		kfree_type(struct shared_region_jop_key_map, region);
300 	}
301 }
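/*
 * Illustrative sketch (added by the editor, example only, never compiled):
 * shared_region_key_alloc() and shared_region_key_dealloc() are expected to
 * be paired per task use of a shared_region_id.  The id string below is
 * hypothetical.
 */
#if 0 /* example only */
static void
example_key_lifetime(char *shared_region_id)
{
	uint64_t key;

	/* first user of this id: allocates and registers a signing key */
	shared_region_key_alloc(shared_region_id, false, 0);

	/* while the id is active, the key can be looked up */
	key = shared_region_find_key(shared_region_id);
	(void)key;

	/* last user of this id: drops the reference and frees the entry */
	shared_region_key_dealloc(shared_region_id);
}
#endif /* example only */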
302 #endif /* __has_feature(ptrauth_calls) */
303 
304 /*
305  * The "shared_region_pager" describes a memory object backed by
306  * the "shared_region" EMM.
307  */
308 typedef struct shared_region_pager {
309 	struct memory_object    srp_header;          /* mandatory generic header */
310 
311 	/* pager-specific data */
312 	queue_chain_t           srp_queue;          /* next & prev pagers */
313 #if MEMORY_OBJECT_HAS_REFCOUNT
314 #define srp_ref_count           srp_header.mo_ref
315 #else
316 	os_ref_atomic_t         srp_ref_count;      /* active uses */
317 #endif
318 	bool                    srp_is_mapped;      /* has active mappings */
319 	bool                    srp_is_ready;       /* is this pager ready? */
320 	vm_object_t             srp_backing_object; /* VM object for shared cache */
321 	vm_object_offset_t      srp_backing_offset;
322 	vm_shared_region_slide_info_t srp_slide_info;
323 #if __has_feature(ptrauth_calls)
324 	uint64_t                srp_jop_key;        /* zero if used for arm64 */
325 #endif /* __has_feature(ptrauth_calls) */
326 } *shared_region_pager_t;
327 #define SHARED_REGION_PAGER_NULL        ((shared_region_pager_t) NULL)
328 
329 /*
330  * List of memory objects managed by this EMM.
331  * The list is protected by the "shared_region_pager_lock" lock.
332  */
333 int shared_region_pager_count = 0;              /* number of pagers */
334 int shared_region_pager_count_mapped = 0;       /* number of mapped pagers */
335 queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
336 LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
337 LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);
338 
339 /*
340  * Maximum number of unmapped pagers we're willing to keep around.
341  */
342 int shared_region_pager_cache_limit = 0;
343 
344 /*
345  * Statistics & counters.
346  */
347 int shared_region_pager_count_max = 0;
348 int shared_region_pager_count_unmapped_max = 0;
349 int shared_region_pager_num_trim_max = 0;
350 int shared_region_pager_num_trim_total = 0;
351 
352 uint64_t shared_region_pager_copied = 0;
353 uint64_t shared_region_pager_slid = 0;
354 uint64_t shared_region_pager_slid_error = 0;
355 uint64_t shared_region_pager_reclaimed = 0;
356 
357 /* internal prototypes */
358 shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
359 void shared_region_pager_dequeue(shared_region_pager_t pager);
360 void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
361     boolean_t locked);
362 void shared_region_pager_terminate_internal(shared_region_pager_t pager);
363 void shared_region_pager_trim(void);
364 
365 
366 #if DEBUG
367 int shared_region_pagerdebug = 0;
368 #define PAGER_ALL               0xffffffff
369 #define PAGER_INIT              0x00000001
370 #define PAGER_PAGEIN            0x00000002
371 
372 #define PAGER_DEBUG(LEVEL, A)                                           \
373 	MACRO_BEGIN                                                     \
374 	if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {          \
375 	        printf A;                                               \
376 	}                                                               \
377 	MACRO_END
378 #else
379 #define PAGER_DEBUG(LEVEL, A)
380 #endif
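/*
 * Example (added by the editor): on DEBUG kernels, init and page-in tracing
 * can be enabled by setting the mask at run time, e.g. from a kernel
 * debugger:
 *
 *	shared_region_pagerdebug = PAGER_INIT | PAGER_PAGEIN;
 *
 * after which PAGER_DEBUG() invocations at those levels print to the console.
 */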
381 
382 /*
383  * shared_region_pager_init()
384  *
385  * Initializes the memory object and makes it ready to be used and mapped.
386  */
387 kern_return_t
388 shared_region_pager_init(
389 	memory_object_t         mem_obj,
390 	memory_object_control_t control,
391 #if !DEBUG
392 	__unused
393 #endif
394 	memory_object_cluster_size_t pg_size)
395 {
396 	shared_region_pager_t   pager;
397 	kern_return_t           kr;
398 	memory_object_attr_info_data_t  attributes;
399 
400 	PAGER_DEBUG(PAGER_ALL,
401 	    ("shared_region_pager_init: %p, %p, %x\n",
402 	    mem_obj, control, pg_size));
403 
404 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
405 		return KERN_INVALID_ARGUMENT;
406 	}
407 
408 	pager = shared_region_pager_lookup(mem_obj);
409 
410 	memory_object_control_reference(control);
411 
412 	pager->srp_header.mo_control = control;
413 
414 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
415 	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
416 	attributes.cluster_size = (1 << (PAGE_SHIFT));
417 	attributes.may_cache_object = FALSE;
418 	attributes.temporary = TRUE;
419 
420 	kr = memory_object_change_attributes(
421 		control,
422 		MEMORY_OBJECT_ATTRIBUTE_INFO,
423 		(memory_object_info_t) &attributes,
424 		MEMORY_OBJECT_ATTR_INFO_COUNT);
425 	if (kr != KERN_SUCCESS) {
426 		panic("shared_region_pager_init: "
427 		    "memory_object_change_attributes() failed");
428 	}
429 
430 #if CONFIG_SECLUDED_MEMORY
431 	if (secluded_for_filecache) {
432 #if 00
433 		/*
434 		 * XXX FBDP do we want this in the secluded pool?
435 		 * Ideally, we'd want the shared region used by Camera to
436 		 * NOT be in the secluded pool, but all other shared regions
437 		 * in the secluded pool...
438 		 */
439 		memory_object_mark_eligible_for_secluded(control, TRUE);
440 #endif /* 00 */
441 	}
442 #endif /* CONFIG_SECLUDED_MEMORY */
443 
444 	return KERN_SUCCESS;
445 }
446 
447 /*
448  * shared_region_pager_data_return()
449  *
450  * Handles page-out requests from VM.  This should never happen since
451  * the pages provided by this EMM are not supposed to be dirty or dirtied
452  * and VM should simply discard the contents and reclaim the pages if it
453  * needs to.
454  */
455 kern_return_t
456 shared_region_pager_data_return(
457 	__unused memory_object_t        mem_obj,
458 	__unused memory_object_offset_t offset,
459 	__unused memory_object_cluster_size_t           data_cnt,
460 	__unused memory_object_offset_t *resid_offset,
461 	__unused int                    *io_error,
462 	__unused boolean_t              dirty,
463 	__unused boolean_t              kernel_copy,
464 	__unused int                    upl_flags)
465 {
466 	panic("shared_region_pager_data_return: should never get called");
467 	return KERN_FAILURE;
468 }
469 
470 kern_return_t
471 shared_region_pager_data_initialize(
472 	__unused memory_object_t        mem_obj,
473 	__unused memory_object_offset_t offset,
474 	__unused memory_object_cluster_size_t           data_cnt)
475 {
476 	panic("shared_region_pager_data_initialize: should never get called");
477 	return KERN_FAILURE;
478 }
479 
480 /*
481  * shared_region_pager_data_request()
482  *
483  * Handles page-in requests from VM.
484  */
485 int shared_region_pager_data_request_debug = 0;
486 kern_return_t
487 shared_region_pager_data_request(
488 	memory_object_t         mem_obj,
489 	memory_object_offset_t  offset,
490 	memory_object_cluster_size_t            length,
491 #if !DEBUG
492 	__unused
493 #endif
494 	vm_prot_t               protection_required,
495 	memory_object_fault_info_t mo_fault_info)
496 {
497 	shared_region_pager_t   pager;
498 	memory_object_control_t mo_control;
499 	upl_t                   upl;
500 	int                     upl_flags;
501 	upl_size_t              upl_size;
502 	upl_page_info_t         *upl_pl;
503 	unsigned int            pl_count;
504 	vm_object_t             src_top_object, src_page_object, dst_object;
505 	kern_return_t           kr, retval;
506 	vm_fault_return_t       vmfr;
507 	vm_offset_t             src_vaddr, dst_vaddr;
508 	vm_offset_t             cur_offset;
509 	vm_offset_t             offset_in_page;
510 	kern_return_t           error_code;
511 	vm_prot_t               prot;
512 	vm_page_t               src_page, top_page;
513 	int                     interruptible;
514 	struct vm_object_fault_info     fault_info;
515 	mach_vm_offset_t        slide_start_address;
516 	u_int32_t                               slide_info_page_size;
517 
518 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));
519 
520 	retval = KERN_SUCCESS;
521 	src_top_object = VM_OBJECT_NULL;
522 	src_page_object = VM_OBJECT_NULL;
523 	upl = NULL;
524 	upl_pl = NULL;
525 	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
526 	fault_info.stealth = TRUE;
527 	fault_info.io_sync = FALSE;
528 	fault_info.mark_zf_absent = FALSE;
529 	fault_info.batch_pmap_op = FALSE;
530 	interruptible = fault_info.interruptible;
531 
532 	pager = shared_region_pager_lookup(mem_obj);
533 	assert(pager->srp_is_ready);
534 	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */
535 	assert(pager->srp_is_mapped); /* pager is mapped */
536 
537 	PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));
538 
539 	/*
540 	 * Gather in a UPL all the VM pages requested by VM.
541 	 */
542 	mo_control = pager->srp_header.mo_control;
543 
544 	upl_size = length;
545 	upl_flags =
546 	    UPL_RET_ONLY_ABSENT |
547 	    UPL_SET_LITE |
548 	    UPL_NO_SYNC |
549 	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
550 	    UPL_SET_INTERNAL;
551 	pl_count = 0;
552 	kr = memory_object_upl_request(mo_control,
553 	    offset, upl_size,
554 	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
555 	if (kr != KERN_SUCCESS) {
556 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_NO_UPL), kr /* arg */);
557 		if (panic_on_dyld_issue) {
558 			panic("%s(): upl_request(%p, 0x%llx, 0x%llx) ret %d", __func__,
559 			    mo_control, offset, (uint64_t)upl_size, kr);
560 		}
561 		retval = kr;
562 		goto done;
563 	}
564 	dst_object = memory_object_control_to_vm_object(mo_control);
565 	assert(dst_object != VM_OBJECT_NULL);
566 
567 	/*
568 	 * We'll map the original data in the kernel address space from the
569 	 * backing VM object (itself backed by the shared cache file via
570 	 * the vnode pager).
571 	 */
572 	src_top_object = pager->srp_backing_object;
573 	assert(src_top_object != VM_OBJECT_NULL);
574 	vm_object_reference(src_top_object); /* keep the source object alive */
575 
576 	slide_start_address = pager->srp_slide_info->si_slid_address;
577 	slide_info_page_size = pager->srp_slide_info->si_slide_info_entry->version == 1 ? PAGE_SIZE_FOR_SR_SLIDE : pager->srp_slide_info->si_slide_info_entry->page_size;
578 
579 	fault_info.lo_offset += pager->srp_backing_offset;
580 	fault_info.hi_offset += pager->srp_backing_offset;
581 
582 	/*
583 	 * Fill in the contents of the pages requested by VM.
584 	 */
585 	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
586 	pl_count = length / PAGE_SIZE;
587 	for (cur_offset = 0;
588 	    retval == KERN_SUCCESS && cur_offset < length;
589 	    cur_offset += PAGE_SIZE) {
590 		ppnum_t dst_pnum;
591 
592 		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
593 			/* this page is not in the UPL: skip it */
594 			continue;
595 		}
596 
597 		/*
598 		 * Map the source (dyld shared cache) page in the kernel's
599 		 * virtual address space.
600 		 * We already hold a reference on the src_top_object.
601 		 */
602 retry_src_fault:
603 		vm_object_lock(src_top_object);
604 		vm_object_paging_begin(src_top_object);
605 		error_code = 0;
606 		prot = VM_PROT_READ;
607 		src_page = VM_PAGE_NULL;
608 		vmfr = vm_fault_page(src_top_object,
609 		    pager->srp_backing_offset + offset + cur_offset,
610 		    VM_PROT_READ,
611 		    FALSE,
612 		    FALSE,                /* src_page not looked up */
613 		    &prot,
614 		    &src_page,
615 		    &top_page,
616 		    NULL,
617 		    &error_code,
618 		    FALSE,
619 		    &fault_info);
620 		switch (vmfr) {
621 		case VM_FAULT_SUCCESS:
622 			break;
623 		case VM_FAULT_RETRY:
624 			goto retry_src_fault;
625 		case VM_FAULT_MEMORY_SHORTAGE:
626 			if (vm_page_wait(interruptible)) {
627 				goto retry_src_fault;
628 			}
629 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
630 			OS_FALLTHROUGH;
631 		case VM_FAULT_INTERRUPTED:
632 			retval = MACH_SEND_INTERRUPTED;
633 			goto done;
634 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
635 			/* success but no VM page: fail */
636 			vm_object_paging_end(src_top_object);
637 			vm_object_unlock(src_top_object);
638 			OS_FALLTHROUGH;
639 		case VM_FAULT_MEMORY_ERROR:
640 			/* the page is not there ! */
641 			if (error_code) {
642 				retval = error_code;
643 			} else {
644 				retval = KERN_MEMORY_ERROR;
645 			}
646 			goto done;
647 		case VM_FAULT_BUSY:
648 			retval = KERN_ALREADY_WAITING;
649 			goto done;
650 		}
651 		assert(src_page != VM_PAGE_NULL);
652 		assert(src_page->vmp_busy);
653 
654 		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
655 			vm_page_lockspin_queues();
656 			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
657 				vm_page_speculate(src_page, FALSE);
658 			}
659 			vm_page_unlock_queues();
660 		}
661 
662 		/*
663 		 * Establish pointers to the source
664 		 * and destination physical pages.
665 		 */
666 		dst_pnum = (ppnum_t)
667 		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
668 		assert(dst_pnum != 0);
669 
670 		src_vaddr = (vm_map_offset_t)
671 		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
672 		        << PAGE_SHIFT);
673 		dst_vaddr = (vm_map_offset_t)
674 		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
675 		src_page_object = VM_PAGE_OBJECT(src_page);
676 
677 		/*
678 		 * Validate the original page...
679 		 */
680 		if (src_page_object->code_signed) {
681 			vm_page_validate_cs_mapped(
682 				src_page, PAGE_SIZE, 0,
683 				(const void *) src_vaddr);
684 		}
685 		/*
686 		 * ... and transfer the results to the destination page.
687 		 */
688 		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
689 		    src_page->vmp_cs_validated);
690 		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
691 		    src_page->vmp_cs_tainted);
692 		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
693 		    src_page->vmp_cs_nx);
694 
695 		/*
696 		 * The page provider might access a mapped file, so let's
697 		 * release the object lock for the source page to avoid a
698 		 * potential deadlock.
699 		 * The source page is kept busy and we have a
700 		 * "paging_in_progress" reference on its object, so it's safe
701 		 * to unlock the object here.
702 		 */
703 		assert(src_page->vmp_busy);
704 		assert(src_page_object->paging_in_progress > 0);
705 		vm_object_unlock(src_page_object);
706 
707 		/*
708 		 * Process the original contents of the source page
709 		 * into the destination page.
710 		 */
711 		for (offset_in_page = 0;
712 		    offset_in_page < PAGE_SIZE;
713 		    offset_in_page += slide_info_page_size) {
714 			vm_object_offset_t chunk_offset;
715 			vm_object_offset_t offset_in_backing_object;
716 			vm_object_offset_t offset_in_sliding_range;
717 
718 			chunk_offset = offset + cur_offset + offset_in_page;
719 
720 			bcopy((const char *)(src_vaddr +
721 			    offset_in_page),
722 			    (char *)(dst_vaddr + offset_in_page),
723 			    slide_info_page_size);
724 
725 			offset_in_backing_object = (chunk_offset +
726 			    pager->srp_backing_offset);
727 			if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
728 			    (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
729 				/* chunk is outside of sliding range: done */
730 				shared_region_pager_copied++;
731 				continue;
732 			}
733 
734 			offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
735 			kr = vm_shared_region_slide_page(pager->srp_slide_info,
736 			    dst_vaddr + offset_in_page,
737 			    (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
738 			    (uint32_t) (offset_in_sliding_range / slide_info_page_size),
739 #if __has_feature(ptrauth_calls)
740 			    pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
741 #else /* __has_feature(ptrauth_calls) */
742 			    0
743 #endif /* __has_feature(ptrauth_calls) */
744 			    );
745 			if (shared_region_pager_data_request_debug) {
746 				printf("shared_region_data_request"
747 				    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
748 				    "in sliding range [0x%llx:0x%llx]: "
749 				    "SLIDE offset 0x%llx="
750 				    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
751 				    "[0x%016llx 0x%016llx] "
752 				    "code_signed=%d "
753 				    "cs_validated=%d "
754 				    "cs_tainted=%d "
755 				    "cs_nx=%d "
756 				    "kr=0x%x\n",
757 				    pager,
758 				    offset,
759 				    (uint64_t) cur_offset,
760 				    (uint64_t) offset_in_page,
761 				    chunk_offset,
762 				    pager->srp_slide_info->si_start,
763 				    pager->srp_slide_info->si_end,
764 				    (pager->srp_backing_offset +
765 				    offset +
766 				    cur_offset +
767 				    offset_in_page),
768 				    pager->srp_backing_offset,
769 				    offset,
770 				    (uint64_t) cur_offset,
771 				    (uint64_t) offset_in_page,
772 				    *(uint64_t *)(dst_vaddr + offset_in_page),
773 				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
774 				    src_page_object->code_signed,
775 				    src_page->vmp_cs_validated,
776 				    src_page->vmp_cs_tainted,
777 				    src_page->vmp_cs_nx,
778 				    kr);
779 			}
780 			if (kr != KERN_SUCCESS) {
781 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_SLIDE_ERROR), kr /* arg */);
782 				if (panic_on_dyld_issue) {
783 					panic("%s(): shared region slide error %d",
784 					    __func__, kr);
785 				}
786 				shared_region_pager_slid_error++;
787 				retval = KERN_MEMORY_ERROR;
788 				break;
789 			}
790 			shared_region_pager_slid++;
791 		}
792 
793 		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
794 		assert(src_page->vmp_busy);
795 		assert(src_page_object->paging_in_progress > 0);
796 		vm_object_lock(src_page_object);
797 
798 		/*
799 		 * Cleanup the result of vm_fault_page() of the source page.
800 		 */
801 		vm_page_wakeup_done(src_page_object, src_page);
802 		src_page = VM_PAGE_NULL;
803 		vm_object_paging_end(src_page_object);
804 		vm_object_unlock(src_page_object);
805 
806 		if (top_page != VM_PAGE_NULL) {
807 			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
808 			vm_object_lock(src_top_object);
809 			VM_PAGE_FREE(top_page);
810 			vm_object_paging_end(src_top_object);
811 			vm_object_unlock(src_top_object);
812 		}
813 	}
814 
815 done:
816 	if (upl != NULL) {
817 		/* clean up the UPL */
818 
819 		/*
820 		 * The pages are currently dirty because we've just been
821 		 * writing on them, but as far as we're concerned, they're
822 		 * clean since they contain their "original" contents as
823 		 * provided by us, the pager.
824 		 * Tell the UPL to mark them "clean".
825 		 */
826 		upl_clear_dirty(upl, TRUE);
827 
828 		/* abort or commit the UPL */
829 		if (retval != KERN_SUCCESS) {
830 			upl_abort(upl, 0);
831 		} else {
832 			boolean_t empty;
833 			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
834 			    "upl %p offset 0x%llx size 0x%x\n",
835 			    upl, upl->u_offset, upl->u_size);
836 			upl_commit_range(upl, 0, upl->u_size,
837 			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
838 			    upl_pl, pl_count, &empty);
839 		}
840 
841 		/* and deallocate the UPL */
842 		upl_deallocate(upl);
843 		upl = NULL;
844 	}
845 	if (src_top_object != VM_OBJECT_NULL) {
846 		vm_object_deallocate(src_top_object);
847 	}
848 	return retval;
849 }
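/*
 * Worked example (added by the editor, illustrative numbers only) of the
 * per-chunk arithmetic in the slide loop above, assuming a 4K
 * slide_info_page_size:
 *
 *	offset             = 0x10000   (start of this data request)
 *	cur_offset         = 0x2000    (third page of the request)
 *	offset_in_page     = 0x0       (first chunk of that page)
 *	srp_backing_offset = 0x100000
 *
 *	chunk_offset             = 0x10000 + 0x2000 + 0x0 = 0x12000
 *	offset_in_backing_object = 0x12000 + 0x100000     = 0x112000
 *
 * If 0x112000 lies inside [si_start, si_end), the chunk is slid in place at
 * dst_vaddr + offset_in_page using
 *
 *	offset_in_sliding_range = 0x112000 - si_start
 *
 * otherwise it is left as the plain copy made by bcopy().
 */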
850 
851 /*
852  * shared_region_pager_reference()
853  *
854  * Get a reference on this memory object.
855  * For external usage only.  Assumes that the initial reference count is not 0,
856  * i.e. one should not "revive" a dead pager this way.
857  */
858 void
859 shared_region_pager_reference(
860 	memory_object_t         mem_obj)
861 {
862 	shared_region_pager_t   pager;
863 
864 	pager = shared_region_pager_lookup(mem_obj);
865 
866 	lck_mtx_lock(&shared_region_pager_lock);
867 	os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
868 	lck_mtx_unlock(&shared_region_pager_lock);
869 }
870 
871 
872 /*
873  * shared_region_pager_dequeue:
874  *
875  * Removes a pager from the list of pagers.
876  *
877  * The caller must hold "shared_region_pager_lock".
878  */
879 void
880 shared_region_pager_dequeue(
881 	shared_region_pager_t pager)
882 {
883 	assert(!pager->srp_is_mapped);
884 
885 	queue_remove(&shared_region_pager_queue,
886 	    pager,
887 	    shared_region_pager_t,
888 	    srp_queue);
889 	pager->srp_queue.next = NULL;
890 	pager->srp_queue.prev = NULL;
891 
892 	shared_region_pager_count--;
893 }
894 
895 /*
896  * shared_region_pager_terminate_internal:
897  *
898  * Trigger the asynchronous termination of the memory object associated
899  * with this pager.
900  * When the memory object is terminated, there will be one more call
901  * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
902  * to finish the clean up.
903  *
904  * "shared_region_pager_lock" should not be held by the caller.
905  * We don't need the lock because the pager has already been removed from
906  * the pagers' list and is now ours exclusively.
907  */
908 void
909 shared_region_pager_terminate_internal(
910 	shared_region_pager_t pager)
911 {
912 	assert(pager->srp_is_ready);
913 	assert(!pager->srp_is_mapped);
914 	assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);
915 
916 	if (pager->srp_backing_object != VM_OBJECT_NULL) {
917 		vm_object_deallocate(pager->srp_backing_object);
918 		pager->srp_backing_object = VM_OBJECT_NULL;
919 	}
920 	/* trigger the destruction of the memory object */
921 	memory_object_destroy(pager->srp_header.mo_control, VM_OBJECT_DESTROY_PAGER);
922 }
923 
924 /*
925  * shared_region_pager_deallocate_internal()
926  *
927  * Release a reference on this pager and free it when the last reference goes away.
928  * Can be called with shared_region_pager_lock held or not, but always returns
929  * with it unlocked.
930  */
931 void
932 shared_region_pager_deallocate_internal(
933 	shared_region_pager_t   pager,
934 	boolean_t               locked)
935 {
936 	boolean_t       needs_trimming;
937 	int             count_unmapped;
938 	os_ref_count_t  ref_count;
939 
940 	if (!locked) {
941 		lck_mtx_lock(&shared_region_pager_lock);
942 	}
943 
944 	/* if we have too many unmapped pagers, trim some */
945 	count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
946 	needs_trimming = (count_unmapped > shared_region_pager_cache_limit);
947 
948 	/* drop a reference on this pager */
949 	ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
950 
951 	if (ref_count == 1) {
952 		/*
953 		 * Only the "named" reference is left, which means that
954 		 * no one is really holding on to this pager anymore.
955 		 * Terminate it.
956 		 */
957 		shared_region_pager_dequeue(pager);
958 		/* the pager is all ours: no need for the lock now */
959 		lck_mtx_unlock(&shared_region_pager_lock);
960 		shared_region_pager_terminate_internal(pager);
961 	} else if (ref_count == 0) {
962 		/*
963 		 * Dropped the existence reference;  the memory object has
964 		 * been terminated.  Do some final cleanup and release the
965 		 * pager structure.
966 		 */
967 		lck_mtx_unlock(&shared_region_pager_lock);
968 
969 		vm_shared_region_slide_info_t si = pager->srp_slide_info;
970 #if __has_feature(ptrauth_calls)
971 		/*
972 		 * The slide_info for auth sections lives in the shared region.
973 		 * Just deallocate() on the shared region and clear the field.
974 		 */
975 		if (si != NULL) {
976 			if (si->si_shared_region != NULL) {
977 				assert(si->si_ptrauth);
978 				vm_shared_region_deallocate(si->si_shared_region);
979 				pager->srp_slide_info = NULL;
980 				si = NULL;
981 			}
982 		}
983 #endif /* __has_feature(ptrauth_calls) */
984 		if (si != NULL) {
985 			vm_object_deallocate(si->si_slide_object);
986 			/* free the slide_info_entry */
987 			kfree_data(si->si_slide_info_entry,
988 			    si->si_slide_info_size);
989 			kfree_type(struct vm_shared_region_slide_info, si);
990 			pager->srp_slide_info = NULL;
991 		}
992 
993 		if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
994 			memory_object_control_deallocate(pager->srp_header.mo_control);
995 			pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
996 		}
997 		kfree_type(struct shared_region_pager, pager);
998 		pager = SHARED_REGION_PAGER_NULL;
999 	} else {
1000 		/* there are still plenty of references:  keep going... */
1001 		lck_mtx_unlock(&shared_region_pager_lock);
1002 	}
1003 
1004 	if (needs_trimming) {
1005 		shared_region_pager_trim();
1006 	}
1007 	/* caution: lock is not held on return... */
1008 }
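/*
 * Summary of the reference-count states handled above (added by the editor):
 *
 *	ref_count > 1 : other holders remain; nothing more to do.
 *	ref_count == 1: only the cache's existence ("named") reference is
 *	                left, so the pager is dequeued and its memory object
 *	                destroyed, which eventually calls back into
 *	                shared_region_pager_deallocate().
 *	ref_count == 0: the final reference was dropped after termination,
 *	                so the slide info, control reference and pager
 *	                structure are freed.
 */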
1009 
1010 /*
1011  * shared_region_pager_deallocate()
1012  *
1013  * Release a reference on this pager and free it when the last
1014  * reference goes away.
1015  */
1016 void
1017 shared_region_pager_deallocate(
1018 	memory_object_t         mem_obj)
1019 {
1020 	shared_region_pager_t   pager;
1021 
1022 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
1023 	pager = shared_region_pager_lookup(mem_obj);
1024 	shared_region_pager_deallocate_internal(pager, FALSE);
1025 }
1026 
1027 /*
1028  *
1029  */
1030 kern_return_t
1031 shared_region_pager_terminate(
1032 #if !DEBUG
1033 	__unused
1034 #endif
1035 	memory_object_t mem_obj)
1036 {
1037 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));
1038 
1039 	return KERN_SUCCESS;
1040 }
1041 
1042 /*
1043  * shared_region_pager_map()
1044  *
1045  * This allows VM to let us, the EMM, know that this memory object
1046  * is currently mapped one or more times.  This is called by VM each time
1047  * the memory object gets mapped, but we only take one extra reference the
1048  * first time it is called.
1049  */
1050 kern_return_t
1051 shared_region_pager_map(
1052 	memory_object_t         mem_obj,
1053 	__unused vm_prot_t      prot)
1054 {
1055 	shared_region_pager_t   pager;
1056 
1057 	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));
1058 
1059 	pager = shared_region_pager_lookup(mem_obj);
1060 
1061 	lck_mtx_lock(&shared_region_pager_lock);
1062 	assert(pager->srp_is_ready);
1063 	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */
1064 	if (!pager->srp_is_mapped) {
1065 		pager->srp_is_mapped = TRUE;
1066 		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
1067 		shared_region_pager_count_mapped++;
1068 	}
1069 	lck_mtx_unlock(&shared_region_pager_lock);
1070 
1071 	return KERN_SUCCESS;
1072 }
1073 
1074 /*
1075  * shared_region_pager_last_unmap()
1076  *
1077  * This is called by VM when this memory object is no longer mapped anywhere.
1078  */
1079 kern_return_t
1080 shared_region_pager_last_unmap(
1081 	memory_object_t         mem_obj)
1082 {
1083 	shared_region_pager_t   pager;
1084 	int                     count_unmapped;
1085 
1086 	PAGER_DEBUG(PAGER_ALL,
1087 	    ("shared_region_pager_last_unmap: %p\n", mem_obj));
1088 
1089 	pager = shared_region_pager_lookup(mem_obj);
1090 
1091 	lck_mtx_lock(&shared_region_pager_lock);
1092 	if (pager->srp_is_mapped) {
1093 		/*
1094 		 * All the mappings are gone, so let go of the one extra
1095 		 * reference that represents all the mappings of this pager.
1096 		 */
1097 		shared_region_pager_count_mapped--;
1098 		count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
1099 		if (count_unmapped > shared_region_pager_count_unmapped_max) {
1100 			shared_region_pager_count_unmapped_max = count_unmapped;
1101 		}
1102 		pager->srp_is_mapped = FALSE;
1103 		shared_region_pager_deallocate_internal(pager, TRUE);
1104 		/* caution: deallocate_internal() released the lock ! */
1105 	} else {
1106 		lck_mtx_unlock(&shared_region_pager_lock);
1107 	}
1108 
1109 	return KERN_SUCCESS;
1110 }
1111 
1112 boolean_t
1113 shared_region_pager_backing_object(
1114 	memory_object_t         mem_obj,
1115 	memory_object_offset_t  offset,
1116 	vm_object_t             *backing_object,
1117 	vm_object_offset_t      *backing_offset)
1118 {
1119 	shared_region_pager_t   pager;
1120 
1121 	PAGER_DEBUG(PAGER_ALL,
1122 	    ("shared_region_pager_backing_object: %p\n", mem_obj));
1123 
1124 	pager = shared_region_pager_lookup(mem_obj);
1125 
1126 	*backing_object = pager->srp_backing_object;
1127 	*backing_offset = pager->srp_backing_offset + offset;
1128 
1129 	return TRUE;
1130 }
1131 
1132 
1133 /*
1134  *
1135  */
1136 shared_region_pager_t
1137 shared_region_pager_lookup(
1138 	memory_object_t  mem_obj)
1139 {
1140 	shared_region_pager_t   pager;
1141 
1142 	assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
1143 	pager = (shared_region_pager_t)(uintptr_t) mem_obj;
1144 	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0);
1145 	return pager;
1146 }
1147 
1148 /*
1149  * Create and return a pager for the given object with the
1150  * given slide information.
1151  */
1152 static shared_region_pager_t
1153 shared_region_pager_create(
1154 	vm_object_t             backing_object,
1155 	vm_object_offset_t      backing_offset,
1156 	struct vm_shared_region_slide_info *slide_info,
1157 #if !__has_feature(ptrauth_calls)
1158 	__unused
1159 #endif /* !__has_feature(ptrauth_calls) */
1160 	uint64_t                jop_key)
1161 {
1162 	shared_region_pager_t   pager;
1163 	memory_object_control_t control;
1164 	kern_return_t           kr;
1165 	vm_object_t             object;
1166 
1167 	pager = kalloc_type(struct shared_region_pager, Z_WAITOK);
1168 	if (pager == SHARED_REGION_PAGER_NULL) {
1169 		return SHARED_REGION_PAGER_NULL;
1170 	}
1171 
1172 	/*
1173 	 * The vm_map call takes both named entry ports and raw memory
1174 	 * objects in the same parameter.  We need to make sure that
1175 	 * vm_map does not see this object as a named entry port.  So,
1176 	 * we reserve the first word in the object for a fake ip_kotype
1177 	 * setting - that will tell vm_map to use it as a memory object.
1178 	 */
1179 	pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
1180 	pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
1181 	pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1182 
1183 	pager->srp_is_ready = FALSE;/* not ready until it has a "name" */
1184 	/* existence reference (for the cache) + 1 for the caller */
1185 	os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2);
1186 	pager->srp_is_mapped = FALSE;
1187 	pager->srp_backing_object = backing_object;
1188 	pager->srp_backing_offset = backing_offset;
1189 	pager->srp_slide_info = slide_info;
1190 #if __has_feature(ptrauth_calls)
1191 	pager->srp_jop_key = jop_key;
1192 	/*
1193 	 * If we're getting slide_info from the shared_region,
1194 	 * take a reference, so it can't disappear from under us.
1195 	 */
1196 	if (slide_info->si_shared_region) {
1197 		assert(slide_info->si_ptrauth);
1198 		vm_shared_region_reference(slide_info->si_shared_region);
1199 	}
1200 #endif /* __has_feature(ptrauth_calls) */
1201 
1202 	vm_object_reference(backing_object);
1203 
1204 	lck_mtx_lock(&shared_region_pager_lock);
1205 	/* enter new pager at the head of our list of pagers */
1206 	queue_enter_first(&shared_region_pager_queue,
1207 	    pager,
1208 	    shared_region_pager_t,
1209 	    srp_queue);
1210 	shared_region_pager_count++;
1211 	if (shared_region_pager_count > shared_region_pager_count_max) {
1212 		shared_region_pager_count_max = shared_region_pager_count;
1213 	}
1214 	lck_mtx_unlock(&shared_region_pager_lock);
1215 
1216 	kr = memory_object_create_named((memory_object_t) pager,
1217 	    0,
1218 	    &control);
1219 	assert(kr == KERN_SUCCESS);
1220 
1221 	memory_object_mark_trusted(control);
1222 
1223 	lck_mtx_lock(&shared_region_pager_lock);
1224 	/* the new pager is now ready to be used */
1225 	pager->srp_is_ready = TRUE;
1226 	object = memory_object_to_vm_object((memory_object_t) pager);
1227 	assert(object);
1228 	/*
1229 	 * No one knows about this object and so we get away without the object lock.
1230 	 * This object is _eventually_ backed by the dyld shared cache and so we want
1231 	 * to benefit from the lock priority boosting.
1232 	 */
1233 	object->object_is_shared_cache = TRUE;
1234 	lck_mtx_unlock(&shared_region_pager_lock);
1235 
1236 	/* wakeup anyone waiting for this pager to be ready */
1237 	thread_wakeup(&pager->srp_is_ready);
1238 
1239 	return pager;
1240 }
1241 
1242 /*
1243  * shared_region_pager_setup()
1244  *
1245  * Provide the caller with a memory object backed by the provided
1246  * "backing_object" VM object.
1247  */
1248 memory_object_t
1249 shared_region_pager_setup(
1250 	vm_object_t             backing_object,
1251 	vm_object_offset_t      backing_offset,
1252 	struct vm_shared_region_slide_info *slide_info,
1253 	uint64_t                jop_key)
1254 {
1255 	shared_region_pager_t   pager;
1256 
1257 	/* create new pager */
1258 	pager = shared_region_pager_create(backing_object,
1259 	    backing_offset, slide_info, jop_key);
1260 	if (pager == SHARED_REGION_PAGER_NULL) {
1261 		/* could not create a new pager */
1262 		return MEMORY_OBJECT_NULL;
1263 	}
1264 
1265 	lck_mtx_lock(&shared_region_pager_lock);
1266 	while (!pager->srp_is_ready) {
1267 		lck_mtx_sleep(&shared_region_pager_lock,
1268 		    LCK_SLEEP_DEFAULT,
1269 		    &pager->srp_is_ready,
1270 		    THREAD_UNINT);
1271 	}
1272 	lck_mtx_unlock(&shared_region_pager_lock);
1273 
1274 	return (memory_object_t) pager;
1275 }
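/*
 * Usage sketch (added by the editor, example only): the memory object
 * returned by shared_region_pager_setup() carries one reference for the
 * caller, in addition to the existence reference kept for the pager cache.
 * A caller that installs a mapping (which takes its own reference via
 * shared_region_pager_map()) is expected to drop the setup reference
 * afterwards, e.g.:
 *
 *	memory_object_t sr_pager;
 *
 *	sr_pager = shared_region_pager_setup(object, offset, si, jop_key);
 *	if (sr_pager != MEMORY_OBJECT_NULL) {
 *	        ... map sr_pager copy-on-write ...
 *	        memory_object_deallocate(sr_pager);
 *	}
 */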
1276 
1277 #if __has_feature(ptrauth_calls)
1278 /*
1279  * shared_region_pager_match()
1280  *
1281  * Look for an existing pager matching the given backing object, offset,
1282  * slide info and jop key; reuse it if found, otherwise create a new one.
1283  */
1284 memory_object_t
1285 shared_region_pager_match(
1286 	vm_object_t                   backing_object,
1287 	vm_object_offset_t            backing_offset,
1288 	vm_shared_region_slide_info_t slide_info,
1289 	uint64_t                      jop_key)
1290 {
1291 	shared_region_pager_t         pager;
1292 	vm_shared_region_slide_info_t si;
1293 
1294 	lck_mtx_lock(&shared_region_pager_lock);
1295 	queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
1296 		if (pager->srp_backing_object != backing_object->vo_copy) {
1297 			continue;
1298 		}
1299 		if (pager->srp_backing_offset != backing_offset) {
1300 			continue;
1301 		}
1302 		si = pager->srp_slide_info;
1303 
1304 		/* If there's no AUTH section then it can't match (slide_info is always !NULL) */
1305 		if (!si->si_ptrauth) {
1306 			continue;
1307 		}
1308 		if (pager->srp_jop_key != jop_key) {
1309 			continue;
1310 		}
1311 		if (si->si_slide != slide_info->si_slide) {
1312 			continue;
1313 		}
1314 		if (si->si_start != slide_info->si_start) {
1315 			continue;
1316 		}
1317 		if (si->si_end != slide_info->si_end) {
1318 			continue;
1319 		}
1320 		if (si->si_slide_object != slide_info->si_slide_object) {
1321 			continue;
1322 		}
1323 		if (si->si_slide_info_size != slide_info->si_slide_info_size) {
1324 			continue;
1325 		}
1326 		if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
1327 			continue;
1328 		}
1329 		/* the caller expects a reference on this */
1330 		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
1331 		lck_mtx_unlock(&shared_region_pager_lock);
1332 		return (memory_object_t)pager;
1333 	}
1334 
1335 	/*
1336 	 * We didn't find a pre-existing pager, so create one.
1337 	 *
1338 	 * Note slight race condition here since we drop the lock. This could lead to more than one
1339 	 * thread calling setup with the same arguments here. That shouldn't break anything, just
1340 	 * waste a little memory.
1341 	 */
1342 	lck_mtx_unlock(&shared_region_pager_lock);
1343 	return shared_region_pager_setup(backing_object->vo_copy, backing_offset, slide_info, jop_key);
1344 }
1345 
1346 void
1347 shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
1348 {
1349 	__unused shared_region_pager_t  pager = (shared_region_pager_t)memobj;
1350 
1351 	assert(pager->srp_jop_key == task->jop_pid);
1352 }
1353 #endif /* __has_feature(ptrauth_calls) */
1354 
1355 void
1356 shared_region_pager_trim(void)
1357 {
1358 	shared_region_pager_t   pager, prev_pager;
1359 	queue_head_t            trim_queue;
1360 	int                     num_trim;
1361 	int                     count_unmapped;
1362 
1363 	lck_mtx_lock(&shared_region_pager_lock);
1364 
1365 	/*
1366 	 * We have too many pagers, try and trim some unused ones,
1367 	 * starting with the oldest pager at the end of the queue.
1368 	 */
1369 	queue_init(&trim_queue);
1370 	num_trim = 0;
1371 
1372 	for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
1373 	    !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
1374 	    pager = prev_pager) {
1375 		/* get prev elt before we dequeue */
1376 		prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);
1377 
1378 		if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
1379 		    pager->srp_is_ready &&
1380 		    !pager->srp_is_mapped) {
1381 			/* this pager can be trimmed */
1382 			num_trim++;
1383 			/* remove this pager from the main list ... */
1384 			shared_region_pager_dequeue(pager);
1385 			/* ... and add it to our trim queue */
1386 			queue_enter_first(&trim_queue,
1387 			    pager,
1388 			    shared_region_pager_t,
1389 			    srp_queue);
1390 
1391 			/* do we have enough pagers to trim? */
1392 			count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
1393 			if (count_unmapped <= shared_region_pager_cache_limit) {
1394 				break;
1395 			}
1396 		}
1397 	}
1398 	if (num_trim > shared_region_pager_num_trim_max) {
1399 		shared_region_pager_num_trim_max = num_trim;
1400 	}
1401 	shared_region_pager_num_trim_total += num_trim;
1402 
1403 	lck_mtx_unlock(&shared_region_pager_lock);
1404 
1405 	/* terminate the trimmed pagers */
1406 	while (!queue_empty(&trim_queue)) {
1407 		queue_remove_first(&trim_queue,
1408 		    pager,
1409 		    shared_region_pager_t,
1410 		    srp_queue);
1411 		pager->srp_queue.next = NULL;
1412 		pager->srp_queue.prev = NULL;
1413 		assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2);
1414 		/*
1415 		 * We can't call deallocate_internal() because the pager
1416 		 * has already been dequeued, but we still need to remove
1417 		 * a reference.
1418 		 */
1419 		(void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
1420 		shared_region_pager_terminate_internal(pager);
1421 	}
1422 }
1423 
1424 static uint64_t
1425 shared_region_pager_purge(
1426 	shared_region_pager_t pager)
1427 {
1428 	uint64_t pages_purged;
1429 	vm_object_t object;
1430 
1431 	pages_purged = 0;
1432 	object = memory_object_to_vm_object((memory_object_t) pager);
1433 	assert(object != VM_OBJECT_NULL);
1434 	vm_object_lock(object);
1435 	pages_purged = object->resident_page_count;
1436 	vm_object_reap_pages(object, REAP_DATA_FLUSH);
1437 	pages_purged -= object->resident_page_count;
1438 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1439 	vm_object_unlock(object);
1440 	return pages_purged;
1441 }
1442 
1443 uint64_t
1444 shared_region_pager_purge_all(void)
1445 {
1446 	uint64_t pages_purged;
1447 	shared_region_pager_t pager;
1448 
1449 	pages_purged = 0;
1450 	lck_mtx_lock(&shared_region_pager_lock);
1451 	queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
1452 		pages_purged += shared_region_pager_purge(pager);
1453 	}
1454 	lck_mtx_unlock(&shared_region_pager_lock);
1455 #if DEVELOPMENT || DEBUG
1456 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1457 #endif /* DEVELOPMENT || DEBUG */
1458 	return pages_purged;
1459 }
1460