/*
 * Copyright (c) 2018-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/queue.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_shared_region_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>

#include <sys/kdebug_triage.h>
#include <sys/random.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
extern boolean_t diversify_user_jop;
#endif /* __has_feature(ptrauth_calls) */

/*
 * SHARED REGION MEMORY PAGER
 *
 * This external memory manager (EMM) handles mappings of a dyld shared cache
 * in shared regions, applying any necessary modifications (sliding,
 * pointer signing, ...).
 *
 * It mostly handles page-in requests (from memory_object_data_request()) by
 * getting the original data from its backing VM object, itself backed by
 * the dyld shared cache file, modifying it if needed and providing it to VM.
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages need to be mapped copy-on-write, so that the originals stay clean.
 *
 * We don't expect to have to handle a large number of shared cache files,
 * so the data structures are very simple (simple linked list) for now.
 */
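
/*
 * Illustrative lifecycle sketch (not an exhaustive trace; the actual call
 * sites live elsewhere, e.g. in vm_shared_region.c):
 *
 *	mo = shared_region_pager_setup(backing_object, offset, slide_info, key);
 *	... VM maps "mo" copy-on-write; each page-in comes back to us via
 *	    shared_region_pager_data_request(), which copies the original
 *	    page from the backing object and slides/signs it ...
 *	memory_object_deallocate(mo);	... releases the setup reference
 */
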
89 
90 /* forward declarations */
91 void shared_region_pager_reference(memory_object_t mem_obj);
92 void shared_region_pager_deallocate(memory_object_t mem_obj);
93 kern_return_t shared_region_pager_init(memory_object_t mem_obj,
94     memory_object_control_t control,
95     memory_object_cluster_size_t pg_size);
96 kern_return_t shared_region_pager_terminate(memory_object_t mem_obj);
97 kern_return_t shared_region_pager_data_request(memory_object_t mem_obj,
98     memory_object_offset_t offset,
99     memory_object_cluster_size_t length,
100     vm_prot_t protection_required,
101     memory_object_fault_info_t fault_info);
102 kern_return_t shared_region_pager_data_return(memory_object_t mem_obj,
103     memory_object_offset_t offset,
104     memory_object_cluster_size_t      data_cnt,
105     memory_object_offset_t *resid_offset,
106     int *io_error,
107     boolean_t dirty,
108     boolean_t kernel_copy,
109     int upl_flags);
110 kern_return_t shared_region_pager_data_initialize(memory_object_t mem_obj,
111     memory_object_offset_t offset,
112     memory_object_cluster_size_t data_cnt);
113 kern_return_t shared_region_pager_map(memory_object_t mem_obj,
114     vm_prot_t prot);
115 kern_return_t shared_region_pager_last_unmap(memory_object_t mem_obj);
116 boolean_t shared_region_pager_backing_object(
117 	memory_object_t mem_obj,
118 	memory_object_offset_t mem_obj_offset,
119 	vm_object_t *backing_object,
120 	vm_object_offset_t *backing_offset);
121 
122 /*
123  * Vector of VM operations for this EMM.
124  * These routines are invoked by VM via the memory_object_*() interfaces.
125  */
126 const struct memory_object_pager_ops shared_region_pager_ops = {
127 	.memory_object_reference = shared_region_pager_reference,
128 	.memory_object_deallocate = shared_region_pager_deallocate,
129 	.memory_object_init = shared_region_pager_init,
130 	.memory_object_terminate = shared_region_pager_terminate,
131 	.memory_object_data_request = shared_region_pager_data_request,
132 	.memory_object_data_return = shared_region_pager_data_return,
133 	.memory_object_data_initialize = shared_region_pager_data_initialize,
134 	.memory_object_map = shared_region_pager_map,
135 	.memory_object_last_unmap = shared_region_pager_last_unmap,
136 	.memory_object_backing_object = shared_region_pager_backing_object,
137 	.memory_object_pager_name = "shared_region"
138 };
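
/*
 * Dispatch sketch (illustrative): VM reaches these routines through the
 * generic memory_object_*() wrappers, e.g. a call such as
 *
 *	memory_object_data_request(mo, offset, length, prot, fault_info);
 *
 * lands in shared_region_pager_data_request() because the object's
 * mo_pager_ops field points at the vector above.
 */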

#if __has_feature(ptrauth_calls)
/*
 * Track mappings between shared_region_id and the key used to sign
 * authenticated pointers.
 */
typedef struct shared_region_jop_key_map {
	queue_chain_t  srk_queue;
	char           *srk_shared_region_id;
	uint64_t       srk_jop_key;
	os_refcnt_t    srk_ref_count;         /* count of tasks active with this shared_region_id */
} *shared_region_jop_key_map_t;

os_refgrp_decl(static, srk_refgrp, "shared region key ref cnts", NULL);

/*
 * The list is protected by the "shared_region_key_map" lock.
 */
int shared_region_key_count = 0;              /* number of active shared_region_id keys */
queue_head_t shared_region_jop_key_queue = QUEUE_HEAD_INITIALIZER(shared_region_jop_key_queue);
LCK_GRP_DECLARE(shared_region_jop_key_lck_grp, "shared_region_jop_key");
LCK_MTX_DECLARE(shared_region_jop_key_lock, &shared_region_jop_key_lck_grp);

#if __has_feature(ptrauth_calls)
/*
 * Generate a random pointer signing key that isn't 0.
 */
uint64_t
generate_jop_key(void)
{
	uint64_t key;

	do {
		read_random(&key, sizeof key);
	} while (key == 0);
	return key;
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * Find the pointer signing key for the given shared_region_id.
 */
uint64_t
shared_region_find_key(char *shared_region_id)
{
	shared_region_jop_key_map_t region;
	uint64_t key;

	lck_mtx_lock(&shared_region_jop_key_lock);
	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
			goto found;
		}
	}
	panic("shared_region_find_key() no key for region '%s'", shared_region_id);

found:
	key = region->srk_jop_key;
	lck_mtx_unlock(&shared_region_jop_key_lock);
	return key;
}

/*
 * Assign an authentication key to use for the given shared_region_id.
 * If inherit is TRUE, then the key must match inherited_key.
 * Creates an additional reference when successful.
 */
void
shared_region_key_alloc(char *shared_region_id, bool inherit, uint64_t inherited_key)
{
	shared_region_jop_key_map_t region;
	shared_region_jop_key_map_t new = NULL;

	assert(shared_region_id != NULL);
again:
	lck_mtx_lock(&shared_region_jop_key_lock);
	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
			os_ref_retain_locked(&region->srk_ref_count);
			goto done;
		}
	}

	/*
	 * ID was not found; if this is the first pass, allocate a new entry
	 * and redo the lookup.
	 */
	if (new == NULL) {
		lck_mtx_unlock(&shared_region_jop_key_lock);
		new = kalloc_type(struct shared_region_jop_key_map, Z_WAITOK);
		uint_t len = strlen(shared_region_id) + 1;
		new->srk_shared_region_id = kalloc_data(len, Z_WAITOK);
		strlcpy(new->srk_shared_region_id, shared_region_id, len);
		os_ref_init(&new->srk_ref_count, &srk_refgrp);

		if (diversify_user_jop && inherit) {
			new->srk_jop_key = inherited_key;
		} else if (diversify_user_jop && strlen(shared_region_id) > 0) {
			new->srk_jop_key = generate_jop_key();
		} else {
			new->srk_jop_key = ml_default_jop_pid();
		}

		goto again;
	}

	/*
	 * Use the newly allocated entry.
	 */
	++shared_region_key_count;
	queue_enter_first(&shared_region_jop_key_queue, new, shared_region_jop_key_map_t, srk_queue);
	region = new;
	new = NULL;

done:
	if (inherit && inherited_key != region->srk_jop_key) {
		panic("shared_region_key_alloc() inherited key mismatch");
	}
	lck_mtx_unlock(&shared_region_jop_key_lock);

	/*
	 * Free any unused new entry.
	 */
	if (new != NULL) {
		kfree_data(new->srk_shared_region_id,
		    strlen(new->srk_shared_region_id) + 1);
		kfree_type(struct shared_region_jop_key_map, new);
	}
}
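
/*
 * Usage sketch (illustrative only; the real callers live in the task and
 * shared region setup paths elsewhere in xnu):
 *
 *	shared_region_key_alloc(id, inherit, parent_key);  ... take a ref
 *	key = shared_region_find_key(id);                  ... use the key
 *	shared_region_key_dealloc(id);                     ... drop the ref
 *
 * Each alloc must eventually be balanced by a dealloc; the last dealloc
 * for an ID removes and frees the entry.
 */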

/*
 * Mark the end of using a shared_region_id's key
 */
extern void
shared_region_key_dealloc(char *shared_region_id)
{
	shared_region_jop_key_map_t region;

	assert(shared_region_id != NULL);
	lck_mtx_lock(&shared_region_jop_key_lock);
	queue_iterate(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue) {
		if (strcmp(region->srk_shared_region_id, shared_region_id) == 0) {
			goto done;
		}
	}
	panic("shared_region_key_dealloc() Shared region ID '%s' not found", shared_region_id);

done:
	if (os_ref_release_locked(&region->srk_ref_count) == 0) {
		queue_remove(&shared_region_jop_key_queue, region, shared_region_jop_key_map_t, srk_queue);
		--shared_region_key_count;
	} else {
		region = NULL;
	}
	lck_mtx_unlock(&shared_region_jop_key_lock);

	if (region != NULL) {
		kfree_data(region->srk_shared_region_id,
		    strlen(region->srk_shared_region_id) + 1);
		kfree_type(struct shared_region_jop_key_map, region);
	}
}
#endif /* __has_feature(ptrauth_calls) */

/*
 * The "shared_region_pager" describes a memory object backed by
 * the "shared_region" EMM.
 */
typedef struct shared_region_pager {
	struct memory_object    srp_header;          /* mandatory generic header */

	/* pager-specific data */
	queue_chain_t           srp_queue;          /* next & prev pagers */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define srp_ref_count           srp_header.mo_ref
#else
	os_ref_atomic_t         srp_ref_count;      /* active uses */
#endif
	bool                    srp_is_mapped;      /* has active mappings */
	bool                    srp_is_ready;       /* is this pager ready? */
	vm_object_t             srp_backing_object; /* VM object for shared cache */
	vm_object_offset_t      srp_backing_offset;
	vm_shared_region_slide_info_t srp_slide_info;
#if __has_feature(ptrauth_calls)
	uint64_t                srp_jop_key;        /* zero if used for arm64 */
#endif /* __has_feature(ptrauth_calls) */
} *shared_region_pager_t;
#define SHARED_REGION_PAGER_NULL        ((shared_region_pager_t) NULL)

/*
 * List of memory objects managed by this EMM.
 * The list is protected by the "shared_region_pager_lock" lock.
 */
int shared_region_pager_count = 0;              /* number of pagers */
int shared_region_pager_count_mapped = 0;       /* number of mapped pagers */
queue_head_t shared_region_pager_queue = QUEUE_HEAD_INITIALIZER(shared_region_pager_queue);
LCK_GRP_DECLARE(shared_region_pager_lck_grp, "shared_region_pager");
LCK_MTX_DECLARE(shared_region_pager_lock, &shared_region_pager_lck_grp);

/*
 * Maximum number of unmapped pagers we're willing to keep around.
 */
int shared_region_pager_cache_limit = 0;

/*
 * Statistics & counters.
 */
int shared_region_pager_count_max = 0;
int shared_region_pager_count_unmapped_max = 0;
int shared_region_pager_num_trim_max = 0;
int shared_region_pager_num_trim_total = 0;

uint64_t shared_region_pager_copied = 0;
uint64_t shared_region_pager_slid = 0;
uint64_t shared_region_pager_slid_error = 0;
uint64_t shared_region_pager_reclaimed = 0;

/* internal prototypes */
shared_region_pager_t shared_region_pager_lookup(memory_object_t mem_obj);
void shared_region_pager_dequeue(shared_region_pager_t pager);
void shared_region_pager_deallocate_internal(shared_region_pager_t pager,
    boolean_t locked);
void shared_region_pager_terminate_internal(shared_region_pager_t pager);
void shared_region_pager_trim(void);


#if DEBUG
int shared_region_pagerdebug = 0;
#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

#define PAGER_DEBUG(LEVEL, A)                                           \
	MACRO_BEGIN                                                     \
	if ((shared_region_pagerdebug & (LEVEL)) == (LEVEL)) {          \
	        printf A;                                               \
	}                                                               \
	MACRO_END
#else
#define PAGER_DEBUG(LEVEL, A)
#endif
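
/*
 * Example (illustrative, DEBUG kernels only): setting
 *
 *	shared_region_pagerdebug = PAGER_ALL;
 *
 * (e.g. from the kernel debugger) makes every PAGER_DEBUG() check below
 * pass; a narrower mask like PAGER_PAGEIN enables only the traces tagged
 * with that level.
 */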

/*
 * shared_region_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
kern_return_t
shared_region_pager_init(
	memory_object_t         mem_obj,
	memory_object_control_t control,
#if !DEBUG
	__unused
#endif
	memory_object_cluster_size_t pg_size)
{
	shared_region_pager_t   pager;
	kern_return_t           kr;
	memory_object_attr_info_data_t  attributes;

	PAGER_DEBUG(PAGER_ALL,
	    ("shared_region_pager_init: %p, %p, %x\n",
	    mem_obj, control, pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	pager = shared_region_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->srp_header.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("shared_region_pager_init: "
		    "memory_object_change_attributes() failed");
	}

#if CONFIG_SECLUDED_MEMORY
	if (secluded_for_filecache) {
#if 00
		/*
		 * XXX FBDP do we want this in the secluded pool?
		 * Ideally, we'd want the shared region used by Camera to
		 * NOT be in the secluded pool, but all other shared regions
		 * in the secluded pool...
		 */
		memory_object_mark_eligible_for_secluded(control, TRUE);
#endif /* 00 */
	}
#endif /* CONFIG_SECLUDED_MEMORY */

	return KERN_SUCCESS;
}

/*
 * shared_region_pager_data_return()
 *
 * Handles page-out requests from VM.  This should never happen since
 * the pages provided by this EMM are not supposed to be dirty or dirtied
 * and VM should simply discard the contents and reclaim the pages if it
 * needs to.
 */
kern_return_t
shared_region_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t           data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("shared_region_pager_data_return: should never get called");
	return KERN_FAILURE;
}

kern_return_t
shared_region_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t           data_cnt)
{
	panic("shared_region_pager_data_initialize: should never get called");
	return KERN_FAILURE;
}

/*
 * shared_region_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
int shared_region_pager_data_request_debug = 0;
kern_return_t
shared_region_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t            length,
#if !DEBUG
	__unused
#endif
	vm_prot_t               protection_required,
	memory_object_fault_info_t mo_fault_info)
{
	shared_region_pager_t   pager;
	memory_object_control_t mo_control;
	upl_t                   upl;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl;
	unsigned int            pl_count;
	vm_object_t             src_top_object, src_page_object, dst_object;
	kern_return_t           kr, retval;
	vm_offset_t             src_vaddr, dst_vaddr;
	vm_offset_t             cur_offset;
	vm_offset_t             offset_in_page;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info     fault_info;
	mach_vm_offset_t        slide_start_address;
	u_int32_t                               slide_info_page_size;

	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_data_request: %p, %llx, %x, %x\n", mem_obj, offset, length, protection_required));

	retval = KERN_SUCCESS;
	src_top_object = VM_OBJECT_NULL;
	src_page_object = VM_OBJECT_NULL;
	upl = NULL;
	upl_pl = NULL;
	fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = shared_region_pager_lookup(mem_obj);
	assert(pager->srp_is_ready);
	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 1); /* pager is alive */
	assert(pager->srp_is_mapped); /* pager is mapped */

	PAGER_DEBUG(PAGER_PAGEIN, ("shared_region_pager_data_request: %p, %llx, %x, %x, pager %p\n", mem_obj, offset, length, protection_required, pager));

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->srp_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_NO_UPL), 0 /* arg */);
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object (itself backed by the shared cache file via
	 * the vnode pager).
	 */
	src_top_object = pager->srp_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	slide_start_address = pager->srp_slide_info->si_slid_address;
	slide_info_page_size = pager->srp_slide_info->si_slide_info_entry->version == 1 ? PAGE_SIZE_FOR_SR_SLIDE : pager->srp_slide_info->si_slide_info_entry->page_size;

	fault_info.lo_offset += pager->srp_backing_offset;
	fault_info.hi_offset += pager->srp_backing_offset;

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source (dyld shared cache) page in the kernel's
		 * virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    pager->srp_backing_offset + offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("shared_region_pager_data_request: "
			    "vm_fault_page() unexpected error 0x%x\n",
			    kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source
		 * and destination physical pages.
		 */
		dst_pnum = (ppnum_t)
		    upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page)
		        << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)
		    phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(
				src_page, PAGE_SIZE, 0,
				(const void *) src_vaddr);
		}
		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE,
		    src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
		for (offset_in_page = 0;
		    offset_in_page < PAGE_SIZE;
		    offset_in_page += slide_info_page_size) {
			vm_object_offset_t chunk_offset;
			vm_object_offset_t offset_in_backing_object;
			vm_object_offset_t offset_in_sliding_range;

			chunk_offset = offset + cur_offset + offset_in_page;

			bcopy((const char *)(src_vaddr +
			    offset_in_page),
			    (char *)(dst_vaddr + offset_in_page),
			    slide_info_page_size);

			offset_in_backing_object = (chunk_offset +
			    pager->srp_backing_offset);
			if ((offset_in_backing_object < pager->srp_slide_info->si_start) ||
			    (offset_in_backing_object >= pager->srp_slide_info->si_end)) {
				/* chunk is outside of sliding range: done */
				shared_region_pager_copied++;
				continue;
			}

			offset_in_sliding_range = offset_in_backing_object - pager->srp_slide_info->si_start;
			kr = vm_shared_region_slide_page(pager->srp_slide_info,
			    dst_vaddr + offset_in_page,
			    (mach_vm_offset_t) (offset_in_sliding_range + slide_start_address),
			    (uint32_t) (offset_in_sliding_range / slide_info_page_size),
#if __has_feature(ptrauth_calls)
			    pager->srp_slide_info->si_ptrauth ? pager->srp_jop_key : 0
#else /* __has_feature(ptrauth_calls) */
			    0
#endif /* __has_feature(ptrauth_calls) */
			    );
			if (shared_region_pager_data_request_debug) {
				printf("shared_region_data_request"
				    "(%p,0x%llx+0x%llx+0x%04llx): 0x%llx "
				    "in sliding range [0x%llx:0x%llx]: "
				    "SLIDE offset 0x%llx="
				    "(0x%llx+0x%llx+0x%llx+0x%04llx)"
				    "[0x%016llx 0x%016llx] "
				    "code_signed=%d "
				    "cs_validated=%d "
				    "cs_tainted=%d "
				    "cs_nx=%d "
				    "kr=0x%x\n",
				    pager,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    chunk_offset,
				    pager->srp_slide_info->si_start,
				    pager->srp_slide_info->si_end,
				    (pager->srp_backing_offset +
				    offset +
				    cur_offset +
				    offset_in_page),
				    pager->srp_backing_offset,
				    offset,
				    (uint64_t) cur_offset,
				    (uint64_t) offset_in_page,
				    *(uint64_t *)(dst_vaddr + offset_in_page),
				    *(uint64_t *)(dst_vaddr + offset_in_page + 8),
				    src_page_object->code_signed,
				    src_page->vmp_cs_validated,
				    src_page->vmp_cs_tainted,
				    src_page->vmp_cs_nx,
				    kr);
			}
			if (kr != KERN_SUCCESS) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_SHARED_REGION, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_SHARED_REGION_SLIDE_ERROR), 0 /* arg */);
				shared_region_pager_slid_error++;
				retval = KERN_MEMORY_ERROR;
				break;
			}
			shared_region_pager_slid++;
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		vm_page_wakeup_done(src_page_object, src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * shared_region_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
void
shared_region_pager_reference(
	memory_object_t         mem_obj)
{
	shared_region_pager_t   pager;

	pager = shared_region_pager_lookup(mem_obj);

	lck_mtx_lock(&shared_region_pager_lock);
	os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
	lck_mtx_unlock(&shared_region_pager_lock);
}


/*
 * shared_region_pager_dequeue:
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "shared_region_pager_lock".
 */
void
shared_region_pager_dequeue(
	shared_region_pager_t pager)
{
	assert(!pager->srp_is_mapped);

	queue_remove(&shared_region_pager_queue,
	    pager,
	    shared_region_pager_t,
	    srp_queue);
	pager->srp_queue.next = NULL;
	pager->srp_queue.prev = NULL;

	shared_region_pager_count--;
}

/*
 * shared_region_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. shared_region_pager_deallocate())
 * to finish the clean up.
 *
 * "shared_region_pager_lock" should not be held by the caller.
 * We don't need the lock because the pager has already been removed from
 * the pagers' list and is now ours exclusively.
 */
void
shared_region_pager_terminate_internal(
	shared_region_pager_t pager)
{
	assert(pager->srp_is_ready);
	assert(!pager->srp_is_mapped);
	assert(os_ref_get_count_raw(&pager->srp_ref_count) == 1);

	if (pager->srp_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->srp_backing_object);
		pager->srp_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->srp_header.mo_control, VM_OBJECT_DESTROY_PAGER);
}

/*
 * shared_region_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with shared_region_pager_lock held or not, but always returns
 * with it unlocked.
 */
void
shared_region_pager_deallocate_internal(
	shared_region_pager_t   pager,
	boolean_t               locked)
{
	boolean_t       needs_trimming;
	int             count_unmapped;
	os_ref_count_t  ref_count;

	if (!locked) {
		lck_mtx_lock(&shared_region_pager_lock);
	}

	/* if we have too many unmapped pagers, trim some */
	count_unmapped = shared_region_pager_count - shared_region_pager_count_mapped;
	needs_trimming = (count_unmapped > shared_region_pager_cache_limit);

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->srp_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only the "named" reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		shared_region_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&shared_region_pager_lock);
		shared_region_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped the existence reference;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&shared_region_pager_lock);

		vm_shared_region_slide_info_t si = pager->srp_slide_info;
#if __has_feature(ptrauth_calls)
		/*
		 * The slide_info for auth sections lives in the shared region.
		 * Just deallocate() on the shared region and clear the field.
		 */
		if (si != NULL) {
			if (si->si_shared_region != NULL) {
				assert(si->si_ptrauth);
				vm_shared_region_deallocate(si->si_shared_region);
				pager->srp_slide_info = NULL;
				si = NULL;
			}
		}
#endif /* __has_feature(ptrauth_calls) */
		if (si != NULL) {
			vm_object_deallocate(si->si_slide_object);
			/* free the slide_info_entry */
			kfree_data(si->si_slide_info_entry,
			    si->si_slide_info_size);
			kfree_type(struct vm_shared_region_slide_info, si);
			pager->srp_slide_info = NULL;
		}

		if (pager->srp_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->srp_header.mo_control);
			pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct shared_region_pager, pager);
		pager = SHARED_REGION_PAGER_NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&shared_region_pager_lock);
	}

	if (needs_trimming) {
		shared_region_pager_trim();
	}
	/* caution: lock is not held on return... */
}

/*
 * shared_region_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
void
shared_region_pager_deallocate(
	memory_object_t         mem_obj)
{
	shared_region_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_deallocate: %p\n", mem_obj));
	pager = shared_region_pager_lookup(mem_obj);
	shared_region_pager_deallocate_internal(pager, FALSE);
}

/*
 * shared_region_pager_terminate()
 *
 * Called by VM when the memory object is being terminated.  The real
 * cleanup happens in shared_region_pager_deallocate_internal(), so there
 * is nothing to do here.
 */
kern_return_t
shared_region_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * shared_region_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped, but we only take one extra reference the
 * first time it is called.
 */
kern_return_t
shared_region_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	shared_region_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL, ("shared_region_pager_map: %p\n", mem_obj));

	pager = shared_region_pager_lookup(mem_obj);

	lck_mtx_lock(&shared_region_pager_lock);
	assert(pager->srp_is_ready);
	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0); /* pager is alive */
	if (!pager->srp_is_mapped) {
		pager->srp_is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
		shared_region_pager_count_mapped++;
	}
	lck_mtx_unlock(&shared_region_pager_lock);

	return KERN_SUCCESS;
}

/*
 * shared_region_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
kern_return_t
shared_region_pager_last_unmap(
	memory_object_t         mem_obj)
{
	shared_region_pager_t   pager;
	int                     count_unmapped;

	PAGER_DEBUG(PAGER_ALL,
	    ("shared_region_pager_last_unmap: %p\n", mem_obj));

	pager = shared_region_pager_lookup(mem_obj);

	lck_mtx_lock(&shared_region_pager_lock);
	if (pager->srp_is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		shared_region_pager_count_mapped--;
		count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
		if (count_unmapped > shared_region_pager_count_unmapped_max) {
			shared_region_pager_count_unmapped_max = count_unmapped;
		}
		pager->srp_is_mapped = FALSE;
		shared_region_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&shared_region_pager_lock);
	}

	return KERN_SUCCESS;
}

boolean_t
shared_region_pager_backing_object(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	vm_object_t             *backing_object,
	vm_object_offset_t      *backing_offset)
{
	shared_region_pager_t   pager;

	PAGER_DEBUG(PAGER_ALL,
	    ("shared_region_pager_backing_object: %p\n", mem_obj));

	pager = shared_region_pager_lookup(mem_obj);

	*backing_object = pager->srp_backing_object;
	*backing_offset = pager->srp_backing_offset + offset;

	return TRUE;
}


/*
 * shared_region_pager_lookup()
 *
 * Converts a memory object into its shared_region_pager, asserting that
 * the object really belongs to this EMM and is still alive.
 */
shared_region_pager_t
shared_region_pager_lookup(
	memory_object_t  mem_obj)
{
	shared_region_pager_t   pager;

	assert(mem_obj->mo_pager_ops == &shared_region_pager_ops);
	pager = (shared_region_pager_t)(uintptr_t) mem_obj;
	assert(os_ref_get_count_raw(&pager->srp_ref_count) > 0);
	return pager;
}

/*
 * Create and return a pager for the given object with the
 * given slide information.
 */
static shared_region_pager_t
shared_region_pager_create(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	struct vm_shared_region_slide_info *slide_info,
#if !__has_feature(ptrauth_calls)
	__unused
#endif /* !__has_feature(ptrauth_calls) */
	uint64_t                jop_key)
{
	shared_region_pager_t   pager;
	memory_object_control_t control;
	kern_return_t           kr;
	vm_object_t             object;

	pager = kalloc_type(struct shared_region_pager, Z_WAITOK);
	if (pager == SHARED_REGION_PAGER_NULL) {
		return SHARED_REGION_PAGER_NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->srp_header.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->srp_header.mo_pager_ops = &shared_region_pager_ops;
	pager->srp_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->srp_is_ready = FALSE; /* not ready until it has a "name" */
	/* existence reference (for the cache) + 1 for the caller */
	os_ref_init_count_raw(&pager->srp_ref_count, NULL, 2);
	pager->srp_is_mapped = FALSE;
	pager->srp_backing_object = backing_object;
	pager->srp_backing_offset = backing_offset;
	pager->srp_slide_info = slide_info;
#if __has_feature(ptrauth_calls)
	pager->srp_jop_key = jop_key;
	/*
	 * If we're getting slide_info from the shared_region,
	 * take a reference, so it can't disappear from under us.
	 */
	if (slide_info->si_shared_region) {
		assert(slide_info->si_ptrauth);
		vm_shared_region_reference(slide_info->si_shared_region);
	}
#endif /* __has_feature(ptrauth_calls) */

	vm_object_reference(backing_object);

	lck_mtx_lock(&shared_region_pager_lock);
	/* enter new pager at the head of our list of pagers */
	queue_enter_first(&shared_region_pager_queue,
	    pager,
	    shared_region_pager_t,
	    srp_queue);
	shared_region_pager_count++;
	if (shared_region_pager_count > shared_region_pager_count_max) {
		shared_region_pager_count_max = shared_region_pager_count;
	}
	lck_mtx_unlock(&shared_region_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager,
	    0,
	    &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&shared_region_pager_lock);
	/* the new pager is now ready to be used */
	pager->srp_is_ready = TRUE;
	object = memory_object_to_vm_object((memory_object_t) pager);
	assert(object);
	/*
	 * No one knows about this object and so we get away without the object lock.
	 * This object is _eventually_ backed by the dyld shared cache and so we want
	 * to benefit from the lock priority boosting.
	 */
	object->object_is_shared_cache = TRUE;
	lck_mtx_unlock(&shared_region_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->srp_is_ready);

	return pager;
}

/*
 * shared_region_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
memory_object_t
shared_region_pager_setup(
	vm_object_t             backing_object,
	vm_object_offset_t      backing_offset,
	struct vm_shared_region_slide_info *slide_info,
	uint64_t                jop_key)
{
	shared_region_pager_t   pager;

	/* create new pager */
	pager = shared_region_pager_create(backing_object,
	    backing_offset, slide_info, jop_key);
	if (pager == SHARED_REGION_PAGER_NULL) {
		/* could not create a new pager */
		return MEMORY_OBJECT_NULL;
	}

	lck_mtx_lock(&shared_region_pager_lock);
	while (!pager->srp_is_ready) {
		lck_mtx_sleep(&shared_region_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->srp_is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&shared_region_pager_lock);

	return (memory_object_t) pager;
}
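
/*
 * Illustrative caller sketch (assumptions: the caller owns a reference on
 * "obj" and keeps "si" alive; the mapping path shown is the typical one
 * in vm_shared_region.c, not the only possible one):
 *
 *	mo = shared_region_pager_setup(obj, off, si, key);
 *	if (mo == MEMORY_OBJECT_NULL)
 *		... handle failure ...
 *	... map "mo" copy-on-write into the shared region ...
 *	memory_object_deallocate(mo);	... once the mapping holds its own ref
 */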

#if __has_feature(ptrauth_calls)
/*
 * shared_region_pager_match()
 *
 * Look for an existing pager that matches the given backing object,
 * offset and slide information and return it with an extra reference;
 * create a new pager via shared_region_pager_setup() if none matches.
 */
memory_object_t
shared_region_pager_match(
	vm_object_t                   backing_object,
	vm_object_offset_t            backing_offset,
	vm_shared_region_slide_info_t slide_info,
	uint64_t                      jop_key)
{
	shared_region_pager_t         pager;
	vm_shared_region_slide_info_t si;

	lck_mtx_lock(&shared_region_pager_lock);
	queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
		if (pager->srp_backing_object != backing_object->vo_copy) {
			continue;
		}
		if (pager->srp_backing_offset != backing_offset) {
			continue;
		}
		si = pager->srp_slide_info;

		/* If there's no AUTH section then it can't match (slide_info is always !NULL) */
		if (!si->si_ptrauth) {
			continue;
		}
		if (pager->srp_jop_key != jop_key) {
			continue;
		}
		if (si->si_slide != slide_info->si_slide) {
			continue;
		}
		if (si->si_start != slide_info->si_start) {
			continue;
		}
		if (si->si_end != slide_info->si_end) {
			continue;
		}
		if (si->si_slide_object != slide_info->si_slide_object) {
			continue;
		}
		if (si->si_slide_info_size != slide_info->si_slide_info_size) {
			continue;
		}
		if (memcmp(si->si_slide_info_entry, slide_info->si_slide_info_entry, si->si_slide_info_size) != 0) {
			continue;
		}
		/* the caller expects a reference on this */
		os_ref_retain_locked_raw(&pager->srp_ref_count, NULL);
		lck_mtx_unlock(&shared_region_pager_lock);
		return (memory_object_t)pager;
	}

	/*
	 * We didn't find a pre-existing pager, so create one.
	 *
	 * Note slight race condition here since we drop the lock. This could lead to more than one
	 * thread calling setup with the same arguments here. That shouldn't break anything, just
	 * waste a little memory.
	 */
	lck_mtx_unlock(&shared_region_pager_lock);
	return shared_region_pager_setup(backing_object->vo_copy, backing_offset, slide_info, jop_key);
}

void
shared_region_pager_match_task_key(memory_object_t memobj, __unused task_t task)
{
	__unused shared_region_pager_t  pager = (shared_region_pager_t)memobj;

	assert(pager->srp_jop_key == task->jop_pid);
}
#endif /* __has_feature(ptrauth_calls) */

void
shared_region_pager_trim(void)
{
	shared_region_pager_t   pager, prev_pager;
	queue_head_t            trim_queue;
	int                     num_trim;
	int                     count_unmapped;

	lck_mtx_lock(&shared_region_pager_lock);

	/*
	 * We have too many pagers, try and trim some unused ones,
	 * starting with the oldest pager at the end of the queue.
	 */
	queue_init(&trim_queue);
	num_trim = 0;

	for (pager = (shared_region_pager_t)queue_last(&shared_region_pager_queue);
	    !queue_end(&shared_region_pager_queue, (queue_entry_t) pager);
	    pager = prev_pager) {
		/* get prev elt before we dequeue */
		prev_pager = (shared_region_pager_t)queue_prev(&pager->srp_queue);

		if (os_ref_get_count_raw(&pager->srp_ref_count) == 2 &&
		    pager->srp_is_ready &&
		    !pager->srp_is_mapped) {
			/* this pager can be trimmed */
			num_trim++;
			/* remove this pager from the main list ... */
			shared_region_pager_dequeue(pager);
			/* ... and add it to our trim queue */
			queue_enter_first(&trim_queue,
			    pager,
			    shared_region_pager_t,
			    srp_queue);

			/* do we have enough pagers to trim? */
			count_unmapped = (shared_region_pager_count - shared_region_pager_count_mapped);
			if (count_unmapped <= shared_region_pager_cache_limit) {
				break;
			}
		}
	}
	if (num_trim > shared_region_pager_num_trim_max) {
		shared_region_pager_num_trim_max = num_trim;
	}
	shared_region_pager_num_trim_total += num_trim;

	lck_mtx_unlock(&shared_region_pager_lock);

	/* terminate the trimmed pagers */
	while (!queue_empty(&trim_queue)) {
		queue_remove_first(&trim_queue,
		    pager,
		    shared_region_pager_t,
		    srp_queue);
		pager->srp_queue.next = NULL;
		pager->srp_queue.prev = NULL;
		assert(os_ref_get_count_raw(&pager->srp_ref_count) == 2);
		/*
		 * We can't call deallocate_internal() because the pager
		 * has already been dequeued, but we still need to remove
		 * a reference.
		 */
		(void)os_ref_release_locked_raw(&pager->srp_ref_count, NULL);
		shared_region_pager_terminate_internal(pager);
	}
}

static uint64_t
shared_region_pager_purge(
	shared_region_pager_t pager)
{
	uint64_t pages_purged;
	vm_object_t object;

	pages_purged = 0;
	object = memory_object_to_vm_object((memory_object_t) pager);
	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	pages_purged = object->resident_page_count;
	vm_object_reap_pages(object, REAP_DATA_FLUSH);
	pages_purged -= object->resident_page_count;
//	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
	vm_object_unlock(object);
	return pages_purged;
}

uint64_t
shared_region_pager_purge_all(void)
{
	uint64_t pages_purged;
	shared_region_pager_t pager;

	pages_purged = 0;
	lck_mtx_lock(&shared_region_pager_lock);
	queue_iterate(&shared_region_pager_queue, pager, shared_region_pager_t, srp_queue) {
		pages_purged += shared_region_pager_purge(pager);
	}
	lck_mtx_unlock(&shared_region_pager_lock);
#if DEVELOPMENT || DEBUG
	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
#endif /* DEVELOPMENT || DEBUG */
	return pages_purged;
}
1450