xref: /xnu-11215.41.3/osfmk/vm/vm_dyld_pager.c (revision 33de042d024d46de5ff4e89f2471de6608e37fa4)
1 /*
2  * Copyright (c) 2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #include <sys/errno.h>
30 
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42 
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/thread.h>
46 #include <kern/ipc_kobject.h>
47 
48 #include <ipc/ipc_port.h>
49 #include <ipc/ipc_space.h>
50 
51 #include <vm/memory_object_internal.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_fault_internal.h>
54 #include <vm/vm_map_xnu.h>
55 #include <vm/vm_pageout_xnu.h>
56 #include <vm/vm_protos_internal.h>
57 #include <vm/vm_dyld_pager_internal.h>
58 #include <vm/vm_ubc.h>
59 #include <vm/vm_page_internal.h>
60 #include <vm/vm_object_internal.h>
61 #include <vm/vm_sanitize_internal.h>
62 
63 #include <sys/kdebug_triage.h>
64 #include <mach-o/fixup-chains.h>
65 #if defined(HAS_APPLE_PAC)
66 #include <ptrauth.h>
67 #include <arm/misc_protos.h>
68 #endif /* defined(HAS_APPLE_PAC) */
69 
70 /*
71  * DYLD page in linking pager.
72  *
73  * This external memory manager (EMM) applies dyld fixup to data
74  * pages, allowing the modified page to appear "clean".
75  *
76  * The modified pages will never be dirtied, so the memory manager doesn't
77  * need to handle page-out requests (from memory_object_data_return()).  The
78  * pages are mapped copy-on-write, so that the originals stay clean.
79  */
80 
/* forward declarations */
typedef struct dyld_pager *dyld_pager_t;

/* reference counting / lifecycle */
static void dyld_pager_reference(memory_object_t mem_obj);
static void dyld_pager_deallocate(memory_object_t mem_obj);
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
static kern_return_t dyld_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
static void dyld_pager_terminate_internal(dyld_pager_t pager);

/* page-in path: applies dyld fixups to the page before returning it */
static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);

/* page-out / initialize: never expected for this pager (see panics below) */
static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t      data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);

/* mapping bookkeeping */
static kern_return_t dyld_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
static boolean_t dyld_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);

/* memory_object_t -> dyld_pager_t conversion */
static dyld_pager_t dyld_pager_lookup(memory_object_t  mem_obj);
116 
/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops dyld_pager_ops = {
	.memory_object_reference = dyld_pager_reference,
	.memory_object_deallocate = dyld_pager_deallocate,
	.memory_object_init = dyld_pager_init,
	.memory_object_terminate = dyld_pager_terminate,
	.memory_object_data_request = dyld_pager_data_request,
	/* data_return / data_initialize should never be called: both panic */
	.memory_object_data_return = dyld_pager_data_return,
	.memory_object_data_initialize = dyld_pager_data_initialize,
	.memory_object_map = dyld_pager_map,
	.memory_object_last_unmap = dyld_pager_last_unmap,
	.memory_object_backing_object = dyld_pager_backing_object,
	.memory_object_pager_name = "dyld"
};
134 
/*
 * The "dyld_pager" structure. We create one of these for each use of
 * map_with_linking_np() that dyld uses.
 */
struct dyld_pager {
	struct memory_object    dyld_header;          /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count           dyld_header.mo_ref
#else
	os_ref_atomic_t         dyld_ref_count;      /* active uses */
#endif
	queue_chain_t           dyld_pager_queue;    /* next & prev pagers */
	bool                    dyld_is_mapped;      /* has active mappings */
	bool                    dyld_is_ready;       /* is this pager ready? */
	vm_object_t             dyld_backing_object; /* VM object for shared cache */
	void                    *dyld_link_info;     /* copy of dyld's chained-fixup link info blob (mwl_info_hdr + chains + binds) */
	uint32_t                dyld_link_info_size; /* size in bytes of dyld_link_info; all parsing is bounds-checked against it */
	uint32_t                dyld_num_range;      /* presumably the count of valid entries in the arrays below — set up outside this view */
	memory_object_offset_t  dyld_file_offset[MWL_MAX_REGION_COUNT]; /* per-region offset in the backing object */
	mach_vm_address_t       dyld_address[MWL_MAX_REGION_COUNT];     /* per-region user address */
	mach_vm_size_t          dyld_size[MWL_MAX_REGION_COUNT];        /* per-region size */
#if defined(HAS_APPLE_PAC)
	uint64_t                dyld_a_key;          /* key passed to pmap_sign_user_ptr(); 0 => pointers left unsigned (see signPointer()) */
#endif /* defined(HAS_APPLE_PAC) */
};
161 
/* list of all live dyld pagers; presumably protected by dyld_pager_lock (see dyld_pager_dequeue's locking note) */
queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);

/*
 * "dyld_pager_lock" for counters, ref counting, etc.
 */
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);

/*
 * Statistics & counters.
 */
uint32_t dyld_pager_count = 0;      /* number of pagers currently on the queue (decremented in dyld_pager_dequeue) */
uint32_t dyld_pager_count_max = 0;  /* presumably a high-water mark — updated where pagers are created, outside this view */
175 
176 /*
177  * dyld_pager_dequeue()
178  *
179  * Removes a pager from the list of pagers.
180  *
181  * The caller must hold "dyld_pager".
182  */
183 static void
dyld_pager_dequeue(__unused dyld_pager_t pager)184 dyld_pager_dequeue(
185 	__unused dyld_pager_t pager)
186 {
187 	queue_remove(&dyld_pager_queue,
188 	    pager,
189 	    dyld_pager_t,
190 	    dyld_pager_queue);
191 	pager->dyld_pager_queue.next = NULL;
192 	pager->dyld_pager_queue.prev = NULL;
193 	dyld_pager_count--;
194 }
195 
196 /*
197  * dyld_pager_init()
198  *
199  * Initialize the memory object and makes it ready to be used and mapped.
200  */
201 static kern_return_t
dyld_pager_init(memory_object_t mem_obj,memory_object_control_t control,__unused memory_object_cluster_size_t pg_size)202 dyld_pager_init(
203 	memory_object_t                 mem_obj,
204 	memory_object_control_t         control,
205 	__unused
206 	memory_object_cluster_size_t    pg_size)
207 {
208 	dyld_pager_t                    pager;
209 	kern_return_t                   kr;
210 	memory_object_attr_info_data_t  attributes;
211 
212 	if (control == MEMORY_OBJECT_CONTROL_NULL) {
213 		printf("%s(): control NULL\n", __func__);
214 		return KERN_INVALID_ARGUMENT;
215 	}
216 
217 	pager = dyld_pager_lookup(mem_obj);
218 
219 	memory_object_control_reference(control);
220 
221 	pager->dyld_header.mo_control = control;
222 
223 	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
224 	attributes.cluster_size = (1 << (PAGE_SHIFT));
225 	attributes.may_cache_object = FALSE;
226 	attributes.temporary = TRUE;
227 
228 	kr = memory_object_change_attributes(
229 		control,
230 		MEMORY_OBJECT_ATTRIBUTE_INFO,
231 		(memory_object_info_t) &attributes,
232 		MEMORY_OBJECT_ATTR_INFO_COUNT);
233 	if (kr != KERN_SUCCESS) {
234 		panic("dyld_pager_init: " "memory_object_change_attributes() failed");
235 	}
236 
237 	return KERN_SUCCESS;
238 }
239 
240 /*
241  * dyld_data_return()
242  *
243  * A page-out request from VM -- should never happen so panic.
244  */
245 static kern_return_t
dyld_pager_data_return(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_cluster_size_t data_cnt,__unused memory_object_offset_t * resid_offset,__unused int * io_error,__unused boolean_t dirty,__unused boolean_t kernel_copy,__unused int upl_flags)246 dyld_pager_data_return(
247 	__unused memory_object_t        mem_obj,
248 	__unused memory_object_offset_t offset,
249 	__unused memory_object_cluster_size_t data_cnt,
250 	__unused memory_object_offset_t *resid_offset,
251 	__unused int                    *io_error,
252 	__unused boolean_t              dirty,
253 	__unused boolean_t              kernel_copy,
254 	__unused int                    upl_flags)
255 {
256 	panic("dyld_pager_data_return: should never happen!");
257 	return KERN_FAILURE;
258 }
259 
260 static kern_return_t
dyld_pager_data_initialize(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_cluster_size_t data_cnt)261 dyld_pager_data_initialize(
262 	__unused memory_object_t        mem_obj,
263 	__unused memory_object_offset_t offset,
264 	__unused memory_object_cluster_size_t data_cnt)
265 {
266 	panic("dyld_pager_data_initialize: should never happen");
267 	return KERN_FAILURE;
268 }
269 
270 
/*
 * Apply fixups to a page used by a 64 bit process.
 *
 * Walks the DYLD_CHAINED_PTR_64 / DYLD_CHAINED_PTR_64_OFFSET chain that
 * starts in this page, rewriting each chained value in place: binds are
 * resolved through the binds array in the link info, rebases have the
 * slide (or image address, when offsetBased) applied.
 *
 * Returns KERN_FAILURE (after logging a ktriage event) if the chain walks
 * outside the page or references an out-of-range bind ordinal.
 */
static kern_return_t
fixupPage64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t                              *bindsArray  = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * Done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * walk the chain
	 */
	uint64_t *chain  = (uint64_t *)(contents + firstStartOffset);
	/* rebase targets get the image address when offset-based, else the slide */
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		/* each 8-byte link must lie entirely within the mapped page */
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value  = *chain;
		bool     isBind = (value & 0x8000000000000000ULL); /* bit 63: bind vs rebase */
		delta = (value >> 51) & 0xFFF;                     /* 12-bit link to next fixup, in 4-byte strides */
		if (isBind) {
			uint32_t bindOrdinal = value & 0x00FFFFFF; /* 24-bit index into bindsArray */
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 24) & 0xFF; /* 8-bit unsigned addend */
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			/* is rebase */
			uint64_t target = value & 0xFFFFFFFFFULL; /* 36-bit target */
			uint64_t high8  = (value >> 36) & 0xFF;   /* top byte, restored to bits 56..63 */
			*chain = target + targetAdjust + (high8 << 56);
		}
		/* a stride that would step past the page means corrupt link info */
		if (delta * 4 >= PAGE_SIZE) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
			printf("%s(): delta offset > page size %lld\n", __func__, delta * 4);
			return KERN_FAILURE;
		}
		chain = (uint64_t *)((uintptr_t)chain + (delta * 4)); // 4-byte stride
	} while (delta != 0);
	return KERN_SUCCESS;
}
336 
337 
/*
 * Apply fixups within a page used by a 32 bit process.
 *
 * Walks one DYLD_CHAINED_PTR_32 chain starting at "chain", rewriting each
 * 32-bit value in place: binds are resolved through bindsArray, rebases
 * are slid (with special handling for encoded non-pointer values above
 * segInfo->max_valid_pointer).
 *
 * Returns KERN_FAILURE (after logging a ktriage event) if the chain walks
 * outside the page or references an out-of-range bind ordinal.
 */
static kern_return_t
fixupChain32(
	uint64_t                              userVA,
	uint32_t                              *chain,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              *bindsArray)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              delta = 0;

	do {
		/* each 4-byte link must lie entirely within the mapped page */
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint32_t value = *chain;
		delta = (value >> 26) & 0x1F; /* 5-bit link to next fixup, in 4-byte units */
		if (value & 0x80000000) {
			// is bind
			uint32_t bindOrdinal = value & 0x000FFFFF; /* 20-bit index into bindsArray */
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 20) & 0x3F; /* 6-bit unsigned addend */
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			// is rebase
			uint32_t target = value & 0x03FFFFFF; /* 26-bit target */
			if (target > segInfo->max_valid_pointer) {
				// handle non-pointers in chain
				uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
				*chain = target - bias;
			} else {
				*chain = target + (uint32_t)hdr->mwli_slide;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}
389 
390 
391 /*
392  * Apply fixups to a page used by a 32 bit process.
393  */
394 static kern_return_t
fixupPage32(uint64_t userVA,vm_offset_t contents,vm_offset_t end_contents,void * link_info,uint32_t link_info_size,struct dyld_chained_starts_in_segment * segInfo,uint32_t pageIndex)395 fixupPage32(
396 	uint64_t                              userVA,
397 	vm_offset_t                           contents,
398 	vm_offset_t                           end_contents,
399 	void                                  *link_info,
400 	uint32_t                              link_info_size,
401 	struct dyld_chained_starts_in_segment *segInfo,
402 	uint32_t                              pageIndex)
403 {
404 	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr  *)link_info;
405 	uint32_t                              *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
406 	uint16_t                              startOffset = segInfo->page_start[pageIndex];
407 
408 	/*
409 	 * done if no fixups
410 	 */
411 	if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
412 		return KERN_SUCCESS;
413 	}
414 
415 	if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
416 		// some fixups in the page are too far apart, so page has multiple starts
417 		uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
418 		bool chainEnd = false;
419 		while (!chainEnd) {
420 			/*
421 			 * range check against link_info, note +1 to include data we'll dereference
422 			 */
423 			if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
424 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
425 				printf("%s(): out of range segInfo->page_start[overflowIndex]", __func__);
426 				return KERN_FAILURE;
427 			}
428 			chainEnd    = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
429 			startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
430 			uint32_t *chain = (uint32_t *)(contents + startOffset);
431 			fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
432 			++overflowIndex;
433 		}
434 	} else {
435 		uint32_t *chain = (uint32_t *)(contents + startOffset);
436 		fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
437 	}
438 	return KERN_SUCCESS;
439 }
440 
441 #if defined(HAS_APPLE_PAC)
/*
 * Sign a pointer needed for fixups.
 *
 * Produces in *signedAddr the PAC-signed form of unsignedAddr, using the
 * given key and diversity (optionally blended with the pointer's own user
 * address "loc" when addrDiv is set).  NULL is passed through unsigned.
 * When the pager has no A key (dyld_a_key == 0) or user JOP is disabled,
 * the raw value is stored unsigned.
 *
 * Only the A keys (IA/DA) are accepted; any other key is treated as
 * corrupt link info and fails.
 */
static kern_return_t
signPointer(
	uint64_t         unsignedAddr,
	void             *loc,
	bool             addrDiv,
	uint16_t         diversity,
	ptrauth_key      key,
	dyld_pager_t     pager,
	uint64_t         *signedAddr)
{
	// don't sign NULL
	if (unsignedAddr == 0) {
		*signedAddr = 0;
		return KERN_SUCCESS;
	}

	uint64_t extendedDiscriminator = diversity;
	if (addrDiv) {
		/* blend the user VA of the slot into the discriminator */
		extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
	}

	switch (key) {
	case ptrauth_key_asia:
	case ptrauth_key_asda:
		if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
			/* process isn't using PAC: store the value unsigned */
			*signedAddr = unsignedAddr;
		} else {
			/* sign with the user's key, not the kernel's */
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
		}
		break;

	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
483 
/*
 * Apply fixups to a page used by a 64 bit process using pointer authentication.
 *
 * Walks the DYLD_CHAINED_PTR_ARM64E / _USERLAND / _USERLAND24 chain that
 * starts in this page.  Each 8-byte value encodes auth/bind flags in its
 * top bits; authenticated slots are re-signed with the user's key via
 * signPointer(), plain slots are rewritten directly.
 *
 * Returns KERN_FAILURE (after logging a ktriage event) on any range or
 * encoding error.
 */
static kern_return_t
fixupPageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t             *bindsArray = (uint64_t*)((uintptr_t)link_info + hdr->mwli_binds_offset);

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	/* rebase targets get the image address when offset-based, else the slide */
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		/* each 8-byte link must lie entirely within the mapped page */
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		delta = (value >> 51) & 0x7FF;                     /* 11-bit link to next fixup, in 8-byte units */
		bool isAuth = (value & 0x8000000000000000ULL);     /* bit 63: slot needs PAC signing */
		bool isBind = (value & 0x4000000000000000ULL);     /* bit 62: bind vs rebase */
		if (isAuth) {
			ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
			bool        addrDiv = ((value & (1ULL << 48)) != 0);       /* blend slot address into discriminator */
			uint16_t    diversity = (uint16_t)((value >> 32) & 0xFFFF);
			/* user VA of this slot, used for address diversity */
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF; /* 24-bit index into bindsArray */
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					return KERN_FAILURE;
				}
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			} else {
				/* note: in auth rebases only have 32-bits, so target is always offset - never vmaddr */
				uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			}
		} else {
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF; /* 24-bit index into bindsArray */
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					return KERN_FAILURE;
				} else {
					/* 19-bit signed addend: sign-extend from bit 18 */
					uint64_t addend19 = (value >> 32) & 0x0007FFFF;
					if (addend19 & 0x40000) {
						addend19 |=  0xFFFFFFFFFFFC0000ULL;
					}
					*chain = bindsArray[bindOrdinal] + addend19;
				}
			} else {
				uint64_t target = (value & 0x7FFFFFFFFFFULL);          /* 43-bit target */
				uint64_t high8  = (value << 13) & 0xFF00000000000000ULL; /* top byte restored to bits 56..63 */
				*chain = target + targetAdjust + high8;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}
584 
/*
 * Apply fixups to a page used by a 64 bit process using pointer authentication.
 *
 * Same idea as fixupPageAuth64(), but for the
 * DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE encoding: there are no binds, the
 * key is always an A key (IA or DA selected by a bit), and targets are
 * always 34-bit runtime offsets from the image address.
 *
 * Returns KERN_FAILURE (after logging a ktriage event) on any range or
 * encoding error.
 */
static kern_return_t
fixupCachePageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t delta = 0;
	do {
		/* each 8-byte link must lie entirely within the mapped page */
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		delta = (value >> 52) & 0x7FF;                 /* 11-bit link to next fixup, in 8-byte units */
		bool isAuth = (value & 0x8000000000000000ULL); /* bit 63: slot needs PAC signing */
		if (isAuth) {
			bool        addrDiv = ((value & (1ULL << 50)) != 0);
			bool        keyIsData = ((value & (1ULL << 51)) != 0);
			// the key is always A, and the bit tells us if its IA or ID
			ptrauth_key key = keyIsData ? ptrauth_key_asda : ptrauth_key_asia;
			uint16_t    diversity = (uint16_t)((value >> 34) & 0xFFFF);
			/* user VA of this slot, used for address diversity */
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
				return KERN_FAILURE;
			}
		} else {
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			uint64_t high8  = (value << 22) & 0xFF00000000000000ULL; /* top byte restored to bits 56..63 */
			*chain = target + high8;
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}
655 #endif /* defined(HAS_APPLE_PAC) */
656 
657 
658 /*
659  * Handle dyld fixups for a page.
660  */
661 static kern_return_t
fixup_page(vm_offset_t contents,vm_offset_t end_contents,uint64_t userVA,dyld_pager_t pager)662 fixup_page(
663 	vm_offset_t         contents,
664 	vm_offset_t         end_contents,
665 	uint64_t            userVA,
666 	dyld_pager_t        pager)
667 {
668 	void                                  *link_info = pager->dyld_link_info;
669 	uint32_t                              link_info_size = pager->dyld_link_info_size;
670 	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
671 	struct dyld_chained_starts_in_segment *segInfo = NULL;
672 	uint32_t                              pageIndex = 0;
673 	uint32_t                              segIndex;
674 	struct dyld_chained_starts_in_image   *startsInfo;
675 	struct dyld_chained_starts_in_segment *seg;
676 	uint64_t                              segStartAddress;
677 	uint64_t                              segEndAddress;
678 
679 	/*
680 	 * Note this is a linear search done for every page we have to fix up.
681 	 * However, it should be quick as there should only be 2 or 4 segments:
682 	 * - data
683 	 * - data const
684 	 * - data auth (for arm64e)
685 	 * - data const auth (for arm64e)
686 	 */
687 	startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
688 	for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
689 		seg = (struct dyld_chained_starts_in_segment *)
690 		    ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);
691 
692 		/*
693 		 * ensure we don't go out of bounds of the link_info
694 		 */
695 		if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
696 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
697 			printf("%s(): seg_info out of bounds\n", __func__);
698 			return KERN_FAILURE;
699 		}
700 
701 		segStartAddress = hdr->mwli_image_address + seg->segment_offset;
702 		segEndAddress = segStartAddress + seg->page_count * seg->page_size;
703 		if (segStartAddress <= userVA && userVA < segEndAddress) {
704 			segInfo = seg;
705 			pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;
706 
707 			/* ensure seg->size fits in link_info_size */
708 			if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
709 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
710 				printf("%s(): seg->size out of bounds\n", __func__);
711 				return KERN_FAILURE;
712 			}
713 			if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
714 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
715 				printf("%s(): seg->size too small\n", __func__);
716 				return KERN_FAILURE;
717 			}
718 			/* ensure page_count and pageIndex are valid too */
719 			if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
720 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
721 				printf("%s(): seg->page_count out of bounds\n", __func__);
722 				return KERN_FAILURE;
723 			}
724 			if (pageIndex >= seg->page_count) {
725 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
726 				printf("%s(): seg->page_count too small\n", __func__);
727 				return KERN_FAILURE;
728 			}
729 
730 			break;
731 		}
732 	}
733 
734 	/*
735 	 * Question for Nick.. or can we make this OK and just return KERN_SUCCESS, nothing to do?
736 	 */
737 	if (segInfo == NULL) {
738 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
739 		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
740 		return KERN_FAILURE;
741 	}
742 
743 	/*
744 	 * Route to the appropriate fixup routine
745 	 */
746 	switch (hdr->mwli_pointer_format) {
747 #if defined(HAS_APPLE_PAC)
748 	case DYLD_CHAINED_PTR_ARM64E:
749 		fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, false);
750 		break;
751 	case DYLD_CHAINED_PTR_ARM64E_USERLAND:
752 	case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
753 		fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, true);
754 		break;
755 	case DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE:
756 		fixupCachePageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex);
757 		break;
758 #endif /* defined(HAS_APPLE_PAC) */
759 	case DYLD_CHAINED_PTR_64:
760 		fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, false);
761 		break;
762 	case DYLD_CHAINED_PTR_64_OFFSET:
763 		fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, true);
764 		break;
765 	case DYLD_CHAINED_PTR_32:
766 		fixupPage32(userVA, contents, end_contents, link_info, link_info_size, segInfo, pageIndex);
767 		break;
768 	default:
769 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
770 		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
771 		return KERN_FAILURE;
772 	}
773 	return KERN_SUCCESS;
774 }
775 
776 /*
777  * dyld_pager_data_request()
778  *
779  * Handles page-in requests from VM.
780  */
static kern_return_t
dyld_pager_data_request(
	memory_object_t              mem_obj,
	memory_object_offset_t       offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t           protection_required,
	memory_object_fault_info_t   mo_fault_info)
{
	dyld_pager_t            pager;
	memory_object_control_t mo_control;
	upl_t                   upl = NULL;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl = NULL;
	unsigned int            pl_count;
	vm_object_t             src_top_object = VM_OBJECT_NULL;
	vm_object_t             src_page_object = VM_OBJECT_NULL;
	vm_object_t             dst_object;
	kern_return_t           kr;
	kern_return_t           retval = KERN_SUCCESS;
	vm_offset_t             src_vaddr;
	vm_offset_t             dst_vaddr;
	vm_offset_t             cur_offset;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	struct mwl_info_hdr     *hdr;
	uint32_t                r;
	uint64_t                userVA;

	/*
	 * We work on a private copy of the fault info, tweaked so the
	 * source faults below don't disturb page queues or zero-fill
	 * accounting on behalf of the original faulting thread.
	 */
	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = dyld_pager_lookup(mem_obj);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
	assert(pager->dyld_is_mapped); /* pager is mapped */
	/* NOTE(review): hdr is not referenced below; fixup routing uses it via fixup_page() on the pager */
	hdr = (struct mwl_info_hdr *)pager->dyld_link_info;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->dyld_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), 0 /* arg */);
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object, itself backed by the executable/library file via
	 * the vnode pager.
	 */
	src_top_object = pager->dyld_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source page in the kernel's virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			/* wait for a free page, then retry; give up if interrupted */
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("dyld_pager_data_request: vm_fault_page() unexpected error 0x%x\n", kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		/* keep the source page on the speculative queue (double-checked under the queue lock) */
		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source and destination physical pages.
		 */
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
		}

		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);

		/*
		 * Figure out what the original user virtual address was, based on the offset.
		 */
		userVA = 0;
		for (r = 0; r < pager->dyld_num_range; ++r) {
			vm_offset_t o = offset + cur_offset;
			if (pager->dyld_file_offset[r] <= o &&
			    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
				userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
				break;
			}
		}

		/*
		 * If we have a valid range fixup the page.
		 */
		if (r == pager->dyld_num_range) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
			retval = KERN_FAILURE;
		} else if (fixup_page(dst_vaddr, dst_vaddr + PAGE_SIZE, userVA, pager) != KERN_SUCCESS) {
			/* KDBG / printf was done under fixup_page() */
			retval = KERN_FAILURE;
		}
		if (retval != KERN_SUCCESS) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		vm_page_wakeup_done(src_top_object, src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}
1063 
1064 /*
1065  * dyld_pager_reference()
1066  *
1067  * Get a reference on this memory object.
1068  * For external usage only.  Assumes that the initial reference count is not 0,
1069  * i.e one should not "revive" a dead pager this way.
1070  */
1071 static void
dyld_pager_reference(memory_object_t mem_obj)1072 dyld_pager_reference(
1073 	memory_object_t mem_obj)
1074 {
1075 	dyld_pager_t    pager;
1076 
1077 	pager = dyld_pager_lookup(mem_obj);
1078 
1079 	lck_mtx_lock(&dyld_pager_lock);
1080 	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1081 	lck_mtx_unlock(&dyld_pager_lock);
1082 }
1083 
1084 
1085 
1086 /*
1087  * dyld_pager_terminate_internal:
1088  *
1089  * Trigger the asynchronous termination of the memory object associated
1090  * with this pager.
1091  * When the memory object is terminated, there will be one more call
1092  * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
1093  * to finish the clean up.
1094  *
1095  * "dyld_pager_lock" should not be held by the caller.
1096  */
static void
dyld_pager_terminate_internal(
	dyld_pager_t pager)
{
	/* caller must be holding the last (existence) reference, with no mappings left */
	assert(pager->dyld_is_ready);
	assert(!pager->dyld_is_mapped);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);

	/* drop the reference on the backing object taken in dyld_pager_create() */
	if (pager->dyld_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, VM_OBJECT_DESTROY_PAGER);
}
1112 
1113 /*
1114  * dyld_pager_deallocate_internal()
1115  *
1116  * Release a reference on this pager and free it when the last reference goes away.
1117  * Can be called with dyld_pager_lock held or not, but always returns
1118  * with it unlocked.
1119  */
static void
dyld_pager_deallocate_internal(
	dyld_pager_t   pager,
	bool           locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		/* triggers a final memory_object_deallocate() -> ref_count 0 path below */
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		/* the pager owns dyld_link_info (handed over in dyld_pager_create()) */
		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}
1168 
1169 /*
1170  * dyld_pager_deallocate()
1171  *
1172  * Release a reference on this pager and free it when the last
1173  * reference goes away.
1174  */
1175 static void
dyld_pager_deallocate(memory_object_t mem_obj)1176 dyld_pager_deallocate(
1177 	memory_object_t mem_obj)
1178 {
1179 	dyld_pager_t    pager;
1180 
1181 	pager = dyld_pager_lookup(mem_obj);
1182 	dyld_pager_deallocate_internal(pager, FALSE);
1183 }
1184 
1185 /*
1186  *
1187  */
/* No-op: termination work is done via dyld_pager_terminate_internal(). */
static kern_return_t
dyld_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}
1197 
1198 /*
1199  * dyld_pager_map()
1200  *
1201  * This allows VM to let us, the EMM, know that this memory object
1202  * is currently mapped one or more times.  This is called by VM each time
1203  * the memory object gets mapped, but we only take one extra reference the
1204  * first time it is called.
1205  */
1206 static kern_return_t
dyld_pager_map(memory_object_t mem_obj,__unused vm_prot_t prot)1207 dyld_pager_map(
1208 	memory_object_t         mem_obj,
1209 	__unused vm_prot_t      prot)
1210 {
1211 	dyld_pager_t   pager;
1212 
1213 	pager = dyld_pager_lookup(mem_obj);
1214 
1215 	lck_mtx_lock(&dyld_pager_lock);
1216 	assert(pager->dyld_is_ready);
1217 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
1218 	if (!pager->dyld_is_mapped) {
1219 		pager->dyld_is_mapped = TRUE;
1220 		os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1221 	}
1222 	lck_mtx_unlock(&dyld_pager_lock);
1223 
1224 	return KERN_SUCCESS;
1225 }
1226 
1227 /*
1228  * dyld_pager_last_unmap()
1229  *
1230  * This is called by VM when this memory object is no longer mapped anywhere.
1231  */
static kern_return_t
dyld_pager_last_unmap(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	if (pager->dyld_is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		pager->dyld_is_mapped = FALSE;
		/* pass locked==TRUE: we already hold dyld_pager_lock */
		dyld_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock ! */
	} else {
		lck_mtx_unlock(&dyld_pager_lock);
	}

	return KERN_SUCCESS;
}
1255 
1256 static boolean_t
dyld_pager_backing_object(memory_object_t mem_obj,memory_object_offset_t offset,vm_object_t * backing_object,vm_object_offset_t * backing_offset)1257 dyld_pager_backing_object(
1258 	memory_object_t         mem_obj,
1259 	memory_object_offset_t  offset,
1260 	vm_object_t             *backing_object,
1261 	vm_object_offset_t      *backing_offset)
1262 {
1263 	dyld_pager_t   pager;
1264 
1265 	pager = dyld_pager_lookup(mem_obj);
1266 
1267 	*backing_object = pager->dyld_backing_object;
1268 	*backing_offset = offset;
1269 
1270 	return TRUE;
1271 }
1272 
1273 
1274 /*
1275  * Convert from memory_object to dyld_pager.
1276  */
1277 static dyld_pager_t
dyld_pager_lookup(memory_object_t mem_obj)1278 dyld_pager_lookup(
1279 	memory_object_t  mem_obj)
1280 {
1281 	dyld_pager_t   pager;
1282 
1283 	assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
1284 	pager = (dyld_pager_t)(uintptr_t) mem_obj;
1285 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
1286 	return pager;
1287 }
1288 
1289 /*
1290  * Create and return a pager for the given object with the
1291  * given slide information.
1292  */
static dyld_pager_t
dyld_pager_create(
#if !defined(HAS_APPLE_PAC)
	__unused
#endif /* defined(HAS_APPLE_PAC) */
	task_t            task,
	vm_object_t       backing_object,
	struct mwl_region *regions,
	uint32_t          region_cnt,
	void              *link_info,
	uint32_t          link_info_size)
{
	dyld_pager_t            pager;
	memory_object_control_t control;
	kern_return_t           kr;

	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
	if (pager == NULL) {
		return NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->dyld_is_ready = FALSE;/* not ready until it has a "name" */
	/* existence reference for the caller */
	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
	pager->dyld_is_mapped = FALSE;
	pager->dyld_backing_object = backing_object;
	pager->dyld_link_info = link_info; /* pager takes ownership of this pointer here */
	pager->dyld_link_info_size = link_info_size;
#if defined(HAS_APPLE_PAC)
	/* use the task's JOP pid as the auth key, unless JOP is disabled for its pmap */
	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
#endif /* defined(HAS_APPLE_PAC) */

	/*
	 * Record the regions so the pager can find the offset from an address.
	 */
	pager->dyld_num_range = region_cnt;
	for (uint32_t r = 0; r < region_cnt; ++r) {
		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
		pager->dyld_address[r] = regions[r].mwlr_address;
		pager->dyld_size[r] = regions[r].mwlr_size;
	}

	/* keep the backing object alive for the pager's lifetime */
	vm_object_reference(backing_object);
	lck_mtx_lock(&dyld_pager_lock);
	queue_enter_first(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	dyld_pager_count++;
	if (dyld_pager_count > dyld_pager_count_max) {
		dyld_pager_count_max = dyld_pager_count;
	}
	lck_mtx_unlock(&dyld_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&dyld_pager_lock);
	/* the new pager is now ready to be used */
	pager->dyld_is_ready = TRUE;
	lck_mtx_unlock(&dyld_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->dyld_is_ready);

	return pager;
}
1373 
1374 /*
1375  * dyld_pager_setup()
1376  *
1377  * Provide the caller with a memory object backed by the provided
1378  * "backing_object" VM object.
1379  */
1380 static memory_object_t
dyld_pager_setup(task_t task,vm_object_t backing_object,struct mwl_region * regions,uint32_t region_cnt,void * link_info,uint32_t link_info_size)1381 dyld_pager_setup(
1382 	task_t            task,
1383 	vm_object_t       backing_object,
1384 	struct mwl_region *regions,
1385 	uint32_t          region_cnt,
1386 	void              *link_info,
1387 	uint32_t          link_info_size)
1388 {
1389 	dyld_pager_t      pager;
1390 
1391 	/* create new pager */
1392 	pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
1393 	if (pager == NULL) {
1394 		/* could not create a new pager */
1395 		return MEMORY_OBJECT_NULL;
1396 	}
1397 
1398 	lck_mtx_lock(&dyld_pager_lock);
1399 	while (!pager->dyld_is_ready) {
1400 		lck_mtx_sleep(&dyld_pager_lock,
1401 		    LCK_SLEEP_DEFAULT,
1402 		    &pager->dyld_is_ready,
1403 		    THREAD_UNINT);
1404 	}
1405 	lck_mtx_unlock(&dyld_pager_lock);
1406 
1407 	return (memory_object_t) pager;
1408 }
1409 
1410 /*
1411  * Set up regions which use a special pager to apply dyld fixups.
1412  *
1413  * The arguments to this function are mostly just used as input.
1414  * Except for the link_info! That is saved off in the pager that
1415  * gets created. If the pager assumed ownership of *link_info,
1416  * the argument is NULLed, if not, the caller need to free it on error.
1417  */
kern_return_t
vm_map_with_linking(
	task_t                  task,
	struct mwl_region       *regions,
	uint32_t                region_cnt,
	void                    **link_info,
	uint32_t                link_info_size,
	memory_object_control_t file_control)
{
	vm_map_t                map = task->map;
	vm_object_t             object = VM_OBJECT_NULL;
	memory_object_t         pager = MEMORY_OBJECT_NULL;
	uint32_t                r;
	vm_map_address_t        map_addr;
	kern_return_t           kr = KERN_SUCCESS;

	/* the file must be backed by a real (non-internal) VM object */
	object = memory_object_control_to_vm_object(file_control);
	if (object == VM_OBJECT_NULL || object->internal) {
		printf("%s no object for file_control\n", __func__);
		object = VM_OBJECT_NULL;
		kr = KERN_INVALID_ADDRESS;
		goto done;
	}

	/* create a pager */
	pager = dyld_pager_setup(task, object, regions, region_cnt, *link_info, link_info_size);
	if (pager == MEMORY_OBJECT_NULL) {
		kr = KERN_RESOURCE_SHORTAGE;
		goto done;
	}
	*link_info = NULL; /* ownership of this pointer was given to pager */

	for (r = 0; r < region_cnt; ++r) {
		/* fixed-address mapping that replaces whatever is already there */
		vm_map_kernel_flags_t vmk_flags = {
			.vmf_fixed = true,
			.vmf_overwrite = true,
			.vmkf_overwrite_immutable = true,
		};
		struct mwl_region *rp = &regions[r];

		/* map that pager over the portion of the mapping that needs sliding */
		map_addr = (vm_map_address_t)rp->mwlr_address;

		if (rp->mwlr_protections & VM_PROT_TPRO) {
			vmk_flags.vmf_tpro = TRUE;
		}

		kr = mach_vm_map_kernel(map,
		    vm_sanitize_wrap_addr_ref(&map_addr),
		    rp->mwlr_size,
		    0,
		    vmk_flags,
		    (ipc_port_t)(uintptr_t)pager,
		    rp->mwlr_file_offset,
		    TRUE,       /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
		    rp->mwlr_protections & VM_PROT_DEFAULT,
		    rp->mwlr_protections & VM_PROT_DEFAULT,
		    VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			/* no need to clean up earlier regions, this will be process fatal */
			goto done;
		}
	}

	/* success! */
	kr = KERN_SUCCESS;

done:

	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the pager reference obtained by dyld_pager_setup().
		 * The mapping, if it succeeded, is now holding a reference on the memory object.
		 */
		memory_object_deallocate(pager);
		pager = MEMORY_OBJECT_NULL;
	}
	return kr;
}
1497 
1498 static uint64_t
dyld_pager_purge(dyld_pager_t pager)1499 dyld_pager_purge(
1500 	dyld_pager_t pager)
1501 {
1502 	uint64_t pages_purged;
1503 	vm_object_t object;
1504 
1505 	pages_purged = 0;
1506 	object = memory_object_to_vm_object((memory_object_t) pager);
1507 	assert(object != VM_OBJECT_NULL);
1508 	vm_object_lock(object);
1509 	pages_purged = object->resident_page_count;
1510 	vm_object_reap_pages(object, REAP_DATA_FLUSH);
1511 	pages_purged -= object->resident_page_count;
1512 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1513 	vm_object_unlock(object);
1514 	return pages_purged;
1515 }
1516 
1517 uint64_t
dyld_pager_purge_all(void)1518 dyld_pager_purge_all(void)
1519 {
1520 	uint64_t pages_purged;
1521 	dyld_pager_t pager;
1522 
1523 	pages_purged = 0;
1524 	lck_mtx_lock(&dyld_pager_lock);
1525 	queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
1526 		pages_purged += dyld_pager_purge(pager);
1527 	}
1528 	lck_mtx_unlock(&dyld_pager_lock);
1529 #if DEVELOPMENT || DEBUG
1530 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1531 #endif /* DEVELOPMENT || DEBUG */
1532 	return pages_purged;
1533 }
1534