1 /*
2 * Copyright (c) 2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #include <sys/errno.h>
30
31 #include <mach/mach_types.h>
32 #include <mach/mach_traps.h>
33 #include <mach/host_priv.h>
34 #include <mach/kern_return.h>
35 #include <mach/memory_object_control.h>
36 #include <mach/memory_object_types.h>
37 #include <mach/port.h>
38 #include <mach/policy.h>
39 #include <mach/upl.h>
40 #include <mach/thread_act.h>
41 #include <mach/mach_vm.h>
42
43 #include <kern/host.h>
44 #include <kern/kalloc.h>
45 #include <kern/thread.h>
46 #include <kern/ipc_kobject.h>
47
48 #include <ipc/ipc_port.h>
49 #include <ipc/ipc_space.h>
50
51 #include <vm/memory_object_internal.h>
52 #include <vm/vm_kern.h>
53 #include <vm/vm_fault_internal.h>
54 #include <vm/vm_map_xnu.h>
55 #include <vm/vm_pageout_xnu.h>
56 #include <vm/vm_protos_internal.h>
57 #include <vm/vm_dyld_pager_internal.h>
58 #include <vm/vm_ubc.h>
59 #include <vm/vm_page_internal.h>
60 #include <vm/vm_object_internal.h>
61 #include <vm/vm_sanitize_internal.h>
62
63 #include <sys/kdebug_triage.h>
64 #include <mach-o/fixup-chains.h>
65 #if defined(HAS_APPLE_PAC)
66 #include <ptrauth.h>
67 #include <arm/misc_protos.h>
68 #endif /* defined(HAS_APPLE_PAC) */
69
70 extern int panic_on_dyld_issue;
71
72 /*
73 * DYLD page in linking pager.
74 *
75 * This external memory manager (EMM) applies dyld fixup to data
76 * pages, allowing the modified page to appear "clean".
77 *
78 * The modified pages will never be dirtied, so the memory manager doesn't
79 * need to handle page-out requests (from memory_object_data_return()). The
80 * pages are mapped copy-on-write, so that the originals stay clean.
81 */
82
83 /* forward declarations */
84 typedef struct dyld_pager *dyld_pager_t;
85 static void dyld_pager_reference(memory_object_t mem_obj);
86 static void dyld_pager_deallocate(memory_object_t mem_obj);
87 static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
88 static kern_return_t dyld_pager_init(memory_object_t mem_obj,
89 memory_object_control_t control,
90 memory_object_cluster_size_t pg_size);
91 static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
92 static void dyld_pager_terminate_internal(dyld_pager_t pager);
93 static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
94 memory_object_offset_t offset,
95 memory_object_cluster_size_t length,
96 vm_prot_t protection_required,
97 memory_object_fault_info_t fault_info);
98 static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
99 memory_object_offset_t offset,
100 memory_object_cluster_size_t data_cnt,
101 memory_object_offset_t *resid_offset,
102 int *io_error,
103 boolean_t dirty,
104 boolean_t kernel_copy,
105 int upl_flags);
106 static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
107 memory_object_offset_t offset,
108 memory_object_cluster_size_t data_cnt);
109 static kern_return_t dyld_pager_map(memory_object_t mem_obj,
110 vm_prot_t prot);
111 static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
112 static boolean_t dyld_pager_backing_object(
113 memory_object_t mem_obj,
114 memory_object_offset_t mem_obj_offset,
115 vm_object_t *backing_object,
116 vm_object_offset_t *backing_offset);
117 static dyld_pager_t dyld_pager_lookup(memory_object_t mem_obj);
118
119 /*
120 * Vector of VM operations for this EMM.
121 * These routines are invoked by VM via the memory_object_*() interfaces.
122 */
123 const struct memory_object_pager_ops dyld_pager_ops = {
124 .memory_object_reference = dyld_pager_reference,
125 .memory_object_deallocate = dyld_pager_deallocate,
126 .memory_object_init = dyld_pager_init,
127 .memory_object_terminate = dyld_pager_terminate,
128 .memory_object_data_request = dyld_pager_data_request,
129 .memory_object_data_return = dyld_pager_data_return,
130 .memory_object_data_initialize = dyld_pager_data_initialize,
131 .memory_object_map = dyld_pager_map,
132 .memory_object_last_unmap = dyld_pager_last_unmap,
133 .memory_object_backing_object = dyld_pager_backing_object,
134 .memory_object_pager_name = "dyld"
135 };
136
137 /*
138 * The "dyld_pager" structure. We create one of these for each use of
139 * map_with_linking_np() that dyld uses.
140 */
141 struct dyld_pager {
142 struct memory_object dyld_header; /* mandatory generic header */
143
144 #if MEMORY_OBJECT_HAS_REFCOUNT
145 #define dyld_ref_count dyld_header.mo_ref
146 #else
147 os_ref_atomic_t dyld_ref_count; /* active uses */
148 #endif
149 queue_chain_t dyld_pager_queue; /* next & prev pagers */
150 bool dyld_is_mapped; /* has active mappings */
151 bool dyld_is_ready; /* is this pager ready? */
152 vm_object_t dyld_backing_object; /* VM object for shared cache */
153 void *dyld_link_info;
154 uint32_t dyld_link_info_size;
155 uint32_t dyld_num_range;
156 memory_object_offset_t dyld_file_offset[MWL_MAX_REGION_COUNT];
157 mach_vm_address_t dyld_address[MWL_MAX_REGION_COUNT];
158 mach_vm_size_t dyld_size[MWL_MAX_REGION_COUNT];
159 #if defined(HAS_APPLE_PAC)
160 uint64_t dyld_a_key;
161 #endif /* defined(HAS_APPLE_PAC) */
162 };
163
164 queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);
165
166 /*
167 * "dyld_pager_lock" for counters, ref counting, etc.
168 */
169 LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
170 LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);
171
172 /*
173 * Statistics & counters.
174 */
175 uint32_t dyld_pager_count = 0;
176 uint32_t dyld_pager_count_max = 0;
177
178 /*
179 * dyld_pager_dequeue()
180 *
181 * Removes a pager from the list of pagers.
182 *
183 * The caller must hold "dyld_pager".
184 */
185 static void
dyld_pager_dequeue(__unused dyld_pager_t pager)186 dyld_pager_dequeue(
187 __unused dyld_pager_t pager)
188 {
189 queue_remove(&dyld_pager_queue,
190 pager,
191 dyld_pager_t,
192 dyld_pager_queue);
193 pager->dyld_pager_queue.next = NULL;
194 pager->dyld_pager_queue.prev = NULL;
195 dyld_pager_count--;
196 }
197
198 /*
199 * dyld_pager_init()
200 *
201 * Initialize the memory object and makes it ready to be used and mapped.
202 */
203 static kern_return_t
dyld_pager_init(memory_object_t mem_obj,memory_object_control_t control,__unused memory_object_cluster_size_t pg_size)204 dyld_pager_init(
205 memory_object_t mem_obj,
206 memory_object_control_t control,
207 __unused
208 memory_object_cluster_size_t pg_size)
209 {
210 dyld_pager_t pager;
211 kern_return_t kr;
212 memory_object_attr_info_data_t attributes;
213
214 if (control == MEMORY_OBJECT_CONTROL_NULL) {
215 printf("%s(): control NULL\n", __func__);
216 return KERN_INVALID_ARGUMENT;
217 }
218
219 pager = dyld_pager_lookup(mem_obj);
220
221 memory_object_control_reference(control);
222
223 pager->dyld_header.mo_control = control;
224
225 attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
226 attributes.cluster_size = (1 << (PAGE_SHIFT));
227 attributes.may_cache_object = FALSE;
228 attributes.temporary = TRUE;
229
230 kr = memory_object_change_attributes(
231 control,
232 MEMORY_OBJECT_ATTRIBUTE_INFO,
233 (memory_object_info_t) &attributes,
234 MEMORY_OBJECT_ATTR_INFO_COUNT);
235 if (kr != KERN_SUCCESS) {
236 panic("dyld_pager_init: " "memory_object_change_attributes() failed");
237 }
238
239 return KERN_SUCCESS;
240 }
241
242 /*
243 * dyld_data_return()
244 *
245 * A page-out request from VM -- should never happen so panic.
246 */
247 static kern_return_t
dyld_pager_data_return(__unused memory_object_t mem_obj,__unused memory_object_offset_t offset,__unused memory_object_cluster_size_t data_cnt,__unused memory_object_offset_t * resid_offset,__unused int * io_error,__unused boolean_t dirty,__unused boolean_t kernel_copy,__unused int upl_flags)248 dyld_pager_data_return(
249 __unused memory_object_t mem_obj,
250 __unused memory_object_offset_t offset,
251 __unused memory_object_cluster_size_t data_cnt,
252 __unused memory_object_offset_t *resid_offset,
253 __unused int *io_error,
254 __unused boolean_t dirty,
255 __unused boolean_t kernel_copy,
256 __unused int upl_flags)
257 {
258 panic("dyld_pager_data_return: should never happen!");
259 return KERN_FAILURE;
260 }
261
/*
 * dyld_pager_data_initialize()
 *
 * Should never be called for this pager -- panic if it is.
 */
static kern_return_t
dyld_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("dyld_pager_data_initialize: should never happen");
	return KERN_FAILURE;
}
271
272
273 /*
274 * Apply fixups to a page used by a 64 bit process.
275 */
276 static kern_return_t
fixupPage64(uint64_t userVA,vm_offset_t contents,vm_offset_t end_contents,void * link_info,struct dyld_chained_starts_in_segment * segInfo,uint32_t pageIndex,bool offsetBased)277 fixupPage64(
278 uint64_t userVA,
279 vm_offset_t contents,
280 vm_offset_t end_contents,
281 void *link_info,
282 struct dyld_chained_starts_in_segment *segInfo,
283 uint32_t pageIndex,
284 bool offsetBased)
285 {
286 struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info;
287 uint64_t *bindsArray = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
288 uint16_t firstStartOffset = segInfo->page_start[pageIndex];
289
290 /*
291 * Done if no fixups on the page
292 */
293 if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
294 return KERN_SUCCESS;
295 }
296
297 /*
298 * walk the chain
299 */
300 uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
301 uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
302 uint64_t delta = 0;
303 do {
304 if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
305 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
306 printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
307 (long long)chain, (long long)contents, (long long)end_contents);
308 if (panic_on_dyld_issue) {
309 panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
310 (long long)chain, (long long)contents, (long long)end_contents);
311 }
312
313 return KERN_FAILURE;
314 }
315 uint64_t value = *chain;
316 bool isBind = (value & 0x8000000000000000ULL);
317 delta = (value >> 51) & 0xFFF;
318 if (isBind) {
319 uint32_t bindOrdinal = value & 0x00FFFFFF;
320 if (bindOrdinal >= hdr->mwli_binds_count) {
321 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
322 printf("%s out of range bind ordinal %u (max %u)\n", __func__,
323 bindOrdinal, hdr->mwli_binds_count);
324 if (panic_on_dyld_issue) {
325 panic("%s out of range bind ordinal %u (max %u)", __func__,
326 bindOrdinal, hdr->mwli_binds_count);
327 }
328 return KERN_FAILURE;
329 }
330 uint32_t addend = (value >> 24) & 0xFF;
331 *chain = bindsArray[bindOrdinal] + addend;
332 } else {
333 /* is rebase */
334 uint64_t target = value & 0xFFFFFFFFFULL;
335 uint64_t high8 = (value >> 36) & 0xFF;
336 *chain = target + targetAdjust + (high8 << 56);
337 }
338 if (delta * 4 >= PAGE_SIZE) {
339 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
340 printf("%s(): delta offset > page size %lld\n", __func__, delta * 4);
341 if (panic_on_dyld_issue) {
342 panic("%s(): delta offset > page size %lld", __func__, delta * 4);
343 }
344 return KERN_FAILURE;
345 }
346 chain = (uint64_t *)((uintptr_t)chain + (delta * 4)); // 4-byte stride
347 } while (delta != 0);
348 return KERN_SUCCESS;
349 }
350
351
352 /*
353 * Apply fixups within a page used by a 32 bit process.
354 */
355 static kern_return_t
fixupChain32(uint64_t userVA,uint32_t * chain,vm_offset_t contents,vm_offset_t end_contents,void * link_info,struct dyld_chained_starts_in_segment * segInfo,uint32_t * bindsArray)356 fixupChain32(
357 uint64_t userVA,
358 uint32_t *chain,
359 vm_offset_t contents,
360 vm_offset_t end_contents,
361 void *link_info,
362 struct dyld_chained_starts_in_segment *segInfo,
363 uint32_t *bindsArray)
364 {
365 struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info;
366 uint32_t delta = 0;
367
368 do {
369 if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
370 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
371 printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
372 (long long)chain, (long long)contents, (long long)end_contents);
373 if (panic_on_dyld_issue) {
374 panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
375 (long long)chain, (long long)contents, (long long)end_contents);
376 }
377 return KERN_FAILURE;
378 }
379 uint32_t value = *chain;
380 delta = (value >> 26) & 0x1F;
381 if (value & 0x80000000) {
382 // is bind
383 uint32_t bindOrdinal = value & 0x000FFFFF;
384 if (bindOrdinal >= hdr->mwli_binds_count) {
385 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
386 printf("%s(): out of range bind ordinal %u (max %u)\n",
387 __func__, bindOrdinal, hdr->mwli_binds_count);
388 if (panic_on_dyld_issue) {
389 panic("%s(): out of range bind ordinal %u (max %u)",
390 __func__, bindOrdinal, hdr->mwli_binds_count);
391 }
392 return KERN_FAILURE;
393 }
394 uint32_t addend = (value >> 20) & 0x3F;
395 *chain = bindsArray[bindOrdinal] + addend;
396 } else {
397 // is rebase
398 uint32_t target = value & 0x03FFFFFF;
399 if (target > segInfo->max_valid_pointer) {
400 // handle non-pointers in chain
401 uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
402 *chain = target - bias;
403 } else {
404 *chain = target + (uint32_t)hdr->mwli_slide;
405 }
406 }
407 chain += delta;
408 } while (delta != 0);
409 return KERN_SUCCESS;
410 }
411
412
413 /*
414 * Apply fixups to a page used by a 32 bit process.
415 */
416 static kern_return_t
fixupPage32(uint64_t userVA,vm_offset_t contents,vm_offset_t end_contents,void * link_info,uint32_t link_info_size,struct dyld_chained_starts_in_segment * segInfo,uint32_t pageIndex)417 fixupPage32(
418 uint64_t userVA,
419 vm_offset_t contents,
420 vm_offset_t end_contents,
421 void *link_info,
422 uint32_t link_info_size,
423 struct dyld_chained_starts_in_segment *segInfo,
424 uint32_t pageIndex)
425 {
426 struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info;
427 uint32_t *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
428 uint16_t startOffset = segInfo->page_start[pageIndex];
429
430 /*
431 * done if no fixups
432 */
433 if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
434 return KERN_SUCCESS;
435 }
436
437 if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
438 // some fixups in the page are too far apart, so page has multiple starts
439 uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
440 bool chainEnd = false;
441 while (!chainEnd) {
442 /*
443 * range check against link_info, note +1 to include data we'll dereference
444 */
445 if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
446 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
447 printf("%s(): out of range segInfo->page_start[overflowIndex]\n", __func__);
448 if (panic_on_dyld_issue) {
449 panic("%s(): out of range segInfo->page_start[overflowIndex]", __func__);
450 }
451 return KERN_FAILURE;
452 }
453 chainEnd = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
454 startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
455 uint32_t *chain = (uint32_t *)(contents + startOffset);
456 fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
457 ++overflowIndex;
458 }
459 } else {
460 uint32_t *chain = (uint32_t *)(contents + startOffset);
461 fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
462 }
463 return KERN_SUCCESS;
464 }
465
466 #if defined(HAS_APPLE_PAC)
467 /*
468 * Sign a pointer needed for fixups.
469 */
470 static kern_return_t
signPointer(uint64_t unsignedAddr,void * loc,bool addrDiv,uint16_t diversity,ptrauth_key key,dyld_pager_t pager,uint64_t * signedAddr)471 signPointer(
472 uint64_t unsignedAddr,
473 void *loc,
474 bool addrDiv,
475 uint16_t diversity,
476 ptrauth_key key,
477 dyld_pager_t pager,
478 uint64_t *signedAddr)
479 {
480 // don't sign NULL
481 if (unsignedAddr == 0) {
482 *signedAddr = 0;
483 return KERN_SUCCESS;
484 }
485
486 uint64_t extendedDiscriminator = diversity;
487 if (addrDiv) {
488 extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
489 }
490
491 switch (key) {
492 case ptrauth_key_asia:
493 case ptrauth_key_asda:
494 if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
495 *signedAddr = unsignedAddr;
496 } else {
497 *signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
498 }
499 break;
500
501 default:
502 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
503 printf("%s(): Invalid ptr auth key %d\n", __func__, key);
504 if (panic_on_dyld_issue) {
505 panic("%s(): Invalid ptr auth key %d", __func__, key);
506 }
507 return KERN_FAILURE;
508 }
509 return KERN_SUCCESS;
510 }
511
512 /*
513 * Apply fixups to a page used by a 64 bit process using pointer authentication.
514 */
515 static kern_return_t
fixupPageAuth64(uint64_t userVA,vm_offset_t contents,vm_offset_t end_contents,dyld_pager_t pager,struct dyld_chained_starts_in_segment * segInfo,uint32_t pageIndex,bool offsetBased)516 fixupPageAuth64(
517 uint64_t userVA,
518 vm_offset_t contents,
519 vm_offset_t end_contents,
520 dyld_pager_t pager,
521 struct dyld_chained_starts_in_segment *segInfo,
522 uint32_t pageIndex,
523 bool offsetBased)
524 {
525 void *link_info = pager->dyld_link_info;
526 uint32_t link_info_size = pager->dyld_link_info_size;
527 struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info;
528 uint64_t *bindsArray = (uint64_t*)((uintptr_t)link_info + hdr->mwli_binds_offset);
529
530 /*
531 * range check against link_info, note +1 to include data we'll dereference
532 */
533 if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
534 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
535 printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
536 if (panic_on_dyld_issue) {
537 panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
538 }
539 return KERN_FAILURE;
540 }
541 uint16_t firstStartOffset = segInfo->page_start[pageIndex];
542
543 /*
544 * All done if no fixups on the page
545 */
546 if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
547 return KERN_SUCCESS;
548 }
549
550 /*
551 * Walk the chain of offsets to fix up
552 */
553 uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
554 uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
555 uint64_t delta = 0;
556 do {
557 if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
558 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
559 printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
560 (long long)chain, (long long)contents, (long long)end_contents);
561 if (panic_on_dyld_issue) {
562 panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
563 (long long)chain, (long long)contents, (long long)end_contents);
564 }
565 return KERN_FAILURE;
566 }
567 uint64_t value = *chain;
568 delta = (value >> 51) & 0x7FF;
569 bool isAuth = (value & 0x8000000000000000ULL);
570 bool isBind = (value & 0x4000000000000000ULL);
571 if (isAuth) {
572 ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
573 bool addrDiv = ((value & (1ULL << 48)) != 0);
574 uint16_t diversity = (uint16_t)((value >> 32) & 0xFFFF);
575 uintptr_t uVA = userVA + ((uintptr_t)chain - contents);
576 if (isBind) {
577 uint32_t bindOrdinal = value & 0x00FFFFFF;
578 if (bindOrdinal >= hdr->mwli_binds_count) {
579 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
580 printf("%s(): out of range bind ordinal %u (max %u)\n",
581 __func__, bindOrdinal, hdr->mwli_binds_count);
582 if (panic_on_dyld_issue) {
583 panic("%s(): out of range bind ordinal %u (max %u)",
584 __func__, bindOrdinal, hdr->mwli_binds_count);
585 }
586 return KERN_FAILURE;
587 }
588 if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
589 return KERN_FAILURE;
590 }
591 } else {
592 /* note: in auth rebases only have 32-bits, so target is always offset - never vmaddr */
593 uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
594 if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
595 return KERN_FAILURE;
596 }
597 }
598 } else {
599 if (isBind) {
600 uint32_t bindOrdinal = value & 0x00FFFFFF;
601 if (bindOrdinal >= hdr->mwli_binds_count) {
602 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
603 printf("%s(): out of range bind ordinal %u (max %u)\n",
604 __func__, bindOrdinal, hdr->mwli_binds_count);
605 if (panic_on_dyld_issue) {
606 panic("%s(): out of range bind ordinal %u (max %u)",
607 __func__, bindOrdinal, hdr->mwli_binds_count);
608 }
609 return KERN_FAILURE;
610 } else {
611 uint64_t addend19 = (value >> 32) & 0x0007FFFF;
612 if (addend19 & 0x40000) {
613 addend19 |= 0xFFFFFFFFFFFC0000ULL;
614 }
615 *chain = bindsArray[bindOrdinal] + addend19;
616 }
617 } else {
618 uint64_t target = (value & 0x7FFFFFFFFFFULL);
619 uint64_t high8 = (value << 13) & 0xFF00000000000000ULL;
620 *chain = target + targetAdjust + high8;
621 }
622 }
623 chain += delta;
624 } while (delta != 0);
625 return KERN_SUCCESS;
626 }
627
628 /*
629 * Apply fixups to a page used by a 64 bit process using pointer authentication.
630 */
631 static kern_return_t
fixupCachePageAuth64(uint64_t userVA,vm_offset_t contents,vm_offset_t end_contents,dyld_pager_t pager,struct dyld_chained_starts_in_segment * segInfo,uint32_t pageIndex)632 fixupCachePageAuth64(
633 uint64_t userVA,
634 vm_offset_t contents,
635 vm_offset_t end_contents,
636 dyld_pager_t pager,
637 struct dyld_chained_starts_in_segment *segInfo,
638 uint32_t pageIndex)
639 {
640 void *link_info = pager->dyld_link_info;
641 uint32_t link_info_size = pager->dyld_link_info_size;
642 struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info;
643
644 /*
645 * range check against link_info, note +1 to include data we'll dereference
646 */
647 if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
648 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
649 printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
650 if (panic_on_dyld_issue) {
651 panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
652 }
653 return KERN_FAILURE;
654 }
655 uint16_t firstStartOffset = segInfo->page_start[pageIndex];
656
657 /*
658 * All done if no fixups on the page
659 */
660 if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
661 return KERN_SUCCESS;
662 }
663
664 /*
665 * Walk the chain of offsets to fix up
666 */
667 uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
668 uint64_t delta = 0;
669 do {
670 if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
671 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
672 printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
673 (long long)chain, (long long)contents, (long long)end_contents);
674 if (panic_on_dyld_issue) {
675 panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
676 (long long)chain, (long long)contents, (long long)end_contents);
677 }
678 return KERN_FAILURE;
679 }
680 uint64_t value = *chain;
681 delta = (value >> 52) & 0x7FF;
682 bool isAuth = (value & 0x8000000000000000ULL);
683 if (isAuth) {
684 bool addrDiv = ((value & (1ULL << 50)) != 0);
685 bool keyIsData = ((value & (1ULL << 51)) != 0);
686 // the key is always A, and the bit tells us if its IA or ID
687 ptrauth_key key = keyIsData ? ptrauth_key_asda : ptrauth_key_asia;
688 uint16_t diversity = (uint16_t)((value >> 34) & 0xFFFF);
689 uintptr_t uVA = userVA + ((uintptr_t)chain - contents);
690 // target is always a 34-bit runtime offset, never a vmaddr
691 uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
692 if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
693 return KERN_FAILURE;
694 }
695 } else {
696 // target is always a 34-bit runtime offset, never a vmaddr
697 uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
698 uint64_t high8 = (value << 22) & 0xFF00000000000000ULL;
699 *chain = target + high8;
700 }
701 chain += delta;
702 } while (delta != 0);
703 return KERN_SUCCESS;
704 }
705 #endif /* defined(HAS_APPLE_PAC) */
706
707
708 /*
709 * Handle dyld fixups for a page.
710 */
711 static kern_return_t
fixup_page(vm_offset_t contents,vm_offset_t end_contents,uint64_t userVA,dyld_pager_t pager)712 fixup_page(
713 vm_offset_t contents,
714 vm_offset_t end_contents,
715 uint64_t userVA,
716 dyld_pager_t pager)
717 {
718 void *link_info = pager->dyld_link_info;
719 uint32_t link_info_size = pager->dyld_link_info_size;
720 struct mwl_info_hdr *hdr = (struct mwl_info_hdr *)link_info;
721 struct dyld_chained_starts_in_segment *segInfo = NULL;
722 uint32_t pageIndex = 0;
723 uint32_t segIndex;
724 struct dyld_chained_starts_in_image *startsInfo;
725 struct dyld_chained_starts_in_segment *seg;
726 uint64_t segStartAddress;
727 uint64_t segEndAddress;
728
729 /*
730 * Note this is a linear search done for every page we have to fix up.
731 * However, it should be quick as there should only be 2 or 4 segments:
732 * - data
733 * - data const
734 * - data auth (for arm64e)
735 * - data const auth (for arm64e)
736 */
737 startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
738 for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
739 seg = (struct dyld_chained_starts_in_segment *)
740 ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);
741
742 /*
743 * ensure we don't go out of bounds of the link_info
744 */
745 if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
746 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
747 printf("%s(): seg_info out of bounds\n", __func__);
748 if (panic_on_dyld_issue) {
749 panic("%s(): seg_info out of bounds", __func__);
750 }
751 return KERN_FAILURE;
752 }
753
754 segStartAddress = hdr->mwli_image_address + seg->segment_offset;
755 segEndAddress = segStartAddress + seg->page_count * seg->page_size;
756 if (segStartAddress <= userVA && userVA < segEndAddress) {
757 segInfo = seg;
758 pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;
759
760 /* ensure seg->size fits in link_info_size */
761 if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
762 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
763 printf("%s(): seg->size out of bounds\n", __func__);
764 if (panic_on_dyld_issue) {
765 panic("%s(): seg->size out of bounds", __func__);
766 }
767 return KERN_FAILURE;
768 }
769 if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
770 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
771 printf("%s(): seg->size too small\n", __func__);
772 if (panic_on_dyld_issue) {
773 panic("%s(): seg->size too small", __func__);
774 }
775 return KERN_FAILURE;
776 }
777 /* ensure page_count and pageIndex are valid too */
778 if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
779 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
780 printf("%s(): seg->page_count out of bounds\n", __func__);
781 if (panic_on_dyld_issue) {
782 panic("%s(): seg->page_count out of bounds", __func__);
783 }
784 return KERN_FAILURE;
785 }
786 if (pageIndex >= seg->page_count) {
787 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
788 printf("%s(): seg->page_count too small\n", __func__);
789 if (panic_on_dyld_issue) {
790 panic("%s(): seg->page_count too small", __func__);
791 }
792 return KERN_FAILURE;
793 }
794
795 break;
796 }
797 }
798
799 /*
800 * Question for Nick.. or can we make this OK and just return KERN_SUCCESS, nothing to do?
801 */
802 if (segInfo == NULL) {
803 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
804 printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
805 if (panic_on_dyld_issue) {
806 panic("%s(): No segment for user VA 0x%llx", __func__, (long long)userVA);
807 }
808 return KERN_FAILURE;
809 }
810
811 /*
812 * Route to the appropriate fixup routine
813 */
814 switch (hdr->mwli_pointer_format) {
815 #if defined(HAS_APPLE_PAC)
816 case DYLD_CHAINED_PTR_ARM64E:
817 fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, false);
818 break;
819 case DYLD_CHAINED_PTR_ARM64E_USERLAND:
820 case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
821 fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, true);
822 break;
823 case DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE:
824 fixupCachePageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex);
825 break;
826 #endif /* defined(HAS_APPLE_PAC) */
827 case DYLD_CHAINED_PTR_64:
828 fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, false);
829 break;
830 case DYLD_CHAINED_PTR_64_OFFSET:
831 fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, true);
832 break;
833 case DYLD_CHAINED_PTR_32:
834 fixupPage32(userVA, contents, end_contents, link_info, link_info_size, segInfo, pageIndex);
835 break;
836 default:
837 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
838 printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
839 if (panic_on_dyld_issue) {
840 panic("%s(): unknown pointer_format %d", __func__, hdr->mwli_pointer_format);
841 }
842 return KERN_FAILURE;
843 }
844 return KERN_SUCCESS;
845 }
846
847 /*
848 * dyld_pager_data_request()
849 *
850 * Handles page-in requests from VM.
851 */
852 static kern_return_t
dyld_pager_data_request(memory_object_t mem_obj,memory_object_offset_t offset,memory_object_cluster_size_t length,__unused vm_prot_t protection_required,memory_object_fault_info_t mo_fault_info)853 dyld_pager_data_request(
854 memory_object_t mem_obj,
855 memory_object_offset_t offset,
856 memory_object_cluster_size_t length,
857 __unused vm_prot_t protection_required,
858 memory_object_fault_info_t mo_fault_info)
859 {
860 dyld_pager_t pager;
861 memory_object_control_t mo_control;
862 upl_t upl = NULL;
863 int upl_flags;
864 upl_size_t upl_size;
865 upl_page_info_t *upl_pl = NULL;
866 unsigned int pl_count;
867 vm_object_t src_top_object = VM_OBJECT_NULL;
868 vm_object_t src_page_object = VM_OBJECT_NULL;
869 vm_object_t dst_object;
870 kern_return_t kr;
871 kern_return_t retval = KERN_SUCCESS;
872 vm_fault_return_t vmfr;
873 vm_offset_t src_vaddr;
874 vm_offset_t dst_vaddr;
875 vm_offset_t cur_offset;
876 kern_return_t error_code;
877 vm_prot_t prot;
878 vm_page_t src_page, top_page;
879 int interruptible;
880 struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
881 struct mwl_info_hdr *hdr;
882 uint32_t r;
883 uint64_t userVA;
884
885 fault_info.stealth = TRUE;
886 fault_info.io_sync = FALSE;
887 fault_info.mark_zf_absent = FALSE;
888 fault_info.batch_pmap_op = FALSE;
889 interruptible = fault_info.interruptible;
890
891 pager = dyld_pager_lookup(mem_obj);
892 assert(pager->dyld_is_ready);
893 assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
894 assert(pager->dyld_is_mapped); /* pager is mapped */
895 hdr = (struct mwl_info_hdr *)pager->dyld_link_info;
896
897 /*
898 * Gather in a UPL all the VM pages requested by VM.
899 */
900 mo_control = pager->dyld_header.mo_control;
901
902 upl_size = length;
903 upl_flags =
904 UPL_RET_ONLY_ABSENT |
905 UPL_SET_LITE |
906 UPL_NO_SYNC |
907 UPL_CLEAN_IN_PLACE | /* triggers UPL_CLEAR_DIRTY */
908 UPL_SET_INTERNAL;
909 pl_count = 0;
910 kr = memory_object_upl_request(mo_control,
911 offset, upl_size,
912 &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
913 if (kr != KERN_SUCCESS) {
914 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), kr /* arg */);
915 if (panic_on_dyld_issue) {
916 panic("%s(): upl_request(%p, 0x%llx, 0x%llx) ret %d", __func__,
917 mo_control, offset, (uint64_t)upl_size, kr);
918 }
919 retval = kr;
920 goto done;
921 }
922 dst_object = memory_object_control_to_vm_object(mo_control);
923 assert(dst_object != VM_OBJECT_NULL);
924
925 /*
926 * We'll map the original data in the kernel address space from the
927 * backing VM object, itself backed by the executable/library file via
928 * the vnode pager.
929 */
930 src_top_object = pager->dyld_backing_object;
931 assert(src_top_object != VM_OBJECT_NULL);
932 vm_object_reference(src_top_object); /* keep the source object alive */
933
934 /*
935 * Fill in the contents of the pages requested by VM.
936 */
937 upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
938 pl_count = length / PAGE_SIZE;
939 for (cur_offset = 0;
940 retval == KERN_SUCCESS && cur_offset < length;
941 cur_offset += PAGE_SIZE) {
942 ppnum_t dst_pnum;
943
944 if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
945 /* this page is not in the UPL: skip it */
946 continue;
947 }
948
949 /*
950 * Map the source page in the kernel's virtual address space.
951 * We already hold a reference on the src_top_object.
952 */
953 retry_src_fault:
954 vm_object_lock(src_top_object);
955 vm_object_paging_begin(src_top_object);
956 error_code = 0;
957 prot = VM_PROT_READ;
958 src_page = VM_PAGE_NULL;
959 vmfr = vm_fault_page(src_top_object,
960 offset + cur_offset,
961 VM_PROT_READ,
962 FALSE,
963 FALSE, /* src_page not looked up */
964 &prot,
965 &src_page,
966 &top_page,
967 NULL,
968 &error_code,
969 FALSE,
970 &fault_info);
971 switch (vmfr) {
972 case VM_FAULT_SUCCESS:
973 break;
974 case VM_FAULT_RETRY:
975 goto retry_src_fault;
976 case VM_FAULT_MEMORY_SHORTAGE:
977 if (vm_page_wait(interruptible)) {
978 goto retry_src_fault;
979 }
980 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
981 OS_FALLTHROUGH;
982 case VM_FAULT_INTERRUPTED:
983 retval = MACH_SEND_INTERRUPTED;
984 goto done;
985 case VM_FAULT_SUCCESS_NO_VM_PAGE:
986 /* success but no VM page: fail */
987 vm_object_paging_end(src_top_object);
988 vm_object_unlock(src_top_object);
989 OS_FALLTHROUGH;
990 case VM_FAULT_MEMORY_ERROR:
991 /* the page is not there ! */
992 if (error_code) {
993 retval = error_code;
994 } else {
995 retval = KERN_MEMORY_ERROR;
996 }
997 goto done;
998 case VM_FAULT_BUSY:
999 retval = KERN_ALREADY_WAITING;
1000 goto done;
1001 }
1002 assert(src_page != VM_PAGE_NULL);
1003 assert(src_page->vmp_busy);
1004
1005 if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
1006 vm_page_lockspin_queues();
1007 if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
1008 vm_page_speculate(src_page, FALSE);
1009 }
1010 vm_page_unlock_queues();
1011 }
1012
1013 /*
1014 * Establish pointers to the source and destination physical pages.
1015 */
1016 dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
1017 assert(dst_pnum != 0);
1018
1019 src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
1020 dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
1021 src_page_object = VM_PAGE_OBJECT(src_page);
1022
1023 /*
1024 * Validate the original page...
1025 */
1026 if (src_page_object->code_signed) {
1027 vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
1028 }
1029
1030 /*
1031 * ... and transfer the results to the destination page.
1032 */
1033 UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
1034 UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
1035 UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);
1036
1037 /*
1038 * The page provider might access a mapped file, so let's
1039 * release the object lock for the source page to avoid a
1040 * potential deadlock.
1041 * The source page is kept busy and we have a
1042 * "paging_in_progress" reference on its object, so it's safe
1043 * to unlock the object here.
1044 */
1045 assert(src_page->vmp_busy);
1046 assert(src_page_object->paging_in_progress > 0);
1047 vm_object_unlock(src_page_object);
1048
1049 /*
1050 * Process the original contents of the source page
1051 * into the destination page.
1052 */
1053 bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);
1054
1055 /*
1056 * Figure out what the original user virtual address was, based on the offset.
1057 */
1058 userVA = 0;
1059 for (r = 0; r < pager->dyld_num_range; ++r) {
1060 vm_offset_t o = offset + cur_offset;
1061 if (pager->dyld_file_offset[r] <= o &&
1062 o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
1063 userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
1064 break;
1065 }
1066 }
1067
1068 /*
1069 * If we have a valid range fixup the page.
1070 */
1071 if (r == pager->dyld_num_range) {
1072 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
1073 printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
1074 if (panic_on_dyld_issue) {
1075 panic("%s(): Range not found for offset 0x%llx", __func__, (long long)cur_offset);
1076 }
1077 retval = KERN_FAILURE;
1078 } else if (fixup_page(dst_vaddr, dst_vaddr + PAGE_SIZE, userVA, pager) != KERN_SUCCESS) {
1079 /* KDBG / printf was done under fixup_page() */
1080 retval = KERN_FAILURE;
1081 }
1082 if (retval != KERN_SUCCESS) {
1083 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
1084 if (panic_on_dyld_issue) {
1085 panic("%s(): dyld pager slide error %d at 0x%llx", __func__, retval, (uint64_t)userVA);
1086 }
1087 }
1088
1089 assert(VM_PAGE_OBJECT(src_page) == src_page_object);
1090 assert(src_page->vmp_busy);
1091 assert(src_page_object->paging_in_progress > 0);
1092 vm_object_lock(src_page_object);
1093
1094 /*
1095 * Cleanup the result of vm_fault_page() of the source page.
1096 */
1097 vm_page_wakeup_done(src_top_object, src_page);
1098 src_page = VM_PAGE_NULL;
1099 vm_object_paging_end(src_page_object);
1100 vm_object_unlock(src_page_object);
1101
1102 if (top_page != VM_PAGE_NULL) {
1103 assert(VM_PAGE_OBJECT(top_page) == src_top_object);
1104 vm_object_lock(src_top_object);
1105 VM_PAGE_FREE(top_page);
1106 vm_object_paging_end(src_top_object);
1107 vm_object_unlock(src_top_object);
1108 }
1109 }
1110
1111 done:
1112 if (upl != NULL) {
1113 /* clean up the UPL */
1114
1115 /*
1116 * The pages are currently dirty because we've just been
1117 * writing on them, but as far as we're concerned, they're
1118 * clean since they contain their "original" contents as
1119 * provided by us, the pager.
1120 * Tell the UPL to mark them "clean".
1121 */
1122 upl_clear_dirty(upl, TRUE);
1123
1124 /* abort or commit the UPL */
1125 if (retval != KERN_SUCCESS) {
1126 upl_abort(upl, 0);
1127 } else {
1128 boolean_t empty;
1129 assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
1130 "upl %p offset 0x%llx size 0x%x\n",
1131 upl, upl->u_offset, upl->u_size);
1132 upl_commit_range(upl, 0, upl->u_size,
1133 UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
1134 upl_pl, pl_count, &empty);
1135 }
1136
1137 /* and deallocate the UPL */
1138 upl_deallocate(upl);
1139 upl = NULL;
1140 }
1141 if (src_top_object != VM_OBJECT_NULL) {
1142 vm_object_deallocate(src_top_object);
1143 }
1144 return retval;
1145 }
1146
1147 /*
1148 * dyld_pager_reference()
1149 *
1150 * Get a reference on this memory object.
1151 * For external usage only. Assumes that the initial reference count is not 0,
1152 * i.e one should not "revive" a dead pager this way.
1153 */
1154 static void
dyld_pager_reference(memory_object_t mem_obj)1155 dyld_pager_reference(
1156 memory_object_t mem_obj)
1157 {
1158 dyld_pager_t pager;
1159
1160 pager = dyld_pager_lookup(mem_obj);
1161
1162 lck_mtx_lock(&dyld_pager_lock);
1163 os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1164 lck_mtx_unlock(&dyld_pager_lock);
1165 }
1166
1167
1168
1169 /*
1170 * dyld_pager_terminate_internal:
1171 *
1172 * Trigger the asynchronous termination of the memory object associated
1173 * with this pager.
1174 * When the memory object is terminated, there will be one more call
1175 * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
1176 * to finish the clean up.
1177 *
1178 * "dyld_pager_lock" should not be held by the caller.
1179 */
1180 static void
dyld_pager_terminate_internal(dyld_pager_t pager)1181 dyld_pager_terminate_internal(
1182 dyld_pager_t pager)
1183 {
1184 assert(pager->dyld_is_ready);
1185 assert(!pager->dyld_is_mapped);
1186 assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);
1187
1188 if (pager->dyld_backing_object != VM_OBJECT_NULL) {
1189 vm_object_deallocate(pager->dyld_backing_object);
1190 pager->dyld_backing_object = VM_OBJECT_NULL;
1191 }
1192 /* trigger the destruction of the memory object */
1193 memory_object_destroy(pager->dyld_header.mo_control, VM_OBJECT_DESTROY_PAGER);
1194 }
1195
1196 /*
1197 * dyld_pager_deallocate_internal()
1198 *
1199 * Release a reference on this pager and free it when the last reference goes away.
1200 * Can be called with dyld_pager_lock held or not, but always returns
1201 * with it unlocked.
1202 */
/*
 * Drop one reference on "pager"; the count transitions drive teardown:
 *   count 1  -> dequeue and terminate the memory object,
 *   count 0  -> final cleanup, free the pager structure.
 */
static void
dyld_pager_deallocate_internal(
	dyld_pager_t pager,
	bool locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		/* the pager owned the link info blob: free it now */
		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}
1251
1252 /*
1253 * dyld_pager_deallocate()
1254 *
1255 * Release a reference on this pager and free it when the last
1256 * reference goes away.
1257 */
1258 static void
dyld_pager_deallocate(memory_object_t mem_obj)1259 dyld_pager_deallocate(
1260 memory_object_t mem_obj)
1261 {
1262 dyld_pager_t pager;
1263
1264 pager = dyld_pager_lookup(mem_obj);
1265 dyld_pager_deallocate_internal(pager, FALSE);
1266 }
1267
1268 /*
1269 *
1270 */
/*
 * memory_object_terminate() entry point: nothing to do here, the real
 * teardown is performed by dyld_pager_deallocate_internal() when the
 * last reference is dropped.
 */
static kern_return_t
dyld_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}
1280
1281 /*
1282 * dyld_pager_map()
1283 *
1284 * This allows VM to let us, the EMM, know that this memory object
1285 * is currently mapped one or more times. This is called by VM each time
1286 * the memory object gets mapped, but we only take one extra reference the
1287 * first time it is called.
1288 */
1289 static kern_return_t
dyld_pager_map(memory_object_t mem_obj,__unused vm_prot_t prot)1290 dyld_pager_map(
1291 memory_object_t mem_obj,
1292 __unused vm_prot_t prot)
1293 {
1294 dyld_pager_t pager;
1295
1296 pager = dyld_pager_lookup(mem_obj);
1297
1298 lck_mtx_lock(&dyld_pager_lock);
1299 assert(pager->dyld_is_ready);
1300 assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
1301 if (!pager->dyld_is_mapped) {
1302 pager->dyld_is_mapped = TRUE;
1303 os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1304 }
1305 lck_mtx_unlock(&dyld_pager_lock);
1306
1307 return KERN_SUCCESS;
1308 }
1309
1310 /*
1311 * dyld_pager_last_unmap()
1312 *
1313 * This is called by VM when this memory object is no longer mapped anywhere.
1314 */
1315 static kern_return_t
dyld_pager_last_unmap(memory_object_t mem_obj)1316 dyld_pager_last_unmap(
1317 memory_object_t mem_obj)
1318 {
1319 dyld_pager_t pager;
1320
1321 pager = dyld_pager_lookup(mem_obj);
1322
1323 lck_mtx_lock(&dyld_pager_lock);
1324 if (pager->dyld_is_mapped) {
1325 /*
1326 * All the mappings are gone, so let go of the one extra
1327 * reference that represents all the mappings of this pager.
1328 */
1329 pager->dyld_is_mapped = FALSE;
1330 dyld_pager_deallocate_internal(pager, TRUE);
1331 /* caution: deallocate_internal() released the lock ! */
1332 } else {
1333 lck_mtx_unlock(&dyld_pager_lock);
1334 }
1335
1336 return KERN_SUCCESS;
1337 }
1338
1339 static boolean_t
dyld_pager_backing_object(memory_object_t mem_obj,memory_object_offset_t offset,vm_object_t * backing_object,vm_object_offset_t * backing_offset)1340 dyld_pager_backing_object(
1341 memory_object_t mem_obj,
1342 memory_object_offset_t offset,
1343 vm_object_t *backing_object,
1344 vm_object_offset_t *backing_offset)
1345 {
1346 dyld_pager_t pager;
1347
1348 pager = dyld_pager_lookup(mem_obj);
1349
1350 *backing_object = pager->dyld_backing_object;
1351 *backing_offset = offset;
1352
1353 return TRUE;
1354 }
1355
1356
1357 /*
1358 * Convert from memory_object to dyld_pager.
1359 */
1360 static dyld_pager_t
dyld_pager_lookup(memory_object_t mem_obj)1361 dyld_pager_lookup(
1362 memory_object_t mem_obj)
1363 {
1364 dyld_pager_t pager;
1365
1366 assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
1367 pager = (dyld_pager_t)(uintptr_t) mem_obj;
1368 assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
1369 return pager;
1370 }
1371
1372 /*
1373 * Create and return a pager for the given object with the
1374 * given slide information.
1375 */
/*
 * Allocate and initialize a dyld pager for "backing_object", recording the
 * given regions and taking ownership of "link_info".  The new pager is
 * queued on dyld_pager_queue, named with VM, and marked ready before return.
 * Returns NULL on allocation failure.
 */
static dyld_pager_t
dyld_pager_create(
#if !defined(HAS_APPLE_PAC)
	__unused
#endif /* !defined(HAS_APPLE_PAC) */
	task_t task,
	vm_object_t backing_object,
	struct mwl_region *regions,
	uint32_t region_cnt,
	void *link_info,
	uint32_t link_info_size)
{
	dyld_pager_t pager;
	memory_object_control_t control;
	kern_return_t kr;

	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
	if (pager == NULL) {
		return NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->dyld_is_ready = FALSE;/* not ready until it has a "name" */
	/* existence reference for the caller */
	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
	pager->dyld_is_mapped = FALSE;
	pager->dyld_backing_object = backing_object;
	pager->dyld_link_info = link_info; /* pager takes ownership of this pointer here */
	pager->dyld_link_info_size = link_info_size;
#if defined(HAS_APPLE_PAC)
	/* only use the task's JOP key if PAC/JOP is enabled for its pmap */
	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
#endif /* defined(HAS_APPLE_PAC) */

	/*
	 * Record the regions so the pager can find the offset from an address.
	 */
	pager->dyld_num_range = region_cnt;
	for (uint32_t r = 0; r < region_cnt; ++r) {
		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
		pager->dyld_address[r] = regions[r].mwlr_address;
		pager->dyld_size[r] = regions[r].mwlr_size;
	}

	/* keep the backing object alive for the lifetime of the pager */
	vm_object_reference(backing_object);
	lck_mtx_lock(&dyld_pager_lock);
	queue_enter_first(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	dyld_pager_count++;
	/* track the high-water mark of live dyld pagers */
	if (dyld_pager_count > dyld_pager_count_max) {
		dyld_pager_count_max = dyld_pager_count;
	}
	lck_mtx_unlock(&dyld_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&dyld_pager_lock);
	/* the new pager is now ready to be used */
	pager->dyld_is_ready = TRUE;
	lck_mtx_unlock(&dyld_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->dyld_is_ready);

	return pager;
}
1456
1457 /*
1458 * dyld_pager_setup()
1459 *
1460 * Provide the caller with a memory object backed by the provided
1461 * "backing_object" VM object.
1462 */
1463 static memory_object_t
dyld_pager_setup(task_t task,vm_object_t backing_object,struct mwl_region * regions,uint32_t region_cnt,void * link_info,uint32_t link_info_size)1464 dyld_pager_setup(
1465 task_t task,
1466 vm_object_t backing_object,
1467 struct mwl_region *regions,
1468 uint32_t region_cnt,
1469 void *link_info,
1470 uint32_t link_info_size)
1471 {
1472 dyld_pager_t pager;
1473
1474 /* create new pager */
1475 pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
1476 if (pager == NULL) {
1477 /* could not create a new pager */
1478 return MEMORY_OBJECT_NULL;
1479 }
1480
1481 lck_mtx_lock(&dyld_pager_lock);
1482 while (!pager->dyld_is_ready) {
1483 lck_mtx_sleep(&dyld_pager_lock,
1484 LCK_SLEEP_DEFAULT,
1485 &pager->dyld_is_ready,
1486 THREAD_UNINT);
1487 }
1488 lck_mtx_unlock(&dyld_pager_lock);
1489
1490 return (memory_object_t) pager;
1491 }
1492
1493 /*
1494 * Set up regions which use a special pager to apply dyld fixups.
1495 *
1496 * The arguments to this function are mostly just used as input.
1497 * Except for the link_info! That is saved off in the pager that
1498 * gets created. If the pager assumed ownership of *link_info,
1499 * the argument is NULLed, if not, the caller need to free it on error.
1500 */
1501 kern_return_t
vm_map_with_linking(task_t task,struct mwl_region * regions,uint32_t region_cnt,void ** link_info,uint32_t link_info_size,memory_object_control_t file_control)1502 vm_map_with_linking(
1503 task_t task,
1504 struct mwl_region *regions,
1505 uint32_t region_cnt,
1506 void **link_info,
1507 uint32_t link_info_size,
1508 memory_object_control_t file_control)
1509 {
1510 vm_map_t map = task->map;
1511 vm_object_t object = VM_OBJECT_NULL;
1512 memory_object_t pager = MEMORY_OBJECT_NULL;
1513 uint32_t r;
1514 vm_map_address_t map_addr;
1515 kern_return_t kr = KERN_SUCCESS;
1516
1517 object = memory_object_control_to_vm_object(file_control);
1518 if (object == VM_OBJECT_NULL || object->internal) {
1519 printf("%s no object for file_control\n", __func__);
1520 object = VM_OBJECT_NULL;
1521 kr = KERN_INVALID_ADDRESS;
1522 goto done;
1523 }
1524
1525 /* create a pager */
1526 pager = dyld_pager_setup(task, object, regions, region_cnt, *link_info, link_info_size);
1527 if (pager == MEMORY_OBJECT_NULL) {
1528 kr = KERN_RESOURCE_SHORTAGE;
1529 goto done;
1530 }
1531 *link_info = NULL; /* ownership of this pointer was given to pager */
1532
1533 for (r = 0; r < region_cnt; ++r) {
1534 vm_map_kernel_flags_t vmk_flags = {
1535 .vmf_fixed = true,
1536 .vmf_overwrite = true,
1537 .vmkf_overwrite_immutable = true,
1538 };
1539 struct mwl_region *rp = ®ions[r];
1540
1541 /* map that pager over the portion of the mapping that needs sliding */
1542 map_addr = (vm_map_address_t)rp->mwlr_address;
1543
1544 if (rp->mwlr_protections & VM_PROT_TPRO) {
1545 vmk_flags.vmf_tpro = TRUE;
1546 }
1547
1548 kr = mach_vm_map_kernel(map,
1549 vm_sanitize_wrap_addr_ref(&map_addr),
1550 rp->mwlr_size,
1551 0,
1552 vmk_flags,
1553 (ipc_port_t)(uintptr_t)pager,
1554 rp->mwlr_file_offset,
1555 TRUE, /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
1556 rp->mwlr_protections & VM_PROT_DEFAULT,
1557 rp->mwlr_protections & VM_PROT_DEFAULT,
1558 VM_INHERIT_DEFAULT);
1559 if (kr != KERN_SUCCESS) {
1560 /* no need to clean up earlier regions, this will be process fatal */
1561 goto done;
1562 }
1563 }
1564
1565 /* success! */
1566 kr = KERN_SUCCESS;
1567
1568 done:
1569
1570 if (pager != MEMORY_OBJECT_NULL) {
1571 /*
1572 * Release the pager reference obtained by dyld_pager_setup().
1573 * The mapping, if it succeeded, is now holding a reference on the memory object.
1574 */
1575 memory_object_deallocate(pager);
1576 pager = MEMORY_OBJECT_NULL;
1577 }
1578 return kr;
1579 }
1580
1581 static uint64_t
dyld_pager_purge(dyld_pager_t pager)1582 dyld_pager_purge(
1583 dyld_pager_t pager)
1584 {
1585 uint64_t pages_purged;
1586 vm_object_t object;
1587
1588 pages_purged = 0;
1589 object = memory_object_to_vm_object((memory_object_t) pager);
1590 assert(object != VM_OBJECT_NULL);
1591 vm_object_lock(object);
1592 pages_purged = object->resident_page_count;
1593 vm_object_reap_pages(object, REAP_DATA_FLUSH);
1594 pages_purged -= object->resident_page_count;
1595 // printf(" %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1596 vm_object_unlock(object);
1597 return pages_purged;
1598 }
1599
1600 uint64_t
dyld_pager_purge_all(void)1601 dyld_pager_purge_all(void)
1602 {
1603 uint64_t pages_purged;
1604 dyld_pager_t pager;
1605
1606 pages_purged = 0;
1607 lck_mtx_lock(&dyld_pager_lock);
1608 queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
1609 pages_purged += dyld_pager_purge(pager);
1610 }
1611 lck_mtx_unlock(&dyld_pager_lock);
1612 #if DEVELOPMENT || DEBUG
1613 printf(" %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1614 #endif /* DEVELOPMENT || DEBUG */
1615 return pages_purged;
1616 }
1617