/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_dyld_pager_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_sanitize_internal.h>

#include <sys/kdebug_triage.h>
#include <mach-o/fixup-chains.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm/misc_protos.h>
#endif /* defined(HAS_APPLE_PAC) */

extern int panic_on_dyld_issue;
/*
 * DYLD page-in linking pager.
 *
 * This external memory manager (EMM) applies dyld fixups to data
 * pages, allowing the modified pages to appear "clean".
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages are mapped copy-on-write, so that the originals stay clean.
 */
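
/*
 * Rough life cycle, as implemented below: dyld calls map_with_linking_np(),
 * which lands in vm_map_with_linking(); that creates a dyld_pager via
 * dyld_pager_setup() on top of the file's backing VM object and maps the
 * data regions through it. Every subsequent page-in goes through
 * dyld_pager_data_request(), which copies the original page from the
 * backing object and applies the chained fixups before handing it to VM.
 */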

/* forward declarations */
typedef struct dyld_pager *dyld_pager_t;
static void dyld_pager_reference(memory_object_t mem_obj);
static void dyld_pager_deallocate(memory_object_t mem_obj);
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
static kern_return_t dyld_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
static void dyld_pager_terminate_internal(dyld_pager_t pager);
static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t      data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
static kern_return_t dyld_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
static boolean_t dyld_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);
static dyld_pager_t dyld_pager_lookup(memory_object_t  mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops dyld_pager_ops = {
	.memory_object_reference = dyld_pager_reference,
	.memory_object_deallocate = dyld_pager_deallocate,
	.memory_object_init = dyld_pager_init,
	.memory_object_terminate = dyld_pager_terminate,
	.memory_object_data_request = dyld_pager_data_request,
	.memory_object_data_return = dyld_pager_data_return,
	.memory_object_data_initialize = dyld_pager_data_initialize,
	.memory_object_map = dyld_pager_map,
	.memory_object_last_unmap = dyld_pager_last_unmap,
	.memory_object_backing_object = dyld_pager_backing_object,
	.memory_object_pager_name = "dyld"
};

/*
 * The "dyld_pager" structure. We create one of these for each use
 * of map_with_linking_np() by dyld.
 */
struct dyld_pager {
	struct memory_object    dyld_header;          /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count           dyld_header.mo_ref
#else
	os_ref_atomic_t         dyld_ref_count;      /* active uses */
#endif
	queue_chain_t           dyld_pager_queue;    /* next & prev pagers */
	bool                    dyld_is_mapped;      /* has active mappings */
	bool                    dyld_is_ready;       /* is this pager ready? */
	vm_object_t             dyld_backing_object; /* VM object for shared cache */
	void                    *dyld_link_info;
	uint32_t                dyld_link_info_size;
	uint32_t                dyld_num_range;
	memory_object_offset_t  dyld_file_offset[MWL_MAX_REGION_COUNT];
	mach_vm_address_t       dyld_address[MWL_MAX_REGION_COUNT];
	mach_vm_size_t          dyld_size[MWL_MAX_REGION_COUNT];
#if defined(HAS_APPLE_PAC)
	uint64_t                dyld_a_key;
#endif /* defined(HAS_APPLE_PAC) */
};
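
/*
 * The three parallel region arrays above let the pager map a memory object
 * offset back to the user VA it was mapped at. As a purely hypothetical
 * example, a region recorded as
 *     dyld_file_offset[0] = 0x4000,
 *     dyld_address[0]     = 0x10000c000,
 *     dyld_size[0]        = 0x8000
 * means object offset 0x5000 corresponds to user VA 0x10000d000;
 * dyld_pager_data_request() performs exactly this reverse lookup to find
 * the user VA whose fixups apply to a given page.
 */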

queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);

/*
 * "dyld_pager_lock" for counters, ref counting, etc.
 */
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);

/*
 * Statistics & counters.
 */
uint32_t dyld_pager_count = 0;
uint32_t dyld_pager_count_max = 0;

/*
 * dyld_pager_dequeue()
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "dyld_pager_lock".
 */
static void
dyld_pager_dequeue(
	__unused dyld_pager_t pager)
{
	queue_remove(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	pager->dyld_pager_queue.next = NULL;
	pager->dyld_pager_queue.prev = NULL;
	dyld_pager_count--;
}

/*
 * dyld_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
static kern_return_t
dyld_pager_init(
	memory_object_t                 mem_obj,
	memory_object_control_t         control,
	__unused
	memory_object_cluster_size_t    pg_size)
{
	dyld_pager_t                    pager;
	kern_return_t                   kr;
	memory_object_attr_info_data_t  attributes;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		printf("%s(): control NULL\n", __func__);
		return KERN_INVALID_ARGUMENT;
	}

	pager = dyld_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->dyld_header.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("dyld_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_return()
 *
 * A page-out request from VM -- should never happen, so panic.
 */
static kern_return_t
dyld_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("dyld_pager_data_return: should never happen!");
	return KERN_FAILURE;
}

static kern_return_t
dyld_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("dyld_pager_data_initialize: should never happen");
	return KERN_FAILURE;
}


/*
 * Apply fixups to a page used by a 64 bit process.
 */
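
/*
 * For reference, the bit layout decoded below matches the
 * DYLD_CHAINED_PTR_64 / DYLD_CHAINED_PTR_64_OFFSET encodings in
 * <mach-o/fixup-chains.h> (bit 63 selects bind vs rebase, "next" is the
 * distance to the following fixup in 4-byte strides):
 *
 *   rebase: target:36 (vmaddr, or runtime offset for _OFFSET),
 *           high8:8 (ends up in bits 56..63), reserved:7, next:12, bind:1=0
 *   bind:   ordinal:24 (index into binds array), addend:8, reserved:19,
 *           next:12, bind:1=1
 */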
static kern_return_t
fixupPage64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t                              *bindsArray  = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * Done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * walk the chain
	 */
	uint64_t *chain  = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}

			return KERN_FAILURE;
		}
		uint64_t value  = *chain;
		bool     isBind = (value & 0x8000000000000000ULL);
		delta = (value >> 51) & 0xFFF;
		if (isBind) {
			uint32_t bindOrdinal = value & 0x00FFFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
				if (panic_on_dyld_issue) {
					panic("%s out of range bind ordinal %u (max %u)", __func__,
					    bindOrdinal, hdr->mwli_binds_count);
				}
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 24) & 0xFF;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			/* is rebase */
			uint64_t target = value & 0xFFFFFFFFFULL;
			uint64_t high8  = (value >> 36) & 0xFF;
			*chain = target + targetAdjust + (high8 << 56);
		}
		if (delta * 4 >= PAGE_SIZE) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
			printf("%s(): delta offset > page size %lld\n", __func__, delta * 4);
			if (panic_on_dyld_issue) {
				panic("%s(): delta offset > page size %lld", __func__, delta * 4);
			}
			return KERN_FAILURE;
		}
		chain = (uint64_t *)((uintptr_t)chain + (delta * 4)); // 4-byte stride
	} while (delta != 0);
	return KERN_SUCCESS;
}


/*
 * Apply fixups within a page used by a 32 bit process.
 */
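
/*
 * For reference, the DYLD_CHAINED_PTR_32 encodings from
 * <mach-o/fixup-chains.h> decoded below ("next" is in 4-byte units):
 *
 *   rebase: target:26, next:5, bind:1=0
 *   bind:   ordinal:20, addend:6, next:5, bind:1=1
 *
 * Rebase targets above segInfo->max_valid_pointer are non-pointer values
 * that were biased to keep the chain walkable; the rebase path below
 * subtracts that bias back out instead of sliding them.
 */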
static kern_return_t
fixupChain32(
	uint64_t                              userVA,
	uint32_t                              *chain,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              *bindsArray)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              delta = 0;

	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint32_t value = *chain;
		delta = (value >> 26) & 0x1F;
		if (value & 0x80000000) {
			// is bind
			uint32_t bindOrdinal = value & 0x000FFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)\n",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
				if (panic_on_dyld_issue) {
					panic("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
				}
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 20) & 0x3F;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			// is rebase
			uint32_t target = value & 0x03FFFFFF;
			if (target > segInfo->max_valid_pointer) {
				// handle non-pointers in chain
				uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
				*chain = target - bias;
			} else {
				*chain = target + (uint32_t)hdr->mwli_slide;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}


/*
 * Apply fixups to a page used by a 32 bit process.
 */
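
/*
 * A page_start[] entry normally holds the offset of the page's first fixup.
 * When fixups on a page are too far apart for one 16-bit chain start,
 * DYLD_CHAINED_PTR_START_MULTI is set and the low bits index an overflow
 * list of additional page_start[] entries, terminated by the entry tagged
 * with DYLD_CHAINED_PTR_START_LAST; each entry starts its own chain walk.
 */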
static kern_return_t
fixupPage32(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	uint32_t                              link_info_size,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr  *)link_info;
	uint32_t                              *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              startOffset = segInfo->page_start[pageIndex];

	/*
	 * done if no fixups
	 */
	if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
		// some fixups in the page are too far apart, so page has multiple starts
		uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
		bool chainEnd = false;
		while (!chainEnd) {
			/*
			 * range check against link_info, note +1 to include data we'll dereference
			 */
			if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): out of range segInfo->page_start[overflowIndex]\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): out of range segInfo->page_start[overflowIndex]", __func__);
				}
				return KERN_FAILURE;
			}
			chainEnd    = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
			startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
			uint32_t *chain = (uint32_t *)(contents + startOffset);
			fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
			++overflowIndex;
		}
	} else {
		uint32_t *chain = (uint32_t *)(contents + startOffset);
		fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
	}
	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)
/*
 * Sign a pointer needed for fixups.
 */
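
/*
 * In outline: for an address-diversified fixup the 16-bit diversity value
 * is first blended with the pointer's location,
 *
 *     disc = __builtin_ptrauth_blend_discriminator(loc, diversity);
 *
 * and the pointer is then signed with the user process's A key via
 * pmap_sign_user_ptr(), since user pointers must not be signed with the
 * kernel's own keys.
 */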
static kern_return_t
signPointer(
	uint64_t         unsignedAddr,
	void             *loc,
	bool             addrDiv,
	uint16_t         diversity,
	ptrauth_key      key,
	dyld_pager_t     pager,
	uint64_t         *signedAddr)
{
	// don't sign NULL
	if (unsignedAddr == 0) {
		*signedAddr = 0;
		return KERN_SUCCESS;
	}

	uint64_t extendedDiscriminator = diversity;
	if (addrDiv) {
		extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
	}

	switch (key) {
	case ptrauth_key_asia:
	case ptrauth_key_asda:
		if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
			*signedAddr = unsignedAddr;
		} else {
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
		}
		break;

	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
		if (panic_on_dyld_issue) {
			panic("%s(): Invalid ptr auth key %d", __func__, key);
		}
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * Apply fixups to a page used by a 64 bit process using pointer authentication.
 */
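
/*
 * For reference, the DYLD_CHAINED_PTR_ARM64E* encodings from
 * <mach-o/fixup-chains.h> decoded below (bit 63 = auth, bit 62 = bind,
 * "next" is 11 bits at bit 51, in 8-byte strides as walked here):
 *
 *   plain rebase: target:43 (vmaddr, or offset for the USERLAND formats),
 *                 high8:8
 *   plain bind:   ordinal:16 (24 for USERLAND24), addend:19 (sign-extended)
 *   auth rebase:  target:32 (always a runtime offset), diversity:16,
 *                 addrDiv:1, key:2
 *   auth bind:    ordinal:16 (24 for USERLAND24), diversity:16, addrDiv:1,
 *                 key:2
 */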
static kern_return_t
fixupPageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t             *bindsArray = (uint64_t*)((uintptr_t)link_info + hdr->mwli_binds_offset);

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		if (panic_on_dyld_issue) {
			panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		}
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		delta = (value >> 51) & 0x7FF;
		bool isAuth = (value & 0x8000000000000000ULL);
		bool isBind = (value & 0x4000000000000000ULL);
		if (isAuth) {
			ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
			bool        addrDiv = ((value & (1ULL << 48)) != 0);
			uint16_t    diversity = (uint16_t)((value >> 32) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					if (panic_on_dyld_issue) {
						panic("%s(): out of range bind ordinal %u (max %u)",
						    __func__, bindOrdinal, hdr->mwli_binds_count);
					}
					return KERN_FAILURE;
				}
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			} else {
				/* note: auth rebases only carry 32 bits of target, so it's always an offset - never a vmaddr */
				uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			}
		} else {
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					if (panic_on_dyld_issue) {
						panic("%s(): out of range bind ordinal %u (max %u)",
						    __func__, bindOrdinal, hdr->mwli_binds_count);
					}
					return KERN_FAILURE;
				} else {
					uint64_t addend19 = (value >> 32) & 0x0007FFFF;
					if (addend19 & 0x40000) {
						addend19 |=  0xFFFFFFFFFFFC0000ULL;
					}
					*chain = bindsArray[bindOrdinal] + addend19;
				}
			} else {
				uint64_t target = (value & 0x7FFFFFFFFFFULL);
				uint64_t high8  = (value << 13) & 0xFF00000000000000ULL;
				*chain = target + targetAdjust + high8;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}

/*
 * Apply fixups to a shared cache page used by a 64 bit process using
 * pointer authentication.
 */
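
/*
 * For reference, the DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE encodings from
 * <mach-o/fixup-chains.h> decoded below ("next" is 11 bits at bit 52,
 * walked in 8-byte strides; targets are always 34-bit runtime offsets):
 *
 *   rebase:      runtimeOffset:34, high8:8
 *   auth rebase: runtimeOffset:34, diversity:16, addrDiv:1, keyIsData:1
 *                (always an A key; keyIsData selects DA vs IA)
 */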
static kern_return_t
fixupCachePageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		if (panic_on_dyld_issue) {
			panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		}
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		delta = (value >> 52) & 0x7FF;
		bool isAuth = (value & 0x8000000000000000ULL);
		if (isAuth) {
			bool        addrDiv = ((value & (1ULL << 50)) != 0);
			bool        keyIsData = ((value & (1ULL << 51)) != 0);
			// the key is always an A key; the bit tells us if it's IA or DA
			ptrauth_key key = keyIsData ? ptrauth_key_asda : ptrauth_key_asia;
			uint16_t    diversity = (uint16_t)((value >> 34) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
				return KERN_FAILURE;
			}
		} else {
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			uint64_t high8  = (value << 22) & 0xFF00000000000000ULL;
			*chain = target + high8;
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */


/*
 * Handle dyld fixups for a page.
 */
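
/*
 * The user-supplied link_info blob consumed here is laid out roughly as:
 *
 *   struct mwl_info_hdr   header: pointer format, slide, image address,
 *                         bind count, and the two offsets below
 *   @mwli_chains_offset   a dyld_chained_starts_in_image followed by the
 *                         per-segment dyld_chained_starts_in_segment
 *                         records and their page_start[] arrays
 *   @mwli_binds_offset    the binds array (uint64_t[] or uint32_t[] of
 *                         resolved target addresses, indexed by ordinal)
 *
 * All of these offsets come from userspace, hence the pervasive bounds
 * checks against link_info_size in this function and its helpers.
 */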
static kern_return_t
fixup_page(
	vm_offset_t         contents,
	vm_offset_t         end_contents,
	uint64_t            userVA,
	dyld_pager_t        pager)
{
	void                                  *link_info = pager->dyld_link_info;
	uint32_t                              link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	struct dyld_chained_starts_in_segment *segInfo = NULL;
	uint32_t                              pageIndex = 0;
	uint32_t                              segIndex;
	struct dyld_chained_starts_in_image   *startsInfo;
	struct dyld_chained_starts_in_segment *seg;
	uint64_t                              segStartAddress;
	uint64_t                              segEndAddress;

	/*
	 * Note this is a linear search done for every page we have to fix up.
	 * However, it should be quick as there should only be 2 or 4 segments:
	 * - data
	 * - data const
	 * - data auth (for arm64e)
	 * - data const auth (for arm64e)
	 */
	startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
	for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
		seg = (struct dyld_chained_starts_in_segment *)
		    ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);

		/*
		 * ensure we don't go out of bounds of the link_info
		 */
		if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): seg_info out of bounds\n", __func__);
			if (panic_on_dyld_issue) {
				panic("%s(): seg_info out of bounds", __func__);
			}
			return KERN_FAILURE;
		}

		segStartAddress = hdr->mwli_image_address + seg->segment_offset;
		segEndAddress = segStartAddress + seg->page_count * seg->page_size;
		if (segStartAddress <= userVA && userVA < segEndAddress) {
			segInfo = seg;
			pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;

			/* ensure seg->size fits in link_info_size */
			if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size out of bounds\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->size out of bounds", __func__);
				}
				return KERN_FAILURE;
			}
			if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size too small\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->size too small", __func__);
				}
				return KERN_FAILURE;
			}
			/* ensure page_count and pageIndex are valid too */
			if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count out of bounds\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->page_count out of bounds", __func__);
				}
				return KERN_FAILURE;
			}
			if (pageIndex >= seg->page_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count too small\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->page_count too small", __func__);
				}
				return KERN_FAILURE;
			}

			break;
		}
	}

	/*
	 * No segment covers this user VA. It's an open question whether this
	 * could be treated as "nothing to do" and return KERN_SUCCESS; for
	 * now it is treated as an error.
	 */
	if (segInfo == NULL) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
		if (panic_on_dyld_issue) {
			panic("%s(): No segment for user VA 0x%llx", __func__, (long long)userVA);
		}
		return KERN_FAILURE;
	}

	/*
	 * Route to the appropriate fixup routine
	 */
	switch (hdr->mwli_pointer_format) {
#if defined(HAS_APPLE_PAC)
	case DYLD_CHAINED_PTR_ARM64E:
		fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_ARM64E_USERLAND:
	case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
		fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE:
		fixupCachePageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex);
		break;
#endif /* defined(HAS_APPLE_PAC) */
	case DYLD_CHAINED_PTR_64:
		fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_64_OFFSET:
		fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_32:
		fixupPage32(userVA, contents, end_contents, link_info, link_info_size, segInfo, pageIndex);
		break;
	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
		if (panic_on_dyld_issue) {
			panic("%s(): unknown pointer_format %d", __func__, hdr->mwli_pointer_format);
		}
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
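
/*
 * In outline, each request below proceeds as follows:
 *   1. gather the requested range into a UPL (UPL_RET_ONLY_ABSENT);
 *   2. for each page present in the UPL, fault the corresponding page of
 *      the backing object in with vm_fault_page();
 *   3. bcopy() it into the destination page and apply fixup_page();
 *   4. mark the UPL "clean" and commit it, so the fixed-up pages look like
 *      pristine pager-provided contents to VM.
 */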
static kern_return_t
dyld_pager_data_request(
	memory_object_t              mem_obj,
	memory_object_offset_t       offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t           protection_required,
	memory_object_fault_info_t   mo_fault_info)
{
	dyld_pager_t            pager;
	memory_object_control_t mo_control;
	upl_t                   upl = NULL;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl = NULL;
	unsigned int            pl_count;
	vm_object_t             src_top_object = VM_OBJECT_NULL;
	vm_object_t             src_page_object = VM_OBJECT_NULL;
	vm_object_t             dst_object;
	kern_return_t           kr;
	kern_return_t           retval = KERN_SUCCESS;
	vm_offset_t             src_vaddr;
	vm_offset_t             dst_vaddr;
	vm_offset_t             cur_offset;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	struct mwl_info_hdr     *hdr;
	uint32_t                r;
	uint64_t                userVA;

	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = dyld_pager_lookup(mem_obj);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
	assert(pager->dyld_is_mapped); /* pager is mapped */
	hdr = (struct mwl_info_hdr *)pager->dyld_link_info;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->dyld_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), kr /* arg */);
		if (panic_on_dyld_issue) {
			panic("%s(): upl_request(%p, 0x%llx, 0x%llx) ret %d", __func__,
			    mo_control, offset, (uint64_t)upl_size, kr);
		}
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object, itself backed by the executable/library file via
	 * the vnode pager.
	 */
	src_top_object = pager->dyld_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source page in the kernel's virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("dyld_pager_data_request: vm_fault_page() unexpected error 0x%x\n", kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source and destination physical pages.
		 */
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
		}

		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);

		/*
		 * Figure out what the original user virtual address was, based on the offset.
		 */
		userVA = 0;
		for (r = 0; r < pager->dyld_num_range; ++r) {
			vm_offset_t o = offset + cur_offset;
			if (pager->dyld_file_offset[r] <= o &&
			    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
				userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
				break;
			}
		}

		/*
		 * If we found a valid range, fix up the page; otherwise fail.
		 */
		if (r == pager->dyld_num_range) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
			if (panic_on_dyld_issue) {
				panic("%s(): Range not found for offset 0x%llx", __func__, (long long)cur_offset);
			}
			retval = KERN_FAILURE;
		} else if (fixup_page(dst_vaddr, dst_vaddr + PAGE_SIZE, userVA, pager) != KERN_SUCCESS) {
			/* KDBG / printf was done under fixup_page() */
			retval = KERN_FAILURE;
		}
		if (retval != KERN_SUCCESS) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
			if (panic_on_dyld_issue) {
				panic("%s(): dyld pager slide error %d at 0x%llx", __func__, retval, (uint64_t)userVA);
			}
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		vm_page_wakeup_done(src_top_object, src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * dyld_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
static void
dyld_pager_reference(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	lck_mtx_unlock(&dyld_pager_lock);
}


/*
 * dyld_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
 * to finish the clean up.
 *
 * "dyld_pager_lock" should not be held by the caller.
 */
static void
dyld_pager_terminate_internal(
	dyld_pager_t pager)
{
	assert(pager->dyld_is_ready);
	assert(!pager->dyld_is_mapped);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);

	if (pager->dyld_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, VM_OBJECT_DESTROY_PAGER);
}

/*
 * dyld_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with dyld_pager_lock held or not, but always returns
 * with it unlocked.
 */
static void
dyld_pager_deallocate_internal(
	dyld_pager_t   pager,
	bool           locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * dyld_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
static void
dyld_pager_deallocate(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);
	dyld_pager_deallocate_internal(pager, FALSE);
}

/*
 * dyld_pager_terminate()
 */
1269 static kern_return_t
dyld_pager_terminate(__unused memory_object_t mem_obj)1270 dyld_pager_terminate(
1271 #if !DEBUG
1272 	__unused
1273 #endif
1274 	memory_object_t mem_obj)
1275 {
1276 	return KERN_SUCCESS;
1277 }
1278 
1279 /*
1280  * dyld_pager_map()
1281  *
1282  * This allows VM to let us, the EMM, know that this memory object
1283  * is currently mapped one or more times.  This is called by VM each time
1284  * the memory object gets mapped, but we only take one extra reference the
1285  * first time it is called.
1286  */
1287 static kern_return_t
dyld_pager_map(memory_object_t mem_obj,__unused vm_prot_t prot)1288 dyld_pager_map(
1289 	memory_object_t         mem_obj,
1290 	__unused vm_prot_t      prot)
1291 {
1292 	dyld_pager_t   pager;
1293 
1294 	pager = dyld_pager_lookup(mem_obj);
1295 
1296 	lck_mtx_lock(&dyld_pager_lock);
1297 	assert(pager->dyld_is_ready);
1298 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
1299 	if (!pager->dyld_is_mapped) {
1300 		pager->dyld_is_mapped = TRUE;
1301 		os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1302 	}
1303 	lck_mtx_unlock(&dyld_pager_lock);
1304 
1305 	return KERN_SUCCESS;
1306 }
1307 
1308 /*
1309  * dyld_pager_last_unmap()
1310  *
1311  * This is called by VM when this memory object is no longer mapped anywhere.
1312  */
1313 static kern_return_t
dyld_pager_last_unmap(memory_object_t mem_obj)1314 dyld_pager_last_unmap(
1315 	memory_object_t mem_obj)
1316 {
1317 	dyld_pager_t    pager;
1318 
1319 	pager = dyld_pager_lookup(mem_obj);
1320 
1321 	lck_mtx_lock(&dyld_pager_lock);
1322 	if (pager->dyld_is_mapped) {
1323 		/*
1324 		 * All the mappings are gone, so let go of the one extra
1325 		 * reference that represents all the mappings of this pager.
1326 		 */
1327 		pager->dyld_is_mapped = FALSE;
1328 		dyld_pager_deallocate_internal(pager, TRUE);
1329 		/* caution: deallocate_internal() released the lock ! */
1330 	} else {
1331 		lck_mtx_unlock(&dyld_pager_lock);
1332 	}
1333 
1334 	return KERN_SUCCESS;
1335 }
1336 
1337 static boolean_t
dyld_pager_backing_object(memory_object_t mem_obj,memory_object_offset_t offset,vm_object_t * backing_object,vm_object_offset_t * backing_offset)1338 dyld_pager_backing_object(
1339 	memory_object_t         mem_obj,
1340 	memory_object_offset_t  offset,
1341 	vm_object_t             *backing_object,
1342 	vm_object_offset_t      *backing_offset)
1343 {
1344 	dyld_pager_t   pager;
1345 
1346 	pager = dyld_pager_lookup(mem_obj);
1347 
1348 	*backing_object = pager->dyld_backing_object;
1349 	*backing_offset = offset;
1350 
1351 	return TRUE;
1352 }
1353 
1354 
1355 /*
1356  * Convert from memory_object to dyld_pager.
1357  */
1358 static dyld_pager_t
dyld_pager_lookup(memory_object_t mem_obj)1359 dyld_pager_lookup(
1360 	memory_object_t  mem_obj)
1361 {
1362 	dyld_pager_t   pager;
1363 
1364 	assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
1365 	pager = (dyld_pager_t)(uintptr_t) mem_obj;
1366 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
1367 	return pager;
1368 }
1369 
1370 /*
1371  * Create and return a pager for the given object with the
1372  * given slide information.
1373  */
1374 static dyld_pager_t
dyld_pager_create(__unused task_t task,vm_object_t backing_object,struct mwl_region * regions,uint32_t region_cnt,void * link_info,uint32_t link_info_size)1375 dyld_pager_create(
1376 #if !defined(HAS_APPLE_PAC)
1377 	__unused
1378 #endif /* defined(HAS_APPLE_PAC) */
1379 	task_t            task,
1380 	vm_object_t       backing_object,
1381 	struct mwl_region *regions,
1382 	uint32_t          region_cnt,
1383 	void              *link_info,
1384 	uint32_t          link_info_size)
1385 {
1386 	dyld_pager_t            pager;
1387 	memory_object_control_t control;
1388 	kern_return_t           kr;
1389 
1390 	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
1391 	if (pager == NULL) {
1392 		return NULL;
1393 	}
1394 
1395 	/*
1396 	 * The vm_map call takes both named entry ports and raw memory
1397 	 * objects in the same parameter.  We need to make sure that
1398 	 * vm_map does not see this object as a named entry port.  So,
1399 	 * we reserve the first word in the object for a fake ip_kotype
1400 	 * setting - that will tell vm_map to use it as a memory object.
1401 	 */
1402 	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
1403 	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
1404 	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1405 
1406 	pager->dyld_is_ready = FALSE;/* not ready until it has a "name" */
1407 	/* existence reference for the caller */
1408 	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
1409 	pager->dyld_is_mapped = FALSE;
1410 	pager->dyld_backing_object = backing_object;
1411 	pager->dyld_link_info = link_info; /* pager takes ownership of this pointer here */
1412 	pager->dyld_link_info_size = link_info_size;
1413 #if defined(HAS_APPLE_PAC)
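	/*
	 * Remember which pointer-auth key, if any, fixed up pointers
	 * should be signed with: the task's JOP key when JOP is enabled
	 * for its pmap, or 0 to indicate no signing.
	 */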
1414 	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
1415 #endif /* defined(HAS_APPLE_PAC) */
1416 
1417 	/*
1418 	 * Record the regions so the pager can find the offset from an address.
1419 	 */
1420 	pager->dyld_num_range = region_cnt;
1421 	for (uint32_t r = 0; r < region_cnt; ++r) {
1422 		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
1423 		pager->dyld_address[r] = regions[r].mwlr_address;
1424 		pager->dyld_size[r] = regions[r].mwlr_size;
1425 	}
1426 
1427 	vm_object_reference(backing_object);
1428 	lck_mtx_lock(&dyld_pager_lock);
1429 	queue_enter_first(&dyld_pager_queue,
1430 	    pager,
1431 	    dyld_pager_t,
1432 	    dyld_pager_queue);
1433 	dyld_pager_count++;
1434 	if (dyld_pager_count > dyld_pager_count_max) {
1435 		dyld_pager_count_max = dyld_pager_count;
1436 	}
1437 	lck_mtx_unlock(&dyld_pager_lock);
1438 
1439 	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
1440 	assert(kr == KERN_SUCCESS);
1441 
1442 	memory_object_mark_trusted(control);
1443 
1444 	lck_mtx_lock(&dyld_pager_lock);
1445 	/* the new pager is now ready to be used */
1446 	pager->dyld_is_ready = TRUE;
1447 	lck_mtx_unlock(&dyld_pager_lock);
1448 
1449 	/* wakeup anyone waiting for this pager to be ready */
1450 	thread_wakeup(&pager->dyld_is_ready);
1451 
1452 	return pager;
1453 }
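/*
 * Note the ordering above: the pager is visible on dyld_pager_queue
 * before dyld_is_ready is set, so a thread that finds it early must
 * block until the wakeup, which is what dyld_pager_setup() does below.
 */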
1454 
1455 /*
1456  * dyld_pager_setup()
1457  *
1458  * Provide the caller with a memory object backed by the provided
1459  * "backing_object" VM object.
1460  */
1461 static memory_object_t
1462 dyld_pager_setup(
1463 	task_t            task,
1464 	vm_object_t       backing_object,
1465 	struct mwl_region *regions,
1466 	uint32_t          region_cnt,
1467 	void              *link_info,
1468 	uint32_t          link_info_size)
1469 {
1470 	dyld_pager_t      pager;
1471 
1472 	/* create new pager */
1473 	pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
1474 	if (pager == NULL) {
1475 		/* could not create a new pager */
1476 		return MEMORY_OBJECT_NULL;
1477 	}
1478 
1479 	lck_mtx_lock(&dyld_pager_lock);
1480 	while (!pager->dyld_is_ready) {
1481 		lck_mtx_sleep(&dyld_pager_lock,
1482 		    LCK_SLEEP_DEFAULT,
1483 		    &pager->dyld_is_ready,
1484 		    THREAD_UNINT);
1485 	}
1486 	lck_mtx_unlock(&dyld_pager_lock);
1487 
1488 	return (memory_object_t) pager;
1489 }
1490 
1491 /*
1492  * Set up regions which use a special pager to apply dyld fixups.
1493  *
1494  * The arguments to this function are used as input only, with one
1495  * exception: link_info, which is saved off in the pager that gets
1496  * created. If the pager takes ownership of *link_info, the argument
1497  * is NULLed; if not, the caller needs to free it on error.
1498  */
1499 kern_return_t
1500 vm_map_with_linking(
1501 	task_t                  task,
1502 	struct mwl_region       *regions,
1503 	uint32_t                region_cnt,
1504 	void                    **link_info,
1505 	uint32_t                link_info_size,
1506 	memory_object_control_t file_control)
1507 {
1508 	vm_map_t                map = task->map;
1509 	vm_object_t             object = VM_OBJECT_NULL;
1510 	memory_object_t         pager = MEMORY_OBJECT_NULL;
1511 	uint32_t                r;
1512 	vm_map_address_t        map_addr;
1513 	kern_return_t           kr = KERN_SUCCESS;
1514 
1515 	object = memory_object_control_to_vm_object(file_control);
1516 	if (object == VM_OBJECT_NULL || object->internal) {
1517 		printf("%s no object for file_control\n", __func__);
1518 		object = VM_OBJECT_NULL;
1519 		kr = KERN_INVALID_ADDRESS;
1520 		goto done;
1521 	}
1522 
1523 	/* create a pager */
1524 	pager = dyld_pager_setup(task, object, regions, region_cnt, *link_info, link_info_size);
1525 	if (pager == MEMORY_OBJECT_NULL) {
1526 		kr = KERN_RESOURCE_SHORTAGE;
1527 		goto done;
1528 	}
1529 	*link_info = NULL; /* ownership of this pointer was given to pager */
1530 
1531 	for (r = 0; r < region_cnt; ++r) {
1532 		vm_map_kernel_flags_t vmk_flags = {
1533 			.vmf_fixed = true,
1534 			.vmf_overwrite = true,
1535 			.vmkf_overwrite_immutable = true,
1536 		};
1537 		struct mwl_region *rp = &regions[r];
1538 
1539 		/* map that pager over the portion of the mapping that needs sliding */
1540 		map_addr = (vm_map_address_t)rp->mwlr_address;
1541 
1542 		if (rp->mwlr_protections & VM_PROT_TPRO) {
1543 			vmk_flags.vmf_tpro = TRUE;
1544 		}
1545 
1546 		kr = mach_vm_map_kernel(map,
1547 		    vm_sanitize_wrap_addr_ref(&map_addr),
1548 		    rp->mwlr_size,
1549 		    0,
1550 		    vmk_flags,
1551 		    (ipc_port_t)(uintptr_t)pager,
1552 		    rp->mwlr_file_offset,
1553 		    TRUE,       /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
1554 		    rp->mwlr_protections & VM_PROT_DEFAULT,
1555 		    rp->mwlr_protections & VM_PROT_DEFAULT,
1556 		    VM_INHERIT_DEFAULT);
1557 		if (kr != KERN_SUCCESS) {
1558 			/* no need to clean up earlier regions; this failure is fatal to the process */
1559 			goto done;
1560 		}
1561 	}
1562 
1563 	/* success! */
1564 	kr = KERN_SUCCESS;
1565 
1566 done:
1567 
1568 	if (pager != MEMORY_OBJECT_NULL) {
1569 		/*
1570 		 * Release the pager reference obtained by dyld_pager_setup().
1571 		 * The mapping, if it succeeded, is now holding a reference on the memory object.
1572 		 */
1573 		memory_object_deallocate(pager);
1574 		pager = MEMORY_OBJECT_NULL;
1575 	}
1576 	return kr;
1577 }
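/*
 * A minimal caller sketch, with hypothetical values; the real caller
 * lives in the BSD syscall layer and copies the regions and link info
 * in from dyld before calling here:
 *
 *	void *link_info = ...;       // kernel copy; the pager may take it
 *	struct mwl_region region = {
 *		.mwlr_address     = ...,  // VA of the region needing fixups
 *		.mwlr_size        = ...,
 *		.mwlr_file_offset = ...,
 *		.mwlr_protections = VM_PROT_READ | VM_PROT_WRITE,
 *	};
 *	kr = vm_map_with_linking(task, &region, 1,
 *	    &link_info, link_info_size, file_control);
 *	if (kr != KERN_SUCCESS && link_info != NULL) {
 *		// ownership was not taken; free the buffer here
 *	}
 */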
1578 
1579 static uint64_t
1580 dyld_pager_purge(
1581 	dyld_pager_t pager)
1582 {
1583 	uint64_t pages_purged;
1584 	vm_object_t object;
1585 
1586 	pages_purged = 0;
1587 	object = memory_object_to_vm_object((memory_object_t) pager);
1588 	assert(object != VM_OBJECT_NULL);
1589 	vm_object_lock(object);
1590 	pages_purged = object->resident_page_count;
1591 	vm_object_reap_pages(object, REAP_DATA_FLUSH);
1592 	pages_purged -= object->resident_page_count;
1593 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1594 	vm_object_unlock(object);
1595 	return pages_purged;
1596 }
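/*
 * Purging is cheap and safe: the pager's pages are clean copy-on-write
 * copies, so dropping them loses nothing, and the next fault simply
 * re-creates them with the fixups re-applied.
 */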
1597 
1598 uint64_t
1599 dyld_pager_purge_all(void)
1600 {
1601 	uint64_t pages_purged;
1602 	dyld_pager_t pager;
1603 
1604 	pages_purged = 0;
1605 	lck_mtx_lock(&dyld_pager_lock);
1606 	queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
1607 		pages_purged += dyld_pager_purge(pager);
1608 	}
1609 	lck_mtx_unlock(&dyld_pager_lock);
1610 #if DEVELOPMENT || DEBUG
1611 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1612 #endif /* DEVELOPMENT || DEBUG */
1613 	return pages_purged;
1614 }
1615 }
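/*
 * The walk in dyld_pager_purge_all() holds dyld_pager_lock throughout,
 * so no pager can be unlinked from dyld_pager_queue mid-iteration;
 * dyld_pager_purge() itself only needs the VM object lock.
 */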