/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_dyld_pager.h>

#include <sys/kdebug_triage.h>
#include <mach-o/fixup-chains.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm/misc_protos.h>
#endif /* defined(HAS_APPLE_PAC) */

/*
 * DYLD page-in linking pager.
 *
 * This external memory manager (EMM) applies dyld fixups to data
 * pages at page-in time, allowing the modified pages to appear "clean".
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages are mapped copy-on-write, so that the originals stay clean.
 */

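/*
 * Rough lifecycle sketch (the callers live outside this file): dyld
 * issues a map_with_linking_np() call that reaches vm_map_with_linking()
 * at the bottom of this file.  That routine creates a dyld_pager via
 * dyld_pager_setup() and maps it copy-on-write over each region that
 * needs fixups.  Subsequent page-in faults arrive through
 * dyld_pager_data_request(), which copies the original page from the
 * backing object, applies the chained fixups, and hands the page back
 * to VM marked "clean".
 */
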
/* forward declarations */
typedef struct dyld_pager *dyld_pager_t;
static void dyld_pager_reference(memory_object_t mem_obj);
static void dyld_pager_deallocate(memory_object_t mem_obj);
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
static kern_return_t dyld_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
static void dyld_pager_terminate_internal(dyld_pager_t pager);
static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
static kern_return_t dyld_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
static boolean_t dyld_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);
static dyld_pager_t dyld_pager_lookup(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops dyld_pager_ops = {
	.memory_object_reference = dyld_pager_reference,
	.memory_object_deallocate = dyld_pager_deallocate,
	.memory_object_init = dyld_pager_init,
	.memory_object_terminate = dyld_pager_terminate,
	.memory_object_data_request = dyld_pager_data_request,
	.memory_object_data_return = dyld_pager_data_return,
	.memory_object_data_initialize = dyld_pager_data_initialize,
	.memory_object_map = dyld_pager_map,
	.memory_object_last_unmap = dyld_pager_last_unmap,
	.memory_object_backing_object = dyld_pager_backing_object,
	.memory_object_pager_name = "dyld"
};

/*
 * The "dyld_pager" structure. We create one of these for each
 * map_with_linking_np() call that dyld makes.
 */
struct dyld_pager {
	struct memory_object    dyld_header;          /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count           dyld_header.mo_ref
#else
	os_ref_atomic_t         dyld_ref_count;      /* active uses */
#endif
	bool                    dyld_is_mapped;      /* has active mappings */
	bool                    dyld_is_ready;       /* is this pager ready? */
	vm_object_t             dyld_backing_object; /* VM object for the file being fixed up */
	void                    *dyld_link_info;     /* fixup information from dyld */
	uint32_t                dyld_link_info_size; /* size of dyld_link_info in bytes */
	uint32_t                dyld_num_range;      /* number of regions below */
	memory_object_offset_t  dyld_file_offset[MWL_MAX_REGION_COUNT];
	mach_vm_address_t       dyld_address[MWL_MAX_REGION_COUNT];
	mach_vm_size_t          dyld_size[MWL_MAX_REGION_COUNT];
#if defined(HAS_APPLE_PAC)
	uint64_t                dyld_a_key;          /* key for signing pointers, 0 if none */
#endif /* defined(HAS_APPLE_PAC) */
};
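
/*
 * Example of how the range arrays are used (hypothetical numbers): a
 * region recorded as
 *     dyld_file_offset[r] = 0x4000
 *     dyld_address[r]     = 0x100008000
 *     dyld_size[r]        = 0x8000
 * means memory object offsets 0x4000..0xBFFF correspond to user
 * addresses 0x100008000..0x10000FFFF.  dyld_pager_data_request() walks
 * these arrays to recover the user VA a faulting page is mapped at,
 * which the fixup code needs to locate the right segment and page.
 */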

/*
 * "dyld_pager_lock" for counters, ref counting, etc.
 */
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);

/*
 * Statistics & counters.
 */
uint32_t dyld_pager_count = 0;
uint32_t dyld_pager_count_max = 0;

/*
 * dyld_pager_init()
 *
 * Initialize the memory object and make it ready to be used and mapped.
 */
static kern_return_t
dyld_pager_init(
	memory_object_t                 mem_obj,
	memory_object_control_t         control,
	__unused
	memory_object_cluster_size_t    pg_size)
{
	dyld_pager_t                    pager;
	kern_return_t                   kr;
	memory_object_attr_info_data_t  attributes;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		printf("%s(): control NULL\n", __func__);
		return KERN_INVALID_ARGUMENT;
	}

	pager = dyld_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->dyld_header.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("dyld_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_return()
 *
 * A page-out request from VM -- should never happen, so panic.
 */
static kern_return_t
dyld_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("dyld_pager_data_return: should never happen!");
	return KERN_FAILURE;
}

/*
 * dyld_pager_data_initialize()
 *
 * Never used for this pager -- should never happen, so panic.
 */
static kern_return_t
dyld_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("dyld_pager_data_initialize: should never happen");
	return KERN_FAILURE;
}


/*
 * Apply fixups to a page used by a 64-bit process.
 */
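/*
 * For reference, the 64-bit chained fixup encodings walked here, as
 * declared in <mach-o/fixup-chains.h> (DYLD_CHAINED_PTR_64 /
 * DYLD_CHAINED_PTR_64_OFFSET); "next" is a 12-bit delta with a
 * 4-byte stride:
 *
 *     rebase: target   : 36,   // vmaddr or image offset
 *             high8    :  8,   // top byte, placed into bits 56..63
 *             reserved :  7,
 *             next     : 12,
 *             bind     :  1;   // == 0
 *
 *     bind:   ordinal  : 24,   // index into the binds array
 *             addend   :  8,
 *             reserved : 19,
 *             next     : 12,
 *             bind     :  1;   // == 1
 */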
static kern_return_t
fixupPage64(
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t                              *bindsArray = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * Done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value  = *chain;
		bool     isBind = (value & 0x8000000000000000ULL);
		delta = (value >> 51) & 0xFFF;
		if (isBind) {
			uint32_t bindOrdinal = value & 0x00FFFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				printf("%s(): out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 24) & 0xFF;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			/* is rebase */
			uint64_t target = value & 0xFFFFFFFFFULL;
			uint64_t high8  = (value >> 36) & 0xFF;
			*chain = target + targetAdjust + (high8 << 56);
		}
		if (delta * 4 >= PAGE_SIZE) {
			printf("%s(): delta offset 0x%llx >= page size\n", __func__, delta * 4);
			return KERN_FAILURE;
		}
		chain = (uint64_t *)((uintptr_t)chain + (delta * 4)); /* 4-byte stride */
	} while (delta != 0);
	return KERN_SUCCESS;
}


/*
 * Apply fixups within a page used by a 32-bit process.
 */
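/*
 * For reference, the 32-bit chained fixup encodings walked here, as
 * declared in <mach-o/fixup-chains.h> (DYLD_CHAINED_PTR_32); "next"
 * is a 5-bit delta with a 4-byte stride:
 *
 *     rebase: target : 26,   // vmaddr, or a non-pointer value
 *             next   :  5,
 *             bind   :  1;   // == 0
 *
 *     bind:   ordinal : 20,  // index into the binds array
 *             addend  :  6,
 *             next    :  5,
 *             bind    :  1;  // == 1
 */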
static kern_return_t
fixupChain32(
	uint32_t                              *chain,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              *bindsArray)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              delta = 0;

	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint32_t value = *chain;
		delta = (value >> 26) & 0x1F;
		if (value & 0x80000000) {
			// is bind
			uint32_t bindOrdinal = value & 0x000FFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				printf("%s(): out of range bind ordinal %u (max %u)\n",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 20) & 0x3F;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			// is rebase
			uint32_t target = value & 0x03FFFFFF;
			if (target > segInfo->max_valid_pointer) {
				// handle non-pointers in chain
				uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
				*chain = target - bias;
			} else {
				*chain = target + (uint32_t)hdr->mwli_slide;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}


/*
 * Apply fixups to a page used by a 32-bit process.
 */
static kern_return_t
fixupPage32(
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	uint32_t                              link_info_size,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              startOffset = segInfo->page_start[pageIndex];
	kern_return_t                         kr;

	/*
	 * done if no fixups
	 */
	if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
		// some fixups in the page are too far apart, so the page has multiple chain starts
		uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
		bool chainEnd = false;
		while (!chainEnd) {
			/*
			 * range check against link_info, note +1 to include data we'll dereference
			 */
			if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
				printf("%s(): out of range segInfo->page_start[overflowIndex]\n", __func__);
				return KERN_FAILURE;
			}
			chainEnd    = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
			startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
			uint32_t *chain = (uint32_t *)(contents + startOffset);
			kr = fixupChain32(chain, contents, end_contents, link_info, segInfo, bindsArray);
			if (kr != KERN_SUCCESS) {
				return kr;
			}
			++overflowIndex;
		}
	} else {
		uint32_t *chain = (uint32_t *)(contents + startOffset);
		kr = fixupChain32(chain, contents, end_contents, link_info, segInfo, bindsArray);
		if (kr != KERN_SUCCESS) {
			return kr;
		}
	}
	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)
/*
 * Sign a pointer needed for fixups.
 */
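/*
 * Note: only the A keys (IA/DA) are handled here; dyld_a_key holds the
 * task's jop_pid (see dyld_pager_create()), and a zero key, or user
 * JOP being disabled, leaves the pointer unsigned.
 */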
static kern_return_t
signPointer(
	uint64_t         unsignedAddr,
	void             *loc,
	bool             addrDiv,
	uint16_t         diversity,
	ptrauth_key      key,
	dyld_pager_t     pager,
	uint64_t         *signedAddr)
{
	// don't sign NULL
	if (unsignedAddr == 0) {
		*signedAddr = 0;
		return KERN_SUCCESS;
	}

	uint64_t extendedDiscriminator = diversity;
	if (addrDiv) {
		extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
	}

	switch (key) {
	case ptrauth_key_asia:
	case ptrauth_key_asda:
		if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
			*signedAddr = unsignedAddr;
		} else {
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
		}
		break;

	default:
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * Apply fixups to a page used by a 64-bit process using pointer authentication.
 */
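/*
 * For reference, the DYLD_CHAINED_PTR_ARM64E* encodings walked here,
 * as declared in <mach-o/fixup-chains.h>.  Bit 63 selects auth,
 * bit 62 selects bind, and "next" is an 11-bit delta with an
 * 8-byte stride:
 *
 *     rebase:      target : 43, high8 : 8, next : 11, bind = 0, auth = 0
 *     bind:        ordinal : 16 (24 for USERLAND24), addend : 19 (signed),
 *                  next : 11, bind = 1, auth = 0
 *     auth rebase: target : 32, diversity : 16, addrDiv : 1, key : 2,
 *                  next : 11, bind = 0, auth = 1
 *     auth bind:   ordinal : 16 (24 for USERLAND24), diversity : 16,
 *                  addrDiv : 1, key : 2, next : 11, bind = 1, auth = 1
 */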
static kern_return_t
fixupPageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t             *bindsArray = (uint64_t *)((uintptr_t)link_info + hdr->mwli_binds_offset);

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		delta = (value >> 51) & 0x7FF;
		bool isAuth = (value & 0x8000000000000000ULL);
		bool isBind = (value & 0x4000000000000000ULL);
		if (isAuth) {
			ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
			bool        addrDiv = ((value & (1ULL << 48)) != 0);
			uint16_t    diversity = (uint16_t)((value >> 32) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					return KERN_FAILURE;
				}
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			} else {
				/* note: auth rebases only have 32 bits of target, so it is always an offset - never a vmaddr */
				uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			}
		} else {
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					return KERN_FAILURE;
				} else {
					uint64_t addend19 = (value >> 32) & 0x0007FFFF;
					if (addend19 & 0x40000) {
						addend19 |= 0xFFFFFFFFFFFC0000ULL;
					}
					*chain = bindsArray[bindOrdinal] + addend19;
				}
			} else {
				uint64_t target = (value & 0x7FFFFFFFFFFULL);
				uint64_t high8  = (value << 13) & 0xFF00000000000000ULL;
				*chain = target + targetAdjust + high8;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */


/*
 * Handle dyld fixups for a page.
 */
static kern_return_t
fixup_page(
	vm_offset_t         contents,
	vm_offset_t         end_contents,
	uint64_t            userVA,
	dyld_pager_t        pager)
{
	void                                  *link_info = pager->dyld_link_info;
	uint32_t                              link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	struct dyld_chained_starts_in_segment *segInfo = NULL;
	uint32_t                              pageIndex = 0;
	uint32_t                              segIndex;
	struct dyld_chained_starts_in_image   *startsInfo;
	struct dyld_chained_starts_in_segment *seg;
	uint64_t                              segStartAddress;
	uint64_t                              segEndAddress;

	/*
	 * Note this is a linear search done for every page we have to fix up.
	 * However, it should be quick as there should only be 2 or 4 segments:
	 * - data
	 * - data const
	 * - data auth (for arm64e)
	 * - data const auth (for arm64e)
	 */
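	/*
	 * Layout reminder (per <mach-o/fixup-chains.h>):
	 * dyld_chained_starts_in_image is a seg_count-long array of
	 * offsets (seg_info_offset[]) to per-segment
	 * dyld_chained_starts_in_segment records; each of those carries
	 * segment_offset, page_size, page_count and the page_start[]
	 * array consulted by the fixup routines above.
	 */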
	startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
	for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
		seg = (struct dyld_chained_starts_in_segment *)
		    ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);

		/*
		 * ensure we don't go out of bounds of the link_info
		 */
		if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
			printf("%s(): seg_info out of bounds\n", __func__);
			return KERN_FAILURE;
		}

		segStartAddress = hdr->mwli_image_address + seg->segment_offset;
		segEndAddress = segStartAddress + seg->page_count * seg->page_size;
		if (segStartAddress <= userVA && userVA < segEndAddress) {
			segInfo = seg;
			pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;

			/* ensure seg->size fits in link_info_size */
			if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
				printf("%s(): seg->size out of bounds\n", __func__);
				return KERN_FAILURE;
			}
			if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
				printf("%s(): seg->size too small\n", __func__);
				return KERN_FAILURE;
			}
			/* ensure page_count and pageIndex are valid too */
			if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
				printf("%s(): seg->page_count out of bounds\n", __func__);
				return KERN_FAILURE;
			}
			if (pageIndex >= seg->page_count) {
				printf("%s(): seg->page_count too small\n", __func__);
				return KERN_FAILURE;
			}

			break;
		}
	}

	/*
	 * TODO: could this be made non-fatal, i.e. return KERN_SUCCESS with nothing to do?
	 */
	if (segInfo == NULL) {
		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
		return KERN_FAILURE;
	}

	/*
	 * Route to the appropriate fixup routine, propagating any failure
	 * so a partially fixed-up page is never committed as "clean".
	 */
	switch (hdr->mwli_pointer_format) {
#if defined(HAS_APPLE_PAC)
	case DYLD_CHAINED_PTR_ARM64E:
		return fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, false);
	case DYLD_CHAINED_PTR_ARM64E_USERLAND:
	case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
		return fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, true);
#endif /* defined(HAS_APPLE_PAC) */
	case DYLD_CHAINED_PTR_64:
		return fixupPage64(contents, end_contents, link_info, segInfo, pageIndex, false);
	case DYLD_CHAINED_PTR_64_OFFSET:
		return fixupPage64(contents, end_contents, link_info, segInfo, pageIndex, true);
	case DYLD_CHAINED_PTR_32:
		return fixupPage32(contents, end_contents, link_info, link_info_size, segInfo, pageIndex);
	default:
		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
		return KERN_FAILURE;
	}
}

/*
 * dyld_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
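/*
 * In outline: gather the requested pages into a UPL, fault in each
 * corresponding source page from the backing object, copy it into the
 * destination page, apply fixups via fixup_page(), then commit the UPL
 * with the pages marked "clean" so they look like pristine file contents.
 */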
static kern_return_t
dyld_pager_data_request(
	memory_object_t              mem_obj,
	memory_object_offset_t       offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t           protection_required,
	memory_object_fault_info_t   mo_fault_info)
{
	dyld_pager_t            pager;
	memory_object_control_t mo_control;
	upl_t                   upl = NULL;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl = NULL;
	unsigned int            pl_count;
	vm_object_t             src_top_object = VM_OBJECT_NULL;
	vm_object_t             src_page_object = VM_OBJECT_NULL;
	vm_object_t             dst_object;
	kern_return_t           kr;
	kern_return_t           retval = KERN_SUCCESS;
	vm_offset_t             src_vaddr;
	vm_offset_t             dst_vaddr;
	vm_offset_t             cur_offset;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	struct mwl_info_hdr     *hdr;
	uint32_t                r;
	uint64_t                userVA;

	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = dyld_pager_lookup(mem_obj);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
	assert(pager->dyld_is_mapped); /* pager is mapped */
	hdr = (struct mwl_info_hdr *)pager->dyld_link_info;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->dyld_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), 0 /* arg */);
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object, itself backed by the executable/library file via
	 * the vnode pager.
	 */
	src_top_object = pager->dyld_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source page in the kernel's virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("dyld_pager_data_request: vm_fault_page() unexpected error 0x%x\n", kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source and destination physical pages.
		 */
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
		}

		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Copy the original contents of the source page
		 * into the destination page.
		 */
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);

		/*
		 * Figure out what the original user virtual address was, based on the offset.
		 */
		userVA = 0;
		for (r = 0; r < pager->dyld_num_range; ++r) {
			vm_offset_t o = offset + cur_offset;
			if (pager->dyld_file_offset[r] <= o &&
			    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
				userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
				break;
			}
		}

		/*
		 * If we found a valid range, fix up the page.
		 */
		if (r == pager->dyld_num_range) {
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)(offset + cur_offset));
			retval = KERN_FAILURE;
		} else if (fixup_page(dst_vaddr, dst_vaddr + PAGE_SIZE, userVA, pager) != KERN_SUCCESS) {
			/* the printf was done in fixup_page() */
			retval = KERN_FAILURE;
		}
		if (retval != KERN_SUCCESS) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), 0 /* arg */);
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Clean up the result of vm_fault_page() on the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * dyld_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
static void
dyld_pager_reference(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	lck_mtx_unlock(&dyld_pager_lock);
}


/*
 * dyld_pager_terminate_internal()
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
 * to finish the clean up.
 *
 * "dyld_pager_lock" should not be held by the caller.
 */
static void
dyld_pager_terminate_internal(
	dyld_pager_t pager)
{
	assert(pager->dyld_is_ready);
	assert(!pager->dyld_is_mapped);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);

	if (pager->dyld_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, 0);
}

/*
 * dyld_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with dyld_pager_lock held or not, but always returns
 * with it unlocked.
 */
static void
dyld_pager_deallocate_internal(
	dyld_pager_t   pager,
	bool           locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_count--;
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * dyld_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
static void
dyld_pager_deallocate(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);
	dyld_pager_deallocate_internal(pager, FALSE);
}

/*
 * dyld_pager_terminate()
 *
 * Nothing to do here: the real cleanup happens in
 * dyld_pager_deallocate_internal() when the last reference goes away.
 */
static kern_return_t
dyld_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * dyld_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped, but we only take one extra reference the
 * first time it is called.
 */
static kern_return_t
dyld_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	dyld_pager_t   pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
	if (!pager->dyld_is_mapped) {
		pager->dyld_is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	}
	lck_mtx_unlock(&dyld_pager_lock);

	return KERN_SUCCESS;
}

/*
 * dyld_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
static kern_return_t
dyld_pager_last_unmap(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	if (pager->dyld_is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		pager->dyld_is_mapped = FALSE;
		dyld_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock! */
	} else {
		lck_mtx_unlock(&dyld_pager_lock);
	}

	return KERN_SUCCESS;
}

/*
 * dyld_pager_backing_object()
 *
 * Report the backing VM object and offset for this memory object.
 */
static boolean_t
dyld_pager_backing_object(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	vm_object_t             *backing_object,
	vm_object_offset_t      *backing_offset)
{
	dyld_pager_t   pager;

	pager = dyld_pager_lookup(mem_obj);

	*backing_object = pager->dyld_backing_object;
	*backing_offset = offset;

	return TRUE;
}


/*
 * Convert from memory_object to dyld_pager.
 */
static dyld_pager_t
dyld_pager_lookup(
	memory_object_t mem_obj)
{
	dyld_pager_t   pager;

	assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
	pager = (dyld_pager_t)(uintptr_t)mem_obj;
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
	return pager;
}

/*
 * Create and return a pager for the given object with the
 * given fixup information.
 */
static dyld_pager_t
dyld_pager_create(
#if !defined(HAS_APPLE_PAC)
	__unused
#endif /* !defined(HAS_APPLE_PAC) */
	task_t            task,
	vm_object_t       backing_object,
	struct mwl_region *regions,
	uint32_t          region_cnt,
	void              *link_info,
	uint32_t          link_info_size)
{
	dyld_pager_t            pager;
	memory_object_control_t control;
	kern_return_t           kr;

	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
	if (pager == NULL) {
		return NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->dyld_is_ready = FALSE; /* not ready until it has a "name" */
	/* existence reference for the caller */
	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
	pager->dyld_is_mapped = FALSE;
	pager->dyld_backing_object = backing_object;
	pager->dyld_link_info = link_info;
	pager->dyld_link_info_size = link_info_size;
#if defined(HAS_APPLE_PAC)
	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
#endif /* defined(HAS_APPLE_PAC) */

	/*
	 * Record the regions so the pager can find the offset from an address.
	 */
	pager->dyld_num_range = region_cnt;
	for (uint32_t r = 0; r < region_cnt; ++r) {
		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
		pager->dyld_address[r] = regions[r].mwlr_address;
		pager->dyld_size[r] = regions[r].mwlr_size;
	}

	vm_object_reference(backing_object);

	lck_mtx_lock(&dyld_pager_lock);
	dyld_pager_count++;
	if (dyld_pager_count > dyld_pager_count_max) {
		dyld_pager_count_max = dyld_pager_count;
	}
	lck_mtx_unlock(&dyld_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&dyld_pager_lock);
	/* the new pager is now ready to be used */
	pager->dyld_is_ready = TRUE;
	lck_mtx_unlock(&dyld_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->dyld_is_ready);

	return pager;
}

/*
 * dyld_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
static memory_object_t
dyld_pager_setup(
	task_t            task,
	vm_object_t       backing_object,
	struct mwl_region *regions,
	uint32_t          region_cnt,
	void              *link_info,
	uint32_t          link_info_size)
{
	dyld_pager_t      pager;

	/* create new pager */
	pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
	if (pager == NULL) {
		/* could not create a new pager */
		return MEMORY_OBJECT_NULL;
	}

	lck_mtx_lock(&dyld_pager_lock);
	while (!pager->dyld_is_ready) {
		lck_mtx_sleep(&dyld_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->dyld_is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&dyld_pager_lock);

	return (memory_object_t) pager;
}

/*
 * Set up regions which use a special pager to apply dyld fixups.
 *
 * The arguments to this function are mostly just used as input.
 * The exception is link_info: it is saved off in the pager that gets
 * created, so it must not be freed by the caller on KERN_SUCCESS.
 */
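/*
 * For illustration (field names per struct mwl_region, values purely
 * hypothetical): a __DATA_CONST region might arrive as
 *     { .mwlr_file_offset = 0x8000,
 *       .mwlr_address     = 0x10000c000,
 *       .mwlr_size        = 0x4000,
 *       .mwlr_protections = VM_PROT_READ | VM_PROT_WRITE }
 * and ends up with the pager mapped copy-on-write over that range below.
 */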
kern_return_t
vm_map_with_linking(
	task_t                  task,
	struct mwl_region       *regions,
	uint32_t                region_cnt,
	void                    *link_info,
	uint32_t                link_info_size,
	memory_object_control_t file_control)
{
	vm_map_t                map = task->map;
	vm_object_t             object = VM_OBJECT_NULL;
	memory_object_t         pager = MEMORY_OBJECT_NULL;
	uint32_t                r;
	struct mwl_region       *rp;
	vm_map_address_t        map_addr;
	int                     vm_flags;
	vm_map_kernel_flags_t   vmk_flags;
	kern_return_t           kr = KERN_SUCCESS;

	object = memory_object_control_to_vm_object(file_control);
	if (object == VM_OBJECT_NULL || object->internal) {
		printf("%s(): no object for file_control\n", __func__);
		object = VM_OBJECT_NULL;
		kr = KERN_INVALID_ADDRESS;
		goto done;
	}

	/* create a pager */
	pager = dyld_pager_setup(task, object, regions, region_cnt, link_info, link_info_size);
	if (pager == MEMORY_OBJECT_NULL) {
		kr = KERN_RESOURCE_SHORTAGE;
		goto done;
	}

	for (r = 0; r < region_cnt; ++r) {
		rp = &regions[r];

		/* map that pager over the portion of the mapping that needs sliding */
		vm_flags = VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE;
		vmk_flags = VM_MAP_KERNEL_FLAGS_NONE;
		vmk_flags.vmkf_overwrite_immutable = TRUE;
		map_addr = (vm_map_address_t)rp->mwlr_address;
		kr = vm_map_enter_mem_object(map,
		    &map_addr,
		    rp->mwlr_size,
		    (mach_vm_offset_t) 0,
		    vm_flags,
		    vmk_flags,
		    VM_KERN_MEMORY_NONE,
		    (ipc_port_t)(uintptr_t)pager,
		    rp->mwlr_file_offset,
		    TRUE,       /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
		    rp->mwlr_protections,
		    rp->mwlr_protections,
		    VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			/* no need to clean up earlier regions, this will be process fatal */
			goto done;
		}
	}

	/* success! */
	kr = KERN_SUCCESS;

done:

	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the pager reference obtained by dyld_pager_setup().
		 * The mapping, if it succeeded, is now holding a reference on the memory object.
		 */
		memory_object_deallocate(pager);
		pager = MEMORY_OBJECT_NULL;
	}
	return kr;
}
1368