xref: /xnu-10002.41.9/osfmk/vm/vm_dyld_pager.c (revision 699cd48037512bf4380799317ca44ca453c82f57)
/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/memory_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos.h>
#include <vm/vm_dyld_pager.h>

#include <sys/kdebug_triage.h>
#include <mach-o/fixup-chains.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm/misc_protos.h>
#endif /* defined(HAS_APPLE_PAC) */

/*
 * DYLD page-in linking pager.
 *
 * This external memory manager (EMM) applies dyld fixups to data
 * pages as they are paged in, allowing the modified pages to appear "clean".
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages are mapped copy-on-write, so that the originals stay clean.
 */
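
/*
 * Editor's overview (a summary of the code below, nothing new): dyld calls
 * map_with_linking_np(), which reaches vm_map_with_linking() in this file.
 * That routine creates one dyld_pager backed by the library file's VM
 * object and maps it over each region that needs fixups.  When a page in
 * such a region is first touched, VM calls dyld_pager_data_request(),
 * which copies the original page from the backing object, rewrites its
 * fixup chains via fixup_page() and friends, and hands the result back to
 * VM marked "clean".
 */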

/* forward declarations */
typedef struct dyld_pager *dyld_pager_t;
static void dyld_pager_reference(memory_object_t mem_obj);
static void dyld_pager_deallocate(memory_object_t mem_obj);
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
static kern_return_t dyld_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
static void dyld_pager_terminate_internal(dyld_pager_t pager);
static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
static kern_return_t dyld_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
static boolean_t dyld_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);
static dyld_pager_t dyld_pager_lookup(memory_object_t mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops dyld_pager_ops = {
	.memory_object_reference = dyld_pager_reference,
	.memory_object_deallocate = dyld_pager_deallocate,
	.memory_object_init = dyld_pager_init,
	.memory_object_terminate = dyld_pager_terminate,
	.memory_object_data_request = dyld_pager_data_request,
	.memory_object_data_return = dyld_pager_data_return,
	.memory_object_data_initialize = dyld_pager_data_initialize,
	.memory_object_map = dyld_pager_map,
	.memory_object_last_unmap = dyld_pager_last_unmap,
	.memory_object_backing_object = dyld_pager_backing_object,
	.memory_object_pager_name = "dyld"
};

/*
 * The "dyld_pager" structure. One of these is created for each call
 * dyld makes to map_with_linking_np().
 */
struct dyld_pager {
	struct memory_object    dyld_header;          /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count           dyld_header.mo_ref
#else
	os_ref_atomic_t         dyld_ref_count;      /* active uses */
#endif
	queue_chain_t           dyld_pager_queue;    /* next & prev pagers */
	bool                    dyld_is_mapped;      /* has active mappings */
	bool                    dyld_is_ready;       /* is this pager ready? */
	vm_object_t             dyld_backing_object; /* VM object for shared cache */
	void                    *dyld_link_info;
	uint32_t                dyld_link_info_size;
	uint32_t                dyld_num_range;
	memory_object_offset_t  dyld_file_offset[MWL_MAX_REGION_COUNT];
	mach_vm_address_t       dyld_address[MWL_MAX_REGION_COUNT];
	mach_vm_size_t          dyld_size[MWL_MAX_REGION_COUNT];
#if defined(HAS_APPLE_PAC)
	uint64_t                dyld_a_key;
#endif /* defined(HAS_APPLE_PAC) */
};

queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);

/*
 * "dyld_pager_lock" for counters, ref counting, etc.
 */
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);

/*
 * Statistics & counters.
 */
uint32_t dyld_pager_count = 0;
uint32_t dyld_pager_count_max = 0;

/*
 * dyld_pager_dequeue()
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "dyld_pager_lock".
 */
static void
dyld_pager_dequeue(
	__unused dyld_pager_t pager)
{
	queue_remove(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	pager->dyld_pager_queue.next = NULL;
	pager->dyld_pager_queue.prev = NULL;
	dyld_pager_count--;
}

/*
 * dyld_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
static kern_return_t
dyld_pager_init(
	memory_object_t                 mem_obj,
	memory_object_control_t         control,
	__unused
	memory_object_cluster_size_t    pg_size)
{
	dyld_pager_t                    pager;
	kern_return_t                   kr;
	memory_object_attr_info_data_t  attributes;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		printf("%s(): control NULL\n", __func__);
		return KERN_INVALID_ARGUMENT;
	}

	pager = dyld_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->dyld_header.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("dyld_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_return()
 *
 * A page-out request from VM -- should never happen, so panic.
 */
static kern_return_t
dyld_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("dyld_pager_data_return: should never happen!");
	return KERN_FAILURE;
}

static kern_return_t
dyld_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("dyld_pager_data_initialize: should never happen");
	return KERN_FAILURE;
}


/*
 * Apply fixups to a page used by a 64-bit process.
 */
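/*
 * Editor's aid: bit layout of the 64-bit chained fixup pointers as decoded
 * by fixupPage64() below (dyld_chained_ptr_64_rebase / dyld_chained_ptr_64_bind
 * in <mach-o/fixup-chains.h>):
 *
 *   bit  63     : 1 = bind, 0 = rebase
 *   bits 51..62 : "next" delta to the following fixup, in 4-byte strides
 *   rebase      : bits 0..35 = target, bits 36..43 = high8 (top byte of the
 *                 resulting pointer)
 *   bind        : bits 0..23 = ordinal into the binds array,
 *                 bits 24..31 = addend
 */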
static kern_return_t
fixupPage64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t                              *bindsArray = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * Done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * walk the chain
	 */
	uint64_t *chain  = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value  = *chain;
		bool     isBind = (value & 0x8000000000000000ULL);
		delta = (value >> 51) & 0xFFF;
		if (isBind) {
			uint32_t bindOrdinal = value & 0x00FFFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 24) & 0xFF;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			/* is rebase */
			uint64_t target = value & 0xFFFFFFFFFULL;
			uint64_t high8  = (value >> 36) & 0xFF;
			*chain = target + targetAdjust + (high8 << 56);
		}
		if (delta * 4 >= PAGE_SIZE) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
			printf("%s(): delta offset > page size %lld\n", __func__, delta * 4);
			return KERN_FAILURE;
		}
		chain = (uint64_t *)((uintptr_t)chain + (delta * 4)); // 4-byte stride
	} while (delta != 0);
	return KERN_SUCCESS;
}


/*
 * Apply fixups along one chain within a page used by a 32-bit process.
 */
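/*
 * Editor's aid: bit layout of the 32-bit chained fixup pointers as decoded
 * by fixupChain32() below (dyld_chained_ptr_32_rebase / dyld_chained_ptr_32_bind
 * in <mach-o/fixup-chains.h>):
 *
 *   bit  31     : 1 = bind, 0 = rebase
 *   bits 26..30 : "next" delta to the following fixup, in 4-byte units
 *   rebase      : bits 0..25 = target (values above max_valid_pointer encode
 *                 non-pointers, recovered by subtracting the bias computed below)
 *   bind        : bits 0..19 = ordinal into the binds array,
 *                 bits 20..25 = addend
 */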
static kern_return_t
fixupChain32(
	uint64_t                              userVA,
	uint32_t                              *chain,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              *bindsArray)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              delta = 0;

	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint32_t value = *chain;
		delta = (value >> 26) & 0x1F;
		if (value & 0x80000000) {
			// is bind
			uint32_t bindOrdinal = value & 0x000FFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)\n",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 20) & 0x3F;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			// is rebase
			uint32_t target = value & 0x03FFFFFF;
			if (target > segInfo->max_valid_pointer) {
				// handle non-pointers in chain
				uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
				*chain = target - bias;
			} else {
				*chain = target + (uint32_t)hdr->mwli_slide;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}


/*
 * Apply fixups to a page used by a 32-bit process.
 */
static kern_return_t
fixupPage32(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	void                                  *link_info,
	uint32_t                              link_info_size,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              startOffset = segInfo->page_start[pageIndex];

	/*
	 * done if no fixups
	 */
	if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
		// some fixups in the page are too far apart, so the page has multiple starts
		uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
		bool chainEnd = false;
		while (!chainEnd) {
			/*
			 * range check against link_info, note +1 to include data we'll dereference
			 */
			if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): out of range segInfo->page_start[overflowIndex]\n", __func__);
				return KERN_FAILURE;
			}
			chainEnd    = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
			startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
			uint32_t *chain = (uint32_t *)(contents + startOffset);
			fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
			++overflowIndex;
		}
	} else {
		uint32_t *chain = (uint32_t *)(contents + startOffset);
		fixupChain32(userVA, chain, contents, end_contents, link_info, segInfo, bindsArray);
	}
	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)
/*
 * Sign a pointer needed for fixups.
 */
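/*
 * Editor's note on the discriminator logic below: the 16-bit "diversity"
 * value from the fixup may optionally be blended with the pointer's user
 * address ("addrDiv"), following the usual PAC convention of
 *
 *     discriminator = blend(address, diversity)
 *
 * before the pointer is signed on the user's behalf with the task's
 * A-key (dyld_a_key).  Only the IA and DA keys are accepted here.
 */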
static kern_return_t
signPointer(
	uint64_t         unsignedAddr,
	void             *loc,
	bool             addrDiv,
	uint16_t         diversity,
	ptrauth_key      key,
	dyld_pager_t     pager,
	uint64_t         *signedAddr)
{
	// don't sign NULL
	if (unsignedAddr == 0) {
		*signedAddr = 0;
		return KERN_SUCCESS;
	}

	uint64_t extendedDiscriminator = diversity;
	if (addrDiv) {
		extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
	}

	switch (key) {
	case ptrauth_key_asia:
	case ptrauth_key_asda:
		if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
			*signedAddr = unsignedAddr;
		} else {
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
		}
		break;

	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * Apply fixups to a page used by a 64-bit process, using pointer authentication.
 */
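/*
 * Editor's aid: bit layout of the DYLD_CHAINED_PTR_ARM64E* fixup pointers
 * as decoded by fixupPageAuth64() below:
 *
 *   bit  63      : isAuth (result must be PAC-signed)
 *   bit  62      : isBind
 *   bits 51..61  : "next" delta to the following fixup, in 8-byte units
 *   auth forms   : bits 49..50 = key, bit 48 = addrDiv,
 *                  bits 32..47 = diversity; bits 0..23 = bind ordinal or
 *                  bits 0..31 = rebase target offset
 *   plain bind   : bits 0..23 = ordinal, bits 32..50 = sign-extended addend
 *   plain rebase : bits 0..42 = target, bits 43..50 = high8
 */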
static kern_return_t
fixupPageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	vm_offset_t                           end_contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t             *bindsArray = (uint64_t *)((uintptr_t)link_info + hdr->mwli_binds_offset);

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		delta = (value >> 51) & 0x7FF;
		bool isAuth = (value & 0x8000000000000000ULL);
		bool isBind = (value & 0x4000000000000000ULL);
		if (isAuth) {
			ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
			bool        addrDiv = ((value & (1ULL << 48)) != 0);
			uint16_t    diversity = (uint16_t)((value >> 32) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					return KERN_FAILURE;
				}
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			} else {
				/* note: auth rebase targets only have 32 bits, so the target is always an offset - never a vmaddr */
				uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			}
		} else {
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					return KERN_FAILURE;
				} else {
					uint64_t addend19 = (value >> 32) & 0x0007FFFF;
					if (addend19 & 0x40000) {
						addend19 |= 0xFFFFFFFFFFFC0000ULL;
					}
					*chain = bindsArray[bindOrdinal] + addend19;
				}
			} else {
				uint64_t target = (value & 0x7FFFFFFFFFFULL);
				uint64_t high8  = (value << 13) & 0xFF00000000000000ULL;
				*chain = target + targetAdjust + high8;
			}
		}
		chain += delta;
	} while (delta != 0);
	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */


/*
 * Handle dyld fixups for a page.
 */
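/*
 * Editor's summary of the dispatch below: fixup_page() first locates the
 * dyld_chained_starts_in_segment covering userVA (bounds-checking all of
 * the seg_info against link_info), then routes by pointer format:
 *
 *   DYLD_CHAINED_PTR_ARM64E              -> fixupPageAuth64(), slide-based
 *   DYLD_CHAINED_PTR_ARM64E_USERLAND(24) -> fixupPageAuth64(), offset-based
 *   DYLD_CHAINED_PTR_64                  -> fixupPage64(), slide-based
 *   DYLD_CHAINED_PTR_64_OFFSET           -> fixupPage64(), offset-based
 *   DYLD_CHAINED_PTR_32                  -> fixupPage32()
 *
 * where "offset-based" means rebase targets are offsets from
 * mwli_image_address rather than vmaddrs adjusted by mwli_slide.
 */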
static kern_return_t
fixup_page(
	vm_offset_t         contents,
	vm_offset_t         end_contents,
	uint64_t            userVA,
	dyld_pager_t        pager)
{
	void                                  *link_info = pager->dyld_link_info;
	uint32_t                              link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	struct dyld_chained_starts_in_segment *segInfo = NULL;
	uint32_t                              pageIndex = 0;
	uint32_t                              segIndex;
	struct dyld_chained_starts_in_image   *startsInfo;
	struct dyld_chained_starts_in_segment *seg;
	uint64_t                              segStartAddress;
	uint64_t                              segEndAddress;

	/*
	 * Note this is a linear search done for every page we have to fix up.
	 * However, it should be quick as there should only be 2 or 4 segments:
	 * - data
	 * - data const
	 * - data auth (for arm64e)
	 * - data const auth (for arm64e)
	 */
	startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
	for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
		seg = (struct dyld_chained_starts_in_segment *)
		    ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);

		/*
		 * ensure we don't go out of bounds of the link_info
		 */
		if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): seg_info out of bounds\n", __func__);
			return KERN_FAILURE;
		}

		segStartAddress = hdr->mwli_image_address + seg->segment_offset;
		segEndAddress = segStartAddress + seg->page_count * seg->page_size;
		if (segStartAddress <= userVA && userVA < segEndAddress) {
			segInfo = seg;
			pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;

			/* ensure seg->size fits in link_info_size */
			if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size out of bounds\n", __func__);
				return KERN_FAILURE;
			}
			if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size too small\n", __func__);
				return KERN_FAILURE;
			}
			/* ensure page_count and pageIndex are valid too */
			if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count out of bounds\n", __func__);
				return KERN_FAILURE;
			}
			if (pageIndex >= seg->page_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count too small\n", __func__);
				return KERN_FAILURE;
			}

			break;
		}
	}

	/*
	 * Open question: could this case be tolerated, i.e. just return
	 * KERN_SUCCESS with nothing to do?  For now, a VA with no segment
	 * is treated as an error.
	 */
	if (segInfo == NULL) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
		return KERN_FAILURE;
	}

	/*
	 * Route to the appropriate fixup routine
	 */
	switch (hdr->mwli_pointer_format) {
#if defined(HAS_APPLE_PAC)
	case DYLD_CHAINED_PTR_ARM64E:
		fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_ARM64E_USERLAND:
	case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
		fixupPageAuth64(userVA, contents, end_contents, pager, segInfo, pageIndex, true);
		break;
#endif /* defined(HAS_APPLE_PAC) */
	case DYLD_CHAINED_PTR_64:
		fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_64_OFFSET:
		fixupPage64(userVA, contents, end_contents, link_info, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_32:
		fixupPage32(userVA, contents, end_contents, link_info, link_info_size, segInfo, pageIndex);
		break;
	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
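/*
 * Editor's outline of the steps below: (1) build a UPL for the requested
 * range, (2) for each page present in the UPL, fault in the corresponding
 * source page from the backing object, (3) validate its code signature and
 * propagate the CS bits into the UPL, (4) copy the source page into the
 * destination page, (5) translate the file offset back to the user VA via
 * the recorded regions and apply fixup_page(), and (6) mark the pages
 * clean and commit (or abort) the UPL.
 */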
static kern_return_t
dyld_pager_data_request(
	memory_object_t              mem_obj,
	memory_object_offset_t       offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t           protection_required,
	memory_object_fault_info_t   mo_fault_info)
{
	dyld_pager_t            pager;
	memory_object_control_t mo_control;
	upl_t                   upl = NULL;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl = NULL;
	unsigned int            pl_count;
	vm_object_t             src_top_object = VM_OBJECT_NULL;
	vm_object_t             src_page_object = VM_OBJECT_NULL;
	vm_object_t             dst_object;
	kern_return_t           kr;
	kern_return_t           retval = KERN_SUCCESS;
	vm_offset_t             src_vaddr;
	vm_offset_t             dst_vaddr;
	vm_offset_t             cur_offset;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	struct mwl_info_hdr     *hdr;
	uint32_t                r;
	uint64_t                userVA;

	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = dyld_pager_lookup(mem_obj);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
	assert(pager->dyld_is_mapped); /* pager is mapped */
	hdr = (struct mwl_info_hdr *)pager->dyld_link_info;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->dyld_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), 0 /* arg */);
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object, itself backed by the executable/library file via
	 * the vnode pager.
	 */
	src_top_object = pager->dyld_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source page in the kernel's virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		kr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (kr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		default:
			panic("dyld_pager_data_request: vm_fault_page() unexpected error 0x%x\n", kr);
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source and destination physical pages.
		 */
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
		}

		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);

		/*
		 * Figure out what the original user virtual address was, based
		 * on the offset: userVA = region address + (file offset - region
		 * file offset).
		 */
		userVA = 0;
		for (r = 0; r < pager->dyld_num_range; ++r) {
			vm_offset_t o = offset + cur_offset;
			if (pager->dyld_file_offset[r] <= o &&
			    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
				userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
				break;
			}
		}

		/*
		 * If we found a valid range, apply the fixups to the page;
		 * otherwise fail the request.
		 */
		if (r == pager->dyld_num_range) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
			retval = KERN_FAILURE;
		} else if (fixup_page(dst_vaddr, dst_vaddr + PAGE_SIZE, userVA, pager) != KERN_SUCCESS) {
			/* KDBG / printf was done under fixup_page() */
			retval = KERN_FAILURE;
		}
		if (retval != KERN_SUCCESS) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		PAGE_WAKEUP_DONE(src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * dyld_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
static void
dyld_pager_reference(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	lck_mtx_unlock(&dyld_pager_lock);
}



/*
 * dyld_pager_terminate_internal()
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
 * to finish the clean up.
 *
 * "dyld_pager_lock" should not be held by the caller.
 */
static void
dyld_pager_terminate_internal(
	dyld_pager_t pager)
{
	assert(pager->dyld_is_ready);
	assert(!pager->dyld_is_mapped);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);

	if (pager->dyld_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, 0);
}

/*
 * dyld_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with dyld_pager_lock held or not, but always returns
 * with it unlocked.
 */
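/*
 * Editor's note on the two-step teardown below: dropping to a count of 1
 * means only the memory object's own reference remains, so the pager is
 * dequeued and termination of the memory object is triggered; the
 * resulting memory_object_deallocate() then drops the count to 0, at
 * which point link_info, the control port reference, and the pager
 * structure itself are finally freed.
 */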
static void
dyld_pager_deallocate_internal(
	dyld_pager_t   pager,
	bool           locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references; the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references: keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * dyld_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
static void
dyld_pager_deallocate(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);
	dyld_pager_deallocate_internal(pager, FALSE);
}

/*
 * dyld_pager_terminate()
 *
 * Called by VM when the memory object is terminated; nothing to do here,
 * since the real cleanup happens in dyld_pager_deallocate_internal().
 */
static kern_return_t
dyld_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}

/*
 * dyld_pager_map()
 *
 * This allows VM to let us, the EMM, know that this memory object
 * is currently mapped one or more times.  This is called by VM each time
 * the memory object gets mapped, but we only take one extra reference the
 * first time it is called.
 */
static kern_return_t
dyld_pager_map(
	memory_object_t         mem_obj,
	__unused vm_prot_t      prot)
{
	dyld_pager_t   pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
	if (!pager->dyld_is_mapped) {
		pager->dyld_is_mapped = TRUE;
		os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	}
	lck_mtx_unlock(&dyld_pager_lock);

	return KERN_SUCCESS;
}

/*
 * dyld_pager_last_unmap()
 *
 * This is called by VM when this memory object is no longer mapped anywhere.
 */
static kern_return_t
dyld_pager_last_unmap(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	if (pager->dyld_is_mapped) {
		/*
		 * All the mappings are gone, so let go of the one extra
		 * reference that represents all the mappings of this pager.
		 */
		pager->dyld_is_mapped = FALSE;
		dyld_pager_deallocate_internal(pager, TRUE);
		/* caution: deallocate_internal() released the lock! */
	} else {
		lck_mtx_unlock(&dyld_pager_lock);
	}

	return KERN_SUCCESS;
}

static boolean_t
dyld_pager_backing_object(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	vm_object_t             *backing_object,
	vm_object_offset_t      *backing_offset)
{
	dyld_pager_t   pager;

	pager = dyld_pager_lookup(mem_obj);

	*backing_object = pager->dyld_backing_object;
	*backing_offset = offset;

	return TRUE;
}


/*
 * Convert from memory_object to dyld_pager.
 */
static dyld_pager_t
dyld_pager_lookup(
	memory_object_t  mem_obj)
{
	dyld_pager_t   pager;

	assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
	pager = (dyld_pager_t)(uintptr_t) mem_obj;
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
	return pager;
}

/*
 * Create and return a pager for the given object with the
 * given slide information.
 */
static dyld_pager_t
dyld_pager_create(
#if !defined(HAS_APPLE_PAC)
	__unused
#endif /* !defined(HAS_APPLE_PAC) */
	task_t            task,
	vm_object_t       backing_object,
	struct mwl_region *regions,
	uint32_t          region_cnt,
	void              *link_info,
	uint32_t          link_info_size)
{
	dyld_pager_t            pager;
	memory_object_control_t control;
	kern_return_t           kr;

	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
	if (pager == NULL) {
		return NULL;
	}

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter.  We need to make sure that
	 * vm_map does not see this object as a named entry port.  So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	pager->dyld_is_ready = FALSE; /* not ready until it has a "name" */
	/* existence reference for the caller */
	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
	pager->dyld_is_mapped = FALSE;
	pager->dyld_backing_object = backing_object;
	pager->dyld_link_info = link_info;
	pager->dyld_link_info_size = link_info_size;
#if defined(HAS_APPLE_PAC)
	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
#endif /* defined(HAS_APPLE_PAC) */

	/*
	 * Record the regions so the pager can find the offset from an address.
	 */
	pager->dyld_num_range = region_cnt;
	for (uint32_t r = 0; r < region_cnt; ++r) {
		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
		pager->dyld_address[r] = regions[r].mwlr_address;
		pager->dyld_size[r] = regions[r].mwlr_size;
	}

	vm_object_reference(backing_object);
	lck_mtx_lock(&dyld_pager_lock);
	queue_enter_first(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	dyld_pager_count++;
	if (dyld_pager_count > dyld_pager_count_max) {
		dyld_pager_count_max = dyld_pager_count;
	}
	lck_mtx_unlock(&dyld_pager_lock);

	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
	assert(kr == KERN_SUCCESS);

	memory_object_mark_trusted(control);

	lck_mtx_lock(&dyld_pager_lock);
	/* the new pager is now ready to be used */
	pager->dyld_is_ready = TRUE;
	lck_mtx_unlock(&dyld_pager_lock);

	/* wakeup anyone waiting for this pager to be ready */
	thread_wakeup(&pager->dyld_is_ready);

	return pager;
}

/*
 * dyld_pager_setup()
 *
 * Provide the caller with a memory object backed by the provided
 * "backing_object" VM object.
 */
static memory_object_t
dyld_pager_setup(
	task_t            task,
	vm_object_t       backing_object,
	struct mwl_region *regions,
	uint32_t          region_cnt,
	void              *link_info,
	uint32_t          link_info_size)
{
	dyld_pager_t      pager;

	/* create new pager */
	pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
	if (pager == NULL) {
		/* could not create a new pager */
		return MEMORY_OBJECT_NULL;
	}

	lck_mtx_lock(&dyld_pager_lock);
	while (!pager->dyld_is_ready) {
		lck_mtx_sleep(&dyld_pager_lock,
		    LCK_SLEEP_DEFAULT,
		    &pager->dyld_is_ready,
		    THREAD_UNINT);
	}
	lck_mtx_unlock(&dyld_pager_lock);

	return (memory_object_t) pager;
}

/*
 * Set up regions which use a special pager to apply dyld fixups.
 *
 * The arguments to this function are mostly just used as input,
 * with the exception of link_info: it is saved in the pager that
 * gets created, so the caller must not free it when KERN_SUCCESS
 * is returned.
 */
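/*
 * Editor's sketch of a kernel-side call, using only the types in this
 * file (the region values are made up for illustration):
 *
 *     struct mwl_region regions[2] = {
 *         { .mwlr_address = data_addr,       .mwlr_size = data_size,
 *           .mwlr_file_offset = data_foff,
 *           .mwlr_protections = VM_PROT_DEFAULT },
 *         { .mwlr_address = data_const_addr, .mwlr_size = data_const_size,
 *           .mwlr_file_offset = data_const_foff,
 *           .mwlr_protections = VM_PROT_DEFAULT },
 *     };
 *     kr = vm_map_with_linking(task, regions, 2, link_info,
 *         link_info_size, file_control);
 *     // on KERN_SUCCESS, ownership of link_info has passed to the pager
 */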
kern_return_t
vm_map_with_linking(
	task_t                  task,
	struct mwl_region       *regions,
	uint32_t                region_cnt,
	void                    *link_info,
	uint32_t                link_info_size,
	memory_object_control_t file_control)
{
	vm_map_t                map = task->map;
	vm_object_t             object = VM_OBJECT_NULL;
	memory_object_t         pager = MEMORY_OBJECT_NULL;
	uint32_t                r;
	vm_map_address_t        map_addr;
	kern_return_t           kr = KERN_SUCCESS;

	object = memory_object_control_to_vm_object(file_control);
	if (object == VM_OBJECT_NULL || object->internal) {
		printf("%s(): no object for file_control\n", __func__);
		object = VM_OBJECT_NULL;
		kr = KERN_INVALID_ADDRESS;
		goto done;
	}

	/* create a pager */
	pager = dyld_pager_setup(task, object, regions, region_cnt, link_info, link_info_size);
	if (pager == MEMORY_OBJECT_NULL) {
		kr = KERN_RESOURCE_SHORTAGE;
		goto done;
	}

	for (r = 0; r < region_cnt; ++r) {
		vm_map_kernel_flags_t vmk_flags = {
			.vmf_fixed = true,
			.vmf_overwrite = true,
			.vmkf_overwrite_immutable = true,
		};
		struct mwl_region *rp = &regions[r];

		/* map that pager over the portion of the mapping that needs sliding */
		map_addr = (vm_map_address_t)rp->mwlr_address;

		if (rp->mwlr_protections & VM_PROT_TPRO) {
			vmk_flags.vmf_tpro = TRUE;
		}

		kr = vm_map_enter_mem_object(map,
		    &map_addr,
		    rp->mwlr_size,
		    (mach_vm_offset_t) 0,
		    vmk_flags,
		    (ipc_port_t)(uintptr_t)pager,
		    rp->mwlr_file_offset,
		    TRUE,       /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
		    rp->mwlr_protections & VM_PROT_DEFAULT,
		    rp->mwlr_protections & VM_PROT_DEFAULT,
		    VM_INHERIT_DEFAULT);
		if (kr != KERN_SUCCESS) {
			/* no need to clean up earlier regions, this will be process fatal */
			goto done;
		}
	}

	/* success! */
	kr = KERN_SUCCESS;

done:

	if (pager != MEMORY_OBJECT_NULL) {
		/*
		 * Release the pager reference obtained by dyld_pager_setup().
		 * The mapping, if it succeeded, is now holding a reference on the memory object.
		 */
		memory_object_deallocate(pager);
		pager = MEMORY_OBJECT_NULL;
	}
	return kr;
}

static uint64_t
dyld_pager_purge(
	dyld_pager_t pager)
{
	uint64_t pages_purged;
	vm_object_t object;

	pages_purged = 0;
	object = memory_object_to_vm_object((memory_object_t) pager);
	assert(object != VM_OBJECT_NULL);
	vm_object_lock(object);
	pages_purged = object->resident_page_count;
	vm_object_reap_pages(object, REAP_DATA_FLUSH);
	pages_purged -= object->resident_page_count;
//	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
	vm_object_unlock(object);
	return pages_purged;
}

uint64_t
dyld_pager_purge_all(void)
{
	uint64_t pages_purged;
	dyld_pager_t pager;

	pages_purged = 0;
	lck_mtx_lock(&dyld_pager_lock);
	queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
		pages_purged += dyld_pager_purge(pager);
	}
	lck_mtx_unlock(&dyld_pager_lock);
#if DEVELOPMENT || DEBUG
	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
#endif /* DEVELOPMENT || DEBUG */
	return pages_purged;
}