/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <vm/memory_object_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_dyld_pager_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_sanitize_internal.h>

#include <sys/kdebug_triage.h>
#include <mach-o/fixup-chains.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm/misc_protos.h>
#endif /* defined(HAS_APPLE_PAC) */


/* For speculation macros */
#if __arm64__
#include <arm64/speculation.h>
#endif /* __arm64__ */

extern int proc_selfpid(void);
extern char *proc_name_address(struct proc *p);

extern int panic_on_dyld_issue;

/*
 * DYLD page in linking pager.
 *
 * This external memory manager (EMM) applies dyld fixups to data
 * pages, allowing the modified pages to appear "clean".
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages are mapped copy-on-write, so that the originals stay clean.
 */
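
/*
 * Page-in flow, in brief (see dyld_pager_data_request() below):
 *  1. VM asks this pager for a range of pages; we gather them in a UPL.
 *  2. Each source page is faulted in from the backing VM object (itself
 *     backed by the executable/library file) and mapped in the kernel.
 *  3. The source contents are copied to the destination page and the dyld
 *     chained fixups for that page are applied in place.
 *  4. The UPL is committed with the pages marked "clean", so the fixed-up
 *     contents look like the pager's original data.
 */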

/* forward declarations */
typedef struct dyld_pager *dyld_pager_t;
static void dyld_pager_reference(memory_object_t mem_obj);
static void dyld_pager_deallocate(memory_object_t mem_obj);
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
static kern_return_t dyld_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
static void dyld_pager_terminate_internal(dyld_pager_t pager);
static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t      data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
static kern_return_t dyld_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
static boolean_t dyld_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);
static dyld_pager_t dyld_pager_lookup(memory_object_t  mem_obj);

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops dyld_pager_ops = {
	.memory_object_reference = dyld_pager_reference,
	.memory_object_deallocate = dyld_pager_deallocate,
	.memory_object_init = dyld_pager_init,
	.memory_object_terminate = dyld_pager_terminate,
	.memory_object_data_request = dyld_pager_data_request,
	.memory_object_data_return = dyld_pager_data_return,
	.memory_object_data_initialize = dyld_pager_data_initialize,
	.memory_object_map = dyld_pager_map,
	.memory_object_last_unmap = dyld_pager_last_unmap,
	.memory_object_backing_object = dyld_pager_backing_object,
	.memory_object_pager_name = "dyld"
};

/* Function that advances a pointer by a delta, but only if the result stays within the same page (enforced, even speculatively, via nospec ISA) */
static inline bool
_delta_ptr_within_page_nospec(uint64_t ** __nonnull ptr, uint64_t deltaByteCount, bool *crossing_page, uintptr_t userVA)
{
	uintptr_t old_page = (uintptr_t)*ptr >> PAGE_SHIFT;
	uintptr_t new_page = ((uintptr_t)*ptr + deltaByteCount) >> PAGE_SHIFT;
	uint64_t  nospec_delta = deltaByteCount;
	uintptr_t page_offset = (uintptr_t)*ptr & PAGE_MASK;
#if __arm64__
	bool      nospec_delta_valid = false;
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ nospec_delta, /* out_valid */ nospec_delta_valid,
		/* value */ nospec_delta,
		/* cmp1 */ old_page, /* cmp2 */ new_page,
		/* cc */ "EQ");
#elif __i386__ || __x86_64__
	if (old_page == new_page) {
		nospec_delta = deltaByteCount;
	} else {
		nospec_delta = 0;
	}
	// MAYBE: lfence here
#endif /* __arm64__ */
	*ptr = (uint64_t*)((uintptr_t)*ptr + nospec_delta);
	*crossing_page = nospec_delta != deltaByteCount;
	if (*crossing_page) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
		printf("%s(): fixup chain delta crossing to the next page [{%p} + {%lld}]\n", __func__, (void*)(userVA + page_offset), deltaByteCount);
		if (panic_on_dyld_issue) {
			panic("%s(): delta offset > page size %lld", __func__, deltaByteCount);
		}
	}

	if (nospec_delta != 0) {
		return true;
	} else {
		return false;
	}
}
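
/*
 * Worked example (assuming 16K pages, i.e. PAGE_SHIFT == 14): a chain
 * pointer at page offset 0x3ff8 advanced by deltaByteCount 0x10 would land
 * at offset 0x4008, in the next page.  old_page != new_page, so the guard
 * forces nospec_delta to 0: the pointer does not move, *crossing_page is
 * set, and the caller bails out with KERN_FAILURE instead of walking (even
 * speculatively) off the end of the page.
 */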

static inline bool
_delta_ptr_within_page32_nospec(uint32_t ** __nonnull ptr, uint32_t deltaByteCount, bool *crossing_page, uintptr_t userVA)
{
	uintptr_t old_page = (uintptr_t)*ptr >> PAGE_SHIFT;
	uintptr_t new_page = ((uintptr_t)*ptr + deltaByteCount) >> PAGE_SHIFT;
	uintptr_t page_offset = (uintptr_t)*ptr & PAGE_MASK;
	uint64_t  nospec_delta = deltaByteCount;
#if __arm64__
	bool      nospec_delta_valid = false;
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ nospec_delta, /* out_valid */ nospec_delta_valid,
		/* value */ nospec_delta,
		/* cmp1 */ old_page, /* cmp2 */ new_page,
		/* cc */ "EQ");
#elif __i386__ || __x86_64__
	if (old_page == new_page) {
		nospec_delta = deltaByteCount;
	} else {
		nospec_delta = 0;
	}
	// MAYBE: lfence here
#endif /* __arm64__ */
	*ptr = (uint32_t*)((uintptr_t)*ptr + nospec_delta);
	*crossing_page = nospec_delta != deltaByteCount;
	if (*crossing_page) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
		printf("%s(): fixup chain delta crossing to the next page [{%p} + {%d}]\n", __func__, (void*)(userVA + page_offset), deltaByteCount);
		if (panic_on_dyld_issue) {
			panic("%s(): delta offset > page size %d", __func__, deltaByteCount);
		}
	}
	if (nospec_delta != 0) {
		return true;
	} else {
		return false;
	}
}

/*
 * The "dyld_pager" structure. We create one of these for each
 * map_with_linking_np() call that dyld makes.
 */
struct dyld_pager {
	struct memory_object    dyld_header;          /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count           dyld_header.mo_ref
#else
	os_ref_atomic_t         dyld_ref_count;      /* active uses */
#endif
	queue_chain_t           dyld_pager_queue;    /* next & prev pagers */
	bool                    dyld_is_mapped;      /* has active mappings */
	bool                    dyld_is_ready;       /* is this pager ready? */
	vm_object_t             dyld_backing_object; /* VM object for shared cache */
	void                    *dyld_link_info;
	uint32_t                dyld_link_info_size;
	uint32_t                dyld_num_range;
	memory_object_offset_t  dyld_file_offset[MWL_MAX_REGION_COUNT];
	mach_vm_address_t       dyld_address[MWL_MAX_REGION_COUNT];
	mach_vm_size_t          dyld_size[MWL_MAX_REGION_COUNT];
#if defined(HAS_APPLE_PAC)
	uint64_t                dyld_a_key;
#endif /* defined(HAS_APPLE_PAC) */
};
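
/*
 * Reference counting, in brief: a pager starts out with one reference for
 * its creator; dyld_pager_map() takes one extra reference that stands for
 * all of the mappings, which dyld_pager_last_unmap() drops.  When the count
 * falls back to 1, dyld_pager_deallocate_internal() dequeues the pager and
 * triggers termination of the memory object; the final reference drop then
 * frees the link info and the pager structure itself.
 */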

queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);

/*
 * "dyld_pager_lock" for counters, ref counting, etc.
 */
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);

/*
 * Statistics & counters.
 */
uint32_t dyld_pager_count = 0;
uint32_t dyld_pager_count_max = 0;

/*
 * dyld_pager_dequeue()
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "dyld_pager_lock".
 */
static void
dyld_pager_dequeue(
	__unused dyld_pager_t pager)
{
	queue_remove(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	pager->dyld_pager_queue.next = NULL;
	pager->dyld_pager_queue.prev = NULL;
	dyld_pager_count--;
}

/*
 * dyld_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
static kern_return_t
dyld_pager_init(
	memory_object_t                 mem_obj,
	memory_object_control_t         control,
	__unused
	memory_object_cluster_size_t    pg_size)
{
	dyld_pager_t                    pager;
	kern_return_t                   kr;
	memory_object_attr_info_data_t  attributes;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		printf("%s(): control NULL\n", __func__);
		return KERN_INVALID_ARGUMENT;
	}

	pager = dyld_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->dyld_header.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("dyld_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_return()
 *
 * A page-out request from VM -- should never happen, so panic.
 */
static kern_return_t
dyld_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("dyld_pager_data_return: should never happen!");
	return KERN_FAILURE;
}

static kern_return_t
dyld_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("dyld_pager_data_initialize: should never happen");
	return KERN_FAILURE;
}


/*
 * Apply fixups to a page used by a 64-bit process.
 */
static kern_return_t
fixupPage64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
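	/*
	 * DYLD_CHAINED_PTR_64 encoding, as decoded below (matches
	 * dyld_chained_ptr_64_{rebase,bind} in <mach-o/fixup-chains.h>):
	 *   bit  63      bind      1 = bind, 0 = rebase
	 *   bits 51..62  next      delta to the next fixup, in 4-byte strides
	 *   bind:   bits 0..23 ordinal, bits 24..31 addend
	 *   rebase: bits 0..35 target,  bits 36..43 high8 (placed in the
	 *           top byte of the fixed-up pointer)
	 */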
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t                              *bindsArray  = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              firstStartOffset = segInfo->page_start[pageIndex];
	vm_offset_t                           end_contents = contents + PAGE_SIZE;
	//  For DYLD_CHAINED_PTR_64 (arm64 and x86_64) and DYLD_CHAINED_PTR_32 (arm64_32) the stride is always 4
	uint64_t                              step_multiplier = 4; // 4-byte stride
	/*
	 * Done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * walk the chain
	 */
	uint64_t *chain  = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	bool     valid_chain = false;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}

			return KERN_FAILURE;
		}
		uint64_t value  = *chain;
		bool     isBind = (value & 0x8000000000000000ULL);
		/* delta that can be used speculatively */
		delta = (value >> 51) & 0xFFF;
		delta *= step_multiplier;
		if (isBind) {
			uint32_t bindOrdinal = value & 0x00FFFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
				if (panic_on_dyld_issue) {
					panic("%s out of range bind ordinal %u (max %u)", __func__,
					    bindOrdinal, hdr->mwli_binds_count);
				}
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 24) & 0xFF;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			/* is rebase */
			uint64_t target = value & 0xFFFFFFFFFULL;
			uint64_t high8  = (value >> 36) & 0xFF;
			*chain = target + targetAdjust + (high8 << 56);
		}
		/* advances chain by delta; after this point chain cannot be used to access outside of the page, even speculatively */
		bool crossing_page = false;
		valid_chain = _delta_ptr_within_page_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);
		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (valid_chain);
	return KERN_SUCCESS;
}


/*
 * Apply fixups within a page used by a 32-bit process.
 */
static kern_return_t
fixupPageChain32(
	uint64_t                              userVA,
	uint32_t                              *chain,
	vm_offset_t                           contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              *bindsArray)
{
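	/*
	 * DYLD_CHAINED_PTR_32 encoding, as decoded below (matches
	 * dyld_chained_ptr_32_{rebase,bind} in <mach-o/fixup-chains.h>):
	 *   bit  31      bind      1 = bind, 0 = rebase
	 *   bits 26..30  next      delta to the next fixup, in 4-byte strides
	 *   bind:   bits 0..19 ordinal, bits 20..25 addend
	 *   rebase: bits 0..25 target; values above max_valid_pointer
	 *           encode biased non-pointer data rather than an address
	 */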
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              delta = 0;
	bool                                  chain_valid = false;
	vm_offset_t                           end_contents = contents + PAGE_SIZE;
	uint32_t                              step_multiplier = 4; // always 4-byte stride
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint32_t value = *chain;
		/* delta that can be used speculatively */
		delta = (value >> 26) & 0x1F;
		delta *= step_multiplier;
		if (value & 0x80000000) {
			// is bind
			uint32_t bindOrdinal = value & 0x000FFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)\n",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
				if (panic_on_dyld_issue) {
					panic("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
				}
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 20) & 0x3F;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			// is rebase
			uint32_t target = value & 0x03FFFFFF;
			if (target > segInfo->max_valid_pointer) {
				// handle non-pointers in chain
				uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
				*chain = target - bias;
			} else {
				*chain = target + (uint32_t)hdr->mwli_slide;
			}
		}
		bool crossing_page = false;
		chain_valid = _delta_ptr_within_page32_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);

		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (chain_valid);
	return KERN_SUCCESS;
}


/*
 * Apply fixups to a page used by a 32-bit process.
 */
static kern_return_t
fixupPage32(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	void                                  *link_info,
	uint32_t                              link_info_size,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr  *)link_info;
	uint32_t                              *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              startOffset = segInfo->page_start[pageIndex];
	/*
	 * done if no fixups
	 */
	if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
		// some fixups in the page are too far apart, so the page has multiple chain starts
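		// Example: a page_start[] value of (DYLD_CHAINED_PTR_START_MULTI | n)
		// redirects to overflow entries page_start[n], page_start[n+1], ...,
		// each holding a separate chain-start offset for this page; the last
		// entry of the run is flagged with DYLD_CHAINED_PTR_START_LAST.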
		uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
		bool chainEnd = false;
		while (!chainEnd) {
			/*
			 * range check against link_info, note +1 to include data we'll dereference
			 */
			if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): out of range segInfo->page_start[overflowIndex]\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): out of range segInfo->page_start[overflowIndex]", __func__);
				}
				return KERN_FAILURE;
			}
			chainEnd    = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
			startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
			uint32_t *chain = (uint32_t *)(contents + startOffset);
			fixupPageChain32(userVA, chain, contents, link_info, segInfo, bindsArray);
			++overflowIndex;
		}
	} else {
		uint32_t *chain = (uint32_t *)(contents + startOffset);
		fixupPageChain32(userVA, chain, contents, link_info, segInfo, bindsArray);
	}
	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)
/*
 * Sign a pointer needed for fixups.
 */
static kern_return_t
signPointer(
	uint64_t         unsignedAddr,
	void             *loc,
	bool             addrDiv,
	uint16_t         diversity,
	ptrauth_key      key,
	dyld_pager_t     pager,
	uint64_t         *signedAddr)
{
	// don't sign NULL
	if (unsignedAddr == 0) {
		*signedAddr = 0;
		return KERN_SUCCESS;
	}

	uint64_t extendedDiscriminator = diversity;
	if (addrDiv) {
		extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
	}

	switch (key) {
	case ptrauth_key_asia:
	case ptrauth_key_asda:
		if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
			*signedAddr = unsignedAddr;
		} else {
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
		}
		break;

	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
		if (panic_on_dyld_issue) {
			panic("%s(): Invalid ptr auth key %d", __func__, key);
		}
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * Apply fixups to a page used by a 64-bit process using pointer authentication.
 */
static kern_return_t
fixupPageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
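	/*
	 * DYLD_CHAINED_PTR_ARM64E* encoding, as decoded below (matches
	 * dyld_chained_ptr_arm64e_* in <mach-o/fixup-chains.h>):
	 *   bit  63      auth      1 = authenticated pointer
	 *   bit  62      bind      1 = bind, 0 = rebase
	 *   bits 51..61  next      delta to the next fixup, in 8-byte strides
	 *   auth:     bits 49..50 key, bit 48 addrDiv, bits 32..47 diversity,
	 *             bits 0..31 bind ordinal or rebase target offset
	 *   non-auth: bind   bits 0..23 ordinal, bits 32..50 signed addend
	 *             rebase bits 0..42 target, bits 43..50 high8
	 */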
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t             *bindsArray = (uint64_t*)((uintptr_t)link_info + hdr->mwli_binds_offset);
	vm_offset_t          end_contents = contents + PAGE_SIZE;
	bool                 valid_chain = false;
	uint64_t             step_multiplier = 8; // always 8-byte stride for arm64e pages

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		if (panic_on_dyld_issue) {
			panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		}
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		/* delta that can be used speculatively */
		delta = (value >> 51) & 0x7FF;
		delta *= step_multiplier;
		bool isAuth = (value & 0x8000000000000000ULL);
		bool isBind = (value & 0x4000000000000000ULL);
		if (isAuth) {
			ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
			bool        addrDiv = ((value & (1ULL << 48)) != 0);
			uint16_t    diversity = (uint16_t)((value >> 32) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					if (panic_on_dyld_issue) {
						panic("%s(): out of range bind ordinal %u (max %u)",
						    __func__, bindOrdinal, hdr->mwli_binds_count);
					}
					return KERN_FAILURE;
				}
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			} else {
				/* note: auth rebases only have 32 bits, so the target is always an offset - never a vmaddr */
				uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			}
		} else {
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					if (panic_on_dyld_issue) {
						panic("%s(): out of range bind ordinal %u (max %u)",
						    __func__, bindOrdinal, hdr->mwli_binds_count);
					}
					return KERN_FAILURE;
				} else {
					uint64_t addend19 = (value >> 32) & 0x0007FFFF;
					if (addend19 & 0x40000) {
						addend19 |=  0xFFFFFFFFFFFC0000ULL;
					}
					*chain = bindsArray[bindOrdinal] + addend19;
				}
			} else {
				uint64_t target = (value & 0x7FFFFFFFFFFULL);
				uint64_t high8  = (value << 13) & 0xFF00000000000000ULL;
				*chain = target + targetAdjust + high8;
			}
		}
		bool crossing_page = false;
		valid_chain = _delta_ptr_within_page_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);

		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (valid_chain);
	return KERN_SUCCESS;
}

/*
 * Apply fixups to a shared-cache page used by a 64-bit process using
 * pointer authentication.
 */
static kern_return_t
fixupCachePageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
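	/*
	 * DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE encoding, as decoded below
	 * (matches dyld_chained_ptr_arm64e_shared_cache_* in
	 * <mach-o/fixup-chains.h>):
	 *   bit  63      auth      1 = authenticated pointer
	 *   bits 52..62  next      delta to the next fixup, in 8-byte strides
	 *   auth:     bit 51 keyIsData (DA vs IA), bit 50 addrDiv,
	 *             bits 34..49 diversity, bits 0..33 runtime offset
	 *   non-auth: bits 34..41 high8, bits 0..33 runtime offset
	 */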
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	vm_offset_t          end_contents = contents + PAGE_SIZE;
	bool                 valid_chain = false;
	uint64_t             step_multiplier = 8; // always 8-byte stride for arm64e

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		if (panic_on_dyld_issue) {
			panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		}
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		/* delta that can be used speculatively */
		delta = (value >> 52) & 0x7FF;
		delta *= step_multiplier;
		bool isAuth = (value & 0x8000000000000000ULL);
		if (isAuth) {
			bool        addrDiv = ((value & (1ULL << 50)) != 0);
			bool        keyIsData = ((value & (1ULL << 51)) != 0);
			// the key is always an A key; the bit tells us whether it's IA or DA
			ptrauth_key key = keyIsData ? ptrauth_key_asda : ptrauth_key_asia;
			uint16_t    diversity = (uint16_t)((value >> 34) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
				return KERN_FAILURE;
			}
		} else {
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			uint64_t high8  = (value << 22) & 0xFF00000000000000ULL;
			*chain = target + high8;
		}
		bool crossing_page = false;
		valid_chain = _delta_ptr_within_page_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);
		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (valid_chain);
	return KERN_SUCCESS;
}
#endif /* defined(HAS_APPLE_PAC) */


/*
 * Handle dyld fixups for a page.
 */
static kern_return_t
fixup_page(
	vm_offset_t         contents,
	uint64_t            userVA,
	dyld_pager_t        pager)
{
	void                                  *link_info = pager->dyld_link_info;
	uint32_t                              link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	struct dyld_chained_starts_in_segment *segInfo = NULL;
	uint32_t                              pageIndex = 0;
	uint32_t                              segIndex;
	struct dyld_chained_starts_in_image   *startsInfo;
	struct dyld_chained_starts_in_segment *seg;
	uint64_t                              segStartAddress;
	uint64_t                              segEndAddress;

	/*
	 * Note this is a linear search done for every page we have to fix up.
	 * However, it should be quick as there should only be 2 or 4 segments:
	 * - data
	 * - data const
	 * - data auth (for arm64e)
	 * - data const auth (for arm64e)
	 */
	startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
	for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
		seg = (struct dyld_chained_starts_in_segment *)
		    ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);

		/*
		 * ensure we don't go out of bounds of the link_info
		 */
		if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): seg_info out of bounds\n", __func__);
			if (panic_on_dyld_issue) {
				panic("%s(): seg_info out of bounds", __func__);
			}
			return KERN_FAILURE;
		}

		segStartAddress = hdr->mwli_image_address + seg->segment_offset;
		segEndAddress = segStartAddress + seg->page_count * seg->page_size;
		if (segStartAddress <= userVA && userVA < segEndAddress) {
			segInfo = seg;
			pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;

			/* ensure seg->size fits in link_info_size */
			if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size out of bounds\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->size out of bounds", __func__);
				}
				return KERN_FAILURE;
			}
			if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size too small\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->size too small", __func__);
				}
				return KERN_FAILURE;
			}
			/* ensure page_count and pageIndex are valid too */
			if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count out of bounds\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->page_count out of bounds", __func__);
				}
				return KERN_FAILURE;
			}
			if (pageIndex >= seg->page_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count too small\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->page_count too small", __func__);
				}
				return KERN_FAILURE;
			}

			break;
		}
	}

	/*
	 * Open question: could we treat this as OK and just return
	 * KERN_SUCCESS, since there is nothing to do?
	 */
	if (segInfo == NULL) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
		if (panic_on_dyld_issue) {
			panic("%s(): No segment for user VA 0x%llx", __func__, (long long)userVA);
		}
		return KERN_FAILURE;
	}

	/*
	 * Route to the appropriate fixup routine
	 */
	switch (hdr->mwli_pointer_format) {
#if defined(HAS_APPLE_PAC)
	case DYLD_CHAINED_PTR_ARM64E:
		fixupPageAuth64(userVA, contents, pager, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_ARM64E_USERLAND:
	case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
		fixupPageAuth64(userVA, contents, pager, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE:
		fixupCachePageAuth64(userVA, contents, pager, segInfo, pageIndex);
		break;
#endif /* defined(HAS_APPLE_PAC) */
	case DYLD_CHAINED_PTR_64:
		fixupPage64(userVA, contents, link_info, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_64_OFFSET:
		fixupPage64(userVA, contents, link_info, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_32:
		fixupPage32(userVA, contents, link_info, link_info_size, segInfo, pageIndex);
		break;
	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
		if (panic_on_dyld_issue) {
			panic("%s(): unknown pointer_format %d", __func__, hdr->mwli_pointer_format);
		}
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}

/*
 * dyld_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
static kern_return_t
dyld_pager_data_request(
	memory_object_t              mem_obj,
	memory_object_offset_t       offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t           protection_required,
	memory_object_fault_info_t   mo_fault_info)
{
	dyld_pager_t            pager;
	memory_object_control_t mo_control;
	upl_t                   upl = NULL;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl = NULL;
	unsigned int            pl_count;
	vm_object_t             src_top_object = VM_OBJECT_NULL;
	vm_object_t             src_page_object = VM_OBJECT_NULL;
	vm_object_t             dst_object;
	kern_return_t           kr;
	kern_return_t           retval = KERN_SUCCESS;
	vm_fault_return_t       vmfr;
	vm_offset_t             src_vaddr;
	vm_offset_t             dst_vaddr;
	vm_offset_t             cur_offset;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	struct mwl_info_hdr     *hdr;
	uint32_t                r;
	uint64_t                userVA;

	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = dyld_pager_lookup(mem_obj);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
	assert(pager->dyld_is_mapped); /* pager is mapped */
	hdr = (struct mwl_info_hdr *)pager->dyld_link_info;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->dyld_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), kr /* arg */);
		if (panic_on_dyld_issue) {
			panic("%s(): upl_request(%p, 0x%llx, 0x%llx) ret %d", __func__,
			    mo_control, offset, (uint64_t)upl_size, kr);
		}
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object, itself backed by the executable/library file via
	 * the vnode pager.
	 */
	src_top_object = pager->dyld_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source page in the kernel's virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		vmfr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (vmfr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		case VM_FAULT_BUSY:
			retval = KERN_ALREADY_WAITING;
			goto done;
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source and destination physical pages.
		 */
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
		}

		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);

		/*
		 * Figure out what the original user virtual address was, based on the offset.
		 */
		userVA = 0;
		for (r = 0; r < pager->dyld_num_range; ++r) {
			vm_offset_t o = offset + cur_offset;
			if (pager->dyld_file_offset[r] <= o &&
			    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
				userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
				break;
			}
		}

		/*
		 * If we found a valid range, fix up the page.
		 */
		if (r == pager->dyld_num_range) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
			if (panic_on_dyld_issue) {
				panic("%s(): Range not found for offset 0x%llx", __func__, (long long)cur_offset);
			}
			retval = KERN_FAILURE;
		} else if (fixup_page(dst_vaddr, userVA, pager) != KERN_SUCCESS) {
			/* KDBG / printf was done under fixup_page() */
			retval = KERN_FAILURE;
		}
		if (retval != KERN_SUCCESS) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
			if (panic_on_dyld_issue) {
				panic("%s(): dyld pager slide error %d at 0x%llx", __func__, retval, (uint64_t)userVA);
			}
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		vm_page_wakeup_done(src_page_object, src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * dyld_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
static void
dyld_pager_reference(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	lck_mtx_unlock(&dyld_pager_lock);
}


/*
 * dyld_pager_terminate_internal()
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
 * to finish the clean up.
 *
 * "dyld_pager_lock" should not be held by the caller.
 */
static void
dyld_pager_terminate_internal(
	dyld_pager_t pager)
{
	assert(pager->dyld_is_ready);
	assert(!pager->dyld_is_mapped);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);

	if (pager->dyld_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, VM_OBJECT_DESTROY_PAGER);
}

/*
 * dyld_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with dyld_pager_lock held or not, but always returns
 * with it unlocked.
 */
static void
dyld_pager_deallocate_internal(
	dyld_pager_t   pager,
	bool           locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * dyld_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
static void
dyld_pager_deallocate(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);
	dyld_pager_deallocate_internal(pager, FALSE);
}

/*
 * dyld_pager_terminate()
 *
 * Nothing to do here; the real cleanup happens when the last reference
 * is dropped (see dyld_pager_terminate_internal()).
 */
static kern_return_t
dyld_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	return KERN_SUCCESS;
}
1387 
1388 /*
1389  * dyld_pager_map()
1390  *
1391  * This allows VM to let us, the EMM, know that this memory object
1392  * is currently mapped one or more times.  This is called by VM each time
1393  * the memory object gets mapped, but we only take one extra reference the
1394  * first time it is called.
1395  */
1396 static kern_return_t
1397 dyld_pager_map(
1398 	memory_object_t         mem_obj,
1399 	__unused vm_prot_t      prot)
1400 {
1401 	dyld_pager_t   pager;
1402 
1403 	pager = dyld_pager_lookup(mem_obj);
1404 
1405 	lck_mtx_lock(&dyld_pager_lock);
1406 	assert(pager->dyld_is_ready);
1407 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
1408 	if (!pager->dyld_is_mapped) {
1409 		pager->dyld_is_mapped = TRUE;
1410 		os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1411 	}
1412 	lck_mtx_unlock(&dyld_pager_lock);
1413 
1414 	return KERN_SUCCESS;
1415 }
1416 
1417 /*
1418  * dyld_pager_last_unmap()
1419  *
1420  * This is called by VM when this memory object is no longer mapped anywhere.
1421  */
1422 static kern_return_t
1423 dyld_pager_last_unmap(
1424 	memory_object_t mem_obj)
1425 {
1426 	dyld_pager_t    pager;
1427 
1428 	pager = dyld_pager_lookup(mem_obj);
1429 
1430 	lck_mtx_lock(&dyld_pager_lock);
1431 	if (pager->dyld_is_mapped) {
1432 		/*
1433 		 * All the mappings are gone, so let go of the one extra
1434 		 * reference that represents all the mappings of this pager.
1435 		 */
1436 		pager->dyld_is_mapped = FALSE;
1437 		dyld_pager_deallocate_internal(pager, TRUE);
1438 		/* caution: deallocate_internal() released the lock ! */
1439 	} else {
1440 		lck_mtx_unlock(&dyld_pager_lock);
1441 	}
1442 
1443 	return KERN_SUCCESS;
1444 }
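/*
 * Editorial sketch of the mapping-reference protocol implemented by
 * dyld_pager_map() / dyld_pager_last_unmap() above: however many times
 * the object is mapped, all the mappings are covered by a single extra
 * pager reference.
 */
#if 0	/* illustrative only -- not compiled */
	dyld_pager_map(mem_obj, prot);          /* first map:  +1 reference */
	dyld_pager_map(mem_obj, prot);          /* later maps: no extra ref */
	dyld_pager_last_unmap(mem_obj);         /* all unmapped: -1 reference */
#endif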
1445 
1446 static boolean_t
1447 dyld_pager_backing_object(
1448 	memory_object_t         mem_obj,
1449 	memory_object_offset_t  offset,
1450 	vm_object_t             *backing_object,
1451 	vm_object_offset_t      *backing_offset)
1452 {
1453 	dyld_pager_t   pager;
1454 
1455 	pager = dyld_pager_lookup(mem_obj);
1456 
1457 	*backing_object = pager->dyld_backing_object;
1458 	*backing_offset = offset;
1459 
1460 	return TRUE;
1461 }
1462 
1463 
1464 /*
1465  * Convert from memory_object to dyld_pager.
1466  */
1467 static dyld_pager_t
1468 dyld_pager_lookup(
1469 	memory_object_t  mem_obj)
1470 {
1471 	dyld_pager_t   pager;
1472 
1473 	assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
1474 	pager = (dyld_pager_t)(uintptr_t) mem_obj;
1475 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
1476 	return pager;
1477 }
1478 
1479 /*
1480  * Create and return a pager for the given object with the
1481  * given slide information.
1482  */
1483 static dyld_pager_t
1484 dyld_pager_create(
1485 #if !defined(HAS_APPLE_PAC)
1486 	__unused
1487 #endif /* !defined(HAS_APPLE_PAC) */
1488 	task_t            task,
1489 	vm_object_t       backing_object,
1490 	struct mwl_region *regions,
1491 	uint32_t          region_cnt,
1492 	void              *link_info,
1493 	uint32_t          link_info_size)
1494 {
1495 	dyld_pager_t            pager;
1496 	memory_object_control_t control;
1497 	kern_return_t           kr;
1498 
1499 	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
1500 	if (pager == NULL) {
1501 		return NULL;
1502 	}
1503 
1504 	/*
1505 	 * The vm_map call takes both named entry ports and raw memory
1506 	 * objects in the same parameter.  We need to make sure that
1507 	 * vm_map does not see this object as a named entry port.  So,
1508 	 * we reserve the first word in the object for a fake object type
1509 	 * setting - that will tell vm_map to use it as a memory object.
1510 	 */
1511 	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
1512 	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
1513 	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1514 
1515 	pager->dyld_is_ready = FALSE; /* not ready until it has a "name" */
1516 	/* existence reference for the caller */
1517 	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
1518 	pager->dyld_is_mapped = FALSE;
1519 	pager->dyld_backing_object = backing_object;
1520 	pager->dyld_link_info = link_info; /* pager takes ownership of this pointer here */
1521 	pager->dyld_link_info_size = link_info_size;
1522 #if defined(HAS_APPLE_PAC)
1523 	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
1524 #endif /* defined(HAS_APPLE_PAC) */
1525 
1526 	/*
1527 	 * Record the regions so the pager can find the offset from an address.
1528 	 */
1529 	pager->dyld_num_range = region_cnt;
1530 	for (uint32_t r = 0; r < region_cnt; ++r) {
1531 		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
1532 		pager->dyld_address[r] = regions[r].mwlr_address;
1533 		pager->dyld_size[r] = regions[r].mwlr_size;
1534 	}
1535 
1536 	vm_object_reference(backing_object);
1537 	lck_mtx_lock(&dyld_pager_lock);
1538 	queue_enter_first(&dyld_pager_queue,
1539 	    pager,
1540 	    dyld_pager_t,
1541 	    dyld_pager_queue);
1542 	dyld_pager_count++;
1543 	if (dyld_pager_count > dyld_pager_count_max) {
1544 		dyld_pager_count_max = dyld_pager_count;
1545 	}
1546 	lck_mtx_unlock(&dyld_pager_lock);
1547 
1548 	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
1549 	assert(kr == KERN_SUCCESS);
1550 
1551 	memory_object_mark_trusted(control);
1552 
1553 	lck_mtx_lock(&dyld_pager_lock);
1554 	/* the new pager is now ready to be used */
1555 	pager->dyld_is_ready = TRUE;
1556 	lck_mtx_unlock(&dyld_pager_lock);
1557 
1558 	/* wakeup anyone waiting for this pager to be ready */
1559 	thread_wakeup(&pager->dyld_is_ready);
1560 
1561 	return pager;
1562 }
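/*
 * Editorial sketch of the layout assumption behind the code above and
 * behind the cast in dyld_pager_lookup(): the memory object header must
 * be the first member of struct dyld_pager, so the fake
 * IKOT_MEMORY_OBJECT word really is the first word that vm_map looks
 * at.  Members other than the header are elided here.
 */
#if 0	/* illustrative only -- not compiled */
struct dyld_pager {
	struct memory_object   dyld_header;   /* mo_ikot, mo_pager_ops, mo_control; must stay first */
	/* ... reference count, queue linkage, backing object, regions, link info ... */
};
#endif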
1563 
1564 /*
1565  * dyld_pager_setup()
1566  *
1567  * Provide the caller with a memory object backed by the provided
1568  * "backing_object" VM object.
1569  */
1570 static memory_object_t
1571 dyld_pager_setup(
1572 	task_t            task,
1573 	vm_object_t       backing_object,
1574 	struct mwl_region *regions,
1575 	uint32_t          region_cnt,
1576 	void              *link_info,
1577 	uint32_t          link_info_size)
1578 {
1579 	dyld_pager_t      pager;
1580 
1581 	/* create new pager */
1582 	pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
1583 	if (pager == NULL) {
1584 		/* could not create a new pager */
1585 		return MEMORY_OBJECT_NULL;
1586 	}
1587 
1588 	lck_mtx_lock(&dyld_pager_lock);
1589 	while (!pager->dyld_is_ready) {
1590 		lck_mtx_sleep(&dyld_pager_lock,
1591 		    LCK_SLEEP_DEFAULT,
1592 		    &pager->dyld_is_ready,
1593 		    THREAD_UNINT);
1594 	}
1595 	lck_mtx_unlock(&dyld_pager_lock);
1596 
1597 	return (memory_object_t) pager;
1598 }
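/*
 * Editorial note: the lck_mtx_sleep() loop above pairs with the
 * thread_wakeup(&pager->dyld_is_ready) at the end of
 * dyld_pager_create(), so dyld_pager_setup() never returns a pager
 * before it has its memory object control (its "name").
 */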
1599 
1600 /*
1601  * Set up regions which use a special pager to apply dyld fixups.
1602  *
1603  * The arguments to this function are mostly just used as input,
1604  * except for link_info, which is saved off in the pager that
1605  * gets created. If the pager assumes ownership of *link_info,
1606  * the argument is NULLed; if not, the caller needs to free it on error.
1607  */
1608 kern_return_t
1609 vm_map_with_linking(
1610 	task_t                  task,
1611 	struct mwl_region       *regions,
1612 	uint32_t                region_cnt,
1613 	void                    **link_info,
1614 	uint32_t                link_info_size,
1615 	memory_object_control_t file_control)
1616 {
1617 	vm_map_t                map = task->map;
1618 	vm_object_t             file_object = VM_OBJECT_NULL;
1619 	memory_object_t         pager = MEMORY_OBJECT_NULL;
1620 	uint32_t                r;
1621 	vm_map_address_t        map_addr;
1622 	kern_return_t           kr = KERN_SUCCESS;
1623 	vm_map_entry_t          map_entry;
1624 	vm_object_t             backing_object = VM_OBJECT_NULL;
1625 	vm_object_t             shadow_object;
1626 	int                     num_extra_shadows;
1627 
1628 	if (region_cnt == 0) {
1629 		kr = KERN_INVALID_ARGUMENT;
1630 		goto done;
1631 	}
1632 	file_object = memory_object_control_to_vm_object(file_control);
1633 	if (file_object == VM_OBJECT_NULL || file_object->internal) {
1634 		printf("%d[%s] %s: invalid object for provided file\n",
1635 		    proc_selfpid(), proc_name_address(current_proc()), __func__);
1636 		file_object = VM_OBJECT_NULL;
1637 		kr = KERN_INVALID_ARGUMENT;
1638 		goto done;
1639 	}
1640 
1641 	/*
1642 	 * Check that the mapping is backed by the same file.
1643 	 */
1644 	map_addr = regions[0].mwlr_address;
1645 	vm_map_lock_read(map);
1646 	if (!vm_map_lookup_entry(map,
1647 	    map_addr,
1648 	    &map_entry) ||
1649 	    map_entry->is_sub_map ||
1650 	    VME_OBJECT(map_entry) == VM_OBJECT_NULL) {
1651 		vm_map_unlock_read(map);
1652 		kr = KERN_INVALID_ADDRESS;
1653 		goto done;
1654 	}
1655 	/* go down the shadow chain looking for the file object and its copy object */
1656 	num_extra_shadows = 0;
1657 	shadow_object = VME_OBJECT(map_entry);
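	/*
	 * Expected chain shape (editorial sketch):
	 *   VME_OBJECT(map_entry) -> [optional extra shadows] ->
	 *   copy object (becomes backing_object) -> file_object
	 */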
1658 	vm_object_lock(shadow_object);
1659 	while (shadow_object->shadow != VM_OBJECT_NULL) {
1660 		vm_object_t next_object = shadow_object->shadow;
1661 		if (shadow_object->shadow == file_object &&
1662 		    shadow_object->vo_shadow_offset == 0) {
1663 			/*
1664 			 * Found our file object as shadow_object's shadow.
1665 			 * shadow_object should be its copy object (we'll check below
1666 			 * when we have its lock).
1667 			 * shadow_object will be the backing object for our dyld pager,
1668 			 * so let's take a reference to keep it alive until we create
1669 			 * our dyld pager.
1670 			 */
1671 			backing_object = shadow_object;
1672 			vm_object_reference_locked(backing_object);
1673 		}
1674 		if (backing_object == VM_OBJECT_NULL) {
1675 			num_extra_shadows++;
1676 		}
1677 		vm_object_lock(next_object);
1678 		vm_object_unlock(shadow_object);
1679 		shadow_object = next_object;
1680 	}
1681 	if (shadow_object != file_object) {
1682 		/* the shadow chain does not end at the file provided by the caller */
1683 		printf("%d[%s] %s: mapping at 0x%llx is not backed by the expected file\n",
1684 		    proc_selfpid(), proc_name_address(current_proc()), __func__,
1685 		    (uint64_t)map_addr);
1686 		// ktriage_record(...);
1687 		vm_object_unlock(shadow_object);
1688 		shadow_object = VM_OBJECT_NULL;
1689 		vm_map_unlock_read(map);
1690 		kr = KERN_INVALID_ARGUMENT;
1691 		goto done;
1692 	}
1693 	vm_object_unlock(shadow_object);
1694 	shadow_object = VM_OBJECT_NULL;
1695 	vm_map_unlock_read(map);
1696 	if (backing_object == VM_OBJECT_NULL ||
1697 	    backing_object != file_object->vo_copy) {
1698 		printf("%d[%s] %s: mapping at 0x%llx not a proper copy-on-write mapping\n",
1699 		    proc_selfpid(), proc_name_address(current_proc()), __func__,
1700 		    (uint64_t)map_addr);
1701 		kr = KERN_INVALID_ARGUMENT;
1702 		goto done;
1703 	}
1704 	if (num_extra_shadows) {
1705 		/*
1706 		 * We found some extra shadow objects in the shadow chain for this mapping.
1707 		 * We're about to replace that mapping with a "dyld" pager backed by the
1708 		 * latest snapshot (copy) of the provided file, so any pages that had
1709 		 * previously been copied and modified in these extra shadow objects
1710 		 * will no longer be visible in this mapping.
1711 		 */
1712 		printf("%d[%s] %s: (warn) skipped %d shadow object(s) at 0x%llx\n",
1713 		    proc_selfpid(), proc_name_address(current_proc()), __func__,
1714 		    num_extra_shadows, (uint64_t)map_addr);
1715 	}
1716 
1717 	/* create a pager, backed by the latest snapshot (copy object) of the file */
1718 	pager = dyld_pager_setup(task, backing_object, regions, region_cnt, *link_info, link_info_size);
1719 	if (pager == MEMORY_OBJECT_NULL) {
1720 		kr = KERN_RESOURCE_SHORTAGE;
1721 		goto done;
1722 	}
1723 	*link_info = NULL; /* ownership of this pointer was given to pager */
1724 
1725 	for (r = 0; r < region_cnt; ++r) {
1726 		vm_map_kernel_flags_t vmk_flags = {
1727 			.vmf_fixed = true,
1728 			.vmf_overwrite = true,
1729 			.vmkf_overwrite_immutable = true,
1730 		};
1731 		struct mwl_region *rp = &regions[r];
1732 
1733 		/* map that pager over the portion of the mapping that needs sliding */
1734 		map_addr = (vm_map_address_t)rp->mwlr_address;
1735 
1736 		if (rp->mwlr_protections & VM_PROT_TPRO) {
1737 			vmk_flags.vmf_tpro = TRUE;
1738 		}
1739 
1740 		kr = mach_vm_map_kernel(map,
1741 		    vm_sanitize_wrap_addr_ref(&map_addr),
1742 		    rp->mwlr_size,
1743 		    0,
1744 		    vmk_flags,
1745 		    (ipc_port_t)(uintptr_t)pager,
1746 		    rp->mwlr_file_offset,
1747 		    TRUE,       /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
1748 		    rp->mwlr_protections & VM_PROT_DEFAULT,
1749 		    rp->mwlr_protections & VM_PROT_DEFAULT,
1750 		    VM_INHERIT_DEFAULT);
1751 		if (kr != KERN_SUCCESS) {
1752 			/* no need to clean up earlier regions, this will be process fatal */
1753 			goto done;
1754 		}
1755 	}
1756 
1757 	/* success! */
1758 	kr = KERN_SUCCESS;
1759 
1760 done:
1761 	if (backing_object != VM_OBJECT_NULL) {
1762 		/*
1763 		 * Release our extra reference on the backing object.
1764 		 * The pager (if created) took an extra reference on it.
1765 		 */
1766 		vm_object_deallocate(backing_object);
1767 		backing_object = VM_OBJECT_NULL;
1768 	}
1769 	if (pager != MEMORY_OBJECT_NULL) {
1770 		/*
1771 		 * Release the pager reference obtained by dyld_pager_setup().
1772 		 * The mappings, if successful, are each holding a reference on the
1773 		 * pager's VM object, which keeps the pager (aka memory object) alive.
1774 		 */
1775 		memory_object_deallocate(pager);
1776 		pager = MEMORY_OBJECT_NULL;
1777 	}
1778 	return kr;
1779 }
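/*
 * Editorial sketch of the caller-side *link_info contract documented
 * above (hypothetical caller code; `task', `regions', etc. stand for
 * whatever the caller prepared).  If the call fails before the pager
 * was created, ownership never transferred and the caller must free:
 */
#if 0	/* illustrative only -- not compiled */
	kr = vm_map_with_linking(task, regions, region_cnt,
	    &link_info, link_info_size, file_control);
	if (kr != KERN_SUCCESS && link_info != NULL) {
		/* the pager never took ownership: free it ourselves */
		kfree_data(link_info, link_info_size);
	}
#endif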
1780 
1781 static uint64_t
1782 dyld_pager_purge(
1783 	dyld_pager_t pager)
1784 {
1785 	uint64_t pages_purged;
1786 	vm_object_t object;
1787 
1788 	pages_purged = 0;
1789 	object = memory_object_to_vm_object((memory_object_t) pager);
1790 	assert(object != VM_OBJECT_NULL);
1791 	vm_object_lock(object);
1792 	pages_purged = object->resident_page_count;
1793 	vm_object_reap_pages(object, REAP_DATA_FLUSH);
1794 	pages_purged -= object->resident_page_count;
1795 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1796 	vm_object_unlock(object);
1797 	return pages_purged;
1798 }
1799 
1800 uint64_t
1801 dyld_pager_purge_all(void)
1802 {
1803 	uint64_t pages_purged;
1804 	dyld_pager_t pager;
1805 
1806 	pages_purged = 0;
1807 	lck_mtx_lock(&dyld_pager_lock);
1808 	queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
1809 		pages_purged += dyld_pager_purge(pager);
1810 	}
1811 	lck_mtx_unlock(&dyld_pager_lock);
1812 #if DEVELOPMENT || DEBUG
1813 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1814 #endif /* DEVELOPMENT || DEBUG */
1815 	return pages_purged;
1816 }
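/*
 * Editorial note (hedged): vm_object_reap_pages(..., REAP_DATA_FLUSH)
 * in dyld_pager_purge() discards the pager's resident pages, and the
 * returned count is simply the resident count before minus after the
 * reap.  A later fault on a purged page goes back through the pager,
 * which re-applies the dyld fixups.
 */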
1817