xref: /xnu-12377.81.4/osfmk/vm/vm_dyld_pager.c (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
/*
 * Copyright (c) 2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>
#include <mach/mach_vm.h>

#include <kern/host.h>
#include <kern/kalloc.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>

#include <vm/memory_object_internal.h>
#include <vm/vm_kern.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map_xnu.h>
#include <vm/vm_pageout_xnu.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_dyld_pager_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_sanitize_internal.h>
#include <vm/vm_compressor_pager_xnu.h>

#include <sys/kdebug_triage.h>
#include <mach-o/fixup-chains.h>
#if defined(HAS_APPLE_PAC)
#include <ptrauth.h>
#include <arm/misc_protos.h>
#endif /* defined(HAS_APPLE_PAC) */


/* For speculation macros */
#if __arm64__
#include <arm64/speculation.h>
#endif /* #if __arm64__ */

extern int proc_selfpid(void);
extern char *proc_name_address(struct proc *p);

extern int panic_on_dyld_issue;

/*
 * DYLD page-in linking pager.
 *
 * This external memory manager (EMM) applies dyld fixups to data
 * pages, allowing the modified pages to appear "clean".
 *
 * The modified pages will never be dirtied, so the memory manager doesn't
 * need to handle page-out requests (from memory_object_data_return()).  The
 * pages are mapped copy-on-write, so that the originals stay clean.
 */
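
/*
 * To make the fixup mechanism concrete: a minimal, illustrative sketch
 * (not compiled) of the rebase arithmetic that fixupPage64() below
 * performs for the DYLD_CHAINED_PTR_64_OFFSET format.  A raw on-disk slot
 * packs a 36-bit image offset plus the pointer's top byte; the pager
 * rewrites each slot in the destination page, so the page faults in
 * already "linked".
 */
#if 0 /* illustrative sketch only */
static uint64_t
example_rebase64(uint64_t raw_value, uint64_t image_address)
{
	uint64_t target = raw_value & 0xFFFFFFFFFULL;  /* 36-bit offset */
	uint64_t high8  = (raw_value >> 36) & 0xFF;    /* pointer's top byte */
	return image_address + target + (high8 << 56); /* fixed-up pointer */
}
#endif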

/* forward declarations */
typedef struct dyld_pager *dyld_pager_t;
static void dyld_pager_reference(memory_object_t mem_obj);
static void dyld_pager_deallocate(memory_object_t mem_obj);
static void dyld_pager_deallocate_internal(dyld_pager_t pager, bool locked);
static kern_return_t dyld_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
    memory_object_cluster_size_t pg_size);
static kern_return_t dyld_pager_terminate(memory_object_t mem_obj);
static void dyld_pager_terminate_internal(dyld_pager_t pager);
static kern_return_t dyld_pager_data_request(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t length,
    vm_prot_t protection_required,
    memory_object_fault_info_t fault_info);
static kern_return_t dyld_pager_data_return(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t      data_cnt,
    memory_object_offset_t *resid_offset,
    int *io_error,
    boolean_t dirty,
    boolean_t kernel_copy,
    int upl_flags);
static kern_return_t dyld_pager_data_initialize(memory_object_t mem_obj,
    memory_object_offset_t offset,
    memory_object_cluster_size_t data_cnt);
static kern_return_t dyld_pager_map(memory_object_t mem_obj,
    vm_prot_t prot);
static kern_return_t dyld_pager_last_unmap(memory_object_t mem_obj);
static boolean_t dyld_pager_backing_object(
	memory_object_t mem_obj,
	memory_object_offset_t mem_obj_offset,
	vm_object_t *backing_object,
	vm_object_offset_t *backing_offset);
static dyld_pager_t dyld_pager_lookup(memory_object_t  mem_obj);

struct vm_map_with_linking_stats vm_map_with_linking_stats;

/*
 * Vector of VM operations for this EMM.
 * These routines are invoked by VM via the memory_object_*() interfaces.
 */
const struct memory_object_pager_ops dyld_pager_ops = {
	.memory_object_reference = dyld_pager_reference,
	.memory_object_deallocate = dyld_pager_deallocate,
	.memory_object_init = dyld_pager_init,
	.memory_object_terminate = dyld_pager_terminate,
	.memory_object_data_request = dyld_pager_data_request,
	.memory_object_data_return = dyld_pager_data_return,
	.memory_object_data_initialize = dyld_pager_data_initialize,
	.memory_object_map = dyld_pager_map,
	.memory_object_last_unmap = dyld_pager_last_unmap,
	.memory_object_backing_object = dyld_pager_backing_object,
	.memory_object_pager_name = "dyld"
};

/*
 * Function that advances a chain pointer by a delta that must remain within
 * the same page, using nospec ISA so the pointer cannot stray off the page
 * even under speculation.
 */
static inline bool
_delta_ptr_within_page_nospec(uint64_t ** __nonnull ptr, uint64_t deltaByteCount, bool *crossing_page, uintptr_t userVA)
{
	uintptr_t old_page = (uintptr_t)*ptr >> PAGE_SHIFT;
	uintptr_t new_page = ((uintptr_t)*ptr + deltaByteCount) >> PAGE_SHIFT;
	uint64_t  nospec_delta = deltaByteCount;
	uintptr_t page_offset = (uintptr_t)*ptr & PAGE_MASK;
#if __arm64__
	bool      nospec_delta_valid = false;
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ nospec_delta, /* out_valid */ nospec_delta_valid,
		/* value */ nospec_delta,
		/* cmp1 */ old_page, /* cmp2 */ new_page,
		/* cc */ "EQ");
#elif __i386__ || __x86_64__
	if (old_page == new_page) {
		nospec_delta = deltaByteCount;
	} else {
		nospec_delta = 0;
	}
	// MAYBE: lfence here
#endif /* __arm64__ */
	*ptr = (uint64_t*)((uintptr_t)*ptr + nospec_delta);
	*crossing_page = nospec_delta != deltaByteCount;
	if (*crossing_page) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
		printf("%s(): fixup chain delta crossing to the next page [{%p} + {%lld}]\n", __func__, (void*)(userVA + page_offset), deltaByteCount);
		if (panic_on_dyld_issue) {
			panic("%s(): delta offset > page size %lld", __func__, deltaByteCount);
		}
	}

	return nospec_delta != 0;
}
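
/*
 * Example of the guard's behavior (values assumed for illustration): for a
 * chain entry at page offset 0xff8 with an encoded delta of 0x10, old_page
 * and new_page differ, so the applied delta is forced to 0 -- both
 * architecturally and under speculation.  The caller then sees
 * crossing_page == true and fails the fixup instead of letting the walk
 * escape the page being processed.
 */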

static inline bool
_delta_ptr_within_page32_nospec(uint32_t ** __nonnull ptr, uint32_t deltaByteCount, bool *crossing_page, uintptr_t userVA)
{
	uintptr_t old_page = (uintptr_t)*ptr >> PAGE_SHIFT;
	uintptr_t new_page = ((uintptr_t)*ptr + deltaByteCount) >> PAGE_SHIFT;
	uintptr_t page_offset = (uintptr_t)*ptr & PAGE_MASK;
	uint64_t  nospec_delta = deltaByteCount;
#if __arm64__
	bool      nospec_delta_valid = false;
	SPECULATION_GUARD_ZEROING_XXX(
		/* out */ nospec_delta, /* out_valid */ nospec_delta_valid,
		/* value */ nospec_delta,
		/* cmp1 */ old_page, /* cmp2 */ new_page,
		/* cc */ "EQ");
#elif __i386__ || __x86_64__
	if (old_page == new_page) {
		nospec_delta = deltaByteCount;
	} else {
		nospec_delta = 0;
	}
	// MAYBE: lfence here
#endif /* __arm64__ */
	*ptr = (uint32_t*)((uintptr_t)*ptr + nospec_delta);
	*crossing_page = nospec_delta != deltaByteCount;
	if (*crossing_page) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_DELTA_TOO_LARGE), (uintptr_t)userVA);
		printf("%s(): fixup chain delta crossing to the next page [{%p} + {%d}]\n", __func__, (void*)(userVA + page_offset), deltaByteCount);
		if (panic_on_dyld_issue) {
			panic("%s(): delta offset > page size %d", __func__, deltaByteCount);
		}
	}
	return nospec_delta != 0;
}


/*
 * The "dyld_pager" structure. We create one of these for each use
 * of map_with_linking_np() by dyld.
 */
struct dyld_pager {
	struct memory_object    dyld_header;          /* mandatory generic header */

#if MEMORY_OBJECT_HAS_REFCOUNT
#define dyld_ref_count           dyld_header.mo_ref
#else
	os_ref_atomic_t         dyld_ref_count;      /* active uses */
#endif
	queue_chain_t           dyld_pager_queue;    /* next & prev pagers */
	bool                    dyld_is_mapped;      /* has active mappings */
	bool                    dyld_is_ready;       /* is this pager ready? */
	vm_object_t             dyld_backing_object; /* VM object for shared cache */
	void                    *dyld_link_info;
	uint32_t                dyld_link_info_size;
	uint32_t                dyld_num_range;
	memory_object_offset_t  dyld_file_offset[MWL_MAX_REGION_COUNT];
	mach_vm_address_t       dyld_address[MWL_MAX_REGION_COUNT];
	mach_vm_size_t          dyld_size[MWL_MAX_REGION_COUNT];
#if defined(HAS_APPLE_PAC)
	uint64_t                dyld_a_key;
#endif /* defined(HAS_APPLE_PAC) */
};
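
/*
 * The dyld_file_offset/dyld_address/dyld_size arrays describe up to
 * MWL_MAX_REGION_COUNT mapped regions.  Below is a minimal sketch (a
 * hypothetical helper, not part of this file) of the file-offset-to-user-VA
 * translation that dyld_pager_data_request() performs inline over these
 * arrays:
 */
#if 0 /* illustrative sketch only */
static uint64_t
example_offset_to_userVA(dyld_pager_t pager, memory_object_offset_t o)
{
	for (uint32_t r = 0; r < pager->dyld_num_range; ++r) {
		if (pager->dyld_file_offset[r] <= o &&
		    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
			return pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
		}
	}
	return 0; /* no region matched -- callers must treat this as an error */
}
#endif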

queue_head_t dyld_pager_queue = QUEUE_HEAD_INITIALIZER(dyld_pager_queue);

/*
 * "dyld_pager_lock" for counters, ref counting, etc.
 */
LCK_GRP_DECLARE(dyld_pager_lck_grp, "dyld_pager");
LCK_MTX_DECLARE(dyld_pager_lock, &dyld_pager_lck_grp);

/*
 * Statistics & counters.
 */
uint32_t dyld_pager_count = 0;
uint32_t dyld_pager_count_max = 0;

/*
 * dyld_pager_dequeue()
 *
 * Removes a pager from the list of pagers.
 *
 * The caller must hold "dyld_pager_lock".
 */
static void
dyld_pager_dequeue(
	__unused dyld_pager_t pager)
{
	queue_remove(&dyld_pager_queue,
	    pager,
	    dyld_pager_t,
	    dyld_pager_queue);
	pager->dyld_pager_queue.next = NULL;
	pager->dyld_pager_queue.prev = NULL;
	dyld_pager_count--;
}

/*
 * dyld_pager_init()
 *
 * Initializes the memory object and makes it ready to be used and mapped.
 */
static kern_return_t
dyld_pager_init(
	memory_object_t                 mem_obj,
	memory_object_control_t         control,
	__unused
	memory_object_cluster_size_t    pg_size)
{
	dyld_pager_t                    pager;
	kern_return_t                   kr;
	memory_object_attr_info_data_t  attributes;

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		printf("%s(): control NULL\n", __func__);
		return KERN_INVALID_ARGUMENT;
	}

	pager = dyld_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	pager->dyld_header.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = FALSE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("dyld_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}
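
/*
 * Note on the attributes above (interpretation, not from the original
 * source): MEMORY_OBJECT_COPY_DELAY provides the copy-on-write behavior
 * described in the header comment, while may_cache_object == FALSE and
 * temporary == TRUE mean fixed-up pages need not be preserved once
 * unmapped -- they can always be regenerated by another data_request.
 */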

/*
 * dyld_pager_data_return()
 *
 * A page-out request from VM -- should never happen, so panic.
 */
static kern_return_t
dyld_pager_data_return(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt,
	__unused memory_object_offset_t *resid_offset,
	__unused int                    *io_error,
	__unused boolean_t              dirty,
	__unused boolean_t              kernel_copy,
	__unused int                    upl_flags)
{
	panic("dyld_pager_data_return: should never happen!");
	return KERN_FAILURE;
}

static kern_return_t
dyld_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("dyld_pager_data_initialize: should never happen");
	return KERN_FAILURE;
}


/*
 * Apply fixups to a page used by a 64 bit process.
 */
static kern_return_t
fixupPage64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t                              *bindsArray  = (uint64_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              firstStartOffset = segInfo->page_start[pageIndex];
	vm_offset_t                           end_contents = contents + PAGE_SIZE;
	//  For DYLD_CHAINED_PTR_64 (arm64 and x86_64) and DYLD_CHAINED_PTR_32 (arm64_32) the stride is always 4
	uint64_t                              step_multiplier = 4; // 4-byte stride
	/*
	 * Done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * walk the chain
	 */
	uint64_t *chain  = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	bool     valid_chain = false;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}

			return KERN_FAILURE;
		}
		uint64_t value  = *chain;
		bool     isBind = (value & 0x8000000000000000ULL);
		/* delta that can be used speculatively */
		delta = (value >> 51) & 0xFFF;
		delta *= step_multiplier;
		if (isBind) {
			uint32_t bindOrdinal = value & 0x00FFFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s out of range bind ordinal %u (max %u)\n", __func__,
				    bindOrdinal, hdr->mwli_binds_count);
				if (panic_on_dyld_issue) {
					panic("%s out of range bind ordinal %u (max %u)", __func__,
					    bindOrdinal, hdr->mwli_binds_count);
				}
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 24) & 0xFF;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			/* is rebase */
			uint64_t target = value & 0xFFFFFFFFFULL;
			uint64_t high8  = (value >> 36) & 0xFF;
			*chain = target + targetAdjust + (high8 << 56);
		}
		/* shifts chain to a delta, chain cannot be used to access outside of page speculatively after this point */
		bool crossing_page = false;
		valid_chain = _delta_ptr_within_page_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);
		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (valid_chain);
	return KERN_SUCCESS;
}
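
/*
 * For reference, the bit layout decoded above matches these declarations
 * from <mach-o/fixup-chains.h> (reproduced here for illustration only;
 * the header included above is authoritative):
 */
#if 0 /* illustrative sketch only */
struct dyld_chained_ptr_64_rebase {
	uint64_t    target   : 36,  /* vmaddr, or image offset for the _OFFSET format */
	            high8    :  8,  /* top byte of the pointer */
	            reserved :  7,
	            next     : 12,  /* 4-byte stride to the next fixup */
	            bind     :  1;  /* == 0 */
};
struct dyld_chained_ptr_64_bind {
	uint64_t    ordinal  : 24,  /* index into the binds array */
	            addend   :  8,
	            reserved : 19,
	            next     : 12,  /* 4-byte stride to the next fixup */
	            bind     :  1;  /* == 1 */
};
#endif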


/*
 * Apply fixups within a page used by a 32 bit process.
 */
static kern_return_t
fixupPageChain32(
	uint64_t                              userVA,
	uint32_t                              *chain,
	vm_offset_t                           contents,
	void                                  *link_info,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              *bindsArray)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	uint32_t                              delta = 0;
	bool                                  chain_valid = false;
	vm_offset_t                           end_contents = contents + PAGE_SIZE;
	uint32_t                              step_multiplier = 4; // always 4-byte stride
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint32_t value = *chain;
		/* delta that can be used speculatively */
		delta = (value >> 26) & 0x1F;
		delta *= step_multiplier;
		if (value & 0x80000000) {
			// is bind
			uint32_t bindOrdinal = value & 0x000FFFFF;
			if (bindOrdinal >= hdr->mwli_binds_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
				printf("%s(): out of range bind ordinal %u (max %u)\n",
				    __func__, bindOrdinal, hdr->mwli_binds_count);
				if (panic_on_dyld_issue) {
					panic("%s(): out of range bind ordinal %u (max %u)",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
				}
				return KERN_FAILURE;
			}
			uint32_t addend = (value >> 20) & 0x3F;
			*chain = bindsArray[bindOrdinal] + addend;
		} else {
			// is rebase
			uint32_t target = value & 0x03FFFFFF;
			if (target > segInfo->max_valid_pointer) {
				// handle non-pointers in chain
				uint32_t bias = (0x04000000 + segInfo->max_valid_pointer) / 2;
				*chain = target - bias;
			} else {
				*chain = target + (uint32_t)hdr->mwli_slide;
			}
		}
		bool crossing_page = false;
		chain_valid = _delta_ptr_within_page32_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);

		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (chain_valid);
	return KERN_SUCCESS;
}
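
/*
 * The 32-bit encoding decoded above packs a bind flag (1 bit), the next
 * delta (5 bits, 4-byte stride), and either a 20-bit bind ordinal with a
 * 6-bit addend or a 26-bit rebase target.  Rebase values above
 * segInfo->max_valid_pointer are "non-pointers" that were biased into the
 * valid range when the chain was built; the bias arithmetic above undoes
 * that transformation instead of sliding them.
 */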


/*
 * Apply fixups to a page used by a 32 bit process.
 */
static kern_return_t
fixupPage32(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	void                                  *link_info,
	uint32_t                              link_info_size,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr  *)link_info;
	uint32_t                              *bindsArray = (uint32_t *)((uintptr_t)hdr + hdr->mwli_binds_offset);
	uint16_t                              startOffset = segInfo->page_start[pageIndex];
	/*
	 * done if no fixups
	 */
	if (startOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	if (startOffset & DYLD_CHAINED_PTR_START_MULTI) {
		// some fixups in the page are too far apart, so page has multiple starts
		uint32_t overflowIndex = startOffset & ~DYLD_CHAINED_PTR_START_MULTI;
		bool chainEnd = false;
		while (!chainEnd) {
			/*
			 * range check against link_info, note +1 to include data we'll dereference
			 */
			if ((uintptr_t)&segInfo->page_start[overflowIndex + 1] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): out of range segInfo->page_start[overflowIndex]\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): out of range segInfo->page_start[overflowIndex]", __func__);
				}
				return KERN_FAILURE;
			}
			chainEnd    = (segInfo->page_start[overflowIndex] & DYLD_CHAINED_PTR_START_LAST);
			startOffset = (segInfo->page_start[overflowIndex] & ~DYLD_CHAINED_PTR_START_LAST);
			uint32_t *chain = (uint32_t *)(contents + startOffset);
			fixupPageChain32(userVA, chain, contents, link_info, segInfo, bindsArray);
			++overflowIndex;
		}
	} else {
		uint32_t *chain = (uint32_t *)(contents + startOffset);
		fixupPageChain32(userVA, chain, contents, link_info, segInfo, bindsArray);
	}
	return KERN_SUCCESS;
}

#if defined(HAS_APPLE_PAC)
/*
 * Sign a pointer needed for fixups.
 */
static kern_return_t
signPointer(
	uint64_t         unsignedAddr,
	void             *loc,
	bool             addrDiv,
	uint16_t         diversity,
	ptrauth_key      key,
	dyld_pager_t     pager,
	uint64_t         *signedAddr)
{
	// don't sign NULL
	if (unsignedAddr == 0) {
		*signedAddr = 0;
		return KERN_SUCCESS;
	}

	uint64_t extendedDiscriminator = diversity;
	if (addrDiv) {
		extendedDiscriminator = __builtin_ptrauth_blend_discriminator(loc, extendedDiscriminator);
	}

	switch (key) {
	case ptrauth_key_asia:
	case ptrauth_key_asda:
		if (pager->dyld_a_key == 0 || arm_user_jop_disabled()) {
			*signedAddr = unsignedAddr;
		} else {
			*signedAddr = (uintptr_t)pmap_sign_user_ptr((void *)unsignedAddr, key, extendedDiscriminator, pager->dyld_a_key);
		}
		break;

	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_INVALID_AUTH_KEY), (uintptr_t)unsignedAddr);
		printf("%s(): Invalid ptr auth key %d\n", __func__, key);
		if (panic_on_dyld_issue) {
			panic("%s(): Invalid ptr auth key %d", __func__, key);
		}
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
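
/*
 * Note: only the A keys (IA/DA) are accepted here; on arm64e the pager
 * signs userland fixups with the A key it captured in dyld_a_key.  When
 * that key is 0 or user JOP is disabled, the pointer is stored unsigned,
 * and when address diversity is requested the 16-bit diversity value is
 * first blended with the pointer's eventual user VA.
 */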

/*
 * Apply fixups to a page used by a 64 bit process using pointer authentication.
 */
static kern_return_t
fixupPageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex,
	bool                                  offsetBased)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	uint64_t             *bindsArray = (uint64_t*)((uintptr_t)link_info + hdr->mwli_binds_offset);
	vm_offset_t          end_contents = contents + PAGE_SIZE;
	bool                 valid_chain = false;
	uint64_t             step_multiplier = 8; // always 8-byte stride for arm64e pages

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		if (panic_on_dyld_issue) {
			panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		}
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t targetAdjust = (offsetBased ? hdr->mwli_image_address : hdr->mwli_slide);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		/* delta that can be used speculatively */
		delta = (value >> 51) & 0x7FF;
		delta *= step_multiplier;
		bool isAuth = (value & 0x8000000000000000ULL);
		bool isBind = (value & 0x4000000000000000ULL);
		if (isAuth) {
			ptrauth_key key = (ptrauth_key)((value >> 49) & 0x3);
			bool        addrDiv = ((value & (1ULL << 48)) != 0);
			uint16_t    diversity = (uint16_t)((value >> 32) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					if (panic_on_dyld_issue) {
						panic("%s(): out of range bind ordinal %u (max %u)",
						    __func__, bindOrdinal, hdr->mwli_binds_count);
					}
					return KERN_FAILURE;
				}
				if (signPointer(bindsArray[bindOrdinal], (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			} else {
				/* note: auth rebases only have 32 bits, so the target is always an offset - never a vmaddr */
				uint64_t target = (value & 0xFFFFFFFF) + hdr->mwli_image_address;
				if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
					return KERN_FAILURE;
				}
			}
		} else {
			if (isBind) {
				uint32_t bindOrdinal = value & 0x00FFFFFF;
				if (bindOrdinal >= hdr->mwli_binds_count) {
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BIND_ORDINAL), (uintptr_t)userVA);
					printf("%s(): out of range bind ordinal %u (max %u)\n",
					    __func__, bindOrdinal, hdr->mwli_binds_count);
					if (panic_on_dyld_issue) {
						panic("%s(): out of range bind ordinal %u (max %u)",
						    __func__, bindOrdinal, hdr->mwli_binds_count);
					}
					return KERN_FAILURE;
				} else {
					uint64_t addend19 = (value >> 32) & 0x0007FFFF;
					if (addend19 & 0x40000) {
						addend19 |=  0xFFFFFFFFFFFC0000ULL;
					}
					*chain = bindsArray[bindOrdinal] + addend19;
				}
			} else {
				uint64_t target = (value & 0x7FFFFFFFFFFULL);
				uint64_t high8  = (value << 13) & 0xFF00000000000000ULL;
				*chain = target + targetAdjust + high8;
			}
		}
		bool crossing_page = false;
		valid_chain = _delta_ptr_within_page_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);

		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (valid_chain);
	return KERN_SUCCESS;
}
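
/*
 * For reference, the arm64e encodings decoded above match these
 * declarations from <mach-o/fixup-chains.h> (reproduced for illustration
 * only; the 0x00FFFFFF ordinal mask above also covers the 24-bit ordinal
 * of the DYLD_CHAINED_PTR_ARM64E_USERLAND24 variant):
 */
#if 0 /* illustrative sketch only */
struct dyld_chained_ptr_arm64e_rebase {
	uint64_t    target  : 43, high8 : 8, next : 11, bind : 1, auth : 1; /* bind == 0, auth == 0 */
};
struct dyld_chained_ptr_arm64e_auth_rebase {
	uint64_t    target  : 32, diversity : 16, addrDiv : 1, key : 2, next : 11, bind : 1, auth : 1; /* bind == 0, auth == 1 */
};
struct dyld_chained_ptr_arm64e_bind {
	uint64_t    ordinal : 16, zero : 16, addend : 19, next : 11, bind : 1, auth : 1; /* bind == 1, auth == 0 */
};
struct dyld_chained_ptr_arm64e_auth_bind {
	uint64_t    ordinal : 16, zero : 16, diversity : 16, addrDiv : 1, key : 2, next : 11, bind : 1, auth : 1; /* bind == 1, auth == 1 */
};
#endif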

/*
 * Apply fixups to a shared cache page used by a 64 bit process using
 * pointer authentication.
 */
static kern_return_t
fixupCachePageAuth64(
	uint64_t                              userVA,
	vm_offset_t                           contents,
	dyld_pager_t                          pager,
	struct dyld_chained_starts_in_segment *segInfo,
	uint32_t                              pageIndex)
{
	void                 *link_info = pager->dyld_link_info;
	uint32_t             link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr  *hdr = (struct mwl_info_hdr *)link_info;
	vm_offset_t          end_contents = contents + PAGE_SIZE;
	bool                 valid_chain = false;
	uint64_t             step_multiplier = 8; // always 8-byte stride for arm64e

	/*
	 * range check against link_info, note +1 to include data we'll dereference
	 */
	if ((uintptr_t)&segInfo->page_start[pageIndex + 1] > (uintptr_t)link_info + link_info_size) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_PAGE_START_OUT_OF_RANGE), (uintptr_t)userVA);
		printf("%s(): out of range segInfo->page_start[pageIndex]\n", __func__);
		if (panic_on_dyld_issue) {
			panic("%s(): out of range segInfo->page_start[pageIndex]", __func__);
		}
		return KERN_FAILURE;
	}
	uint16_t firstStartOffset = segInfo->page_start[pageIndex];

	/*
	 * All done if no fixups on the page
	 */
	if (firstStartOffset == DYLD_CHAINED_PTR_START_NONE) {
		return KERN_SUCCESS;
	}

	/*
	 * Walk the chain of offsets to fix up
	 */
	uint64_t *chain = (uint64_t *)(contents + firstStartOffset);
	uint64_t delta = 0;
	do {
		if ((uintptr_t)chain < contents || (uintptr_t)chain + sizeof(*chain) > end_contents) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_CHAIN_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): chain 0x%llx out of range 0x%llx..0x%llx\n", __func__,
			    (long long)chain, (long long)contents, (long long)end_contents);
			if (panic_on_dyld_issue) {
				panic("%s(): chain 0x%llx out of range 0x%llx..0x%llx", __func__,
				    (long long)chain, (long long)contents, (long long)end_contents);
			}
			return KERN_FAILURE;
		}
		uint64_t value = *chain;
		/* delta that can be used speculatively */
		delta = (value >> 52) & 0x7FF;
		delta *= step_multiplier;
		bool isAuth = (value & 0x8000000000000000ULL);
		if (isAuth) {
			bool        addrDiv = ((value & (1ULL << 50)) != 0);
			bool        keyIsData = ((value & (1ULL << 51)) != 0);
			// the key is always A, and the bit tells us if it's IA or DA
			ptrauth_key key = keyIsData ? ptrauth_key_asda : ptrauth_key_asia;
			uint16_t    diversity = (uint16_t)((value >> 34) & 0xFFFF);
			uintptr_t   uVA = userVA + ((uintptr_t)chain - contents);
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			if (signPointer(target, (void *)uVA, addrDiv, diversity, key, pager, chain) != KERN_SUCCESS) {
				return KERN_FAILURE;
			}
		} else {
			// target is always a 34-bit runtime offset, never a vmaddr
			uint64_t target = (value & 0x3FFFFFFFFULL) + hdr->mwli_image_address;
			uint64_t high8  = (value << 22) & 0xFF00000000000000ULL;
			*chain = target + high8;
		}
		bool crossing_page = false;
		valid_chain = _delta_ptr_within_page_nospec(&chain, delta, &crossing_page, (uintptr_t)userVA);
		if (crossing_page) {
			return KERN_FAILURE;
		}
	} while (valid_chain);
	return KERN_SUCCESS;
}
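
/*
 * For reference, the shared cache encodings decoded above correspond to
 * the DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE declarations in
 * <mach-o/fixup-chains.h>; sketched here from the bit positions used
 * above, with hypothetical names (illustration only):
 */
#if 0 /* illustrative sketch only */
struct example_shared_cache_rebase {
	uint64_t    runtimeOffset : 34, high8 : 8, unused : 10, next : 11, auth : 1; /* auth == 0 */
};
struct example_shared_cache_auth_rebase {
	uint64_t    runtimeOffset : 34, diversity : 16, addrDiv : 1, keyIsData : 1, next : 11, auth : 1; /* auth == 1 */
};
#endif
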
#endif /* defined(HAS_APPLE_PAC) */


/*
 * Handle dyld fixups for a page.
 */
static kern_return_t
fixup_page(
	vm_offset_t         contents,
	uint64_t            userVA,
	dyld_pager_t        pager)
{
	void                                  *link_info = pager->dyld_link_info;
	uint32_t                              link_info_size = pager->dyld_link_info_size;
	struct mwl_info_hdr                   *hdr = (struct mwl_info_hdr *)link_info;
	struct dyld_chained_starts_in_segment *segInfo = NULL;
	uint32_t                              pageIndex = 0;
	uint32_t                              segIndex;
	struct dyld_chained_starts_in_image   *startsInfo;
	struct dyld_chained_starts_in_segment *seg;
	uint64_t                              segStartAddress;
	uint64_t                              segEndAddress;

	/*
	 * Note this is a linear search done for every page we have to fix up.
	 * However, it should be quick as there should only be 2 or 4 segments:
	 * - data
	 * - data const
	 * - data auth (for arm64e)
	 * - data const auth (for arm64e)
	 */
	startsInfo = (struct dyld_chained_starts_in_image *)((uintptr_t)hdr + hdr->mwli_chains_offset);
	for (segIndex = 0; segIndex < startsInfo->seg_count; ++segIndex) {
		seg = (struct dyld_chained_starts_in_segment *)
		    ((uintptr_t)startsInfo + startsInfo->seg_info_offset[segIndex]);

		/*
		 * ensure we don't go out of bounds of the link_info
		 */
		if ((uintptr_t)seg + sizeof(*seg) > (uintptr_t)link_info + link_info_size) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_INFO_OUT_OF_RANGE), (uintptr_t)userVA);
			printf("%s(): seg_info out of bounds\n", __func__);
			if (panic_on_dyld_issue) {
				panic("%s(): seg_info out of bounds", __func__);
			}
			return KERN_FAILURE;
		}

		segStartAddress = hdr->mwli_image_address + seg->segment_offset;
		segEndAddress = segStartAddress + seg->page_count * seg->page_size;
		if (segStartAddress <= userVA && userVA < segEndAddress) {
			segInfo = seg;
			pageIndex = (uint32_t)(userVA - segStartAddress) / PAGE_SIZE;

			/* ensure seg->size fits in link_info_size */
			if ((uintptr_t)seg + seg->size > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size out of bounds\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->size out of bounds", __func__);
				}
				return KERN_FAILURE;
			}
			if (seg->size < sizeof(struct dyld_chained_starts_in_segment)) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_SIZE_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->size too small\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->size too small", __func__);
				}
				return KERN_FAILURE;
			}
			/* ensure page_count and pageIndex are valid too */
			if ((uintptr_t)&seg->page_start[seg->page_count] > (uintptr_t)link_info + link_info_size) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count out of bounds\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->page_count out of bounds", __func__);
				}
				return KERN_FAILURE;
			}
			if (pageIndex >= seg->page_count) {
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SEG_PAGE_CNT_OUT_OF_RANGE), (uintptr_t)userVA);
				printf("%s(): seg->page_count too small\n", __func__);
				if (panic_on_dyld_issue) {
					panic("%s(): seg->page_count too small", __func__);
				}
				return KERN_FAILURE;
			}

			break;
		}
	}

	/*
	 * Question for Nick... or can we make this OK and just return KERN_SUCCESS, nothing to do?
	 */
	if (segInfo == NULL) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_SEG_FOR_VA), (uintptr_t)userVA);
		printf("%s(): No segment for user VA 0x%llx\n", __func__, (long long)userVA);
		if (panic_on_dyld_issue) {
			panic("%s(): No segment for user VA 0x%llx", __func__, (long long)userVA);
		}
		return KERN_FAILURE;
	}

	/*
	 * Route to the appropriate fixup routine
	 */
	switch (hdr->mwli_pointer_format) {
#if defined(HAS_APPLE_PAC)
	case DYLD_CHAINED_PTR_ARM64E:
		fixupPageAuth64(userVA, contents, pager, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_ARM64E_USERLAND:
	case DYLD_CHAINED_PTR_ARM64E_USERLAND24:
		fixupPageAuth64(userVA, contents, pager, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_ARM64E_SHARED_CACHE:
		fixupCachePageAuth64(userVA, contents, pager, segInfo, pageIndex);
		break;
#endif /* defined(HAS_APPLE_PAC) */
	case DYLD_CHAINED_PTR_64:
		fixupPage64(userVA, contents, link_info, segInfo, pageIndex, false);
		break;
	case DYLD_CHAINED_PTR_64_OFFSET:
		fixupPage64(userVA, contents, link_info, segInfo, pageIndex, true);
		break;
	case DYLD_CHAINED_PTR_32:
		fixupPage32(userVA, contents, link_info, link_info_size, segInfo, pageIndex);
		break;
	default:
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_BAD_POINTER_FMT), (uintptr_t)userVA);
		printf("%s(): unknown pointer_format %d\n", __func__, hdr->mwli_pointer_format);
		if (panic_on_dyld_issue) {
			panic("%s(): unknown pointer_format %d", __func__, hdr->mwli_pointer_format);
		}
		return KERN_FAILURE;
	}
	return KERN_SUCCESS;
}
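
/*
 * Routing summary: the _64 vs _64_OFFSET and _ARM64E vs _ARM64E_USERLAND*
 * cases differ only in whether rebase targets are absolute vmaddrs or
 * image-relative offsets (the offsetBased argument); the SHARED_CACHE and
 * _32 formats have their own walkers above.
 */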

/*
 * dyld_pager_data_request()
 *
 * Handles page-in requests from VM.
 */
static kern_return_t
dyld_pager_data_request(
	memory_object_t              mem_obj,
	memory_object_offset_t       offset,
	memory_object_cluster_size_t length,
	__unused vm_prot_t           protection_required,
	memory_object_fault_info_t   mo_fault_info)
{
	dyld_pager_t            pager;
	memory_object_control_t mo_control;
	upl_t                   upl = NULL;
	int                     upl_flags;
	upl_size_t              upl_size;
	upl_page_info_t         *upl_pl = NULL;
	unsigned int            pl_count;
	vm_object_t             src_top_object = VM_OBJECT_NULL;
	vm_object_t             src_page_object = VM_OBJECT_NULL;
	vm_object_t             dst_object;
	kern_return_t           kr;
	kern_return_t           retval = KERN_SUCCESS;
	vm_fault_return_t       vmfr;
	vm_offset_t             src_vaddr;
	vm_offset_t             dst_vaddr;
	vm_offset_t             cur_offset;
	kern_return_t           error_code;
	vm_prot_t               prot;
	vm_page_t               src_page, top_page;
	int                     interruptible;
	struct vm_object_fault_info fault_info = *((struct vm_object_fault_info *)(uintptr_t)mo_fault_info);
	struct mwl_info_hdr     *hdr;
	uint32_t                r;
	uint64_t                userVA;

	fault_info.stealth = TRUE;
	fault_info.io_sync = FALSE;
	fault_info.mark_zf_absent = FALSE;
	fault_info.batch_pmap_op = FALSE;
	interruptible = fault_info.interruptible;

	pager = dyld_pager_lookup(mem_obj);
	assert(pager->dyld_is_ready);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 1); /* pager is alive */
	assert(pager->dyld_is_mapped); /* pager is mapped */
	hdr = (struct mwl_info_hdr *)pager->dyld_link_info;

	/*
	 * Gather in a UPL all the VM pages requested by VM.
	 */
	mo_control = pager->dyld_header.mo_control;

	upl_size = length;
	upl_flags =
	    UPL_RET_ONLY_ABSENT |
	    UPL_SET_LITE |
	    UPL_NO_SYNC |
	    UPL_CLEAN_IN_PLACE |        /* triggers UPL_CLEAR_DIRTY */
	    UPL_SET_INTERNAL;
	pl_count = 0;
	kr = memory_object_upl_request(mo_control,
	    offset, upl_size,
	    &upl, NULL, NULL, upl_flags, VM_KERN_MEMORY_SECURITY);
	if (kr != KERN_SUCCESS) {
		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_NO_UPL), kr /* arg */);
		if (panic_on_dyld_issue) {
			panic("%s(): upl_request(%p, 0x%llx, 0x%llx) ret %d", __func__,
			    mo_control, offset, (uint64_t)upl_size, kr);
		}
		retval = kr;
		goto done;
	}
	dst_object = memory_object_control_to_vm_object(mo_control);
	assert(dst_object != VM_OBJECT_NULL);

	/*
	 * We'll map the original data in the kernel address space from the
	 * backing VM object, itself backed by the executable/library file via
	 * the vnode pager.
	 */
	src_top_object = pager->dyld_backing_object;
	assert(src_top_object != VM_OBJECT_NULL);
	vm_object_reference(src_top_object); /* keep the source object alive */

	/*
	 * Fill in the contents of the pages requested by VM.
	 */
	upl_pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
	pl_count = length / PAGE_SIZE;
	for (cur_offset = 0;
	    retval == KERN_SUCCESS && cur_offset < length;
	    cur_offset += PAGE_SIZE) {
		ppnum_t dst_pnum;

		if (!upl_page_present(upl_pl, (int)(cur_offset / PAGE_SIZE))) {
			/* this page is not in the UPL: skip it */
			continue;
		}

		/*
		 * Map the source page in the kernel's virtual address space.
		 * We already hold a reference on the src_top_object.
		 */
retry_src_fault:
		vm_object_lock(src_top_object);
		vm_object_paging_begin(src_top_object);
		error_code = 0;
		prot = VM_PROT_READ;
		src_page = VM_PAGE_NULL;
		vmfr = vm_fault_page(src_top_object,
		    offset + cur_offset,
		    VM_PROT_READ,
		    FALSE,
		    FALSE,                /* src_page not looked up */
		    &prot,
		    &src_page,
		    &top_page,
		    NULL,
		    &error_code,
		    FALSE,
		    &fault_info);
		switch (vmfr) {
		case VM_FAULT_SUCCESS:
			break;
		case VM_FAULT_RETRY:
			goto retry_src_fault;
		case VM_FAULT_MEMORY_SHORTAGE:
			if (vm_page_wait(interruptible)) {
				goto retry_src_fault;
			}
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_MEMORY_SHORTAGE), 0 /* arg */);
			OS_FALLTHROUGH;
		case VM_FAULT_INTERRUPTED:
			retval = MACH_SEND_INTERRUPTED;
			goto done;
		case VM_FAULT_SUCCESS_NO_VM_PAGE:
			/* success but no VM page: fail */
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
			OS_FALLTHROUGH;
		case VM_FAULT_MEMORY_ERROR:
			/* the page is not there ! */
			if (error_code) {
				retval = error_code;
			} else {
				retval = KERN_MEMORY_ERROR;
			}
			goto done;
		case VM_FAULT_BUSY:
			retval = KERN_ALREADY_WAITING;
			goto done;
		}
		assert(src_page != VM_PAGE_NULL);
		assert(src_page->vmp_busy);

		if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
			vm_page_lockspin_queues();
			if (src_page->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
				vm_page_speculate(src_page, FALSE);
			}
			vm_page_unlock_queues();
		}

		/*
		 * Establish pointers to the source and destination physical pages.
		 */
		dst_pnum = (ppnum_t)upl_phys_page(upl_pl, (int)(cur_offset / PAGE_SIZE));
		assert(dst_pnum != 0);

		src_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(src_page) << PAGE_SHIFT);
		dst_vaddr = (vm_map_offset_t)phystokv((pmap_paddr_t)dst_pnum << PAGE_SHIFT);
		src_page_object = VM_PAGE_OBJECT(src_page);

		/*
		 * Validate the original page...
		 */
		if (src_page_object->code_signed) {
			vm_page_validate_cs_mapped(src_page, PAGE_SIZE, 0, (const void *)src_vaddr);
		}

		/*
		 * ... and transfer the results to the destination page.
		 */
		UPL_SET_CS_VALIDATED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_validated);
		UPL_SET_CS_TAINTED(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_tainted);
		UPL_SET_CS_NX(upl_pl, cur_offset / PAGE_SIZE, src_page->vmp_cs_nx);

		/*
		 * The page provider might access a mapped file, so let's
		 * release the object lock for the source page to avoid a
		 * potential deadlock.
		 * The source page is kept busy and we have a
		 * "paging_in_progress" reference on its object, so it's safe
		 * to unlock the object here.
		 */
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_unlock(src_page_object);

		/*
		 * Process the original contents of the source page
		 * into the destination page.
		 */
		bcopy((const char *)src_vaddr, (char *)dst_vaddr, PAGE_SIZE);

		/*
		 * Figure out what the original user virtual address was, based on the offset.
		 */
		userVA = 0;
		for (r = 0; r < pager->dyld_num_range; ++r) {
			vm_offset_t o = offset + cur_offset;
			if (pager->dyld_file_offset[r] <= o &&
			    o < pager->dyld_file_offset[r] + pager->dyld_size[r]) {
				userVA = pager->dyld_address[r] + (o - pager->dyld_file_offset[r]);
				break;
			}
		}

		/*
		 * If we found a valid range, fix up the page.
		 */
		if (r == pager->dyld_num_range) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_RANGE_NOT_FOUND), (uintptr_t)userVA);
			printf("%s(): Range not found for offset 0x%llx\n", __func__, (long long)cur_offset);
			if (panic_on_dyld_issue) {
				panic("%s(): Range not found for offset 0x%llx", __func__, (long long)cur_offset);
			}
			retval = KERN_FAILURE;
		} else if (fixup_page(dst_vaddr, userVA, pager) != KERN_SUCCESS) {
			/* KDBG / printf was done under fixup_page() */
			retval = KERN_FAILURE;
		}
		if (retval != KERN_SUCCESS) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_DYLD_PAGER, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_DYLD_PAGER_SLIDE_ERROR), userVA);
			if (panic_on_dyld_issue) {
				panic("%s(): dyld pager slide error %d at 0x%llx", __func__, retval, (uint64_t)userVA);
			}
		}

		assert(VM_PAGE_OBJECT(src_page) == src_page_object);
		assert(src_page->vmp_busy);
		assert(src_page_object->paging_in_progress > 0);
		vm_object_lock(src_page_object);

		/*
		 * Cleanup the result of vm_fault_page() of the source page.
		 */
		vm_page_wakeup_done(src_page_object, src_page);
		src_page = VM_PAGE_NULL;
		vm_object_paging_end(src_page_object);
		vm_object_unlock(src_page_object);

		if (top_page != VM_PAGE_NULL) {
			assert(VM_PAGE_OBJECT(top_page) == src_top_object);
			vm_object_lock(src_top_object);
			VM_PAGE_FREE(top_page);
			vm_object_paging_end(src_top_object);
			vm_object_unlock(src_top_object);
		}
	}

done:
	if (upl != NULL) {
		/* clean up the UPL */

		/*
		 * The pages are currently dirty because we've just been
		 * writing on them, but as far as we're concerned, they're
		 * clean since they contain their "original" contents as
		 * provided by us, the pager.
		 * Tell the UPL to mark them "clean".
		 */
		upl_clear_dirty(upl, TRUE);

		/* abort or commit the UPL */
		if (retval != KERN_SUCCESS) {
			upl_abort(upl, 0);
		} else {
			boolean_t empty;
			assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
			    "upl %p offset 0x%llx size 0x%x\n",
			    upl, upl->u_offset, upl->u_size);
			upl_commit_range(upl, 0, upl->u_size,
			    UPL_COMMIT_CS_VALIDATED | UPL_COMMIT_WRITTEN_BY_KERNEL,
			    upl_pl, pl_count, &empty);
		}

		/* and deallocate the UPL */
		upl_deallocate(upl);
		upl = NULL;
	}
	if (src_top_object != VM_OBJECT_NULL) {
		vm_object_deallocate(src_top_object);
	}
	return retval;
}

/*
 * dyld_pager_reference()
 *
 * Get a reference on this memory object.
 * For external usage only.  Assumes that the initial reference count is not 0,
 * i.e. one should not "revive" a dead pager this way.
 */
static void
dyld_pager_reference(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);

	lck_mtx_lock(&dyld_pager_lock);
	os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
	lck_mtx_unlock(&dyld_pager_lock);
}



/*
 * dyld_pager_terminate_internal:
 *
 * Trigger the asynchronous termination of the memory object associated
 * with this pager.
 * When the memory object is terminated, there will be one more call
 * to memory_object_deallocate() (i.e. dyld_pager_deallocate())
 * to finish the clean up.
 *
 * "dyld_pager_lock" should not be held by the caller.
 */
static void
dyld_pager_terminate_internal(
	dyld_pager_t pager)
{
	assert(pager->dyld_is_ready);
	assert(!pager->dyld_is_mapped);
	assert(os_ref_get_count_raw(&pager->dyld_ref_count) == 1);

	if (pager->dyld_backing_object != VM_OBJECT_NULL) {
		vm_object_deallocate(pager->dyld_backing_object);
		pager->dyld_backing_object = VM_OBJECT_NULL;
	}
	/* trigger the destruction of the memory object */
	memory_object_destroy(pager->dyld_header.mo_control, VM_OBJECT_DESTROY_PAGER);
}

/*
 * dyld_pager_deallocate_internal()
 *
 * Release a reference on this pager and free it when the last reference goes away.
 * Can be called with dyld_pager_lock held or not, but always returns
 * with it unlocked.
 */
static void
dyld_pager_deallocate_internal(
	dyld_pager_t   pager,
	bool           locked)
{
	os_ref_count_t ref_count;

	if (!locked) {
		lck_mtx_lock(&dyld_pager_lock);
	}

	/* drop a reference on this pager */
	ref_count = os_ref_release_locked_raw(&pager->dyld_ref_count, NULL);

	if (ref_count == 1) {
		/*
		 * Only this reference is left, which means that
		 * no one is really holding on to this pager anymore.
		 * Terminate it.
		 */
		dyld_pager_dequeue(pager);
		/* the pager is all ours: no need for the lock now */
		lck_mtx_unlock(&dyld_pager_lock);
		dyld_pager_terminate_internal(pager);
	} else if (ref_count == 0) {
		/*
		 * Dropped all references;  the memory object has
		 * been terminated.  Do some final cleanup and release the
		 * pager structure.
		 */
		lck_mtx_unlock(&dyld_pager_lock);

		kfree_data(pager->dyld_link_info, pager->dyld_link_info_size);
		pager->dyld_link_info = NULL;

		if (pager->dyld_header.mo_control != MEMORY_OBJECT_CONTROL_NULL) {
			memory_object_control_deallocate(pager->dyld_header.mo_control);
			pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
		}
		kfree_type(struct dyld_pager, pager);
		pager = NULL;
	} else {
		/* there are still plenty of references:  keep going... */
		lck_mtx_unlock(&dyld_pager_lock);
	}

	/* caution: lock is not held on return... */
}

/*
 * dyld_pager_deallocate()
 *
 * Release a reference on this pager and free it when the last
 * reference goes away.
 */
static void
dyld_pager_deallocate(
	memory_object_t mem_obj)
{
	dyld_pager_t    pager;

	pager = dyld_pager_lookup(mem_obj);
	dyld_pager_deallocate_internal(pager, FALSE);
}
1377 
1378 /*
1379  * dyld_pager_terminate(): nothing else to do here.
1380  */
1381 static kern_return_t
1382 dyld_pager_terminate(
1383 #if !DEBUG
1384 	__unused
1385 #endif
1386 	memory_object_t mem_obj)
1387 {
1388 	return KERN_SUCCESS;
1389 }
1390 
1391 /*
1392  * dyld_pager_map()
1393  *
1394  * This allows VM to let us, the EMM, know that this memory object
1395  * is currently mapped one or more times.  This is called by VM each time
1396  * the memory object gets mapped, but we only take one extra reference the
1397  * first time it is called.
1398  */
1399 static kern_return_t
1400 dyld_pager_map(
1401 	memory_object_t         mem_obj,
1402 	__unused vm_prot_t      prot)
1403 {
1404 	dyld_pager_t   pager;
1405 
1406 	pager = dyld_pager_lookup(mem_obj);
1407 
1408 	lck_mtx_lock(&dyld_pager_lock);
1409 	assert(pager->dyld_is_ready);
1410 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0); /* pager is alive */
1411 	if (!pager->dyld_is_mapped) {
1412 		pager->dyld_is_mapped = TRUE;
1413 		os_ref_retain_locked_raw(&pager->dyld_ref_count, NULL);
1414 	}
1415 	lck_mtx_unlock(&dyld_pager_lock);
1416 
1417 	return KERN_SUCCESS;
1418 }
1419 
1420 /*
1421  * dyld_pager_last_unmap()
1422  *
1423  * This is called by VM when this memory object is no longer mapped anywhere.
1424  */
1425 static kern_return_t
1426 dyld_pager_last_unmap(
1427 	memory_object_t mem_obj)
1428 {
1429 	dyld_pager_t    pager;
1430 
1431 	pager = dyld_pager_lookup(mem_obj);
1432 
1433 	lck_mtx_lock(&dyld_pager_lock);
1434 	if (pager->dyld_is_mapped) {
1435 		/*
1436 		 * All the mappings are gone, so let go of the one extra
1437 		 * reference that represents all the mappings of this pager.
1438 		 */
1439 		pager->dyld_is_mapped = FALSE;
1440 		dyld_pager_deallocate_internal(pager, TRUE);
1441 		/* caution: deallocate_internal() released the lock! */
1442 	} else {
1443 		lck_mtx_unlock(&dyld_pager_lock);
1444 	}
1445 
1446 	return KERN_SUCCESS;
1447 }
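/*
 * Note: dyld_pager_map() and dyld_pager_last_unmap() bracket the
 * "dyld_is_mapped" state, so any number of concurrent mappings of the
 * memory object is represented by a single pager reference: taken on
 * the first map, released here on the last unmap.
 */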
1448 
1449 static boolean_t
1450 dyld_pager_backing_object(
1451 	memory_object_t         mem_obj,
1452 	memory_object_offset_t  offset,
1453 	vm_object_t             *backing_object,
1454 	vm_object_offset_t      *backing_offset)
1455 {
1456 	dyld_pager_t   pager;
1457 
1458 	pager = dyld_pager_lookup(mem_obj);
1459 
1460 	*backing_object = pager->dyld_backing_object;
1461 	*backing_offset = offset;
1462 
1463 	return TRUE;
1464 }
1465 
1466 
1467 /*
1468  * Convert from memory_object to dyld_pager.
1469  */
1470 static dyld_pager_t
1471 dyld_pager_lookup(
1472 	memory_object_t  mem_obj)
1473 {
1474 	dyld_pager_t   pager;
1475 
1476 	assert(mem_obj->mo_pager_ops == &dyld_pager_ops);
1477 	pager = (dyld_pager_t)(uintptr_t) mem_obj;
1478 	assert(os_ref_get_count_raw(&pager->dyld_ref_count) > 0);
1479 	return pager;
1480 }
1481 
1482 /*
1483  * Create and return a pager for the given object with the
1484  * given slide information.
1485  */
1486 static dyld_pager_t
1487 dyld_pager_create(
1488 #if !defined(HAS_APPLE_PAC)
1489 	__unused
1490 #endif /* !defined(HAS_APPLE_PAC) */
1491 	task_t            task,
1492 	vm_object_t       backing_object,
1493 	struct mwl_region *regions,
1494 	uint32_t          region_cnt,
1495 	void              *link_info,
1496 	uint32_t          link_info_size)
1497 {
1498 	dyld_pager_t            pager;
1499 	memory_object_control_t control;
1500 	kern_return_t           kr;
1501 
1502 	pager = kalloc_type(struct dyld_pager, Z_WAITOK);
1503 	if (pager == NULL) {
1504 		return NULL;
1505 	}
1506 
1507 	/*
1508 	 * The vm_map call takes both named entry ports and raw memory
1509 	 * objects in the same parameter.  We need to make sure that
1510 	 * vm_map does not see this object as a named entry port.  So,
1511 	 * we reserve the first word in the object for a fake object type
1512 	 * setting - that will tell vm_map to use it as a memory object.
1513 	 */
1514 	pager->dyld_header.mo_ikot = IKOT_MEMORY_OBJECT;
1515 	pager->dyld_header.mo_pager_ops = &dyld_pager_ops;
1516 	pager->dyld_header.mo_control = MEMORY_OBJECT_CONTROL_NULL;
1517 	pager->dyld_header.mo_last_unmap_ctid = 0;
1518 
1519 	/*
1520 	 * Record the regions so the pager can find the offset from an address.
1521 	 */
1522 	pager->dyld_num_range = region_cnt;
1523 	for (uint32_t r = 0; r < region_cnt; ++r) {
1524 		pager->dyld_file_offset[r] = regions[r].mwlr_file_offset;
1525 		pager->dyld_address[r] = regions[r].mwlr_address;
1526 		pager->dyld_size[r] = regions[r].mwlr_size;
1527 		/* check that this range is covered by backing_object */
1528 		vm_object_offset_t end_offset;
1529 		if (os_add_overflow(pager->dyld_file_offset[r], pager->dyld_size[r],
1530 		    &end_offset)) {
1531 			vm_map_with_linking_stats.vmwls_overflow++;
1532 			kfree_type(struct dyld_pager, pager);
1533 			pager = NULL;
1534 			return NULL;
1535 		} else if (end_offset > backing_object->vo_size) {
1536 			vm_map_with_linking_stats.vmwls_bad_offset++;
1537 			kfree_type(struct dyld_pager, pager);
1538 			pager = NULL;
1539 			return NULL;
1540 		}
1541 	}
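	/*
	 * (The os_add_overflow() check above is what keeps the vo_size
	 * comparison honest: e.g. a hostile mwlr_file_offset of
	 * 0xFFFFFFFFFFFFF000 with a size of 0x2000 would wrap end_offset
	 * around to 0x1000 and otherwise appear "covered".)
	 */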
1542 
1543 	pager->dyld_is_ready = FALSE; /* not ready until it has a "name" */
1544 	/* existence reference for the caller */
1545 	os_ref_init_count_raw(&pager->dyld_ref_count, NULL, 1);
1546 	pager->dyld_is_mapped = FALSE;
1547 	pager->dyld_backing_object = backing_object;
1548 	pager->dyld_link_info = link_info; /* pager takes ownership of this pointer here */
1549 	pager->dyld_link_info_size = link_info_size;
1550 #if defined(HAS_APPLE_PAC)
1551 	pager->dyld_a_key = (task->map && task->map->pmap && !task->map->pmap->disable_jop) ? task->jop_pid : 0;
1552 #endif /* defined(HAS_APPLE_PAC) */
1553 
1554 	vm_object_reference(backing_object);
1555 	lck_mtx_lock(&dyld_pager_lock);
1556 	queue_enter_first(&dyld_pager_queue,
1557 	    pager,
1558 	    dyld_pager_t,
1559 	    dyld_pager_queue);
1560 	dyld_pager_count++;
1561 	if (dyld_pager_count > dyld_pager_count_max) {
1562 		dyld_pager_count_max = dyld_pager_count;
1563 	}
1564 	lck_mtx_unlock(&dyld_pager_lock);
1565 
1566 	kr = memory_object_create_named((memory_object_t) pager, 0, &control);
1567 	assert(kr == KERN_SUCCESS);
1568 
1569 	memory_object_mark_trusted(control);
1570 
1571 	lck_mtx_lock(&dyld_pager_lock);
1572 	/* the new pager is now ready to be used */
1573 	pager->dyld_is_ready = TRUE;
1574 	lck_mtx_unlock(&dyld_pager_lock);
1575 
1576 	/* wakeup anyone waiting for this pager to be ready */
1577 	thread_wakeup(&pager->dyld_is_ready);
1578 
1579 	return pager;
1580 }
1581 
1582 /*
1583  * dyld_pager_setup()
1584  *
1585  * Provide the caller with a memory object backed by the provided
1586  * "backing_object" VM object.
1587  */
1588 static memory_object_t
1589 dyld_pager_setup(
1590 	task_t            task,
1591 	vm_object_t       backing_object,
1592 	struct mwl_region *regions,
1593 	uint32_t          region_cnt,
1594 	void              *link_info,
1595 	uint32_t          link_info_size)
1596 {
1597 	dyld_pager_t      pager;
1598 
1599 	/* create new pager */
1600 	pager = dyld_pager_create(task, backing_object, regions, region_cnt, link_info, link_info_size);
1601 	if (pager == NULL) {
1602 		/* could not create a new pager */
1603 		return MEMORY_OBJECT_NULL;
1604 	}
1605 
1606 	lck_mtx_lock(&dyld_pager_lock);
1607 	while (!pager->dyld_is_ready) {
1608 		lck_mtx_sleep(&dyld_pager_lock,
1609 		    LCK_SLEEP_DEFAULT,
1610 		    &pager->dyld_is_ready,
1611 		    THREAD_UNINT);
1612 	}
1613 	lck_mtx_unlock(&dyld_pager_lock);
1614 
1615 	return (memory_object_t) pager;
1616 }
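/*
 * The wait loop above pairs with the thread_wakeup() in
 * dyld_pager_create(): the pager is only marked ready once
 * memory_object_create_named() has given it a "name".
 */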
1617 
1618 /*
1619  * Set up regions which use a special pager to apply dyld fixups.
1620  *
1621  * The arguments to this function are mostly just used as input,
1622  * with the exception of link_info, which is saved off in the pager
1623  * that gets created. If the pager takes ownership of *link_info, the
1624  * argument is NULLed; if not, the caller needs to free it on error.
1625  */
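/*
 * A hypothetical caller sketch (not the actual syscall path; error
 * handling elided) illustrating that ownership contract:
 *
 *	void *info = kalloc_data(info_size, Z_WAITOK);
 *	// ... copy the dyld link info in ...
 *	kr = vm_map_with_linking(task, regions, region_cnt,
 *	    &info, info_size, file_control);
 *	if (info != NULL) {
 *		kfree_data(info, info_size);	// pager didn't take ownership
 *	}
 */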
1626 kern_return_t
1627 vm_map_with_linking(
1628 	task_t                  task,
1629 	struct mwl_region       *regions,
1630 	uint32_t                region_cnt,
1631 	void                    **link_info,
1632 	uint32_t                link_info_size,
1633 	memory_object_control_t file_control)
1634 {
1635 	vm_map_t                map = task->map;
1636 	vm_object_t             file_object = VM_OBJECT_NULL;
1637 	memory_object_t         pager = MEMORY_OBJECT_NULL;
1638 	uint32_t                r;
1639 	vm_map_address_t        map_addr;
1640 	kern_return_t           kr = KERN_SUCCESS;
1641 	vm_map_entry_t          map_entry;
1642 	vm_object_t             backing_object = VM_OBJECT_NULL;
1643 	vm_object_t             shadow_object;
1644 	int                     num_extra_shadows;
1645 	uint64_t                num_extra_shadow_pages;
1646 
1647 	if (region_cnt == 0) {
1648 		kr = KERN_INVALID_ARGUMENT;
1649 		goto done;
1650 	}
1651 	file_object = memory_object_control_to_vm_object(file_control);
1652 	if (file_object == VM_OBJECT_NULL || file_object->internal) {
1653 		printf("%d[%s] %s: invalid object for provided file\n",
1654 		    proc_selfpid(), proc_name_address(current_proc()), __func__);
1655 		file_object = VM_OBJECT_NULL;
1656 		kr = KERN_INVALID_ARGUMENT;
1657 		goto done;
1658 	}
1659 
1660 	/*
1661 	 * Check that the mapping is backed by the same file.
1662 	 */
1663 	map_addr = regions[0].mwlr_address;
1664 	vm_map_lock_read(map);
1665 	if (!vm_map_lookup_entry(map,
1666 	    map_addr,
1667 	    &map_entry) ||
1668 	    map_entry->is_sub_map ||
1669 	    VME_OBJECT(map_entry) == VM_OBJECT_NULL) {
1670 		vm_map_unlock_read(map);
1671 		kr = KERN_INVALID_ADDRESS;
1672 		vm_map_with_linking_stats.vmwls_bad_addr++;
1673 		goto done;
1674 	}
1675 	if (!(map_entry->max_protection & VM_PROT_WRITE)) {
1676 		vm_map_unlock_read(map);
1677 		kr = KERN_PROTECTION_FAILURE;
1678 		vm_map_with_linking_stats.vmwls_bad_prot++;
1679 		goto done;
1680 	}
1681 	/* go down the shadow chain looking for the file object and its copy object */
1682 	num_extra_shadows = 0;
1683 	num_extra_shadow_pages = 0;
1684 	shadow_object = VME_OBJECT(map_entry);
1685 	vm_object_lock(shadow_object);
1686 	while (shadow_object->shadow != VM_OBJECT_NULL) {
1687 		vm_object_t next_object = shadow_object->shadow;
1688 		if (shadow_object->shadow == file_object &&
1689 		    shadow_object->vo_shadow_offset == 0) {
1690 			/*
1691 			 * Found our file object as shadow_object's shadow.
1692 			 * shadow_object should be its copy object (we'll check below
1693 			 * when we have its lock).
1694 			 * shadow_object will be the backing object for our dyld pager,
1695 			 * so let's take a reference to keep it alive until we create
1696 			 * our dyld pager.
1697 			 */
1698 			backing_object = shadow_object;
1699 			vm_object_reference_locked(backing_object);
1700 		}
1701 		if (backing_object == VM_OBJECT_NULL) {
1702 			num_extra_shadows++;
1703 			num_extra_shadow_pages += shadow_object->resident_page_count;
1704 			if (shadow_object->internal && shadow_object->pager) {
1705 				num_extra_shadow_pages +=
1706 				    vm_compressor_pager_get_count(shadow_object->pager);
1707 			}
1708 		}
1709 		vm_object_lock(next_object);
1710 		vm_object_unlock(shadow_object);
1711 		shadow_object = next_object;
1712 	}
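	/*
	 * For a well-formed mapping, the chain walked above looks like:
	 *
	 *	VME_OBJECT(map_entry) -> ... -> copy object -> file_object
	 *
	 * backing_object, if set, is the copy object sitting directly on
	 * top of file_object; anything walked before reaching it was
	 * counted as an "extra" shadow and is rejected below.
	 */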
1713 	if (shadow_object != file_object) {
1714 		/* the shadow chain does not end at the file provided by the caller */
1715 		printf("%d[%s] %s: mapping at 0x%llx is not backed by the expected file",
1716 		    proc_selfpid(), proc_name_address(current_proc()), __func__,
1717 		    (uint64_t)map_addr);
1718 		// ktriage_record(...);
1719 		vm_object_unlock(shadow_object);
1720 		shadow_object = VM_OBJECT_NULL;
1721 		vm_map_unlock_read(map);
1722 		vm_map_with_linking_stats.vmwls_bad_file++;
1723 		kr = KERN_INVALID_ARGUMENT;
1724 		goto done;
1725 	}
1726 	vm_object_unlock(shadow_object);
1727 	shadow_object = VM_OBJECT_NULL;
1728 	if (num_extra_shadows) {
1729 		/*
1730 		 * We found some extra shadow objects in the shadow chain for this mapping.
1731 		 * We're about to replace that mapping with a "dyld" pager backed by the
1732 		 * latest snapshot (copy) of the provided file, so any pages that had
1733 		 * previously been copied and modified in these extra shadow objects
1734 		 * will no longer be visible in this mapping.
1735 		 */
1736 		printf("%d[%s] %s: (warn) found %d shadow object(s) with %llu pages at 0x%llx\n",
1737 		    proc_selfpid(), proc_name_address(current_proc()), __func__,
1738 		    num_extra_shadows, num_extra_shadow_pages, (uint64_t)map_addr);
1739 		vm_map_unlock_read(map);
1740 		vm_map_with_linking_stats.vmwls_bad_shadows++;
1741 		kr = KERN_INVALID_ARGUMENT;
1742 		goto done;
1743 	}
1744 	if (backing_object == VM_OBJECT_NULL ||
1745 	    backing_object != file_object->vo_copy ||
1746 	    backing_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
1747 	    (backing_object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC &&
1748 	    !map_entry->needs_copy)) {
1749 		printf("%d[%s] %s: mapping at 0x%llx not a proper copy-on-write mapping\n",
1750 		    proc_selfpid(), proc_name_address(current_proc()), __func__,
1751 		    (uint64_t)map_addr);
1752 		vm_map_unlock_read(map);
1753 		vm_map_with_linking_stats.vmwls_bad_cow++;
1754 		kr = KERN_INVALID_ARGUMENT;
1755 		goto done;
1756 	}
1757 
1758 	vm_map_unlock_read(map);
1759 	map_entry = VM_MAP_ENTRY_NULL;
1760 
1761 	/* create a pager, backed by the latest snapshot (copy object) of the file */
1762 	pager = dyld_pager_setup(task, backing_object, regions, region_cnt, *link_info, link_info_size);
1763 	if (pager == MEMORY_OBJECT_NULL) {
1764 		kr = KERN_RESOURCE_SHORTAGE;
1765 		goto done;
1766 	}
1767 	*link_info = NULL; /* ownership of this pointer was given to pager */
1768 
1769 	for (r = 0; r < region_cnt; ++r) {
1770 		vm_map_kernel_flags_t vmk_flags = {
1771 			.vmf_fixed = true,
1772 			.vmf_overwrite = true,
1773 			.vmkf_overwrite_immutable = true,
1774 		};
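		/*
		 * vmf_fixed + vmf_overwrite: map exactly at mwlr_address,
		 * replacing whatever mapping is already there;
		 * vmkf_overwrite_immutable additionally allows replacing
		 * entries that were made permanent/immutable.
		 */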
1775 		struct mwl_region *rp = &regions[r];
1776 
1777 		/* map that pager over the portion of the mapping that needs sliding */
1778 		map_addr = (vm_map_address_t)rp->mwlr_address;
1779 
1780 		if (rp->mwlr_protections & VM_PROT_TPRO) {
1781 			vmk_flags.vmf_tpro = TRUE;
1782 		}
1783 
1784 		kr = mach_vm_map_kernel(map,
1785 		    vm_sanitize_wrap_addr_ref(&map_addr),
1786 		    rp->mwlr_size,
1787 		    0,
1788 		    vmk_flags,
1789 		    (ipc_port_t)(uintptr_t)pager,
1790 		    rp->mwlr_file_offset,
1791 		    TRUE,       /* copy == TRUE, as this is MAP_PRIVATE so COW may happen */
1792 		    rp->mwlr_protections & VM_PROT_DEFAULT,
1793 		    rp->mwlr_protections & VM_PROT_DEFAULT,
1794 		    VM_INHERIT_DEFAULT);
1795 		if (kr != KERN_SUCCESS) {
1796 			/* no need to clean up earlier regions, this will be process fatal */
1797 			goto done;
1798 		}
1799 	}
1800 
1801 	/* success! */
1802 	kr = KERN_SUCCESS;
1803 
1804 done:
1805 	if (backing_object != VM_OBJECT_NULL) {
1806 		/*
1807 		 * Release our extra reference on the backing object.
1808 		 * The pager (if created) took an extra reference on it.
1809 		 */
1810 		vm_object_deallocate(backing_object);
1811 		backing_object = VM_OBJECT_NULL;
1812 	}
1813 	if (pager != MEMORY_OBJECT_NULL) {
1814 		/*
1815 		 * Release the pager reference obtained by dyld_pager_setup().
1816 		 * The mappings, if successful, are each holding a reference on the
1817 		 * pager's VM object, which keeps the pager (aka memory object) alive.
1818 		 */
1819 		memory_object_deallocate(pager);
1820 		pager = MEMORY_OBJECT_NULL;
1821 	}
1822 	if (kr == KERN_SUCCESS) {
1823 		vm_map_with_linking_stats.vmwls_total_success++;
1824 	} else {
1825 		vm_map_with_linking_stats.vmwls_total_fail++;
1826 	}
1827 	return kr;
1828 }
1829 
1830 static uint64_t
1831 dyld_pager_purge(
1832 	dyld_pager_t pager)
1833 {
1834 	uint64_t pages_purged;
1835 	vm_object_t object;
1836 
1837 	pages_purged = 0;
1838 	object = memory_object_to_vm_object((memory_object_t) pager);
1839 	assert(object != VM_OBJECT_NULL);
1840 	vm_object_lock(object);
1841 	pages_purged = object->resident_page_count;
1842 	vm_object_reap_pages(object, REAP_DATA_FLUSH_CLEAN);
1843 	pages_purged -= object->resident_page_count;
1844 //	printf("     %s:%d pager %p object %p purged %llu left %d\n", __FUNCTION__, __LINE__, pager, object, pages_purged, object->resident_page_count);
1845 	vm_object_unlock(object);
1846 	return pages_purged;
1847 }
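/*
 * dyld_pager_purge() counts how many resident pages the
 * vm_object_reap_pages() call discards; dropping them is harmless,
 * since the pager can rebuild any of these pages on the next fault by
 * re-applying the fixups to the backing object's data.
 */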
1848 
1849 uint64_t
1850 dyld_pager_purge_all(void)
1851 {
1852 	uint64_t pages_purged;
1853 	dyld_pager_t pager;
1854 
1855 	pages_purged = 0;
1856 	lck_mtx_lock(&dyld_pager_lock);
1857 	queue_iterate(&dyld_pager_queue, pager, dyld_pager_t, dyld_pager_queue) {
1858 		pages_purged += dyld_pager_purge(pager);
1859 	}
1860 	lck_mtx_unlock(&dyld_pager_lock);
1861 #if DEVELOPMENT || DEBUG
1862 	printf("   %s:%d pages purged: %llu\n", __FUNCTION__, __LINE__, pages_purged);
1863 #endif /* DEVELOPMENT || DEBUG */
1864 	return pages_purged;
1865 }
1866