xref: /xnu-11417.101.15/osfmk/vm/vm_fault.c (revision e3723e1f17661b24996789d8afc084c0c3303b26)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm_fault.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Page fault handling module.
63  */
64 
65 #include <libkern/OSAtomic.h>
66 
67 #include <mach/mach_types.h>
68 #include <mach/kern_return.h>
69 #include <mach/message.h>       /* for error codes */
70 #include <mach/vm_param.h>
71 #include <mach/vm_behavior.h>
72 #include <mach/memory_object.h>
73 /* For memory_object_data_{request,unlock} */
74 #include <mach/sdt.h>
75 
76 #include <kern/kern_types.h>
77 #include <kern/host_statistics.h>
78 #include <kern/counter.h>
79 #include <kern/task.h>
80 #include <kern/thread.h>
81 #include <kern/sched_prim.h>
82 #include <kern/host.h>
83 #include <kern/mach_param.h>
84 #include <kern/macro_help.h>
85 #include <kern/zalloc_internal.h>
86 #include <kern/misc_protos.h>
87 #include <kern/policy_internal.h>
88 
89 #include <vm/vm_compressor_internal.h>
90 #include <vm/vm_compressor_pager_internal.h>
91 #include <vm/vm_fault_internal.h>
92 #include <vm/vm_map_internal.h>
93 #include <vm/vm_object_internal.h>
94 #include <vm/vm_page_internal.h>
95 #include <vm/vm_kern_internal.h>
96 #include <vm/pmap.h>
97 #include <vm/vm_pageout_internal.h>
98 #include <vm/vm_protos_internal.h>
99 #include <vm/vm_external.h>
100 #include <vm/memory_object.h>
101 #include <vm/vm_purgeable_internal.h>   /* Needed by some vm_page.h macros */
102 #include <vm/vm_shared_region.h>
103 #include <vm/vm_page_internal.h>
104 
105 #include <sys/codesign.h>
106 #include <sys/code_signing.h>
107 #include <sys/kdebug.h>
108 #include <sys/kdebug_triage.h>
109 #include <sys/reason.h>
110 #include <sys/signalvar.h>
111 
112 #include <san/kasan.h>
113 #include <libkern/coreanalytics/coreanalytics.h>
114 
115 #define VM_FAULT_CLASSIFY       0
116 
117 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
118 
119 int vm_protect_privileged_from_untrusted = 1;
120 
121 /*
122  * Enforce a maximum number of concurrent PageIns per vm-object to prevent
123  * high-I/O-volume tasks from saturating storage and starving the rest of the
124  * system.
125  *
126  * TODO: This throttling mechanism may be more naturally done by the pager,
127  * filesystem, or storage layers, which will have better information about how
128  * much concurrency the backing store can reasonably support.
129  */
130 TUNABLE(uint16_t, vm_object_pagein_throttle, "vm_object_pagein_throttle", 16);
131 
132 /*
133  * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control; it
134  * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
135  * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
136  * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
137  * keep the UI active so that the user has a chance to kill the offending task before the system
138  * completely hangs.
139  *
140  * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
141  * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
142  * will be throttled.  The throttling is done by giving the thread that's trying to demand zero a page a
143  * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
144  */
145 
146 extern void throttle_lowpri_io(int);
147 
148 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
149 
150 uint64_t vm_hard_throttle_threshold;
151 
152 #if DEBUG || DEVELOPMENT
153 static bool vmtc_panic_instead = false;
154 int panic_object_not_alive = 1;
155 #endif /* DEBUG || DEVELOPMENT */
156 
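/*
 * Descriptive note (added for clarity): this predicate is true when the VM
 * system has flagged the current task for throttling, or when free pages are
 * below vm_page_throttle_limit (or the compressor's hard limit has been
 * reached) and the calling thread's effective I/O policy is already at or
 * above THROTTLE_LEVEL_THROTTLED.
 */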
157 OS_ALWAYS_INLINE
158 boolean_t
159 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
160 {
161 	return vm_wants_task_throttled(current_task()) ||
162 	       ((vm_page_free_count < vm_page_throttle_limit ||
163 	       HARD_THROTTLE_LIMIT_REACHED()) &&
164 	       proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
165 }
166 
167 
168 /*
169  * XXX: For now, vm faults cannot be recursively disabled. If the need for
170  * nested code that disables faults arises, the implementation can be modified
171  * to track a disabled-count.
172  */
173 
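/*
 * Hypothetical usage sketch (illustration only, not taken from this file):
 *
 *	vm_fault_disable();
 *	... code that must not take a VM fault on this thread ...
 *	vm_fault_enable();
 *
 * Per the note above, these calls do not currently nest.
 */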
174 OS_ALWAYS_INLINE
175 void
176 vm_fault_disable(void)
177 {
178 	thread_t t = current_thread();
179 	assert(!t->th_vm_faults_disabled);
180 	t->th_vm_faults_disabled = true;
181 	act_set_debug_assert();
182 }
183 
184 OS_ALWAYS_INLINE
185 void
186 vm_fault_enable(void)
187 {
188 	thread_t t = current_thread();
189 	assert(t->th_vm_faults_disabled);
190 	t->th_vm_faults_disabled = false;
191 }
192 
193 OS_ALWAYS_INLINE
194 bool
195 vm_fault_get_disabled(void)
196 {
197 	thread_t t = current_thread();
198 	return t->th_vm_faults_disabled;
199 }
200 
201 #define HARD_THROTTLE_DELAY     10000   /* 10000 us == 10 ms */
202 #define SOFT_THROTTLE_DELAY     200     /* 200 us == .2 ms */
203 
204 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS   6
205 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC  20000
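/*
 * Descriptive note (added for clarity): together these defaults allow roughly
 * 120,000 newly created pages (about 470 MB, assuming 4 KB pages) per
 * 6-second window before the rate check in vm_page_throttled() can engage.
 */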
206 
207 
208 #define VM_STAT_DECOMPRESSIONS()        \
209 MACRO_BEGIN                             \
210 	counter_inc(&vm_statistics_decompressions); \
211 	current_thread()->decompressions++; \
212 MACRO_END
213 
214 boolean_t current_thread_aborted(void);
215 
216 /* Forward declarations of internal routines. */
217 static kern_return_t vm_fault_wire_fast(
218 	vm_map_t        map,
219 	vm_map_offset_t va,
220 	vm_prot_t       prot,
221 	vm_tag_t        wire_tag,
222 	vm_map_entry_t  entry,
223 	pmap_t          pmap,
224 	vm_map_offset_t pmap_addr,
225 	ppnum_t         *physpage_p);
226 
227 static kern_return_t vm_fault_internal(
228 	vm_map_t               map,
229 	vm_map_offset_t        vaddr,
230 	vm_prot_t              caller_prot,
231 	vm_tag_t               wire_tag,
232 	pmap_t                 pmap,
233 	vm_map_offset_t        pmap_addr,
234 	ppnum_t                *physpage_p,
235 	vm_object_fault_info_t fault_info);
236 
237 static void vm_fault_copy_cleanup(
238 	vm_page_t       page,
239 	vm_page_t       top_page);
240 
241 static void vm_fault_copy_dst_cleanup(
242 	vm_page_t       page);
243 
244 #if     VM_FAULT_CLASSIFY
245 extern void vm_fault_classify(vm_object_t       object,
246     vm_object_offset_t    offset,
247     vm_prot_t             fault_type);
248 
249 extern void vm_fault_classify_init(void);
250 #endif
251 
252 unsigned long vm_pmap_enter_blocked = 0;
253 unsigned long vm_pmap_enter_retried = 0;
254 
255 unsigned long vm_cs_validates = 0;
256 unsigned long vm_cs_revalidates = 0;
257 unsigned long vm_cs_query_modified = 0;
258 unsigned long vm_cs_validated_dirtied = 0;
259 unsigned long vm_cs_bitmap_validated = 0;
260 
261 #if CODE_SIGNING_MONITOR
262 uint64_t vm_cs_defer_to_csm = 0;
263 uint64_t vm_cs_defer_to_csm_not = 0;
264 #endif /* CODE_SIGNING_MONITOR */
265 
266 extern char *kdp_compressor_decompressed_page;
267 extern addr64_t kdp_compressor_decompressed_page_paddr;
268 extern ppnum_t  kdp_compressor_decompressed_page_ppnum;
269 
270 struct vmrtfr {
271 	int vmrtfr_maxi;
272 	int vmrtfr_curi;
273 	int64_t vmrtf_total;
274 	vm_rtfault_record_t *vm_rtf_records;
275 } vmrtfrs;
276 #define VMRTF_DEFAULT_BUFSIZE (4096)
277 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
278 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
279 
280 static void vm_rtfrecord_lock(void);
281 static void vm_rtfrecord_unlock(void);
282 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
283 
284 extern lck_grp_t vm_page_lck_grp_bucket;
285 extern lck_attr_t vm_page_lck_attr;
286 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
287 
288 #if DEVELOPMENT || DEBUG
289 extern int madvise_free_debug;
290 extern int madvise_free_debug_sometimes;
291 #endif /* DEVELOPMENT || DEBUG */
292 
293 extern int vm_pageout_protect_realtime;
294 
295 #if CONFIG_FREEZE
296 #endif /* CONFIG_FREEZE */
297 
298 /*
299  *	Routine:	vm_fault_init
300  *	Purpose:
301  *		Initialize our private data structures.
302  */
303 __startup_func
304 void
305 vm_fault_init(void)
306 {
307 	int i, vm_compressor_temp;
308 	boolean_t need_default_val = TRUE;
309 	/*
310 	 * Choose a value for the hard throttle threshold based on the amount of ram.  The threshold is
311 	 * computed as a percentage of available memory, and the percentage used is scaled inversely with
312 	 * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small memory systems
313 	 * and reduce the value down to 10% for very large memory configurations.  This helps give us a
314 	 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
315 	 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
316 	 */
317 
318 	vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
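	/*
	 * Worked example (illustrative): with 8 GB of usable memory the
	 * threshold is (35 - 8)% == 27% of sane_size; at 25 GB or more it
	 * bottoms out at (35 - 25)% == 10%.
	 */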
319 
320 	/*
321 	 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
322 	 */
323 
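	/*
	 * Note (added for clarity): the "vm_compressor" boot-arg is honored
	 * only when at most one mode bit below VM_PAGER_MAX_MODES is set;
	 * any other value is ignored and we fall through to the device-tree
	 * or compiled-in default.
	 */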
324 	if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
325 		for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
326 			if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
327 				need_default_val = FALSE;
328 				vm_compressor_mode = vm_compressor_temp;
329 				break;
330 			}
331 		}
332 		if (need_default_val) {
333 			printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
334 		}
335 	}
336 #if CONFIG_FREEZE
337 	if (need_default_val) {
338 		if (osenvironment_is_diagnostics()) {
339 			printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
340 			vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
341 			need_default_val = false;
342 		}
343 	}
344 #endif /* CONFIG_FREEZE */
345 	if (need_default_val) {
346 		/* If no boot arg or incorrect boot arg, try device tree. */
347 		PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
348 	}
349 	printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
350 	vm_config_init();
351 
352 	PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
353 	    &vm_protect_privileged_from_untrusted,
354 	    sizeof(vm_protect_privileged_from_untrusted));
355 
356 #if DEBUG || DEVELOPMENT
357 	(void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
358 
359 	if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
360 		madvise_free_debug = 0;
361 		madvise_free_debug_sometimes = 0;
362 	}
363 
364 	PE_parse_boot_argn("panic_object_not_alive", &panic_object_not_alive, sizeof(panic_object_not_alive));
365 #endif /* DEBUG || DEVELOPMENT */
366 }
367 
368 __startup_func
369 static void
370 vm_rtfault_record_init(void)
371 {
372 	size_t size;
373 
374 	vmrtf_num_records = MAX(vmrtf_num_records, 1);
375 	size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
376 	vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
377 	    ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
378 	vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
379 }
380 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
381 
382 /*
383  *	Routine:	vm_fault_cleanup
384  *	Purpose:
385  *		Clean up the result of vm_fault_page.
386  *	Results:
387  *		The paging reference for "object" is released.
388  *		"object" is unlocked.
389  *		If "top_page" is not null,  "top_page" is
390  *		freed and the paging reference for the object
391  *		containing it is released.
392  *
393  *	In/out conditions:
394  *		"object" must be locked.
395  */
396 void
397 vm_fault_cleanup(
398 	vm_object_t     object,
399 	vm_page_t       top_page)
400 {
401 	vm_object_paging_end(object);
402 	vm_object_unlock(object);
403 
404 	if (top_page != VM_PAGE_NULL) {
405 		object = VM_PAGE_OBJECT(top_page);
406 
407 		vm_object_lock(object);
408 		VM_PAGE_FREE(top_page);
409 		vm_object_paging_end(object);
410 		vm_object_unlock(object);
411 	}
412 }
413 
414 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
415 
416 
417 boolean_t       vm_page_deactivate_behind = TRUE;
418 /*
419  * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
420  */
421 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW     128
422 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER    16              /* don't make this too big... */
423                                                                 /* we use it to size an array on the stack */
424 
425 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
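/*
 * Descriptive note (added for clarity): with these defaults, once a sequential
 * run reaches 128 pages, vm_fault_deactivate_behind() deactivates the trailing
 * 16-page cluster and does so again after every further 16 pages of the run.
 */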
426 
427 #define MAX_SEQUENTIAL_RUN      (1024 * 1024 * 1024)
428 
429 /*
430  * vm_fault_is_sequential
431  *
432  * Determine if sequential access is in progress
433  * in accordance with the behavior specified.
434  * Update state to indicate current access pattern.
435  *
436  * object must have at least the shared lock held
437  */
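/*
 * Descriptive note (added for clarity): object->sequential is a signed byte
 * count.  Each fault that extends a forward run adds PAGE_SIZE, each fault
 * that extends a backward run subtracts PAGE_SIZE, and the value is clamped
 * to +/- MAX_SEQUENTIAL_RUN; a value of 0 means no sequential pattern.
 */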
438 static
439 void
440 vm_fault_is_sequential(
441 	vm_object_t             object,
442 	vm_object_offset_t      offset,
443 	vm_behavior_t           behavior)
444 {
445 	vm_object_offset_t      last_alloc;
446 	int                     sequential;
447 	int                     orig_sequential;
448 
449 	last_alloc = object->last_alloc;
450 	sequential = object->sequential;
451 	orig_sequential = sequential;
452 
453 	offset = vm_object_trunc_page(offset);
454 	if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
455 		/* re-faulting in the same page: no change in behavior */
456 		return;
457 	}
458 
459 	switch (behavior) {
460 	case VM_BEHAVIOR_RANDOM:
461 		/*
462 		 * reset indicator of sequential behavior
463 		 */
464 		sequential = 0;
465 		break;
466 
467 	case VM_BEHAVIOR_SEQUENTIAL:
468 		if (offset && last_alloc == offset - PAGE_SIZE_64) {
469 			/*
470 			 * advance indicator of sequential behavior
471 			 */
472 			if (sequential < MAX_SEQUENTIAL_RUN) {
473 				sequential += PAGE_SIZE;
474 			}
475 		} else {
476 			/*
477 			 * reset indicator of sequential behavior
478 			 */
479 			sequential = 0;
480 		}
481 		break;
482 
483 	case VM_BEHAVIOR_RSEQNTL:
484 		if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
485 			/*
486 			 * advance indicator of sequential behavior
487 			 */
488 			if (sequential > -MAX_SEQUENTIAL_RUN) {
489 				sequential -= PAGE_SIZE;
490 			}
491 		} else {
492 			/*
493 			 * reset indicator of sequential behavior
494 			 */
495 			sequential = 0;
496 		}
497 		break;
498 
499 	case VM_BEHAVIOR_DEFAULT:
500 	default:
501 		if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
502 			/*
503 			 * advance indicator of sequential behavior
504 			 */
505 			if (sequential < 0) {
506 				sequential = 0;
507 			}
508 			if (sequential < MAX_SEQUENTIAL_RUN) {
509 				sequential += PAGE_SIZE;
510 			}
511 		} else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
512 			/*
513 			 * advance indicator of sequential behavior
514 			 */
515 			if (sequential > 0) {
516 				sequential = 0;
517 			}
518 			if (sequential > -MAX_SEQUENTIAL_RUN) {
519 				sequential -= PAGE_SIZE;
520 			}
521 		} else {
522 			/*
523 			 * reset indicator of sequential behavior
524 			 */
525 			sequential = 0;
526 		}
527 		break;
528 	}
529 	if (sequential != orig_sequential) {
530 		if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
531 			/*
532 			 * if someone else has already updated object->sequential
533 			 * don't bother trying to update it or object->last_alloc
534 			 */
535 			return;
536 		}
537 	}
538 	/*
539 	 * I'd like to do this with a OSCompareAndSwap64, but that
540 	 * doesn't exist for PPC...  however, it shouldn't matter
541 	 * that much... last_alloc is maintained so that we can determine
542 	 * if a sequential access pattern is taking place... if only
543 	 * one thread is banging on this object, no problem with the unprotected
544 	 * update... if 2 or more threads are banging away, we run the risk of
545 	 * someone seeing a mangled update... however, in the face of multiple
546 	 * accesses, no sequential access pattern can develop anyway, so we
547 	 * haven't lost any real info.
548 	 */
549 	object->last_alloc = offset;
550 }
551 
552 #if DEVELOPMENT || DEBUG
553 uint64_t vm_page_deactivate_behind_count = 0;
554 #endif /* DEVELOPMENT || DEBUG */
555 
556 /*
557  * vm_fault_deactivate_behind
558  *
559  * Determine if sequential access is in progress
560  * in accordance with the behavior specified.  If
561  * so, compute a potential page to deactivate and
562  * deactivate it.
563  *
564  * object must be locked.
565  *
566  * return TRUE if we actually deactivate a page
567  */
568 static
569 boolean_t
570 vm_fault_deactivate_behind(
571 	vm_object_t             object,
572 	vm_object_offset_t      offset,
573 	vm_behavior_t           behavior)
574 {
575 	int             n;
576 	int             pages_in_run = 0;
577 	int             max_pages_in_run = 0;
578 	int             sequential_run;
579 	int             sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
580 	vm_object_offset_t      run_offset = 0;
581 	vm_object_offset_t      pg_offset = 0;
582 	vm_page_t       m;
583 	vm_page_t       page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
584 
585 	pages_in_run = 0;
586 #if TRACEFAULTPAGE
587 	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
588 #endif
589 	if (is_kernel_object(object) || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
590 		/*
591 		 * Do not deactivate pages from the kernel object: they
592 		 * are not intended to become pageable,
593 		 * or we've disabled the deactivate behind mechanism,
594 		 * or we are dealing with an offset that is not aligned to
595 		 * the system's PAGE_SIZE because in that case we will
596 		 * handle the deactivation on the aligned offset and, thus,
597 		 * the full PAGE_SIZE page once. This helps us avoid redundant
598 		 * deactivations and extra faults.
599 		 */
600 		return FALSE;
601 	}
602 	if ((sequential_run = object->sequential)) {
603 		if (sequential_run < 0) {
604 			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
605 			sequential_run = 0 - sequential_run;
606 		} else {
607 			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
608 		}
609 	}
610 	switch (behavior) {
611 	case VM_BEHAVIOR_RANDOM:
612 		break;
613 	case VM_BEHAVIOR_SEQUENTIAL:
614 		if (sequential_run >= (int)PAGE_SIZE) {
615 			run_offset = 0 - PAGE_SIZE_64;
616 			max_pages_in_run = 1;
617 		}
618 		break;
619 	case VM_BEHAVIOR_RSEQNTL:
620 		if (sequential_run >= (int)PAGE_SIZE) {
621 			run_offset = PAGE_SIZE_64;
622 			max_pages_in_run = 1;
623 		}
624 		break;
625 	case VM_BEHAVIOR_DEFAULT:
626 	default:
627 	{       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
628 
629 		/*
630 		 * determine if the run of sequential access has been
631 		 * long enough on an object with default access behavior
632 		 * to consider it for deactivation
633 		 */
634 		if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
635 			/*
636 			 * the comparisons between offset and behind are done
637 			 * in this kind of odd fashion in order to prevent wrap around
638 			 * at the end points
639 			 */
640 			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
641 				if (offset >= behind) {
642 					run_offset = 0 - behind;
643 					pg_offset = PAGE_SIZE_64;
644 					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
645 				}
646 			} else {
647 				if (offset < -behind) {
648 					run_offset = behind;
649 					pg_offset = 0 - PAGE_SIZE_64;
650 					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
651 				}
652 			}
653 		}
654 		break;}
655 	}
656 	for (n = 0; n < max_pages_in_run; n++) {
657 		m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
658 
659 		if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache &&
660 		    (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
661 		    !vm_page_is_fictitious(m) && !m->vmp_absent) {
662 			page_run[pages_in_run++] = m;
663 
664 			/*
665 			 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
666 			 *
667 			 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
668 			 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
669 			 * new reference happens. If no futher references happen on the page after that remote TLB flushes
670 			 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
671 			 * by pageout_scan, which is just fine since the last reference would have happened quite far
672 			 * in the past (TLB caches don't hang around for very long), and of course could just as easily
673 			 * have happened before we did the deactivate_behind.
674 			 */
675 			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
676 		}
677 	}
678 	if (pages_in_run) {
679 		vm_page_lockspin_queues();
680 
681 		for (n = 0; n < pages_in_run; n++) {
682 			m = page_run[n];
683 
684 			vm_page_deactivate_internal(m, FALSE);
685 
686 #if DEVELOPMENT || DEBUG
687 			vm_page_deactivate_behind_count++;
688 #endif /* DEVELOPMENT || DEBUG */
689 
690 #if TRACEFAULTPAGE
691 			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
692 #endif
693 		}
694 		vm_page_unlock_queues();
695 
696 		return TRUE;
697 	}
698 	return FALSE;
699 }
700 
701 
702 #if (DEVELOPMENT || DEBUG)
703 uint32_t        vm_page_creation_throttled_hard = 0;
704 uint32_t        vm_page_creation_throttled_soft = 0;
705 uint64_t        vm_page_creation_throttle_avoided = 0;
706 #endif /* DEVELOPMENT || DEBUG */
707 
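/*
 * Descriptive note (added for clarity): returns the number of microseconds
 * the caller should delay before retrying (HARD_THROTTLE_DELAY or
 * SOFT_THROTTLE_DELAY), or 0 when no throttling is required.
 */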
708 static int
709 vm_page_throttled(boolean_t page_kept)
710 {
711 	clock_sec_t     elapsed_sec;
712 	clock_sec_t     tv_sec;
713 	clock_usec_t    tv_usec;
714 	task_t          curtask = current_task_early();
715 
716 	thread_t thread = current_thread();
717 
718 	if (thread->options & TH_OPT_VMPRIV) {
719 		return 0;
720 	}
721 
722 	if (curtask && !curtask->active) {
723 		return 0;
724 	}
725 
726 	if (thread->t_page_creation_throttled) {
727 		thread->t_page_creation_throttled = 0;
728 
729 		if (page_kept == FALSE) {
730 			goto no_throttle;
731 		}
732 	}
733 	if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
734 #if (DEVELOPMENT || DEBUG)
735 		thread->t_page_creation_throttled_hard++;
736 		OSAddAtomic(1, &vm_page_creation_throttled_hard);
737 #endif /* DEVELOPMENT || DEBUG */
738 		return HARD_THROTTLE_DELAY;
739 	}
740 
741 	if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
742 	    thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
743 		if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
744 #if (DEVELOPMENT || DEBUG)
745 			OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
746 #endif
747 			goto no_throttle;
748 		}
749 		clock_get_system_microtime(&tv_sec, &tv_usec);
750 
751 		elapsed_sec = tv_sec - thread->t_page_creation_time;
752 
753 		if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
754 		    (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
755 			if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
756 				/*
757 				 * we'll reset our stats to give a well behaved app
758 				 * that was unlucky enough to accumulate a bunch of pages
759 				 * over a long period of time a chance to get out of
760 				 * the throttled state... we reset the counter and timestamp
761 				 * so that if it stays under the rate limit for the next second
762 				 * it will be back in our good graces... if it exceeds it, it
763 				 * will remain in the throttled state
764 				 */
765 				thread->t_page_creation_time = tv_sec;
766 				thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
767 			}
768 			VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
769 
770 			thread->t_page_creation_throttled = 1;
771 
772 			if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
773 #if (DEVELOPMENT || DEBUG)
774 				thread->t_page_creation_throttled_hard++;
775 				OSAddAtomic(1, &vm_page_creation_throttled_hard);
776 #endif /* DEVELOPMENT || DEBUG */
777 				return HARD_THROTTLE_DELAY;
778 			} else {
779 #if (DEVELOPMENT || DEBUG)
780 				thread->t_page_creation_throttled_soft++;
781 				OSAddAtomic(1, &vm_page_creation_throttled_soft);
782 #endif /* DEVELOPMENT || DEBUG */
783 				return SOFT_THROTTLE_DELAY;
784 			}
785 		}
786 		thread->t_page_creation_time = tv_sec;
787 		thread->t_page_creation_count = 0;
788 	}
789 no_throttle:
790 	thread->t_page_creation_count++;
791 
792 	return 0;
793 }
794 
795 extern boolean_t vm_pageout_running;
796 static __attribute__((noinline, not_tail_called)) void
797 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(
798 	int throttle_delay)
799 {
800 	/* make sure vm_pageout_scan() gets to work while we're throttled */
801 	if (!vm_pageout_running) {
802 		thread_wakeup((event_t)&vm_page_free_wanted);
803 	}
804 	delay(throttle_delay);
805 }
806 
807 
808 /*
809  * check for various conditions that would
810  * prevent us from creating a ZF page...
811  * cleanup is based on being called from vm_fault_page
812  *
813  * object must be locked
814  * object == m->vmp_object
815  */
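/*
 * Descriptive note (added for clarity): returns VM_FAULT_SUCCESS when it is
 * safe to create the zero-fill page; otherwise it frees "m" (if any), calls
 * vm_fault_cleanup(), and returns VM_FAULT_MEMORY_ERROR, VM_FAULT_INTERRUPTED
 * or VM_FAULT_MEMORY_SHORTAGE.
 */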
816 static vm_fault_return_t
817 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
818 {
819 	int throttle_delay;
820 
821 	if (object->shadow_severed ||
822 	    VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
823 		/*
824 		 * Either:
825 		 * 1. the shadow chain was severed,
826 		 * 2. the purgeable object is volatile or empty and is marked
827 		 *    to fault on access while volatile.
828 		 * Just have to return an error at this point
829 		 */
830 		if (m != VM_PAGE_NULL) {
831 			VM_PAGE_FREE(m);
832 		}
833 		vm_fault_cleanup(object, first_m);
834 
835 		thread_interrupt_level(interruptible_state);
836 
837 		if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
838 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
839 		}
840 
841 		if (object->shadow_severed) {
842 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
843 		}
844 		return VM_FAULT_MEMORY_ERROR;
845 	}
846 	if (page_throttle == TRUE) {
847 		if ((throttle_delay = vm_page_throttled(FALSE))) {
848 			/*
849 			 * we're throttling zero-fills...
850 			 * treat this as if we couldn't grab a page
851 			 */
852 			if (m != VM_PAGE_NULL) {
853 				VM_PAGE_FREE(m);
854 			}
855 			vm_fault_cleanup(object, first_m);
856 
857 			VM_DEBUG_EVENT(vmf_check_zfdelay, DBG_VM_FAULT_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
858 
859 			__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
860 
861 			if (current_thread_aborted()) {
862 				thread_interrupt_level(interruptible_state);
863 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
864 				return VM_FAULT_INTERRUPTED;
865 			}
866 			thread_interrupt_level(interruptible_state);
867 
868 			return VM_FAULT_MEMORY_SHORTAGE;
869 		}
870 	}
871 	return VM_FAULT_SUCCESS;
872 }
873 
874 /*
875  * Clear the code signing bits on the given page_t
876  */
877 static void
878 vm_fault_cs_clear(vm_page_t m)
879 {
880 	m->vmp_cs_validated = VMP_CS_ALL_FALSE;
881 	m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
882 	m->vmp_cs_nx = VMP_CS_ALL_FALSE;
883 }
884 
885 /*
886  * Enqueues the given page on the throttled queue.
887  * The caller must hold the vm_page_queue_lock and it will be held on return.
888  */
889 static void
890 vm_fault_enqueue_throttled_locked(vm_page_t m)
891 {
892 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
893 	assert(!VM_PAGE_WIRED(m));
894 
895 	/*
896 	 * can't be on the pageout queue since we don't
897 	 * have a pager to try and clean to
898 	 */
899 	vm_page_queues_remove(m, TRUE);
900 	vm_page_check_pageable_safe(m);
901 	vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
902 	m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
903 	vm_page_throttled_count++;
904 }
905 
906 /*
907  * do the work to zero fill a page and
908  * inject it into the correct paging queue
909  *
910  * m->vmp_object must be locked
911  * page queue lock must NOT be held
912  */
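/*
 * Descriptive note (added for clarity): returns the fault type to report in
 * the trace point: DBG_NZF_PAGE_FAULT when zero-filling was suppressed via
 * no_zero_fill, DBG_ZERO_FILL_FAULT otherwise.
 */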
913 static int
914 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
915 {
916 	int my_fault = DBG_ZERO_FILL_FAULT;
917 	vm_object_t     object;
918 
919 	object = VM_PAGE_OBJECT(m);
920 
921 	/*
922 	 * This is a zero-fill page fault...
923 	 *
924 	 * Checking the page lock is a waste of
925 	 * time;  this page was absent, so
926 	 * it can't be page locked by a pager.
927 	 *
928 	 * we also consider it undefined
929 	 * with respect to instruction
930 	 * execution.  i.e. it is the responsibility
931 	 * of higher layers to call for an instruction
932 	 * sync after changing the contents and before
933 	 * sending a program into this area.  We
934 	 * choose this approach for performance
935 	 */
936 	vm_fault_cs_clear(m);
937 	m->vmp_pmapped = TRUE;
938 
939 	if (no_zero_fill == TRUE) {
940 		my_fault = DBG_NZF_PAGE_FAULT;
941 
942 		if (m->vmp_absent && m->vmp_busy) {
943 			return my_fault;
944 		}
945 	} else {
946 		vm_page_zero_fill(
947 			m
948 			);
949 
950 		counter_inc(&vm_statistics_zero_fill_count);
951 		DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
952 	}
953 	assert(!m->vmp_laundry);
954 	assert(!is_kernel_object(object));
955 	//assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
956 	if (!VM_DYNAMIC_PAGING_ENABLED() &&
957 	    (object->purgable == VM_PURGABLE_DENY ||
958 	    object->purgable == VM_PURGABLE_NONVOLATILE ||
959 	    object->purgable == VM_PURGABLE_VOLATILE)) {
960 		vm_page_lockspin_queues();
961 		if (!VM_DYNAMIC_PAGING_ENABLED()) {
962 			vm_fault_enqueue_throttled_locked(m);
963 		}
964 		vm_page_unlock_queues();
965 	}
966 	return my_fault;
967 }
968 
969 /*
970  * Recovery actions for vm_fault_page
971  */
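/*
 * Descriptive note (added for clarity): wakes up the busy page and, if it is
 * not yet on a pageable queue, optionally zero-fills a page that was left
 * marked "absent" (when *clear_absent_on_error is set) before putting it on
 * the inactive or active queue.
 */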
972 __attribute__((always_inline))
973 static void
974 vm_fault_page_release_page(
975 	vm_page_t m,                    /* Page to release */
976 	bool *clear_absent_on_error /* IN/OUT */)
977 {
978 	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);
979 	if (!VM_PAGE_PAGEABLE(m)) {
980 		vm_page_lockspin_queues();
981 		if (*clear_absent_on_error && m->vmp_absent) {
982 			vm_page_zero_fill(
983 				m
984 				);
985 			counter_inc(&vm_statistics_zero_fill_count);
986 			DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
987 			m->vmp_absent = false;
988 		}
989 		if (!VM_PAGE_PAGEABLE(m)) {
990 			if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
991 				vm_page_deactivate(m);
992 			} else {
993 				vm_page_activate(m);
994 			}
995 		}
996 		vm_page_unlock_queues();
997 	}
998 	*clear_absent_on_error = false;
999 }
1000 /*
1001  *	Routine:	vm_fault_page
1002  *	Purpose:
1003  *		Find the resident page for the virtual memory
1004  *		specified by the given virtual memory object
1005  *		and offset.
1006  *	Additional arguments:
1007  *		The required permissions for the page is given
1008  *		in "fault_type".  Desired permissions are included
1009  *		in "protection".
1010  *		fault_info is passed along to determine pagein cluster
1011  *		limits... it contains the expected reference pattern,
1012  *		cluster size if available, etc...
1013  *
1014  *		If the desired page is known to be resident (for
1015  *		example, because it was previously wired down), asserting
1016  *		the "unwiring" parameter will speed the search.
1017  *
1018  *		If the operation can be interrupted (by thread_abort
1019  *		or thread_terminate), then the "interruptible"
1020  *		parameter should be asserted.
1021  *
1022  *	Results:
1023  *		The page containing the proper data is returned
1024  *		in "result_page".
1025  *
1026  *	In/out conditions:
1027  *		The source object must be locked and referenced,
1028  *		and must donate one paging reference.  The reference
1029  *		is not affected.  The paging reference and lock are
1030  *		consumed.
1031  *
1032  *		If the call succeeds, the object in which "result_page"
1033  *		resides is left locked and holding a paging reference.
1034  *		If this is not the original object, a busy page in the
1035  *		original object is returned in "top_page", to prevent other
1036  *		callers from pursuing this same data, along with a paging
1037  *		reference for the original object.  The "top_page" should
1038  *		be destroyed when this guarantee is no longer required.
1039  *		The "result_page" is also left busy.  It is not removed
1040  *		from the pageout queues.
1041  *	Special Case:
1042  *		A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
1043  *		fault succeeded but there's no VM page (i.e. the VM object
1044  *              does not actually hold VM pages, but device memory or
1045  *		large pages).  The object is still locked and we still hold a
1046  *		paging_in_progress reference.
1047  */
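/*
 * Hypothetical caller sketch (illustration only; see vm_fault_internal() and
 * vm_fault_copy() in this file for real usage):
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	result = vm_fault_page(object, offset, VM_PROT_READ, FALSE, FALSE,
 *	                       &prot, &result_page, &top_page, NULL,
 *	                       &error_code, FALSE, &fault_info);
 *	if (result == VM_FAULT_SUCCESS) {
 *		... use result_page, which is returned busy ...
 *		vm_page_wakeup_done(VM_PAGE_OBJECT(result_page), result_page);
 *		vm_fault_cleanup(VM_PAGE_OBJECT(result_page), top_page);
 *	}
 */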
1048 unsigned int vm_fault_page_blocked_access = 0;
1049 unsigned int vm_fault_page_forced_retry = 0;
1050 
1051 vm_fault_return_t
1052 vm_fault_page(
1053 	/* Arguments: */
1054 	vm_object_t     first_object,   /* Object to begin search */
1055 	vm_object_offset_t first_offset,        /* Offset into object */
1056 	vm_prot_t       fault_type,     /* What access is requested */
1057 	boolean_t       must_be_resident,/* Must page be resident? */
1058 	boolean_t       caller_lookup,  /* caller looked up page */
1059 	/* Modifies in place: */
1060 	vm_prot_t       *protection,    /* Protection for mapping */
1061 	vm_page_t       *result_page,   /* Page found, if successful */
1062 	/* Returns: */
1063 	vm_page_t       *top_page,      /* Page in top object, if
1064                                          * not result_page.  */
1065 	int             *type_of_fault, /* if non-null, fill in with type of fault
1066                                          * COW, zero-fill, etc... returned in trace point */
1067 	/* More arguments: */
1068 	kern_return_t   *error_code,    /* code if page is in error */
1069 	boolean_t       no_zero_fill,   /* don't zero fill absent pages */
1070 	vm_object_fault_info_t fault_info)
1071 {
1072 	vm_page_t               m;
1073 	vm_object_t             object;
1074 	vm_object_offset_t      offset;
1075 	vm_page_t               first_m;
1076 	vm_object_t             next_object;
1077 	vm_object_t             copy_object;
1078 	boolean_t               look_for_page;
1079 	boolean_t               force_fault_retry = FALSE;
1080 	vm_prot_t               access_required = fault_type;
1081 	vm_prot_t               wants_copy_flag;
1082 	kern_return_t           wait_result;
1083 	wait_interrupt_t        interruptible_state;
1084 	boolean_t               data_already_requested = FALSE;
1085 	vm_behavior_t           orig_behavior;
1086 	vm_size_t               orig_cluster_size;
1087 	vm_fault_return_t       error;
1088 	int                     my_fault;
1089 	uint32_t                try_failed_count;
1090 	wait_interrupt_t        interruptible; /* how may fault be interrupted? */
1091 	int                     external_state = VM_EXTERNAL_STATE_UNKNOWN;
1092 	memory_object_t         pager;
1093 	vm_fault_return_t       retval;
1094 	int                     grab_options;
1095 	bool                    clear_absent_on_error = false;
1096 
1097 /*
1098  * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1099  * marked as paged out in the compressor pager or the pager doesn't exist.
1100  * Note also that if the pager for an internal object
1101  * has not been created, the pager is not invoked regardless of the value
1102  * of MUST_ASK_PAGER().
1103  *
1104  * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1105  * is marked as paged out in the compressor pager.
1106  * PAGED_OUT() is used to determine if a page has already been pushed
1107  * into a copy object in order to avoid a redundant page out operation.
1108  */
1109 #define MUST_ASK_PAGER(o, f, s)                                 \
1110 	((s = vm_object_compressor_pager_state_get((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1111 
1112 #define PAGED_OUT(o, f) \
1113 	(vm_object_compressor_pager_state_get((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
1114 
1115 #if TRACEFAULTPAGE
1116 	dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1117 #endif
1118 
1119 	interruptible = fault_info->interruptible;
1120 	interruptible_state = thread_interrupt_level(interruptible);
1121 
1122 	/*
1123 	 *	INVARIANTS (through entire routine):
1124 	 *
1125 	 *	1)	At all times, we must either have the object
1126 	 *		lock or a busy page in some object to prevent
1127 	 *		some other thread from trying to bring in
1128 	 *		the same page.
1129 	 *
1130 	 *		Note that we cannot hold any locks during the
1131 	 *		pager access or when waiting for memory, so
1132 	 *		we use a busy page then.
1133 	 *
1134 	 *	2)	To prevent another thread from racing us down the
1135 	 *		shadow chain and entering a new page in the top
1136 	 *		object before we do, we must keep a busy page in
1137 	 *		the top object while following the shadow chain.
1138 	 *
1139 	 *	3)	We must increment paging_in_progress on any object
1140 	 *		for which we have a busy page before dropping
1141 	 *		the object lock
1142 	 *
1143 	 *	4)	We leave busy pages on the pageout queues.
1144 	 *		If the pageout daemon comes across a busy page,
1145 	 *		it will remove the page from the pageout queues.
1146 	 */
1147 
1148 	object = first_object;
1149 	offset = first_offset;
1150 	first_m = VM_PAGE_NULL;
1151 	access_required = fault_type;
1152 
1153 	/*
1154 	 * default type of fault
1155 	 */
1156 	my_fault = DBG_CACHE_HIT_FAULT;
1157 	thread_pri_floor_t token;
1158 	bool    drop_floor = false;
1159 
1160 	while (TRUE) {
1161 #if TRACEFAULTPAGE
1162 		dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
1163 #endif
1164 
1165 		grab_options = 0;
1166 #if CONFIG_SECLUDED_MEMORY
1167 		if (object->can_grab_secluded) {
1168 			grab_options |= VM_PAGE_GRAB_SECLUDED;
1169 		}
1170 #endif /* CONFIG_SECLUDED_MEMORY */
1171 
1172 		if (!object->alive) {
1173 			/*
1174 			 * object is no longer valid
1175 			 * clean up and return error
1176 			 */
1177 #if DEVELOPMENT || DEBUG
1178 			printf("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
1179 			if (panic_object_not_alive) {
1180 				panic("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
1181 			}
1182 #endif /* DEVELOPMENT || DEBUG */
1183 			vm_fault_cleanup(object, first_m);
1184 			thread_interrupt_level(interruptible_state);
1185 
1186 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1187 			return VM_FAULT_MEMORY_ERROR;
1188 		}
1189 
1190 		if (!object->pager_created && object->phys_contiguous) {
1191 			/*
1192 			 * A physically-contiguous object without a pager:
1193 			 * must be a "large page" object.  We do not deal
1194 			 * with VM pages for this object.
1195 			 */
1196 			caller_lookup = FALSE;
1197 			m = VM_PAGE_NULL;
1198 			goto phys_contig_object;
1199 		}
1200 
1201 		if (object->blocked_access) {
1202 			/*
1203 			 * Access to this VM object has been blocked.
1204 			 * Replace our "paging_in_progress" reference with
1205 			 * a "activity_in_progress" reference and wait for
1206 			 * access to be unblocked.
1207 			 */
1208 			caller_lookup = FALSE; /* no longer valid after sleep */
1209 			vm_object_activity_begin(object);
1210 			vm_object_paging_end(object);
1211 			while (object->blocked_access) {
1212 				vm_object_sleep(object,
1213 				    VM_OBJECT_EVENT_UNBLOCKED,
1214 				    THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
1215 			}
1216 			vm_fault_page_blocked_access++;
1217 			vm_object_paging_begin(object);
1218 			vm_object_activity_end(object);
1219 		}
1220 
1221 		/*
1222 		 * See whether the page at 'offset' is resident
1223 		 */
1224 		if (caller_lookup == TRUE) {
1225 			/*
1226 			 * The caller has already looked up the page
1227 			 * and gave us the result in "result_page".
1228 			 * We can use this for the first lookup but
1229 			 * it loses its validity as soon as we unlock
1230 			 * the object.
1231 			 */
1232 			m = *result_page;
1233 			caller_lookup = FALSE; /* no longer valid after that */
1234 		} else {
1235 			m = vm_page_lookup(object, vm_object_trunc_page(offset));
1236 		}
1237 #if TRACEFAULTPAGE
1238 		dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1239 #endif
1240 		if (m != VM_PAGE_NULL) {
1241 			if (m->vmp_busy) {
1242 				/*
1243 				 * The page is being brought in,
1244 				 * wait for it and then retry.
1245 				 */
1246 #if TRACEFAULTPAGE
1247 				dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1248 #endif
1249 				if (fault_info->fi_no_sleep) {
1250 					/* Caller has requested not to sleep on busy pages */
1251 					vm_fault_cleanup(object, first_m);
1252 					thread_interrupt_level(interruptible_state);
1253 					return VM_FAULT_BUSY;
1254 				}
1255 
1256 				wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_DEFAULT);
1257 
1258 				if (wait_result != THREAD_AWAKENED) {
1259 					vm_fault_cleanup(object, first_m);
1260 					thread_interrupt_level(interruptible_state);
1261 
1262 					if (wait_result == THREAD_RESTART) {
1263 						return VM_FAULT_RETRY;
1264 					} else {
1265 						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1266 						return VM_FAULT_INTERRUPTED;
1267 					}
1268 				}
1269 				continue;
1270 			}
1271 			if (m->vmp_laundry) {
1272 				m->vmp_free_when_done = FALSE;
1273 
1274 				if (!m->vmp_cleaning) {
1275 					vm_pageout_steal_laundry(m, FALSE);
1276 				}
1277 			}
1278 			vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
1279 			if (vm_page_is_guard(m)) {
1280 				/*
1281 				 * Guard page: off limits !
1282 				 */
1283 				if (fault_type == VM_PROT_NONE) {
1284 					/*
1285 					 * The fault is not requesting any
1286 					 * access to the guard page, so it must
1287 					 * be just to wire or unwire it.
1288 					 * Let's pretend it succeeded...
1289 					 */
1290 					m->vmp_busy = TRUE;
1291 					*result_page = m;
1292 					assert(first_m == VM_PAGE_NULL);
1293 					*top_page = first_m;
1294 					if (type_of_fault) {
1295 						*type_of_fault = DBG_GUARD_FAULT;
1296 					}
1297 					thread_interrupt_level(interruptible_state);
1298 					return VM_FAULT_SUCCESS;
1299 				} else {
1300 					/*
1301 					 * The fault requests access to the
1302 					 * guard page: let's deny that !
1303 					 */
1304 					vm_fault_cleanup(object, first_m);
1305 					thread_interrupt_level(interruptible_state);
1306 					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1307 					return VM_FAULT_MEMORY_ERROR;
1308 				}
1309 			}
1310 
1311 
1312 			if (m->vmp_error) {
1313 				/*
1314 				 * The page is in error, give up now.
1315 				 */
1316 #if TRACEFAULTPAGE
1317 				dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code);      /* (TEST/DEBUG) */
1318 #endif
1319 				if (error_code) {
1320 					*error_code = KERN_MEMORY_ERROR;
1321 				}
1322 				VM_PAGE_FREE(m);
1323 
1324 				vm_fault_cleanup(object, first_m);
1325 				thread_interrupt_level(interruptible_state);
1326 
1327 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1328 				return VM_FAULT_MEMORY_ERROR;
1329 			}
1330 			if (m->vmp_restart) {
1331 				/*
1332 				 * The pager wants us to restart
1333 				 * at the top of the chain,
1334 				 * typically because it has moved the
1335 				 * page to another pager, then do so.
1336 				 */
1337 #if TRACEFAULTPAGE
1338 				dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1339 #endif
1340 				VM_PAGE_FREE(m);
1341 
1342 				vm_fault_cleanup(object, first_m);
1343 				thread_interrupt_level(interruptible_state);
1344 
1345 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1346 				return VM_FAULT_RETRY;
1347 			}
1348 			if (m->vmp_absent) {
1349 				/*
1350 				 * The page isn't busy, but is absent,
1351 				 * therefore it's deemed "unavailable".
1352 				 *
1353 				 * Remove the non-existent page (unless it's
1354 				 * in the top object) and move on down to the
1355 				 * next object (if there is one).
1356 				 */
1357 #if TRACEFAULTPAGE
1358 				dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow);  /* (TEST/DEBUG) */
1359 #endif
1360 				next_object = object->shadow;
1361 
1362 				if (next_object == VM_OBJECT_NULL) {
1363 					/*
1364 					 * Absent page at bottom of shadow
1365 					 * chain; zero fill the page we left
1366 					 * busy in the first object, and free
1367 					 * the absent page.
1368 					 */
1369 					assert(!must_be_resident);
1370 
1371 					/*
1372 					 * check for any conditions that prevent
1373 					 * us from creating a new zero-fill page
1374 					 * vm_fault_check will do all of the
1375 					 * fault cleanup in the case of an error condition
1376 					 * including resetting the thread_interrupt_level
1377 					 */
1378 					error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1379 
1380 					if (error != VM_FAULT_SUCCESS) {
1381 						return error;
1382 					}
1383 
1384 					if (object != first_object) {
1385 						/*
1386 						 * free the absent page we just found
1387 						 */
1388 						VM_PAGE_FREE(m);
1389 
1390 						/*
1391 						 * drop reference and lock on current object
1392 						 */
1393 						vm_object_paging_end(object);
1394 						vm_object_unlock(object);
1395 
1396 						/*
1397 						 * grab the original page we
1398 						 * 'soldered' in place and
1399 						 * retake lock on 'first_object'
1400 						 */
1401 						m = first_m;
1402 						first_m = VM_PAGE_NULL;
1403 
1404 						object = first_object;
1405 						offset = first_offset;
1406 
1407 						vm_object_lock(object);
1408 					} else {
1409 						/*
1410 						 * we're going to use the absent page we just found
1411 						 * so convert it to a 'busy' page
1412 						 */
1413 						m->vmp_absent = FALSE;
1414 						m->vmp_busy = TRUE;
1415 					}
1416 					if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1417 						m->vmp_absent = TRUE;
1418 						clear_absent_on_error = true;
1419 					}
1420 					/*
1421 					 * zero-fill the page and put it on
1422 					 * the correct paging queue
1423 					 */
1424 					my_fault = vm_fault_zero_page(m, no_zero_fill);
1425 
1426 					break;
1427 				} else {
1428 					if (must_be_resident) {
1429 						vm_object_paging_end(object);
1430 					} else if (object != first_object) {
1431 						vm_object_paging_end(object);
1432 						VM_PAGE_FREE(m);
1433 					} else {
1434 						first_m = m;
1435 						m->vmp_absent = FALSE;
1436 						m->vmp_busy = TRUE;
1437 
1438 						vm_page_lockspin_queues();
1439 						vm_page_queues_remove(m, FALSE);
1440 						vm_page_unlock_queues();
1441 					}
1442 
1443 					offset += object->vo_shadow_offset;
1444 					fault_info->lo_offset += object->vo_shadow_offset;
1445 					fault_info->hi_offset += object->vo_shadow_offset;
1446 					access_required = VM_PROT_READ;
1447 
1448 					vm_object_lock(next_object);
1449 					vm_object_unlock(object);
1450 					object = next_object;
1451 					vm_object_paging_begin(object);
1452 
1453 					/*
1454 					 * reset to default type of fault
1455 					 */
1456 					my_fault = DBG_CACHE_HIT_FAULT;
1457 
1458 					continue;
1459 				}
1460 			}
1461 			if ((m->vmp_cleaning)
1462 			    && ((object != first_object) || (object->vo_copy != VM_OBJECT_NULL))
1463 			    && (fault_type & VM_PROT_WRITE)) {
1464 				/*
1465 				 * This is a copy-on-write fault that will
1466 				 * cause us to revoke access to this page, but
1467 				 * this page is in the process of being cleaned
1468 				 * in a clustered pageout. We must wait until
1469 				 * the cleaning operation completes before
1470 				 * revoking access to the original page,
1471 				 * otherwise we might attempt to remove a
1472 				 * wired mapping.
1473 				 */
1474 #if TRACEFAULTPAGE
1475 				dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset);  /* (TEST/DEBUG) */
1476 #endif
1477 				/*
1478 				 * take an extra ref so that object won't die
1479 				 */
1480 				vm_object_reference_locked(object);
1481 
1482 				vm_fault_cleanup(object, first_m);
1483 
1484 				vm_object_lock(object);
1485 				assert(os_ref_get_count_raw(&object->ref_count) > 0);
1486 
1487 				m = vm_page_lookup(object, vm_object_trunc_page(offset));
1488 
1489 				if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1490 					wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_UNLOCK);
1491 					vm_object_deallocate(object);
1492 					goto backoff;
1493 				} else {
1494 					vm_object_unlock(object);
1495 
1496 					vm_object_deallocate(object);
1497 					thread_interrupt_level(interruptible_state);
1498 
1499 					return VM_FAULT_RETRY;
1500 				}
1501 			}
1502 			if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1503 			    !(fault_info != NULL && fault_info->stealth)) {
1504 				/*
1505 				 * If we were passed a non-NULL pointer for
1506 				 * "type_of_fault", than we came from
1507 				 * vm_fault... we'll let it deal with
1508 				 * this condition, since it
1509 				 * needs to see m->vmp_speculative to correctly
1510 				 * account the pageins, otherwise...
1511 				 * take it off the speculative queue, we'll
1512 				 * let the caller of vm_fault_page deal
1513 				 * with getting it onto the correct queue
1514 				 *
1515 				 * If the caller specified in fault_info that
1516 				 * it wants a "stealth" fault, we also leave
1517 				 * the page in the speculative queue.
1518 				 */
1519 				vm_page_lockspin_queues();
1520 				if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1521 					vm_page_queues_remove(m, FALSE);
1522 				}
1523 				vm_page_unlock_queues();
1524 			}
1525 			assert(object == VM_PAGE_OBJECT(m));
1526 
1527 			if (object->code_signed) {
1528 				/*
1529 				 * CODE SIGNING:
1530 				 * We just paged in a page from a signed
1531 				 * memory object but we don't need to
1532 			 * validate it now.  We'll validate it
1533 				 * when it gets mapped into a user address
1534 				 * space for the first time or when the page
1535 				 * gets copied to another object as a result
1536 				 * of a copy-on-write.
1537 				 */
1538 			}
1539 
1540 			/*
1541 			 * We mark the page busy and leave it on
1542 			 * the pageout queues.  If the pageout
1543 			 * daemon comes across it, then it will
1544 			 * remove the page from the queue, but not the object
1545 			 */
1546 #if TRACEFAULTPAGE
1547 			dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1548 #endif
1549 			assert(!m->vmp_busy);
1550 			assert(!m->vmp_absent);
1551 
1552 			m->vmp_busy = TRUE;
1553 			break;
1554 		}
1555 
1556 		/*
1557 		 * we get here when there is no page present in the object at
1558 		 * the offset we're interested in... we'll allocate a page
1559 		 * at this point if the pager associated with
1560 		 * this object can provide the data or we're the top object...
1561 		 * object is locked;  m == NULL
1562 		 */
1563 
1564 		if (must_be_resident) {
1565 			if (fault_type == VM_PROT_NONE &&
1566 			    is_kernel_object(object)) {
1567 				/*
1568 				 * We've been called from vm_fault_unwire()
1569 				 * while removing a map entry that was allocated
1570 				 * with KMA_KOBJECT and KMA_VAONLY.  This page
1571 				 * is not present and there's nothing more to
1572 				 * do here (nothing to unwire).
1573 				 */
1574 				vm_fault_cleanup(object, first_m);
1575 				thread_interrupt_level(interruptible_state);
1576 
1577 				return VM_FAULT_MEMORY_ERROR;
1578 			}
1579 
1580 			goto dont_look_for_page;
1581 		}
1582 
1583 		/* Don't expect to fault pages into the kernel object. */
1584 		assert(!is_kernel_object(object));
1585 
1586 		look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE));
1587 
1588 #if TRACEFAULTPAGE
1589 		dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
1590 #endif
1591 		if (!look_for_page && object == first_object && !object->phys_contiguous) {
1592 			/*
1593 			 * Allocate a new page for this object/offset pair as a placeholder
1594 			 */
1595 			m = vm_page_grab_options(grab_options);
1596 #if TRACEFAULTPAGE
1597 			dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1598 #endif
1599 			if (m == VM_PAGE_NULL) {
1600 				vm_fault_cleanup(object, first_m);
1601 				thread_interrupt_level(interruptible_state);
1602 
1603 				return VM_FAULT_MEMORY_SHORTAGE;
1604 			}
1605 
1606 			if (fault_info && fault_info->batch_pmap_op == TRUE) {
1607 				vm_page_insert_internal(m, object,
1608 				    vm_object_trunc_page(offset),
1609 				    VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1610 			} else {
1611 				vm_page_insert(m, object, vm_object_trunc_page(offset));
1612 			}
1613 		}
1614 		if (look_for_page) {
1615 			kern_return_t   rc;
1616 			int             my_fault_type;
1617 
1618 			/*
1619 			 *	If the memory manager is not ready, we
1620 			 *	cannot make requests.
1621 			 */
1622 			if (!object->pager_ready) {
1623 #if TRACEFAULTPAGE
1624 				dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
1625 #endif
1626 				if (m != VM_PAGE_NULL) {
1627 					VM_PAGE_FREE(m);
1628 				}
1629 
1630 				/*
1631 				 * take an extra ref so object won't die
1632 				 */
1633 				vm_object_reference_locked(object);
1634 				vm_fault_cleanup(object, first_m);
1635 
1636 				vm_object_lock(object);
1637 				assert(os_ref_get_count_raw(&object->ref_count) > 0);
1638 
1639 				if (!object->pager_ready) {
1640 					wait_result = vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY, interruptible, LCK_SLEEP_UNLOCK);
1641 					vm_object_deallocate(object);
1642 
1643 					goto backoff;
1644 				} else {
1645 					vm_object_unlock(object);
1646 					vm_object_deallocate(object);
1647 					thread_interrupt_level(interruptible_state);
1648 
1649 					return VM_FAULT_RETRY;
1650 				}
1651 			}
1652 			if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1653 				/*
1654 				 * If there are too many outstanding page
1655 				 * requests pending on this external object, we
1656 				 * wait for them to be resolved now.
1657 				 */
1658 #if TRACEFAULTPAGE
1659 				dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1660 #endif
1661 				if (m != VM_PAGE_NULL) {
1662 					VM_PAGE_FREE(m);
1663 				}
1664 				/*
1665 				 * take an extra ref so object won't die
1666 				 */
1667 				vm_object_reference_locked(object);
1668 
1669 				vm_fault_cleanup(object, first_m);
1670 
1671 				vm_object_lock(object);
1672 				assert(os_ref_get_count_raw(&object->ref_count) > 0);
1673 
1674 				if (object->paging_in_progress >= vm_object_pagein_throttle) {
1675 					wait_result = vm_object_paging_throttle_wait(object, interruptible);
1676 					vm_object_unlock(object);
1677 					vm_object_deallocate(object);
1678 					goto backoff;
1679 				} else {
1680 					vm_object_unlock(object);
1681 					vm_object_deallocate(object);
1682 					thread_interrupt_level(interruptible_state);
1683 
1684 					return VM_FAULT_RETRY;
1685 				}
1686 			}
1687 			if (object->internal) {
1688 				int compressed_count_delta;
1689 				vm_compressor_options_t c_flags = 0;
1690 
1691 				assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1692 
1693 				if (m == VM_PAGE_NULL) {
1694 					/*
1695 					 * Allocate a new page for this object/offset pair as a placeholder
1696 					 */
1697 					m = vm_page_grab_options(grab_options);
1698 #if TRACEFAULTPAGE
1699 					dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1700 #endif
1701 					if (m == VM_PAGE_NULL) {
1702 						vm_fault_cleanup(object, first_m);
1703 						thread_interrupt_level(interruptible_state);
1704 
1705 						return VM_FAULT_MEMORY_SHORTAGE;
1706 					}
1707 
1708 					m->vmp_absent = TRUE;
1709 					if (fault_info && fault_info->batch_pmap_op == TRUE) {
1710 						vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1711 					} else {
1712 						vm_page_insert(m, object, vm_object_trunc_page(offset));
1713 					}
1714 				}
1715 				assert(m->vmp_busy);
1716 
1717 				m->vmp_absent = TRUE;
1718 				pager = object->pager;
1719 
1720 				assert(object->paging_in_progress > 0);
1721 
1722 				page_worker_token_t pw_token;
1723 #if PAGE_SLEEP_WITH_INHERITOR
1724 				page_worker_register_worker((event_t)m, &pw_token);
1725 #endif /* PAGE_SLEEP_WITH_INHERITOR */
1726 
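				/*
				 * We drop the object lock around the call into the
				 * compressor below.  The placeholder page is busy and
				 * marked absent, so other faulters will wait on it, and
				 * the paging_in_progress reference (asserted above and
				 * re-asserted after relocking) keeps the object from
				 * being torn down while the lock is gone.
				 */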
1727 				vm_object_unlock(object);
1728 				rc = vm_compressor_pager_get(
1729 					pager,
1730 					offset + object->paging_offset,
1731 					VM_PAGE_GET_PHYS_PAGE(m),
1732 					&my_fault_type,
1733 					c_flags,
1734 					&compressed_count_delta);
1735 
1736 				if (type_of_fault == NULL) {
1737 					int     throttle_delay;
1738 
1739 					/*
1740 					 * we weren't called from vm_fault, so we
1741 					 * need to apply page creation throttling;
1742 					 * do it before we re-acquire any locks
1743 					 */
1744 					if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1745 						if ((throttle_delay = vm_page_throttled(TRUE))) {
1746 							VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1747 							__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
1748 						}
1749 					}
1750 				}
1751 				vm_object_lock(object);
1752 				assert(object->paging_in_progress > 0);
1753 
1754 				vm_compressor_pager_count(
1755 					pager,
1756 					compressed_count_delta,
1757 					FALSE, /* shared_lock */
1758 					object);
1759 
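				/*
				 * Interpret the compressor's result in the switch below:
				 * KERN_SUCCESS means the data was decompressed into the
				 * page, KERN_MEMORY_FAILURE marks the page as having an
				 * error, KERN_MEMORY_ERROR leaves the placeholder marked
				 * absent, and anything else is unexpected and panics.
				 */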
1760 				switch (rc) {
1761 				case KERN_SUCCESS:
1762 					m->vmp_absent = FALSE;
1763 					m->vmp_dirty = TRUE;
1764 					if (!HAS_DEFAULT_CACHEABILITY(object->wimg_bits &
1765 					    VM_WIMG_MASK)) {
1766 						/*
1767 						 * If the page is not cacheable,
1768 						 * we can't let its contents
1769 						 * linger in the data cache
1770 						 * after the decompression.
1771 						 */
1772 						pmap_sync_page_attributes_phys(
1773 							VM_PAGE_GET_PHYS_PAGE(m));
1774 					} else {
1775 						m->vmp_written_by_kernel = TRUE;
1776 					}
1777 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
1778 					if ((fault_type & VM_PROT_WRITE) == 0) {
1779 						vm_object_lock_assert_exclusive(object);
1780 						vm_page_lockspin_queues();
1781 						m->vmp_unmodified_ro = true;
1782 						vm_page_unlock_queues();
1783 						os_atomic_inc(&compressor_ro_uncompressed, relaxed);
1784 						*protection &= ~VM_PROT_WRITE;
1785 					}
1786 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
1787 
1788 					/*
1789 					 * If the object is purgeable, its
1790 					 * owner's purgeable ledgers have been
1791 					 * updated in vm_page_insert() but the
1792 					 * page was also accounted for in a
1793 					 * "compressed purgeable" ledger, so
1794 					 * update that now.
1795 					 */
1796 					if (((object->purgable !=
1797 					    VM_PURGABLE_DENY) ||
1798 					    object->vo_ledger_tag) &&
1799 					    (object->vo_owner !=
1800 					    NULL)) {
1801 						/*
1802 						 * One less compressed
1803 						 * purgeable/tagged page.
1804 						 */
1805 						if (compressed_count_delta) {
1806 							vm_object_owner_compressed_update(
1807 								object,
1808 								-1);
1809 						}
1810 					}
1811 
1812 					break;
1813 				case KERN_MEMORY_FAILURE:
1814 					m->vmp_unusual = TRUE;
1815 					m->vmp_error = TRUE;
1816 					m->vmp_absent = FALSE;
1817 					break;
1818 				case KERN_MEMORY_ERROR:
1819 					assert(m->vmp_absent);
1820 					break;
1821 				default:
1822 					panic("vm_fault_page(): unexpected "
1823 					    "error %d from "
1824 					    "vm_compressor_pager_get()\n",
1825 					    rc);
1826 				}
1827 				vm_page_wakeup_done_with_inheritor(object, m, &pw_token);
1828 
1829 				rc = KERN_SUCCESS;
1830 				goto data_requested;
1831 			}
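			/*
			 * Not an internal (compressor-backed) object: from here on
			 * this is treated as a page-in from an external pager and we
			 * will ask the memory manager for the data via
			 * memory_object_data_request() below.
			 */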
1832 			my_fault_type = DBG_PAGEIN_FAULT;
1833 
1834 			if (m != VM_PAGE_NULL) {
1835 				VM_PAGE_FREE(m);
1836 				m = VM_PAGE_NULL;
1837 			}
1838 
1839 #if TRACEFAULTPAGE
1840 			dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);  /* (TEST/DEBUG) */
1841 #endif
1842 
1843 			/*
1844 			 * It's possible someone called vm_object_destroy while we weren't
1845 			 * holding the object lock.  If that has happened, then bail out
1846 			 * here.
1847 			 */
1848 
1849 			pager = object->pager;
1850 
1851 			if (pager == MEMORY_OBJECT_NULL) {
1852 				vm_fault_cleanup(object, first_m);
1853 				thread_interrupt_level(interruptible_state);
1854 
1855 				static const enum vm_subsys_error_codes object_destroy_errors[VM_OBJECT_DESTROY_MAX + 1] = {
1856 					[VM_OBJECT_DESTROY_UNKNOWN_REASON] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER,
1857 					[VM_OBJECT_DESTROY_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNMOUNT,
1858 					[VM_OBJECT_DESTROY_FORCED_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_FORCED_UNMOUNT,
1859 					[VM_OBJECT_DESTROY_UNGRAFT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNGRAFT,
1860 					[VM_OBJECT_DESTROY_PAGER] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_DEALLOC_PAGER,
1861 					[VM_OBJECT_DESTROY_RECLAIM] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_RECLAIM,
1862 				};
1863 				enum vm_subsys_error_codes kdbg_code = object_destroy_errors[(vm_object_destroy_reason_t)object->no_pager_reason];
1864 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, kdbg_code), 0 /* arg */);
1865 				return VM_FAULT_MEMORY_ERROR;
1866 			}
1867 
1868 			/*
1869 			 * We have an absent page in place for the faulting offset,
1870 			 * so we can release the object lock.
1871 			 */
1872 
1873 			if (object->object_is_shared_cache) {
1874 				token = thread_priority_floor_start();
1875 				/*
1876 				 * A non-native shared cache object might
1877 				 * be getting set up in parallel with this
1878 				 * fault and so we can't assume that this
1879 				 * check will be valid after we drop the
1880 				 * object lock below.
1881 				 */
1882 				drop_floor = true;
1883 			}
1884 
1885 			vm_object_unlock(object);
1886 
1887 			/*
1888 			 * If this object uses a copy_call strategy,
1889 			 * and we are interested in a copy of this object
1890 			 * (having gotten here only by following a
1891 			 * shadow chain), then tell the memory manager
1892 			 * via a flag added to the desired_access
1893 			 * parameter, so that it can detect a race
1894 			 * between our walking down the shadow chain
1895 			 * and its pushing pages up into a copy of
1896 			 * the object that it manages.
1897 			 */
1898 			if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1899 				wants_copy_flag = VM_PROT_WANTS_COPY;
1900 			} else {
1901 				wants_copy_flag = VM_PROT_NONE;
1902 			}
1903 
1904 			if (object->vo_copy == first_object) {
1905 				/*
1906 				 * if we issue the memory_object_data_request in
1907 				 * this state, we are subject to a deadlock with
1908 				 * the underlying filesystem if it is trying to
1909 				 * shrink the file resulting in a push of pages
1910 				 * into the copy object...  that push will stall
1911 				 * on the placeholder page, and if the pushing thread
1912 				 * is holding a lock that is required on the pagein
1913 				 * path (such as a truncate lock), we'll deadlock...
1914 				 * to avoid this potential deadlock, we throw away
1915 				 * our placeholder page before calling memory_object_data_request
1916 				 * and force this thread to retry the vm_fault_page after
1917 				 * we have issued the I/O.  the second time through this path
1918 				 * we will find the page already in the cache (presumably still
1919 				 * busy waiting for the I/O to complete) and then complete
1920 				 * the fault w/o having to go through memory_object_data_request again
1921 				 */
1922 				assert(first_m != VM_PAGE_NULL);
1923 				assert(VM_PAGE_OBJECT(first_m) == first_object);
1924 
1925 				vm_object_lock(first_object);
1926 				VM_PAGE_FREE(first_m);
1927 				vm_object_paging_end(first_object);
1928 				vm_object_unlock(first_object);
1929 
1930 				first_m = VM_PAGE_NULL;
1931 				force_fault_retry = TRUE;
1932 
1933 				vm_fault_page_forced_retry++;
1934 			}
1935 
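			/*
			 * If a previous pass through this path already issued a data
			 * request for this fault, limit this request to a single page
			 * with random behavior (the caller's settings are restored
			 * below) rather than re-issuing the original cluster.
			 */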
1936 			if (data_already_requested == TRUE) {
1937 				orig_behavior = fault_info->behavior;
1938 				orig_cluster_size = fault_info->cluster_size;
1939 
1940 				fault_info->behavior = VM_BEHAVIOR_RANDOM;
1941 				fault_info->cluster_size = PAGE_SIZE;
1942 			}
1943 			/*
1944 			 * Call the memory manager to retrieve the data.
1945 			 */
1946 			rc = memory_object_data_request(
1947 				pager,
1948 				vm_object_trunc_page(offset) + object->paging_offset,
1949 				PAGE_SIZE,
1950 				access_required | wants_copy_flag,
1951 				(memory_object_fault_info_t)fault_info);
1952 
1953 			if (data_already_requested == TRUE) {
1954 				fault_info->behavior = orig_behavior;
1955 				fault_info->cluster_size = orig_cluster_size;
1956 			} else {
1957 				data_already_requested = TRUE;
1958 			}
1959 
1960 			DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1961 #if TRACEFAULTPAGE
1962 			dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1963 #endif
1964 			vm_object_lock(object);
1965 
1966 			if (drop_floor && object->object_is_shared_cache) {
1967 				thread_priority_floor_end(&token);
1968 				drop_floor = false;
1969 			}
1970 
1971 data_requested:
1972 			if (rc != ERR_SUCCESS) {
1973 				vm_fault_cleanup(object, first_m);
1974 				thread_interrupt_level(interruptible_state);
1975 
1976 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1977 
1978 				if (rc == MACH_SEND_INTERRUPTED) {
1979 					return VM_FAULT_INTERRUPTED;
1980 				} else if (rc == KERN_ALREADY_WAITING) {
1981 					return VM_FAULT_BUSY;
1982 				} else {
1983 					return VM_FAULT_MEMORY_ERROR;
1984 				}
1985 			} else {
1986 				clock_sec_t     tv_sec;
1987 				clock_usec_t    tv_usec;
1988 
1989 				if (my_fault_type == DBG_PAGEIN_FAULT) {
1990 					clock_get_system_microtime(&tv_sec, &tv_usec);
1991 					current_thread()->t_page_creation_time = tv_sec;
1992 					current_thread()->t_page_creation_count = 0;
1993 				}
1994 			}
1995 			if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
1996 				vm_fault_cleanup(object, first_m);
1997 				thread_interrupt_level(interruptible_state);
1998 
1999 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2000 				return VM_FAULT_INTERRUPTED;
2001 			}
2002 			if (force_fault_retry == TRUE) {
2003 				vm_fault_cleanup(object, first_m);
2004 				thread_interrupt_level(interruptible_state);
2005 
2006 				return VM_FAULT_RETRY;
2007 			}
2008 			if (m == VM_PAGE_NULL && object->phys_contiguous) {
2009 				/*
2010 				 * No page here means that the object we
2011 				 * initially looked up was "physically
2012 				 * contiguous" (i.e. device memory).  However,
2013 				 * with Virtual VRAM, the object might not
2014 				 * be backed by that device memory anymore,
2015 				 * so we're done here only if the object is
2016 				 * still "phys_contiguous".
2017 				 * Otherwise, if the object is no longer
2018 				 * "phys_contiguous", we need to retry the
2019 				 * page fault against the object's new backing
2020 				 * store (different memory object).
2021 				 */
2022 phys_contig_object:
2023 				assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
2024 				assert(object == first_object);
2025 				goto done;
2026 			}
2027 			/*
2028 			 * potentially a pagein fault
2029 			 * if we make it through the state checks
2030 			 * above, than we'll count it as such
2031 			 * above, then we'll count it as such
2032 			my_fault = my_fault_type;
2033 
2034 			/*
2035 			 * Retry with same object/offset, since new data may
2036 			 * be in a different page (i.e., m is meaningless at
2037 			 * this point).
2038 			 */
2039 			continue;
2040 		}
2041 dont_look_for_page:
2042 		/*
2043 		 * We get here if the object has no pager, or an existence map
2044 		 * exists and indicates the page isn't present on the pager
2045 		 * or we're unwiring a page.  If a pager exists, but there
2046 		 * is no existence map, then the m->vmp_absent case above handles
2047 		 * the ZF case when the pager can't provide the page
2048 		 */
2049 #if TRACEFAULTPAGE
2050 		dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
2051 #endif
2052 		if (object == first_object) {
2053 			first_m = m;
2054 		} else {
2055 			assert(m == VM_PAGE_NULL);
2056 		}
2057 
2058 		next_object = object->shadow;
2059 
2060 		if (next_object == VM_OBJECT_NULL) {
2061 			/*
2062 			 * we've hit the bottom of the shadow chain,
2063 			 * fill the page in the top object with zeros.
2064 			 */
2065 			assert(!must_be_resident);
2066 
2067 			if (object != first_object) {
2068 				vm_object_paging_end(object);
2069 				vm_object_unlock(object);
2070 
2071 				object = first_object;
2072 				offset = first_offset;
2073 				vm_object_lock(object);
2074 			}
2075 			m = first_m;
2076 			assert(VM_PAGE_OBJECT(m) == object);
2077 			first_m = VM_PAGE_NULL;
2078 
2079 			/*
2080 			 * check for any conditions that prevent
2081 			 * us from creating a new zero-fill page
2082 			 * vm_fault_check will do all of the
2083 			 * fault cleanup in the case of an error condition
2084 			 * including resetting the thread_interrupt_level
2085 			 */
2086 			error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2087 
2088 			if (error != VM_FAULT_SUCCESS) {
2089 				return error;
2090 			}
2091 
2092 			if (m == VM_PAGE_NULL) {
2093 				m = vm_page_grab_options(grab_options);
2094 
2095 				if (m == VM_PAGE_NULL) {
2096 					vm_fault_cleanup(object, VM_PAGE_NULL);
2097 					thread_interrupt_level(interruptible_state);
2098 
2099 					return VM_FAULT_MEMORY_SHORTAGE;
2100 				}
2101 				vm_page_insert(m, object, vm_object_trunc_page(offset));
2102 			}
2103 			if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2104 				m->vmp_absent = TRUE;
2105 				clear_absent_on_error = true;
2106 			}
2107 
2108 			my_fault = vm_fault_zero_page(m, no_zero_fill);
2109 
2110 			break;
2111 		} else {
2112 			/*
2113 			 * Move on to the next object.  Lock the next
2114 			 * object before unlocking the current one.
2115 			 */
2116 			if ((object != first_object) || must_be_resident) {
2117 				vm_object_paging_end(object);
2118 			}
2119 
2120 			offset += object->vo_shadow_offset;
2121 			fault_info->lo_offset += object->vo_shadow_offset;
2122 			fault_info->hi_offset += object->vo_shadow_offset;
2123 			access_required = VM_PROT_READ;
2124 
2125 			vm_object_lock(next_object);
2126 			vm_object_unlock(object);
2127 
2128 			object = next_object;
2129 			vm_object_paging_begin(object);
2130 		}
2131 	}
2132 
2133 	/*
2134 	 *	PAGE HAS BEEN FOUND.
2135 	 *
2136 	 *	This page (m) is:
2137 	 *		busy, so that we can play with it;
2138 	 *		not absent, so that nobody else will fill it;
2139 	 *		possibly eligible for pageout;
2140 	 *
2141 	 *	The top-level page (first_m) is:
2142 	 *		VM_PAGE_NULL if the page was found in the
2143 	 *		 top-level object;
2144 	 *		busy, not absent, and ineligible for pageout.
2145 	 *
2146 	 *	The current object (object) is locked.  A paging
2147 	 *	reference is held for the current and top-level
2148 	 *	objects.
2149 	 */
2150 
2151 #if TRACEFAULTPAGE
2152 	dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
2153 #endif
2154 #if     EXTRA_ASSERTIONS
2155 	assert(m->vmp_busy && !m->vmp_absent);
2156 	assert((first_m == VM_PAGE_NULL) ||
2157 	    (first_m->vmp_busy && !first_m->vmp_absent &&
2158 	    !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2159 #endif  /* EXTRA_ASSERTIONS */
2160 
2161 	/*
2162 	 * If the page is being written, but isn't
2163 	 * already owned by the top-level object,
2164 	 * we have to copy it into a new page owned
2165 	 * by the top-level object.
2166 	 */
2167 	if (object != first_object) {
2168 #if TRACEFAULTPAGE
2169 		dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2170 #endif
2171 		if (fault_type & VM_PROT_WRITE) {
2172 			vm_page_t copy_m;
2173 
2174 			/*
2175 			 * We only really need to copy if we
2176 			 * want to write it.
2177 			 */
2178 			assert(!must_be_resident);
2179 
2180 			/*
2181 			 * If we try to collapse first_object at this
2182 			 * point, we may deadlock when we try to get
2183 			 * the lock on an intermediate object (since we
2184 			 * have the bottom object locked).  We can't
2185 			 * unlock the bottom object, because the page
2186 			 * we found may move (by collapse) if we do.
2187 			 *
2188 			 * Instead, we first copy the page.  Then, when
2189 			 * we have no more use for the bottom object,
2190 			 * we unlock it and try to collapse.
2191 			 *
2192 			 * Note that we copy the page even if we didn't
2193 			 * need to... that's the breaks.
2194 			 */
2195 
2196 			/*
2197 			 * Allocate a page for the copy
2198 			 */
2199 			copy_m = vm_page_grab_options(grab_options);
2200 
2201 			if (copy_m == VM_PAGE_NULL) {
2202 				vm_fault_page_release_page(m, &clear_absent_on_error);
2203 
2204 				vm_fault_cleanup(object, first_m);
2205 				thread_interrupt_level(interruptible_state);
2206 
2207 				return VM_FAULT_MEMORY_SHORTAGE;
2208 			}
2209 
2210 			vm_page_copy(m, copy_m);
2211 
2212 			/*
2213 			 * If another map is truly sharing this
2214 			 * page with us, we have to flush all
2215 			 * uses of the original page, since we
2216 			 * can't distinguish those which want the
2217 			 * original from those which need the
2218 			 * new copy.
2219 			 *
2220 			 * XXXO If we know that only one map has
2221 			 * access to this page, then we could
2222 			 * avoid the pmap_disconnect() call.
2223 			 */
2224 			if (m->vmp_pmapped) {
2225 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2226 			}
2227 
2228 			if (m->vmp_clustered) {
2229 				VM_PAGE_COUNT_AS_PAGEIN(m);
2230 				VM_PAGE_CONSUME_CLUSTERED(m);
2231 			}
2232 			assert(!m->vmp_cleaning);
2233 
2234 			/*
2235 			 * We no longer need the old page or object.
2236 			 */
2237 			vm_fault_page_release_page(m, &clear_absent_on_error);
2238 
2239 			/*
2240 			 * This check helps with marking the object as having a sequential pattern.
2241 			 * Normally we'll miss doing this below because this fault is about COW to
2242 			 * the first_object i.e. bring page in from disk, push to object above but
2243 			 * don't update the file object's sequential pattern.
2244 			 */
2245 			if (object->internal == FALSE) {
2246 				vm_fault_is_sequential(object, offset, fault_info->behavior);
2247 			}
2248 
2249 			vm_object_paging_end(object);
2250 			vm_object_unlock(object);
2251 
2252 			my_fault = DBG_COW_FAULT;
2253 			counter_inc(&vm_statistics_cow_faults);
2254 			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2255 			counter_inc(&current_task()->cow_faults);
2256 
2257 			object = first_object;
2258 			offset = first_offset;
2259 
2260 			vm_object_lock(object);
2261 			/*
2262 			 * get rid of the place holder
2263 			 * page that we soldered in earlier
2264 			 */
2265 			VM_PAGE_FREE(first_m);
2266 			first_m = VM_PAGE_NULL;
2267 
2268 			/*
2269 			 * and replace it with the
2270 			 * page we just copied into
2271 			 */
2272 			assert(copy_m->vmp_busy);
2273 			vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2274 			SET_PAGE_DIRTY(copy_m, TRUE);
2275 
2276 			m = copy_m;
2277 			/*
2278 			 * Now that we've gotten the copy out of the
2279 			 * way, let's try to collapse the top object.
2280 			 * But we have to play ugly games with
2281 			 * paging_in_progress to do that...
2282 			 */
2283 			vm_object_paging_end(object);
2284 			vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2285 			vm_object_paging_begin(object);
2286 		} else {
2287 			*protection &= (~VM_PROT_WRITE);
2288 		}
2289 	}
2290 	/*
2291 	 * Now check whether the page needs to be pushed into the
2292 	 * copy object.  The use of asymmetric copy on write for
2293 	 * shared temporary objects means that we may do two copies to
2294 	 * satisfy the fault; one above to get the page from a
2295 	 * shadowed object, and one here to push it into the copy.
2296 	 */
2297 	try_failed_count = 0;
2298 
2299 	while ((copy_object = first_object->vo_copy) != VM_OBJECT_NULL) {
2300 		vm_object_offset_t      copy_offset;
2301 		vm_page_t               copy_m;
2302 
2303 #if TRACEFAULTPAGE
2304 		dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);    /* (TEST/DEBUG) */
2305 #endif
2306 		/*
2307 		 * If the page is being written, but hasn't been
2308 		 * copied to the copy-object, we have to copy it there.
2309 		 */
2310 		if ((fault_type & VM_PROT_WRITE) == 0) {
2311 			*protection &= ~VM_PROT_WRITE;
2312 			break;
2313 		}
2314 
2315 		/*
2316 		 * If the page was guaranteed to be resident,
2317 		 * we must have already performed the copy.
2318 		 */
2319 		if (must_be_resident) {
2320 			break;
2321 		}
2322 
2323 		/*
2324 		 * Try to get the lock on the copy_object.
2325 		 */
2326 		if (!vm_object_lock_try(copy_object)) {
2327 			vm_object_unlock(object);
2328 			try_failed_count++;
2329 
2330 			mutex_pause(try_failed_count);  /* wait a bit */
2331 			vm_object_lock(object);
2332 
2333 			continue;
2334 		}
2335 		try_failed_count = 0;
2336 
2337 		/*
2338 		 * Make another reference to the copy-object,
2339 		 * to keep it from disappearing during the
2340 		 * copy.
2341 		 */
2342 		vm_object_reference_locked(copy_object);
2343 
2344 		/*
2345 		 * Does the page exist in the copy?
2346 		 */
2347 		copy_offset = first_offset - copy_object->vo_shadow_offset;
2348 		copy_offset = vm_object_trunc_page(copy_offset);
2349 
2350 		if (copy_object->vo_size <= copy_offset) {
2351 			/*
2352 			 * Copy object doesn't cover this page -- do nothing.
2353 			 */
2354 			;
2355 		} else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2356 			/*
2357 			 * Page currently exists in the copy object
2358 			 */
2359 			if (copy_m->vmp_busy) {
2360 				/*
2361 				 * If the page is being brought
2362 				 * in, wait for it and then retry.
2363 				 */
2364 				vm_fault_page_release_page(m, &clear_absent_on_error);
2365 
2366 				/*
2367 				 * take an extra ref so object won't die
2368 				 */
2369 				vm_object_reference_locked(copy_object);
2370 				vm_object_unlock(copy_object);
2371 				vm_fault_cleanup(object, first_m);
2372 
2373 				vm_object_lock(copy_object);
2374 				vm_object_lock_assert_exclusive(copy_object);
2375 				os_ref_release_live_locked_raw(&copy_object->ref_count,
2376 				    &vm_object_refgrp);
2377 				copy_m = vm_page_lookup(copy_object, copy_offset);
2378 
2379 				if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2380 					wait_result = vm_page_sleep(copy_object, copy_m, interruptible, LCK_SLEEP_UNLOCK);
2381 					vm_object_deallocate(copy_object);
2382 
2383 					goto backoff;
2384 				} else {
2385 					vm_object_unlock(copy_object);
2386 					vm_object_deallocate(copy_object);
2387 					thread_interrupt_level(interruptible_state);
2388 
2389 					return VM_FAULT_RETRY;
2390 				}
2391 			}
2392 		} else if (!PAGED_OUT(copy_object, copy_offset)) {
2393 			/*
2394 			 * If PAGED_OUT is TRUE, then the page used to exist
2395 			 * in the copy-object, and has already been paged out.
2396 			 * We don't need to repeat this. If PAGED_OUT is
2397 			 * FALSE, then either we don't know (!pager_created,
2398 			 * for example) or it hasn't been paged out.
2399 			 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2400 			 * We must copy the page to the copy object.
2401 			 *
2402 			 * Allocate a page for the copy
2403 			 */
2404 			copy_m = vm_page_alloc(copy_object, copy_offset);
2405 
2406 			if (copy_m == VM_PAGE_NULL) {
2407 				vm_fault_page_release_page(m, &clear_absent_on_error);
2408 
2409 				vm_object_lock_assert_exclusive(copy_object);
2410 				os_ref_release_live_locked_raw(&copy_object->ref_count,
2411 				    &vm_object_refgrp);
2412 
2413 				vm_object_unlock(copy_object);
2414 				vm_fault_cleanup(object, first_m);
2415 				thread_interrupt_level(interruptible_state);
2416 
2417 				return VM_FAULT_MEMORY_SHORTAGE;
2418 			}
2419 			/*
2420 			 * Must copy page into copy-object.
2421 			 */
2422 			vm_page_copy(m, copy_m);
2423 
2424 			/*
2425 			 * If the old page was in use by any users
2426 			 * of the copy-object, it must be removed
2427 			 * from all pmaps.  (We can't know which
2428 			 * pmaps use it.)
2429 			 */
2430 			if (m->vmp_pmapped) {
2431 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2432 			}
2433 
2434 			if (m->vmp_clustered) {
2435 				VM_PAGE_COUNT_AS_PAGEIN(m);
2436 				VM_PAGE_CONSUME_CLUSTERED(m);
2437 			}
2438 			/*
2439 			 * If there's a pager, then immediately
2440 			 * page out this page, using the "initialize"
2441 			 * option.  Else, we use the copy.
2442 			 */
2443 			if ((!copy_object->pager_ready)
2444 			    || vm_object_compressor_pager_state_get(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2445 			    ) {
2446 				vm_page_lockspin_queues();
2447 				assert(!m->vmp_cleaning);
2448 				vm_page_activate(copy_m);
2449 				vm_page_unlock_queues();
2450 
2451 				SET_PAGE_DIRTY(copy_m, TRUE);
2452 				vm_page_wakeup_done(copy_object, copy_m);
2453 			} else {
2454 				assert(copy_m->vmp_busy == TRUE);
2455 				assert(!m->vmp_cleaning);
2456 
2457 				/*
2458 				 * dirty is protected by the object lock
2459 				 */
2460 				SET_PAGE_DIRTY(copy_m, TRUE);
2461 
2462 				/*
2463 				 * The page is already ready for pageout:
2464 				 * not on pageout queues and busy.
2465 				 * Unlock everything except the
2466 				 * copy_object itself.
2467 				 */
2468 				vm_object_unlock(object);
2469 
2470 				/*
2471 				 * Write the page to the copy-object,
2472 				 * flushing it from the kernel.
2473 				 */
2474 				vm_pageout_initialize_page(copy_m);
2475 
2476 				/*
2477 				 * Since the pageout may have
2478 				 * temporarily dropped the
2479 				 * copy_object's lock, we
2480 				 * check whether we'll have
2481 				 * to deallocate the hard way.
2482 				 */
2483 				if ((copy_object->shadow != object) ||
2484 				    (os_ref_get_count_raw(&copy_object->ref_count) == 1)) {
2485 					vm_object_unlock(copy_object);
2486 					vm_object_deallocate(copy_object);
2487 					vm_object_lock(object);
2488 
2489 					continue;
2490 				}
2491 				/*
2492 				 * Pick back up the old object's
2493 				 * lock.  [It is safe to do so,
2494 				 * since it must be deeper in the
2495 				 * object tree.]
2496 				 */
2497 				vm_object_lock(object);
2498 			}
2499 
2500 			/*
2501 			 * Because we're pushing a page upward
2502 			 * in the object tree, we must restart
2503 			 * any faults that are waiting here.
2504 			 * [Note that this is an expansion of
2505 			 * vm_page_wakeup() that uses the THREAD_RESTART
2506 			 * wait result].  Can't turn off the page's
2507 			 * busy bit because we're not done with it.
2508 			 */
2509 			if (m->vmp_wanted) {
2510 				m->vmp_wanted = FALSE;
2511 				thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2512 			}
2513 		}
2514 		/*
2515 		 * The reference count on copy_object must be
2516 		 * at least 2: one for our extra reference,
2517 		 * and at least one from the outside world
2518 		 * (we checked that when we last locked
2519 		 * copy_object).
2520 		 */
2521 		vm_object_lock_assert_exclusive(copy_object);
2522 		os_ref_release_live_locked_raw(&copy_object->ref_count,
2523 		    &vm_object_refgrp);
2524 
2525 		vm_object_unlock(copy_object);
2526 
2527 		break;
2528 	}
2529 
2530 done:
2531 	*result_page = m;
2532 	*top_page = first_m;
2533 
2534 	if (m != VM_PAGE_NULL) {
2535 		assert(VM_PAGE_OBJECT(m) == object);
2536 
2537 		retval = VM_FAULT_SUCCESS;
2538 
2539 		if (my_fault == DBG_PAGEIN_FAULT) {
2540 			VM_PAGE_COUNT_AS_PAGEIN(m);
2541 
2542 			if (object->internal) {
2543 				my_fault = DBG_PAGEIND_FAULT;
2544 			} else {
2545 				my_fault = DBG_PAGEINV_FAULT;
2546 			}
2547 
2548 			/*
2549 			 * evaluate access pattern and update state
2550 			 * vm_fault_deactivate_behind depends on the
2551 			 * state being up to date
2552 			 */
2553 			vm_fault_is_sequential(object, offset, fault_info->behavior);
2554 			vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2555 		} else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2556 			/*
2557 			 * we weren't called from vm_fault, so handle the
2558 			 * accounting here for hits in the cache
2559 			 */
2560 			if (m->vmp_clustered) {
2561 				VM_PAGE_COUNT_AS_PAGEIN(m);
2562 				VM_PAGE_CONSUME_CLUSTERED(m);
2563 			}
2564 			vm_fault_is_sequential(object, offset, fault_info->behavior);
2565 			vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2566 		} else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2567 			VM_STAT_DECOMPRESSIONS();
2568 		}
2569 		if (type_of_fault) {
2570 			*type_of_fault = my_fault;
2571 		}
2572 	} else {
2573 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2574 		retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2575 		assert(first_m == VM_PAGE_NULL);
2576 		assert(object == first_object);
2577 	}
2578 
2579 	thread_interrupt_level(interruptible_state);
2580 
2581 #if TRACEFAULTPAGE
2582 	dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
2583 #endif
2584 	return retval;
2585 
2586 backoff:
2587 	thread_interrupt_level(interruptible_state);
2588 
2589 	if (wait_result == THREAD_INTERRUPTED) {
2590 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2591 		return VM_FAULT_INTERRUPTED;
2592 	}
2593 	return VM_FAULT_RETRY;
2594 }
2595 
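/*
 * Summary of the return paths above: VM_FAULT_SUCCESS hands back a busy page
 * in *result_page (and the top-level placeholder, if any, in *top_page),
 * VM_FAULT_SUCCESS_NO_VM_PAGE covers the "phys_contiguous" case with no page,
 * and the VM_FAULT_RETRY, VM_FAULT_INTERRUPTED, VM_FAULT_MEMORY_SHORTAGE,
 * VM_FAULT_MEMORY_ERROR and VM_FAULT_BUSY paths all return after the objects
 * have been released via vm_fault_cleanup().
 */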
2596 #if MACH_ASSERT && (XNU_PLATFORM_WatchOS || __x86_64__)
2597 #define PANIC_ON_CS_KILLED_DEFAULT true
2598 #else
2599 #define PANIC_ON_CS_KILLED_DEFAULT false
2600 #endif
2601 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2602     PANIC_ON_CS_KILLED_DEFAULT);
2603 
2604 extern int proc_selfpid(void);
2605 extern char *proc_name_address(struct proc *p);
2606 extern const char *proc_best_name(struct proc *);
2607 unsigned long cs_enter_tainted_rejected = 0;
2608 unsigned long cs_enter_tainted_accepted = 0;
2609 
2610 /*
2611  * CODE SIGNING:
2612  * When soft faulting a page, we have to validate the page if:
2613  * 1. the page is being mapped in user space
2614  * 2. the page hasn't already been found to be "tainted"
2615  * 3. the page belongs to a code-signed object
2616  * 4. the page has not been validated yet or has been mapped for write.
2617  */
2618 static bool
2619 vm_fault_cs_need_validation(
2620 	pmap_t pmap,
2621 	vm_page_t page,
2622 	vm_object_t page_obj,
2623 	vm_map_size_t fault_page_size,
2624 	vm_map_offset_t fault_phys_offset)
2625 {
2626 	if (pmap == kernel_pmap) {
2627 		/* 1 - not user space */
2628 		return false;
2629 	}
2630 	if (!page_obj->code_signed) {
2631 		/* 3 - page does not belong to a code-signed object */
2632 		return false;
2633 	}
2634 	if (fault_page_size == PAGE_SIZE) {
2635 		/* looking at the whole page */
2636 		assertf(fault_phys_offset == 0,
2637 		    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2638 		    (uint64_t)fault_page_size,
2639 		    (uint64_t)fault_phys_offset);
2640 		if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2641 			/* 2 - page is all tainted */
2642 			return false;
2643 		}
2644 		if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2645 		    !page->vmp_wpmapped) {
2646 			/* 4 - already fully validated and never mapped writable */
2647 			return false;
2648 		}
2649 	} else {
2650 		/* looking at a specific sub-page */
2651 		if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2652 			/* 2 - sub-page was already marked as tainted */
2653 			return false;
2654 		}
2655 		if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2656 		    !page->vmp_wpmapped) {
2657 			/* 4 - already validated and never mapped writable */
2658 			return false;
2659 		}
2660 	}
2661 	/* page needs to be validated */
2662 	return true;
2663 }
2664 
2665 
2666 static bool
2667 vm_fault_cs_page_immutable(
2668 	vm_page_t m,
2669 	vm_map_size_t fault_page_size,
2670 	vm_map_offset_t fault_phys_offset,
2671 	vm_prot_t prot __unused)
2672 {
2673 	if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2674 	    /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2675 		return true;
2676 	}
2677 	return false;
2678 }
2679 
2680 static bool
2681 vm_fault_cs_page_nx(
2682 	vm_page_t m,
2683 	vm_map_size_t fault_page_size,
2684 	vm_map_offset_t fault_phys_offset)
2685 {
2686 	return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2687 }
2688 
2689 /*
2690  * Check if the page being entered into the pmap violates code signing.
2691  */
2692 static kern_return_t
2693 vm_fault_cs_check_violation(
2694 	bool cs_bypass,
2695 	vm_object_t object,
2696 	vm_page_t m,
2697 	pmap_t pmap,
2698 	vm_prot_t prot,
2699 	vm_prot_t caller_prot,
2700 	vm_map_size_t fault_page_size,
2701 	vm_map_offset_t fault_phys_offset,
2702 	vm_object_fault_info_t fault_info,
2703 	bool map_is_switched,
2704 	bool map_is_switch_protected,
2705 	bool *cs_violation)
2706 {
2707 #if !CODE_SIGNING_MONITOR
2708 #pragma unused(caller_prot)
2709 #pragma unused(fault_info)
2710 #endif /* !CODE_SIGNING_MONITOR */
2711 
2712 	int             cs_enforcement_enabled;
2713 	if (!cs_bypass &&
2714 	    vm_fault_cs_need_validation(pmap, m, object,
2715 	    fault_page_size, fault_phys_offset)) {
2716 		vm_object_lock_assert_exclusive(object);
2717 
2718 		if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2719 			vm_cs_revalidates++;
2720 		}
2721 
2722 		/* VM map is locked, so 1 ref will remain on VM object -
2723 		 * so no harm if vm_page_validate_cs drops the object lock */
2724 
2725 #if CODE_SIGNING_MONITOR
2726 		if (fault_info->csm_associated &&
2727 		    csm_enabled() &&
2728 		    !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2729 		    !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
2730 		    !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
2731 		    (prot & VM_PROT_EXECUTE) &&
2732 		    (caller_prot & VM_PROT_EXECUTE)) {
2733 			/*
2734 			 * When we have a code signing monitor, the monitor will evaluate the code signature
2735 			 * for any executable page mapping. No need for the VM to also validate the page.
2736 			 * In the code signing monitor we trust :)
2737 			 */
2738 			vm_cs_defer_to_csm++;
2739 		} else {
2740 			vm_cs_defer_to_csm_not++;
2741 			vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2742 		}
2743 #else /* CODE_SIGNING_MONITOR */
2744 		vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2745 #endif /* CODE_SIGNING_MONITOR */
2746 	}
2747 
2748 	/* If the map is switched, and is switch-protected, we must protect
2749 	 * some pages from being write-faulted: immutable pages because by
2750 	 * definition they may not be written, and executable pages because that
2751 	 * would provide a way to inject unsigned code.
2752 	 * If the page is immutable, we can simply return. However, we can't
2753 	 * immediately determine whether a page is executable anywhere. But,
2754 	 * we can disconnect it everywhere and remove the executable protection
2755 	 * from the current map. We do that below right before we do the
2756 	 * PMAP_ENTER.
2757 	 */
2758 	if (pmap == kernel_pmap) {
2759 		/* kernel fault: cs_enforcement does not apply */
2760 		cs_enforcement_enabled = 0;
2761 	} else {
2762 		cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2763 	}
2764 
2765 	if (cs_enforcement_enabled && map_is_switched &&
2766 	    map_is_switch_protected &&
2767 	    vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2768 	    (prot & VM_PROT_WRITE)) {
2769 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2770 		return KERN_CODESIGN_ERROR;
2771 	}
2772 
2773 	if (cs_enforcement_enabled &&
2774 	    vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2775 	    (prot & VM_PROT_EXECUTE)) {
2776 		if (cs_debug) {
2777 			printf("page marked to be NX, not letting it be mapped EXEC\n");
2778 		}
2779 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2780 		return KERN_CODESIGN_ERROR;
2781 	}
2782 
2783 	/* A page could be tainted, or pose a risk of being tainted later.
2784 	 * Check whether the receiving process wants it, and make it feel
2785 	 * the consequences (that happens in cs_invalid_page()).
2786 	 * For CS Enforcement, two other conditions will
2787 	 * cause that page to be tainted as well:
2788 	 * - pmapping an unsigned page executable - this means unsigned code;
2789 	 * - writeable mapping of a validated page - the content of that page
2790 	 *   can be changed without the kernel noticing, therefore unsigned
2791 	 *   code can be created
2792 	 */
2793 	if (cs_bypass) {
2794 		/* code-signing is bypassed */
2795 		*cs_violation = FALSE;
2796 	} else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2797 		/* tainted page */
2798 		*cs_violation = TRUE;
2799 	} else if (!cs_enforcement_enabled) {
2800 		/* no further code-signing enforcement */
2801 		*cs_violation = FALSE;
2802 	} else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2803 	    ((prot & VM_PROT_WRITE) ||
2804 	    m->vmp_wpmapped)) {
2805 		/*
2806 		 * The page should be immutable, but is in danger of being
2807 		 * modified.
2808 		 * This is the case where we want policy from the code
2809 		 * directory - is the page immutable or not? For now we have
2810 		 * to assume that code pages will be immutable, data pages not.
2811 		 * We'll assume a page is a code page if it has a code directory
2812 		 * and we fault for execution.
2813 		 * That is good enough since if we faulted the code page for
2814 		 * writing in another map before, it is wpmapped; if we fault
2815 		 * it for writing in this map later it will also be faulted for
2816 		 * executing at the same time; and if we fault for writing in
2817 		 * another map later, we will disconnect it from this pmap so
2818 		 * we'll notice the change.
2819 		 */
2820 		*cs_violation = TRUE;
2821 	} else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2822 	    (prot & VM_PROT_EXECUTE)
2823 #if CODE_SIGNING_MONITOR
2824 	    /*
2825 	     * Executable pages will be validated by the code signing monitor. If the
2826 	     * code signing monitor is turned off, then this is a code-signing violation.
2827 	     */
2828 	    && !csm_enabled()
2829 #endif /* CODE_SIGNING_MONITOR */
2830 	    ) {
2831 		*cs_violation = TRUE;
2832 	} else {
2833 		*cs_violation = FALSE;
2834 	}
2835 	return KERN_SUCCESS;
2836 }
2837 
2838 /*
2839  * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2840  * @param must_disconnect This value will be set to true if the caller must disconnect
2841  * this page.
2842  * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2843  */
2844 static kern_return_t
2845 vm_fault_cs_handle_violation(
2846 	vm_object_t object,
2847 	vm_page_t m,
2848 	pmap_t pmap,
2849 	vm_prot_t prot,
2850 	vm_map_offset_t vaddr,
2851 	vm_map_size_t fault_page_size,
2852 	vm_map_offset_t fault_phys_offset,
2853 	bool map_is_switched,
2854 	bool map_is_switch_protected,
2855 	bool *must_disconnect)
2856 {
2857 #if !MACH_ASSERT
2858 #pragma unused(pmap)
2859 #pragma unused(map_is_switch_protected)
2860 #endif /* !MACH_ASSERT */
2861 	/*
2862 	 * We will have a tainted page. Have to handle the special case
2863 	 * of a switched map now. If the map is not switched, standard
2864 	 * procedure applies - call cs_invalid_page().
2865 	 * If the map is switched, the real owner is invalid already.
2866 	 * There is no point in invalidating the switching process since
2867 	 * it will not be executing from the map. So we don't call
2868 	 * cs_invalid_page() in that case.
2869 	 */
2870 	boolean_t reject_page, cs_killed;
2871 	kern_return_t kr;
2872 	if (map_is_switched) {
2873 		assert(pmap == vm_map_pmap(current_thread()->map));
2874 		assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2875 		reject_page = FALSE;
2876 	} else {
2877 		if (cs_debug > 5) {
2878 			printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2879 			    object->code_signed ? "yes" : "no",
2880 			    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2881 			    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2882 			    m->vmp_wpmapped ? "yes" : "no",
2883 			    (int)prot);
2884 		}
2885 		reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2886 	}
2887 
2888 	if (reject_page) {
2889 		/* reject the invalid page: abort the page fault */
2890 		int                     pid;
2891 		const char              *procname;
2892 		task_t                  task;
2893 		vm_object_t             file_object, shadow;
2894 		vm_object_offset_t      file_offset;
2895 		char                    *pathname, *filename;
2896 		vm_size_t               pathname_len, filename_len;
2897 		boolean_t               truncated_path;
2898 #define __PATH_MAX 1024
2899 		struct timespec         mtime, cs_mtime;
2900 		int                     shadow_depth;
2901 		os_reason_t             codesigning_exit_reason = OS_REASON_NULL;
2902 
2903 		kr = KERN_CODESIGN_ERROR;
2904 		cs_enter_tainted_rejected++;
2905 
2906 		/* get process name and pid */
2907 		procname = "?";
2908 		task = current_task();
2909 		pid = proc_selfpid();
2910 		if (get_bsdtask_info(task) != NULL) {
2911 			procname = proc_name_address(get_bsdtask_info(task));
2912 		}
2913 
2914 		/* get file's VM object */
2915 		file_object = object;
2916 		file_offset = m->vmp_offset;
2917 		for (shadow = file_object->shadow,
2918 		    shadow_depth = 0;
2919 		    shadow != VM_OBJECT_NULL;
2920 		    shadow = file_object->shadow,
2921 		    shadow_depth++) {
2922 			vm_object_lock_shared(shadow);
2923 			if (file_object != object) {
2924 				vm_object_unlock(file_object);
2925 			}
2926 			file_offset += file_object->vo_shadow_offset;
2927 			file_object = shadow;
2928 		}
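		/*
		 * After walking the shadow chain, file_object is the bottom-most
		 * object (the one whose pager, if any, is backed by the file) and
		 * file_offset is the faulting offset expressed in that object.
		 */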
2929 
2930 		mtime.tv_sec = 0;
2931 		mtime.tv_nsec = 0;
2932 		cs_mtime.tv_sec = 0;
2933 		cs_mtime.tv_nsec = 0;
2934 
2935 		/* get file's pathname and/or filename */
2936 		pathname = NULL;
2937 		filename = NULL;
2938 		pathname_len = 0;
2939 		filename_len = 0;
2940 		truncated_path = FALSE;
2941 		/* no pager -> no file -> no pathname, use "<nil>" in that case */
2942 		if (file_object->pager != NULL) {
2943 			pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2944 			if (pathname) {
2945 				pathname[0] = '\0';
2946 				pathname_len = __PATH_MAX;
2947 				filename = pathname + pathname_len;
2948 				filename_len = __PATH_MAX;
2949 
2950 				if (vnode_pager_get_object_name(file_object->pager,
2951 				    pathname,
2952 				    pathname_len,
2953 				    filename,
2954 				    filename_len,
2955 				    &truncated_path) == KERN_SUCCESS) {
2956 					/* safety first... */
2957 					pathname[__PATH_MAX - 1] = '\0';
2958 					filename[__PATH_MAX - 1] = '\0';
2959 
2960 					vnode_pager_get_object_mtime(file_object->pager,
2961 					    &mtime,
2962 					    &cs_mtime);
2963 				} else {
2964 					kfree_data(pathname, __PATH_MAX * 2);
2965 					pathname = NULL;
2966 					filename = NULL;
2967 					pathname_len = 0;
2968 					filename_len = 0;
2969 					truncated_path = FALSE;
2970 				}
2971 			}
2972 		}
2973 		printf("CODE SIGNING: process %d[%s]: "
2974 		    "rejecting invalid page at address 0x%llx "
2975 		    "from offset 0x%llx in file \"%s%s%s\" "
2976 		    "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2977 		    "(signed:%d validated:%d tainted:%d nx:%d "
2978 		    "wpmapped:%d dirty:%d depth:%d)\n",
2979 		    pid, procname, (addr64_t) vaddr,
2980 		    file_offset,
2981 		    (pathname ? pathname : "<nil>"),
2982 		    (truncated_path ? "/.../" : ""),
2983 		    (truncated_path ? filename : ""),
2984 		    cs_mtime.tv_sec, cs_mtime.tv_nsec,
2985 		    ((cs_mtime.tv_sec == mtime.tv_sec &&
2986 		    cs_mtime.tv_nsec == mtime.tv_nsec)
2987 		    ? "=="
2988 		    : "!="),
2989 		    mtime.tv_sec, mtime.tv_nsec,
2990 		    object->code_signed,
2991 		    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
2992 		    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
2993 		    VMP_CS_NX(m, fault_page_size, fault_phys_offset),
2994 		    m->vmp_wpmapped,
2995 		    m->vmp_dirty,
2996 		    shadow_depth);
2997 
2998 		/*
2999 		 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
3000 		 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
3001 		 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
3002 		 * will deal with the segmentation fault.
3003 		 */
3004 		if (cs_killed) {
3005 			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
3006 			    pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3007 
3008 			codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3009 			if (codesigning_exit_reason == NULL) {
3010 				printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
3011 			} else {
3012 				mach_vm_address_t data_addr = 0;
3013 				struct codesigning_exit_reason_info *ceri = NULL;
3014 				uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
3015 
3016 				if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
3017 					printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
3018 				} else {
3019 					if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
3020 					    EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
3021 						ceri = (struct codesigning_exit_reason_info *)data_addr;
3022 						static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
3023 
3024 						ceri->ceri_virt_addr = vaddr;
3025 						ceri->ceri_file_offset = file_offset;
3026 						if (pathname) {
3027 							strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
3028 						} else {
3029 							ceri->ceri_pathname[0] = '\0';
3030 						}
3031 						if (filename) {
3032 							strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
3033 						} else {
3034 							ceri->ceri_filename[0] = '\0';
3035 						}
3036 						ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
3037 						ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
3038 						ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
3039 						ceri->ceri_page_modtime_secs = mtime.tv_sec;
3040 						ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
3041 						ceri->ceri_object_codesigned = (object->code_signed);
3042 						ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
3043 						ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
3044 						ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
3045 						ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
3046 						ceri->ceri_page_slid = 0;
3047 						ceri->ceri_page_dirty = (m->vmp_dirty);
3048 						ceri->ceri_page_shadow_depth = shadow_depth;
3049 					} else {
3050 #if DEBUG || DEVELOPMENT
3051 						panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
3052 #else
3053 						printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
3054 #endif /* DEBUG || DEVELOPMENT */
3055 						/* Free the buffer */
3056 						os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
3057 					}
3058 				}
3059 			}
3060 
3061 			set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
3062 		}
3063 		if (panic_on_cs_killed &&
3064 		    object->object_is_shared_cache) {
3065 			char *tainted_contents;
3066 			vm_map_offset_t src_vaddr;
3067 			src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
3068 			tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
3069 			bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
3070 			printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
3071 			panic("CODE SIGNING: process %d[%s]: "
3072 			    "rejecting invalid page (phys#0x%x) at address 0x%llx "
3073 			    "from offset 0x%llx in file \"%s%s%s\" "
3074 			    "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3075 			    "(signed:%d validated:%d tainted:%d nx:%d "
3076 			    "wpmapped:%d dirty:%d depth:%d)\n",
3077 			    pid, procname,
3078 			    VM_PAGE_GET_PHYS_PAGE(m),
3079 			    (addr64_t) vaddr,
3080 			    file_offset,
3081 			    (pathname ? pathname : "<nil>"),
3082 			    (truncated_path ? "/.../" : ""),
3083 			    (truncated_path ? filename : ""),
3084 			    cs_mtime.tv_sec, cs_mtime.tv_nsec,
3085 			    ((cs_mtime.tv_sec == mtime.tv_sec &&
3086 			    cs_mtime.tv_nsec == mtime.tv_nsec)
3087 			    ? "=="
3088 			    : "!="),
3089 			    mtime.tv_sec, mtime.tv_nsec,
3090 			    object->code_signed,
3091 			    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3092 			    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3093 			    VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3094 			    m->vmp_wpmapped,
3095 			    m->vmp_dirty,
3096 			    shadow_depth);
3097 		}
3098 
3099 		if (file_object != object) {
3100 			vm_object_unlock(file_object);
3101 		}
3102 		if (pathname_len != 0) {
3103 			kfree_data(pathname, __PATH_MAX * 2);
3104 			pathname = NULL;
3105 			filename = NULL;
3106 		}
3107 	} else {
3108 		/* proceed with the invalid page */
3109 		kr = KERN_SUCCESS;
3110 		if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3111 		    !object->code_signed) {
3112 			/*
3113 			 * This page has not been (fully) validated but
3114 			 * does not belong to a code-signed object
3115 			 * so it should not be forcefully considered
3116 			 * as tainted.
3117 			 * We're just concerned about it here because
3118 			 * we've been asked to "execute" it but that
3119 			 * does not mean that it should cause other
3120 			 * accesses to fail.
3121 			 * This happens when a debugger sets a
3122 			 * breakpoint and we then execute code in
3123 			 * that page.  Marking the page as "tainted"
3124 			 * would cause any inspection tool ("leaks",
3125 			 * "vmmap", "CrashReporter", ...) to get killed
3126 			 * due to code-signing violation on that page,
3127 			 * even though they're just reading it and not
3128 			 * executing from it.
3129 			 */
3130 		} else {
3131 			/*
3132 			 * Page might have been tainted before or not;
3133 			 * now it definitively is. If the page wasn't
3134 			 * tainted, we must disconnect it from all
3135 			 * pmaps later, to force existing mappings
3136 			 * through that code path for re-consideration
3137 			 * of the validity of that page.
3138 			 */
3139 			if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3140 				*must_disconnect = TRUE;
3141 				VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3142 			}
3143 		}
3144 		cs_enter_tainted_accepted++;
3145 	}
3146 	if (kr != KERN_SUCCESS) {
3147 		if (cs_debug) {
3148 			printf("CODESIGNING: vm_fault_enter(0x%llx): "
3149 			    "*** INVALID PAGE ***\n",
3150 			    (long long)vaddr);
3151 		}
3152 #if !SECURE_KERNEL
3153 		if (cs_enforcement_panic) {
3154 			panic("CODESIGNING: panicking on invalid page");
3155 		}
3156 #endif
3157 	}
3158 	return kr;
3159 }
3160 
3161 /*
3162  * Check that the code signature is valid for the given page being inserted into
3163  * the pmap.
3164  *
3165  * @param must_disconnect This value will be set to true if the caller must disconnect
3166  * this page.
3167  * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3168  */
3169 static kern_return_t
3170 vm_fault_validate_cs(
3171 	bool cs_bypass,
3172 	vm_object_t object,
3173 	vm_page_t m,
3174 	pmap_t pmap,
3175 	vm_map_offset_t vaddr,
3176 	vm_prot_t prot,
3177 	vm_prot_t caller_prot,
3178 	vm_map_size_t fault_page_size,
3179 	vm_map_offset_t fault_phys_offset,
3180 	vm_object_fault_info_t fault_info,
3181 	bool *must_disconnect)
3182 {
3183 	bool map_is_switched, map_is_switch_protected, cs_violation;
3184 	kern_return_t kr;
3185 	/* Validate code signature if necessary. */
3186 	map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3187 	    (pmap == vm_map_pmap(current_thread()->map)));
3188 	map_is_switch_protected = current_thread()->map->switch_protect;
3189 	kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3190 	    prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3191 	    map_is_switched, map_is_switch_protected, &cs_violation);
3192 	if (kr != KERN_SUCCESS) {
3193 		return kr;
3194 	}
3195 	if (cs_violation) {
3196 		kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3197 		    fault_page_size, fault_phys_offset,
3198 		    map_is_switched, map_is_switch_protected, must_disconnect);
3199 	}
3200 	return kr;
3201 }
3202 
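/*
 * Caller-side sketch of the contract enforced by vm_fault_validate_cs()
 * above (illustrative only; it mirrors how vm_fault_enter_prepare() below
 * uses it, with "is_tainted" standing in for the must_disconnect
 * out-parameter):
 *
 *	bool is_tainted = false;
 *	kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
 *	    prot, caller_prot, fault_page_size, fault_phys_offset,
 *	    fault_info, &is_tainted);
 *	if (kr != KERN_SUCCESS) {
 *	        return kr;      <- the page fault must be aborted
 *	}
 *	if (is_tainted && (fault_type & VM_PROT_WRITE)) {
 *	        pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 *	}
 */
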
3203 /*
3204  * Enqueue the page on the appropriate paging queue.
3205  */
3206 static void
3207 vm_fault_enqueue_page(
3208 	vm_object_t object,
3209 	vm_page_t m,
3210 	bool wired,
3211 	bool change_wiring,
3212 	vm_tag_t wire_tag,
3213 	bool no_cache,
3214 	int *type_of_fault,
3215 	kern_return_t kr)
3216 {
3217 	assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3218 	boolean_t       page_queues_locked = FALSE;
3219 	boolean_t       previously_pmapped = m->vmp_pmapped;
3220 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()   \
3221 MACRO_BEGIN                                     \
3222 	if (! page_queues_locked) {             \
3223 	        page_queues_locked = TRUE;      \
3224 	        vm_page_lockspin_queues();      \
3225 	}                                       \
3226 MACRO_END
3227 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()     \
3228 MACRO_BEGIN                                     \
3229 	if (page_queues_locked) {               \
3230 	        page_queues_locked = FALSE;     \
3231 	        vm_page_unlock_queues();        \
3232 	}                                       \
3233 MACRO_END
3234 
3235 	vm_page_update_special_state(m);
3236 	if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3237 		/*
3238 		 * Compressor pages are neither wired
3239 		 * nor pageable and should never change.
3240 		 */
3241 		assert(object == compressor_object);
3242 	} else if (change_wiring) {
3243 		__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3244 
3245 		if (wired) {
3246 			if (kr == KERN_SUCCESS) {
3247 				vm_page_wire(m, wire_tag, TRUE);
3248 			}
3249 		} else {
3250 			vm_page_unwire(m, TRUE);
3251 		}
3252 		/* we keep the page queues lock, if we need it later */
3253 	} else {
3254 		if (object->internal == TRUE) {
3255 			/*
3256 			 * don't allow anonymous pages on
3257 			 * the speculative queues
3258 			 */
3259 			no_cache = FALSE;
3260 		}
3261 		if (kr != KERN_SUCCESS) {
3262 			__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3263 			vm_page_deactivate(m);
3264 			/* we keep the page queues lock, if we need it later */
3265 		} else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3266 		    (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3267 		    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3268 		    ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3269 		    !VM_PAGE_WIRED(m)) {
3270 			if (vm_page_local_q &&
3271 			    (*type_of_fault == DBG_COW_FAULT ||
3272 			    *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3273 				struct vpl      *lq;
3274 				uint32_t        lid;
3275 
3276 				assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3277 
3278 				__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3279 				vm_object_lock_assert_exclusive(object);
3280 
3281 				/*
3282 				 * we got a local queue to stuff this
3283 				 * new page on...
3284 				 * it's safe to manipulate local and
3285 				 * local_id at this point since we're
3286 				 * behind an exclusive object lock and
3287 				 * the page is not on any global queue.
3288 				 *
3289 				 * we'll use the current cpu number to
3290 				 * select the queue... note that we don't
3291 				 * need to disable preemption... we're
3292 				 * going to be behind the local queue's
3293 				 * lock to do the real work
3294 				 */
3295 				lid = cpu_number();
3296 
3297 				lq = zpercpu_get_cpu(vm_page_local_q, lid);
3298 
3299 				VPL_LOCK(&lq->vpl_lock);
3300 
3301 				vm_page_check_pageable_safe(m);
3302 				vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3303 				m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3304 				m->vmp_local_id = lid;
3305 				lq->vpl_count++;
3306 
3307 				if (object->internal) {
3308 					lq->vpl_internal_count++;
3309 				} else {
3310 					lq->vpl_external_count++;
3311 				}
3312 
3313 				VPL_UNLOCK(&lq->vpl_lock);
3314 
3315 				if (lq->vpl_count > vm_page_local_q_soft_limit) {
3316 					/*
3317 					 * we're beyond the soft limit
3318 					 * for the local queue
3319 					 * vm_page_reactivate_local will
3320 					 * 'try' to take the global page
3321 					 * queue lock... if it can't
3322 					 * that's ok... we'll let the
3323 					 * queue continue to grow up
3324 					 * to the hard limit... at that
3325 					 * point we'll wait for the
3326 					 * lock... once we've got the
3327 					 * lock, we'll transfer all of
3328 					 * the pages from the local
3329 					 * queue to the global active
3330 					 * queue
3331 					 */
3332 					vm_page_reactivate_local(lid, FALSE, FALSE);
3333 				}
3334 			} else {
3335 				__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3336 
3337 				/*
3338 				 * test again now that we hold the
3339 				 * page queue lock
3340 				 */
3341 				if (!VM_PAGE_WIRED(m)) {
3342 					if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3343 						vm_page_queues_remove(m, FALSE);
3344 
3345 						VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3346 						VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3347 					}
3348 
3349 					if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3350 					    no_cache) {
3351 						/*
3352 						 * If this is a no_cache mapping
3353 						 * and the page has never been
3354 						 * mapped before or was
3355 						 * previously a no_cache page,
3356 						 * then we want to leave pages
3357 						 * in the speculative state so
3358 						 * that they can be readily
3359 						 * recycled if free memory runs
3360 						 * low.  Otherwise the page is
3361 						 * activated as normal.
3362 						 */
3363 
3364 						if (no_cache &&
3365 						    (!previously_pmapped ||
3366 						    m->vmp_no_cache)) {
3367 							m->vmp_no_cache = TRUE;
3368 
3369 							if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3370 								vm_page_speculate(m, FALSE);
3371 							}
3372 						} else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3373 							vm_page_activate(m);
3374 						}
3375 					}
3376 				}
3377 				/* we keep the page queues lock, if we need it later */
3378 			}
3379 		}
3380 	}
3381 	/* we're done with the page queues lock, if we ever took it */
3382 	__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3383 }
3384 
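/*
 * vm_fault_enqueue_page() above takes the page-queues lock lazily through
 * __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED(): the lock is acquired only the
 * first time a queue manipulation is actually needed and released exactly
 * once on the way out.  A minimal sketch of the same pattern (illustrative
 * pseudo-code, not part of this file):
 *
 *	boolean_t page_queues_locked = FALSE;
 *	if (needs_queue_update) {
 *	        if (!page_queues_locked) {
 *	                page_queues_locked = TRUE;
 *	                vm_page_lockspin_queues();
 *	        }
 *	        ... move the page between queues ...
 *	}
 *	if (page_queues_locked) {
 *	        vm_page_unlock_queues();
 *	}
 */
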
3385 /*
3386  * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3387  * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys()
3388  * before being inserted into the pmap.
3389  */
3390 static bool
3391 vm_fault_enter_set_mapped(
3392 	vm_object_t object,
3393 	vm_page_t m,
3394 	vm_prot_t prot,
3395 	vm_prot_t fault_type)
3396 {
3397 	bool page_needs_sync = false;
3398 	/*
3399 	 * NOTE: we may only hold the vm_object lock SHARED
3400 	 * at this point, so we need the phys_page lock to
3401 	 * properly serialize updating the pmapped and
3402 	 * xpmapped bits
3403 	 */
3404 	if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3405 		ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3406 
3407 		pmap_lock_phys_page(phys_page);
3408 		m->vmp_pmapped = TRUE;
3409 
3410 		if (!m->vmp_xpmapped) {
3411 			m->vmp_xpmapped = TRUE;
3412 
3413 			pmap_unlock_phys_page(phys_page);
3414 
3415 			if (!object->internal) {
3416 				OSAddAtomic(1, &vm_page_xpmapped_external_count);
3417 			}
3418 
3419 #if defined(__arm64__)
3420 			page_needs_sync = true;
3421 #else
3422 			if (object->internal &&
3423 			    object->pager != NULL) {
3424 				/*
3425 				 * This page could have been
3426 				 * uncompressed by the
3427 				 * compressor pager and its
3428 				 * contents might be only in
3429 				 * the data cache.
3430 				 * Since it's being mapped for
3431 				 * "execute" for the fist time,
3432 				 * make sure the icache is in
3433 				 * sync.
3434 				 */
3435 				assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3436 				page_needs_sync = true;
3437 			}
3438 #endif
3439 		} else {
3440 			pmap_unlock_phys_page(phys_page);
3441 		}
3442 	} else {
3443 		if (m->vmp_pmapped == FALSE) {
3444 			ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3445 
3446 			pmap_lock_phys_page(phys_page);
3447 			m->vmp_pmapped = TRUE;
3448 			pmap_unlock_phys_page(phys_page);
3449 		}
3450 	}
3451 
3452 	if (fault_type & VM_PROT_WRITE) {
3453 		if (m->vmp_wpmapped == FALSE) {
3454 			vm_object_lock_assert_exclusive(object);
3455 			if (!object->internal && object->pager) {
3456 				task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3457 			}
3458 			m->vmp_wpmapped = TRUE;
3459 		}
3460 	}
3461 	return page_needs_sync;
3462 }
3463 
3464 /*
3465  * wrappers for pmap_enter_options()
3466  */
3467 kern_return_t
3468 pmap_enter_object_options_check(
3469 	pmap_t           pmap,
3470 	vm_map_address_t virtual_address,
3471 	vm_map_offset_t  fault_phys_offset,
3472 	vm_object_t      obj,
3473 	ppnum_t          pn,
3474 	vm_prot_t        protection,
3475 	vm_prot_t        fault_type,
3476 	boolean_t        wired,
3477 	unsigned int     options)
3478 {
3479 	unsigned int flags = 0;
3480 	unsigned int extra_options = 0;
3481 
3482 	if (obj->internal) {
3483 		extra_options |= PMAP_OPTIONS_INTERNAL;
3484 	}
3485 	pmap_paddr_t physical_address = (pmap_paddr_t)ptoa(pn) + fault_phys_offset;
3486 	return pmap_enter_options_addr(pmap,
3487 	           virtual_address,
3488 	           physical_address,
3489 	           protection,
3490 	           fault_type,
3491 	           flags,
3492 	           wired,
3493 	           options | extra_options,
3494 	           NULL,
3495 	           PMAP_MAPPING_TYPE_INFER);
3496 }
3497 
3498 kern_return_t
3499 pmap_enter_options_check(
3500 	pmap_t           pmap,
3501 	vm_map_address_t virtual_address,
3502 	vm_map_offset_t  fault_phys_offset,
3503 	vm_page_t        page,
3504 	vm_prot_t        protection,
3505 	vm_prot_t        fault_type,
3506 	boolean_t        wired,
3507 	unsigned int     options)
3508 {
3509 	if (page->vmp_error) {
3510 		return KERN_MEMORY_FAILURE;
3511 	}
3512 	vm_object_t obj = VM_PAGE_OBJECT(page);
3513 	if (page->vmp_reusable || obj->all_reusable) {
3514 		options |= PMAP_OPTIONS_REUSABLE;
3515 	}
3516 	return pmap_enter_object_options_check(
3517 		pmap,
3518 		virtual_address,
3519 		fault_phys_offset,
3520 		obj,
3521 		VM_PAGE_GET_PHYS_PAGE(page),
3522 		protection,
3523 		fault_type,
3524 		wired,
3525 		options);
3526 }
3527 
3528 kern_return_t
3529 pmap_enter_check(
3530 	pmap_t           pmap,
3531 	vm_map_address_t virtual_address,
3532 	vm_page_t        page,
3533 	vm_prot_t        protection,
3534 	vm_prot_t        fault_type,
3535 	boolean_t        wired)
3536 {
3537 	return pmap_enter_options_check(pmap,
3538 	           virtual_address,
3539 	           0 /* fault_phys_offset */,
3540 	           page,
3541 	           protection,
3542 	           fault_type,
3543 	           wired,
3544 	           0 /* options */);
3545 }
3546 
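/*
 * The wrappers above layer as follows:
 * pmap_enter_check() -> pmap_enter_options_check() ->
 * pmap_enter_object_options_check() -> pmap_enter_options_addr(),
 * with each level adding a check (vmp_error, reusable) or an option bit
 * (PMAP_OPTIONS_INTERNAL) before the raw pmap call.  The simplest entry
 * point would be used roughly like this (illustrative, hypothetical
 * arguments):
 *
 *	kr = pmap_enter_check(pmap, vaddr, m,
 *	    VM_PROT_READ | VM_PROT_WRITE,       (protection)
 *	    VM_PROT_WRITE,                      (fault_type)
 *	    FALSE);                             (not wired)
 *	if (kr == KERN_MEMORY_FAILURE) {
 *	        ... the page had vmp_error set ...
 *	}
 */
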
3547 /*
3548  * Try to enter the given page into the pmap.
3549  * Will retry without execute permission if the code signing monitor is enabled and
3550  * we encounter a codesigning failure on a non-execute fault.
3551  */
3552 static kern_return_t
3553 vm_fault_attempt_pmap_enter(
3554 	pmap_t pmap,
3555 	vm_map_offset_t vaddr,
3556 	vm_map_size_t fault_page_size,
3557 	vm_map_offset_t fault_phys_offset,
3558 	vm_page_t m,
3559 	vm_prot_t *prot,
3560 	vm_prot_t caller_prot,
3561 	vm_prot_t fault_type,
3562 	bool wired,
3563 	int pmap_options)
3564 {
3565 #if !CODE_SIGNING_MONITOR
3566 #pragma unused(caller_prot)
3567 #endif /* !CODE_SIGNING_MONITOR */
3568 
3569 	kern_return_t kr;
3570 	if (fault_page_size != PAGE_SIZE) {
3571 		DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3572 		assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3573 		    fault_phys_offset < PAGE_SIZE),
3574 		    "0x%llx\n", (uint64_t)fault_phys_offset);
3575 	} else {
3576 		assertf(fault_phys_offset == 0,
3577 		    "0x%llx\n", (uint64_t)fault_phys_offset);
3578 	}
3579 
3580 	kr = pmap_enter_options_check(pmap, vaddr,
3581 	    fault_phys_offset,
3582 	    m, *prot, fault_type,
3583 	    wired, pmap_options);
3584 
3585 #if CODE_SIGNING_MONITOR
3586 	/*
3587 	 * Retry without execute permission if we encountered a codesigning
3588 	 * failure on a non-execute fault.  This allows applications which
3589 	 * don't actually need to execute code to still map it for read access.
3590 	 */
3591 	if (kr == KERN_CODESIGN_ERROR &&
3592 	    csm_enabled() &&
3593 	    (*prot & VM_PROT_EXECUTE) &&
3594 	    !(caller_prot & VM_PROT_EXECUTE)) {
3595 		*prot &= ~VM_PROT_EXECUTE;
3596 		kr = pmap_enter_options_check(pmap, vaddr,
3597 		    fault_phys_offset,
3598 		    m, *prot, fault_type,
3599 		    wired, pmap_options);
3600 	}
3601 #endif /* CODE_SIGNING_MONITOR */
3602 
3603 	return kr;
3604 }
3605 
3606 /*
3607  * Enter the given page into the pmap.
3608  * The map must be locked shared.
3609  * The vm object must NOT be locked.
3610  *
3611  * @param need_retry if not null, avoid making a (potentially) blocking call into
3612  * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3613  */
3614 static kern_return_t
3615 vm_fault_pmap_enter(
3616 	pmap_t pmap,
3617 	vm_map_offset_t vaddr,
3618 	vm_map_size_t fault_page_size,
3619 	vm_map_offset_t fault_phys_offset,
3620 	vm_page_t m,
3621 	vm_prot_t *prot,
3622 	vm_prot_t caller_prot,
3623 	vm_prot_t fault_type,
3624 	bool wired,
3625 	int pmap_options,
3626 	boolean_t *need_retry)
3627 {
3628 	kern_return_t kr;
3629 	if (need_retry != NULL) {
3630 		/*
3631 		 * Although we don't hold a lock on this object, we hold a lock
3632 		 * on the top object in the chain. To prevent a deadlock, we
3633 		 * can't allow the pmap layer to block.
3634 		 */
3635 		pmap_options |= PMAP_OPTIONS_NOWAIT;
3636 	}
3637 	kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3638 	    fault_page_size, fault_phys_offset,
3639 	    m, prot, caller_prot, fault_type, wired, pmap_options);
3640 	if (kr == KERN_RESOURCE_SHORTAGE) {
3641 		if (need_retry) {
3642 			/*
3643 			 * There's nothing we can do here since we hold the
3644 			 * lock on the top object in the chain. The caller
3645 			 * will need to deal with this by dropping that lock and retrying.
3646 			 */
3647 			*need_retry = TRUE;
3648 			vm_pmap_enter_retried++;
3649 		}
3650 	}
3651 	return kr;
3652 }
3653 
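/*
 * Caller-side sketch of the need_retry contract of vm_fault_pmap_enter()
 * above (illustrative only): passing a non-NULL need_retry forces
 * PMAP_OPTIONS_NOWAIT, and a resource shortage is then reported back so the
 * fault can be redone after the top object lock has been dropped.
 *
 *	boolean_t need_retry = FALSE;
 *	kr = vm_fault_pmap_enter(pmap, vaddr, fault_page_size,
 *	    fault_phys_offset, m, &prot, caller_prot, fault_type,
 *	    wired, pmap_options, &need_retry);
 *	if (kr == KERN_RESOURCE_SHORTAGE && need_retry) {
 *	        ... drop the top object lock and retry the fault ...
 *	}
 */
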
3654 /*
3655  * Enter the given page into the pmap.
3656  * The vm map must be locked shared.
3657  * The vm object must be locked exclusive, unless this is a soft fault.
3658  * For a soft fault, the object must be locked shared or exclusive.
3659  *
3660  * @param need_retry if not null, avoid making a (potentially) blocking call into
3661  * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3662  */
3663 static kern_return_t
3664 vm_fault_pmap_enter_with_object_lock(
3665 	vm_object_t object,
3666 	pmap_t pmap,
3667 	vm_map_offset_t vaddr,
3668 	vm_map_size_t fault_page_size,
3669 	vm_map_offset_t fault_phys_offset,
3670 	vm_page_t m,
3671 	vm_prot_t *prot,
3672 	vm_prot_t caller_prot,
3673 	vm_prot_t fault_type,
3674 	bool wired,
3675 	int pmap_options,
3676 	boolean_t *need_retry,
3677 	uint8_t *object_lock_type)
3678 {
3679 	kern_return_t kr;
3680 	/*
3681 	 * Prevent a deadlock by not
3682 	 * holding the object lock if we need to wait for a page in
3683 	 * pmap_enter() - <rdar://problem/7138958>
3684 	 */
3685 	kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3686 	    fault_page_size, fault_phys_offset,
3687 	    m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3688 #if __x86_64__
3689 	if (kr == KERN_INVALID_ARGUMENT &&
3690 	    pmap == PMAP_NULL &&
3691 	    wired) {
3692 		/*
3693 		 * Wiring a page in a pmap-less VM map:
3694 		 * VMware's "vmmon" kernel extension does this
3695 		 * to grab pages.
3696 		 * Let it proceed even though the PMAP_ENTER() failed.
3697 		 */
3698 		kr = KERN_SUCCESS;
3699 	}
3700 #endif /* __x86_64__ */
3701 
3702 	if (kr == KERN_RESOURCE_SHORTAGE) {
3703 		if (need_retry) {
3704 			/*
3705 			 * this will be non-null in the case where we hold the lock
3706 			 * on the top-object in this chain... we can't just drop
3707 			 * the lock on the object we're inserting the page into
3708 			 * and recall the PMAP_ENTER since we can still cause
3709 			 * a deadlock if one of the critical paths tries to
3710 			 * acquire the lock on the top-object and we're blocked
3711 			 * in PMAP_ENTER waiting for memory... our only recourse
3712 			 * is to deal with it at a higher level where we can
3713 			 * drop both locks.
3714 			 */
3715 			*need_retry = TRUE;
3716 			vm_pmap_enter_retried++;
3717 			goto done;
3718 		}
3719 		/*
3720 		 * The nonblocking version of pmap_enter did not succeed,
3721 		 * and we don't need to drop other locks and retry
3722 		 * at the level above us, so
3723 		 * use the blocking version instead. Requires marking
3724 		 * the page busy and unlocking the object
3725 		 */
3726 		boolean_t was_busy = m->vmp_busy;
3727 
3728 		vm_object_lock_assert_exclusive(object);
3729 
3730 		m->vmp_busy = TRUE;
3731 		vm_object_unlock(object);
3732 
3733 		kr = pmap_enter_options_check(pmap, vaddr,
3734 		    fault_phys_offset,
3735 		    m, *prot, fault_type,
3736 		    wired, pmap_options);
3737 
3738 		assert(VM_PAGE_OBJECT(m) == object);
3739 
3740 		/* Take the object lock again. */
3741 		vm_object_lock(object);
3742 
3743 		/* If the page was busy, someone else will wake it up.
3744 		 * Otherwise, we have to do it now. */
3745 		assert(m->vmp_busy);
3746 		if (!was_busy) {
3747 			vm_page_wakeup_done(object, m);
3748 		}
3749 		vm_pmap_enter_blocked++;
3750 	}
3751 
3752 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
3753 	if ((*prot & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
3754 		if (*object_lock_type == OBJECT_LOCK_SHARED) {
3755 			boolean_t was_busy = m->vmp_busy;
3756 			m->vmp_busy = TRUE;
3757 
3758 			*object_lock_type = OBJECT_LOCK_EXCLUSIVE;
3759 
3760 			if (vm_object_lock_upgrade(object) == FALSE) {
3761 				vm_object_lock(object);
3762 			}
3763 
3764 			if (!was_busy) {
3765 				vm_page_wakeup_done(object, m);
3766 			}
3767 		}
3768 		vm_object_lock_assert_exclusive(object);
3769 		vm_page_lockspin_queues();
3770 		m->vmp_unmodified_ro = false;
3771 		vm_page_unlock_queues();
3772 		os_atomic_dec(&compressor_ro_uncompressed, relaxed);
3773 
3774 		vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(m), m->vmp_offset);
3775 	}
3776 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3777 #pragma unused(object_lock_type)
3778 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3779 
3780 done:
3781 	return kr;
3782 }
3783 
3784 /*
3785  * Prepare to enter a page into the pmap by checking CS, protection bits,
3786  * and setting mapped bits on the page_t.
3787  * Does not modify the page's paging queue.
3788  *
3789  * page queue lock must NOT be held
3790  * m->vmp_object must be locked
3791  *
3792  * NOTE: m->vmp_object could be locked "shared" only if we are called
3793  * from vm_fault() as part of a soft fault.
3794  */
3795 static kern_return_t
3796 vm_fault_enter_prepare(
3797 	vm_page_t m,
3798 	pmap_t pmap,
3799 	vm_map_offset_t vaddr,
3800 	vm_prot_t *prot,
3801 	vm_prot_t caller_prot,
3802 	vm_map_size_t fault_page_size,
3803 	vm_map_offset_t fault_phys_offset,
3804 	vm_prot_t fault_type,
3805 	vm_object_fault_info_t fault_info,
3806 	int *type_of_fault,
3807 	bool *page_needs_data_sync)
3808 {
3809 	kern_return_t   kr;
3810 	bool            is_tainted = false;
3811 	vm_object_t     object;
3812 	boolean_t       cs_bypass = fault_info->cs_bypass;
3813 
3814 	object = VM_PAGE_OBJECT(m);
3815 
3816 	vm_object_lock_assert_held(object);
3817 
3818 #if KASAN
3819 	if (pmap == kernel_pmap) {
3820 		kasan_notify_address(vaddr, PAGE_SIZE);
3821 	}
3822 #endif
3823 
3824 #if CODE_SIGNING_MONITOR
3825 	if (csm_address_space_exempt(pmap) == KERN_SUCCESS) {
3826 		cs_bypass = TRUE;
3827 	}
3828 #endif
3829 
3830 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3831 
3832 	if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3833 		vm_object_lock_assert_exclusive(object);
3834 	} else if ((fault_type & VM_PROT_WRITE) == 0 &&
3835 	    !fault_info->fi_change_wiring &&
3836 	    (!m->vmp_wpmapped
3837 #if VM_OBJECT_ACCESS_TRACKING
3838 	    || object->access_tracking
3839 #endif /* VM_OBJECT_ACCESS_TRACKING */
3840 	    )) {
3841 		/*
3842 		 * This is not a "write" fault, so we
3843 		 * might not have taken the object lock
3844 		 * exclusively and we might not be able
3845 		 * to update the "wpmapped" bit in
3846 		 * vm_fault_enter().
3847 		 * Let's just grant read access to
3848 		 * the page for now and we'll
3849 		 * soft-fault again if we need write
3850 		 * access later...
3851 		 */
3852 
3853 		/* This had better not be a JIT page. */
3854 		if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3855 			/*
3856 			 * This pmap enforces extra constraints for this set of
3857 			 * protections, so we can't modify them.
3858 			 */
3859 			if (!cs_bypass) {
3860 				panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x !cs_bypass",
3861 				    __FUNCTION__, pmap, (uint64_t)vaddr,
3862 				    *prot, fault_info->pmap_options);
3863 			}
3864 		} else {
3865 			*prot &= ~VM_PROT_WRITE;
3866 		}
3867 	}
3868 	if (m->vmp_pmapped == FALSE) {
3869 		if (m->vmp_clustered) {
3870 			if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3871 				/*
3872 				 * found it in the cache, but this
3873 				 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3874 				 * so it must have come in as part of
3875 				 * a cluster... account 1 pagein against it
3876 				 */
3877 				if (object->internal) {
3878 					*type_of_fault = DBG_PAGEIND_FAULT;
3879 				} else {
3880 					*type_of_fault = DBG_PAGEINV_FAULT;
3881 				}
3882 
3883 				VM_PAGE_COUNT_AS_PAGEIN(m);
3884 			}
3885 			VM_PAGE_CONSUME_CLUSTERED(m);
3886 		}
3887 	}
3888 
3889 	if (*type_of_fault != DBG_COW_FAULT) {
3890 		DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3891 
3892 		if (pmap == kernel_pmap) {
3893 			DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3894 		}
3895 	}
3896 
3897 	kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3898 	    *prot, caller_prot, fault_page_size, fault_phys_offset,
3899 	    fault_info, &is_tainted);
3900 	if (kr == KERN_SUCCESS) {
3901 		/*
3902 		 * We either have a good page, or a tainted page that has been accepted by the process.
3903 		 * In both cases the page will be entered into the pmap.
3904 		 */
3905 		*page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3906 		if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3907 			/*
3908 			 * This page is tainted but we're inserting it anyway.
3909 			 * Since it's writeable, we need to disconnect it from other pmaps
3910 			 * now so those processes can take note.
3911 			 */
3912 
3913 			/*
3914 			 * We can only get here
3915 			 * because of the CSE logic
3916 			 */
3917 			assert(pmap_get_vm_map_cs_enforced(pmap));
3918 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3919 			/*
3920 			 * If we are faulting for a write, we can clear
3921 			 * the execute bit - that will ensure the page is
3922 			 * checked again before being executable, which
3923 			 * protects against a map switch.
3924 			 * This only happens the first time the page
3925 			 * gets tainted, so we won't get stuck here
3926 			 * to make an already writeable page executable.
3927 			 */
3928 			if (!cs_bypass) {
3929 				if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3930 					/*
3931 					 * This pmap enforces extra constraints
3932 					 * for this set of protections, so we
3933 					 * can't change the protections.
3934 					 */
3935 					panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
3936 					    __FUNCTION__, pmap,
3937 					    (uint64_t)vaddr, *prot,
3938 					    fault_info->pmap_options);
3939 				}
3940 				*prot &= ~VM_PROT_EXECUTE;
3941 			}
3942 		}
3943 		assert(VM_PAGE_OBJECT(m) == object);
3944 
3945 #if VM_OBJECT_ACCESS_TRACKING
3946 		if (object->access_tracking) {
3947 			DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3948 			if (fault_type & VM_PROT_WRITE) {
3949 				object->access_tracking_writes++;
3950 				vm_object_access_tracking_writes++;
3951 			} else {
3952 				object->access_tracking_reads++;
3953 				vm_object_access_tracking_reads++;
3954 			}
3955 		}
3956 #endif /* VM_OBJECT_ACCESS_TRACKING */
3957 	}
3958 
3959 	return kr;
3960 }
3961 
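/*
 * Caller-side sketch of the page_needs_data_sync contract (illustrative
 * only; this is what vm_fault_enter() below does): when
 * vm_fault_enter_prepare() reports that the page needs a data sync, the
 * caller must synchronize the page's caches before it is entered in the
 * pmap with execute permission.
 *
 *	kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
 *	    fault_page_size, fault_phys_offset, fault_type,
 *	    fault_info, type_of_fault, &page_needs_data_sync);
 *	if (kr == KERN_SUCCESS && page_needs_data_sync) {
 *	        pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
 *	}
 */
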
3962 /*
3963  * page queue lock must NOT be held
3964  * m->vmp_object must be locked
3965  *
3966  * NOTE: m->vmp_object could be locked "shared" only if we are called
3967  * from vm_fault() as part of a soft fault.  If so, we must be
3968  * careful not to modify the VM object in any way that is not
3969  * legal under a shared lock...
3970  */
3971 kern_return_t
3972 vm_fault_enter(
3973 	vm_page_t m,
3974 	pmap_t pmap,
3975 	vm_map_offset_t vaddr,
3976 	vm_map_size_t fault_page_size,
3977 	vm_map_offset_t fault_phys_offset,
3978 	vm_prot_t prot,
3979 	vm_prot_t caller_prot,
3980 	boolean_t wired,
3981 	vm_tag_t  wire_tag,
3982 	vm_object_fault_info_t fault_info,
3983 	boolean_t *need_retry,
3984 	int *type_of_fault,
3985 	uint8_t *object_lock_type)
3986 {
3987 	kern_return_t   kr;
3988 	vm_object_t     object;
3989 	bool            page_needs_data_sync;
3990 	vm_prot_t       fault_type;
3991 	int             pmap_options = fault_info->pmap_options;
3992 
3993 	if (vm_page_is_guard(m)) {
3994 		return KERN_SUCCESS;
3995 	}
3996 
3997 	fault_type = fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot;
3998 
3999 	assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
4000 	kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
4001 	    fault_page_size, fault_phys_offset, fault_type,
4002 	    fault_info, type_of_fault, &page_needs_data_sync);
4003 	object = VM_PAGE_OBJECT(m);
4004 
4005 	vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
4006 
4007 	if (kr == KERN_SUCCESS) {
4008 		if (page_needs_data_sync) {
4009 			pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
4010 		}
4011 
4012 		if (fault_info->fi_xnu_user_debug && !object->code_signed) {
4013 			pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
4014 		}
4015 
4016 
4017 		kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
4018 		    fault_page_size, fault_phys_offset, m,
4019 		    &prot, caller_prot, fault_type, wired, pmap_options, need_retry, object_lock_type);
4020 	}
4021 
4022 	return kr;
4023 }
4024 
4025 kern_return_t
4026 vm_pre_fault_with_info(
4027 	vm_map_t                map,
4028 	vm_map_offset_t         vaddr,
4029 	vm_prot_t               prot,
4030 	vm_object_fault_info_t  fault_info)
4031 {
4032 	assert(fault_info != NULL);
4033 	if (pmap_find_phys(map->pmap, vaddr) == 0) {
4034 		return vm_fault_internal(map,
4035 		           vaddr,               /* vaddr */
4036 		           prot,                /* fault_type */
4037 		           VM_KERN_MEMORY_NONE, /* tag - not wiring */
4038 		           NULL,                /* caller_pmap */
4039 		           0,                   /* caller_pmap_addr */
4040 		           NULL,
4041 		           fault_info);
4042 	}
4043 	return KERN_SUCCESS;
4044 }
4045 
4046 /*
4047  * Fault on the given vaddr iff the page is not already entered in the pmap.
4048  */
4049 kern_return_t
4050 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
4051 {
4052 	struct vm_object_fault_info fault_info = {
4053 		.interruptible = THREAD_UNINT,
4054 	};
4055 	return vm_pre_fault_with_info(current_map(), vaddr, prot, &fault_info);
4056 }
4057 
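/*
 * Typical use of vm_pre_fault() (illustrative; "user_addr" is a
 * hypothetical address): warm up a mapping ahead of an access so the access
 * itself does not have to take the fault.  The page is faulted in with
 * THREAD_UNINT, and only if it is not already present in the current map's
 * pmap.
 *
 *	(void) vm_pre_fault(user_addr, VM_PROT_READ);
 */
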
4058 /*
4059  *	Routine:	vm_fault
4060  *	Purpose:
4061  *		Handle page faults, including pseudo-faults
4062  *		used to change the wiring status of pages.
4063  *	Returns:
4064  *		Explicit continuations have been removed.
4065  *	Implementation:
4066  *		vm_fault and vm_fault_page save mucho state
4067  *		in the moral equivalent of a closure.  The state
4068  *		structure is allocated when first entering vm_fault
4069  *		and deallocated when leaving vm_fault.
4070  */
4071 
4072 extern uint64_t get_current_unique_pid(void);
4073 
4074 unsigned long vm_fault_collapse_total = 0;
4075 unsigned long vm_fault_collapse_skipped = 0;
4076 
4077 
4078 kern_return_t
4079 vm_fault_external(
4080 	vm_map_t        map,
4081 	vm_map_offset_t vaddr,
4082 	vm_prot_t       fault_type,
4083 	boolean_t       change_wiring,
4084 	int             interruptible,
4085 	pmap_t          caller_pmap,
4086 	vm_map_offset_t caller_pmap_addr)
4087 {
4088 	struct vm_object_fault_info fault_info = {
4089 		.interruptible = interruptible,
4090 		.fi_change_wiring = change_wiring,
4091 	};
4092 
4093 	return vm_fault_internal(map, vaddr, fault_type,
4094 	           change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
4095 	           caller_pmap, caller_pmap_addr,
4096 	           NULL, &fault_info);
4097 }
4098 
4099 kern_return_t
4100 vm_fault(
4101 	vm_map_t        map,
4102 	vm_map_offset_t vaddr,
4103 	vm_prot_t       fault_type,
4104 	boolean_t       change_wiring,
4105 	vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4106 	int             interruptible,
4107 	pmap_t          caller_pmap,
4108 	vm_map_offset_t caller_pmap_addr)
4109 {
4110 	struct vm_object_fault_info fault_info = {
4111 		.interruptible = interruptible,
4112 		.fi_change_wiring = change_wiring,
4113 	};
4114 
4115 	return vm_fault_internal(map, vaddr, fault_type, wire_tag,
4116 	           caller_pmap, caller_pmap_addr,
4117 	           NULL, &fault_info);
4118 }
4119 
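/*
 * The two entry points above differ only in how the wiring tag is chosen:
 * vm_fault_external() derives it from the caller's backtrace (vm_tag_bt())
 * when change_wiring is set, while vm_fault() takes an explicit wire_tag.
 * Illustrative call with hypothetical arguments:
 *
 *	kr = vm_fault(map, vaddr, VM_PROT_READ,
 *	    TRUE,                       (change_wiring)
 *	    VM_KERN_MEMORY_OSFMK,       (wire_tag: must not be NONE when wiring)
 *	    THREAD_UNINT, PMAP_NULL, 0);
 */
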
4120 static boolean_t
4121 current_proc_is_privileged(void)
4122 {
4123 	return csproc_get_platform_binary(current_proc());
4124 }
4125 
4126 uint64_t vm_copied_on_read = 0;
4127 
4128 /*
4129  * Cleanup after a vm_fault_enter.
4130  * At this point, the fault should either have failed (kr != KERN_SUCCESS)
4131  * or the page should be in the pmap and on the correct paging queue.
4132  *
4133  * Precondition:
4134  * map must be locked shared.
4135  * m_object must be locked.
4136  * If top_object != VM_OBJECT_NULL, it must be locked.
4137  * real_map must be locked.
4138  *
4139  * Postcondition:
4140  * map will be unlocked
4141  * m_object will be unlocked
4142  * top_object will be unlocked
4143  * If real_map != map, it will be unlocked
4144  */
4145 static void
4146 vm_fault_complete(
4147 	vm_map_t map,
4148 	vm_map_t real_map,
4149 	vm_object_t object,
4150 	vm_object_t m_object,
4151 	vm_page_t m,
4152 	vm_map_offset_t offset,
4153 	vm_map_offset_t trace_real_vaddr,
4154 	vm_object_fault_info_t fault_info,
4155 	vm_prot_t caller_prot,
4156 #if CONFIG_DTRACE
4157 	vm_map_offset_t real_vaddr,
4158 #else
4159 	__unused vm_map_offset_t real_vaddr,
4160 #endif /* CONFIG_DTRACE */
4161 	int type_of_fault,
4162 	boolean_t need_retry,
4163 	kern_return_t kr,
4164 	ppnum_t *physpage_p,
4165 	vm_prot_t prot,
4166 	vm_object_t top_object,
4167 	boolean_t need_collapse,
4168 	vm_map_offset_t cur_offset,
4169 	vm_prot_t fault_type,
4170 	vm_object_t *written_on_object,
4171 	memory_object_t *written_on_pager,
4172 	vm_object_offset_t *written_on_offset)
4173 {
4174 	int     event_code = 0;
4175 	vm_map_lock_assert_shared(map);
4176 	vm_object_lock_assert_held(m_object);
4177 	if (top_object != VM_OBJECT_NULL) {
4178 		vm_object_lock_assert_held(top_object);
4179 	}
4180 	vm_map_lock_assert_held(real_map);
4181 
4182 	if (m_object->internal) {
4183 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
4184 	} else if (m_object->object_is_shared_cache) {
4185 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
4186 	} else {
4187 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
4188 	}
4189 	KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid());
4190 	if (need_retry == FALSE) {
4191 		KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid());
4192 	}
4193 	DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
4194 	if (kr == KERN_SUCCESS &&
4195 	    physpage_p != NULL) {
4196 		/* for vm_map_wire_and_extract() */
4197 		*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
4198 		if (prot & VM_PROT_WRITE) {
4199 			vm_object_lock_assert_exclusive(m_object);
4200 			m->vmp_dirty = TRUE;
4201 		}
4202 	}
4203 
4204 	if (top_object != VM_OBJECT_NULL) {
4205 		/*
4206 		 * It's safe to drop the top object
4207 		 * now that we've done our
4208 		 * vm_fault_enter().  Any other fault
4209 		 * in progress for that virtual
4210 		 * address will either find our page
4211 		 * and translation or put in a new page
4212 		 * and translation.
4213 		 */
4214 		vm_object_unlock(top_object);
4215 		top_object = VM_OBJECT_NULL;
4216 	}
4217 
4218 	if (need_collapse == TRUE) {
4219 		vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
4220 	}
4221 
4222 	if (need_retry == FALSE &&
4223 	    (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
4224 		/*
4225 		 * evaluate access pattern and update state
4226 		 * vm_fault_deactivate_behind depends on the
4227 		 * state being up to date
4228 		 */
4229 		vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
4230 
4231 		vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
4232 	}
4233 	/*
4234 	 * That's it, clean up and return.
4235 	 */
4236 	if (m->vmp_busy) {
4237 		vm_object_lock_assert_exclusive(m_object);
4238 		vm_page_wakeup_done(m_object, m);
4239 	}
4240 
4241 	if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
4242 		vm_object_paging_begin(m_object);
4243 
4244 		assert(*written_on_object == VM_OBJECT_NULL);
4245 		*written_on_object = m_object;
4246 		*written_on_pager = m_object->pager;
4247 		*written_on_offset = m_object->paging_offset + m->vmp_offset;
4248 	}
4249 	vm_object_unlock(object);
4250 
4251 	vm_map_unlock_read(map);
4252 	if (real_map != map) {
4253 		vm_map_unlock(real_map);
4254 	}
4255 }
4256 
4257 static inline int
4258 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
4259 {
4260 	if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4261 		return DBG_COR_FAULT;
4262 	}
4263 	return type_of_fault;
4264 }
4265 
4266 uint64_t vm_fault_resilient_media_initiate = 0;
4267 uint64_t vm_fault_resilient_media_retry = 0;
4268 uint64_t vm_fault_resilient_media_proceed = 0;
4269 uint64_t vm_fault_resilient_media_release = 0;
4270 uint64_t vm_fault_resilient_media_abort1 = 0;
4271 uint64_t vm_fault_resilient_media_abort2 = 0;
4272 
4273 #if MACH_ASSERT
4274 int vm_fault_resilient_media_inject_error1_rate = 0;
4275 int vm_fault_resilient_media_inject_error1 = 0;
4276 int vm_fault_resilient_media_inject_error2_rate = 0;
4277 int vm_fault_resilient_media_inject_error2 = 0;
4278 int vm_fault_resilient_media_inject_error3_rate = 0;
4279 int vm_fault_resilient_media_inject_error3 = 0;
4280 #endif /* MACH_ASSERT */
4281 
4282 kern_return_t
4283 vm_fault_internal(
4284 	vm_map_t           map,
4285 	vm_map_offset_t    vaddr,
4286 	vm_prot_t          caller_prot,
4287 	vm_tag_t           wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4288 	pmap_t             caller_pmap,
4289 	vm_map_offset_t    caller_pmap_addr,
4290 	ppnum_t            *physpage_p,
4291 	vm_object_fault_info_t fault_info)
4292 {
4293 	vm_map_version_t        version;        /* Map version for verification */
4294 	boolean_t               wired;          /* Should mapping be wired down? */
4295 	vm_object_t             object;         /* Top-level object */
4296 	vm_object_offset_t      offset;         /* Top-level offset */
4297 	vm_prot_t               prot;           /* Protection for mapping */
4298 	vm_object_t             old_copy_object; /* Saved copy object */
4299 	uint32_t                old_copy_version;
4300 	vm_page_t               result_page;    /* Result of vm_fault_page */
4301 	vm_page_t               top_page;       /* Placeholder page */
4302 	kern_return_t           kr;
4303 
4304 	vm_page_t               m;      /* Fast access to result_page */
4305 	kern_return_t           error_code;
4306 	vm_object_t             cur_object;
4307 	vm_object_t             m_object = NULL;
4308 	vm_object_offset_t      cur_offset;
4309 	vm_page_t               cur_m;
4310 	vm_object_t             new_object;
4311 	int                     type_of_fault;
4312 	pmap_t                  pmap;
4313 	wait_interrupt_t        interruptible_state;
4314 	vm_map_t                real_map = map;
4315 	vm_map_t                original_map = map;
4316 	bool                    object_locks_dropped = FALSE;
4317 	vm_prot_t               fault_type;
4318 	vm_prot_t               original_fault_type;
4319 	bool                    need_collapse = FALSE;
4320 	boolean_t               need_retry = FALSE;
4321 	boolean_t               *need_retry_ptr = NULL;
4322 	uint8_t                 object_lock_type = 0;
4323 	uint8_t                 cur_object_lock_type;
4324 	vm_object_t             top_object = VM_OBJECT_NULL;
4325 	vm_object_t             written_on_object = VM_OBJECT_NULL;
4326 	memory_object_t         written_on_pager = NULL;
4327 	vm_object_offset_t      written_on_offset = 0;
4328 	int                     throttle_delay;
4329 	int                     compressed_count_delta;
4330 	uint8_t                 grab_options;
4331 	bool                    need_copy;
4332 	bool                    need_copy_on_read;
4333 	vm_map_offset_t         trace_vaddr;
4334 	vm_map_offset_t         trace_real_vaddr;
4335 	vm_map_size_t           fault_page_size;
4336 	vm_map_size_t           fault_page_mask;
4337 	int                     fault_page_shift;
4338 	vm_map_offset_t         fault_phys_offset;
4339 	vm_map_offset_t         real_vaddr;
4340 	bool                    resilient_media_retry = false;
4341 	bool                    resilient_media_ref_transfer = false;
4342 	vm_object_t             resilient_media_object = VM_OBJECT_NULL;
4343 	vm_object_offset_t      resilient_media_offset = (vm_object_offset_t)-1;
4344 	bool                    page_needs_data_sync = false;
4345 	/*
4346 	 * Was the VM object contended when vm_map_lookup_and_lock_object locked it?
4347 	 * If so, the zero fill path will drop the lock
4348 	 * NB: Ideally we would always drop the lock rather than rely on
4349 	 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4350 	 */
4351 	bool                    object_is_contended = false;
4352 
4353 
4354 	real_vaddr = vaddr;
4355 	trace_real_vaddr = vaddr;
4356 
4357 	/*
4358 	 * Some (kernel) submaps are marked with "should never fault".
4359 	 *
4360 	 * We do this for two reasons:
4361 	 * - PGZ which is inside the zone map range can't go down the normal
4362 	 *   lookup path (vm_map_lookup_entry() would panic).
4363 	 *
4364  * - we want guard pages to not have to use fictitious pages at all,
4365  *   to prevent ZFOD pages from being created.
4366 	 *
4367  * We also want to capture the fault address easily so that the zone
4368 	 * allocator might present an enhanced panic log.
4369 	 */
4370 	if (map->never_faults || (pgz_owned(vaddr) && map->pmap == kernel_pmap)) {
4371 		assert(map->pmap == kernel_pmap);
4372 		return KERN_INVALID_ADDRESS;
4373 	}
4374 
4375 	if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4376 		fault_phys_offset = (vm_map_offset_t)-1;
4377 		fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4378 		fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4379 		fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4380 		if (fault_page_size < PAGE_SIZE) {
4381 			DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4382 			vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4383 		}
4384 	} else {
4385 		fault_phys_offset = 0;
4386 		fault_page_size = PAGE_SIZE;
4387 		fault_page_mask = PAGE_MASK;
4388 		fault_page_shift = PAGE_SHIFT;
4389 		vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4390 	}
4391 
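	/*
	 * Worked example (assuming a 16KB kernel page size): for a fault in
	 * a submap with VM_MAP_PAGE_SIZE() == 4096, the values above become
	 * fault_page_size = 0x1000, fault_page_mask = 0xFFF and
	 * fault_page_shift = 12, and vaddr is truncated to a 4KB boundary;
	 * fault_phys_offset (computed further below from the looked-up
	 * offset) is then the 4KB subpage's offset within the 16KB page.
	 */
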
4392 	if (map == kernel_map) {
4393 		trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4394 		trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4395 	} else {
4396 		trace_vaddr = vaddr;
4397 	}
4398 
4399 	KDBG_RELEASE(
4400 		(VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_START,
4401 		((uint64_t)trace_vaddr >> 32),
4402 		trace_vaddr,
4403 		(map == kernel_map));
4404 
4405 	if (get_preemption_level() != 0) {
4406 		KDBG_RELEASE(
4407 			(VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
4408 			((uint64_t)trace_vaddr >> 32),
4409 			trace_vaddr,
4410 			KERN_FAILURE);
4411 
4412 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4413 		return KERN_FAILURE;
4414 	}
4415 
4416 	thread_t cthread = current_thread();
4417 
4418 	if (cthread->th_vm_faults_disabled) {
4419 		KDBG_RELEASE(
4420 			(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4421 			((uint64_t)trace_vaddr >> 32),
4422 			trace_vaddr,
4423 			KERN_FAILURE);
4424 		ktriage_record(thread_tid(cthread),
4425 		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
4426 		    KDBG_TRIAGE_RESERVED,
4427 		    KDBG_TRIAGE_VM_FAULTS_DISABLED),
4428 		    0 /* arg */);
4429 		return KERN_FAILURE;
4430 	}
4431 
4432 	bool     rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4433 	uint64_t fstart = 0;
4434 
4435 	if (rtfault) {
4436 		fstart = mach_continuous_time();
4437 	}
4438 
4439 	assert(fault_info != NULL);
4440 	interruptible_state = thread_interrupt_level(fault_info->interruptible);
4441 
4442 	fault_type = (fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot);
4443 
4444 	counter_inc(&vm_statistics_faults);
4445 	counter_inc(&current_task()->faults);
4446 	original_fault_type = fault_type;
4447 
4448 	need_copy = FALSE;
4449 	if (fault_type & VM_PROT_WRITE) {
4450 		need_copy = TRUE;
4451 	}
4452 
4453 	if (need_copy || fault_info->fi_change_wiring) {
4454 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4455 	} else {
4456 		object_lock_type = OBJECT_LOCK_SHARED;
4457 	}
4458 
4459 	cur_object_lock_type = OBJECT_LOCK_SHARED;
4460 
4461 	if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4462 		if (compressor_map) {
4463 			if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4464 				panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4465 			}
4466 		}
4467 	}
4468 RetryFault:
4469 	assert(written_on_object == VM_OBJECT_NULL);
4470 
4471 	/*
4472 	 * assume we will hit a page in the cache
4473 	 * otherwise, explicitly override with
4474 	 * the real fault type once we determine it
4475 	 */
4476 	type_of_fault = DBG_CACHE_HIT_FAULT;
4477 
4478 	/*
4479 	 *	Find the backing store object and offset into
4480 	 *	it to begin the search.
4481 	 */
4482 	fault_type = original_fault_type;
4483 	map = original_map;
4484 	vm_map_lock_read(map);
4485 
4486 	if (resilient_media_retry) {
4487 		/*
4488 		 * If we have to insert a fake zero-filled page to hide
4489 		 * a media failure to provide the real page, we need to
4490 		 * resolve any pending copy-on-write on this mapping.
4491 		 * VM_PROT_COPY tells vm_map_lookup_and_lock_object() to deal
4492 		 * with that even if this is not a "write" fault.
4493 		 */
4494 		need_copy = TRUE;
4495 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4496 		vm_fault_resilient_media_retry++;
4497 	}
4498 
4499 	kr = vm_map_lookup_and_lock_object(&map, vaddr,
4500 	    (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4501 	    object_lock_type, &version,
4502 	    &object, &offset, &prot, &wired,
4503 	    fault_info,
4504 	    &real_map,
4505 	    &object_is_contended);
4506 	object_is_contended = false; /* avoid unsafe optimization */
4507 
4508 	if (kr != KERN_SUCCESS) {
4509 		vm_map_unlock_read(map);
4510 		/*
4511 		 * This can be seen in a crash report if indeed the
4512 		 * thread is crashing due to an invalid access in a non-existent
4513 		 * range.
4514 		 * Turning this OFF for now because it is noisy and not always fatal
4515 		 * eg prefaulting.
4516 		 *
4517 		 * if (kr == KERN_INVALID_ADDRESS) {
4518 		 *	ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4519 		 * }
4520 		 */
4521 		goto done;
4522 	}
4523 
4524 
4525 	pmap = real_map->pmap;
4526 	fault_info->io_sync = FALSE;
4527 	fault_info->mark_zf_absent = FALSE;
4528 	fault_info->batch_pmap_op = FALSE;
4529 
4530 	if (resilient_media_retry) {
4531 		/*
4532 		 * We're retrying this fault after having detected a media
4533 		 * failure from a "resilient_media" mapping.
4534 		 * Check that the mapping is still pointing at the object
4535 		 * that just failed to provide a page.
4536 		 */
4537 		assert(resilient_media_object != VM_OBJECT_NULL);
4538 		assert(resilient_media_offset != (vm_object_offset_t)-1);
4539 		if ((object != VM_OBJECT_NULL &&
4540 		    object == resilient_media_object &&
4541 		    offset == resilient_media_offset &&
4542 		    fault_info->resilient_media)
4543 #if MACH_ASSERT
4544 		    && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4545 		    (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4546 #endif /* MACH_ASSERT */
4547 		    ) {
4548 			/*
4549 			 * This mapping still points at the same object
4550 			 * and is still "resilient_media": proceed in
4551 			 * "recovery-from-media-failure" mode, where we'll
4552 			 * insert a zero-filled page in the top object.
4553 			 */
4554 //                     printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4555 			vm_fault_resilient_media_proceed++;
4556 		} else {
4557 			/* not recovering: reset state and retry fault */
4558 //                     printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info->resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4559 			vm_object_unlock(object);
4560 			if (real_map != map) {
4561 				vm_map_unlock(real_map);
4562 			}
4563 			vm_map_unlock_read(map);
4564 			/* release our extra reference on failed object */
4565 //                     printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4566 			vm_object_deallocate(resilient_media_object);
4567 			resilient_media_object = VM_OBJECT_NULL;
4568 			resilient_media_offset = (vm_object_offset_t)-1;
4569 			resilient_media_retry = false;
4570 			vm_fault_resilient_media_abort1++;
4571 			goto RetryFault;
4572 		}
4573 	} else {
4574 		assert(resilient_media_object == VM_OBJECT_NULL);
4575 		resilient_media_offset = (vm_object_offset_t)-1;
4576 	}
4577 
4578 	/*
4579 	 * If the page is wired, we must fault for the current protection
4580 	 * value, to avoid further faults.
4581 	 */
4582 	if (wired) {
4583 		fault_type = prot | VM_PROT_WRITE;
4584 	}
4585 	if (wired || need_copy) {
4586 		/*
4587 		 * since we're treating this fault as a 'write'
4588 		 * we must hold the top object lock exclusively
4589 		 */
4590 		if (object_lock_type == OBJECT_LOCK_SHARED) {
4591 			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4592 
4593 			if (vm_object_lock_upgrade(object) == FALSE) {
4594 				/*
4595 				 * couldn't upgrade, so explicitly
4596 				 * take the lock exclusively
4597 				 */
4598 				vm_object_lock(object);
4599 			}
4600 		}
4601 	}
4602 
4603 #if     VM_FAULT_CLASSIFY
4604 	/*
4605 	 *	Temporary data gathering code
4606 	 */
4607 	vm_fault_classify(object, offset, fault_type);
4608 #endif
4609 	/*
4610 	 *	Fast fault code.  The basic idea is to do as much as
4611 	 *	possible while holding the map lock and object locks.
4612 	 *      Busy pages are not used until the object lock has to
4613 	 *	be dropped to do something (copy, zero fill, pmap enter).
4614 	 *	Similarly, paging references aren't acquired until that
4615 	 *	point, and object references aren't used.
4616 	 *
4617 	 *	If we can figure out what to do
4618 	 *	(zero fill, copy on write, pmap enter) while holding
4619 	 *	the locks, then it gets done.  Otherwise, we give up,
4620 	 *	and use the original fault path (which doesn't hold
4621 	 *	the map lock, and relies on busy pages).
4622 	 *	The give up cases include:
4623 	 *              - Have to talk to pager.
4624 	 *		- Page is busy, absent or in error.
4625 	 *		- Pager has locked out desired access.
4626 	 *		- Fault needs to be restarted.
4627 	 *		- Have to push page into copy object.
4628 	 *
4629 	 *	The code is an infinite loop that moves one level down
4630 	 *	the shadow chain each time.  cur_object and cur_offset
4631 	 *      refer to the current object being examined. object and offset
4632 	 *	are the original object from the map.  The loop is at the
4633 	 *	top level if and only if object and cur_object are the same.
4634 	 *
4635 	 *	Invariants:  Map lock is held throughout.  Lock is held on
4636 	 *		original object and cur_object (if different) when
4637 	 *		continuing or exiting loop.
4638 	 *
4639 	 */
4640 
4641 #if defined(__arm64__)
4642 	/*
4643 	 * Fail if reading an execute-only page in a
4644 	 * pmap that enforces execute-only protection.
4645 	 */
4646 	if (fault_type == VM_PROT_READ &&
4647 	    (prot & VM_PROT_EXECUTE) &&
4648 	    !(prot & VM_PROT_READ) &&
4649 	    pmap_enforces_execute_only(pmap)) {
4650 		vm_object_unlock(object);
4651 		vm_map_unlock_read(map);
4652 		if (real_map != map) {
4653 			vm_map_unlock(real_map);
4654 		}
4655 		kr = KERN_PROTECTION_FAILURE;
4656 		goto done;
4657 	}
4658 #endif
4659 
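	/*
	 * Note: fault_phys_offset is the faulting offset within the
	 * kernel-sized page.  It can only be non-zero when the map's page
	 * size is smaller than PAGE_SIZE (e.g. 4K mappings on a 16K kernel
	 * page), which the assertions before the pmap enter below rely on.
	 */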
4660 	fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
4661 
4662 	/*
4663 	 * If this page is to be inserted in a copy delay object
4664 	 * for writing, and if the object has a copy, then the
4665 	 * copy delay strategy is implemented in the slow fault path.
4666 	 */
4667 	if ((object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
4668 	    object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) &&
4669 	    object->vo_copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4670 		if (resilient_media_retry && object && object->internal) {
4671 			/*
4672 			 * We're handling a "resilient media retry" and we
4673 			 * just want to insert a zero-filled page in this
4674 			 * top object (if there's not already a page there),
4675 			 * so this is not a real "write" and we want to stay
4676 			 * on this code path.
4677 			 */
4678 		} else {
4679 			goto handle_copy_delay;
4680 		}
4681 	}
4682 
4683 	cur_object = object;
4684 	cur_offset = offset;
4685 
4686 	grab_options = 0;
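	/*
	 * grab_options is handed to vm_page_grab_options() below; when the
	 * top object allows it, a page may be grabbed from the secluded pool.
	 */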
4687 #if CONFIG_SECLUDED_MEMORY
4688 	if (object->can_grab_secluded) {
4689 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4690 	}
4691 #endif /* CONFIG_SECLUDED_MEMORY */
4692 
4693 	while (TRUE) {
4694 		if (!cur_object->pager_created &&
4695 		    cur_object->phys_contiguous) { /* superpage */
4696 			break;
4697 		}
4698 
4699 		if (cur_object->blocked_access) {
4700 			/*
4701 			 * Access to this VM object has been blocked.
4702 			 * Let the slow path handle it.
4703 			 */
4704 			break;
4705 		}
4706 
4707 		m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4708 		m_object = NULL;
4709 
4710 		if (m != VM_PAGE_NULL) {
4711 			m_object = cur_object;
4712 
4713 			if (m->vmp_busy) {
4714 				wait_result_t   result;
4715 
4716 				/*
4717 				 * in order to vm_page_sleep(), we must
4718 				 * have the object that 'm' belongs to locked exclusively
4719 				 */
4720 				if (object != cur_object) {
4721 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4722 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4723 
4724 						if (vm_object_lock_upgrade(cur_object) == FALSE) {
4725 							/*
4726 							 * couldn't upgrade so go do a full retry
4727 							 * immediately since we can no longer be
4728 							 * certain about cur_object (since we
4729 							 * don't hold a reference on it)...
4730 							 * first drop the top object lock
4731 							 */
4732 							vm_object_unlock(object);
4733 
4734 							vm_map_unlock_read(map);
4735 							if (real_map != map) {
4736 								vm_map_unlock(real_map);
4737 							}
4738 
4739 							goto RetryFault;
4740 						}
4741 					}
4742 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4743 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4744 
4745 					if (vm_object_lock_upgrade(object) == FALSE) {
4746 						/*
4747 						 * couldn't upgrade, so explicitly take the lock
4748 						 * exclusively and go relookup the page since we
4749 						 * will have dropped the object lock and
4750 						 * a different thread could have inserted
4751 						 * a page at this offset
4752 						 * no need for a full retry since we're
4753 						 * at the top level of the object chain
4754 						 */
4755 						vm_object_lock(object);
4756 
4757 						continue;
4758 					}
4759 				}
4760 				if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4761 					/*
4762 					 * m->vmp_busy == TRUE and the object is locked exclusively
4763 					 * if m->vmp_q_state is still VM_PAGE_ON_PAGEOUT_Q after we acquire the
4764 					 * queues lock, we are guaranteed that it is stable on
4765 					 * the pageout queue and therefore reclaimable
4766 					 *
4767 					 * NOTE: this is only true for the internal pageout queue
4768 					 * in the compressor world
4769 					 */
4770 					assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4771 
4772 					vm_page_lock_queues();
4773 
4774 					if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4775 						vm_pageout_throttle_up(m);
4776 						vm_page_unlock_queues();
4777 
4778 						vm_page_wakeup_done(m_object, m);
4779 						goto reclaimed_from_pageout;
4780 					}
4781 					vm_page_unlock_queues();
4782 				}
4783 				if (object != cur_object) {
4784 					vm_object_unlock(object);
4785 				}
4786 
4787 				vm_map_unlock_read(map);
4788 				if (real_map != map) {
4789 					vm_map_unlock(real_map);
4790 				}
4791 
4792 				result = vm_page_sleep(cur_object, m, fault_info->interruptible, LCK_SLEEP_UNLOCK);
4793 				if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4794 					goto RetryFault;
4795 				}
4796 
4797 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4798 				kr = KERN_ABORTED;
4799 				goto done;
4800 			}
4801 reclaimed_from_pageout:
4802 			if (m->vmp_laundry) {
4803 				if (object != cur_object) {
4804 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4805 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4806 
4807 						vm_object_unlock(object);
4808 						vm_object_unlock(cur_object);
4809 
4810 						vm_map_unlock_read(map);
4811 						if (real_map != map) {
4812 							vm_map_unlock(real_map);
4813 						}
4814 
4815 						goto RetryFault;
4816 					}
4817 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4818 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4819 
4820 					if (vm_object_lock_upgrade(object) == FALSE) {
4821 						/*
4822 						 * couldn't upgrade, so explicitly take the lock
4823 						 * exclusively and go relookup the page since we
4824 						 * will have dropped the object lock and
4825 						 * a different thread could have inserted
4826 						 * a page at this offset
4827 						 * no need for a full retry since we're
4828 						 * at the top level of the object chain
4829 						 */
4830 						vm_object_lock(object);
4831 
4832 						continue;
4833 					}
4834 				}
4835 				vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4836 				vm_pageout_steal_laundry(m, FALSE);
4837 			}
4838 
4839 
4840 			if (vm_page_is_guard(m)) {
4841 				/*
4842 				 * Guard page: let the slow path deal with it
4843 				 */
4844 				break;
4845 			}
4846 			if (m->vmp_unusual && (m->vmp_error || m->vmp_restart ||
4847 			    vm_page_is_private(m) || m->vmp_absent)) {
4848 				/*
4849 				 * Unusual case... let the slow path deal with it
4850 				 */
4851 				break;
4852 			}
4853 			if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4854 				if (object != cur_object) {
4855 					vm_object_unlock(object);
4856 				}
4857 				vm_map_unlock_read(map);
4858 				if (real_map != map) {
4859 					vm_map_unlock(real_map);
4860 				}
4861 				vm_object_unlock(cur_object);
4862 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4863 				kr = KERN_MEMORY_ERROR;
4864 				goto done;
4865 			}
4866 			assert(m_object == VM_PAGE_OBJECT(m));
4867 
4868 			if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4869 			    PAGE_SIZE, 0) ||
4870 			    (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4871 upgrade_lock_and_retry:
4872 				/*
4873 				 * We might need to validate this page
4874 				 * against its code signature, so we
4875 				 * want to hold the VM object exclusively.
4876 				 */
4877 				if (object != cur_object) {
4878 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4879 						vm_object_unlock(object);
4880 						vm_object_unlock(cur_object);
4881 
4882 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4883 
4884 						vm_map_unlock_read(map);
4885 						if (real_map != map) {
4886 							vm_map_unlock(real_map);
4887 						}
4888 
4889 						goto RetryFault;
4890 					}
4891 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4892 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4893 
4894 					if (vm_object_lock_upgrade(object) == FALSE) {
4895 						/*
4896 						 * couldn't upgrade, so explicitly take the lock
4897 						 * exclusively and go relookup the page since we
4898 						 * will have dropped the object lock and
4899 						 * a different thread could have inserted
4900 						 * a page at this offset
4901 						 * no need for a full retry since we're
4902 						 * at the top level of the object chain
4903 						 */
4904 						vm_object_lock(object);
4905 
4906 						continue;
4907 					}
4908 				}
4909 			}
4910 			/*
4911 			 *	Two cases of map-in faults:
4912 			 *	    - At top level w/o copy object.
4913 			 *	    - Read fault anywhere.
4914 			 *		--> must disallow write.
4915 			 */
4916 
4917 			if (object == cur_object && object->vo_copy == VM_OBJECT_NULL) {
4918 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
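				/*
				 * Write fault on a page that was decompressed read-only and is
				 * still marked "unmodified": clear that state and discard the
				 * compressor state that was kept for it, since the page is
				 * about to be modified (the read-fault side of this is in the
				 * compressor decompression path below).
				 */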
4919 				if ((fault_type & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
4920 					assert(cur_object == VM_PAGE_OBJECT(m));
4921 					assert(cur_object->internal);
4922 					vm_object_lock_assert_exclusive(cur_object);
4923 					vm_page_lockspin_queues();
4924 					m->vmp_unmodified_ro = false;
4925 					vm_page_unlock_queues();
4926 					os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4927 					vm_object_compressor_pager_state_clr(cur_object, m->vmp_offset);
4928 				}
4929 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4930 				goto FastPmapEnter;
4931 			}
4932 
4933 			if (!need_copy &&
4934 			    !fault_info->no_copy_on_read &&
4935 			    cur_object != object &&
4936 			    !cur_object->internal &&
4937 			    !cur_object->pager_trusted &&
4938 			    vm_protect_privileged_from_untrusted &&
4939 			    !cur_object->code_signed &&
4940 			    current_proc_is_privileged()) {
4941 				/*
4942 				 * We're faulting on a page in "object" and
4943 				 * went down the shadow chain to "cur_object"
4944 				 * to find out that "cur_object"'s pager
4945 				 * is not "trusted", i.e. we can not trust it
4946 				 * to always return the same contents.
4947 				 * Since the target is a "privileged" process,
4948 				 * let's treat this as a copy-on-read fault, as
4949 				 * if it was a copy-on-write fault.
4950 				 * Once "object" gets a copy of this page, it
4951 				 * won't have to rely on "cur_object" to
4952 				 * provide the contents again.
4953 				 *
4954 				 * This is done by setting "need_copy" and
4955 				 * retrying the fault from the top with the
4956 				 * appropriate locking.
4957 				 *
4958 				 * Special case: if the mapping is executable
4959 				 * and the untrusted object is code-signed and
4960 				 * the process is "cs_enforced", we do not
4961 				 * copy-on-read because that would break
4962 				 * code-signing enforcement expectations (an
4963 				 * executable page must belong to a code-signed
4964 				 * object) and we can rely on code-signing
4965 				 * to re-validate the page if it gets evicted
4966 				 * and paged back in.
4967 				 */
4968 //				printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4969 				vm_copied_on_read++;
4970 				need_copy = TRUE;
4971 
4972 				vm_object_unlock(object);
4973 				vm_object_unlock(cur_object);
4974 				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4975 				vm_map_unlock_read(map);
4976 				if (real_map != map) {
4977 					vm_map_unlock(real_map);
4978 				}
4979 				goto RetryFault;
4980 			}
4981 
4982 			if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4983 				if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4984 					/*
4985 					 * For a protection that the pmap cares
4986 					 * about, we must hand over the full
4987 					 * set of protections (so that the pmap
4988 					 * layer can apply any desired policy).
4989 					 * This means that cs_bypass must be
4990 					 * set, as this can force us to pass
4991 					 * RWX.
4992 					 */
4993 					if (!fault_info->cs_bypass) {
4994 						panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
4995 						    __FUNCTION__, pmap,
4996 						    (uint64_t)vaddr, prot,
4997 						    fault_info->pmap_options);
4998 					}
4999 				} else {
5000 					prot &= ~VM_PROT_WRITE;
5001 				}
5002 
5003 				if (object != cur_object) {
5004 					/*
5005 					 * We still need to hold the top object
5006 					 * lock here to prevent a race between
5007 					 * a read fault (taking only "shared"
5008 					 * locks) and a write fault (taking
5009 					 * an "exclusive" lock on the top
5010 					 * object).
5011 					 * Otherwise, as soon as we release the
5012 					 * top lock, the write fault could
5013 					 * proceed and actually complete before
5014 					 * the read fault, and the copied page's
5015 					 * translation could then be overwritten
5016 					 * by the read fault's translation for
5017 					 * the original page.
5018 					 *
5019 					 * Let's just record what the top object
5020 					 * is and we'll release it later.
5021 					 */
5022 					top_object = object;
5023 
5024 					/*
5025 					 * switch to the object that has the new page
5026 					 */
5027 					object = cur_object;
5028 					object_lock_type = cur_object_lock_type;
5029 				}
5030 FastPmapEnter:
5031 				assert(m_object == VM_PAGE_OBJECT(m));
5032 
5033 				if (resilient_media_retry && (prot & VM_PROT_WRITE)) {
5034 					/*
5035 					 * We might have bypassed some copy-on-write
5036 					 * mechanism to get here (theoretically inserting
5037 					 * a zero-filled page in the top object to avoid
5038 					 * raising an exception on an unavailable page at
5039 					 * the bottom of the shadow chain).
5040 					 * So let's not grant write access to this page yet.
5041 					 * If write access is needed, the next fault should
5042 					 * handle any copy-on-write obligations.
5043 					 */
5044 					if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5045 						/*
5046 						 * For a protection that the pmap cares
5047 						 * about, we must hand over the full
5048 						 * set of protections (so that the pmap
5049 						 * layer can apply any desired policy).
5050 						 * This means that cs_bypass must be
5051 						 * set, as this can force us to pass
5052 						 * RWX.
5053 						 */
5054 						if (!fault_info->cs_bypass) {
5055 							panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5056 							    __FUNCTION__, pmap,
5057 							    (uint64_t)vaddr, prot,
5058 							    fault_info->pmap_options);
5059 						}
5060 					} else {
5061 						prot &= ~VM_PROT_WRITE;
5062 					}
5063 				}
5064 
5065 				/*
5066 				 * prepare for the pmap_enter...
5067 				 * object and map are both locked
5068 				 * m contains valid data
5069 				 * object == m->vmp_object
5070 				 * cur_object == NULL or it's been unlocked
5071 				 * no paging references on either object or cur_object
5072 				 */
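				/*
				 * Only ask vm_fault_enter() to report a "need_retry" when we
				 * still hold the top object lock or only hold the object lock
				 * shared: in those cases it presumably cannot afford to block
				 * in the pmap layer, so a blocked PMAP_ENTER is turned into a
				 * retry (handled below by pre-expanding the page table).
				 */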
5073 				if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5074 					need_retry_ptr = &need_retry;
5075 				} else {
5076 					need_retry_ptr = NULL;
5077 				}
5078 
5079 				if (fault_page_size < PAGE_SIZE) {
5080 					DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
5081 					assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
5082 					    fault_phys_offset < PAGE_SIZE),
5083 					    "0x%llx\n", (uint64_t)fault_phys_offset);
5084 				} else {
5085 					assertf(fault_phys_offset == 0,
5086 					    "0x%llx\n", (uint64_t)fault_phys_offset);
5087 				}
5088 
5089 				if (__improbable(rtfault &&
5090 				    !m->vmp_realtime &&
5091 				    vm_pageout_protect_realtime)) {
5092 					vm_page_lock_queues();
5093 					if (!m->vmp_realtime) {
5094 						m->vmp_realtime = true;
5095 						vm_page_realtime_count++;
5096 					}
5097 					vm_page_unlock_queues();
5098 				}
5099 				assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
5100 				assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
5101 				if (caller_pmap) {
5102 					kr = vm_fault_enter(m,
5103 					    caller_pmap,
5104 					    caller_pmap_addr,
5105 					    fault_page_size,
5106 					    fault_phys_offset,
5107 					    prot,
5108 					    caller_prot,
5109 					    wired,
5110 					    wire_tag,
5111 					    fault_info,
5112 					    need_retry_ptr,
5113 					    &type_of_fault,
5114 					    &object_lock_type);
5115 				} else {
5116 					kr = vm_fault_enter(m,
5117 					    pmap,
5118 					    vaddr,
5119 					    fault_page_size,
5120 					    fault_phys_offset,
5121 					    prot,
5122 					    caller_prot,
5123 					    wired,
5124 					    wire_tag,
5125 					    fault_info,
5126 					    need_retry_ptr,
5127 					    &type_of_fault,
5128 					    &object_lock_type);
5129 				}
5130 
5131 				vm_fault_complete(
5132 					map,
5133 					real_map,
5134 					object,
5135 					m_object,
5136 					m,
5137 					offset,
5138 					trace_real_vaddr,
5139 					fault_info,
5140 					caller_prot,
5141 					real_vaddr,
5142 					vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
5143 					need_retry,
5144 					kr,
5145 					physpage_p,
5146 					prot,
5147 					top_object,
5148 					need_collapse,
5149 					cur_offset,
5150 					fault_type,
5151 					&written_on_object,
5152 					&written_on_pager,
5153 					&written_on_offset);
5154 				top_object = VM_OBJECT_NULL;
5155 				if (need_retry == TRUE) {
5156 					/*
5157 					 * vm_fault_enter couldn't complete the PMAP_ENTER...
5158 					 * at this point we don't hold any locks so it's safe
5159 					 * to ask the pmap layer to expand the page table to
5160 					 * accommodate this mapping... once expanded, we'll
5161 					 * re-drive the fault which should result in vm_fault_enter
5162 					 * being able to successfully enter the mapping this time around
5163 					 */
5164 					(void)pmap_enter_options(
5165 						pmap, vaddr, 0, 0, 0, 0, 0,
5166 						PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5167 
5168 					need_retry = FALSE;
5169 					goto RetryFault;
5170 				}
5171 				goto done;
5172 			}
5173 			/*
5174 			 * COPY ON WRITE FAULT
5175 			 */
5176 			assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
5177 
5178 			/*
5179 			 * If objects match, then
5180 			 * object->vo_copy must not be NULL (else control
5181 			 * would be in previous code block), and we
5182 			 * have a potential push into the copy object
5183 			 * which we can't cope with here.
5184 			 */
5185 			if (cur_object == object) {
5186 				/*
5187 				 * must take the slow path to
5188 				 * deal with the copy push
5189 				 */
5190 				break;
5191 			}
5192 
5193 			/*
5194 			 * This is now a shadow based copy on write
5195 			 * fault -- it requires a copy up the shadow
5196 			 * chain.
5197 			 */
5198 			assert(m_object == VM_PAGE_OBJECT(m));
5199 
5200 			if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
5201 			    vm_fault_cs_need_validation(NULL, m, m_object,
5202 			    PAGE_SIZE, 0)) {
5203 				goto upgrade_lock_and_retry;
5204 			}
5205 
5206 #if MACH_ASSERT
5207 			if (resilient_media_retry &&
5208 			    vm_fault_resilient_media_inject_error2_rate != 0 &&
5209 			    (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
5210 				/* inject an error */
5211 				cur_m = m;
5212 				m = VM_PAGE_NULL;
5213 				m_object = VM_OBJECT_NULL;
5214 				break;
5215 			}
5216 #endif /* MACH_ASSERT */
5217 			/*
5218 			 * Allocate a page in the original top level
5219 			 * object. Give up if allocate fails.  Also
5220 			 * need to remember current page, as it's the
5221 			 * source of the copy.
5222 			 *
5223 			 * at this point we hold locks on both
5224 			 * object and cur_object... no need to take
5225 			 * paging refs or mark pages BUSY since
5226 			 * we don't drop either object lock until
5227 			 * the page has been copied and inserted
5228 			 */
5229 
5230 
5231 			cur_m = m;
5232 			m = vm_page_grab_options(grab_options);
5233 			m_object = NULL;
5234 
5235 			if (m == VM_PAGE_NULL) {
5236 				/*
5237 				 * no free page currently available...
5238 				 * must take the slow path
5239 				 */
5240 				break;
5241 			}
5242 
5243 			/*
5244 			 * Now do the copy.  Mark the source page busy...
5245 			 *
5246 			 *	NOTE: This code holds the map lock across
5247 			 *	the page copy.
5248 			 */
5249 			vm_page_copy(cur_m, m);
5250 			vm_page_insert(m, object, vm_object_trunc_page(offset));
5251 			if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
5252 				DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5253 			}
5254 			m_object = object;
5255 			SET_PAGE_DIRTY(m, FALSE);
5256 
5257 			/*
5258 			 * Now cope with the source page and object
5259 			 */
5260 			if (os_ref_get_count_raw(&object->ref_count) > 1 &&
5261 			    cur_m->vmp_pmapped) {
5262 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5263 			} else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
5264 				/*
5265 				 * We've copied the full 16K page but we're
5266 				 * about to call vm_fault_enter() only for
5267 				 * the 4K chunk we're faulting on.  The other
5268 				 * three 4K chunks in that page could still
5269 				 * be pmapped in this pmap.
5270 				 * Since the VM object layer thinks that the
5271 				 * entire page has been dealt with and the
5272 				 * original page might no longer be needed,
5273 				 * it might collapse/bypass the original VM
5274 				 * object and free its pages, which would be
5275 				 * bad (and would trigger pmap_verify_free()
5276 				 * assertions) if the other 4K chunks are still
5277 				 * pmapped.
5278 				 */
5279 				/*
5280 				 * XXX FBDP TODO4K: to be revisited
5281 				 * Technically, we need to pmap_disconnect()
5282 				 * only the target pmap's mappings for the 4K
5283 				 * chunks of this 16K VM page.  If other pmaps
5284 				 * have PTEs on these chunks, that means that
5285 				 * the associated VM map must have a reference
5286 				 * on the VM object, so no need to worry about
5287 				 * those.
5288 				 * pmap_protect() for each 4K chunk would be
5289 				 * better but we'd have to check which chunks
5290 				 * are actually mapped before and after this
5291 				 * one.
5292 				 * A full-blown pmap_disconnect() is easier
5293 				 * for now but not efficient.
5294 				 */
5295 				DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
5296 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5297 			}
5298 
5299 			if (cur_m->vmp_clustered) {
5300 				VM_PAGE_COUNT_AS_PAGEIN(cur_m);
5301 				VM_PAGE_CONSUME_CLUSTERED(cur_m);
5302 				vm_fault_is_sequential(cur_object, cur_offset, fault_info->behavior);
5303 			}
5304 			need_collapse = TRUE;
5305 
5306 			if (!cur_object->internal &&
5307 			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5308 				/*
5309 				 * The object from which we've just
5310 				 * copied a page is most probably backed
5311 				 * by a vnode.  We don't want to waste too
5312 				 * much time trying to collapse the VM objects
5313 				 * and create a bottleneck when several tasks
5314 				 * map the same file.
5315 				 */
5316 				if (cur_object->vo_copy == object) {
5317 					/*
5318 					 * Shared mapping or no COW yet.
5319 					 * We can never collapse a copy
5320 					 * object into its backing object.
5321 					 */
5322 					need_collapse = FALSE;
5323 				} else if (cur_object->vo_copy == object->shadow &&
5324 				    object->shadow->resident_page_count == 0) {
5325 					/*
5326 					 * Shared mapping after a COW occurred.
5327 					 */
5328 					need_collapse = FALSE;
5329 				}
5330 			}
5331 			vm_object_unlock(cur_object);
5332 
5333 			if (need_collapse == FALSE) {
5334 				vm_fault_collapse_skipped++;
5335 			}
5336 			vm_fault_collapse_total++;
5337 
5338 			type_of_fault = DBG_COW_FAULT;
5339 			counter_inc(&vm_statistics_cow_faults);
5340 			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5341 			counter_inc(&current_task()->cow_faults);
5342 
5343 			goto FastPmapEnter;
5344 		} else {
5345 			/*
5346 			 * No page at cur_object, cur_offset... m == NULL
5347 			 */
5348 			if (cur_object->pager_created) {
5349 				vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5350 
5351 				if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5352 					int             my_fault_type;
5353 					vm_compressor_options_t         c_flags = C_DONT_BLOCK;
5354 					bool            insert_cur_object = FALSE;
5355 
5356 					/*
5357 					 * May have to talk to a pager...
5358 					 * if so, take the slow path by
5359 					 * doing a 'break' from the while (TRUE) loop
5360 					 *
5361 					 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5362 					 * if the compressor is active and the page exists there
5363 					 */
5364 					if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5365 						break;
5366 					}
5367 
5368 					if (map == kernel_map || real_map == kernel_map) {
5369 						/*
5370 						 * can't call into the compressor with the kernel_map
5371 						 * lock held, since the compressor may try to operate
5372 						 * on the kernel map in order to return an empty c_segment
5373 						 */
5374 						break;
5375 					}
5376 					if (object != cur_object) {
5377 						if (fault_type & VM_PROT_WRITE) {
5378 							c_flags |= C_KEEP;
5379 						} else {
5380 							insert_cur_object = TRUE;
5381 						}
5382 					}
5383 					if (insert_cur_object == TRUE) {
5384 						if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5385 							cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5386 
5387 							if (vm_object_lock_upgrade(cur_object) == FALSE) {
5388 								/*
5389 								 * couldn't upgrade so go do a full retry
5390 								 * immediately since we can no longer be
5391 								 * certain about cur_object (since we
5392 								 * don't hold a reference on it)...
5393 								 * first drop the top object lock
5394 								 */
5395 								vm_object_unlock(object);
5396 
5397 								vm_map_unlock_read(map);
5398 								if (real_map != map) {
5399 									vm_map_unlock(real_map);
5400 								}
5401 
5402 								goto RetryFault;
5403 							}
5404 						}
5405 					} else if (object_lock_type == OBJECT_LOCK_SHARED) {
5406 						object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5407 
5408 						if (object != cur_object) {
5409 							/*
5410 							 * we can't go for the upgrade on the top
5411 							 * lock since the upgrade may block waiting
5412 							 * for readers to drain... since we hold
5413 							 * cur_object locked at this point, waiting
5414 							 * for the readers to drain would represent
5415 							 * a lock order inversion since the lock order
5416 							 * for objects is the reference order in the
5417 							 * shadow chain
5418 							 */
5419 							vm_object_unlock(object);
5420 							vm_object_unlock(cur_object);
5421 
5422 							vm_map_unlock_read(map);
5423 							if (real_map != map) {
5424 								vm_map_unlock(real_map);
5425 							}
5426 
5427 							goto RetryFault;
5428 						}
5429 						if (vm_object_lock_upgrade(object) == FALSE) {
5430 							/*
5431 							 * couldn't upgrade, so explicitly take the lock
5432 							 * exclusively and go relookup the page since we
5433 							 * will have dropped the object lock and
5434 							 * a different thread could have inserted
5435 							 * a page at this offset
5436 							 * no need for a full retry since we're
5437 							 * at the top level of the object chain
5438 							 */
5439 							vm_object_lock(object);
5440 
5441 							continue;
5442 						}
5443 					}
5444 
5445 					m = vm_page_grab_options(grab_options);
5446 					m_object = NULL;
5447 
5448 					if (m == VM_PAGE_NULL) {
5449 						/*
5450 						 * no free page currently available...
5451 						 * must take the slow path
5452 						 */
5453 						break;
5454 					}
5455 
5456 					/*
5457 					 * The object is and remains locked
5458 					 * so no need to take a
5459 					 * "paging_in_progress" reference.
5460 					 */
5461 					bool      shared_lock;
5462 					if ((object == cur_object &&
5463 					    object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5464 					    (object != cur_object &&
5465 					    cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5466 						shared_lock = FALSE;
5467 					} else {
5468 						shared_lock = TRUE;
5469 					}
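					/*
					 * shared_lock tells vm_compressor_pager_count() whether the
					 * object receiving the accounting update is only locked
					 * shared, presumably so the compressed-page counts can be
					 * updated atomically in that case.
					 */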
5470 
5471 					kr = vm_compressor_pager_get(
5472 						cur_object->pager,
5473 						(vm_object_trunc_page(cur_offset)
5474 						+ cur_object->paging_offset),
5475 						VM_PAGE_GET_PHYS_PAGE(m),
5476 						&my_fault_type,
5477 						c_flags,
5478 						&compressed_count_delta);
5479 
5480 					vm_compressor_pager_count(
5481 						cur_object->pager,
5482 						compressed_count_delta,
5483 						shared_lock,
5484 						cur_object);
5485 
5486 					if (kr != KERN_SUCCESS) {
5487 						vm_page_release(m, FALSE);
5488 						m = VM_PAGE_NULL;
5489 					}
5490 					/*
5491 					 * If vm_compressor_pager_get() returns
5492 					 * KERN_MEMORY_FAILURE, then the
5493 					 * compressed data is permanently lost,
5494 					 * so return this error immediately.
5495 					 */
5496 					if (kr == KERN_MEMORY_FAILURE) {
5497 						if (object != cur_object) {
5498 							vm_object_unlock(cur_object);
5499 						}
5500 						vm_object_unlock(object);
5501 						vm_map_unlock_read(map);
5502 						if (real_map != map) {
5503 							vm_map_unlock(real_map);
5504 						}
5505 
5506 						goto done;
5507 					} else if (kr != KERN_SUCCESS) {
5508 						break;
5509 					}
5510 					m->vmp_dirty = TRUE;
5511 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
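					/*
					 * Read-only decompression: map the page without write access
					 * and mark it "unmodified", presumably so the compressor
					 * state can be reused until a write fault clears
					 * vmp_unmodified_ro (see the write-fault side of this
					 * earlier in this function).
					 */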
5512 					if ((fault_type & VM_PROT_WRITE) == 0) {
5513 						prot &= ~VM_PROT_WRITE;
5514 						/*
5515 						 * The page, m, has yet to be inserted
5516 						 * into an object. So we are fine with
5517 						 * the object/cur_object lock being held
5518 						 * shared.
5519 						 */
5520 						vm_page_lockspin_queues();
5521 						m->vmp_unmodified_ro = true;
5522 						vm_page_unlock_queues();
5523 						os_atomic_inc(&compressor_ro_uncompressed, relaxed);
5524 					}
5525 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5526 
5527 					/*
5528 					 * If the object is purgeable, its
5529 					 * owner's purgeable ledgers will be
5530 					 * updated in vm_page_insert() but the
5531 					 * page was also accounted for in a
5532 					 * "compressed purgeable" ledger, so
5533 					 * update that now.
5534 					 */
5535 					if (object != cur_object &&
5536 					    !insert_cur_object) {
5537 						/*
5538 						 * We're not going to insert
5539 						 * the decompressed page into
5540 						 * the object it came from.
5541 						 *
5542 						 * We're dealing with a
5543 						 * copy-on-write fault on
5544 						 * "object".
5545 						 * We're going to decompress
5546 						 * the page directly into the
5547 						 * target "object" while
5548 						 * keeping the compressed
5549 						 * page for "cur_object", so
5550 						 * no ledger update in that
5551 						 * case.
5552 						 */
5553 					} else if (((cur_object->purgable ==
5554 					    VM_PURGABLE_DENY) &&
5555 					    (!cur_object->vo_ledger_tag)) ||
5556 					    (cur_object->vo_owner ==
5557 					    NULL)) {
5558 						/*
5559 						 * "cur_object" is not purgeable
5560 						 * and is not ledger-tagged, or
5561 						 * there's no owner for it,
5562 						 * so no owner's ledgers to
5563 						 * update.
5564 						 */
5565 					} else {
5566 						/*
5567 						 * One less compressed
5568 						 * purgeable/tagged page for
5569 						 * cur_object's owner.
5570 						 */
5571 						if (compressed_count_delta) {
5572 							vm_object_owner_compressed_update(
5573 								cur_object,
5574 								-1);
5575 						}
5576 					}
5577 
5578 					if (insert_cur_object) {
5579 						vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5580 						m_object = cur_object;
5581 					} else {
5582 						vm_page_insert(m, object, vm_object_trunc_page(offset));
5583 						m_object = object;
5584 					}
5585 
5586 					if (!HAS_DEFAULT_CACHEABILITY(m_object->wimg_bits & VM_WIMG_MASK)) {
5587 						/*
5588 						 * If the page is not cacheable,
5589 						 * we can't let its contents
5590 						 * linger in the data cache
5591 						 * after the decompression.
5592 						 */
5593 						pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5594 					}
5595 
5596 					type_of_fault = my_fault_type;
5597 
5598 					VM_STAT_DECOMPRESSIONS();
5599 
5600 					if (cur_object != object) {
5601 						if (insert_cur_object) {
5602 							top_object = object;
5603 							/*
5604 							 * switch to the object that has the new page
5605 							 */
5606 							object = cur_object;
5607 							object_lock_type = cur_object_lock_type;
5608 						} else {
5609 							vm_object_unlock(cur_object);
5610 							cur_object = object;
5611 						}
5612 					}
5613 					goto FastPmapEnter;
5614 				}
5615 				/*
5616 				 * existence map present and indicates
5617 				 * that the pager doesn't have this page
5618 				 */
5619 			}
5620 			if (cur_object->shadow == VM_OBJECT_NULL ||
5621 			    resilient_media_retry) {
5622 				/*
5623 				 * Zero fill fault.  Page gets
5624 				 * inserted into the original object.
5625 				 */
5626 				if (cur_object->shadow_severed ||
5627 				    VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5628 				    cur_object == compressor_object ||
5629 				    is_kernel_object(cur_object)) {
5630 					if (object != cur_object) {
5631 						vm_object_unlock(cur_object);
5632 					}
5633 					vm_object_unlock(object);
5634 
5635 					vm_map_unlock_read(map);
5636 					if (real_map != map) {
5637 						vm_map_unlock(real_map);
5638 					}
5639 					if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5640 						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5641 					}
5642 
5643 					if (cur_object->shadow_severed) {
5644 						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5645 					}
5646 
5647 					kr = KERN_MEMORY_ERROR;
5648 					goto done;
5649 				}
5650 				if (cur_object != object) {
5651 					vm_object_unlock(cur_object);
5652 
5653 					cur_object = object;
5654 				}
5655 				if (object_lock_type == OBJECT_LOCK_SHARED) {
5656 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5657 
5658 					if (vm_object_lock_upgrade(object) == FALSE) {
5659 						/*
5660 						 * couldn't upgrade so do a full retry on the fault
5661 						 * since we dropped the object lock which
5662 						 * could allow another thread to insert
5663 						 * a page at this offset
5664 						 */
5665 						vm_map_unlock_read(map);
5666 						if (real_map != map) {
5667 							vm_map_unlock(real_map);
5668 						}
5669 
5670 						goto RetryFault;
5671 					}
5672 				}
5673 				if (!object->internal) {
5674 					panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5675 				}
5676 #if MACH_ASSERT
5677 				if (resilient_media_retry &&
5678 				    vm_fault_resilient_media_inject_error3_rate != 0 &&
5679 				    (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5680 					/* inject an error */
5681 					m_object = NULL;
5682 					break;
5683 				}
5684 #endif /* MACH_ASSERT */
5685 				m = vm_page_alloc(object, vm_object_trunc_page(offset));
5686 				m_object = NULL;
5687 
5688 				if (m == VM_PAGE_NULL) {
5689 					/*
5690 					 * no free page currently available...
5691 					 * must take the slow path
5692 					 */
5693 					break;
5694 				}
5695 				m_object = object;
5696 
5697 				if ((prot & VM_PROT_WRITE) &&
5698 				    !(fault_type & VM_PROT_WRITE) &&
5699 				    object->vo_copy != VM_OBJECT_NULL) {
5700 					/*
5701 					 * This is not a write fault and
5702 					 * we might have a copy-on-write
5703 					 * obligation to honor (copy object or
5704 					 * "needs_copy" map entry), so do not
5705 					 * give write access yet.
5706 					 * We'll need to catch the first write
5707 					 * to resolve the copy-on-write by
5708 					 * pushing this page to a copy object
5709 					 * or making a shadow object.
5710 					 */
5711 					if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5712 						/*
5713 						 * This pmap enforces extra
5714 						 * constraints for this set of
5715 						 * protections, so we can't
5716 						 * change the protections.
5717 						 * We would expect code-signing
5718 						 * to be bypassed in this case.
5719 						 */
5720 						if (!fault_info->cs_bypass) {
5721 							panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5722 							    __FUNCTION__,
5723 							    pmap,
5724 							    (uint64_t)vaddr,
5725 							    prot,
5726 							    fault_info->pmap_options);
5727 						}
5728 					} else {
5729 						prot &= ~VM_PROT_WRITE;
5730 					}
5731 				}
5732 				if (resilient_media_retry) {
5733 					/*
5734 					 * Not a real write, so no reason to assert.
5735 					 * We've just allocated a new page for this
5736 					 * <object,offset> so we know nobody has any
5737 					 * PTE pointing at any previous version of this
5738 					 * page and no copy-on-write is involved here.
5739 					 * We're just inserting a page of zeroes at this
5740 					 * stage of the shadow chain because the pager
5741 					 * for the lowest object in the shadow chain
5742 					 * said it could not provide that page and we
5743 					 * want to avoid failing the fault and causing
5744 					 * a crash on this "resilient_media" mapping.
5745 					 */
5746 				} else {
5747 					assertf(!((fault_type & VM_PROT_WRITE) && object->vo_copy),
5748 					    "map %p va 0x%llx wrong path for write fault (fault_type 0x%x) on object %p with copy %p\n",
5749 					    map, (uint64_t)vaddr, fault_type, object, object->vo_copy);
5750 				}
5751 
5752 				vm_object_t saved_copy_object;
5753 				uint32_t saved_copy_version;
5754 				saved_copy_object = object->vo_copy;
5755 				saved_copy_version = object->vo_copy_version;
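				/*
				 * Snapshot the copy object and its version: if the object lock
				 * gets dropped below (contended-lock path), these are compared
				 * on relock to detect a copy-on-write change and, if one
				 * happened, write access is withheld.
				 */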
5756 
5757 				/*
5758 				 * Zeroing the page and entering it into the pmap
5759 				 * represents a significant amount of the zero fill fault handler's work.
5760 				 *
5761 				 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5762 				 * now that we've inserted the page into the vm object.
5763 				 * Before dropping the lock, we need to check protection bits and set the
5764 				 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5765 				 * zero it, and do the pmap enter. We'll need to reacquire the lock
5766 				 * to clear the busy bit and wake up any waiters.
5767 				 */
5768 				vm_fault_cs_clear(m);
5769 				m->vmp_pmapped = TRUE;
5770 				if (map->no_zero_fill) {
5771 					type_of_fault = DBG_NZF_PAGE_FAULT;
5772 				} else {
5773 					type_of_fault = DBG_ZERO_FILL_FAULT;
5774 				}
5775 				{
5776 					pmap_t destination_pmap;
5777 					vm_map_offset_t destination_pmap_vaddr;
5778 					vm_prot_t enter_fault_type;
5779 					if (caller_pmap) {
5780 						destination_pmap = caller_pmap;
5781 						destination_pmap_vaddr = caller_pmap_addr;
5782 					} else {
5783 						destination_pmap = pmap;
5784 						destination_pmap_vaddr = vaddr;
5785 					}
5786 					if (fault_info->fi_change_wiring) {
5787 						enter_fault_type = VM_PROT_NONE;
5788 					} else {
5789 						enter_fault_type = caller_prot;
5790 					}
5791 					assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5792 					kr = vm_fault_enter_prepare(m,
5793 					    destination_pmap,
5794 					    destination_pmap_vaddr,
5795 					    &prot,
5796 					    caller_prot,
5797 					    fault_page_size,
5798 					    fault_phys_offset,
5799 					    enter_fault_type,
5800 					    fault_info,
5801 					    &type_of_fault,
5802 					    &page_needs_data_sync);
5803 					if (kr != KERN_SUCCESS) {
5804 						goto zero_fill_cleanup;
5805 					}
5806 
5807 					if (object_is_contended) {
5808 						/*
5809 						 * At this point the page is in the vm object, but not on a paging queue.
5810 						 * Since it's accessible to another thread but its contents are invalid
5811 						 * (it hasn't been zeroed), mark it busy before dropping the object lock.
5812 						 */
5813 						m->vmp_busy = TRUE;
5814 						vm_object_paging_begin(object); /* keep object alive */
5815 						vm_object_unlock(object);
5816 					}
5817 					if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5818 						/*
5819 						 * Now zero fill page...
5820 						 * the page is probably going to
5821 						 * be written soon, so don't bother
5822 						 * to clear the modified bit
5823 						 *
5824 						 *   NOTE: This code holds the map
5825 						 *   lock across the zero fill.
5826 						 */
5827 						vm_page_zero_fill(
5828 							m
5829 							);
5830 						counter_inc(&vm_statistics_zero_fill_count);
5831 						DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5832 					}
5833 
5834 					if (object_is_contended) {
5835 						/*
5836 						 * It's not safe to do the pmap_enter() without holding
5837 						 * the object lock because its "vo_copy" could change.
5838 						 */
5839 						object_is_contended = false; /* get out of that code path */
5840 
5841 						vm_object_lock(object);
5842 						vm_object_paging_end(object);
5843 						if (object->vo_copy != saved_copy_object ||
5844 						    object->vo_copy_version != saved_copy_version) {
5845 							/*
5846 							 * The COPY_DELAY copy-on-write situation for
5847 							 * this VM object has changed while it was
5848 							 * unlocked, so do not grant write access to
5849 							 * this page.
5850 							 * The write access will fault again and we'll
5851 							 * resolve the copy-on-write then.
5852 							 */
5853 							if (pmap_has_prot_policy(pmap,
5854 							    fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE,
5855 							    prot)) {
5856 								/* we should not do CoW on pmap_has_prot_policy mappings */
5857 								panic("%s: map %p va 0x%llx obj %p,%u saved %p,%u: unexpected CoW",
5858 								    __FUNCTION__,
5859 								    map, (uint64_t)vaddr,
5860 								    object, object->vo_copy_version,
5861 								    saved_copy_object, saved_copy_version);
5862 							} else {
5863 								/* the pmap layer is OK with changing the PTE's prot */
5864 								prot &= ~VM_PROT_WRITE;
5865 							}
5866 						}
5867 					}
5868 
5869 					if (page_needs_data_sync) {
5870 						pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5871 					}
5872 
5873 					if (top_object != VM_OBJECT_NULL) {
5874 						need_retry_ptr = &need_retry;
5875 					} else {
5876 						need_retry_ptr = NULL;
5877 					}
5878 					if (fault_info->fi_xnu_user_debug &&
5879 					    !object->code_signed) {
5880 						fault_info->pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
5881 					}
5882 					if (object_is_contended) {
5883 						panic("object_is_contended");
5884 						kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5885 						    fault_page_size, fault_phys_offset,
5886 						    m, &prot, caller_prot, enter_fault_type, wired,
5887 						    fault_info->pmap_options, need_retry_ptr);
5888 						vm_object_lock(object);
5889 						assertf(!((prot & VM_PROT_WRITE) && object->vo_copy),
5890 						    "prot 0x%x object %p copy %p\n",
5891 						    prot, object, object->vo_copy);
5892 					} else {
5893 						kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5894 						    fault_page_size, fault_phys_offset,
5895 						    m, &prot, caller_prot, enter_fault_type, wired,
5896 						    fault_info->pmap_options, need_retry_ptr, &object_lock_type);
5897 					}
5898 				}
5899 zero_fill_cleanup:
5900 				if (!VM_DYNAMIC_PAGING_ENABLED() &&
5901 				    (object->purgable == VM_PURGABLE_DENY ||
5902 				    object->purgable == VM_PURGABLE_NONVOLATILE ||
5903 				    object->purgable == VM_PURGABLE_VOLATILE)) {
5904 					vm_page_lockspin_queues();
5905 					if (!VM_DYNAMIC_PAGING_ENABLED()) {
5906 						vm_fault_enqueue_throttled_locked(m);
5907 					}
5908 					vm_page_unlock_queues();
5909 				}
5910 				vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, &type_of_fault, kr);
5911 
5912 				if (__improbable(rtfault &&
5913 				    !m->vmp_realtime &&
5914 				    vm_pageout_protect_realtime)) {
5915 					vm_page_lock_queues();
5916 					if (!m->vmp_realtime) {
5917 						m->vmp_realtime = true;
5918 						vm_page_realtime_count++;
5919 					}
5920 					vm_page_unlock_queues();
5921 				}
5922 				vm_fault_complete(
5923 					map,
5924 					real_map,
5925 					object,
5926 					m_object,
5927 					m,
5928 					offset,
5929 					trace_real_vaddr,
5930 					fault_info,
5931 					caller_prot,
5932 					real_vaddr,
5933 					type_of_fault,
5934 					need_retry,
5935 					kr,
5936 					physpage_p,
5937 					prot,
5938 					top_object,
5939 					need_collapse,
5940 					cur_offset,
5941 					fault_type,
5942 					&written_on_object,
5943 					&written_on_pager,
5944 					&written_on_offset);
5945 				top_object = VM_OBJECT_NULL;
5946 				if (need_retry == TRUE) {
5947 					/*
5948 					 * vm_fault_enter couldn't complete the PMAP_ENTER...
5949 					 * at this point we don't hold any locks so it's safe
5950 					 * to ask the pmap layer to expand the page table to
5951 					 * accommodate this mapping... once expanded, we'll
5952 					 * re-drive the fault which should result in vm_fault_enter
5953 					 * being able to successfully enter the mapping this time around
5954 					 */
5955 					(void)pmap_enter_options(
5956 						pmap, vaddr, 0, 0, 0, 0, 0,
5957 						PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5958 
5959 					need_retry = FALSE;
5960 					goto RetryFault;
5961 				}
5962 				goto done;
5963 			}
5964 			/*
5965 			 * On to the next level in the shadow chain
5966 			 */
5967 			cur_offset += cur_object->vo_shadow_offset;
5968 			new_object = cur_object->shadow;
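			/*
			 * cur_offset has moved down the shadow chain, so recompute
			 * the sub-page offset within the kernel-sized page at this level.
			 */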
5969 			fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
5970 
5971 			/*
5972 			 * take the new_object's lock with the indicated state
5973 			 */
5974 			if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5975 				vm_object_lock_shared(new_object);
5976 			} else {
5977 				vm_object_lock(new_object);
5978 			}
5979 
5980 			if (cur_object != object) {
5981 				vm_object_unlock(cur_object);
5982 			}
5983 
5984 			cur_object = new_object;
5985 
5986 			continue;
5987 		}
5988 	}
5989 	/*
5990 	 * Cleanup from fast fault failure.  Drop any object
5991 	 * lock other than original and drop map lock.
5992 	 */
5993 	if (object != cur_object) {
5994 		vm_object_unlock(cur_object);
5995 	}
5996 
5997 	/*
5998 	 * must own the object lock exclusively at this point
5999 	 */
6000 	if (object_lock_type == OBJECT_LOCK_SHARED) {
6001 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
6002 
6003 		if (vm_object_lock_upgrade(object) == FALSE) {
6004 			/*
6005 			 * couldn't upgrade, so explicitly
6006 			 * take the lock exclusively
6007 			 * no need to retry the fault at this
6008 			 * point since "vm_fault_page" will
6009 			 * completely re-evaluate the state
6010 			 */
6011 			vm_object_lock(object);
6012 		}
6013 	}
6014 
6015 handle_copy_delay:
6016 	vm_map_unlock_read(map);
6017 	if (real_map != map) {
6018 		vm_map_unlock(real_map);
6019 	}
6020 
6021 	if (__improbable(object == compressor_object ||
6022 	    is_kernel_object(object))) {
6023 		/*
6024 		 * These objects are explicitly managed and populated by the
6025 		 * kernel.  The virtual ranges backed by these objects should
6026 		 * either have wired pages or "holes" that are not supposed to
6027 		 * be accessed at all until they get explicitly populated.
6028 		 * We should never have to resolve a fault on a mapping backed
6029 		 * by one of these VM objects and providing a zero-filled page
6030 		 * would be wrong here, so let's fail the fault and let the
6031 		 * caller crash or recover.
6032 		 */
6033 		vm_object_unlock(object);
6034 		kr = KERN_MEMORY_ERROR;
6035 		goto done;
6036 	}
6037 
6038 	resilient_media_ref_transfer = false;
6039 	if (resilient_media_retry) {
6040 		/*
6041 		 * We could get here if we failed to get a free page
6042 		 * to zero-fill and had to take the slow path again.
6043 		 * Reset our "recovery-from-failed-media" state.
6044 		 */
6045 		assert(resilient_media_object != VM_OBJECT_NULL);
6046 		assert(resilient_media_offset != (vm_object_offset_t)-1);
6047 		/* release our extra reference on failed object */
6048 //             printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6049 		if (object == resilient_media_object) {
6050 			/*
6051 			 * We're holding "object"'s lock, so we can't release
6052 			 * our extra reference at this point.
6053 			 * We need an extra reference on "object" anyway
6054 			 * (see below), so let's just transfer this reference.
6055 			 */
6056 			resilient_media_ref_transfer = true;
6057 		} else {
6058 			vm_object_deallocate(resilient_media_object);
6059 		}
6060 		resilient_media_object = VM_OBJECT_NULL;
6061 		resilient_media_offset = (vm_object_offset_t)-1;
6062 		resilient_media_retry = false;
6063 		vm_fault_resilient_media_abort2++;
6064 	}
6065 
6066 	/*
6067 	 * Make a reference to this object to
6068 	 * prevent its disposal while we are messing with
6069 	 * it.  Once we have the reference, the map is free
6070 	 * to be diddled.  Since objects reference their
6071 	 * shadows (and copies), they will stay around as well.
6072 	 */
6073 	if (resilient_media_ref_transfer) {
6074 		/* we already have an extra reference on this object */
6075 		resilient_media_ref_transfer = false;
6076 	} else {
6077 		vm_object_reference_locked(object);
6078 	}
6079 	vm_object_paging_begin(object);
6080 
6081 	set_thread_pagein_error(cthread, 0);
6082 	error_code = 0;
6083 
6084 	result_page = VM_PAGE_NULL;
6085 	vm_fault_return_t err = vm_fault_page(object, offset, fault_type,
6086 	    (fault_info->fi_change_wiring && !wired),
6087 	    FALSE,                /* page not looked up */
6088 	    &prot, &result_page, &top_page,
6089 	    &type_of_fault,
6090 	    &error_code, map->no_zero_fill,
6091 	    fault_info);
6092 
6093 	/*
6094 	 * if err != VM_FAULT_SUCCESS, then the paging reference
6095 	 * has been dropped and the object unlocked... the ref_count
6096 	 * is still held
6097 	 *
6098 	 * if err == VM_FAULT_SUCCESS, then the paging reference
6099 	 * is still held along with the ref_count on the original object
6100 	 *
6101 	 *	the object is returned locked with a paging reference
6102 	 *
6103 	 *	if top_page != NULL, then it's BUSY and the
6104 	 *	object it belongs to has a paging reference
6105 	 *	but is returned unlocked
6106 	 */
6107 	if (err != VM_FAULT_SUCCESS &&
6108 	    err != VM_FAULT_SUCCESS_NO_VM_PAGE) {
6109 		if (err == VM_FAULT_MEMORY_ERROR &&
6110 		    fault_info->resilient_media) {
6111 			assertf(object->internal, "object %p", object);
6112 			/*
6113 			 * This fault failed but the mapping was
6114 			 * "media resilient", so we'll retry the fault in
6115 			 * recovery mode to get a zero-filled page in the
6116 			 * top object.
6117 			 * Keep the reference on the failing object so
6118 			 * that we can check that the mapping is still
6119 			 * pointing to it when we retry the fault.
6120 			 */
6121 //                     printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
6122 			assert(!resilient_media_retry); /* no double retry */
6123 			assert(resilient_media_object == VM_OBJECT_NULL);
6124 			assert(resilient_media_offset == (vm_object_offset_t)-1);
6125 			resilient_media_retry = true;
6126 			resilient_media_object = object;
6127 			resilient_media_offset = offset;
6128 //                     printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
6129 			vm_fault_resilient_media_initiate++;
6130 			goto RetryFault;
6131 		} else {
6132 			/*
6133 			 * we didn't succeed, lose the object reference
6134 			 * immediately.
6135 			 */
6136 			vm_object_deallocate(object);
6137 			object = VM_OBJECT_NULL; /* no longer valid */
6138 		}
6139 
6140 		/*
6141 		 * See why we failed, and take corrective action.
6142 		 */
6143 		switch (err) {
6144 		case VM_FAULT_SUCCESS:
6145 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
6146 			/* These aren't possible but needed to make the switch exhaustive */
6147 			break;
6148 		case VM_FAULT_MEMORY_SHORTAGE:
6149 			if (vm_page_wait((fault_info->fi_change_wiring) ?
6150 			    THREAD_UNINT :
6151 			    THREAD_ABORTSAFE)) {
6152 				goto RetryFault;
6153 			}
6154 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_MEMORY_SHORTAGE), 0 /* arg */);
6155 			OS_FALLTHROUGH;
6156 		case VM_FAULT_INTERRUPTED:
6157 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
6158 			kr = KERN_ABORTED;
6159 			goto done;
6160 		case VM_FAULT_RETRY:
6161 			goto RetryFault;
6162 		case VM_FAULT_MEMORY_ERROR:
6163 			if (error_code) {
6164 				kr = error_code;
6165 			} else {
6166 				kr = KERN_MEMORY_ERROR;
6167 			}
6168 			goto done;
6169 		case VM_FAULT_BUSY:
6170 			kr = KERN_ALREADY_WAITING;
6171 			goto done;
6172 		}
6173 	}
6174 	m = result_page;
6175 	m_object = NULL;
6176 
6177 	if (m != VM_PAGE_NULL) {
6178 		m_object = VM_PAGE_OBJECT(m);
6179 		assert((fault_info->fi_change_wiring && !wired) ?
6180 		    (top_page == VM_PAGE_NULL) :
6181 		    ((top_page == VM_PAGE_NULL) == (m_object == object)));
6182 	}
6183 
6184 	/*
6185 	 * What to do with the resulting page from vm_fault_page
6186 	 * if it doesn't get entered into the physical map:
6187 	 */
6188 #define RELEASE_PAGE(m)                                 \
6189 	MACRO_BEGIN                                     \
6190 	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);                            \
6191 	if ( !VM_PAGE_PAGEABLE(m)) {                    \
6192 	        vm_page_lockspin_queues();              \
6193 	        if ( !VM_PAGE_PAGEABLE(m))              \
6194 	                vm_page_activate(m);            \
6195 	        vm_page_unlock_queues();                \
6196 	}                                               \
6197 	MACRO_END
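	/*
	 * RELEASE_PAGE() wakes any waiters and clears the busy bit, then
	 * activates the page if it isn't already on a pageable queue,
	 * presumably so a page we end up not mapping doesn't linger off
	 * the paging queues.
	 */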
6198 
6199 
6200 	object_locks_dropped = FALSE;
6201 	/*
6202 	 * We must verify that the maps have not changed
6203 	 * since our last lookup. vm_map_verify() needs the
6204 	 * map lock (shared) but we are holding object locks.
6205 	 * So we do a try_lock() first and, if that fails, we
6206 	 * drop the object locks and go in for the map lock again.
6207 	 */
6208 	if (m != VM_PAGE_NULL) {
6209 		old_copy_object = m_object->vo_copy;
6210 		old_copy_version = m_object->vo_copy_version;
6211 	} else {
6212 		old_copy_object = VM_OBJECT_NULL;
6213 		old_copy_version = 0;
6214 	}
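	/*
	 * Remember the copy object and version seen while the object was
	 * still locked; presumably they are rechecked after the map is
	 * re-validated, mirroring the saved_copy_object/saved_copy_version
	 * check in the zero-fill fast path above.
	 */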
6215 	if (!vm_map_try_lock_read(original_map)) {
6216 		if (m != VM_PAGE_NULL) {
6217 			vm_object_unlock(m_object);
6218 		} else {
6219 			vm_object_unlock(object);
6220 		}
6221 
6222 		object_locks_dropped = TRUE;
6223 
6224 		vm_map_lock_read(original_map);
6225 	}
6226 
6227 	if ((map != original_map) || !vm_map_verify(map, &version)) {
6228 		if (object_locks_dropped == FALSE) {
6229 			if (m != VM_PAGE_NULL) {
6230 				vm_object_unlock(m_object);
6231 			} else {
6232 				vm_object_unlock(object);
6233 			}
6234 
6235 			object_locks_dropped = TRUE;
6236 		}
6237 
6238 		/*
6239 		 * no object locks are held at this point
6240 		 */
6241 		vm_object_t             retry_object;
6242 		vm_object_offset_t      retry_offset;
6243 		vm_prot_t               retry_prot;
6244 
6245 		/*
6246 		 * To avoid trying to write_lock the map while another
6247 		 * thread has it read_locked (in vm_map_pageable), we
6248 		 * do not try for write permission.  If the page is
6249 		 * still writable, we will get write permission.  If it
6250 		 * is not, or has been marked needs_copy, we enter the
6251 		 * mapping without write permission, and will merely
6252 		 * take another fault.
6253 		 */
6254 		map = original_map;
6255 
6256 		kr = vm_map_lookup_and_lock_object(&map, vaddr,
6257 		    fault_type & ~VM_PROT_WRITE,
6258 		    OBJECT_LOCK_EXCLUSIVE, &version,
6259 		    &retry_object, &retry_offset, &retry_prot,
6260 		    &wired,
6261 		    fault_info,
6262 		    &real_map,
6263 		    NULL);
6264 		pmap = real_map->pmap;
6265 
6266 		if (kr != KERN_SUCCESS) {
6267 			vm_map_unlock_read(map);
6268 
6269 			if (m != VM_PAGE_NULL) {
6270 				assert(VM_PAGE_OBJECT(m) == m_object);
6271 
6272 				/*
6273 				 * retake the lock so that
6274 				 * we can drop the paging reference
6275 				 * in vm_fault_cleanup and do the
6276 				 * vm_page_wakeup_done() in RELEASE_PAGE
6277 				 */
6278 				vm_object_lock(m_object);
6279 
6280 				RELEASE_PAGE(m);
6281 
6282 				vm_fault_cleanup(m_object, top_page);
6283 			} else {
6284 				/*
6285 				 * retake the lock so that
6286 				 * we can drop the paging reference
6287 				 * in vm_fault_cleanup
6288 				 */
6289 				vm_object_lock(object);
6290 
6291 				vm_fault_cleanup(object, top_page);
6292 			}
6293 			vm_object_deallocate(object);
6294 
6295 			if (kr == KERN_INVALID_ADDRESS) {
6296 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
6297 			}
6298 			goto done;
6299 		}
6300 		vm_object_unlock(retry_object);
6301 
6302 		if ((retry_object != object) || (retry_offset != offset)) {
6303 			vm_map_unlock_read(map);
6304 			if (real_map != map) {
6305 				vm_map_unlock(real_map);
6306 			}
6307 
6308 			if (m != VM_PAGE_NULL) {
6309 				assert(VM_PAGE_OBJECT(m) == m_object);
6310 
6311 				/*
6312 				 * retake the lock so that
6313 				 * we can drop the paging reference
6314 				 * in vm_fault_cleanup and do the
6315 				 * vm_page_wakeup_done() in RELEASE_PAGE
6316 				 */
6317 				vm_object_lock(m_object);
6318 
6319 				RELEASE_PAGE(m);
6320 
6321 				vm_fault_cleanup(m_object, top_page);
6322 			} else {
6323 				/*
6324 				 * retake the lock so that
6325 				 * we can drop the paging reference
6326 				 * in vm_fault_cleanup
6327 				 */
6328 				vm_object_lock(object);
6329 
6330 				vm_fault_cleanup(object, top_page);
6331 			}
6332 			vm_object_deallocate(object);
6333 
6334 			goto RetryFault;
6335 		}
6336 		/*
6337 		 * Check whether the protection has changed or the object
6338 		 * has been copied while we left the map unlocked.
6339 		 */
6340 		if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
6341 			/* If the pmap layer cares, pass the full set. */
6342 			prot = retry_prot;
6343 		} else {
6344 			prot &= retry_prot;
6345 		}
6346 	}
6347 
6348 	if (object_locks_dropped == TRUE) {
6349 		if (m != VM_PAGE_NULL) {
6350 			assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6351 			assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6352 			vm_object_lock(m_object);
6353 		} else {
6354 			vm_object_lock(object);
6355 		}
6356 
6357 		object_locks_dropped = FALSE;
6358 	}
6359 
6360 	if ((prot & VM_PROT_WRITE) &&
6361 	    m != VM_PAGE_NULL &&
6362 	    (m_object->vo_copy != old_copy_object ||
6363 	    m_object->vo_copy_version != old_copy_version)) {
6364 		/*
6365 		 * The copy object changed while the top-level object
6366 		 * was unlocked, so take away write permission.
6367 		 */
6368 		if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
6369 			/*
6370 			 * This pmap enforces extra constraints for this set
6371 			 * of protections, so we can't change the protections.
6372 			 * This mapping should have been setup to avoid
6373 			 * copy-on-write since that requires removing write
6374 			 * access.
6375 			 */
6376 			panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x m%p obj %p copyobj %p",
6377 			    __FUNCTION__, pmap, (uint64_t)vaddr, prot,
6378 			    fault_info->pmap_options,
6379 			    m, m_object, m_object->vo_copy);
6380 		}
6381 		prot &= ~VM_PROT_WRITE;
6382 	}
6383 
6384 	if (!need_copy &&
6385 	    !fault_info->no_copy_on_read &&
6386 	    m != VM_PAGE_NULL &&
6387 	    VM_PAGE_OBJECT(m) != object &&
6388 	    !VM_PAGE_OBJECT(m)->pager_trusted &&
6389 	    vm_protect_privileged_from_untrusted &&
6390 	    !VM_PAGE_OBJECT(m)->code_signed &&
6391 	    current_proc_is_privileged()) {
6392 		/*
6393 		 * We found the page we want in an "untrusted" VM object
6394 		 * down the shadow chain.  Since the target is "privileged"
6395 		 * we want to perform a copy-on-read of that page, so that the
6396 		 * mapped object gets a stable copy and does not have to
6397 		 * rely on the "untrusted" object to provide the same
6398 		 * contents if the page gets reclaimed and has to be paged
6399 		 * in again later on.
6400 		 *
6401 		 * Special case: if the mapping is executable and the untrusted
6402 		 * object is code-signed and the process is "cs_enforced", we
6403 		 * do not copy-on-read because that would break code-signing
6404 		 * enforcement expectations (an executable page must belong
6405 		 * to a code-signed object) and we can rely on code-signing
6406 		 * to re-validate the page if it gets evicted and paged back in.
6407 		 */
6408 //		printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
6409 		vm_copied_on_read++;
6410 		need_copy_on_read = TRUE;
6411 		need_copy = TRUE;
6412 	} else {
6413 		need_copy_on_read = FALSE;
6414 	}
6415 
6416 	/*
6417 	 * If we want to wire down this page, but no longer have
6418 	 * adequate permissions, we must start all over.
6419 	 * If we decided to copy-on-read, we must also start all over.
6420 	 */
6421 	if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
6422 	    need_copy_on_read) {
6423 		vm_map_unlock_read(map);
6424 		if (real_map != map) {
6425 			vm_map_unlock(real_map);
6426 		}
6427 
6428 		if (m != VM_PAGE_NULL) {
6429 			assert(VM_PAGE_OBJECT(m) == m_object);
6430 
6431 			RELEASE_PAGE(m);
6432 
6433 			vm_fault_cleanup(m_object, top_page);
6434 		} else {
6435 			vm_fault_cleanup(object, top_page);
6436 		}
6437 
6438 		vm_object_deallocate(object);
6439 
6440 		goto RetryFault;
6441 	}
6442 	if (m != VM_PAGE_NULL) {
6443 		/*
6444 		 * Put this page into the physical map.
6445 		 * We had to do the unlock above because pmap_enter
6446 		 * may cause other faults.  The page may be on
6447 		 * the pageout queues.  If the pageout daemon comes
6448 		 * across the page, it will remove it from the queues.
6449 		 */
6450 		if (fault_page_size < PAGE_SIZE) {
6451 			DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
6452 			assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
6453 			    fault_phys_offset < PAGE_SIZE),
6454 			    "0x%llx\n", (uint64_t)fault_phys_offset);
6455 		} else {
6456 			assertf(fault_phys_offset == 0,
6457 			    "0x%llx\n", (uint64_t)fault_phys_offset);
6458 		}
6459 		assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6460 		assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
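		/*
		 * Enter the page in the caller-supplied pmap if one was
		 * provided (e.g. when wiring on behalf of another pmap),
		 * otherwise in the pmap of the map we faulted against.
		 */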
6461 		if (caller_pmap) {
6462 			kr = vm_fault_enter(m,
6463 			    caller_pmap,
6464 			    caller_pmap_addr,
6465 			    fault_page_size,
6466 			    fault_phys_offset,
6467 			    prot,
6468 			    caller_prot,
6469 			    wired,
6470 			    wire_tag,
6471 			    fault_info,
6472 			    NULL,
6473 			    &type_of_fault,
6474 			    &object_lock_type);
6475 		} else {
6476 			kr = vm_fault_enter(m,
6477 			    pmap,
6478 			    vaddr,
6479 			    fault_page_size,
6480 			    fault_phys_offset,
6481 			    prot,
6482 			    caller_prot,
6483 			    wired,
6484 			    wire_tag,
6485 			    fault_info,
6486 			    NULL,
6487 			    &type_of_fault,
6488 			    &object_lock_type);
6489 		}
6490 		assert(VM_PAGE_OBJECT(m) == m_object);
6491 
6492 		{
6493 			int     event_code = 0;
6494 
6495 			if (m_object->internal) {
6496 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6497 			} else if (m_object->object_is_shared_cache) {
6498 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6499 			} else {
6500 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6501 			}
6502 
6503 			KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid());
6504 			KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid());
6505 
6506 			DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
6507 		}
6508 		if (kr != KERN_SUCCESS) {
6509 			/* abort this page fault */
6510 			vm_map_unlock_read(map);
6511 			if (real_map != map) {
6512 				vm_map_unlock(real_map);
6513 			}
6514 			vm_page_wakeup_done(m_object, m);
6515 			vm_fault_cleanup(m_object, top_page);
6516 			vm_object_deallocate(object);
6517 			goto done;
6518 		}
6519 		if (physpage_p != NULL) {
6520 			/* for vm_map_wire_and_extract() */
6521 			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6522 			if (prot & VM_PROT_WRITE) {
6523 				vm_object_lock_assert_exclusive(m_object);
6524 				m->vmp_dirty = TRUE;
6525 			}
6526 		}
6527 	} else {
6528 		vm_map_entry_t          entry;
6529 		vm_map_offset_t         laddr;
6530 		vm_map_offset_t         ldelta, hdelta;
6531 
6532 		/*
6533 		 * do a pmap block mapping from the physical address
6534 		 * in the object
6535 		 */
6536 
6537 		if (real_map != map) {
6538 			vm_map_unlock(real_map);
6539 		}
6540 
6541 		if (original_map != map) {
6542 			vm_map_unlock_read(map);
6543 			vm_map_lock_read(original_map);
6544 			map = original_map;
6545 		}
6546 		real_map = map;
6547 
6548 		laddr = vaddr;
6549 		hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
6550 
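		/*
		 * Walk down through any submaps to the terminal entry,
		 * shrinking ldelta/hdelta to the distance from the faulting
		 * address to each entry's start/end; this bounds the size
		 * of the block mapping set up below.
		 */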
6551 		while (vm_map_lookup_entry(map, laddr, &entry)) {
6552 			if (ldelta > (laddr - entry->vme_start)) {
6553 				ldelta = laddr - entry->vme_start;
6554 			}
6555 			if (hdelta > (entry->vme_end - laddr)) {
6556 				hdelta = entry->vme_end - laddr;
6557 			}
6558 			if (entry->is_sub_map) {
6559 				vm_map_t sub_map;
6560 				bool use_pmap;
6561 
6562 				laddr = ((laddr - entry->vme_start)
6563 				    + VME_OFFSET(entry));
6564 				vm_map_lock_read(VME_SUBMAP(entry));
6565 				sub_map = VME_SUBMAP(entry);
6566 				use_pmap = entry->use_pmap;
6567 				entry = VM_MAP_ENTRY_NULL; /* not valid after unlock */
6568 				if (map != real_map) {
6569 					vm_map_unlock_read(map);
6570 				}
6571 				if (use_pmap) {
6572 					vm_map_unlock_read(real_map);
6573 					real_map = sub_map;
6574 				}
6575 				map = sub_map;
6576 			} else {
6577 				break;
6578 			}
6579 		}
6580 
6581 		if (vm_map_lookup_entry(map, laddr, &entry) &&
6582 		    (!entry->is_sub_map) &&
6583 		    (object != VM_OBJECT_NULL) &&
6584 		    (VME_OBJECT(entry) == object)) {
6585 			uint16_t superpage;
6586 
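			/*
			 * A superpage mapping is only possible if the object
			 * has no pager, is physically contiguous, and the
			 * entry maps the entire object at an aligned address.
			 */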
6587 			if (!object->pager_created &&
6588 			    object->phys_contiguous &&
6589 			    VME_OFFSET(entry) == 0 &&
6590 			    (entry->vme_end - entry->vme_start == object->vo_size) &&
6591 			    VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6592 				superpage = VM_MEM_SUPERPAGE;
6593 			} else {
6594 				superpage = 0;
6595 			}
6596 
6597 			if (superpage && physpage_p) {
6598 				/* for vm_map_wire_and_extract() */
6599 				*physpage_p = (ppnum_t)
6600 				    ((((vm_map_offset_t)
6601 				    object->vo_shadow_offset)
6602 				    + VME_OFFSET(entry)
6603 				    + (laddr - entry->vme_start))
6604 				    >> PAGE_SHIFT);
6605 			}
6606 
6607 			/*
6608 			 * Set up a block mapped area
6609 			 */
6610 			assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6611 			pmap_t block_map_pmap;
6612 			addr64_t block_map_va;
6613 			pmap_paddr_t block_map_pa = (pmap_paddr_t)(((vm_map_offset_t)(object->vo_shadow_offset)) +
6614 			    VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta);
6615 			int block_map_wimg = VM_WIMG_MASK & (int)object->wimg_bits;
6616 			if (caller_pmap) {
6617 				block_map_pmap = caller_pmap;
6618 				block_map_va = (addr64_t)(caller_pmap_addr - ldelta);
6619 			} else {
6620 				block_map_pmap = real_map->pmap;
6621 				block_map_va = (addr64_t)(vaddr - ldelta);
6622 			}
6623 			kr = pmap_map_block_addr(block_map_pmap,
6624 			    block_map_va,
6625 			    block_map_pa,
6626 			    (uint32_t)((ldelta + hdelta) >> fault_page_shift),
6627 			    prot,
6628 			    block_map_wimg | superpage,
6629 			    0);
6630 
6631 			if (kr != KERN_SUCCESS) {
6632 				goto cleanup;
6633 			}
6634 		}
6635 	}
6636 
6637 	/*
6638 	 * Success
6639 	 */
6640 	kr = KERN_SUCCESS;
6641 
6642 	/*
6643 	 * TODO: could most of the done cases just use cleanup?
6644 	 */
6645 cleanup:
6646 	/*
6647 	 * Unlock everything, and return
6648 	 */
6649 	vm_map_unlock_read(map);
6650 	if (real_map != map) {
6651 		vm_map_unlock(real_map);
6652 	}
6653 
6654 	if (m != VM_PAGE_NULL) {
6655 		if (__improbable(rtfault &&
6656 		    !m->vmp_realtime &&
6657 		    vm_pageout_protect_realtime)) {
6658 			vm_page_lock_queues();
6659 			if (!m->vmp_realtime) {
6660 				m->vmp_realtime = true;
6661 				vm_page_realtime_count++;
6662 			}
6663 			vm_page_unlock_queues();
6664 		}
6665 		assert(VM_PAGE_OBJECT(m) == m_object);
6666 
6667 		if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
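		/*
		 * For a write fault on an external (file-backed) object,
		 * remember the pager and offset so we can tell the vnode
		 * pager that this range was dirtied once all the locks
		 * have been dropped (see "written_on_object" below).
		 */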
6668 			vm_object_paging_begin(m_object);
6669 
6670 			assert(written_on_object == VM_OBJECT_NULL);
6671 			written_on_object = m_object;
6672 			written_on_pager = m_object->pager;
6673 			written_on_offset = m_object->paging_offset + m->vmp_offset;
6674 		}
6675 		vm_page_wakeup_done(m_object, m);
6676 
6677 		vm_fault_cleanup(m_object, top_page);
6678 	} else {
6679 		vm_fault_cleanup(object, top_page);
6680 	}
6681 
6682 	vm_object_deallocate(object);
6683 
6684 #undef  RELEASE_PAGE
6685 
6686 done:
6687 	thread_interrupt_level(interruptible_state);
6688 
6689 	if (resilient_media_object != VM_OBJECT_NULL) {
6690 		assert(resilient_media_retry);
6691 		assert(resilient_media_offset != (vm_object_offset_t)-1);
6692 		/* release extra reference on failed object */
6693 //             printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6694 		vm_object_deallocate(resilient_media_object);
6695 		resilient_media_object = VM_OBJECT_NULL;
6696 		resilient_media_offset = (vm_object_offset_t)-1;
6697 		resilient_media_retry = false;
6698 		vm_fault_resilient_media_release++;
6699 	}
6700 	assert(!resilient_media_retry);
6701 
6702 	/*
6703 	 * Only I/O throttle on faults which cause a pagein/swapin.
6704 	 */
6705 	if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6706 		throttle_lowpri_io(1);
6707 	} else {
6708 		if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6709 			if ((throttle_delay = vm_page_throttled(TRUE))) {
6710 				if (vm_debug_events) {
6711 					if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6712 						VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6713 					} else if (type_of_fault == DBG_COW_FAULT) {
6714 						VM_DEBUG_EVENT(vmf_cowdelay, DBG_VM_FAULT_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6715 					} else {
6716 						VM_DEBUG_EVENT(vmf_zfdelay, DBG_VM_FAULT_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6717 					}
6718 				}
6719 				__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
6720 			}
6721 		}
6722 	}
6723 
6724 	if (written_on_object) {
6725 		vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6726 
6727 		vm_object_lock(written_on_object);
6728 		vm_object_paging_end(written_on_object);
6729 		vm_object_unlock(written_on_object);
6730 
6731 		written_on_object = VM_OBJECT_NULL;
6732 	}
6733 
6734 	if (rtfault) {
6735 		vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6736 	}
6737 
6738 	KDBG_RELEASE(
6739 		(VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
6740 		((uint64_t)trace_vaddr >> 32),
6741 		trace_vaddr,
6742 		kr,
6743 		vm_fault_type_for_tracing(need_copy_on_read, type_of_fault));
6744 
6745 	if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6746 		DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6747 	}
6748 
6749 	return kr;
6750 }
6751 
6752 /*
6753  *	vm_fault_wire:
6754  *
6755  *	Wire down a range of virtual addresses in a map.
6756  */
6757 kern_return_t
6758 vm_fault_wire(
6759 	vm_map_t        map,
6760 	vm_map_entry_t  entry,
6761 	vm_prot_t       prot,
6762 	vm_tag_t        wire_tag,
6763 	pmap_t          pmap,
6764 	vm_map_offset_t pmap_addr,
6765 	ppnum_t         *physpage_p)
6766 {
6767 	vm_map_offset_t va;
6768 	vm_map_offset_t end_addr = entry->vme_end;
6769 	kern_return_t   rc;
6770 	vm_map_size_t   effective_page_size;
6771 
6772 	assert(entry->in_transition);
6773 
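	/*
	 * Physically contiguous memory is effectively wired from the
	 * start, so there is nothing to do here; vm_fault_unwire() makes
	 * the matching assumption.
	 */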
6774 	if (!entry->is_sub_map &&
6775 	    VME_OBJECT(entry) != VM_OBJECT_NULL &&
6776 	    VME_OBJECT(entry)->phys_contiguous) {
6777 		return KERN_SUCCESS;
6778 	}
6779 
6780 	/*
6781 	 *	Inform the physical mapping system that the
6782 	 *	range of addresses may not fault, so that
6783 	 *	page tables and such can be locked down as well.
6784 	 */
6785 
6786 	pmap_pageable(pmap, pmap_addr,
6787 	    pmap_addr + (end_addr - entry->vme_start), FALSE);
6788 
6789 	/*
6790 	 *	We simulate a fault to get the page and enter it
6791 	 *	in the physical map.
6792 	 */
6793 
6794 	effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6795 	for (va = entry->vme_start;
6796 	    va < end_addr;
6797 	    va += effective_page_size) {
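		/*
		 * Try the fast wire path first; if it cannot handle this
		 * page (anything beyond the common case), fall back to a
		 * full vm_fault_internal() with fi_change_wiring set.
		 */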
6798 		rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6799 		    pmap_addr + (va - entry->vme_start),
6800 		    physpage_p);
6801 		if (rc != KERN_SUCCESS) {
6802 			struct vm_object_fault_info fault_info = {
6803 				.interruptible = (pmap == kernel_pmap) ? THREAD_UNINT : THREAD_ABORTSAFE,
6804 				.behavior = VM_BEHAVIOR_SEQUENTIAL,
6805 				.fi_change_wiring = true,
6806 			};
6807 			if (os_sub_overflow(end_addr, va, &fault_info.cluster_size)) {
6808 				fault_info.cluster_size = UPL_SIZE_MAX;
6809 			}
6810 			rc = vm_fault_internal(map, va, prot, wire_tag,
6811 			    pmap,
6812 			    (pmap_addr +
6813 			    (va - entry->vme_start)),
6814 			    physpage_p,
6815 			    &fault_info);
6816 			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6817 		}
6818 
6819 		if (rc != KERN_SUCCESS) {
6820 			struct vm_map_entry     tmp_entry = *entry;
6821 
6822 			/* unwire wired pages */
6823 			tmp_entry.vme_end = va;
6824 			vm_fault_unwire(map, &tmp_entry, FALSE,
6825 			    pmap, pmap_addr, tmp_entry.vme_end);
6826 
6827 			return rc;
6828 		}
6829 	}
6830 	return KERN_SUCCESS;
6831 }
6832 
6833 /*
6834  *	vm_fault_unwire:
6835  *
6836  *	Unwire a range of virtual addresses in a map.
6837  */
6838 void
6839 vm_fault_unwire(
6840 	vm_map_t        map,
6841 	vm_map_entry_t  entry,
6842 	boolean_t       deallocate,
6843 	pmap_t          pmap,
6844 	vm_map_offset_t pmap_addr,
6845 	vm_map_offset_t end_addr)
6846 {
6847 	vm_map_offset_t va;
6848 	vm_object_t     object;
6849 	struct vm_object_fault_info fault_info = {
6850 		.interruptible = THREAD_UNINT,
6851 	};
6852 	unsigned int    unwired_pages;
6853 	vm_map_size_t   effective_page_size;
6854 
6855 	object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6856 
6857 	/*
6858 	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6859 	 * do anything since such memory is wired by default.  So we don't have
6860 	 * anything to undo here.
6861 	 */
6862 
6863 	if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6864 		return;
6865 	}
6866 
6867 	fault_info.interruptible = THREAD_UNINT;
6868 	fault_info.behavior = entry->behavior;
6869 	fault_info.user_tag = VME_ALIAS(entry);
6870 	if (entry->iokit_acct ||
6871 	    (!entry->is_sub_map && !entry->use_pmap)) {
6872 		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6873 	}
6874 	fault_info.lo_offset = VME_OFFSET(entry);
6875 	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6876 	fault_info.no_cache = entry->no_cache;
6877 	fault_info.stealth = TRUE;
6878 	if (entry->vme_xnu_user_debug) {
6879 		/*
6880 		 * Modified code-signed executable region: wired pages must
6881 		 * have been copied, so they should be XNU_USER_DEBUG rather
6882 		 * than XNU_USER_EXEC.
6883 		 */
6884 		fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6885 	}
6886 
6887 	unwired_pages = 0;
6888 
6889 	/*
6890 	 *	Since the pages are wired down, we must be able to
6891 	 *	get their mappings from the physical map system.
6892 	 */
6893 
6894 	effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6895 	for (va = entry->vme_start;
6896 	    va < end_addr;
6897 	    va += effective_page_size) {
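		/*
		 * With no backing object (e.g. a submap entry) just clear
		 * the pmap wiring and let a regular fault do the rest;
		 * otherwise look up the wired page with vm_fault_page() so
		 * it can be unwired or freed below.
		 */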
6898 		if (object == VM_OBJECT_NULL) {
6899 			if (pmap) {
6900 				pmap_change_wiring(pmap,
6901 				    pmap_addr + (va - entry->vme_start), FALSE);
6902 			}
6903 			(void) vm_fault(map, va, VM_PROT_NONE,
6904 			    TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6905 		} else {
6906 			vm_prot_t       prot;
6907 			vm_page_t       result_page;
6908 			vm_page_t       top_page;
6909 			vm_object_t     result_object;
6910 			vm_fault_return_t result;
6911 
6912 			/* cap cluster size at maximum UPL size */
6913 			upl_size_t cluster_size;
6914 			if (os_sub_overflow(end_addr, va, &cluster_size)) {
6915 				cluster_size = UPL_SIZE_MAX;
6916 			}
6917 			fault_info.cluster_size = cluster_size;
6918 
6919 			do {
6920 				prot = VM_PROT_NONE;
6921 
6922 				vm_object_lock(object);
6923 				vm_object_paging_begin(object);
6924 				result_page = VM_PAGE_NULL;
6925 				result = vm_fault_page(
6926 					object,
6927 					(VME_OFFSET(entry) +
6928 					(va - entry->vme_start)),
6929 					VM_PROT_NONE, TRUE,
6930 					FALSE, /* page not looked up */
6931 					&prot, &result_page, &top_page,
6932 					(int *)0,
6933 					NULL, map->no_zero_fill,
6934 					&fault_info);
6935 			} while (result == VM_FAULT_RETRY);
6936 
6937 			/*
6938 			 * If this was a mapping to a file on a device that has been forcibly
6939 			 * unmounted, then we won't get a page back from vm_fault_page().  Just
6940 			 * move on to the next one in case the remaining pages are mapped from
6941 			 * different objects.  During a forced unmount, the object is terminated
6942 			 * so the alive flag will be false if this happens.  A forced unmount
6943 			 * will occur when an external disk is unplugged before the user does an
6944 			 * eject, so we don't want to panic in that situation.
6945 			 */
6946 
6947 			if (result == VM_FAULT_MEMORY_ERROR) {
6948 				if (!object->alive) {
6949 					continue;
6950 				}
6951 				if (!object->internal && object->pager == NULL) {
6952 					continue;
6953 				}
6954 			}
6955 
6956 			if (result == VM_FAULT_MEMORY_ERROR &&
6957 			    is_kernel_object(object)) {
6958 				/*
6959 				 * This must have been allocated with
6960 				 * KMA_KOBJECT and KMA_VAONLY and there's
6961 				 * no physical page at this offset.
6962 				 * We're done (no page to free).
6963 				 */
6964 				assert(deallocate);
6965 				continue;
6966 			}
6967 
6968 			if (result != VM_FAULT_SUCCESS) {
6969 				panic("vm_fault_unwire: failure");
6970 			}
6971 
6972 			result_object = VM_PAGE_OBJECT(result_page);
6973 
6974 			if (deallocate) {
6975 				assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6976 				    vm_page_fictitious_addr);
6977 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6978 				if (VM_PAGE_WIRED(result_page)) {
6979 					unwired_pages++;
6980 				}
6981 				VM_PAGE_FREE(result_page);
6982 			} else {
6983 				if (pmap && !vm_page_is_guard(result_page)) {
6984 					pmap_change_wiring(pmap,
6985 					    pmap_addr + (va - entry->vme_start), FALSE);
6986 				}
6987 
6988 
6989 				if (VM_PAGE_WIRED(result_page)) {
6990 					vm_page_lockspin_queues();
6991 					vm_page_unwire(result_page, TRUE);
6992 					vm_page_unlock_queues();
6993 					unwired_pages++;
6994 				}
6995 				if (entry->zero_wired_pages &&
6996 				    (entry->protection & VM_PROT_WRITE) &&
6997 #if __arm64e__
6998 				    !entry->used_for_tpro &&
6999 #endif /* __arm64e__ */
7000 				    !entry->used_for_jit) {
7001 					pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
7002 				}
7003 
7004 				vm_page_wakeup_done(result_object, result_page);
7005 			}
7006 			vm_fault_cleanup(result_object, top_page);
7007 		}
7008 	}
7009 
7010 	/*
7011 	 *	Inform the physical mapping system that the range
7012 	 *	of addresses may fault, so that page tables and
7013 	 *	such may be unwired themselves.
7014 	 */
7015 
7016 	pmap_pageable(pmap, pmap_addr,
7017 	    pmap_addr + (end_addr - entry->vme_start), TRUE);
7018 
7019 	if (is_kernel_object(object)) {
7020 		/*
7021 		 * Would like to make user_tag in vm_object_fault_info
7022 		 * vm_tag_t (unsigned short) but user_tag derives its value from
7023 		 * VME_ALIAS(entry) at a few places and VME_ALIAS, in turn, casts
7024 		 * to an _unsigned int_ which is used by non-fault_info paths throughout the
7025 		 * code at many places.
7026 		 *
7027 		 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
7028 		 */
7029 		assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
7030 		    "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
7031 		vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages), NULL);
7032 	}
7033 }
7034 
7035 /*
7036  *	vm_fault_wire_fast:
7037  *
7038  *	Handle common case of a wire down page fault at the given address.
7039  *	If successful, the page is inserted into the associated physical map.
7040  *	The map entry is passed in to avoid the overhead of a map lookup.
7041  *
7042  *	NOTE: the given address should be truncated to the
7043  *	proper page address.
7044  *
7045  *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
7046  *	a standard error specifying why the fault is fatal is returned.
7047  *
7048  *	The map in question must be referenced, and remains so.
7049  *	Caller has a read lock on the map.
7050  *
7051  *	This is a stripped version of vm_fault() for wiring pages.  Anything
7052  *	other than the common case will return KERN_FAILURE, and the caller
7053  *	is expected to call vm_fault().
7054  */
7055 static kern_return_t
7056 vm_fault_wire_fast(
7057 	__unused vm_map_t       map,
7058 	vm_map_offset_t va,
7059 	__unused vm_prot_t       caller_prot,
7060 	vm_tag_t        wire_tag,
7061 	vm_map_entry_t  entry,
7062 	pmap_t          pmap,
7063 	vm_map_offset_t pmap_addr,
7064 	ppnum_t         *physpage_p)
7065 {
7066 	vm_object_t             object;
7067 	vm_object_offset_t      offset;
7068 	vm_page_t               m;
7069 	vm_prot_t               prot;
7070 	thread_t                thread = current_thread();
7071 	int                     type_of_fault;
7072 	kern_return_t           kr;
7073 	vm_map_size_t           fault_page_size;
7074 	vm_map_offset_t         fault_phys_offset;
7075 	struct vm_object_fault_info fault_info = {
7076 		.interruptible = THREAD_UNINT,
7077 	};
7078 	uint8_t                 object_lock_type = 0;
7079 
7080 	counter_inc(&vm_statistics_faults);
7081 
7082 	if (thread != THREAD_NULL) {
7083 		counter_inc(&get_threadtask(thread)->faults);
7084 	}
7085 
7086 /*
7087  *	Recovery actions
7088  */
7089 
7090 #undef  RELEASE_PAGE
7091 #define RELEASE_PAGE(m) {                               \
7092 	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);                            \
7093 	vm_page_lockspin_queues();                      \
7094 	vm_page_unwire(m, TRUE);                        \
7095 	vm_page_unlock_queues();                        \
7096 }
7097 
7098 
7099 #undef  UNLOCK_THINGS
7100 #define UNLOCK_THINGS   {                               \
7101 	vm_object_paging_end(object);                      \
7102 	vm_object_unlock(object);                          \
7103 }
7104 
7105 #undef  UNLOCK_AND_DEALLOCATE
7106 #define UNLOCK_AND_DEALLOCATE   {                       \
7107 	UNLOCK_THINGS;                                  \
7108 	vm_object_deallocate(object);                   \
7109 }
7110 /*
7111  *	Give up and have caller do things the hard way.
7112  */
7113 
7114 #define GIVE_UP {                                       \
7115 	UNLOCK_AND_DEALLOCATE;                          \
7116 	return(KERN_FAILURE);                           \
7117 }
7118 
7119 
7120 	/*
7121 	 *	If this entry is not directly to a vm_object, bail out.
7122 	 */
7123 	if (entry->is_sub_map) {
7124 		assert(physpage_p == NULL);
7125 		return KERN_FAILURE;
7126 	}
7127 
7128 	/*
7129 	 *	Find the backing store object and offset into it.
7130 	 */
7131 
7132 	object = VME_OBJECT(entry);
7133 	offset = (va - entry->vme_start) + VME_OFFSET(entry);
7134 	prot = entry->protection;
7135 
7136 	/*
7137 	 *	Make a reference to this object to prevent its
7138 	 *	disposal while we are messing with it.
7139 	 */
7140 
7141 	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
7142 	vm_object_lock(object);
7143 	vm_object_reference_locked(object);
7144 	vm_object_paging_begin(object);
7145 
7146 	/*
7147 	 *	INVARIANTS (through entire routine):
7148 	 *
7149 	 *	1)	At all times, we must either have the object
7150 	 *		lock or a busy page in some object to prevent
7151 	 *		some other thread from trying to bring in
7152 	 *		the same page.
7153 	 *
7154 	 *	2)	Once we have a busy page, we must remove it from
7155 	 *		the pageout queues, so that the pageout daemon
7156 	 *		will not grab it away.
7157 	 *
7158 	 */
7159 
7160 	/*
7161 	 *	Look for page in top-level object.  If it's not there or
7162 	 *	there's something going on, give up.
7163 	 */
7164 	m = vm_page_lookup(object, vm_object_trunc_page(offset));
7165 	if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
7166 	    (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
7167 		GIVE_UP;
7168 	}
7169 	if (vm_page_is_guard(m)) {
7170 		/*
7171 		 * Guard pages are fictitious pages and are never
7172 		 * entered into a pmap, so let's say it's been wired...
7173 		 */
7174 		kr = KERN_SUCCESS;
7175 		goto done;
7176 	}
7177 
7178 	/*
7179 	 *	Wire the page down now.  All bail outs beyond this
7180 	 *	point must unwire the page.
7181 	 */
7182 
7183 	vm_page_lockspin_queues();
7184 	vm_page_wire(m, wire_tag, TRUE);
7185 	vm_page_unlock_queues();
7186 
7187 	/*
7188 	 *	Mark page busy for other threads.
7189 	 */
7190 	assert(!m->vmp_busy);
7191 	m->vmp_busy = TRUE;
7192 	assert(!m->vmp_absent);
7193 
7194 	/*
7195 	 *	Give up if the page is being written and there's a copy object
7196 	 */
7197 	if ((object->vo_copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
7198 		RELEASE_PAGE(m);
7199 		GIVE_UP;
7200 	}
7201 
7202 	fault_info.user_tag = VME_ALIAS(entry);
7203 	fault_info.pmap_options = 0;
7204 	if (entry->iokit_acct ||
7205 	    (!entry->is_sub_map && !entry->use_pmap)) {
7206 		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
7207 	}
7208 	if (entry->vme_xnu_user_debug) {
7209 		/*
7210 		 * Modified code-signed executable region: wiring will
7211 		 * copy the pages, so they should be XNU_USER_DEBUG rather
7212 		 * than XNU_USER_EXEC.
7213 		 */
7214 		fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
7215 	}
7216 
7217 	if (entry->translated_allow_execute) {
7218 		fault_info.pmap_options |= PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE;
7219 	}
7220 
7221 	fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
7222 	fault_phys_offset = offset - vm_object_trunc_page(offset);
7223 
7224 	/*
7225 	 *	Put this page into the physical map.
7226 	 */
7227 	type_of_fault = DBG_CACHE_HIT_FAULT;
7228 	assert3p(VM_PAGE_OBJECT(m), ==, object);
7229 	kr = vm_fault_enter(m,
7230 	    pmap,
7231 	    pmap_addr,
7232 	    fault_page_size,
7233 	    fault_phys_offset,
7234 	    prot,
7235 	    prot,
7236 	    TRUE,                  /* wired */
7237 	    wire_tag,
7238 	    &fault_info,
7239 	    NULL,
7240 	    &type_of_fault,
7241 	    &object_lock_type); /* Exclusive lock mode. Will remain unchanged.*/
7242 	if (kr != KERN_SUCCESS) {
7243 		RELEASE_PAGE(m);
7244 		GIVE_UP;
7245 	}
7246 
7247 
7248 done:
7249 	/*
7250 	 *	Unlock everything, and return
7251 	 */
7252 
7253 	if (physpage_p) {
7254 		/* for vm_map_wire_and_extract() */
7255 		if (kr == KERN_SUCCESS) {
7256 			assert3p(object, ==, VM_PAGE_OBJECT(m));
7257 			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
7258 			if (prot & VM_PROT_WRITE) {
7259 				vm_object_lock_assert_exclusive(object);
7260 				m->vmp_dirty = TRUE;
7261 			}
7262 		} else {
7263 			*physpage_p = 0;
7264 		}
7265 	}
7266 
7267 	if (m->vmp_busy) {
7268 		vm_page_wakeup_done(object, m);
7269 	}
7270 
7271 	UNLOCK_AND_DEALLOCATE;
7272 
7273 	return kr;
7274 }
7275 
7276 /*
7277  *	Routine:	vm_fault_copy_cleanup
7278  *	Purpose:
7279  *		Release a page used by vm_fault_copy.
7280  */
7281 
7282 static void
7283 vm_fault_copy_cleanup(
7284 	vm_page_t       page,
7285 	vm_page_t       top_page)
7286 {
7287 	vm_object_t     object = VM_PAGE_OBJECT(page);
7288 
7289 	vm_object_lock(object);
7290 	vm_page_wakeup_done(object, page);
7291 	if (!VM_PAGE_PAGEABLE(page)) {
7292 		vm_page_lockspin_queues();
7293 		if (!VM_PAGE_PAGEABLE(page)) {
7294 			vm_page_activate(page);
7295 		}
7296 		vm_page_unlock_queues();
7297 	}
7298 	vm_fault_cleanup(object, top_page);
7299 }
7300 
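/*
 *	Routine:	vm_fault_copy_dst_cleanup
 *	Purpose:
 *		Release the wiring and paging reference taken on the
 *		destination page by vm_fault_copy.
 */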
7301 static void
7302 vm_fault_copy_dst_cleanup(
7303 	vm_page_t       page)
7304 {
7305 	vm_object_t     object;
7306 
7307 	if (page != VM_PAGE_NULL) {
7308 		object = VM_PAGE_OBJECT(page);
7309 		vm_object_lock(object);
7310 		vm_page_lockspin_queues();
7311 		vm_page_unwire(page, TRUE);
7312 		vm_page_unlock_queues();
7313 		vm_object_paging_end(object);
7314 		vm_object_unlock(object);
7315 	}
7316 }
7317 
7318 /*
7319  *	Routine:	vm_fault_copy
7320  *
7321  *	Purpose:
7322  *		Copy pages from one virtual memory object to another --
7323  *		neither the source nor destination pages need be resident.
7324  *
7325  *		Before actually copying a page, the version associated with
7326  *		the destination address map will be verified.
7327  *
7328  *	In/out conditions:
7329  *		The caller must hold a reference, but not a lock, to
7330  *		each of the source and destination objects and to the
7331  *		destination map.
7332  *
7333  *	Results:
7334  *		Returns KERN_SUCCESS if no errors were encountered in
7335  *		reading or writing the data.  Returns KERN_INTERRUPTED if
7336  *		the operation was interrupted (only possible if the
7337  *		"interruptible" argument is asserted).  Other return values
7338  *		indicate a permanent error in copying the data.
7339  *
7340  *		The actual amount of data copied will be returned in the
7341  *		"copy_size" argument.  In the event that the destination map
7342  *		verification failed, this amount may be less than the amount
7343  *		requested.
7344  */
7345 kern_return_t
7346 vm_fault_copy(
7347 	vm_object_t             src_object,
7348 	vm_object_offset_t      src_offset,
7349 	vm_map_size_t           *copy_size,             /* INOUT */
7350 	vm_object_t             dst_object,
7351 	vm_object_offset_t      dst_offset,
7352 	vm_map_t                dst_map,
7353 	vm_map_version_t         *dst_version,
7354 	int                     interruptible)
7355 {
7356 	vm_page_t               result_page;
7357 
7358 	vm_page_t               src_page;
7359 	vm_page_t               src_top_page;
7360 	vm_prot_t               src_prot;
7361 
7362 	vm_page_t               dst_page;
7363 	vm_page_t               dst_top_page;
7364 	vm_prot_t               dst_prot;
7365 
7366 	vm_map_size_t           amount_left;
7367 	vm_object_t             old_copy_object;
7368 	uint32_t                old_copy_version;
7369 	vm_object_t             result_page_object = NULL;
7370 	kern_return_t           error = 0;
7371 	vm_fault_return_t       result;
7372 
7373 	vm_map_size_t           part_size;
7374 	struct vm_object_fault_info fault_info_src = {};
7375 	struct vm_object_fault_info fault_info_dst = {};
7376 
7377 	/*
7378 	 * In order not to confuse the clustered pageins, align
7379 	 * the different offsets on a page boundary.
7380 	 */
7381 
7382 #define RETURN(x)                                       \
7383 	MACRO_BEGIN                                     \
7384 	*copy_size -= amount_left;                      \
7385 	MACRO_RETURN(x);                                \
7386 	MACRO_END
7387 
7388 	amount_left = *copy_size;
7389 
7390 	fault_info_src.interruptible = interruptible;
7391 	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
7392 	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
7393 	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
7394 	fault_info_src.stealth = TRUE;
7395 
7396 	fault_info_dst.interruptible = interruptible;
7397 	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
7398 	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
7399 	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
7400 	fault_info_dst.stealth = TRUE;
7401 
7402 	do { /* while (amount_left > 0) */
7403 		/*
7404 		 * There may be a deadlock if both source and destination
7405 		 * pages are the same. To avoid this deadlock, the copy must
7406 		 * start by getting the destination page in order to apply
7407 		 * COW semantics if any.
7408 		 */
7409 
7410 RetryDestinationFault:;
7411 
7412 		dst_prot = VM_PROT_WRITE | VM_PROT_READ;
7413 
7414 		vm_object_lock(dst_object);
7415 		vm_object_paging_begin(dst_object);
7416 
7417 		/* cap cluster size at maximum UPL size */
7418 		upl_size_t cluster_size;
7419 		if (os_convert_overflow(amount_left, &cluster_size)) {
7420 			cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7421 		}
7422 		fault_info_dst.cluster_size = cluster_size;
7423 
7424 		dst_page = VM_PAGE_NULL;
7425 		result = vm_fault_page(dst_object,
7426 		    vm_object_trunc_page(dst_offset),
7427 		    VM_PROT_WRITE | VM_PROT_READ,
7428 		    FALSE,
7429 		    FALSE,                    /* page not looked up */
7430 		    &dst_prot, &dst_page, &dst_top_page,
7431 		    (int *)0,
7432 		    &error,
7433 		    dst_map->no_zero_fill,
7434 		    &fault_info_dst);
7435 		switch (result) {
7436 		case VM_FAULT_SUCCESS:
7437 			break;
7438 		case VM_FAULT_RETRY:
7439 			goto RetryDestinationFault;
7440 		case VM_FAULT_MEMORY_SHORTAGE:
7441 			if (vm_page_wait(interruptible)) {
7442 				goto RetryDestinationFault;
7443 			}
7444 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_COPY_MEMORY_SHORTAGE), 0 /* arg */);
7445 			OS_FALLTHROUGH;
7446 		case VM_FAULT_INTERRUPTED:
7447 			RETURN(MACH_SEND_INTERRUPTED);
7448 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
7449 			/* success but no VM page: fail the copy */
7450 			vm_object_paging_end(dst_object);
7451 			vm_object_unlock(dst_object);
7452 			OS_FALLTHROUGH;
7453 		case VM_FAULT_MEMORY_ERROR:
7454 			if (error) {
7455 				return error;
7456 			} else {
7457 				return KERN_MEMORY_ERROR;
7458 			}
7459 		default:
7460 			panic("vm_fault_copy: unexpected error 0x%x from "
7461 			    "vm_fault_page()\n", result);
7462 		}
7463 		assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
7464 
7465 		assert(dst_object == VM_PAGE_OBJECT(dst_page));
7466 		old_copy_object = dst_object->vo_copy;
7467 		old_copy_version = dst_object->vo_copy_version;
7468 
7469 		/*
7470 		 * There exists the possibility that the source and
7471 		 * destination page are the same.  But we can't
7472 		 * easily determine that now.  If they are the
7473 		 * same, the call to vm_fault_page() for the
7474 		 * destination page will deadlock.  To prevent this we
7475 		 * wire the page so we can drop busy without having
7476 		 * the page daemon steal the page.  We clean up the
7477 		 * top page  but keep the paging reference on the object
7478 		 * holding the dest page so it doesn't go away.
7479 		 */
7480 
7481 		vm_page_lockspin_queues();
7482 		vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
7483 		vm_page_unlock_queues();
7484 		vm_page_wakeup_done(dst_object, dst_page);
7485 		vm_object_unlock(dst_object);
7486 
7487 		if (dst_top_page != VM_PAGE_NULL) {
7488 			vm_object_lock(dst_object);
7489 			VM_PAGE_FREE(dst_top_page);
7490 			vm_object_paging_end(dst_object);
7491 			vm_object_unlock(dst_object);
7492 		}
7493 
7494 RetrySourceFault:;
7495 
7496 		if (src_object == VM_OBJECT_NULL) {
7497 			/*
7498 			 *	No source object.  We will just
7499 			 *	zero-fill the page in dst_object.
7500 			 */
7501 			src_page = VM_PAGE_NULL;
7502 			result_page = VM_PAGE_NULL;
7503 		} else {
7504 			vm_object_lock(src_object);
7505 			src_page = vm_page_lookup(src_object,
7506 			    vm_object_trunc_page(src_offset));
7507 			if (src_page == dst_page) {
7508 				src_prot = dst_prot;
7509 				result_page = VM_PAGE_NULL;
7510 			} else {
7511 				src_prot = VM_PROT_READ;
7512 				vm_object_paging_begin(src_object);
7513 
7514 				/* cap cluster size at maximum UPL size */
7515 				if (os_convert_overflow(amount_left, &cluster_size)) {
7516 					cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7517 				}
7518 				fault_info_src.cluster_size = cluster_size;
7519 
7520 				result_page = VM_PAGE_NULL;
7521 				result = vm_fault_page(
7522 					src_object,
7523 					vm_object_trunc_page(src_offset),
7524 					VM_PROT_READ, FALSE,
7525 					FALSE, /* page not looked up */
7526 					&src_prot,
7527 					&result_page, &src_top_page,
7528 					(int *)0, &error, FALSE,
7529 					&fault_info_src);
7530 
7531 				switch (result) {
7532 				case VM_FAULT_SUCCESS:
7533 					break;
7534 				case VM_FAULT_RETRY:
7535 					goto RetrySourceFault;
7536 				case VM_FAULT_MEMORY_SHORTAGE:
7537 					if (vm_page_wait(interruptible)) {
7538 						goto RetrySourceFault;
7539 					}
7540 					OS_FALLTHROUGH;
7541 				case VM_FAULT_INTERRUPTED:
7542 					vm_fault_copy_dst_cleanup(dst_page);
7543 					RETURN(MACH_SEND_INTERRUPTED);
7544 				case VM_FAULT_SUCCESS_NO_VM_PAGE:
7545 					/* success but no VM page: fail */
7546 					vm_object_paging_end(src_object);
7547 					vm_object_unlock(src_object);
7548 					OS_FALLTHROUGH;
7549 				case VM_FAULT_MEMORY_ERROR:
7550 					vm_fault_copy_dst_cleanup(dst_page);
7551 					if (error) {
7552 						return error;
7553 					} else {
7554 						return KERN_MEMORY_ERROR;
7555 					}
7556 				default:
7557 					panic("vm_fault_copy(2): unexpected "
7558 					    "error 0x%x from "
7559 					    "vm_fault_page()\n", result);
7560 				}
7561 
7562 				result_page_object = VM_PAGE_OBJECT(result_page);
7563 				assert((src_top_page == VM_PAGE_NULL) ==
7564 				    (result_page_object == src_object));
7565 			}
7566 			assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7567 			vm_object_unlock(result_page_object);
7568 		}
7569 
7570 		vm_map_lock_read(dst_map);
7571 
7572 		if (!vm_map_verify(dst_map, dst_version)) {
7573 			vm_map_unlock_read(dst_map);
7574 			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7575 				vm_fault_copy_cleanup(result_page, src_top_page);
7576 			}
7577 			vm_fault_copy_dst_cleanup(dst_page);
7578 			break;
7579 		}
7580 		assert(dst_object == VM_PAGE_OBJECT(dst_page));
7581 
7582 		vm_object_lock(dst_object);
7583 
7584 		if ((dst_object->vo_copy != old_copy_object ||
7585 		    dst_object->vo_copy_version != old_copy_version)) {
7586 			vm_object_unlock(dst_object);
7587 			vm_map_unlock_read(dst_map);
7588 			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7589 				vm_fault_copy_cleanup(result_page, src_top_page);
7590 			}
7591 			vm_fault_copy_dst_cleanup(dst_page);
7592 			break;
7593 		}
7594 		vm_object_unlock(dst_object);
7595 
7596 		/*
7597 		 *	Copy the page, and note that it is dirty
7598 		 *	immediately.
7599 		 */
7600 
7601 		if (!page_aligned(src_offset) ||
7602 		    !page_aligned(dst_offset) ||
7603 		    !page_aligned(amount_left)) {
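			/*
			 * Unaligned case: copy or zero-fill only the
			 * sub-page region bounded by the source and
			 * destination page offsets and by the amount left.
			 */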
7604 			vm_object_offset_t      src_po,
7605 			    dst_po;
7606 
7607 			src_po = src_offset - vm_object_trunc_page(src_offset);
7608 			dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7609 
7610 			if (dst_po > src_po) {
7611 				part_size = PAGE_SIZE - dst_po;
7612 			} else {
7613 				part_size = PAGE_SIZE - src_po;
7614 			}
7615 			if (part_size > (amount_left)) {
7616 				part_size = amount_left;
7617 			}
7618 
7619 			if (result_page == VM_PAGE_NULL) {
7620 				assert((vm_offset_t) dst_po == dst_po);
7621 				assert((vm_size_t) part_size == part_size);
7622 				vm_page_part_zero_fill(dst_page,
7623 				    (vm_offset_t) dst_po,
7624 				    (vm_size_t) part_size);
7625 			} else {
7626 				assert((vm_offset_t) src_po == src_po);
7627 				assert((vm_offset_t) dst_po == dst_po);
7628 				assert((vm_size_t) part_size == part_size);
7629 				vm_page_part_copy(result_page,
7630 				    (vm_offset_t) src_po,
7631 				    dst_page,
7632 				    (vm_offset_t) dst_po,
7633 				    (vm_size_t)part_size);
7634 				if (!dst_page->vmp_dirty) {
7635 					vm_object_lock(dst_object);
7636 					SET_PAGE_DIRTY(dst_page, TRUE);
7637 					vm_object_unlock(dst_object);
7638 				}
7639 			}
7640 		} else {
7641 			part_size = PAGE_SIZE;
7642 
7643 			if (result_page == VM_PAGE_NULL) {
7644 				vm_page_zero_fill(
7645 					dst_page
7646 					);
7647 			} else {
7648 				vm_object_lock(result_page_object);
7649 				vm_page_copy(result_page, dst_page);
7650 				vm_object_unlock(result_page_object);
7651 
7652 				if (!dst_page->vmp_dirty) {
7653 					vm_object_lock(dst_object);
7654 					SET_PAGE_DIRTY(dst_page, TRUE);
7655 					vm_object_unlock(dst_object);
7656 				}
7657 			}
7658 		}
7659 
7660 		/*
7661 		 *	Unlock everything, and return
7662 		 */
7663 
7664 		vm_map_unlock_read(dst_map);
7665 
7666 		if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7667 			vm_fault_copy_cleanup(result_page, src_top_page);
7668 		}
7669 		vm_fault_copy_dst_cleanup(dst_page);
7670 
7671 		amount_left -= part_size;
7672 		src_offset += part_size;
7673 		dst_offset += part_size;
7674 	} while (amount_left > 0);
7675 
7676 	RETURN(KERN_SUCCESS);
7677 #undef  RETURN
7678 
7679 	/*NOTREACHED*/
7680 }
7681 
7682 #if     VM_FAULT_CLASSIFY
7683 /*
7684  *	Temporary statistics gathering support.
7685  */
7686 
7687 /*
7688  *	Statistics arrays:
7689  */
7690 #define VM_FAULT_TYPES_MAX      5
7691 #define VM_FAULT_LEVEL_MAX      8
7692 
7693 int     vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7694 
7695 #define VM_FAULT_TYPE_ZERO_FILL 0
7696 #define VM_FAULT_TYPE_MAP_IN    1
7697 #define VM_FAULT_TYPE_PAGER     2
7698 #define VM_FAULT_TYPE_COPY      3
7699 #define VM_FAULT_TYPE_OTHER     4
7700 
7701 
7702 void
7703 vm_fault_classify(vm_object_t           object,
7704     vm_object_offset_t    offset,
7705     vm_prot_t             fault_type)
7706 {
7707 	int             type, level = 0;
7708 	vm_page_t       m;
7709 
7710 	while (TRUE) {
7711 		m = vm_page_lookup(object, offset);
7712 		if (m != VM_PAGE_NULL) {
7713 			if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7714 				type = VM_FAULT_TYPE_OTHER;
7715 				break;
7716 			}
7717 			if (((fault_type & VM_PROT_WRITE) == 0) ||
7718 			    ((level == 0) && object->vo_copy == VM_OBJECT_NULL)) {
7719 				type = VM_FAULT_TYPE_MAP_IN;
7720 				break;
7721 			}
7722 			type = VM_FAULT_TYPE_COPY;
7723 			break;
7724 		} else {
7725 			if (object->pager_created) {
7726 				type = VM_FAULT_TYPE_PAGER;
7727 				break;
7728 			}
7729 			if (object->shadow == VM_OBJECT_NULL) {
7730 				type = VM_FAULT_TYPE_ZERO_FILL;
7731 				break;
7732 			}
7733 
7734 			offset += object->vo_shadow_offset;
7735 			object = object->shadow;
7736 			level++;
7737 			continue;
7738 		}
7739 	}
7740 
7741 	if (level > VM_FAULT_LEVEL_MAX) {
7742 		level = VM_FAULT_LEVEL_MAX;
7743 	}
7744 
7745 	vm_fault_stats[type][level] += 1;
7746 
7747 	return;
7748 }
7749 
7750 /* cleanup routine to call from debugger */
7751 
7752 void
7753 vm_fault_classify_init(void)
7754 {
7755 	int type, level;
7756 
7757 	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7758 		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7759 			vm_fault_stats[type][level] = 0;
7760 		}
7761 	}
7762 
7763 	return;
7764 }
7765 #endif  /* VM_FAULT_CLASSIFY */
7766 
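/*
 * Only objects with default caching attributes are considered safe to
 * read while generating a core dump; anything else is skipped by
 * kdp_lightweight_fault().
 */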
7767 static inline bool
7768 object_supports_coredump(const vm_object_t object)
7769 {
7770 	switch (object->wimg_bits & VM_WIMG_MASK) {
7771 	case VM_WIMG_DEFAULT:
7772 		return true;
7773 	default:
7774 		return false;
7775 	}
7776 }
7777 
7778 vm_offset_t
7779 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, bool multi_cpu)
7780 {
7781 	vm_map_entry_t  entry;
7782 	vm_object_t     object;
7783 	vm_offset_t     object_offset;
7784 	vm_page_t       m;
7785 	int             compressor_external_state, compressed_count_delta;
7786 	vm_compressor_options_t             compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7787 	int             my_fault_type = VM_PROT_READ;
7788 	kern_return_t   kr;
7789 	int effective_page_mask, effective_page_size;
7790 	int             my_cpu_no = cpu_number();
7791 	ppnum_t         decomp_ppnum;
7792 	addr64_t        decomp_paddr;
7793 
7794 	if (multi_cpu) {
7795 		compressor_flags |= C_KDP_MULTICPU;
7796 	}
7797 
7798 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7799 		effective_page_mask = VM_MAP_PAGE_MASK(map);
7800 		effective_page_size = VM_MAP_PAGE_SIZE(map);
7801 	} else {
7802 		effective_page_mask = PAGE_MASK;
7803 		effective_page_size = PAGE_SIZE;
7804 	}
7805 
7806 	if (not_in_kdp) {
7807 		panic("kdp_lightweight_fault called from outside of debugger context");
7808 	}
7809 
7810 	assert(map != VM_MAP_NULL);
7811 
7812 	assert((cur_target_addr & effective_page_mask) == 0);
7813 	if ((cur_target_addr & effective_page_mask) != 0) {
7814 		return 0;
7815 	}
7816 
7817 	if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7818 		return 0;
7819 	}
7820 
7821 	if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7822 		return 0;
7823 	}
7824 
7825 	if (entry->is_sub_map) {
7826 		return 0;
7827 	}
7828 
7829 	object = VME_OBJECT(entry);
7830 	if (object == VM_OBJECT_NULL) {
7831 		return 0;
7832 	}
7833 
7834 	object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7835 
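	/*
	 * Walk the shadow chain without taking any locks (we are in
	 * debugger context): return 0 whenever a page or object is in a
	 * state we cannot safely examine, and consult the compressor for
	 * pages that are not resident.
	 */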
7836 	while (TRUE) {
7837 		if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7838 			return 0;
7839 		}
7840 
7841 		if (object->pager_created && (object->paging_in_progress ||
7842 		    object->activity_in_progress)) {
7843 			return 0;
7844 		}
7845 
7846 		m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7847 
7848 		if (m != VM_PAGE_NULL) {
7849 			if (!object_supports_coredump(object)) {
7850 				return 0;
7851 			}
7852 
7853 			if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done ||
7854 			    m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_cleaning ||
7855 			    m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7856 				return 0;
7857 			}
7858 
7859 			assert(!vm_page_is_private(m));
7860 			if (vm_page_is_private(m)) {
7861 				return 0;
7862 			}
7863 
7864 			assert(!vm_page_is_fictitious(m));
7865 			if (vm_page_is_fictitious(m)) {
7866 				return 0;
7867 			}
7868 
7869 			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7870 			if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7871 				return 0;
7872 			}
7873 
7874 			return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7875 		}
7876 
7877 		compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7878 
7879 		if (multi_cpu) {
7880 			assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum != NULL);
7881 			assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr != NULL);
7882 			decomp_ppnum = vm_compressor_kdp_state.kc_decompressed_pages_ppnum[my_cpu_no];
7883 			decomp_paddr = vm_compressor_kdp_state.kc_decompressed_pages_paddr[my_cpu_no];
7884 		} else {
7885 			decomp_ppnum = vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum;
7886 			decomp_paddr = vm_compressor_kdp_state.kc_panic_decompressed_page_paddr;
7887 		}
7888 
7889 		if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7890 			if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7891 				kr = vm_compressor_pager_get(object->pager,
7892 				    vm_object_trunc_page(object_offset + object->paging_offset),
7893 				    decomp_ppnum, &my_fault_type,
7894 				    compressor_flags, &compressed_count_delta);
7895 				if (kr == KERN_SUCCESS) {
7896 					return decomp_paddr;
7897 				} else {
7898 					return 0;
7899 				}
7900 			}
7901 		}
7902 
7903 		if (object->shadow == VM_OBJECT_NULL) {
7904 			return 0;
7905 		}
7906 
7907 		object_offset += object->vo_shadow_offset;
7908 		object = object->shadow;
7909 	}
7910 }
7911 
7912 /*
7913  * vm_page_validate_cs_fast():
7914  * Performs a few quick checks to determine if the page's code signature
7915  * really needs to be fully validated.  It could:
7916  *	1. have been modified (i.e. automatically tainted),
7917  *	2. have already been validated,
7918  *	3. have already been found to be tainted,
7919  *	4. no longer have a backing store.
7920  * Returns FALSE if the page needs to be fully validated.
7921  */
7922 static boolean_t
7923 vm_page_validate_cs_fast(
7924 	vm_page_t       page,
7925 	vm_map_size_t   fault_page_size,
7926 	vm_map_offset_t fault_phys_offset)
7927 {
7928 	vm_object_t     object;
7929 
7930 	object = VM_PAGE_OBJECT(page);
7931 	vm_object_lock_assert_held(object);
7932 
7933 	if (page->vmp_wpmapped &&
7934 	    !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7935 		/*
7936 		 * This page was mapped for "write" access sometime in the
7937 		 * past and could still be modifiable in the future.
7938 		 * Consider it tainted.
7939 		 * [ If the page was already found to be "tainted", no
7940 		 * need to re-validate. ]
7941 		 */
7942 		vm_object_lock_assert_exclusive(object);
7943 		VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7944 		VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7945 		if (cs_debug) {
7946 			printf("CODESIGNING: %s: "
7947 			    "page %p obj %p off 0x%llx "
7948 			    "was modified\n",
7949 			    __FUNCTION__,
7950 			    page, object, page->vmp_offset);
7951 		}
7952 		vm_cs_validated_dirtied++;
7953 	}
7954 
7955 	if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7956 	    VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7957 		return TRUE;
7958 	}
7959 	vm_object_lock_assert_exclusive(object);
7960 
7961 #if CHECK_CS_VALIDATION_BITMAP
7962 	kern_return_t kr;
7963 
7964 	kr = vnode_pager_cs_check_validation_bitmap(
7965 		object->pager,
7966 		page->vmp_offset + object->paging_offset,
7967 		CS_BITMAP_CHECK);
7968 	if (kr == KERN_SUCCESS) {
7969 		page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7970 		page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7971 		vm_cs_bitmap_validated++;
7972 		return TRUE;
7973 	}
7974 #endif /* CHECK_CS_VALIDATION_BITMAP */
7975 
7976 	if (!object->alive || object->terminating || object->pager == NULL) {
7977 		/*
7978 		 * The object is dead or terminating, or we don't have its
7979 		 * pager, so we can't validate the data...
7980 		 */
7981 		return TRUE;
7982 	}
7983 
7984 	/* we need to really validate this page */
7985 	vm_object_lock_assert_exclusive(object);
7986 	return FALSE;
7987 }
7988 
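/*
 * vm_page_validate_cs_mapped_slow():
 * Fully validate a code-signed page that is already mapped at "kaddr":
 * check its contents against the backing vnode's code signature and
 * accumulate the results into the page's vmp_cs_validated/tainted/nx bits.
 */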
7989 void
7990 vm_page_validate_cs_mapped_slow(
7991 	vm_page_t       page,
7992 	const void      *kaddr)
7993 {
7994 	vm_object_t             object;
7995 	memory_object_offset_t  mo_offset;
7996 	memory_object_t         pager;
7997 	struct vnode            *vnode;
7998 	int                     validated, tainted, nx;
7999 
8000 	assert(page->vmp_busy);
8001 	object = VM_PAGE_OBJECT(page);
8002 	vm_object_lock_assert_exclusive(object);
8003 
8004 	vm_cs_validates++;
8005 
8006 	/*
8007 	 * Since we get here to validate a page that was brought in by
8008 	 * the pager, we know that this pager is all set up and ready
8009 	 * by now.
8010 	 */
8011 	assert(object->code_signed);
8012 	assert(!object->internal);
8013 	assert(object->pager != NULL);
8014 	assert(object->pager_ready);
8015 
8016 	pager = object->pager;
8017 	assert(object->paging_in_progress);
8018 	vnode = vnode_pager_lookup_vnode(pager);
8019 	mo_offset = page->vmp_offset + object->paging_offset;
8020 
8021 	/* verify the SHA1 hash for this page */
8022 	validated = 0;
8023 	tainted = 0;
8024 	nx = 0;
8025 	cs_validate_page(vnode,
8026 	    pager,
8027 	    mo_offset,
8028 	    (const void *)((const char *)kaddr),
8029 	    &validated,
8030 	    &tainted,
8031 	    &nx);
8032 
8033 	page->vmp_cs_validated |= validated;
8034 	page->vmp_cs_tainted |= tainted;
8035 	page->vmp_cs_nx |= nx;
8036 
8037 #if CHECK_CS_VALIDATION_BITMAP
8038 	if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
8039 	    page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
8040 		vnode_pager_cs_check_validation_bitmap(object->pager,
8041 		    mo_offset,
8042 		    CS_BITMAP_SET);
8043 	}
8044 #endif /* CHECK_CS_VALIDATION_BITMAP */
8045 }
8046 
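/*
 * vm_page_validate_cs_mapped():
 * Validate a page that is already mapped at "kaddr", taking the slow path
 * only when the quick checks say a full validation is still needed.
 */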
8047 void
8048 vm_page_validate_cs_mapped(
8049 	vm_page_t       page,
8050 	vm_map_size_t   fault_page_size,
8051 	vm_map_offset_t fault_phys_offset,
8052 	const void      *kaddr)
8053 {
8054 	if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8055 		vm_page_validate_cs_mapped_slow(page, kaddr);
8056 	}
8057 }
8058 
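/*
 * vm_page_map_and_validate_cs():
 * Map the page into the kernel address space, run the full code-signing
 * validation on it, then tear down the temporary mapping.  The page is
 * kept busy and the object holds a paging reference for the duration.
 */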
8059 static void
8060 vm_page_map_and_validate_cs(
8061 	vm_object_t     object,
8062 	vm_page_t       page)
8063 {
8064 	vm_object_offset_t      offset;
8065 	vm_map_offset_t         koffset;
8066 	vm_map_size_t           ksize;
8067 	vm_offset_t             kaddr;
8068 	kern_return_t           kr;
8069 	boolean_t               busy_page;
8070 	boolean_t               need_unmap;
8071 
8072 	vm_object_lock_assert_exclusive(object);
8073 
8074 	assert(object->code_signed);
8075 	offset = page->vmp_offset;
8076 
8077 	busy_page = page->vmp_busy;
8078 	if (!busy_page) {
8079 		/* keep page busy while we map (and unlock) the VM object */
8080 		page->vmp_busy = TRUE;
8081 	}
8082 
8083 	/*
8084 	 * Take a paging reference on the VM object
8085 	 * to protect it from collapse or bypass,
8086 	 * and keep it from disappearing too.
8087 	 */
8088 	vm_object_paging_begin(object);
8089 
8090 	/* map the page in the kernel address space */
8091 	ksize = PAGE_SIZE_64;
8092 	koffset = 0;
8093 	need_unmap = FALSE;
8094 	kr = vm_paging_map_object(page,
8095 	    object,
8096 	    offset,
8097 	    VM_PROT_READ,
8098 	    FALSE,                       /* can't unlock object ! */
8099 	    &ksize,
8100 	    &koffset,
8101 	    &need_unmap);
8102 	if (kr != KERN_SUCCESS) {
8103 		panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
8104 	}
8105 	kaddr = CAST_DOWN(vm_offset_t, koffset);
8106 
8107 	/* validate the mapped page */
8108 	vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
8109 
8110 	assert(page->vmp_busy);
8111 	assert(object == VM_PAGE_OBJECT(page));
8112 	vm_object_lock_assert_exclusive(object);
8113 
8114 	if (!busy_page) {
8115 		vm_page_wakeup_done(object, page);
8116 	}
8117 	if (need_unmap) {
8118 		/* unmap the page from the kernel address space */
8119 		vm_paging_unmap_object(object, koffset, koffset + ksize);
8120 		koffset = 0;
8121 		ksize = 0;
8122 		kaddr = 0;
8123 	}
8124 	vm_object_paging_end(object);
8125 }
8126 
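/*
 * vm_page_validate_cs():
 * Validate the code signature of a page that isn't necessarily mapped in
 * the kernel yet: run the quick checks first and, only if needed, map the
 * page and do the full validation.  The caller must hold the object lock.
 */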
8127 void
8128 vm_page_validate_cs(
8129 	vm_page_t       page,
8130 	vm_map_size_t   fault_page_size,
8131 	vm_map_offset_t fault_phys_offset)
8132 {
8133 	vm_object_t             object;
8134 
8135 	object = VM_PAGE_OBJECT(page);
8136 	vm_object_lock_assert_held(object);
8137 
8138 	if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8139 		return;
8140 	}
8141 	vm_page_map_and_validate_cs(object, page);
8142 }
8143 
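/*
 * vm_page_validate_cs_mapped_chunk():
 * Validate only a sub-range ("chunk") of a page that is already mapped at
 * "kaddr", reporting whether the chunk validated and whether it is tainted.
 */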
8144 void
8145 vm_page_validate_cs_mapped_chunk(
8146 	vm_page_t       page,
8147 	const void      *kaddr,
8148 	vm_offset_t     chunk_offset,
8149 	vm_size_t       chunk_size,
8150 	boolean_t       *validated_p,
8151 	unsigned        *tainted_p)
8152 {
8153 	vm_object_t             object;
8154 	vm_object_offset_t      offset, offset_in_page;
8155 	memory_object_t         pager;
8156 	struct vnode            *vnode;
8157 	boolean_t               validated;
8158 	unsigned                tainted;
8159 
8160 	*validated_p = FALSE;
8161 	*tainted_p = 0;
8162 
8163 	assert(page->vmp_busy);
8164 	object = VM_PAGE_OBJECT(page);
8165 	vm_object_lock_assert_exclusive(object);
8166 
8167 	assert(object->code_signed);
8168 	offset = page->vmp_offset;
8169 
8170 	if (!object->alive || object->terminating || object->pager == NULL) {
8171 		/*
8172 		 * The object is dead or terminating, or we don't have its
8173 		 * pager, so we can't validate the data...
8174 		 */
8175 		return;
8176 	}
8177 	/*
8178 	 * Since we get here to validate a page that was brought in by
8179 	 * the pager, we know that this pager is all setup and ready
8180 	 * the pager, we know that this pager is all set up and ready
8181 	 */
8182 	assert(!object->internal);
8183 	assert(object->pager != NULL);
8184 	assert(object->pager_ready);
8185 
8186 	pager = object->pager;
8187 	assert(object->paging_in_progress);
8188 	vnode = vnode_pager_lookup_vnode(pager);
8189 
8190 	/* verify the signature for this chunk */
8191 	offset_in_page = chunk_offset;
8192 	assert(offset_in_page < PAGE_SIZE);
8193 
8194 	tainted = 0;
8195 	validated = cs_validate_range(vnode,
8196 	    pager,
8197 	    (object->paging_offset +
8198 	    offset +
8199 	    offset_in_page),
8200 	    (const void *)((const char *)kaddr
8201 	    + offset_in_page),
8202 	    chunk_size,
8203 	    &tainted);
8204 	if (validated) {
8205 		*validated_p = TRUE;
8206 	}
8207 	if (tainted) {
8208 		*tainted_p = tainted;
8209 	}
8210 }
8211 
8212 static void
8213 vm_rtfrecord_lock(void)
8214 {
8215 	lck_spin_lock(&vm_rtfr_slock);
8216 }
8217 
8218 static void
8219 vm_rtfrecord_unlock(void)
8220 {
8221 	lck_spin_unlock(&vm_rtfr_slock);
8222 }
8223 
8224 unsigned int
8225 vmrtfaultinfo_bufsz(void)
8226 {
8227 	return vmrtf_num_records * sizeof(vm_rtfault_record_t);
8228 }
8229 
8230 #include <kern/backtrace.h>
8231 
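/*
 * vm_record_rtfault():
 * Record the timing and context of a fault into the circular buffer of
 * vm_rtfault_record_t entries: start time, duration, faulting address,
 * user program counter, fault type, unique pid and thread id.
 */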
8232 __attribute__((noinline))
8233 static void
8234 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
8235 {
8236 	uint64_t fend = mach_continuous_time();
8237 
8238 	uint64_t cfpc = 0;
8239 	uint64_t ctid = cthread->thread_id;
8240 	uint64_t cupid = get_current_unique_pid();
8241 
8242 	uintptr_t bpc = 0;
8243 	errno_t btr = 0;
8244 
8245 	/*
8246 	 * Capture a single-frame backtrace.  This extracts just the program
8247 	 * counter at the point of the fault, and should not use copyin to get
8248 	 * Rosetta save state.
8249 	 */
8250 	struct backtrace_control ctl = {
8251 		.btc_user_thread = cthread,
8252 		.btc_user_copy = backtrace_user_copy_error,
8253 	};
8254 	unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
8255 	if ((btr == 0) && (bfrs > 0)) {
8256 		cfpc = bpc;
8257 	}
8258 
8259 	assert((fstart != 0) && fend >= fstart);
8260 	vm_rtfrecord_lock();
8261 	assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
8262 
8263 	vmrtfrs.vmrtf_total++;
8264 	vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
8265 
8266 	cvmr->rtfabstime = fstart;
8267 	cvmr->rtfduration = fend - fstart;
8268 	cvmr->rtfaddr = fault_vaddr;
8269 	cvmr->rtfpc = cfpc;
8270 	cvmr->rtftype = type_of_fault;
8271 	cvmr->rtfupid = cupid;
8272 	cvmr->rtftid = ctid;
8273 
8274 	if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
8275 		vmrtfrs.vmrtfr_curi = 0;
8276 	}
8277 
8278 	vm_rtfrecord_unlock();
8279 }
8280 
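/*
 * vmrtf_extract():
 * Copy the recorded fault records matching the given unique pid (or every
 * record, for root callers on DEVELOPMENT || DEBUG kernels) into the
 * caller-supplied buffer.  Returns non-zero if the buffer was too small
 * to hold all matching records.
 */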
8281 int
8282 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
8283 {
8284 	vm_rtfault_record_t *cvmrd = vrecords;
8285 	size_t residue = vrecordsz;
8286 	size_t numextracted = 0;
8287 	boolean_t early_exit = FALSE;
8288 
8289 	vm_rtfrecord_lock();
8290 
8291 	for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
8292 		if (residue < sizeof(vm_rtfault_record_t)) {
8293 			early_exit = TRUE;
8294 			break;
8295 		}
8296 
8297 		if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
8298 #if     DEVELOPMENT || DEBUG
8299 			if (isroot == FALSE) {
8300 				continue;
8301 			}
8302 #else
8303 			continue;
8304 #endif /* DEVELOPMENT || DEBUG */
8305 		}
8306 
8307 		*cvmrd = vmrtfrs.vm_rtf_records[vmfi];
8308 		cvmrd++;
8309 		residue -= sizeof(vm_rtfault_record_t);
8310 		numextracted++;
8311 	}
8312 
8313 	vm_rtfrecord_unlock();
8314 
8315 	*vmrtfrv = numextracted;
8316 	return early_exit;
8317 }
8318 
8319 /*
8320  * Only allow one diagnosis to be in flight at a time, to avoid
8321  * creating too much additional memory usage.
8322  */
8323 static volatile uint_t vmtc_diagnosing;
8324 unsigned int vmtc_total = 0;
8325 
8326 /*
8327  * Type used to update telemetry for the diagnosis counts.
8328  */
8329 CA_EVENT(vmtc_telemetry,
8330     CA_INT, vmtc_num_byte,            /* number of corrupt bytes found */
8331     CA_BOOL, vmtc_undiagnosed,        /* undiagnosed because more than 1 at a time */
8332     CA_BOOL, vmtc_not_eligible,       /* the page didn't qualify */
8333     CA_BOOL, vmtc_copyin_fail,        /* unable to copy in the page */
8334     CA_BOOL, vmtc_not_found,          /* no corruption found even though CS failed */
8335     CA_BOOL, vmtc_one_bit_flip,       /* single bit flip */
8336     CA_BOOL, vmtc_testing);           /* caused on purpose by testing */
8337 
8338 #if DEVELOPMENT || DEBUG
8339 /*
8340  * Buffers used to compare before/after page contents.
8341  * Stashed to aid when debugging crashes.
8342  */
8343 static size_t vmtc_last_buffer_size = 0;
8344 static uint64_t *vmtc_last_before_buffer = NULL;
8345 static uint64_t *vmtc_last_after_buffer = NULL;
8346 
8347 /*
8348  * Needed to record corruptions due to testing.
8349  */
8350 static uintptr_t corruption_test_va = 0;
8351 #endif /* DEVELOPMENT || DEBUG */
8352 
8353 /*
8354  * Stash a copy of data from a possibly corrupt page.
8355  */
8356 static uint64_t *
8357 vmtc_get_page_data(
8358 	vm_map_offset_t code_addr,
8359 	vm_page_t       page)
8360 {
8361 	uint64_t        *buffer = NULL;
8362 	addr64_t        buffer_paddr;
8363 	addr64_t        page_paddr;
8364 	extern void     bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
8365 	uint_t          size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8366 
8367 	/*
8368 	 * Need an aligned buffer to do a physical copy.
8369 	 */
8370 	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
8371 	    size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
8372 		return NULL;
8373 	}
8374 	buffer_paddr = kvtophys((vm_offset_t)buffer);
8375 	page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
8376 
8377 	/* adjust the page start address if we need only 4K of a 16K page */
8378 	if (size < PAGE_SIZE) {
8379 		uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
8380 		page_paddr += subpage_start;
8381 	}
8382 
8383 	bcopy_phys(page_paddr, buffer_paddr, size);
8384 	return buffer;
8385 }
8386 
8387 /*
8388  * Set things up so we can diagnose a potential text page corruption.
8389  */
8390 static uint64_t *
8391 vmtc_text_page_diagnose_setup(
8392 	vm_map_offset_t code_addr,
8393 	vm_page_t       page,
8394 	CA_EVENT_TYPE(vmtc_telemetry) *event)
8395 {
8396 	uint64_t        *buffer = NULL;
8397 
8398 	/*
8399 	 * If another is being diagnosed, skip this one.
8400 	 */
8401 	if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
8402 		event->vmtc_undiagnosed = true;
8403 		return NULL;
8404 	}
8405 
8406 	/*
8407 	 * Get the contents of the corrupt page.
8408 	 */
8409 	buffer = vmtc_get_page_data(code_addr, page);
8410 	if (buffer == NULL) {
8411 		event->vmtc_copyin_fail = true;
8412 		if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8413 			panic("Bad compare and swap in setup!");
8414 		}
8415 		return NULL;
8416 	}
8417 	return buffer;
8418 }
8419 
8420 /*
8421  * Diagnose the text page by comparing its contents with
8422  * the one we've previously saved.
8423  */
8424 static void
8425 vmtc_text_page_diagnose(
8426 	vm_map_offset_t code_addr,
8427 	uint64_t        *old_code_buffer,
8428 	CA_EVENT_TYPE(vmtc_telemetry) *event)
8429 {
8430 	uint64_t        *new_code_buffer;
8431 	size_t          size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8432 	uint_t          count = (uint_t)size / sizeof(uint64_t);
8433 	uint_t          diff_count = 0;
8434 	bool            bit_flip = false;
8435 	uint_t          b;
8436 	uint64_t        *new;
8437 	uint64_t        *old;
8438 
8439 	new_code_buffer = kalloc_data(size, Z_WAITOK);
8440 	assert(new_code_buffer != NULL);
8441 	if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
8442 		/* copyin error, so undo things */
8443 		event->vmtc_copyin_fail = true;
8444 		goto done;
8445 	}
8446 
8447 	new = new_code_buffer;
8448 	old = old_code_buffer;
8449 	for (; count-- > 0; ++new, ++old) {
8450 		if (*new == *old) {
8451 			continue;
8452 		}
8453 
8454 		/*
8455 		 * On first diff, check for a single bit flip
8456 		 */
8457 		if (diff_count == 0) {
8458 			uint64_t x = (*new ^ *old);
8459 			assert(x != 0);
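			/* a nonzero XOR has exactly one bit set iff (x & (x - 1)) == 0 */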
8460 			if ((x & (x - 1)) == 0) {
8461 				bit_flip = true;
8462 				++diff_count;
8463 				continue;
8464 			}
8465 		}
8466 
8467 		/*
8468 		 * count up the number of different bytes.
8469 		 */
8470 		for (b = 0; b < sizeof(uint64_t); ++b) {
8471 			char *n = (char *)new;
8472 			char *o = (char *)old;
8473 			if (n[b] != o[b]) {
8474 				++diff_count;
8475 			}
8476 		}
8477 	}
8478 
8479 	if (diff_count > 1) {
8480 		bit_flip = false;
8481 	}
8482 
8483 	if (diff_count == 0) {
8484 		event->vmtc_not_found = true;
8485 	} else {
8486 		event->vmtc_num_byte = diff_count;
8487 	}
8488 	if (bit_flip) {
8489 		event->vmtc_one_bit_flip = true;
8490 	}
8491 
8492 done:
8493 	/*
8494 	 * Free up the code copy buffers, but save the last
8495 	 * set on development / debug kernels in case they
8496 	 * can provide evidence for debugging memory stomps.
8497 	 */
8498 #if DEVELOPMENT || DEBUG
8499 	if (vmtc_last_before_buffer != NULL) {
8500 		kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
8501 	}
8502 	if (vmtc_last_after_buffer != NULL) {
8503 		kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
8504 	}
8505 	vmtc_last_before_buffer = old_code_buffer;
8506 	vmtc_last_after_buffer = new_code_buffer;
8507 	vmtc_last_buffer_size = size;
8508 #else /* DEVELOPMENT || DEBUG */
8509 	kfree_data(new_code_buffer, size);
8510 	kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
8511 #endif /* DEVELOPMENT || DEBUG */
8512 
8513 	/*
8514 	 * We're finished, so clear the diagnosing flag.
8515 	 */
8516 	if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8517 		panic("Bad compare and swap in diagnose!");
8518 	}
8519 }
8520 
8521 /*
8522  * For the given map, virt address, find the object, offset, and page.
8523  * This has to lookup the map entry, verify protections, walk any shadow chains.
8524  * If found, returns with the object locked.
8525  */
8526 static kern_return_t
8527 vmtc_revalidate_lookup(
8528 	vm_map_t               map,
8529 	vm_map_offset_t        vaddr,
8530 	vm_object_t            *ret_object,
8531 	vm_object_offset_t     *ret_offset,
8532 	vm_page_t              *ret_page,
8533 	vm_prot_t              *ret_prot)
8534 {
8535 	vm_object_t            object;
8536 	vm_object_offset_t     offset;
8537 	vm_page_t              page;
8538 	kern_return_t          kr = KERN_SUCCESS;
8539 	uint8_t                object_lock_type = OBJECT_LOCK_EXCLUSIVE;
8540 	vm_map_version_t       version;
8541 	boolean_t              wired;
8542 	struct vm_object_fault_info fault_info = {
8543 		.interruptible = THREAD_UNINT
8544 	};
8545 	vm_map_t               real_map = NULL;
8546 	vm_prot_t              prot;
8547 	vm_object_t            shadow;
8548 
8549 	/*
8550 	 * Find the object/offset for the given location/map.
8551 	 * Note this returns with the object locked.
8552 	 */
8553 restart:
8554 	vm_map_lock_read(map);
8555 	object = VM_OBJECT_NULL;        /* in case we come around the restart path */
8556 	kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
8557 	    object_lock_type, &version, &object, &offset, &prot, &wired,
8558 	    &fault_info, &real_map, NULL);
8559 	vm_map_unlock_read(map);
8560 	if (real_map != NULL && real_map != map) {
8561 		vm_map_unlock(real_map);
8562 	}
8563 
8564 	/*
8565 	 * If there's no page here, fail.
8566 	 */
8567 	if (kr != KERN_SUCCESS || object == NULL) {
8568 		kr = KERN_FAILURE;
8569 		goto done;
8570 	}
8571 
8572 	/*
8573 	 * Chase down any shadow chains to find the actual page.
8574 	 */
8575 	for (;;) {
8576 		/*
8577 		 * See if the page is on the current object.
8578 		 */
8579 		page = vm_page_lookup(object, vm_object_trunc_page(offset));
8580 		if (page != NULL) {
8581 			/* restart the lookup */
8582 			if (page->vmp_restart) {
8583 				vm_object_unlock(object);
8584 				goto restart;
8585 			}
8586 
8587 			/*
8588 			 * If this page is busy, we need to wait for it.
8589 			 */
8590 			if (page->vmp_busy) {
8591 				vm_page_sleep(object, page, THREAD_INTERRUPTIBLE, LCK_SLEEP_UNLOCK);
8592 				goto restart;
8593 			}
8594 			break;
8595 		}
8596 
8597 		/*
8598 		 * If the object doesn't have the page and
8599 		 * has no shadow, then we can quit.
8600 		 */
8601 		shadow = object->shadow;
8602 		if (shadow == NULL) {
8603 			kr = KERN_FAILURE;
8604 			goto done;
8605 		}
8606 
8607 		/*
8608 		 * Move to the next object
8609 		 */
8610 		offset += object->vo_shadow_offset;
8611 		vm_object_lock(shadow);
8612 		vm_object_unlock(object);
8613 		object = shadow;
8614 		shadow = VM_OBJECT_NULL;
8615 	}
8616 	*ret_object = object;
8617 	*ret_offset = vm_object_trunc_page(offset);
8618 	*ret_page = page;
8619 	*ret_prot = prot;
8620 
8621 done:
8622 	if (kr != KERN_SUCCESS && object != NULL) {
8623 		vm_object_unlock(object);
8624 	}
8625 	return kr;
8626 }
8627 
8628 /*
8629  * Check if a page is wired, needs extra locking.
8630  */
8631 static bool
8632 is_page_wired(vm_page_t page)
8633 {
8634 	bool result;
8635 	vm_page_lock_queues();
8636 	result = VM_PAGE_WIRED(page);
8637 	vm_page_unlock_queues();
8638 	return result;
8639 }
8640 
8641 /*
8642  * A fatal process error has occurred in the given task.
8643  * Recheck the code signing of the text page at the given
8644  * address to check for a text page corruption.
8645  *
8646  * Returns KERN_FAILURE if a page was found to be corrupt
8647  * by failing to match its code signature. KERN_SUCCESS
8648  * means the page is either valid or we don't have the
8649  * information to say it's corrupt.
8650  */
8651 kern_return_t
8652 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8653 {
8654 	kern_return_t          kr;
8655 	vm_map_t               map;
8656 	vm_object_t            object = NULL;
8657 	vm_object_offset_t     offset;
8658 	vm_page_t              page = NULL;
8659 	struct vnode           *vnode;
8660 	uint64_t               *diagnose_buffer = NULL;
8661 	CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8662 	ca_event_t             ca_event = NULL;
8663 	vm_prot_t              prot;
8664 
8665 	map = task->map;
8666 	if (task->map == NULL) {
8667 		return KERN_SUCCESS;
8668 	}
8669 
8670 	kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page, &prot);
8671 	if (kr != KERN_SUCCESS) {
8672 		goto done;
8673 	}
8674 
8675 	/*
8676 	 * The page must be executable.
8677 	 */
8678 	if (!(prot & VM_PROT_EXECUTE)) {
8679 		goto done;
8680 	}
8681 
8682 	/*
8683 	 * The object needs to have a pager.
8684 	 */
8685 	if (object->pager == NULL) {
8686 		goto done;
8687 	}
8688 
8689 	/*
8690 	 * Needs to be a vnode backed page to have a signature.
8691 	 */
8692 	vnode = vnode_pager_lookup_vnode(object->pager);
8693 	if (vnode == NULL) {
8694 		goto done;
8695 	}
8696 
8697 	/*
8698 	 * Object checks to see if we should proceed.
8699 	 */
8700 	if (!object->code_signed ||     /* no code signature to check */
8701 	    object->internal ||         /* internal objects aren't signed */
8702 	    object->terminating ||      /* the object and its pages are already going away */
8703 	    !object->pager_ready) {     /* this shouldn't happen, but checking doesn't hurt */
8704 		goto done;
8705 	}
8706 
8707 
8708 	/*
8709 	 * Check the code signature of the page in question.
8710 	 */
8711 	vm_page_map_and_validate_cs(object, page);
8712 
8713 	/*
8714 	 * At this point:
8715 	 * vmp_cs_validated |= validated (set if a code signature exists)
8716 	 * vmp_cs_tainted |= tainted (set if code signature violation)
8717 	 * vmp_cs_nx |= nx;  ??
8718 	 *
8719 	 * if vmp_pmapped then have to pmap_disconnect..
8720 	 * other flags to check on object or page?
8721 	 */
8722 	if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8723 #if DEBUG || DEVELOPMENT
8724 		/*
8725 		 * On development builds, a boot-arg can be used to cause
8726 		 * a panic, instead of a quiet repair.
8727 		 */
8728 		if (vmtc_panic_instead) {
8729 			panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8730 		}
8731 #endif /* DEBUG || DEVELOPMENT */
8732 
8733 		/*
8734 		 * We're going to invalidate this page. Grab a copy of it for comparison.
8735 		 */
8736 		ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8737 		event = ca_event->data;
8738 		diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8739 
8740 		/*
8741 		 * Invalidate, i.e. toss, the corrupted page.
8742 		 */
8743 		if (!page->vmp_cleaning &&
8744 		    !page->vmp_laundry &&
8745 		    !vm_page_is_fictitious(page) &&
8746 		    !page->vmp_precious &&
8747 		    !page->vmp_absent &&
8748 		    !VMP_ERROR_GET(page) &&
8749 		    !page->vmp_dirty &&
8750 		    !is_page_wired(page)) {
8751 			if (page->vmp_pmapped) {
8752 				int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8753 				if (refmod & VM_MEM_MODIFIED) {
8754 					SET_PAGE_DIRTY(page, FALSE);
8755 				}
8756 				if (refmod & VM_MEM_REFERENCED) {
8757 					page->vmp_reference = TRUE;
8758 				}
8759 			}
8760 			/* If the page seems intentionally modified, don't trash it. */
8761 			if (!page->vmp_dirty) {
8762 				VM_PAGE_FREE(page);
8763 			} else {
8764 				event->vmtc_not_eligible = true;
8765 			}
8766 		} else {
8767 			event->vmtc_not_eligible = true;
8768 		}
8769 		vm_object_unlock(object);
8770 		object = VM_OBJECT_NULL;
8771 
8772 		/*
8773 		 * Now try to diagnose the type of failure by faulting
8774 		 * in a new copy and diff'ing it with what we saved.
8775 		 */
8776 		if (diagnose_buffer != NULL) {
8777 			vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8778 		}
8779 #if DEBUG || DEVELOPMENT
8780 		if (corruption_test_va != 0) {
8781 			corruption_test_va = 0;
8782 			event->vmtc_testing = true;
8783 		}
8784 #endif /* DEBUG || DEVELOPMENT */
8785 		ktriage_record(thread_tid(current_thread()),
8786 		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8787 		    0 /* arg */);
8788 		CA_EVENT_SEND(ca_event);
8789 		printf("Text page corruption detected for pid %d\n", proc_selfpid());
8790 		++vmtc_total;
8791 		return KERN_FAILURE; /* failure means we definitely found a corrupt page */
8792 	}
8793 done:
8794 	if (object != NULL) {
8795 		vm_object_unlock(object);
8796 	}
8797 	return KERN_SUCCESS;
8798 }
8799 
8800 #if DEBUG || DEVELOPMENT
8801 /*
8802  * For implementing unit tests - ask the pmap to corrupt a text page.
8803  * We have to find the page, to get the physical address, then invoke
8804  * the pmap.
8805  */
8806 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8807 
8808 kern_return_t
8809 vm_corrupt_text_addr(uintptr_t va)
8810 {
8811 	task_t                 task = current_task();
8812 	vm_map_t               map;
8813 	kern_return_t          kr = KERN_SUCCESS;
8814 	vm_object_t            object = VM_OBJECT_NULL;
8815 	vm_object_offset_t     offset;
8816 	vm_page_t              page = NULL;
8817 	pmap_paddr_t           pa;
8818 	vm_prot_t              prot;
8819 
8820 	map = task->map;
8821 	if (task->map == NULL) {
8822 		printf("corrupt_text_addr: no map\n");
8823 		return KERN_FAILURE;
8824 	}
8825 
8826 	kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page, &prot);
8827 	if (kr != KERN_SUCCESS) {
8828 		printf("corrupt_text_addr: page lookup failed\n");
8829 		return kr;
8830 	}
8831 	if (!(prot & VM_PROT_EXECUTE)) {
8832 		printf("corrupt_text_addr: page not executable\n");
8833 		return KERN_FAILURE;
8834 	}
8835 
8836 	/* get the physical address to use */
8837 	pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
8838 
8839 	/*
8840 	 * Check we have something we can work with.
8841 	 * Due to racing with pageout as we enter the sysctl,
8842 	 * it's theoretically possible to have the page disappear, just
8843 	 * before the lookup.
8844 	 *
8845 	 * That's unlikely to happen often. I've filed a radar 72857482
8846 	 * to bubble up the error here to the sysctl result and have the
8847 	 * test not FAIL in that case.
8848 	 */
8849 	if (page->vmp_busy) {
8850 		printf("corrupt_text_addr: vmp_busy\n");
8851 		kr = KERN_FAILURE;
8852 	}
8853 	if (page->vmp_cleaning) {
8854 		printf("corrupt_text_addr: vmp_cleaning\n");
8855 		kr = KERN_FAILURE;
8856 	}
8857 	if (page->vmp_laundry) {
8858 		printf("corrupt_text_addr: vmp_laundry\n");
8859 		kr = KERN_FAILURE;
8860 	}
8861 	if (vm_page_is_fictitious(page)) {
8862 		printf("corrupt_text_addr: vmp_fictitious\n");
8863 		kr = KERN_FAILURE;
8864 	}
8865 	if (page->vmp_precious) {
8866 		printf("corrupt_text_addr: vmp_precious\n");
8867 		kr = KERN_FAILURE;
8868 	}
8869 	if (page->vmp_absent) {
8870 		printf("corrupt_text_addr: vmp_absent\n");
8871 		kr = KERN_FAILURE;
8872 	}
8873 	if (VMP_ERROR_GET(page)) {
8874 		printf("corrupt_text_addr: vmp_error\n");
8875 		kr = KERN_FAILURE;
8876 	}
8877 	if (page->vmp_dirty) {
8878 		printf("corrupt_text_addr: vmp_dirty\n");
8879 		kr = KERN_FAILURE;
8880 	}
8881 	if (is_page_wired(page)) {
8882 		printf("corrupt_text_addr: wired\n");
8883 		kr = KERN_FAILURE;
8884 	}
8885 	if (!page->vmp_pmapped) {
8886 		printf("corrupt_text_addr: !vmp_pmapped\n");
8887 		kr = KERN_FAILURE;
8888 	}
8889 
8890 	if (kr == KERN_SUCCESS) {
8891 		printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8892 		kr = pmap_test_text_corruption(pa);
8893 		if (kr != KERN_SUCCESS) {
8894 			printf("corrupt_text_addr: pmap error %d\n", kr);
8895 		} else {
8896 			corruption_test_va = va;
8897 		}
8898 	} else {
8899 		printf("corrupt_text_addr: object %p\n", object);
8900 		printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8901 		printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8902 		printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8903 		printf("corrupt_text_addr: vm_page_t %p\n", page);
8904 		printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8905 		printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8906 	}
8907 
8908 	if (object != VM_OBJECT_NULL) {
8909 		vm_object_unlock(object);
8910 	}
8911 	return kr;
8912 }
8913 
8914 #endif /* DEBUG || DEVELOPMENT */
8915