xref: /xnu-8020.101.4/osfmk/vm/vm_fault.c (revision e7776783b89a353188416a9a346c6cdb4928faad)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm_fault.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Page fault handling module.
63  */
64 
65 #include <mach_cluster_stats.h>
66 #include <mach_pagemap.h>
67 #include <libkern/OSAtomic.h>
68 
69 #include <mach/mach_types.h>
70 #include <mach/kern_return.h>
71 #include <mach/message.h>       /* for error codes */
72 #include <mach/vm_param.h>
73 #include <mach/vm_behavior.h>
74 #include <mach/memory_object.h>
75 /* For memory_object_data_{request,unlock} */
76 #include <mach/sdt.h>
77 
78 #include <pexpert/pexpert.h>
79 #include <pexpert/device_tree.h>
80 
81 #include <kern/kern_types.h>
82 #include <kern/host_statistics.h>
83 #include <kern/counter.h>
84 #include <kern/task.h>
85 #include <kern/thread.h>
86 #include <kern/sched_prim.h>
87 #include <kern/host.h>
88 #include <kern/mach_param.h>
89 #include <kern/macro_help.h>
90 #include <kern/zalloc_internal.h>
91 #include <kern/misc_protos.h>
92 #include <kern/policy_internal.h>
93 
94 #include <vm/vm_compressor.h>
95 #include <vm/vm_compressor_pager.h>
96 #include <vm/vm_fault.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_object.h>
99 #include <vm/vm_page.h>
100 #include <vm/vm_kern.h>
101 #include <vm/pmap.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_protos.h>
104 #include <vm/vm_external.h>
105 #include <vm/memory_object.h>
106 #include <vm/vm_purgeable_internal.h>   /* Needed by some vm_page.h macros */
107 #include <vm/vm_shared_region.h>
108 
109 #include <sys/codesign.h>
110 #include <sys/reason.h>
111 #include <sys/signalvar.h>
112 
113 #include <sys/kdebug_triage.h>
114 
115 #include <san/kasan.h>
116 #include <libkern/coreanalytics/coreanalytics.h>
117 
118 #define VM_FAULT_CLASSIFY       0
119 
120 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
121 
122 int vm_protect_privileged_from_untrusted = 1;
123 
124 unsigned int    vm_object_pagein_throttle = 16;
125 
126 /*
127  * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control; it
128  * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
129  * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
130  * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
131  * keep the UI active so that the user has a chance to kill the offending task before the system
132  * completely hangs.
133  *
134  * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
135  * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
136  * will be throttled.  The throttling is done by giving the thread that's trying to demand zero a page a
137  * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
138  */
139 
140 extern void throttle_lowpri_io(int);
141 
142 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
143 
144 uint64_t vm_hard_throttle_threshold;
145 
146 #if DEBUG || DEVELOPMENT
147 static bool vmtc_panic_instead = false;
148 #endif /* DEBUG || DEVELOPMENT */
149 
150 OS_ALWAYS_INLINE
151 boolean_t
152 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
153 {
154 	return vm_wants_task_throttled(current_task()) ||
155 	       ((vm_page_free_count < vm_page_throttle_limit ||
156 	       HARD_THROTTLE_LIMIT_REACHED()) &&
157 	       proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
158 }
159 
160 #define HARD_THROTTLE_DELAY     10000   /* 10000 us == 10 ms */
161 #define SOFT_THROTTLE_DELAY     200     /* 200 us == .2 ms */
162 
163 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS   6
164 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC  20000
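
/*
 * With these defaults a thread may create up to
 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
 * = 6 * 20000 = 120,000 pages before the rate check in vm_page_throttled()
 * below will consider delaying it (and only then if free pages are scarce).
 */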
165 
166 
167 #define VM_STAT_DECOMPRESSIONS()        \
168 MACRO_BEGIN                             \
169 	counter_inc(&vm_statistics_decompressions); \
170 	current_thread()->decompressions++; \
171 MACRO_END
172 
173 boolean_t current_thread_aborted(void);
174 
175 /* Forward declarations of internal routines. */
176 static kern_return_t vm_fault_wire_fast(
177 	vm_map_t        map,
178 	vm_map_offset_t va,
179 	vm_prot_t       prot,
180 	vm_tag_t        wire_tag,
181 	vm_map_entry_t  entry,
182 	pmap_t          pmap,
183 	vm_map_offset_t pmap_addr,
184 	ppnum_t         *physpage_p);
185 
186 static kern_return_t vm_fault_internal(
187 	vm_map_t        map,
188 	vm_map_offset_t vaddr,
189 	vm_prot_t       caller_prot,
190 	boolean_t       change_wiring,
191 	vm_tag_t        wire_tag,
192 	int             interruptible,
193 	pmap_t          pmap,
194 	vm_map_offset_t pmap_addr,
195 	ppnum_t         *physpage_p);
196 
197 static void vm_fault_copy_cleanup(
198 	vm_page_t       page,
199 	vm_page_t       top_page);
200 
201 static void vm_fault_copy_dst_cleanup(
202 	vm_page_t       page);
203 
204 #if     VM_FAULT_CLASSIFY
205 extern void vm_fault_classify(vm_object_t       object,
206     vm_object_offset_t    offset,
207     vm_prot_t             fault_type);
208 
209 extern void vm_fault_classify_init(void);
210 #endif
211 
212 unsigned long vm_pmap_enter_blocked = 0;
213 unsigned long vm_pmap_enter_retried = 0;
214 
215 unsigned long vm_cs_validates = 0;
216 unsigned long vm_cs_revalidates = 0;
217 unsigned long vm_cs_query_modified = 0;
218 unsigned long vm_cs_validated_dirtied = 0;
219 unsigned long vm_cs_bitmap_validated = 0;
220 
221 void vm_pre_fault(vm_map_offset_t, vm_prot_t);
222 
223 extern char *kdp_compressor_decompressed_page;
224 extern addr64_t kdp_compressor_decompressed_page_paddr;
225 extern ppnum_t  kdp_compressor_decompressed_page_ppnum;
226 
227 struct vmrtfr {
228 	int vmrtfr_maxi;
229 	int vmrtfr_curi;
230 	int64_t vmrtf_total;
231 	vm_rtfault_record_t *vm_rtf_records;
232 } vmrtfrs;
233 #define VMRTF_DEFAULT_BUFSIZE (4096)
234 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
235 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
236 
237 static void vm_rtfrecord_lock(void);
238 static void vm_rtfrecord_unlock(void);
239 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
240 
241 extern lck_grp_t vm_page_lck_grp_bucket;
242 extern lck_attr_t vm_page_lck_attr;
243 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
244 
245 #if DEVELOPMENT || DEBUG
246 extern int madvise_free_debug;
247 #endif /* DEVELOPMENT || DEBUG */
248 
249 #if CONFIG_FREEZE
250 __startup_func
251 static bool
252 osenvironment_is_diagnostics(void)
253 {
254 	DTEntry chosen;
255 	const char *osenvironment;
256 	unsigned int size;
257 	if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
258 		if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
259 			return strcmp(osenvironment, "diagnostics") == 0;
260 		}
261 	}
262 	return false;
263 }
264 #endif /* CONFIG_FREEZE */
265 
266 /*
267  *	Routine:	vm_fault_init
268  *	Purpose:
269  *		Initialize our private data structures.
270  */
271 __startup_func
272 void
273 vm_fault_init(void)
274 {
275 	int i, vm_compressor_temp;
276 	boolean_t need_default_val = TRUE;
277 	/*
278 	 * Choose a value for the hard throttle threshold based on the amount of ram.  The threshold is
279 	 * computed as a percentage of available memory, and the percentage used is scaled inversely with
280 	 * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small memory systems
281 	 * and reduce the value down to 10% for very large memory configurations.  This helps give us a
282 	 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
283 	 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
284 	 */
285 
286 	vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
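	/*
	 * Worked examples of the formula above (approximate, assuming
	 * sane_size reflects the machine's usable physical memory):
	 *    4 GB of RAM:  35 - 4  = 31%  ->  threshold ~= 1.24 GB
	 *   16 GB of RAM:  35 - 16 = 19%  ->  threshold ~= 3.04 GB
	 *   32 GB of RAM:  35 - 25 = 10%  ->  threshold ~= 3.2 GB  (the percentage bottoms out at 10%)
	 */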
287 
288 	/*
289 	 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
290 	 */
291 
292 	if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
293 		for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
294 			if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
295 				need_default_val = FALSE;
296 				vm_compressor_mode = vm_compressor_temp;
297 				break;
298 			}
299 		}
300 		if (need_default_val) {
301 			printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
302 		}
303 	}
304 #if CONFIG_FREEZE
305 	if (need_default_val) {
306 		if (osenvironment_is_diagnostics()) {
307 			printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
308 			vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
309 			need_default_val = false;
310 		}
311 	}
312 #endif /* CONFIG_FREEZE */
313 	if (need_default_val) {
314 		/* If no boot arg or incorrect boot arg, try device tree. */
315 		PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
316 	}
317 	printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
318 	vm_config_init();
319 
320 	PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
321 	    &vm_protect_privileged_from_untrusted,
322 	    sizeof(vm_protect_privileged_from_untrusted));
323 
324 #if DEBUG || DEVELOPMENT
325 	(void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
326 
327 	if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
328 		madvise_free_debug = 0;
329 	}
330 
331 #endif /* DEBUG || DEVELOPMENT */
332 }
333 
334 __startup_func
335 static void
336 vm_rtfault_record_init(void)
337 {
338 	size_t size;
339 
340 	vmrtf_num_records = MAX(vmrtf_num_records, 1);
341 	size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
342 	vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
343 	    ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
344 	vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
345 }
346 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
347 
348 /*
349  *	Routine:	vm_fault_cleanup
350  *	Purpose:
351  *		Clean up the result of vm_fault_page.
352  *	Results:
353  *		The paging reference for "object" is released.
354  *		"object" is unlocked.
355  *		If "top_page" is not null,  "top_page" is
356  *		freed and the paging reference for the object
357  *		containing it is released.
358  *
359  *	In/out conditions:
360  *		"object" must be locked.
361  */
362 void
363 vm_fault_cleanup(
364 	vm_object_t     object,
365 	vm_page_t       top_page)
366 {
367 	vm_object_paging_end(object);
368 	vm_object_unlock(object);
369 
370 	if (top_page != VM_PAGE_NULL) {
371 		object = VM_PAGE_OBJECT(top_page);
372 
373 		vm_object_lock(object);
374 		VM_PAGE_FREE(top_page);
375 		vm_object_paging_end(object);
376 		vm_object_unlock(object);
377 	}
378 }
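
#if 0
/*
 * A minimal usage sketch (not compiled) of how a caller might pair
 * vm_fault_page() with vm_fault_cleanup() under the protocol described
 * here and in the vm_fault_page() header comment below: the caller locks
 * the object and donates a paging reference; on success the object that
 * holds "result_page" comes back locked with a paging reference, which
 * vm_fault_cleanup() releases.  The helper name and the error handling
 * are illustrative only, and the VM_FAULT_SUCCESS_NO_PAGE case
 * (device / large-page objects) is not handled.
 */
static kern_return_t
vm_fault_page_usage_sketch(
	vm_object_t             object,         /* referenced by the caller */
	vm_object_offset_t      offset,
	vm_object_fault_info_t  fault_info)
{
	vm_prot_t               prot = VM_PROT_READ;
	vm_page_t               result_page, top_page;
	kern_return_t           error_code = 0;
	vm_fault_return_t       result;

	vm_object_lock(object);
	vm_object_paging_begin(object);

	result = vm_fault_page(object, offset,
	    VM_PROT_READ,               /* fault_type */
	    FALSE,                      /* must_be_resident */
	    FALSE,                      /* caller_lookup */
	    &prot, &result_page, &top_page,
	    NULL,                       /* type_of_fault */
	    &error_code,
	    FALSE,                      /* no_zero_fill */
	    FALSE,                      /* data_supply */
	    fault_info);

	if (result != VM_FAULT_SUCCESS) {
		/*
		 * On failure vm_fault_page() has already released the object
		 * lock and paging reference (VM_FAULT_SUCCESS_NO_PAGE, where
		 * the object is still locked, is not handled in this sketch).
		 */
		return error_code ? error_code : KERN_FAILURE;
	}

	/* ... use the busy "result_page" here ... */

	PAGE_WAKEUP_DONE(result_page);
	vm_fault_cleanup(VM_PAGE_OBJECT(result_page), top_page);
	return KERN_SUCCESS;
}
#endif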
379 
380 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
381 
382 
383 boolean_t       vm_page_deactivate_behind = TRUE;
384 /*
385  * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
386  */
387 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW     128
388 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER    16              /* don't make this too big... */
389                                                                 /* we use it to size an array on the stack */
390 
391 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
392 
393 #define MAX_SEQUENTIAL_RUN      (1024 * 1024 * 1024)
394 
395 /*
396  * vm_page_is_sequential
397  *
398  * Determine if sequential access is in progress
399  * in accordance with the behavior specified.
400  * Update state to indicate current access pattern.
401  *
402  * object must have at least the shared lock held
403  */
404 static
405 void
406 vm_fault_is_sequential(
407 	vm_object_t             object,
408 	vm_object_offset_t      offset,
409 	vm_behavior_t           behavior)
410 {
411 	vm_object_offset_t      last_alloc;
412 	int                     sequential;
413 	int                     orig_sequential;
414 
415 	last_alloc = object->last_alloc;
416 	sequential = object->sequential;
417 	orig_sequential = sequential;
418 
419 	offset = vm_object_trunc_page(offset);
420 	if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
421 		/* re-faulting in the same page: no change in behavior */
422 		return;
423 	}
424 
425 	switch (behavior) {
426 	case VM_BEHAVIOR_RANDOM:
427 		/*
428 		 * reset indicator of sequential behavior
429 		 */
430 		sequential = 0;
431 		break;
432 
433 	case VM_BEHAVIOR_SEQUENTIAL:
434 		if (offset && last_alloc == offset - PAGE_SIZE_64) {
435 			/*
436 			 * advance indicator of sequential behavior
437 			 */
438 			if (sequential < MAX_SEQUENTIAL_RUN) {
439 				sequential += PAGE_SIZE;
440 			}
441 		} else {
442 			/*
443 			 * reset indicator of sequential behavior
444 			 */
445 			sequential = 0;
446 		}
447 		break;
448 
449 	case VM_BEHAVIOR_RSEQNTL:
450 		if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
451 			/*
452 			 * advance indicator of sequential behavior
453 			 */
454 			if (sequential > -MAX_SEQUENTIAL_RUN) {
455 				sequential -= PAGE_SIZE;
456 			}
457 		} else {
458 			/*
459 			 * reset indicator of sequential behavior
460 			 */
461 			sequential = 0;
462 		}
463 		break;
464 
465 	case VM_BEHAVIOR_DEFAULT:
466 	default:
467 		if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
468 			/*
469 			 * advance indicator of sequential behavior
470 			 */
471 			if (sequential < 0) {
472 				sequential = 0;
473 			}
474 			if (sequential < MAX_SEQUENTIAL_RUN) {
475 				sequential += PAGE_SIZE;
476 			}
477 		} else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
478 			/*
479 			 * advance indicator of sequential behavior
480 			 */
481 			if (sequential > 0) {
482 				sequential = 0;
483 			}
484 			if (sequential > -MAX_SEQUENTIAL_RUN) {
485 				sequential -= PAGE_SIZE;
486 			}
487 		} else {
488 			/*
489 			 * reset indicator of sequential behavior
490 			 */
491 			sequential = 0;
492 		}
493 		break;
494 	}
495 	if (sequential != orig_sequential) {
496 		if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
497 			/*
498 			 * if someone else has already updated object->sequential
499 			 * don't bother trying to update it or object->last_alloc
500 			 */
501 			return;
502 		}
503 	}
504 	/*
505 	 * I'd like to do this with an OSCompareAndSwap64, but that
506 	 * doesn't exist for PPC...  however, it shouldn't matter
507 	 * that much... last_alloc is maintained so that we can determine
508 	 * if a sequential access pattern is taking place... if only
509 	 * one thread is banging on this object, no problem with the unprotected
510 	 * update... if 2 or more threads are banging away, we run the risk of
511 	 * someone seeing a mangled update... however, in the face of multiple
512 	 * accesses, no sequential access pattern can develop anyway, so we
513 	 * haven't lost any real info.
514 	 */
515 	object->last_alloc = offset;
516 }
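
/*
 * Illustrative trace (VM_BEHAVIOR_DEFAULT, starting from sequential == 0 and
 * last_alloc == 0): faults at offsets PAGE_SIZE, 2 * PAGE_SIZE and
 * 3 * PAGE_SIZE each take the "advance" path and grow object->sequential to
 * 3 * PAGE_SIZE; a following fault one page *behind* last_alloc resets the
 * run and starts it moving negative (-PAGE_SIZE), and a fault at any
 * non-adjacent offset resets it to 0.
 */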
517 
518 #if DEVELOPMENT || DEBUG
519 uint64_t vm_page_deactivate_behind_count = 0;
520 #endif /* DEVELOPMENT || DEBUG */
521 
522 /*
523  * vm_fault_deactivate_behind
524  *
525  * Determine if sequential access is in progress
526  * in accordance with the behavior specified.  If
527  * so, compute a potential page to deactivate and
528  * deactivate it.
529  *
530  * object must be locked.
531  *
532  * return TRUE if we actually deactivate a page
533  */
534 static
535 boolean_t
536 vm_fault_deactivate_behind(
537 	vm_object_t             object,
538 	vm_object_offset_t      offset,
539 	vm_behavior_t           behavior)
540 {
541 	int             n;
542 	int             pages_in_run = 0;
543 	int             max_pages_in_run = 0;
544 	int             sequential_run;
545 	int             sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
546 	vm_object_offset_t      run_offset = 0;
547 	vm_object_offset_t      pg_offset = 0;
548 	vm_page_t       m;
549 	vm_page_t       page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
550 
551 	pages_in_run = 0;
552 #if TRACEFAULTPAGE
553 	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
554 #endif
555 	if (object == kernel_object || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
556 		/*
557 		 * Do not deactivate pages from the kernel object: they
558 		 * are not intended to become pageable.
559 		 * or we've disabled the deactivate behind mechanism
560 		 * or we are dealing with an offset that is not aligned to
561 		 * the system's PAGE_SIZE because in that case we will
562 		 * handle the deactivation on the aligned offset and, thus,
563 		 * the full PAGE_SIZE page once. This helps us avoid the redundant
564 		 * deactivates and the extra faults.
565 		 */
566 		return FALSE;
567 	}
568 	if ((sequential_run = object->sequential)) {
569 		if (sequential_run < 0) {
570 			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
571 			sequential_run = 0 - sequential_run;
572 		} else {
573 			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
574 		}
575 	}
576 	switch (behavior) {
577 	case VM_BEHAVIOR_RANDOM:
578 		break;
579 	case VM_BEHAVIOR_SEQUENTIAL:
580 		if (sequential_run >= (int)PAGE_SIZE) {
581 			run_offset = 0 - PAGE_SIZE_64;
582 			max_pages_in_run = 1;
583 		}
584 		break;
585 	case VM_BEHAVIOR_RSEQNTL:
586 		if (sequential_run >= (int)PAGE_SIZE) {
587 			run_offset = PAGE_SIZE_64;
588 			max_pages_in_run = 1;
589 		}
590 		break;
591 	case VM_BEHAVIOR_DEFAULT:
592 	default:
593 	{       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
594 
595 		/*
596 		 * determine if the run of sequential access has been
597 		 * long enough on an object with default access behavior
598 		 * to consider it for deactivation
599 		 */
600 		if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
601 			/*
602 			 * the comparisons between offset and behind are done
603 			 * in this kind of odd fashion in order to prevent wrap around
604 			 * at the end points
605 			 */
606 			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
607 				if (offset >= behind) {
608 					run_offset = 0 - behind;
609 					pg_offset = PAGE_SIZE_64;
610 					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
611 				}
612 			} else {
613 				if (offset < -behind) {
614 					run_offset = behind;
615 					pg_offset = 0 - PAGE_SIZE_64;
616 					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
617 				}
618 			}
619 		}
620 		break;}
621 	}
622 	for (n = 0; n < max_pages_in_run; n++) {
623 		m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
624 
625 		if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) {
626 			page_run[pages_in_run++] = m;
627 
628 			/*
629 			 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
630 			 *
631 			 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
632 			 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
633 			 * new reference happens. If no futher references happen on the page after that remote TLB flushes
634 			 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
635 			 * by pageout_scan, which is just fine since the last reference would have happened quite far
636 			 * in the past (TLB caches don't hang around for very long), and of course could just as easily
637 			 * have happened before we did the deactivate_behind.
638 			 */
639 			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
640 		}
641 	}
642 	if (pages_in_run) {
643 		vm_page_lockspin_queues();
644 
645 		for (n = 0; n < pages_in_run; n++) {
646 			m = page_run[n];
647 
648 			vm_page_deactivate_internal(m, FALSE);
649 
650 #if DEVELOPMENT || DEBUG
651 			vm_page_deactivate_behind_count++;
652 #endif /* DEVELOPMENT || DEBUG */
653 
654 #if TRACEFAULTPAGE
655 			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
656 #endif
657 		}
658 		vm_page_unlock_queues();
659 
660 		return TRUE;
661 	}
662 	return FALSE;
663 }
664 
665 
666 #if (DEVELOPMENT || DEBUG)
667 uint32_t        vm_page_creation_throttled_hard = 0;
668 uint32_t        vm_page_creation_throttled_soft = 0;
669 uint64_t        vm_page_creation_throttle_avoided = 0;
670 #endif /* DEVELOPMENT || DEBUG */
671 
672 static int
673 vm_page_throttled(boolean_t page_kept)
674 {
675 	clock_sec_t     elapsed_sec;
676 	clock_sec_t     tv_sec;
677 	clock_usec_t    tv_usec;
678 
679 	thread_t thread = current_thread();
680 
681 	if (thread->options & TH_OPT_VMPRIV) {
682 		return 0;
683 	}
684 
685 	if (thread->t_page_creation_throttled) {
686 		thread->t_page_creation_throttled = 0;
687 
688 		if (page_kept == FALSE) {
689 			goto no_throttle;
690 		}
691 	}
692 	if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
693 #if (DEVELOPMENT || DEBUG)
694 		thread->t_page_creation_throttled_hard++;
695 		OSAddAtomic(1, &vm_page_creation_throttled_hard);
696 #endif /* DEVELOPMENT || DEBUG */
697 		return HARD_THROTTLE_DELAY;
698 	}
699 
700 	if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
701 	    thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
702 		if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
703 #if (DEVELOPMENT || DEBUG)
704 			OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
705 #endif
706 			goto no_throttle;
707 		}
708 		clock_get_system_microtime(&tv_sec, &tv_usec);
709 
710 		elapsed_sec = tv_sec - thread->t_page_creation_time;
711 
712 		if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
713 		    (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
714 			if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
715 				/*
716 				 * we'll reset our stats to give a well behaved app
717 				 * that was unlucky enough to accumulate a bunch of pages
718 				 * over a long period of time a chance to get out of
719 				 * the throttled state... we reset the counter and timestamp
720 				 * so that if it stays under the rate limit for the next second
721 				 * it will be back in our good graces... if it exceeds it, it
722 				 * will remain in the throttled state
723 				 */
724 				thread->t_page_creation_time = tv_sec;
725 				thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
726 			}
727 			VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
728 
729 			thread->t_page_creation_throttled = 1;
730 
731 			if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
732 #if (DEVELOPMENT || DEBUG)
733 				thread->t_page_creation_throttled_hard++;
734 				OSAddAtomic(1, &vm_page_creation_throttled_hard);
735 #endif /* DEVELOPMENT || DEBUG */
736 				return HARD_THROTTLE_DELAY;
737 			} else {
738 #if (DEVELOPMENT || DEBUG)
739 				thread->t_page_creation_throttled_soft++;
740 				OSAddAtomic(1, &vm_page_creation_throttled_soft);
741 #endif /* DEVELOPMENT || DEBUG */
742 				return SOFT_THROTTLE_DELAY;
743 			}
744 		}
745 		thread->t_page_creation_time = tv_sec;
746 		thread->t_page_creation_count = 0;
747 	}
748 no_throttle:
749 	thread->t_page_creation_count++;
750 
751 	return 0;
752 }
753 
754 
755 /*
756  * check for various conditions that would
757  * prevent us from creating a ZF page...
758  * cleanup is based on being called from vm_fault_page
759  *
760  * object must be locked
761  * object == m->vmp_object
762  */
763 static vm_fault_return_t
764 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
765 {
766 	int throttle_delay;
767 
768 	if (object->shadow_severed ||
769 	    VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
770 		/*
771 		 * Either:
772 		 * 1. the shadow chain was severed,
773 		 * 2. the purgeable object is volatile or empty and is marked
774 		 *    to fault on access while volatile.
775 		 * Just have to return an error at this point
776 		 */
777 		if (m != VM_PAGE_NULL) {
778 			VM_PAGE_FREE(m);
779 		}
780 		vm_fault_cleanup(object, first_m);
781 
782 		thread_interrupt_level(interruptible_state);
783 
784 		if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
785 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
786 		}
787 
788 		if (object->shadow_severed) {
789 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
790 		}
791 		return VM_FAULT_MEMORY_ERROR;
792 	}
793 	if (page_throttle == TRUE) {
794 		if ((throttle_delay = vm_page_throttled(FALSE))) {
795 			/*
796 			 * we're throttling zero-fills...
797 			 * treat this as if we couldn't grab a page
798 			 */
799 			if (m != VM_PAGE_NULL) {
800 				VM_PAGE_FREE(m);
801 			}
802 			vm_fault_cleanup(object, first_m);
803 
804 			VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
805 
806 			delay(throttle_delay);
807 
808 			if (current_thread_aborted()) {
809 				thread_interrupt_level(interruptible_state);
810 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
811 				return VM_FAULT_INTERRUPTED;
812 			}
813 			thread_interrupt_level(interruptible_state);
814 
815 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
816 			return VM_FAULT_MEMORY_SHORTAGE;
817 		}
818 	}
819 	return VM_FAULT_SUCCESS;
820 }
821 
822 /*
823  * Clear the code signing bits on the given page_t
824  */
825 static void
826 vm_fault_cs_clear(vm_page_t m)
827 {
828 	m->vmp_cs_validated = VMP_CS_ALL_FALSE;
829 	m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
830 	m->vmp_cs_nx = VMP_CS_ALL_FALSE;
831 }
832 
833 /*
834  * Enqueues the given page on the throttled queue.
835  * The caller must hold the vm_page_queue_lock and it will be held on return.
836  */
837 static void
838 vm_fault_enqueue_throttled_locked(vm_page_t m)
839 {
840 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
841 	assert(!VM_PAGE_WIRED(m));
842 
843 	/*
844 	 * can't be on the pageout queue since we don't
845 	 * have a pager to try and clean to
846 	 */
847 	vm_page_queues_remove(m, TRUE);
848 	vm_page_check_pageable_safe(m);
849 	vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
850 	m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
851 	vm_page_throttled_count++;
852 }
853 
854 /*
855  * do the work to zero fill a page and
856  * inject it into the correct paging queue
857  *
858  * m->vmp_object must be locked
859  * page queue lock must NOT be held
860  */
861 static int
862 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
863 {
864 	int my_fault = DBG_ZERO_FILL_FAULT;
865 	vm_object_t     object;
866 
867 	object = VM_PAGE_OBJECT(m);
868 
869 	/*
870 	 * This is a zero-fill page fault...
871 	 *
872 	 * Checking the page lock is a waste of
873 	 * time;  this page was absent, so
874 	 * it can't be page locked by a pager.
875 	 *
876 	 * we also consider it undefined
877 	 * with respect to instruction
878 	 * execution.  i.e. it is the responsibility
879 	 * of higher layers to call for an instruction
880 	 * sync after changing the contents and before
881 	 * sending a program into this area.  We
882 	 * choose this approach for performance
883 	 */
884 	vm_fault_cs_clear(m);
885 	m->vmp_pmapped = TRUE;
886 
887 	if (no_zero_fill == TRUE) {
888 		my_fault = DBG_NZF_PAGE_FAULT;
889 
890 		if (m->vmp_absent && m->vmp_busy) {
891 			return my_fault;
892 		}
893 	} else {
894 		vm_page_zero_fill(m);
895 
896 		counter_inc(&vm_statistics_zero_fill_count);
897 		DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
898 	}
899 	assert(!m->vmp_laundry);
900 	assert(object != kernel_object);
901 	//assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
902 	if (!VM_DYNAMIC_PAGING_ENABLED() &&
903 	    (object->purgable == VM_PURGABLE_DENY ||
904 	    object->purgable == VM_PURGABLE_NONVOLATILE ||
905 	    object->purgable == VM_PURGABLE_VOLATILE)) {
906 		vm_page_lockspin_queues();
907 		if (!VM_DYNAMIC_PAGING_ENABLED()) {
908 			vm_fault_enqueue_throttled_locked(m);
909 		}
910 		vm_page_unlock_queues();
911 	}
912 	return my_fault;
913 }
914 
915 
916 /*
917  *	Routine:	vm_fault_page
918  *	Purpose:
919  *		Find the resident page for the virtual memory
920  *		specified by the given virtual memory object
921  *		and offset.
922  *	Additional arguments:
923  *		The required permissions for the page is given
924  *		in "fault_type".  Desired permissions are included
925  *		in "protection".
926  *		fault_info is passed along to determine pagein cluster
927  *		limits... it contains the expected reference pattern,
928  *		cluster size if available, etc...
929  *
930  *		If the desired page is known to be resident (for
931  *		example, because it was previously wired down), asserting
932  *		the "unwiring" parameter will speed the search.
933  *
934  *		If the operation can be interrupted (by thread_abort
935  *		or thread_terminate), then the "interruptible"
936  *		parameter should be asserted.
937  *
938  *	Results:
939  *		The page containing the proper data is returned
940  *		in "result_page".
941  *
942  *	In/out conditions:
943  *		The source object must be locked and referenced,
944  *		and must donate one paging reference.  The reference
945  *		is not affected.  The paging reference and lock are
946  *		consumed.
947  *
948  *		If the call succeeds, the object in which "result_page"
949  *		resides is left locked and holding a paging reference.
950  *		If this is not the original object, a busy page in the
951  *		original object is returned in "top_page", to prevent other
952  *		callers from pursuing this same data, along with a paging
953  *		reference for the original object.  The "top_page" should
954  *		be destroyed when this guarantee is no longer required.
955  *		The "result_page" is also left busy.  It is not removed
956  *		from the pageout queues.
957  *	Special Case:
958  *		A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
959  *		fault succeeded but there's no VM page (i.e. the VM object
960  *              does not actually hold VM pages, but device memory or
961  *		large pages).  The object is still locked and we still hold a
962  *		paging_in_progress reference.
963  */
964 unsigned int vm_fault_page_blocked_access = 0;
965 unsigned int vm_fault_page_forced_retry = 0;
966 
967 vm_fault_return_t
968 vm_fault_page(
969 	/* Arguments: */
970 	vm_object_t     first_object,   /* Object to begin search */
971 	vm_object_offset_t first_offset,        /* Offset into object */
972 	vm_prot_t       fault_type,     /* What access is requested */
973 	boolean_t       must_be_resident,/* Must page be resident? */
974 	boolean_t       caller_lookup,  /* caller looked up page */
975 	/* Modifies in place: */
976 	vm_prot_t       *protection,    /* Protection for mapping */
977 	vm_page_t       *result_page,   /* Page found, if successful */
978 	/* Returns: */
979 	vm_page_t       *top_page,      /* Page in top object, if
980                                          * not result_page.  */
981 	int             *type_of_fault, /* if non-null, fill in with type of fault
982                                          * COW, zero-fill, etc... returned in trace point */
983 	/* More arguments: */
984 	kern_return_t   *error_code,    /* code if page is in error */
985 	boolean_t       no_zero_fill,   /* don't zero fill absent pages */
986 	boolean_t       data_supply,    /* treat as data_supply if
987                                          * it is a write fault and a full
988                                          * page is provided */
989 	vm_object_fault_info_t fault_info)
990 {
991 	vm_page_t               m;
992 	vm_object_t             object;
993 	vm_object_offset_t      offset;
994 	vm_page_t               first_m;
995 	vm_object_t             next_object;
996 	vm_object_t             copy_object;
997 	boolean_t               look_for_page;
998 	boolean_t               force_fault_retry = FALSE;
999 	vm_prot_t               access_required = fault_type;
1000 	vm_prot_t               wants_copy_flag;
1001 	kern_return_t           wait_result;
1002 	wait_interrupt_t        interruptible_state;
1003 	boolean_t               data_already_requested = FALSE;
1004 	vm_behavior_t           orig_behavior;
1005 	vm_size_t               orig_cluster_size;
1006 	vm_fault_return_t       error;
1007 	int                     my_fault;
1008 	uint32_t                try_failed_count;
1009 	int                     interruptible; /* how may fault be interrupted? */
1010 	int                     external_state = VM_EXTERNAL_STATE_UNKNOWN;
1011 	memory_object_t         pager;
1012 	vm_fault_return_t       retval;
1013 	int                     grab_options;
1014 	bool                    clear_absent_on_error = false;
1015 
1016 /*
1017  * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1018  * marked as paged out in the compressor pager or the pager doesn't exist.
1019  * Note also that if the pager for an internal object
1020  * has not been created, the pager is not invoked regardless of the value
1021  * of MUST_ASK_PAGER().
1022  *
1023  * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1024  * is marked as paged out in the compressor pager.
1025  * PAGED_OUT() is used to determine if a page has already been pushed
1026  * into a copy object in order to avoid a redundant page out operation.
1027  */
1028 #define MUST_ASK_PAGER(o, f, s)                                 \
1029 	((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1030 
1031 #define PAGED_OUT(o, f) \
1032 	(VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
1033 
1034 /*
1035  *	Recovery actions
1036  */
1037 #define RELEASE_PAGE(m)                                 \
1038 	MACRO_BEGIN                                     \
1039 	PAGE_WAKEUP_DONE(m);                            \
1040 	if ( !VM_PAGE_PAGEABLE(m)) {                    \
1041 	        vm_page_lockspin_queues();              \
1042 	        if (clear_absent_on_error && m->vmp_absent) {\
1043 	                vm_page_zero_fill(m);           \
1044 	                counter_inc(&vm_statistics_zero_fill_count);\
1045 	                DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);\
1046 	                m->vmp_absent = false;          \
1047 	        }                                       \
1048 	        if ( !VM_PAGE_PAGEABLE(m)) {            \
1049 	                if (VM_CONFIG_COMPRESSOR_IS_ACTIVE)     \
1050 	                        vm_page_deactivate(m);          \
1051 	                else                                    \
1052 	                        vm_page_activate(m);            \
1053 	        }                                               \
1054 	        vm_page_unlock_queues();                        \
1055 	}                                                       \
1056 	clear_absent_on_error = false;                  \
1057 	MACRO_END
1058 
1059 #if TRACEFAULTPAGE
1060 	dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1061 #endif
1062 
1063 	interruptible = fault_info->interruptible;
1064 	interruptible_state = thread_interrupt_level(interruptible);
1065 
1066 	/*
1067 	 *	INVARIANTS (through entire routine):
1068 	 *
1069 	 *	1)	At all times, we must either have the object
1070 	 *		lock or a busy page in some object to prevent
1071 	 *		some other thread from trying to bring in
1072 	 *		the same page.
1073 	 *
1074 	 *		Note that we cannot hold any locks during the
1075 	 *		pager access or when waiting for memory, so
1076 	 *		we use a busy page then.
1077 	 *
1078 	 *	2)	To prevent another thread from racing us down the
1079 	 *		shadow chain and entering a new page in the top
1080 	 *		object before we do, we must keep a busy page in
1081 	 *		the top object while following the shadow chain.
1082 	 *
1083 	 *	3)	We must increment paging_in_progress on any object
1084 	 *		for which we have a busy page before dropping
1085 	 *		the object lock
1086 	 *
1087 	 *	4)	We leave busy pages on the pageout queues.
1088 	 *		If the pageout daemon comes across a busy page,
1089 	 *		it will remove the page from the pageout queues.
1090 	 */
1091 
1092 	object = first_object;
1093 	offset = first_offset;
1094 	first_m = VM_PAGE_NULL;
1095 	access_required = fault_type;
1096 
1097 	/*
1098 	 * default type of fault
1099 	 */
1100 	my_fault = DBG_CACHE_HIT_FAULT;
1101 	thread_pri_floor_t token;
1102 	bool    drop_floor = false;
1103 
1104 	while (TRUE) {
1105 #if TRACEFAULTPAGE
1106 		dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
1107 #endif
1108 
1109 		grab_options = 0;
1110 #if CONFIG_SECLUDED_MEMORY
1111 		if (object->can_grab_secluded) {
1112 			grab_options |= VM_PAGE_GRAB_SECLUDED;
1113 		}
1114 #endif /* CONFIG_SECLUDED_MEMORY */
1115 
1116 		if (!object->alive) {
1117 			/*
1118 			 * object is no longer valid
1119 			 * clean up and return error
1120 			 */
1121 			vm_fault_cleanup(object, first_m);
1122 			thread_interrupt_level(interruptible_state);
1123 
1124 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1125 			return VM_FAULT_MEMORY_ERROR;
1126 		}
1127 
1128 		if (!object->pager_created && object->phys_contiguous) {
1129 			/*
1130 			 * A physically-contiguous object without a pager:
1131 			 * must be a "large page" object.  We do not deal
1132 			 * with VM pages for this object.
1133 			 */
1134 			caller_lookup = FALSE;
1135 			m = VM_PAGE_NULL;
1136 			goto phys_contig_object;
1137 		}
1138 
1139 		if (object->blocked_access) {
1140 			/*
1141 			 * Access to this VM object has been blocked.
1142 			 * Replace our "paging_in_progress" reference with
1143 			 * a "activity_in_progress" reference and wait for
1144 			 * access to be unblocked.
1145 			 */
1146 			caller_lookup = FALSE; /* no longer valid after sleep */
1147 			vm_object_activity_begin(object);
1148 			vm_object_paging_end(object);
1149 			while (object->blocked_access) {
1150 				vm_object_sleep(object,
1151 				    VM_OBJECT_EVENT_UNBLOCKED,
1152 				    THREAD_UNINT);
1153 			}
1154 			vm_fault_page_blocked_access++;
1155 			vm_object_paging_begin(object);
1156 			vm_object_activity_end(object);
1157 		}
1158 
1159 		/*
1160 		 * See whether the page at 'offset' is resident
1161 		 */
1162 		if (caller_lookup == TRUE) {
1163 			/*
1164 			 * The caller has already looked up the page
1165 			 * and gave us the result in "result_page".
1166 			 * We can use this for the first lookup but
1167 			 * it loses its validity as soon as we unlock
1168 			 * the object.
1169 			 */
1170 			m = *result_page;
1171 			caller_lookup = FALSE; /* no longer valid after that */
1172 		} else {
1173 			m = vm_page_lookup(object, vm_object_trunc_page(offset));
1174 		}
1175 #if TRACEFAULTPAGE
1176 		dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1177 #endif
1178 		if (m != VM_PAGE_NULL) {
1179 			if (m->vmp_busy) {
1180 				/*
1181 				 * The page is being brought in,
1182 				 * wait for it and then retry.
1183 				 */
1184 #if TRACEFAULTPAGE
1185 				dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1186 #endif
1187 				wait_result = PAGE_SLEEP(object, m, interruptible);
1188 
1189 				if (wait_result != THREAD_AWAKENED) {
1190 					vm_fault_cleanup(object, first_m);
1191 					thread_interrupt_level(interruptible_state);
1192 
1193 					if (wait_result == THREAD_RESTART) {
1194 						return VM_FAULT_RETRY;
1195 					} else {
1196 						kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1197 						return VM_FAULT_INTERRUPTED;
1198 					}
1199 				}
1200 				continue;
1201 			}
1202 			if (m->vmp_laundry) {
1203 				m->vmp_free_when_done = FALSE;
1204 
1205 				if (!m->vmp_cleaning) {
1206 					vm_pageout_steal_laundry(m, FALSE);
1207 				}
1208 			}
1209 			vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
1210 			if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
1211 				/*
1212 				 * Guard page: off limits !
1213 				 */
1214 				if (fault_type == VM_PROT_NONE) {
1215 					/*
1216 					 * The fault is not requesting any
1217 					 * access to the guard page, so it must
1218 					 * be just to wire or unwire it.
1219 					 * Let's pretend it succeeded...
1220 					 */
1221 					m->vmp_busy = TRUE;
1222 					*result_page = m;
1223 					assert(first_m == VM_PAGE_NULL);
1224 					*top_page = first_m;
1225 					if (type_of_fault) {
1226 						*type_of_fault = DBG_GUARD_FAULT;
1227 					}
1228 					thread_interrupt_level(interruptible_state);
1229 					return VM_FAULT_SUCCESS;
1230 				} else {
1231 					/*
1232 					 * The fault requests access to the
1233 					 * guard page: let's deny that !
1234 					 */
1235 					vm_fault_cleanup(object, first_m);
1236 					thread_interrupt_level(interruptible_state);
1237 					kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1238 					return VM_FAULT_MEMORY_ERROR;
1239 				}
1240 			}
1241 
1242 			if (m->vmp_error) {
1243 				/*
1244 				 * The page is in error, give up now.
1245 				 */
1246 #if TRACEFAULTPAGE
1247 				dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code);      /* (TEST/DEBUG) */
1248 #endif
1249 				if (error_code) {
1250 					*error_code = KERN_MEMORY_ERROR;
1251 				}
1252 				VM_PAGE_FREE(m);
1253 
1254 				vm_fault_cleanup(object, first_m);
1255 				thread_interrupt_level(interruptible_state);
1256 
1257 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1258 				return VM_FAULT_MEMORY_ERROR;
1259 			}
1260 			if (m->vmp_restart) {
1261 				/*
1262 				 * The pager wants us to restart
1263 				 * at the top of the chain,
1264 				 * typically because it has moved the
1265 				 * page to another pager, then do so.
1266 				 */
1267 #if TRACEFAULTPAGE
1268 				dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1269 #endif
1270 				VM_PAGE_FREE(m);
1271 
1272 				vm_fault_cleanup(object, first_m);
1273 				thread_interrupt_level(interruptible_state);
1274 
1275 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1276 				return VM_FAULT_RETRY;
1277 			}
1278 			if (m->vmp_absent) {
1279 				/*
1280 				 * The page isn't busy, but is absent,
1281 				 * therefore it's deemed "unavailable".
1282 				 *
1283 				 * Remove the non-existent page (unless it's
1284 				 * in the top object) and move on down to the
1285 				 * next object (if there is one).
1286 				 */
1287 #if TRACEFAULTPAGE
1288 				dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow);  /* (TEST/DEBUG) */
1289 #endif
1290 				next_object = object->shadow;
1291 
1292 				if (next_object == VM_OBJECT_NULL) {
1293 					/*
1294 					 * Absent page at bottom of shadow
1295 					 * chain; zero fill the page we left
1296 					 * busy in the first object, and free
1297 					 * the absent page.
1298 					 */
1299 					assert(!must_be_resident);
1300 
1301 					/*
1302 					 * check for any conditions that prevent
1303 					 * us from creating a new zero-fill page
1304 					 * vm_fault_check will do all of the
1305 					 * fault cleanup in the case of an error condition
1306 					 * including resetting the thread_interrupt_level
1307 					 */
1308 					error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1309 
1310 					if (error != VM_FAULT_SUCCESS) {
1311 						return error;
1312 					}
1313 
1314 					if (object != first_object) {
1315 						/*
1316 						 * free the absent page we just found
1317 						 */
1318 						VM_PAGE_FREE(m);
1319 
1320 						/*
1321 						 * drop reference and lock on current object
1322 						 */
1323 						vm_object_paging_end(object);
1324 						vm_object_unlock(object);
1325 
1326 						/*
1327 						 * grab the original page we
1328 						 * 'soldered' in place and
1329 						 * retake lock on 'first_object'
1330 						 */
1331 						m = first_m;
1332 						first_m = VM_PAGE_NULL;
1333 
1334 						object = first_object;
1335 						offset = first_offset;
1336 
1337 						vm_object_lock(object);
1338 					} else {
1339 						/*
1340 						 * we're going to use the absent page we just found
1341 						 * so convert it to a 'busy' page
1342 						 */
1343 						m->vmp_absent = FALSE;
1344 						m->vmp_busy = TRUE;
1345 					}
1346 					if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1347 						m->vmp_absent = TRUE;
1348 						clear_absent_on_error = true;
1349 					}
1350 					/*
1351 					 * zero-fill the page and put it on
1352 					 * the correct paging queue
1353 					 */
1354 					my_fault = vm_fault_zero_page(m, no_zero_fill);
1355 
1356 					break;
1357 				} else {
1358 					if (must_be_resident) {
1359 						vm_object_paging_end(object);
1360 					} else if (object != first_object) {
1361 						vm_object_paging_end(object);
1362 						VM_PAGE_FREE(m);
1363 					} else {
1364 						first_m = m;
1365 						m->vmp_absent = FALSE;
1366 						m->vmp_busy = TRUE;
1367 
1368 						vm_page_lockspin_queues();
1369 						vm_page_queues_remove(m, FALSE);
1370 						vm_page_unlock_queues();
1371 					}
1372 
1373 					offset += object->vo_shadow_offset;
1374 					fault_info->lo_offset += object->vo_shadow_offset;
1375 					fault_info->hi_offset += object->vo_shadow_offset;
1376 					access_required = VM_PROT_READ;
1377 
1378 					vm_object_lock(next_object);
1379 					vm_object_unlock(object);
1380 					object = next_object;
1381 					vm_object_paging_begin(object);
1382 
1383 					/*
1384 					 * reset to default type of fault
1385 					 */
1386 					my_fault = DBG_CACHE_HIT_FAULT;
1387 
1388 					continue;
1389 				}
1390 			}
1391 			if ((m->vmp_cleaning)
1392 			    && ((object != first_object) || (object->copy != VM_OBJECT_NULL))
1393 			    && (fault_type & VM_PROT_WRITE)) {
1394 				/*
1395 				 * This is a copy-on-write fault that will
1396 				 * cause us to revoke access to this page, but
1397 				 * this page is in the process of being cleaned
1398 				 * in a clustered pageout. We must wait until
1399 				 * the cleaning operation completes before
1400 				 * revoking access to the original page,
1401 				 * otherwise we might attempt to remove a
1402 				 * wired mapping.
1403 				 */
1404 #if TRACEFAULTPAGE
1405 				dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset);  /* (TEST/DEBUG) */
1406 #endif
1407 				/*
1408 				 * take an extra ref so that object won't die
1409 				 */
1410 				vm_object_reference_locked(object);
1411 
1412 				vm_fault_cleanup(object, first_m);
1413 
1414 				vm_object_lock(object);
1415 				assert(object->ref_count > 0);
1416 
1417 				m = vm_page_lookup(object, vm_object_trunc_page(offset));
1418 
1419 				if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1420 					PAGE_ASSERT_WAIT(m, interruptible);
1421 
1422 					vm_object_unlock(object);
1423 					wait_result = thread_block(THREAD_CONTINUE_NULL);
1424 					vm_object_deallocate(object);
1425 
1426 					goto backoff;
1427 				} else {
1428 					vm_object_unlock(object);
1429 
1430 					vm_object_deallocate(object);
1431 					thread_interrupt_level(interruptible_state);
1432 
1433 					return VM_FAULT_RETRY;
1434 				}
1435 			}
1436 			if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1437 			    !(fault_info != NULL && fault_info->stealth)) {
1438 				/*
1439 				 * If we were passed a non-NULL pointer for
1440 				 * "type_of_fault", than we came from
1441 				 * vm_fault... we'll let it deal with
1442 				 * this condition, since it
1443 				 * needs to see m->vmp_speculative to correctly
1444 				 * account the pageins, otherwise...
1445 				 * take it off the speculative queue, we'll
1446 				 * let the caller of vm_fault_page deal
1447 				 * with getting it onto the correct queue
1448 				 *
1449 				 * If the caller specified in fault_info that
1450 				 * it wants a "stealth" fault, we also leave
1451 				 * the page in the speculative queue.
1452 				 */
1453 				vm_page_lockspin_queues();
1454 				if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1455 					vm_page_queues_remove(m, FALSE);
1456 				}
1457 				vm_page_unlock_queues();
1458 			}
1459 			assert(object == VM_PAGE_OBJECT(m));
1460 
1461 			if (object->code_signed) {
1462 				/*
1463 				 * CODE SIGNING:
1464 				 * We just paged in a page from a signed
1465 				 * memory object but we don't need to
1466 				 * validate it now.  We'll validate it if
1467 				 * when it gets mapped into a user address
1468 				 * space for the first time or when the page
1469 				 * gets copied to another object as a result
1470 				 * of a copy-on-write.
1471 				 */
1472 			}
1473 
1474 			/*
1475 			 * We mark the page busy and leave it on
1476 			 * the pageout queues.  If the pageout
1477 			 * daemon comes across it, then it will
1478 			 * remove the page from the queue, but not the object
1479 			 */
1480 #if TRACEFAULTPAGE
1481 			dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1482 #endif
1483 			assert(!m->vmp_busy);
1484 			assert(!m->vmp_absent);
1485 
1486 			m->vmp_busy = TRUE;
1487 			break;
1488 		}
1489 
1490 #if __arm__ && !__arm64__
1491 		if (__improbable(object->internal &&
1492 		    offset >= object->vo_size &&
1493 		    offset < ((object->vo_size + SIXTEENK_PAGE_MASK) & ~SIXTEENK_PAGE_MASK) &&
1494 		    PAGE_SIZE == FOURK_PAGE_SIZE)) {
1495 			/*
1496 			 * On devices with a 4k kernel page size
1497 			 * and a 16k user page size (i.e. 32-bit watches),
1498 			 * IOKit could have created a VM object with a
1499 			 * 4k-aligned size.
1500 			 * IOKit could have then mapped that VM object
1501 			 * in a user address space, and VM would have extended
1502 			 * the mapping to the next 16k boundary.
1503 			 * So we could now be, somewhat illegally, trying to
1504 			 * access one of the up to 3 non-existent 4k pages
1505 			 * beyond the end of the VM object.
1506 			 * We would not be allowed to insert a page beyond the
1507 			 * end of the object, so let's fail the fault.
1508 			 */
1509 			DTRACE_VM2(vm_fault_page_beyond_end_of_internal,
1510 			    vm_object_offset_t, offset,
1511 			    vm_object_size_t, object->vo_size);
1512 			vm_fault_cleanup(object, first_m);
1513 			thread_interrupt_level(interruptible_state);
1514 			return VM_FAULT_MEMORY_ERROR;
1515 		}
1516 #endif /* __arm__ && !__arm64__ */
1517 
1518 		/*
1519 		 * we get here when there is no page present in the object at
1520 		 * the offset we're interested in... we'll allocate a page
1521 		 * at this point if the pager associated with
1522 		 * this object can provide the data or we're the top object...
1523 		 * object is locked;  m == NULL
1524 		 */
1525 
1526 		if (must_be_resident) {
1527 			if (fault_type == VM_PROT_NONE &&
1528 			    object == kernel_object) {
1529 				/*
1530 				 * We've been called from vm_fault_unwire()
1531 				 * while removing a map entry that was allocated
1532 				 * with KMA_KOBJECT and KMA_VAONLY.  This page
1533 				 * is not present and there's nothing more to
1534 				 * do here (nothing to unwire).
1535 				 */
1536 				vm_fault_cleanup(object, first_m);
1537 				thread_interrupt_level(interruptible_state);
1538 
1539 				return VM_FAULT_MEMORY_ERROR;
1540 			}
1541 
1542 			goto dont_look_for_page;
1543 		}
1544 
1545 		/* Don't expect to fault pages into the kernel object. */
1546 		assert(object != kernel_object);
1547 
1548 		data_supply = FALSE;
1549 
1550 		look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);
1551 
1552 #if TRACEFAULTPAGE
1553 		dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
1554 #endif
1555 		if (!look_for_page && object == first_object && !object->phys_contiguous) {
1556 			/*
1557 			 * Allocate a new page for this object/offset pair as a placeholder
1558 			 */
1559 			m = vm_page_grab_options(grab_options);
1560 #if TRACEFAULTPAGE
1561 			dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1562 #endif
1563 			if (m == VM_PAGE_NULL) {
1564 				vm_fault_cleanup(object, first_m);
1565 				thread_interrupt_level(interruptible_state);
1566 
1567 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
1568 				return VM_FAULT_MEMORY_SHORTAGE;
1569 			}
1570 
1571 			if (fault_info && fault_info->batch_pmap_op == TRUE) {
1572 				vm_page_insert_internal(m, object,
1573 				    vm_object_trunc_page(offset),
1574 				    VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1575 			} else {
1576 				vm_page_insert(m, object, vm_object_trunc_page(offset));
1577 			}
1578 		}
1579 		if (look_for_page) {
1580 			kern_return_t   rc;
1581 			int             my_fault_type;
1582 
1583 			/*
1584 			 *	If the memory manager is not ready, we
1585 			 *	cannot make requests.
1586 			 */
1587 			if (!object->pager_ready) {
1588 #if TRACEFAULTPAGE
1589 				dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
1590 #endif
1591 				if (m != VM_PAGE_NULL) {
1592 					VM_PAGE_FREE(m);
1593 				}
1594 
1595 				/*
1596 				 * take an extra ref so object won't die
1597 				 */
1598 				vm_object_reference_locked(object);
1599 				vm_fault_cleanup(object, first_m);
1600 
1601 				vm_object_lock(object);
1602 				assert(object->ref_count > 0);
1603 
1604 				if (!object->pager_ready) {
1605 					wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible);
1606 
1607 					vm_object_unlock(object);
1608 					if (wait_result == THREAD_WAITING) {
1609 						wait_result = thread_block(THREAD_CONTINUE_NULL);
1610 					}
1611 					vm_object_deallocate(object);
1612 
1613 					goto backoff;
1614 				} else {
1615 					vm_object_unlock(object);
1616 					vm_object_deallocate(object);
1617 					thread_interrupt_level(interruptible_state);
1618 
1619 					return VM_FAULT_RETRY;
1620 				}
1621 			}
1622 			if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1623 				/*
1624 				 * If there are too many outstanding page
1625 				 * requests pending on this external object, we
1626 				 * wait for them to be resolved now.
1627 				 */
1628 #if TRACEFAULTPAGE
1629 				dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1630 #endif
1631 				if (m != VM_PAGE_NULL) {
1632 					VM_PAGE_FREE(m);
1633 				}
1634 				/*
1635 				 * take an extra ref so object won't die
1636 				 */
1637 				vm_object_reference_locked(object);
1638 
1639 				vm_fault_cleanup(object, first_m);
1640 
1641 				vm_object_lock(object);
1642 				assert(object->ref_count > 0);
1643 
1644 				if (object->paging_in_progress >= vm_object_pagein_throttle) {
1645 					vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
1646 
1647 					vm_object_unlock(object);
1648 					wait_result = thread_block(THREAD_CONTINUE_NULL);
1649 					vm_object_deallocate(object);
1650 
1651 					goto backoff;
1652 				} else {
1653 					vm_object_unlock(object);
1654 					vm_object_deallocate(object);
1655 					thread_interrupt_level(interruptible_state);
1656 
1657 					return VM_FAULT_RETRY;
1658 				}
1659 			}
1660 			if (object->internal) {
1661 				int compressed_count_delta;
1662 
1663 				assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1664 
1665 				if (m == VM_PAGE_NULL) {
1666 					/*
1667 					 * Allocate a new page for this object/offset pair as a placeholder
1668 					 */
1669 					m = vm_page_grab_options(grab_options);
1670 #if TRACEFAULTPAGE
1671 					dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1672 #endif
1673 					if (m == VM_PAGE_NULL) {
1674 						vm_fault_cleanup(object, first_m);
1675 						thread_interrupt_level(interruptible_state);
1676 
1677 						kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
1678 						return VM_FAULT_MEMORY_SHORTAGE;
1679 					}
1680 
1681 					m->vmp_absent = TRUE;
1682 					if (fault_info && fault_info->batch_pmap_op == TRUE) {
1683 						vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1684 					} else {
1685 						vm_page_insert(m, object, vm_object_trunc_page(offset));
1686 					}
1687 				}
1688 				assert(m->vmp_busy);
1689 
1690 				m->vmp_absent = TRUE;
1691 				pager = object->pager;
1692 
1693 				assert(object->paging_in_progress > 0);
1694 				vm_object_unlock(object);
1695 
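				/*
				 * Ask the compressor pager to decompress the
				 * data for this offset directly into the
				 * physical page backing "m"; it reports the
				 * fault type (compressed vs. swapped-in) and
				 * the change in its compressed page count.
				 */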
1696 				rc = vm_compressor_pager_get(
1697 					pager,
1698 					offset + object->paging_offset,
1699 					VM_PAGE_GET_PHYS_PAGE(m),
1700 					&my_fault_type,
1701 					0,
1702 					&compressed_count_delta);
1703 
1704 				if (type_of_fault == NULL) {
1705 					int     throttle_delay;
1706 
1707 					/*
1708 					 * we weren't called from vm_fault, so we
1709 					 * need to apply page creation throttling;
1710 					 * do it before we re-acquire any locks
1711 					 */
1712 					if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1713 						if ((throttle_delay = vm_page_throttled(TRUE))) {
1714 							VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1715 							delay(throttle_delay);
1716 						}
1717 					}
1718 				}
1719 				vm_object_lock(object);
1720 				assert(object->paging_in_progress > 0);
1721 
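				/*
				 * Apply the compressed-page count delta
				 * reported by the pager to the object's
				 * accounting, now that we hold the object
				 * lock exclusively again.
				 */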
1722 				vm_compressor_pager_count(
1723 					pager,
1724 					compressed_count_delta,
1725 					FALSE, /* shared_lock */
1726 					object);
1727 
1728 				switch (rc) {
1729 				case KERN_SUCCESS:
1730 					m->vmp_absent = FALSE;
1731 					m->vmp_dirty = TRUE;
1732 					if ((object->wimg_bits &
1733 					    VM_WIMG_MASK) !=
1734 					    VM_WIMG_USE_DEFAULT) {
1735 						/*
1736 						 * If the page is not cacheable,
1737 						 * we can't let its contents
1738 						 * linger in the data cache
1739 						 * after the decompression.
1740 						 */
1741 						pmap_sync_page_attributes_phys(
1742 							VM_PAGE_GET_PHYS_PAGE(m));
1743 					} else {
1744 						m->vmp_written_by_kernel = TRUE;
1745 					}
1746 
1747 					/*
1748 					 * If the object is purgeable, its
1749 					 * owner's purgeable ledgers have been
1750 					 * updated in vm_page_insert() but the
1751 					 * page was also accounted for in a
1752 					 * "compressed purgeable" ledger, so
1753 					 * update that now.
1754 					 */
1755 					if (((object->purgable !=
1756 					    VM_PURGABLE_DENY) ||
1757 					    object->vo_ledger_tag) &&
1758 					    (object->vo_owner !=
1759 					    NULL)) {
1760 						/*
1761 						 * One less compressed
1762 						 * purgeable/tagged page.
1763 						 */
1764 						vm_object_owner_compressed_update(
1765 							object,
1766 							-1);
1767 					}
1768 
1769 					break;
1770 				case KERN_MEMORY_FAILURE:
1771 					m->vmp_unusual = TRUE;
1772 					m->vmp_error = TRUE;
1773 					m->vmp_absent = FALSE;
1774 					break;
1775 				case KERN_MEMORY_ERROR:
1776 					assert(m->vmp_absent);
1777 					break;
1778 				default:
1779 					panic("vm_fault_page(): unexpected "
1780 					    "error %d from "
1781 					    "vm_compressor_pager_get()\n",
1782 					    rc);
1783 				}
1784 				PAGE_WAKEUP_DONE(m);
1785 
1786 				rc = KERN_SUCCESS;
1787 				goto data_requested;
1788 			}
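			/*
			 * Not a compressor-backed object: any data will have
			 * to come from the external pager, so treat this as
			 * a page-in fault.
			 */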
1789 			my_fault_type = DBG_PAGEIN_FAULT;
1790 
1791 			if (m != VM_PAGE_NULL) {
1792 				VM_PAGE_FREE(m);
1793 				m = VM_PAGE_NULL;
1794 			}
1795 
1796 #if TRACEFAULTPAGE
1797 			dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);  /* (TEST/DEBUG) */
1798 #endif
1799 
1800 			/*
1801 			 * It's possible someone called vm_object_destroy while we weren't
1802 			 * holding the object lock.  If that has happened, then bail out
1803 			 * here.
1804 			 */
1805 
1806 			pager = object->pager;
1807 
1808 			if (pager == MEMORY_OBJECT_NULL) {
1809 				vm_fault_cleanup(object, first_m);
1810 				thread_interrupt_level(interruptible_state);
1811 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NO_PAGER), 0 /* arg */);
1812 				return VM_FAULT_MEMORY_ERROR;
1813 			}
1814 
1815 			/*
1816 			 * We have an absent page in place for the faulting offset,
1817 			 * so we can release the object lock.
1818 			 */
1819 
1820 			if (object->object_is_shared_cache) {
1821 				token = thread_priority_floor_start();
1822 				/*
1823 				 * A non-native shared cache object might
1824 				 * be getting set up in parallel with this
1825 				 * fault and so we can't assume that this
1826 				 * check will be valid after we drop the
1827 				 * object lock below.
1828 				 */
1829 				drop_floor = true;
1830 			}
1831 
1832 			vm_object_unlock(object);
1833 
1834 			/*
1835 			 * If this object uses a copy_call strategy,
1836 			 * and we are interested in a copy of this object
1837 			 * (having gotten here only by following a
1838 			 * shadow chain), then tell the memory manager
1839 			 * via a flag added to the desired_access
1840 			 * parameter, so that it can detect a race
1841 			 * between our walking down the shadow chain
1842 			 * and its pushing pages up into a copy of
1843 			 * the object that it manages.
1844 			 */
1845 			if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1846 				wants_copy_flag = VM_PROT_WANTS_COPY;
1847 			} else {
1848 				wants_copy_flag = VM_PROT_NONE;
1849 			}
1850 
1851 			if (object->copy == first_object) {
1852 				/*
1853 				 * if we issue the memory_object_data_request in
1854 				 * this state, we are subject to a deadlock with
1855 				 * the underlying filesystem if it is trying to
1856 				 * shrink the file resulting in a push of pages
1857 				 * into the copy object...  that push will stall
1858 				 * on the placeholder page, and if the pushing thread
1859 				 * is holding a lock that is required on the pagein
1860 				 * path (such as a truncate lock), we'll deadlock...
1861 				 * to avoid this potential deadlock, we throw away
1862 				 * our placeholder page before calling memory_object_data_request
1863 				 * and force this thread to retry the vm_fault_page after
1864 				 * we have issued the I/O.  the second time through this path
1865 				 * we will find the page already in the cache (presumably still
1866 				 * busy waiting for the I/O to complete) and then complete
1867 				 * the fault w/o having to go through memory_object_data_request again
1868 				 */
1869 				assert(first_m != VM_PAGE_NULL);
1870 				assert(VM_PAGE_OBJECT(first_m) == first_object);
1871 
1872 				vm_object_lock(first_object);
1873 				VM_PAGE_FREE(first_m);
1874 				vm_object_paging_end(first_object);
1875 				vm_object_unlock(first_object);
1876 
1877 				first_m = VM_PAGE_NULL;
1878 				force_fault_retry = TRUE;
1879 
1880 				vm_fault_page_forced_retry++;
1881 			}
1882 
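			/*
			 * If we already issued a data request for this fault,
			 * this retry presumably only needs the page we
			 * stalled on, so temporarily drop to a random,
			 * single-page request rather than repeating the
			 * clustered read-ahead.
			 */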
1883 			if (data_already_requested == TRUE) {
1884 				orig_behavior = fault_info->behavior;
1885 				orig_cluster_size = fault_info->cluster_size;
1886 
1887 				fault_info->behavior = VM_BEHAVIOR_RANDOM;
1888 				fault_info->cluster_size = PAGE_SIZE;
1889 			}
1890 			/*
1891 			 * Call the memory manager to retrieve the data.
1892 			 */
1893 			rc = memory_object_data_request(
1894 				pager,
1895 				vm_object_trunc_page(offset) + object->paging_offset,
1896 				PAGE_SIZE,
1897 				access_required | wants_copy_flag,
1898 				(memory_object_fault_info_t)fault_info);
1899 
1900 			if (data_already_requested == TRUE) {
1901 				fault_info->behavior = orig_behavior;
1902 				fault_info->cluster_size = orig_cluster_size;
1903 			} else {
1904 				data_already_requested = TRUE;
1905 			}
1906 
1907 			DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1908 #if TRACEFAULTPAGE
1909 			dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1910 #endif
1911 			vm_object_lock(object);
1912 
1913 			if (drop_floor && object->object_is_shared_cache) {
1914 				thread_priority_floor_end(&token);
1915 				drop_floor = false;
1916 			}
1917 
1918 data_requested:
1919 			if (rc != KERN_SUCCESS) {
1920 				vm_fault_cleanup(object, first_m);
1921 				thread_interrupt_level(interruptible_state);
1922 
1923 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1924 
1925 				return (rc == MACH_SEND_INTERRUPTED) ?
1926 				       VM_FAULT_INTERRUPTED :
1927 				       VM_FAULT_MEMORY_ERROR;
1928 			} else {
1929 				clock_sec_t     tv_sec;
1930 				clock_usec_t    tv_usec;
1931 
1932 				if (my_fault_type == DBG_PAGEIN_FAULT) {
1933 					clock_get_system_microtime(&tv_sec, &tv_usec);
1934 					current_thread()->t_page_creation_time = tv_sec;
1935 					current_thread()->t_page_creation_count = 0;
1936 				}
1937 			}
1938 			if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
1939 				vm_fault_cleanup(object, first_m);
1940 				thread_interrupt_level(interruptible_state);
1941 
1942 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
1943 				return VM_FAULT_INTERRUPTED;
1944 			}
1945 			if (force_fault_retry == TRUE) {
1946 				vm_fault_cleanup(object, first_m);
1947 				thread_interrupt_level(interruptible_state);
1948 
1949 				return VM_FAULT_RETRY;
1950 			}
1951 			if (m == VM_PAGE_NULL && object->phys_contiguous) {
1952 				/*
1953 				 * No page here means that the object we
1954 				 * initially looked up was "physically
1955 				 * contiguous" (i.e. device memory).  However,
1956 				 * with Virtual VRAM, the object might not
1957 				 * be backed by that device memory anymore,
1958 				 * so we're done here only if the object is
1959 				 * still "phys_contiguous".
1960 				 * Otherwise, if the object is no longer
1961 				 * "phys_contiguous", we need to retry the
1962 				 * page fault against the object's new backing
1963 				 * store (different memory object).
1964 				 */
1965 phys_contig_object:
1966 				goto done;
1967 			}
1968 			/*
1969 			 * potentially a pagein fault
1970 			 * if we make it through the state checks
1971 			 * above, then we'll count it as such
1972 			 */
1973 			my_fault = my_fault_type;
1974 
1975 			/*
1976 			 * Retry with same object/offset, since new data may
1977 			 * be in a different page (i.e., m is meaningless at
1978 			 * this point).
1979 			 */
1980 			continue;
1981 		}
1982 dont_look_for_page:
1983 		/*
1984 		 * We get here if the object has no pager, or an existence map
1985 		 * exists and indicates the page isn't present on the pager
1986 		 * or we're unwiring a page.  If a pager exists, but there
1987 		 * is no existence map, then the m->vmp_absent case above handles
1988 		 * the ZF case when the pager can't provide the page
1989 		 */
1990 #if TRACEFAULTPAGE
1991 		dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
1992 #endif
1993 		if (object == first_object) {
1994 			first_m = m;
1995 		} else {
1996 			assert(m == VM_PAGE_NULL);
1997 		}
1998 
1999 		next_object = object->shadow;
2000 
2001 		if (next_object == VM_OBJECT_NULL) {
2002 			/*
2003 			 * we've hit the bottom of the shadow chain,
2004 			 * fill the page in the top object with zeros.
2005 			 */
2006 			assert(!must_be_resident);
2007 
2008 			if (object != first_object) {
2009 				vm_object_paging_end(object);
2010 				vm_object_unlock(object);
2011 
2012 				object = first_object;
2013 				offset = first_offset;
2014 				vm_object_lock(object);
2015 			}
2016 			m = first_m;
2017 			assert(VM_PAGE_OBJECT(m) == object);
2018 			first_m = VM_PAGE_NULL;
2019 
2020 			/*
2021 			 * check for any conditions that prevent
2022 			 * us from creating a new zero-fill page.
2023 			 * vm_fault_check will do all of the
2024 			 * fault cleanup in the case of an error condition
2025 			 * including resetting the thread_interrupt_level
2026 			 */
2027 			error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2028 
2029 			if (error != VM_FAULT_SUCCESS) {
2030 				return error;
2031 			}
2032 
2033 			if (m == VM_PAGE_NULL) {
2034 				m = vm_page_grab_options(grab_options);
2035 
2036 				if (m == VM_PAGE_NULL) {
2037 					vm_fault_cleanup(object, VM_PAGE_NULL);
2038 					thread_interrupt_level(interruptible_state);
2039 
2040 					return VM_FAULT_MEMORY_SHORTAGE;
2041 				}
2042 				vm_page_insert(m, object, vm_object_trunc_page(offset));
2043 			}
2044 			if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2045 				m->vmp_absent = TRUE;
2046 				clear_absent_on_error = true;
2047 			}
2048 
2049 			my_fault = vm_fault_zero_page(m, no_zero_fill);
2050 
2051 			break;
2052 		} else {
2053 			/*
2054 			 * Move on to the next object.  Lock the next
2055 			 * object before unlocking the current one.
2056 			 */
2057 			if ((object != first_object) || must_be_resident) {
2058 				vm_object_paging_end(object);
2059 			}
2060 
2061 			offset += object->vo_shadow_offset;
2062 			fault_info->lo_offset += object->vo_shadow_offset;
2063 			fault_info->hi_offset += object->vo_shadow_offset;
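			/*
			 * Below the top object we only ever need read
			 * access: a write is satisfied by copying the page
			 * up into the top object rather than modifying the
			 * shadow chain.
			 */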
2064 			access_required = VM_PROT_READ;
2065 
2066 			vm_object_lock(next_object);
2067 			vm_object_unlock(object);
2068 
2069 			object = next_object;
2070 			vm_object_paging_begin(object);
2071 		}
2072 	}
2073 
2074 	/*
2075 	 *	PAGE HAS BEEN FOUND.
2076 	 *
2077 	 *	This page (m) is:
2078 	 *		busy, so that we can play with it;
2079 	 *		not absent, so that nobody else will fill it;
2080 	 *		possibly eligible for pageout;
2081 	 *
2082 	 *	The top-level page (first_m) is:
2083 	 *		VM_PAGE_NULL if the page was found in the
2084 	 *		 top-level object;
2085 	 *		busy, not absent, and ineligible for pageout.
2086 	 *
2087 	 *	The current object (object) is locked.  A paging
2088 	 *	reference is held for the current and top-level
2089 	 *	objects.
2090 	 */
2091 
2092 #if TRACEFAULTPAGE
2093 	dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
2094 #endif
2095 #if     EXTRA_ASSERTIONS
2096 	assert(m->vmp_busy && !m->vmp_absent);
2097 	assert((first_m == VM_PAGE_NULL) ||
2098 	    (first_m->vmp_busy && !first_m->vmp_absent &&
2099 	    !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2100 #endif  /* EXTRA_ASSERTIONS */
2101 
2102 	/*
2103 	 * If the page is being written, but isn't
2104 	 * already owned by the top-level object,
2105 	 * we have to copy it into a new page owned
2106 	 * by the top-level object.
2107 	 */
2108 	if (object != first_object) {
2109 #if TRACEFAULTPAGE
2110 		dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2111 #endif
2112 		if (fault_type & VM_PROT_WRITE) {
2113 			vm_page_t copy_m;
2114 
2115 			/*
2116 			 * We only really need to copy if we
2117 			 * want to write it.
2118 			 */
2119 			assert(!must_be_resident);
2120 
2121 			/*
2122 			 * If we try to collapse first_object at this
2123 			 * point, we may deadlock when we try to get
2124 			 * the lock on an intermediate object (since we
2125 			 * have the bottom object locked).  We can't
2126 			 * unlock the bottom object, because the page
2127 			 * we found may move (by collapse) if we do.
2128 			 *
2129 			 * Instead, we first copy the page.  Then, when
2130 			 * we have no more use for the bottom object,
2131 			 * we unlock it and try to collapse.
2132 			 *
2133 			 * Note that we copy the page even if we didn't
2134 			 * need to... that's the breaks.
2135 			 */
2136 
2137 			/*
2138 			 * Allocate a page for the copy
2139 			 */
2140 			copy_m = vm_page_grab_options(grab_options);
2141 
2142 			if (copy_m == VM_PAGE_NULL) {
2143 				RELEASE_PAGE(m);
2144 
2145 				vm_fault_cleanup(object, first_m);
2146 				thread_interrupt_level(interruptible_state);
2147 
2148 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
2149 				return VM_FAULT_MEMORY_SHORTAGE;
2150 			}
2151 
2152 			vm_page_copy(m, copy_m);
2153 
2154 			/*
2155 			 * If another map is truly sharing this
2156 			 * page with us, we have to flush all
2157 			 * uses of the original page, since we
2158 			 * can't distinguish those which want the
2159 			 * original from those which need the
2160 			 * new copy.
2161 			 *
2162 			 * XXXO If we know that only one map has
2163 			 * access to this page, then we could
2164 			 * avoid the pmap_disconnect() call.
2165 			 */
2166 			if (m->vmp_pmapped) {
2167 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2168 			}
2169 
2170 			if (m->vmp_clustered) {
2171 				VM_PAGE_COUNT_AS_PAGEIN(m);
2172 				VM_PAGE_CONSUME_CLUSTERED(m);
2173 			}
2174 			assert(!m->vmp_cleaning);
2175 
2176 			/*
2177 			 * We no longer need the old page or object.
2178 			 */
2179 			RELEASE_PAGE(m);
2180 
2181 			/*
2182 			 * This check helps with marking the object as having a sequential pattern.
2183 			 * Normally we'd miss doing this below because this fault is a COW into
2184 			 * first_object: the page is brought in from disk and pushed to the object
2185 			 * above, but the file object's sequential pattern wouldn't get updated.
2186 			 */
2187 			if (object->internal == FALSE) {
2188 				vm_fault_is_sequential(object, offset, fault_info->behavior);
2189 			}
2190 
2191 			vm_object_paging_end(object);
2192 			vm_object_unlock(object);
2193 
2194 			my_fault = DBG_COW_FAULT;
2195 			counter_inc(&vm_statistics_cow_faults);
2196 			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2197 			counter_inc(&current_task()->cow_faults);
2198 
2199 			object = first_object;
2200 			offset = first_offset;
2201 
2202 			vm_object_lock(object);
2203 			/*
2204 			 * get rid of the placeholder
2205 			 * page that we soldered in earlier
2206 			 */
2207 			VM_PAGE_FREE(first_m);
2208 			first_m = VM_PAGE_NULL;
2209 
2210 			/*
2211 			 * and replace it with the
2212 			 * page we just copied into
2213 			 */
2214 			assert(copy_m->vmp_busy);
2215 			vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
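			/*
			 * The freshly copied page's contents exist nowhere
			 * else yet, so mark it dirty to keep it from being
			 * reclaimed before it reaches backing store.
			 */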
2216 			SET_PAGE_DIRTY(copy_m, TRUE);
2217 
2218 			m = copy_m;
2219 			/*
2220 			 * Now that we've gotten the copy out of the
2221 			 * way, let's try to collapse the top object.
2222 			 * But we have to play ugly games with
2223 			 * paging_in_progress to do that...
2224 			 */
2225 			vm_object_paging_end(object);
2226 			vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2227 			vm_object_paging_begin(object);
2228 		} else {
2229 			*protection &= (~VM_PROT_WRITE);
2230 		}
2231 	}
2232 	/*
2233 	 * Now check whether the page needs to be pushed into the
2234 	 * copy object.  The use of asymmetric copy on write for
2235 	 * shared temporary objects means that we may do two copies to
2236 	 * satisfy the fault; one above to get the page from a
2237 	 * shadowed object, and one here to push it into the copy.
2238 	 */
2239 	try_failed_count = 0;
2240 
2241 	while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
2242 		vm_object_offset_t      copy_offset;
2243 		vm_page_t               copy_m;
2244 
2245 #if TRACEFAULTPAGE
2246 		dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);    /* (TEST/DEBUG) */
2247 #endif
2248 		/*
2249 		 * If the page is being written, but hasn't been
2250 		 * copied to the copy-object, we have to copy it there.
2251 		 */
2252 		if ((fault_type & VM_PROT_WRITE) == 0) {
2253 			*protection &= ~VM_PROT_WRITE;
2254 			break;
2255 		}
2256 
2257 		/*
2258 		 * If the page was guaranteed to be resident,
2259 		 * we must have already performed the copy.
2260 		 */
2261 		if (must_be_resident) {
2262 			break;
2263 		}
2264 
2265 		/*
2266 		 * Try to get the lock on the copy_object.
2267 		 */
2268 		if (!vm_object_lock_try(copy_object)) {
2269 			vm_object_unlock(object);
2270 			try_failed_count++;
2271 
2272 			mutex_pause(try_failed_count);  /* wait a bit */
2273 			vm_object_lock(object);
2274 
2275 			continue;
2276 		}
2277 		try_failed_count = 0;
2278 
2279 		/*
2280 		 * Make another reference to the copy-object,
2281 		 * to keep it from disappearing during the
2282 		 * copy.
2283 		 */
2284 		vm_object_reference_locked(copy_object);
2285 
2286 		/*
2287 		 * Does the page exist in the copy?
2288 		 */
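		/*
		 * copy_object shadows first_object at vo_shadow_offset, so
		 * subtracting that offset translates first_offset into
		 * copy_object's namespace; truncate it to a page boundary
		 * there before looking the page up.
		 */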
2289 		copy_offset = first_offset - copy_object->vo_shadow_offset;
2290 		copy_offset = vm_object_trunc_page(copy_offset);
2291 
2292 		if (copy_object->vo_size <= copy_offset) {
2293 			/*
2294 			 * Copy object doesn't cover this page -- do nothing.
2295 			 */
2296 			;
2297 		} else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2298 			/*
2299 			 * Page currently exists in the copy object
2300 			 */
2301 			if (copy_m->vmp_busy) {
2302 				/*
2303 				 * If the page is being brought
2304 				 * in, wait for it and then retry.
2305 				 */
2306 				RELEASE_PAGE(m);
2307 
2308 				/*
2309 				 * take an extra ref so object won't die
2310 				 */
2311 				vm_object_reference_locked(copy_object);
2312 				vm_object_unlock(copy_object);
2313 				vm_fault_cleanup(object, first_m);
2314 
2315 				vm_object_lock(copy_object);
2316 				assert(copy_object->ref_count > 0);
2317 				vm_object_lock_assert_exclusive(copy_object);
2318 				copy_object->ref_count--;
2319 				assert(copy_object->ref_count > 0);
2320 				copy_m = vm_page_lookup(copy_object, copy_offset);
2321 
2322 				if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2323 					PAGE_ASSERT_WAIT(copy_m, interruptible);
2324 
2325 					vm_object_unlock(copy_object);
2326 					wait_result = thread_block(THREAD_CONTINUE_NULL);
2327 					vm_object_deallocate(copy_object);
2328 
2329 					goto backoff;
2330 				} else {
2331 					vm_object_unlock(copy_object);
2332 					vm_object_deallocate(copy_object);
2333 					thread_interrupt_level(interruptible_state);
2334 
2335 					return VM_FAULT_RETRY;
2336 				}
2337 			}
2338 		} else if (!PAGED_OUT(copy_object, copy_offset)) {
2339 			/*
2340 			 * If PAGED_OUT is TRUE, then the page used to exist
2341 			 * in the copy-object, and has already been paged out.
2342 			 * We don't need to repeat this. If PAGED_OUT is
2343 			 * FALSE, then either we don't know (!pager_created,
2344 			 * for example) or it hasn't been paged out.
2345 			 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2346 			 * We must copy the page to the copy object.
2347 			 *
2348 			 * Allocate a page for the copy
2349 			 */
2350 			copy_m = vm_page_alloc(copy_object, copy_offset);
2351 
2352 			if (copy_m == VM_PAGE_NULL) {
2353 				RELEASE_PAGE(m);
2354 
2355 				vm_object_lock_assert_exclusive(copy_object);
2356 				copy_object->ref_count--;
2357 				assert(copy_object->ref_count > 0);
2358 
2359 				vm_object_unlock(copy_object);
2360 				vm_fault_cleanup(object, first_m);
2361 				thread_interrupt_level(interruptible_state);
2362 
2363 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
2364 				return VM_FAULT_MEMORY_SHORTAGE;
2365 			}
2366 			/*
2367 			 * Must copy page into copy-object.
2368 			 */
2369 			vm_page_copy(m, copy_m);
2370 
2371 			/*
2372 			 * If the old page was in use by any users
2373 			 * of the copy-object, it must be removed
2374 			 * from all pmaps.  (We can't know which
2375 			 * pmaps use it.)
2376 			 */
2377 			if (m->vmp_pmapped) {
2378 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2379 			}
2380 
2381 			if (m->vmp_clustered) {
2382 				VM_PAGE_COUNT_AS_PAGEIN(m);
2383 				VM_PAGE_CONSUME_CLUSTERED(m);
2384 			}
2385 			/*
2386 			 * If there's a pager, then immediately
2387 			 * page out this page, using the "initialize"
2388 			 * option.  Else, we use the copy.
2389 			 */
2390 			if ((!copy_object->pager_ready)
2391 			    || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2392 			    ) {
2393 				vm_page_lockspin_queues();
2394 				assert(!m->vmp_cleaning);
2395 				vm_page_activate(copy_m);
2396 				vm_page_unlock_queues();
2397 
2398 				SET_PAGE_DIRTY(copy_m, TRUE);
2399 				PAGE_WAKEUP_DONE(copy_m);
2400 			} else {
2401 				assert(copy_m->vmp_busy == TRUE);
2402 				assert(!m->vmp_cleaning);
2403 
2404 				/*
2405 				 * dirty is protected by the object lock
2406 				 */
2407 				SET_PAGE_DIRTY(copy_m, TRUE);
2408 
2409 				/*
2410 				 * The page is already ready for pageout:
2411 				 * not on pageout queues and busy.
2412 				 * Unlock everything except the
2413 				 * copy_object itself.
2414 				 */
2415 				vm_object_unlock(object);
2416 
2417 				/*
2418 				 * Write the page to the copy-object,
2419 				 * flushing it from the kernel.
2420 				 */
2421 				vm_pageout_initialize_page(copy_m);
2422 
2423 				/*
2424 				 * Since the pageout may have
2425 				 * temporarily dropped the
2426 				 * copy_object's lock, we
2427 				 * check whether we'll have
2428 				 * to deallocate the hard way.
2429 				 */
2430 				if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
2431 					vm_object_unlock(copy_object);
2432 					vm_object_deallocate(copy_object);
2433 					vm_object_lock(object);
2434 
2435 					continue;
2436 				}
2437 				/*
2438 				 * Pick back up the old object's
2439 				 * lock.  [It is safe to do so,
2440 				 * since it must be deeper in the
2441 				 * object tree.]
2442 				 */
2443 				vm_object_lock(object);
2444 			}
2445 
2446 			/*
2447 			 * Because we're pushing a page upward
2448 			 * in the object tree, we must restart
2449 			 * any faults that are waiting here.
2450 			 * [Note that this is an expansion of
2451 			 * PAGE_WAKEUP that uses the THREAD_RESTART
2452 			 * wait result].  Can't turn off the page's
2453 			 * busy bit because we're not done with it.
2454 			 */
2455 			if (m->vmp_wanted) {
2456 				m->vmp_wanted = FALSE;
2457 				thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2458 			}
2459 		}
2460 		/*
2461 		 * The reference count on copy_object must be
2462 		 * at least 2: one for our extra reference,
2463 		 * and at least one from the outside world
2464 		 * (we checked that when we last locked
2465 		 * copy_object).
2466 		 */
2467 		vm_object_lock_assert_exclusive(copy_object);
2468 		copy_object->ref_count--;
2469 		assert(copy_object->ref_count > 0);
2470 
2471 		vm_object_unlock(copy_object);
2472 
2473 		break;
2474 	}
2475 
2476 done:
2477 	*result_page = m;
2478 	*top_page = first_m;
2479 
2480 	if (m != VM_PAGE_NULL) {
2481 		assert(VM_PAGE_OBJECT(m) == object);
2482 
2483 		retval = VM_FAULT_SUCCESS;
2484 
2485 		if (my_fault == DBG_PAGEIN_FAULT) {
2486 			VM_PAGE_COUNT_AS_PAGEIN(m);
2487 
2488 			if (object->internal) {
2489 				my_fault = DBG_PAGEIND_FAULT;
2490 			} else {
2491 				my_fault = DBG_PAGEINV_FAULT;
2492 			}
2493 
2494 			/*
2495 			 * evaluate access pattern and update state.
2496 			 * vm_fault_deactivate_behind depends on the
2497 			 * state being up to date
2498 			 */
2499 			vm_fault_is_sequential(object, offset, fault_info->behavior);
2500 			vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2501 		} else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2502 			/*
2503 			 * we weren't called from vm_fault, so handle the
2504 			 * accounting here for hits in the cache
2505 			 */
2506 			if (m->vmp_clustered) {
2507 				VM_PAGE_COUNT_AS_PAGEIN(m);
2508 				VM_PAGE_CONSUME_CLUSTERED(m);
2509 			}
2510 			vm_fault_is_sequential(object, offset, fault_info->behavior);
2511 			vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2512 		} else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2513 			VM_STAT_DECOMPRESSIONS();
2514 		}
2515 		if (type_of_fault) {
2516 			*type_of_fault = my_fault;
2517 		}
2518 	} else {
2519 		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2520 		retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2521 		assert(first_m == VM_PAGE_NULL);
2522 		assert(object == first_object);
2523 	}
2524 
2525 	thread_interrupt_level(interruptible_state);
2526 
2527 #if TRACEFAULTPAGE
2528 	dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
2529 #endif
2530 	return retval;
2531 
2532 backoff:
2533 	thread_interrupt_level(interruptible_state);
2534 
2535 	if (wait_result == THREAD_INTERRUPTED) {
2536 		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2537 		return VM_FAULT_INTERRUPTED;
2538 	}
2539 	return VM_FAULT_RETRY;
2540 
2541 #undef  RELEASE_PAGE
2542 }
2543 
2544 #if MACH_ASSERT && (PLATFORM_WatchOS || __x86_64__)
2545 #define PANIC_ON_CS_KILLED_DEFAULT true
2546 #else
2547 #define PANIC_ON_CS_KILLED_DEFAULT false
2548 #endif
2549 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2550     PANIC_ON_CS_KILLED_DEFAULT);
2551 
2552 extern int proc_selfpid(void);
2553 extern char *proc_name_address(void *p);
2554 unsigned long cs_enter_tainted_rejected = 0;
2555 unsigned long cs_enter_tainted_accepted = 0;
2556 
2557 /*
2558  * CODE SIGNING:
2559  * When soft faulting a page, we have to validate the page if:
2560  * 1. the page is being mapped in user space
2561  * 2. the page hasn't already been found to be "tainted"
2562  * 3. the page belongs to a code-signed object
2563  * 4. the page has not been validated yet or has been mapped for write.
2564  */
2565 static bool
2566 vm_fault_cs_need_validation(
2567 	pmap_t pmap,
2568 	vm_page_t page,
2569 	vm_object_t page_obj,
2570 	vm_map_size_t fault_page_size,
2571 	vm_map_offset_t fault_phys_offset)
2572 {
2573 	if (pmap == kernel_pmap) {
2574 		/* 1 - not user space */
2575 		return false;
2576 	}
2577 	if (!page_obj->code_signed) {
2578 		/* 3 - page does not belong to a code-signed object */
2579 		return false;
2580 	}
2581 	if (fault_page_size == PAGE_SIZE) {
2582 		/* looking at the whole page */
2583 		assertf(fault_phys_offset == 0,
2584 		    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2585 		    (uint64_t)fault_page_size,
2586 		    (uint64_t)fault_phys_offset);
2587 		if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2588 			/* 2 - page is all tainted */
2589 			return false;
2590 		}
2591 		if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2592 		    !page->vmp_wpmapped) {
2593 			/* 4 - already fully validated and never mapped writable */
2594 			return false;
2595 		}
2596 	} else {
2597 		/* looking at a specific sub-page */
2598 		if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2599 			/* 2 - sub-page was already marked as tainted */
2600 			return false;
2601 		}
2602 		if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2603 		    !page->vmp_wpmapped) {
2604 			/* 4 - already validated and never mapped writable */
2605 			return false;
2606 		}
2607 	}
2608 	/* page needs to be validated */
2609 	return true;
2610 }
2611 
2612 
2613 static bool
2614 vm_fault_cs_page_immutable(
2615 	vm_page_t m,
2616 	vm_map_size_t fault_page_size,
2617 	vm_map_offset_t fault_phys_offset,
2618 	vm_prot_t prot __unused)
2619 {
2620 	if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2621 	    /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2622 		return true;
2623 	}
2624 	return false;
2625 }
2626 
2627 static bool
2628 vm_fault_cs_page_nx(
2629 	vm_page_t m,
2630 	vm_map_size_t fault_page_size,
2631 	vm_map_offset_t fault_phys_offset)
2632 {
2633 	return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2634 }
2635 
2636 /*
2637  * Check if the page being entered into the pmap violates code signing.
2638  */
2639 static kern_return_t
2640 vm_fault_cs_check_violation(
2641 	bool cs_bypass,
2642 	vm_object_t object,
2643 	vm_page_t m,
2644 	pmap_t pmap,
2645 	vm_prot_t prot,
2646 	vm_prot_t caller_prot,
2647 	vm_map_size_t fault_page_size,
2648 	vm_map_offset_t fault_phys_offset,
2649 	vm_object_fault_info_t fault_info,
2650 	bool map_is_switched,
2651 	bool map_is_switch_protected,
2652 	bool *cs_violation)
2653 {
2654 #if !PMAP_CS
2655 #pragma unused(caller_prot)
2656 #pragma unused(fault_info)
2657 #endif /* !PMAP_CS */
2658 	int             cs_enforcement_enabled;
2659 	if (!cs_bypass &&
2660 	    vm_fault_cs_need_validation(pmap, m, object,
2661 	    fault_page_size, fault_phys_offset)) {
2662 		vm_object_lock_assert_exclusive(object);
2663 
2664 		if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2665 			vm_cs_revalidates++;
2666 		}
2667 
2668 		/* VM map is locked, so 1 ref will remain on VM object -
2669 		 * so no harm if vm_page_validate_cs drops the object lock */
2670 
2671 		vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2672 	}
2673 
2674 	/* If the map is switched, and is switch-protected, we must protect
2675 	 * some pages from being write-faulted: immutable pages because by
2676 	 * definition they may not be written, and executable pages because that
2677 	 * would provide a way to inject unsigned code.
2678 	 * If the page is immutable, we can simply return. However, we can't
2679 	 * immediately determine whether a page is executable anywhere. But,
2680 	 * we can disconnect it everywhere and remove the executable protection
2681 	 * from the current map. We do that below right before we do the
2682 	 * PMAP_ENTER.
2683 	 */
2684 	if (pmap == kernel_pmap) {
2685 		/* kernel fault: cs_enforcement does not apply */
2686 		cs_enforcement_enabled = 0;
2687 	} else {
2688 		cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2689 	}
2690 
2691 	if (cs_enforcement_enabled && map_is_switched &&
2692 	    map_is_switch_protected &&
2693 	    vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2694 	    (prot & VM_PROT_WRITE)) {
2695 		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2696 		return KERN_CODESIGN_ERROR;
2697 	}
2698 
2699 	if (cs_enforcement_enabled &&
2700 	    vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2701 	    (prot & VM_PROT_EXECUTE)) {
2702 		if (cs_debug) {
2703 			printf("page marked to be NX, not letting it be mapped EXEC\n");
2704 		}
2705 		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2706 		return KERN_CODESIGN_ERROR;
2707 	}
2708 
2709 	/* A page could be tainted, or pose a risk of being tainted later.
2710 	 * Check whether the receiving process wants it, and make it feel
2711 	 * the consequences (that happens in cs_invalid_page()).
2712 	 * For CS Enforcement, two other conditions will
2713 	 * cause that page to be tainted as well:
2714 	 * - pmapping an unsigned page executable - this means unsigned code;
2715 	 * - writeable mapping of a validated page - the content of that page
2716 	 *   can be changed without the kernel noticing, therefore unsigned
2717 	 *   code can be created
2718 	 */
2719 	if (cs_bypass) {
2720 		/* code-signing is bypassed */
2721 		*cs_violation = FALSE;
2722 	} else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2723 		/* tainted page */
2724 		*cs_violation = TRUE;
2725 	} else if (!cs_enforcement_enabled) {
2726 		/* no further code-signing enforcement */
2727 		*cs_violation = FALSE;
2728 	} else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2729 	    ((prot & VM_PROT_WRITE) ||
2730 	    m->vmp_wpmapped)) {
2731 		/*
2732 		 * The page should be immutable, but is in danger of being
2733 		 * modified.
2734 		 * This is the case where we want policy from the code
2735 		 * directory - is the page immutable or not? For now we have
2736 		 * to assume that code pages will be immutable, data pages not.
2737 		 * We'll assume a page is a code page if it has a code directory
2738 		 * and we fault for execution.
2739 		 * That is good enough since if we faulted the code page for
2740 		 * writing in another map before, it is wpmapped; if we fault
2741 		 * it for writing in this map later it will also be faulted for
2742 		 * executing at the same time; and if we fault for writing in
2743 		 * another map later, we will disconnect it from this pmap so
2744 		 * we'll notice the change.
2745 		 */
2746 		*cs_violation = TRUE;
2747 	} else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2748 	    (prot & VM_PROT_EXECUTE)
2749 	    ) {
2750 		*cs_violation = TRUE;
2751 	} else {
2752 		*cs_violation = FALSE;
2753 	}
2754 	return KERN_SUCCESS;
2755 }
2756 
2757 /*
2758  * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2759  * @param must_disconnect This value will be set to true if the caller must disconnect
2760  * this page.
2761  * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2762  */
2763 static kern_return_t
2764 vm_fault_cs_handle_violation(
2765 	vm_object_t object,
2766 	vm_page_t m,
2767 	pmap_t pmap,
2768 	vm_prot_t prot,
2769 	vm_map_offset_t vaddr,
2770 	vm_map_size_t fault_page_size,
2771 	vm_map_offset_t fault_phys_offset,
2772 	bool map_is_switched,
2773 	bool map_is_switch_protected,
2774 	bool *must_disconnect)
2775 {
2776 #if !MACH_ASSERT
2777 #pragma unused(pmap)
2778 #pragma unused(map_is_switch_protected)
2779 #endif /* !MACH_ASSERT */
2780 	/*
2781 	 * We will have a tainted page. Have to handle the special case
2782 	 * of a switched map now. If the map is not switched, standard
2783 	 * procedure applies - call cs_invalid_page().
2784 	 * If the map is switched, the real owner is invalid already.
2785 	 * There is no point in invalidating the switching process since
2786 	 * it will not be executing from the map. So we don't call
2787 	 * cs_invalid_page() in that case.
2788 	 */
2789 	boolean_t reject_page, cs_killed;
2790 	kern_return_t kr;
2791 	if (map_is_switched) {
2792 		assert(pmap == vm_map_pmap(current_thread()->map));
2793 		assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2794 		reject_page = FALSE;
2795 	} else {
2796 		if (cs_debug > 5) {
2797 			printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2798 			    object->code_signed ? "yes" : "no",
2799 			    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2800 			    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2801 			    m->vmp_wpmapped ? "yes" : "no",
2802 			    (int)prot);
2803 		}
2804 		reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2805 	}
2806 
2807 	if (reject_page) {
2808 		/* reject the invalid page: abort the page fault */
2809 		int                     pid;
2810 		const char              *procname;
2811 		task_t                  task;
2812 		vm_object_t             file_object, shadow;
2813 		vm_object_offset_t      file_offset;
2814 		char                    *pathname, *filename;
2815 		vm_size_t               pathname_len, filename_len;
2816 		boolean_t               truncated_path;
2817 #define __PATH_MAX 1024
2818 		struct timespec         mtime, cs_mtime;
2819 		int                     shadow_depth;
2820 		os_reason_t             codesigning_exit_reason = OS_REASON_NULL;
2821 
2822 		kr = KERN_CODESIGN_ERROR;
2823 		cs_enter_tainted_rejected++;
2824 
2825 		/* get process name and pid */
2826 		procname = "?";
2827 		task = current_task();
2828 		pid = proc_selfpid();
2829 		if (task->bsd_info != NULL) {
2830 			procname = proc_name_address(task->bsd_info);
2831 		}
2832 
2833 		/* get file's VM object */
2834 		file_object = object;
2835 		file_offset = m->vmp_offset;
2836 		for (shadow = file_object->shadow,
2837 		    shadow_depth = 0;
2838 		    shadow != VM_OBJECT_NULL;
2839 		    shadow = file_object->shadow,
2840 		    shadow_depth++) {
2841 			vm_object_lock_shared(shadow);
2842 			if (file_object != object) {
2843 				vm_object_unlock(file_object);
2844 			}
2845 			file_offset += file_object->vo_shadow_offset;
2846 			file_object = shadow;
2847 		}
2848 
2849 		mtime.tv_sec = 0;
2850 		mtime.tv_nsec = 0;
2851 		cs_mtime.tv_sec = 0;
2852 		cs_mtime.tv_nsec = 0;
2853 
2854 		/* get file's pathname and/or filename */
2855 		pathname = NULL;
2856 		filename = NULL;
2857 		pathname_len = 0;
2858 		filename_len = 0;
2859 		truncated_path = FALSE;
2860 		/* no pager -> no file -> no pathname, use "<nil>" in that case */
2861 		if (file_object->pager != NULL) {
2862 			pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2863 			if (pathname) {
2864 				pathname[0] = '\0';
2865 				pathname_len = __PATH_MAX;
2866 				filename = pathname + pathname_len;
2867 				filename_len = __PATH_MAX;
2868 
2869 				if (vnode_pager_get_object_name(file_object->pager,
2870 				    pathname,
2871 				    pathname_len,
2872 				    filename,
2873 				    filename_len,
2874 				    &truncated_path) == KERN_SUCCESS) {
2875 					/* safety first... */
2876 					pathname[__PATH_MAX - 1] = '\0';
2877 					filename[__PATH_MAX - 1] = '\0';
2878 
2879 					vnode_pager_get_object_mtime(file_object->pager,
2880 					    &mtime,
2881 					    &cs_mtime);
2882 				} else {
2883 					kfree_data(pathname, __PATH_MAX * 2);
2884 					pathname = NULL;
2885 					filename = NULL;
2886 					pathname_len = 0;
2887 					filename_len = 0;
2888 					truncated_path = FALSE;
2889 				}
2890 			}
2891 		}
2892 		printf("CODE SIGNING: process %d[%s]: "
2893 		    "rejecting invalid page at address 0x%llx "
2894 		    "from offset 0x%llx in file \"%s%s%s\" "
2895 		    "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2896 		    "(signed:%d validated:%d tainted:%d nx:%d "
2897 		    "wpmapped:%d dirty:%d depth:%d)\n",
2898 		    pid, procname, (addr64_t) vaddr,
2899 		    file_offset,
2900 		    (pathname ? pathname : "<nil>"),
2901 		    (truncated_path ? "/.../" : ""),
2902 		    (truncated_path ? filename : ""),
2903 		    cs_mtime.tv_sec, cs_mtime.tv_nsec,
2904 		    ((cs_mtime.tv_sec == mtime.tv_sec &&
2905 		    cs_mtime.tv_nsec == mtime.tv_nsec)
2906 		    ? "=="
2907 		    : "!="),
2908 		    mtime.tv_sec, mtime.tv_nsec,
2909 		    object->code_signed,
2910 		    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
2911 		    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
2912 		    VMP_CS_NX(m, fault_page_size, fault_phys_offset),
2913 		    m->vmp_wpmapped,
2914 		    m->vmp_dirty,
2915 		    shadow_depth);
2916 
2917 		/*
2918 		 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
2919 		 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
2920 		 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
2921 		 * will deal with the segmentation fault.
2922 		 */
2923 		if (cs_killed) {
2924 			KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2925 			    pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0);
2926 
2927 			codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
2928 			if (codesigning_exit_reason == NULL) {
2929 				printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
2930 			} else {
2931 				mach_vm_address_t data_addr = 0;
2932 				struct codesigning_exit_reason_info *ceri = NULL;
2933 				uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
2934 
2935 				if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
2936 					printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
2937 				} else {
2938 					if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
2939 					    EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
2940 						ceri = (struct codesigning_exit_reason_info *)data_addr;
2941 						static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
2942 
2943 						ceri->ceri_virt_addr = vaddr;
2944 						ceri->ceri_file_offset = file_offset;
2945 						if (pathname) {
2946 							strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
2947 						} else {
2948 							ceri->ceri_pathname[0] = '\0';
2949 						}
2950 						if (filename) {
2951 							strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
2952 						} else {
2953 							ceri->ceri_filename[0] = '\0';
2954 						}
2955 						ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
2956 						ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
2957 						ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
2958 						ceri->ceri_page_modtime_secs = mtime.tv_sec;
2959 						ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
2960 						ceri->ceri_object_codesigned = (object->code_signed);
2961 						ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
2962 						ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
2963 						ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2964 						ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
2965 						ceri->ceri_page_slid = 0;
2966 						ceri->ceri_page_dirty = (m->vmp_dirty);
2967 						ceri->ceri_page_shadow_depth = shadow_depth;
2968 					} else {
2969 #if DEBUG || DEVELOPMENT
2970 						panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
2971 #else
2972 						printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
2973 #endif /* DEBUG || DEVELOPMENT */
2974 						/* Free the buffer */
2975 						os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
2976 					}
2977 				}
2978 			}
2979 
2980 			set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
2981 		}
2982 		if (panic_on_cs_killed &&
2983 		    object->object_is_shared_cache) {
2984 			char *tainted_contents;
2985 			vm_map_offset_t src_vaddr;
2986 			src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
2987 			tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
2988 			bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
2989 			printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
2990 			panic("CODE SIGNING: process %d[%s]: "
2991 			    "rejecting invalid page (phys#0x%x) at address 0x%llx "
2992 			    "from offset 0x%llx in file \"%s%s%s\" "
2993 			    "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2994 			    "(signed:%d validated:%d tainted:%d nx:%d "
2995 			    "wpmapped:%d dirty:%d depth:%d)\n",
2996 			    pid, procname,
2997 			    VM_PAGE_GET_PHYS_PAGE(m),
2998 			    (addr64_t) vaddr,
2999 			    file_offset,
3000 			    (pathname ? pathname : "<nil>"),
3001 			    (truncated_path ? "/.../" : ""),
3002 			    (truncated_path ? filename : ""),
3003 			    cs_mtime.tv_sec, cs_mtime.tv_nsec,
3004 			    ((cs_mtime.tv_sec == mtime.tv_sec &&
3005 			    cs_mtime.tv_nsec == mtime.tv_nsec)
3006 			    ? "=="
3007 			    : "!="),
3008 			    mtime.tv_sec, mtime.tv_nsec,
3009 			    object->code_signed,
3010 			    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3011 			    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3012 			    VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3013 			    m->vmp_wpmapped,
3014 			    m->vmp_dirty,
3015 			    shadow_depth);
3016 		}
3017 
3018 		if (file_object != object) {
3019 			vm_object_unlock(file_object);
3020 		}
3021 		if (pathname_len != 0) {
3022 			kfree_data(pathname, __PATH_MAX * 2);
3023 			pathname = NULL;
3024 			filename = NULL;
3025 		}
3026 	} else {
3027 		/* proceed with the invalid page */
3028 		kr = KERN_SUCCESS;
3029 		if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3030 		    !object->code_signed) {
3031 			/*
3032 			 * This page has not been (fully) validated but
3033 			 * does not belong to a code-signed object
3034 			 * so it should not be forcefully considered
3035 			 * as tainted.
3036 			 * We're just concerned about it here because
3037 			 * we've been asked to "execute" it but that
3038 			 * does not mean that it should cause other
3039 			 * accesses to fail.
3040 			 * This happens when a debugger sets a
3041 			 * breakpoint and we then execute code in
3042 			 * that page.  Marking the page as "tainted"
3043 			 * would cause any inspection tool ("leaks",
3044 			 * "vmmap", "CrashReporter", ...) to get killed
3045 			 * due to code-signing violation on that page,
3046 			 * even though they're just reading it and not
3047 			 * executing from it.
3048 			 */
3049 		} else {
3050 			/*
3051 			 * Page might have been tainted before or not;
3052 			 * now it definitively is. If the page wasn't
3053 			 * tainted, we must disconnect it from all
3054 			 * pmaps later, to force existing mappings
3055 			 * through that code path for re-consideration
3056 			 * of the validity of that page.
3057 			 */
3058 			if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3059 				*must_disconnect = TRUE;
3060 				VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3061 			}
3062 		}
3063 		cs_enter_tainted_accepted++;
3064 	}
3065 	if (kr != KERN_SUCCESS) {
3066 		if (cs_debug) {
3067 			printf("CODESIGNING: vm_fault_enter(0x%llx): "
3068 			    "*** INVALID PAGE ***\n",
3069 			    (long long)vaddr);
3070 		}
3071 #if !SECURE_KERNEL
3072 		if (cs_enforcement_panic) {
3073 			panic("CODESIGNING: panicking on invalid page");
3074 		}
3075 #endif
3076 	}
3077 	return kr;
3078 }
3079 
3080 /*
3081  * Check that the code signature is valid for the given page being inserted into
3082  * the pmap.
3083  *
3084  * @param must_disconnect This value will be set to true if the caller must disconnect
3085  * this page.
3086  * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3087  */
3088 static kern_return_t
3089 vm_fault_validate_cs(
3090 	bool cs_bypass,
3091 	vm_object_t object,
3092 	vm_page_t m,
3093 	pmap_t pmap,
3094 	vm_map_offset_t vaddr,
3095 	vm_prot_t prot,
3096 	vm_prot_t caller_prot,
3097 	vm_map_size_t fault_page_size,
3098 	vm_map_offset_t fault_phys_offset,
3099 	vm_object_fault_info_t fault_info,
3100 	bool *must_disconnect)
3101 {
3102 	bool map_is_switched, map_is_switch_protected, cs_violation;
3103 	kern_return_t kr;
3104 	/* Validate code signature if necessary. */
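	/*
	 * The pmap is considered "switched" when this thread is currently
	 * running on a map other than its task's own map; such maps can be
	 * switch-protected to restrict writes made while switched.
	 */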
3105 	map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3106 	    (pmap == vm_map_pmap(current_thread()->map)));
3107 	map_is_switch_protected = current_thread()->map->switch_protect;
3108 	kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3109 	    prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3110 	    map_is_switched, map_is_switch_protected, &cs_violation);
3111 	if (kr != KERN_SUCCESS) {
3112 		return kr;
3113 	}
3114 	if (cs_violation) {
3115 		kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3116 		    fault_page_size, fault_phys_offset,
3117 		    map_is_switched, map_is_switch_protected, must_disconnect);
3118 	}
3119 	return kr;
3120 }
3121 
3122 /*
3123  * Enqueue the page on the appropriate paging queue.
3124  */
3125 static void
3126 vm_fault_enqueue_page(
3127 	vm_object_t object,
3128 	vm_page_t m,
3129 	bool wired,
3130 	bool change_wiring,
3131 	vm_tag_t wire_tag,
3132 	bool no_cache,
3133 	int *type_of_fault,
3134 	kern_return_t kr)
3135 {
3136 	assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3137 	boolean_t       page_queues_locked = FALSE;
3138 	boolean_t       previously_pmapped = m->vmp_pmapped;
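	/*
	 * Lazy page-queues locking: the macros below take the page queues
	 * spinlock only the first time it is actually needed, and remember
	 * whether it is held so it can be dropped once at the end.
	 */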
3139 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()   \
3140 MACRO_BEGIN                                     \
3141 	if (! page_queues_locked) {             \
3142 	        page_queues_locked = TRUE;      \
3143 	        vm_page_lockspin_queues();      \
3144 	}                                       \
3145 MACRO_END
3146 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()     \
3147 MACRO_BEGIN                                     \
3148 	if (page_queues_locked) {               \
3149 	        page_queues_locked = FALSE;     \
3150 	        vm_page_unlock_queues();        \
3151 	}                                       \
3152 MACRO_END
3153 
3154 #if CONFIG_BACKGROUND_QUEUE
3155 	vm_page_update_background_state(m);
3156 #endif
3157 	if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3158 		/*
3159 		 * Compressor pages are neither wired
3160 		 * nor pageable and should never change.
3161 		 */
3162 		assert(object == compressor_object);
3163 	} else if (change_wiring) {
3164 		__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3165 
3166 		if (wired) {
3167 			if (kr == KERN_SUCCESS) {
3168 				vm_page_wire(m, wire_tag, TRUE);
3169 			}
3170 		} else {
3171 			vm_page_unwire(m, TRUE);
3172 		}
3173 		/* we keep the page queues lock, if we need it later */
3174 	} else {
3175 		if (object->internal == TRUE) {
3176 			/*
3177 			 * don't allow anonymous pages on
3178 			 * the speculative queues
3179 			 */
3180 			no_cache = FALSE;
3181 		}
3182 		if (kr != KERN_SUCCESS) {
3183 			__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3184 			vm_page_deactivate(m);
3185 			/* we keep the page queues lock, if we need it later */
3186 		} else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3187 		    (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3188 		    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3189 		    ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3190 		    !VM_PAGE_WIRED(m)) {
3191 			if (vm_page_local_q &&
3192 			    (*type_of_fault == DBG_COW_FAULT ||
3193 			    *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3194 				struct vpl      *lq;
3195 				uint32_t        lid;
3196 
3197 				assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3198 
3199 				__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3200 				vm_object_lock_assert_exclusive(object);
3201 
3202 				/*
3203 				 * we got a local queue to stuff this
3204 				 * new page on...
3205 				 * it's safe to manipulate local and
3206 				 * local_id at this point since we're
3207 				 * behind an exclusive object lock and
3208 				 * the page is not on any global queue.
3209 				 *
3210 				 * we'll use the current cpu number to
3211 				 * select the queue... note that we don't
3212 				 * need to disable preemption... we're
3213 				 * going to be behind the local queue's
3214 				 * lock to do the real work
3215 				 */
3216 				lid = cpu_number();
3217 
3218 				lq = zpercpu_get_cpu(vm_page_local_q, lid);
3219 
3220 				VPL_LOCK(&lq->vpl_lock);
3221 
3222 				vm_page_check_pageable_safe(m);
3223 				vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3224 				m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3225 				m->vmp_local_id = lid;
3226 				lq->vpl_count++;
3227 
3228 				if (object->internal) {
3229 					lq->vpl_internal_count++;
3230 				} else {
3231 					lq->vpl_external_count++;
3232 				}
3233 
3234 				VPL_UNLOCK(&lq->vpl_lock);
3235 
3236 				if (lq->vpl_count > vm_page_local_q_soft_limit) {
3237 					/*
3238 					 * we're beyond the soft limit
3239 					 * for the local queue
3240 					 * vm_page_reactivate_local will
3241 					 * 'try' to take the global page
3242 					 * queue lock... if it can't
3243 					 * that's ok... we'll let the
3244 					 * queue continue to grow up
3245 					 * to the hard limit... at that
3246 					 * point we'll wait for the
3247 					 * lock... once we've got the
3248 					 * lock, we'll transfer all of
3249 					 * the pages from the local
3250 					 * queue to the global active
3251 					 * queue
3252 					 */
3253 					vm_page_reactivate_local(lid, FALSE, FALSE);
3254 				}
3255 			} else {
3256 				__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3257 
3258 				/*
3259 				 * test again now that we hold the
3260 				 * page queue lock
3261 				 */
3262 				if (!VM_PAGE_WIRED(m)) {
3263 					if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3264 						vm_page_queues_remove(m, FALSE);
3265 
3266 						VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3267 						VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3268 					}
3269 
3270 					if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3271 					    no_cache) {
3272 						/*
3273 						 * If this is a no_cache mapping
3274 						 * and the page has never been
3275 						 * mapped before or was
3276 						 * previously a no_cache page,
3277 						 * then we want to leave pages
3278 						 * in the speculative state so
3279 						 * that they can be readily
3280 						 * recycled if free memory runs
3281 						 * low.  Otherwise the page is
3282 						 * activated as normal.
3283 						 */
3284 
3285 						if (no_cache &&
3286 						    (!previously_pmapped ||
3287 						    m->vmp_no_cache)) {
3288 							m->vmp_no_cache = TRUE;
3289 
3290 							if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3291 								vm_page_speculate(m, FALSE);
3292 							}
3293 						} else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3294 							vm_page_activate(m);
3295 						}
3296 					}
3297 				}
3298 				/* we keep the page queues lock, if we need it later */
3299 			}
3300 		}
3301 	}
3302 	/* we're done with the page queues lock, if we ever took it */
3303 	__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3304 }
3305 
3306 /*
3307  * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3308  * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys()
3309  * before being inserted into the pmap.
3310  */
3311 static bool
3312 vm_fault_enter_set_mapped(
3313 	vm_object_t object,
3314 	vm_page_t m,
3315 	vm_prot_t prot,
3316 	vm_prot_t fault_type)
3317 {
3318 	bool page_needs_sync = false;
3319 	/*
3320 	 * NOTE: we may only hold the vm_object lock SHARED
3321 	 * at this point, so we need the phys_page lock to
3322 	 * properly serialize updating the pmapped and
3323 	 * xpmapped bits
3324 	 */
3325 	if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3326 		ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3327 
3328 		pmap_lock_phys_page(phys_page);
3329 		m->vmp_pmapped = TRUE;
3330 
3331 		if (!m->vmp_xpmapped) {
3332 			m->vmp_xpmapped = TRUE;
3333 
3334 			pmap_unlock_phys_page(phys_page);
3335 
3336 			if (!object->internal) {
3337 				OSAddAtomic(1, &vm_page_xpmapped_external_count);
3338 			}
3339 
3340 #if defined(__arm__) || defined(__arm64__)
3341 			page_needs_sync = true;
3342 #else
3343 			if (object->internal &&
3344 			    object->pager != NULL) {
3345 				/*
3346 				 * This page could have been
3347 				 * uncompressed by the
3348 				 * compressor pager and its
3349 				 * contents might be only in
3350 				 * the data cache.
3351 				 * Since it's being mapped for
3352 			 * "execute" for the first time,
3353 				 * make sure the icache is in
3354 				 * sync.
3355 				 */
3356 				assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3357 				page_needs_sync = true;
3358 			}
3359 #endif
3360 		} else {
3361 			pmap_unlock_phys_page(phys_page);
3362 		}
3363 	} else {
3364 		if (m->vmp_pmapped == FALSE) {
3365 			ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3366 
3367 			pmap_lock_phys_page(phys_page);
3368 			m->vmp_pmapped = TRUE;
3369 			pmap_unlock_phys_page(phys_page);
3370 		}
3371 	}
3372 
3373 	if (fault_type & VM_PROT_WRITE) {
3374 		if (m->vmp_wpmapped == FALSE) {
3375 			vm_object_lock_assert_exclusive(object);
3376 			if (!object->internal && object->pager) {
3377 				task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3378 			}
3379 			m->vmp_wpmapped = TRUE;
3380 		}
3381 	}
3382 	return page_needs_sync;
3383 }
3384 
3385 /*
3386  * Try to enter the given page into the pmap.
3387  * Will retry without execute permission iff PMAP_CS is enabled and we encounter
3388  * a codesigning failure on a non-execute fault.
3389  */
3390 static kern_return_t
3391 vm_fault_attempt_pmap_enter(
3392 	pmap_t pmap,
3393 	vm_map_offset_t vaddr,
3394 	vm_map_size_t fault_page_size,
3395 	vm_map_offset_t fault_phys_offset,
3396 	vm_page_t m,
3397 	vm_prot_t *prot,
3398 	vm_prot_t caller_prot,
3399 	vm_prot_t fault_type,
3400 	bool wired,
3401 	int pmap_options)
3402 {
3403 #if !PMAP_CS
3404 #pragma unused(caller_prot)
3405 #endif /* !PMAP_CS */
3406 	kern_return_t kr;
3407 	if (fault_page_size != PAGE_SIZE) {
3408 		DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3409 		assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3410 		    fault_phys_offset < PAGE_SIZE),
3411 		    "0x%llx\n", (uint64_t)fault_phys_offset);
3412 	} else {
3413 		assertf(fault_phys_offset == 0,
3414 		    "0x%llx\n", (uint64_t)fault_phys_offset);
3415 	}
3416 
3417 	PMAP_ENTER_OPTIONS(pmap, vaddr,
3418 	    fault_phys_offset,
3419 	    m, *prot, fault_type, 0,
3420 	    wired,
3421 	    pmap_options,
3422 	    kr);
3423 	return kr;
3424 }
3425 
3426 /*
3427  * Enter the given page into the pmap.
3428  * The map must be locked shared.
3429  * The vm object must NOT be locked.
3430  *
3431  * @param need_retry if not null, avoid making a (potentially) blocking call into
3432  * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3433  */
3434 static kern_return_t
3435 vm_fault_pmap_enter(
3436 	pmap_t pmap,
3437 	vm_map_offset_t vaddr,
3438 	vm_map_size_t fault_page_size,
3439 	vm_map_offset_t fault_phys_offset,
3440 	vm_page_t m,
3441 	vm_prot_t *prot,
3442 	vm_prot_t caller_prot,
3443 	vm_prot_t fault_type,
3444 	bool wired,
3445 	int pmap_options,
3446 	boolean_t *need_retry)
3447 {
3448 	kern_return_t kr;
3449 	if (need_retry != NULL) {
3450 		/*
3451 		 * Although we don't hold a lock on this object, we hold a lock
3452 		 * on the top object in the chain. To prevent a deadlock, we
3453 		 * can't allow the pmap layer to block.
3454 		 */
3455 		pmap_options |= PMAP_OPTIONS_NOWAIT;
3456 	}
3457 	kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3458 	    fault_page_size, fault_phys_offset,
3459 	    m, prot, caller_prot, fault_type, wired, pmap_options);
3460 	if (kr == KERN_RESOURCE_SHORTAGE) {
3461 		if (need_retry) {
3462 			/*
3463 			 * There's nothing we can do here since we hold the
3464 			 * lock on the top object in the chain. The caller
3465 			 * will need to deal with this by dropping that lock and retrying.
3466 			 */
3467 			*need_retry = TRUE;
3468 			vm_pmap_enter_retried++;
3469 		}
3470 	}
3471 	return kr;
3472 }
3473 
3474 /*
3475  * Enter the given page into the pmap.
3476  * The vm map must be locked shared.
3477  * The vm object must be locked exclusive, unless this is a soft fault.
3478  * For a soft fault, the object must be locked shared or exclusive.
3479  *
3480  * @param need_retry if not null, avoid making a (potentially) blocking call into
3481  * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3482  */
3483 static kern_return_t
3484 vm_fault_pmap_enter_with_object_lock(
3485 	vm_object_t object,
3486 	pmap_t pmap,
3487 	vm_map_offset_t vaddr,
3488 	vm_map_size_t fault_page_size,
3489 	vm_map_offset_t fault_phys_offset,
3490 	vm_page_t m,
3491 	vm_prot_t *prot,
3492 	vm_prot_t caller_prot,
3493 	vm_prot_t fault_type,
3494 	bool wired,
3495 	int pmap_options,
3496 	boolean_t *need_retry)
3497 {
3498 	kern_return_t kr;
3499 	/*
3500 	 * Prevent a deadlock by not
3501 	 * holding the object lock if we need to wait for a page in
3502 	 * pmap_enter() - <rdar://problem/7138958>
3503 	 */
3504 	kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3505 	    fault_page_size, fault_phys_offset,
3506 	    m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3507 #if __x86_64__
3508 	if (kr == KERN_INVALID_ARGUMENT &&
3509 	    pmap == PMAP_NULL &&
3510 	    wired) {
3511 		/*
3512 		 * Wiring a page in a pmap-less VM map:
3513 		 * VMware's "vmmon" kernel extension does this
3514 		 * to grab pages.
3515 		 * Let it proceed even though the PMAP_ENTER() failed.
3516 		 */
3517 		kr = KERN_SUCCESS;
3518 	}
3519 #endif /* __x86_64__ */
3520 
3521 	if (kr == KERN_RESOURCE_SHORTAGE) {
3522 		if (need_retry) {
3523 			/*
3524 			 * this will be non-null in the case where we hold the lock
3525 			 * on the top-object in this chain... we can't just drop
3526 			 * the lock on the object we're inserting the page into
3527 			 * and recall the PMAP_ENTER since we can still cause
3528 			 * a deadlock if one of the critical paths tries to
3529 			 * acquire the lock on the top-object and we're blocked
3530 			 * in PMAP_ENTER waiting for memory... our only recourse
3531 			 * is to deal with it at a higher level where we can
3532 			 * drop both locks.
3533 			 */
3534 			*need_retry = TRUE;
3535 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PMAP_ENTER_RESOURCE_SHORTAGE), 0 /* arg */);
3536 			vm_pmap_enter_retried++;
3537 			goto done;
3538 		}
3539 		/*
3540 		 * The nonblocking version of pmap_enter did not succeed,
3541 		 * and we don't need to drop other locks and retry
3542 		 * at the level above us, so
3543 		 * use the blocking version instead.  This requires marking
3544 		 * the page busy and unlocking the object.
3545 		 */
3546 		boolean_t was_busy = m->vmp_busy;
3547 
3548 		vm_object_lock_assert_exclusive(object);
3549 
3550 		m->vmp_busy = TRUE;
3551 		vm_object_unlock(object);
3552 
3553 		PMAP_ENTER_OPTIONS(pmap, vaddr,
3554 		    fault_phys_offset,
3555 		    m, *prot, fault_type,
3556 		    0, wired,
3557 		    pmap_options, kr);
3558 
3559 		assert(VM_PAGE_OBJECT(m) == object);
3560 
3561 		/* Take the object lock again. */
3562 		vm_object_lock(object);
3563 
3564 		/* If the page was busy, someone else will wake it up.
3565 		 * Otherwise, we have to do it now. */
3566 		assert(m->vmp_busy);
3567 		if (!was_busy) {
3568 			PAGE_WAKEUP_DONE(m);
3569 		}
3570 		vm_pmap_enter_blocked++;
3571 	}
3572 
3573 done:
3574 	return kr;
3575 }
3576 
3577 /*
3578  * Prepare to enter a page into the pmap by checking CS, protection bits,
3579  * and setting mapped bits on the page_t.
3580  * Does not modify the page's paging queue.
3581  *
3582  * page queue lock must NOT be held
3583  * m->vmp_object must be locked
3584  *
3585  * NOTE: m->vmp_object could be locked "shared" only if we are called
3586  * from vm_fault() as part of a soft fault.
3587  */
3588 static kern_return_t
3589 vm_fault_enter_prepare(
3590 	vm_page_t m,
3591 	pmap_t pmap,
3592 	vm_map_offset_t vaddr,
3593 	vm_prot_t *prot,
3594 	vm_prot_t caller_prot,
3595 	vm_map_size_t fault_page_size,
3596 	vm_map_offset_t fault_phys_offset,
3597 	boolean_t change_wiring,
3598 	vm_prot_t fault_type,
3599 	vm_object_fault_info_t fault_info,
3600 	int *type_of_fault,
3601 	bool *page_needs_data_sync)
3602 {
3603 	kern_return_t   kr;
3604 	bool            is_tainted = false;
3605 	vm_object_t     object;
3606 	boolean_t       cs_bypass = fault_info->cs_bypass;
3607 
3608 	object = VM_PAGE_OBJECT(m);
3609 
3610 	vm_object_lock_assert_held(object);
3611 
3612 #if KASAN
3613 	if (pmap == kernel_pmap) {
3614 		kasan_notify_address(vaddr, PAGE_SIZE);
3615 	}
3616 #endif
3617 
3618 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3619 
3620 	if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3621 		vm_object_lock_assert_exclusive(object);
3622 	} else if ((fault_type & VM_PROT_WRITE) == 0 &&
3623 	    !change_wiring &&
3624 	    (!m->vmp_wpmapped
3625 #if VM_OBJECT_ACCESS_TRACKING
3626 	    || object->access_tracking
3627 #endif /* VM_OBJECT_ACCESS_TRACKING */
3628 	    )) {
3629 		/*
3630 		 * This is not a "write" fault, so we
3631 		 * might not have taken the object lock
3632 		 * exclusively and we might not be able
3633 		 * to update the "wpmapped" bit in
3634 		 * vm_fault_enter().
3635 		 * Let's just grant read access to
3636 		 * the page for now and we'll
3637 		 * soft-fault again if we need write
3638 		 * access later...
3639 		 */
3640 
3641 		/* This had better not be a JIT page. */
3642 		if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3643 			*prot &= ~VM_PROT_WRITE;
3644 		} else {
3645 			assert(cs_bypass);
3646 		}
3647 	}
3648 	if (m->vmp_pmapped == FALSE) {
3649 		if (m->vmp_clustered) {
3650 			if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3651 				/*
3652 				 * found it in the cache, but this
3653 				 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3654 				 * so it must have come in as part of
3655 				 * a cluster... account 1 pagein against it
3656 				 */
3657 				if (object->internal) {
3658 					*type_of_fault = DBG_PAGEIND_FAULT;
3659 				} else {
3660 					*type_of_fault = DBG_PAGEINV_FAULT;
3661 				}
3662 
3663 				VM_PAGE_COUNT_AS_PAGEIN(m);
3664 			}
3665 			VM_PAGE_CONSUME_CLUSTERED(m);
3666 		}
3667 	}
3668 
3669 	if (*type_of_fault != DBG_COW_FAULT) {
3670 		DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3671 
3672 		if (pmap == kernel_pmap) {
3673 			DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3674 		}
3675 	}
3676 
3677 	kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3678 	    *prot, caller_prot, fault_page_size, fault_phys_offset,
3679 	    fault_info, &is_tainted);
3680 	if (kr == KERN_SUCCESS) {
3681 		/*
3682 		 * We either have a good page, or a tainted page that has been accepted by the process.
3683 		 * In both cases the page will be entered into the pmap.
3684 		 */
3685 		*page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3686 		if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3687 			/*
3688 			 * This page is tainted but we're inserting it anyway.
3689 			 * Since it's writeable, we need to disconnect it from other pmaps
3690 			 * now so those processes can take note.
3691 			 */
3692 
3693 			/*
3694 			 * We can only get here
3695 			 * because of the CSE logic
3696 			 */
3697 			assert(pmap_get_vm_map_cs_enforced(pmap));
3698 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3699 			/*
3700 			 * If we are faulting for a write, we can clear
3701 			 * the execute bit - that will ensure the page is
3702 			 * checked again before being executable, which
3703 			 * protects against a map switch.
3704 			 * This only happens the first time the page
3705 			 * gets tainted, so we won't get stuck here
3706 			 * to make an already writeable page executable.
3707 			 */
3708 			if (!cs_bypass) {
3709 				assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot));
3710 				*prot &= ~VM_PROT_EXECUTE;
3711 			}
3712 		}
3713 		assert(VM_PAGE_OBJECT(m) == object);
3714 
3715 #if VM_OBJECT_ACCESS_TRACKING
3716 		if (object->access_tracking) {
3717 			DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3718 			if (fault_type & VM_PROT_WRITE) {
3719 				object->access_tracking_writes++;
3720 				vm_object_access_tracking_writes++;
3721 			} else {
3722 				object->access_tracking_reads++;
3723 				vm_object_access_tracking_reads++;
3724 			}
3725 		}
3726 #endif /* VM_OBJECT_ACCESS_TRACKING */
3727 	}
3728 
3729 	return kr;
3730 }
3731 
3732 /*
3733  * page queue lock must NOT be held
3734  * m->vmp_object must be locked
3735  *
3736  * NOTE: m->vmp_object could be locked "shared" only if we are called
3737  * from vm_fault() as part of a soft fault.  If so, we must be
3738  * careful not to modify the VM object in any way that is not
3739  * legal under a shared lock...
3740  */
3741 kern_return_t
3742 vm_fault_enter(
3743 	vm_page_t m,
3744 	pmap_t pmap,
3745 	vm_map_offset_t vaddr,
3746 	vm_map_size_t fault_page_size,
3747 	vm_map_offset_t fault_phys_offset,
3748 	vm_prot_t prot,
3749 	vm_prot_t caller_prot,
3750 	boolean_t wired,
3751 	boolean_t change_wiring,
3752 	vm_tag_t  wire_tag,
3753 	vm_object_fault_info_t fault_info,
3754 	boolean_t *need_retry,
3755 	int *type_of_fault)
3756 {
3757 	kern_return_t   kr;
3758 	vm_object_t     object;
3759 	bool            page_needs_data_sync;
3760 	vm_prot_t       fault_type;
3761 	int             pmap_options = fault_info->pmap_options;
3762 
3763 	if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
3764 		assert(m->vmp_fictitious);
3765 		return KERN_SUCCESS;
3766 	}
3767 
3768 	fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
3769 
3770 	assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
3771 	kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
3772 	    fault_page_size, fault_phys_offset, change_wiring, fault_type,
3773 	    fault_info, type_of_fault, &page_needs_data_sync);
3774 	object = VM_PAGE_OBJECT(m);
3775 
3776 	vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
3777 
3778 	if (kr == KERN_SUCCESS) {
3779 		if (page_needs_data_sync) {
3780 			pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
3781 		}
3782 
3783 		kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
3784 		    fault_page_size, fault_phys_offset, m,
3785 		    &prot, caller_prot, fault_type, wired, pmap_options, need_retry);
3786 	}
3787 
3788 	return kr;
3789 }
3790 
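/*
 * Pre-fault the page at "vaddr" in the current map: if the address has no
 * physical translation yet in the current pmap, take a regular (non-wiring,
 * uninterruptible) fault now so that a later access does not have to.
 */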
3791 void
3792 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
3793 {
3794 	if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
3795 		vm_fault(current_map(),      /* map */
3796 		    vaddr,                   /* vaddr */
3797 		    prot,                    /* fault_type */
3798 		    FALSE,                   /* change_wiring */
3799 		    VM_KERN_MEMORY_NONE,     /* tag - not wiring */
3800 		    THREAD_UNINT,            /* interruptible */
3801 		    NULL,                    /* caller_pmap */
3802 		    0 /* caller_pmap_addr */);
3803 	}
3804 }
3805 
3806 
3807 /*
3808  *	Routine:	vm_fault
3809  *	Purpose:
3810  *		Handle page faults, including pseudo-faults
3811  *		used to change the wiring status of pages.
3812  *	Returns:
3813  *		Explicit continuations have been removed.
3814  *	Implementation:
3815  *		vm_fault and vm_fault_page save mucho state
3816  *		in the moral equivalent of a closure.  The state
3817  *		structure is allocated when first entering vm_fault
3818  *		and deallocated when leaving vm_fault.
3819  */
3820 
3821 extern uint64_t get_current_unique_pid(void);
3822 
3823 unsigned long vm_fault_collapse_total = 0;
3824 unsigned long vm_fault_collapse_skipped = 0;
3825 
3826 
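/*
 * Exported entry point: wraps vm_fault_internal() and, for wiring requests,
 * derives the wire tag from the caller's backtrace.
 */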
3827 kern_return_t
3828 vm_fault_external(
3829 	vm_map_t        map,
3830 	vm_map_offset_t vaddr,
3831 	vm_prot_t       fault_type,
3832 	boolean_t       change_wiring,
3833 	int             interruptible,
3834 	pmap_t          caller_pmap,
3835 	vm_map_offset_t caller_pmap_addr)
3836 {
3837 	return vm_fault_internal(map, vaddr, fault_type, change_wiring,
3838 	           change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
3839 	           interruptible, caller_pmap, caller_pmap_addr,
3840 	           NULL);
3841 }
3842 
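/*
 * In-kernel entry point: same as vm_fault_external() except that the caller
 * supplies the wire tag explicitly.
 */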
3843 kern_return_t
3844 vm_fault(
3845 	vm_map_t        map,
3846 	vm_map_offset_t vaddr,
3847 	vm_prot_t       fault_type,
3848 	boolean_t       change_wiring,
3849 	vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
3850 	int             interruptible,
3851 	pmap_t          caller_pmap,
3852 	vm_map_offset_t caller_pmap_addr)
3853 {
3854 	return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag,
3855 	           interruptible, caller_pmap, caller_pmap_addr,
3856 	           NULL);
3857 }
3858 
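/*
 * A process is treated as "privileged" for the copy-on-read protection
 * against untrusted pagers if it is running a platform binary
 * (csproc_get_platform_binary()).
 */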
3859 static boolean_t
3860 current_proc_is_privileged(void)
3861 {
3862 	return csproc_get_platform_binary(current_proc());
3863 }
3864 
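/*
 * Number of copy-on-read faults taken to shield privileged processes from
 * untrusted pagers (see the copy-on-read handling in vm_fault_internal()).
 */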
3865 uint64_t vm_copied_on_read = 0;
3866 
3867 /*
3868  * Cleanup after a vm_fault_enter.
3869  * At this point, the fault should either have failed (kr != KERN_SUCCESS)
3870  * or the page should be in the pmap and on the correct paging queue.
3871  *
3872  * Precondition:
3873  * map must be locked shared.
3874  * m_object must be locked.
3875  * If top_object != VM_OBJECT_NULL, it must be locked.
3876  * real_map must be locked.
3877  *
3878  * Postcondition:
3879  * map will be unlocked
3880  * m_object will be unlocked
3881  * top_object will be unlocked
3882  * If real_map != map, it will be unlocked
3883  */
3884 static void
3885 vm_fault_complete(
3886 	vm_map_t map,
3887 	vm_map_t real_map,
3888 	vm_object_t object,
3889 	vm_object_t m_object,
3890 	vm_page_t m,
3891 	vm_map_offset_t offset,
3892 	vm_map_offset_t trace_real_vaddr,
3893 	vm_object_fault_info_t fault_info,
3894 	vm_prot_t caller_prot,
3895 #if CONFIG_DTRACE
3896 	vm_map_offset_t real_vaddr,
3897 #else
3898 	__unused vm_map_offset_t real_vaddr,
3899 #endif /* CONFIG_DTRACE */
3900 	int type_of_fault,
3901 	boolean_t need_retry,
3902 	kern_return_t kr,
3903 	ppnum_t *physpage_p,
3904 	vm_prot_t prot,
3905 	vm_object_t top_object,
3906 	boolean_t need_collapse,
3907 	vm_map_offset_t cur_offset,
3908 	vm_prot_t fault_type,
3909 	vm_object_t *written_on_object,
3910 	memory_object_t *written_on_pager,
3911 	vm_object_offset_t *written_on_offset)
3912 {
3913 	int     event_code = 0;
3914 	vm_map_lock_assert_shared(map);
3915 	vm_object_lock_assert_held(m_object);
3916 	if (top_object != VM_OBJECT_NULL) {
3917 		vm_object_lock_assert_held(top_object);
3918 	}
3919 	vm_map_lock_assert_held(real_map);
3920 
3921 	if (m_object->internal) {
3922 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
3923 	} else if (m_object->object_is_shared_cache) {
3924 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
3925 	} else {
3926 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
3927 	}
3928 
3929 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
3930 	if (need_retry == FALSE) {
3931 		KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid(), 0, 0, 0, 0);
3932 	}
3933 	DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
3934 	if (kr == KERN_SUCCESS &&
3935 	    physpage_p != NULL) {
3936 		/* for vm_map_wire_and_extract() */
3937 		*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
3938 		if (prot & VM_PROT_WRITE) {
3939 			vm_object_lock_assert_exclusive(m_object);
3940 			m->vmp_dirty = TRUE;
3941 		}
3942 	}
3943 
3944 	if (top_object != VM_OBJECT_NULL) {
3945 		/*
3946 		 * It's safe to drop the top object
3947 		 * now that we've done our
3948 		 * vm_fault_enter().  Any other fault
3949 		 * in progress for that virtual
3950 		 * address will either find our page
3951 		 * and translation or put in a new page
3952 		 * and translation.
3953 		 */
3954 		vm_object_unlock(top_object);
3955 		top_object = VM_OBJECT_NULL;
3956 	}
3957 
3958 	if (need_collapse == TRUE) {
3959 		vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
3960 	}
3961 
3962 	if (need_retry == FALSE &&
3963 	    (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
3964 		/*
3965 		 * evaluate access pattern and update state
3966 		 * vm_fault_deactivate_behind depends on the
3967 		 * state being up to date
3968 		 */
3969 		vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
3970 
3971 		vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
3972 	}
3973 	/*
3974 	 * That's it, clean up and return.
3975 	 */
3976 	if (m->vmp_busy) {
3977 		vm_object_lock_assert_exclusive(m_object);
3978 		PAGE_WAKEUP_DONE(m);
3979 	}
3980 
3981 	if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
3982 		vm_object_paging_begin(m_object);
3983 
3984 		assert(*written_on_object == VM_OBJECT_NULL);
3985 		*written_on_object = m_object;
3986 		*written_on_pager = m_object->pager;
3987 		*written_on_offset = m_object->paging_offset + m->vmp_offset;
3988 	}
3989 	vm_object_unlock(object);
3990 
3991 	vm_map_unlock_read(map);
3992 	if (real_map != map) {
3993 		vm_map_unlock(real_map);
3994 	}
3995 }
3996 
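/*
 * For tracing, report a COW fault that was actually triggered by the
 * copy-on-read logic as DBG_COR_FAULT instead of DBG_COW_FAULT.
 */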
3997 static inline int
3998 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
3999 {
4000 	if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4001 		return DBG_COR_FAULT;
4002 	}
4003 	return type_of_fault;
4004 }
4005 
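/*
 * Counters for recovery from media failures on "resilient_media" mappings,
 * where a zero-filled page is substituted for a page the pager could not
 * provide.
 */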
4006 uint64_t vm_fault_resilient_media_initiate = 0;
4007 uint64_t vm_fault_resilient_media_retry = 0;
4008 uint64_t vm_fault_resilient_media_proceed = 0;
4009 uint64_t vm_fault_resilient_media_release = 0;
4010 uint64_t vm_fault_resilient_media_abort1 = 0;
4011 uint64_t vm_fault_resilient_media_abort2 = 0;
4012 
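/*
 * Fault-injection knobs (MACH_ASSERT kernels only): a non-zero *_rate value
 * periodically forces the corresponding step of the resilient-media recovery
 * to fail, to exercise the abort paths.
 */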
4013 #if MACH_ASSERT
4014 int vm_fault_resilient_media_inject_error1_rate = 0;
4015 int vm_fault_resilient_media_inject_error1 = 0;
4016 int vm_fault_resilient_media_inject_error2_rate = 0;
4017 int vm_fault_resilient_media_inject_error2 = 0;
4018 int vm_fault_resilient_media_inject_error3_rate = 0;
4019 int vm_fault_resilient_media_inject_error3 = 0;
4020 #endif /* MACH_ASSERT */
4021 
4022 kern_return_t
4023 vm_fault_internal(
4024 	vm_map_t        map,
4025 	vm_map_offset_t vaddr,
4026 	vm_prot_t       caller_prot,
4027 	boolean_t       change_wiring,
4028 	vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4029 	int             interruptible,
4030 	pmap_t          caller_pmap,
4031 	vm_map_offset_t caller_pmap_addr,
4032 	ppnum_t         *physpage_p)
4033 {
4034 	vm_map_version_t        version;        /* Map version for verification */
4035 	boolean_t               wired;          /* Should mapping be wired down? */
4036 	vm_object_t             object;         /* Top-level object */
4037 	vm_object_offset_t      offset;         /* Top-level offset */
4038 	vm_prot_t               prot;           /* Protection for mapping */
4039 	vm_object_t             old_copy_object; /* Saved copy object */
4040 	vm_page_t               result_page;    /* Result of vm_fault_page */
4041 	vm_page_t               top_page;       /* Placeholder page */
4042 	kern_return_t           kr;
4043 
4044 	vm_page_t               m;      /* Fast access to result_page */
4045 	kern_return_t           error_code;
4046 	vm_object_t             cur_object;
4047 	vm_object_t             m_object = NULL;
4048 	vm_object_offset_t      cur_offset;
4049 	vm_page_t               cur_m;
4050 	vm_object_t             new_object;
4051 	int                     type_of_fault;
4052 	pmap_t                  pmap;
4053 	wait_interrupt_t        interruptible_state;
4054 	vm_map_t                real_map = map;
4055 	vm_map_t                original_map = map;
4056 	bool                    object_locks_dropped = FALSE;
4057 	vm_prot_t               fault_type;
4058 	vm_prot_t               original_fault_type;
4059 	struct vm_object_fault_info fault_info = {};
4060 	bool                    need_collapse = FALSE;
4061 	boolean_t               need_retry = FALSE;
4062 	boolean_t               *need_retry_ptr = NULL;
4063 	uint8_t                 object_lock_type = 0;
4064 	uint8_t                 cur_object_lock_type;
4065 	vm_object_t             top_object = VM_OBJECT_NULL;
4066 	vm_object_t             written_on_object = VM_OBJECT_NULL;
4067 	memory_object_t         written_on_pager = NULL;
4068 	vm_object_offset_t      written_on_offset = 0;
4069 	int                     throttle_delay;
4070 	int                     compressed_count_delta;
4071 	uint8_t                 grab_options;
4072 	bool                    need_copy;
4073 	bool                    need_copy_on_read;
4074 	vm_map_offset_t         trace_vaddr;
4075 	vm_map_offset_t         trace_real_vaddr;
4076 	vm_map_size_t           fault_page_size;
4077 	vm_map_size_t           fault_page_mask;
4078 	int                     fault_page_shift;
4079 	vm_map_offset_t         fault_phys_offset;
4080 	vm_map_offset_t         real_vaddr;
4081 	bool                    resilient_media_retry = false;
4082 	bool                    resilient_media_ref_transfer = false;
4083 	vm_object_t             resilient_media_object = VM_OBJECT_NULL;
4084 	vm_object_offset_t      resilient_media_offset = (vm_object_offset_t)-1;
4085 	bool                    page_needs_data_sync = false;
4086 	/*
4087 	 * Was the VM object contended when vm_map_lookup_locked locked it?
4088 	 * If so, the zero fill path will drop the lock
4089 	 * If so, the zero fill path will drop the lock.
4090 	 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4091 	 */
4092 	bool                    object_is_contended = false;
4093 
4094 	real_vaddr = vaddr;
4095 	trace_real_vaddr = vaddr;
4096 
4097 	/*
4098 	 * Some (kernel) submaps are marked with "should never fault".
4099 	 *
4100 	 * We do this for two reasons:
4101 	 * - PGZ which is inside the zone map range can't go down the normal
4102 	 *   lookup path (vm_map_lookup_entry() would panic).
4103 	 *
4104 	 * - we want guard pages to not have to use fictitious pages at all,
4105 	 *   to prevent ZFOD pages from being made.
4106 	 *
4107 	 * We also want to capture the fault address easily so that the zone
4108 	 * allocator might present an enhanced panic log.
4109 	 */
4110 	if (map->never_faults) {
4111 		assert(map->pmap == kernel_pmap);
4112 		panic_fault_address = vaddr;
4113 		return KERN_INVALID_ADDRESS;
4114 	}
4115 
4116 	if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4117 		fault_phys_offset = (vm_map_offset_t)-1;
4118 		fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4119 		fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4120 		fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4121 		if (fault_page_size < PAGE_SIZE) {
4122 			DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4123 			vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4124 		}
4125 	} else {
4126 		fault_phys_offset = 0;
4127 		fault_page_size = PAGE_SIZE;
4128 		fault_page_mask = PAGE_MASK;
4129 		fault_page_shift = PAGE_SHIFT;
4130 		vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4131 	}
4132 
4133 	if (map == kernel_map) {
4134 		trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4135 		trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4136 	} else {
4137 		trace_vaddr = vaddr;
4138 	}
4139 
4140 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4141 	    (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
4142 	    ((uint64_t)trace_vaddr >> 32),
4143 	    trace_vaddr,
4144 	    (map == kernel_map),
4145 	    0,
4146 	    0);
4147 
4148 	if (get_preemption_level() != 0) {
4149 		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4150 		    (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4151 		    ((uint64_t)trace_vaddr >> 32),
4152 		    trace_vaddr,
4153 		    KERN_FAILURE,
4154 		    0,
4155 		    0);
4156 
4157 		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4158 		return KERN_FAILURE;
4159 	}
4160 
4161 	thread_t cthread = current_thread();
4162 	bool      rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4163 	uint64_t fstart = 0;
4164 
4165 	if (rtfault) {
4166 		fstart = mach_continuous_time();
4167 	}
4168 
4169 	interruptible_state = thread_interrupt_level(interruptible);
4170 
4171 	fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
4172 
4173 	counter_inc(&vm_statistics_faults);
4174 	counter_inc(&current_task()->faults);
4175 	original_fault_type = fault_type;
4176 
4177 	need_copy = FALSE;
4178 	if (fault_type & VM_PROT_WRITE) {
4179 		need_copy = TRUE;
4180 	}
4181 
4182 	if (need_copy || change_wiring) {
4183 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4184 	} else {
4185 		object_lock_type = OBJECT_LOCK_SHARED;
4186 	}
4187 
4188 	cur_object_lock_type = OBJECT_LOCK_SHARED;
4189 
4190 	if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4191 		if (compressor_map) {
4192 			if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4193 				panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4194 			}
4195 		}
4196 	}
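	/*
	 * Restart point: every path that jumps back here has dropped the
	 * map and object locks, so the lookup below starts from scratch.
	 */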
4197 RetryFault:
4198 	assert(written_on_object == VM_OBJECT_NULL);
4199 
4200 	/*
4201 	 * assume we will hit a page in the cache
4202 	 * otherwise, explicitly override with
4203 	 * the real fault type once we determine it
4204 	 */
4205 	type_of_fault = DBG_CACHE_HIT_FAULT;
4206 
4207 	/*
4208 	 *	Find the backing store object and offset into
4209 	 *	it to begin the search.
4210 	 */
4211 	fault_type = original_fault_type;
4212 	map = original_map;
4213 	vm_map_lock_read(map);
4214 
4215 	if (resilient_media_retry) {
4216 		/*
4217 		 * If we have to insert a fake zero-filled page to hide
4218 		 * a media failure to provide the real page, we need to
4219 		 * resolve any pending copy-on-write on this mapping.
4220 		 * VM_PROT_COPY tells vm_map_lookup_locked() to deal
4221 		 * with that even if this is not a "write" fault.
4222 		 */
4223 		need_copy = TRUE;
4224 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4225 		vm_fault_resilient_media_retry++;
4226 	}
4227 
4228 	kr = vm_map_lookup_locked(&map, vaddr,
4229 	    (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4230 	    object_lock_type, &version,
4231 	    &object, &offset, &prot, &wired,
4232 	    &fault_info,
4233 	    &real_map,
4234 	    &object_is_contended);
4235 
4236 	if (kr != KERN_SUCCESS) {
4237 		vm_map_unlock_read(map);
4238 		/*
4239 		 * This can be seen in a crash report if indeed the
4240 		 * thread is crashing due to an invalid access in a non-existent
4241 		 * range.
4242 		 * Turning this OFF for now because it is noisy and not always fatal
4243 		 * eg prefaulting.
4244 		 *
4245 		 * if (kr == KERN_INVALID_ADDRESS) {
4246 		 *	kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4247 		 * }
4248 		 */
4249 		goto done;
4250 	}
4251 
4252 
4253 	pmap = real_map->pmap;
4254 	fault_info.interruptible = interruptible;
4255 	fault_info.stealth = FALSE;
4256 	fault_info.io_sync = FALSE;
4257 	fault_info.mark_zf_absent = FALSE;
4258 	fault_info.batch_pmap_op = FALSE;
4259 
4260 	if (resilient_media_retry) {
4261 		/*
4262 		 * We're retrying this fault after having detected a media
4263 		 * failure from a "resilient_media" mapping.
4264 		 * Check that the mapping is still pointing at the object
4265 		 * that just failed to provide a page.
4266 		 */
4267 		assert(resilient_media_object != VM_OBJECT_NULL);
4268 		assert(resilient_media_offset != (vm_object_offset_t)-1);
4269 		if ((object != VM_OBJECT_NULL &&
4270 		    object == resilient_media_object &&
4271 		    offset == resilient_media_offset &&
4272 		    fault_info.resilient_media)
4273 #if MACH_ASSERT
4274 		    && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4275 		    (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4276 #endif /* MACH_ASSERT */
4277 		    ) {
4278 			/*
4279 			 * This mapping still points at the same object
4280 			 * and is still "resilient_media": proceed in
4281 			 * "recovery-from-media-failure" mode, where we'll
4282 			 * insert a zero-filled page in the top object.
4283 			 */
4284 //                     printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4285 			vm_fault_resilient_media_proceed++;
4286 		} else {
4287 			/* not recovering: reset state and retry fault */
4288 //                     printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4289 			vm_object_unlock(object);
4290 			if (real_map != map) {
4291 				vm_map_unlock(real_map);
4292 			}
4293 			vm_map_unlock_read(map);
4294 			/* release our extra reference on failed object */
4295 //                     printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4296 			vm_object_lock_assert_notheld(resilient_media_object);
4297 			vm_object_deallocate(resilient_media_object);
4298 			resilient_media_object = VM_OBJECT_NULL;
4299 			resilient_media_offset = (vm_object_offset_t)-1;
4300 			resilient_media_retry = false;
4301 			vm_fault_resilient_media_abort1++;
4302 			goto RetryFault;
4303 		}
4304 	} else {
4305 		assert(resilient_media_object == VM_OBJECT_NULL);
4306 		resilient_media_offset = (vm_object_offset_t)-1;
4307 	}
4308 
4309 	/*
4310 	 * If the page is wired, we must fault for the current protection
4311 	 * value, to avoid further faults.
4312 	 */
4313 	if (wired) {
4314 		fault_type = prot | VM_PROT_WRITE;
4315 	}
4316 	if (wired || need_copy) {
4317 		/*
4318 		 * since we're treating this fault as a 'write'
4319 		 * we must hold the top object lock exclusively
4320 		 */
4321 		if (object_lock_type == OBJECT_LOCK_SHARED) {
4322 			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4323 
4324 			if (vm_object_lock_upgrade(object) == FALSE) {
4325 				/*
4326 				 * couldn't upgrade, so explicitly
4327 				 * take the lock exclusively
4328 				 */
4329 				vm_object_lock(object);
4330 			}
4331 		}
4332 	}
4333 
4334 #if     VM_FAULT_CLASSIFY
4335 	/*
4336 	 *	Temporary data gathering code
4337 	 */
4338 	vm_fault_classify(object, offset, fault_type);
4339 #endif
4340 	/*
4341 	 *	Fast fault code.  The basic idea is to do as much as
4342 	 *	possible while holding the map lock and object locks.
4343 	 *      Busy pages are not used until the object lock has to
4344 	 *	be dropped to do something (copy, zero fill, pmap enter).
4345 	 *	Similarly, paging references aren't acquired until that
4346 	 *	point, and object references aren't used.
4347 	 *
4348 	 *	If we can figure out what to do
4349 	 *	(zero fill, copy on write, pmap enter) while holding
4350 	 *	the locks, then it gets done.  Otherwise, we give up,
4351 	 *	and use the original fault path (which doesn't hold
4352 	 *	the map lock, and relies on busy pages).
4353 	 *	The give up cases include:
4354 	 *              - Have to talk to pager.
4355 	 *		- Page is busy, absent or in error.
4356 	 *		- Pager has locked out desired access.
4357 	 *		- Fault needs to be restarted.
4358 	 *		- Have to push page into copy object.
4359 	 *
4360 	 *	The code is an infinite loop that moves one level down
4361 	 *	the shadow chain each time.  cur_object and cur_offset
4362 	 *      refer to the current object being examined. object and offset
4363 	 *	are the original object from the map.  The loop is at the
4364 	 *	top level if and only if object and cur_object are the same.
4365 	 *
4366 	 *	Invariants:  Map lock is held throughout.  Lock is held on
4367 	 *		original object and cur_object (if different) when
4368 	 *		continuing or exiting loop.
4369 	 *
4370 	 */
4371 
4372 #if defined(__arm64__)
4373 	/*
4374 	 * Fail if reading an execute-only page in a
4375 	 * pmap that enforces execute-only protection.
4376 	 */
4377 	if (fault_type == VM_PROT_READ &&
4378 	    (prot & VM_PROT_EXECUTE) &&
4379 	    !(prot & VM_PROT_READ) &&
4380 	    pmap_enforces_execute_only(pmap)) {
4381 		vm_object_unlock(object);
4382 		vm_map_unlock_read(map);
4383 		if (real_map != map) {
4384 			vm_map_unlock(real_map);
4385 		}
4386 		kr = KERN_PROTECTION_FAILURE;
4387 		goto done;
4388 	}
4389 #endif
4390 
4391 	fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
4392 
4393 	/*
4394 	 * If this page is to be inserted in a copy delay object
4395 	 * for writing, and if the object has a copy, then the
4396 	 * copy delay strategy is implemented in the slow fault path (vm_fault_page).
4397 	 */
4398 	if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
4399 	    object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4400 		goto handle_copy_delay;
4401 	}
4402 
4403 	cur_object = object;
4404 	cur_offset = offset;
4405 
4406 	grab_options = 0;
4407 #if CONFIG_SECLUDED_MEMORY
4408 	if (object->can_grab_secluded) {
4409 		grab_options |= VM_PAGE_GRAB_SECLUDED;
4410 	}
4411 #endif /* CONFIG_SECLUDED_MEMORY */
4412 
4413 	while (TRUE) {
4414 		if (!cur_object->pager_created &&
4415 		    cur_object->phys_contiguous) { /* superpage */
4416 			break;
4417 		}
4418 
4419 		if (cur_object->blocked_access) {
4420 			/*
4421 			 * Access to this VM object has been blocked.
4422 			 * Let the slow path handle it.
4423 			 */
4424 			break;
4425 		}
4426 
4427 #if __arm__ && !__arm64__
4428 		if (__improbable(cur_object->internal &&
4429 		    cur_offset >= cur_object->vo_size &&
4430 		    cur_offset < VM_MAP_ROUND_PAGE(cur_object->vo_size, VM_MAP_PAGE_MASK(map)) &&
4431 		    VM_MAP_PAGE_SHIFT(map) > PAGE_SHIFT)) {
4432 			/*
4433 			 * On devices with a 4k kernel page size
4434 			 * and a 16k user page size (i.e. 32-bit watches),
4435 			 * IOKit could have created a VM object with a
4436 			 * 4k-aligned size.
4437 			 * IOKit could have then mapped that VM object
4438 			 * in a user address space, and VM would have extended
4439 			 * the mapping to the next 16k boundary.
4440 			 * So we could now be, somewhat illegally, trying to
4441 			 * access one of the up to 3 non-existent 4k pages
4442 			 * beyond the end of the VM object.
4443 			 * We would not be allowed to insert a page beyond
4444 			 * the end of the object, so let's fail the fault.
4445 			 */
4446 			DTRACE_VM3(vm_fault_beyond_end_of_internal,
4447 			    vm_object_offset_t, offset,
4448 			    vm_object_size_t, object->vo_size,
4449 			    vm_map_address_t, vaddr);
4450 			vm_object_unlock(object);
4451 			vm_map_unlock_read(map);
4452 			if (real_map != map) {
4453 				vm_map_unlock(real_map);
4454 			}
4455 			kr = KERN_MEMORY_ERROR;
4456 			goto done;
4457 		}
4458 #endif /* __arm__ && !__arm64__ */
4459 
4460 		m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4461 		m_object = NULL;
4462 
4463 		if (m != VM_PAGE_NULL) {
4464 			m_object = cur_object;
4465 
4466 			if (m->vmp_busy) {
4467 				wait_result_t   result;
4468 
4469 				/*
4470 				 * in order to do the PAGE_ASSERT_WAIT, we must
4471 				 * have the object that 'm' belongs to locked exclusively
4472 				 */
4473 				if (object != cur_object) {
4474 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4475 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4476 
4477 						if (vm_object_lock_upgrade(cur_object) == FALSE) {
4478 							/*
4479 							 * couldn't upgrade so go do a full retry
4480 							 * immediately since we can no longer be
4481 							 * certain about cur_object (since we
4482 							 * don't hold a reference on it)...
4483 							 * first drop the top object lock
4484 							 */
4485 							vm_object_unlock(object);
4486 
4487 							vm_map_unlock_read(map);
4488 							if (real_map != map) {
4489 								vm_map_unlock(real_map);
4490 							}
4491 
4492 							goto RetryFault;
4493 						}
4494 					}
4495 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4496 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4497 
4498 					if (vm_object_lock_upgrade(object) == FALSE) {
4499 						/*
4500 						 * couldn't upgrade, so explicitly take the lock
4501 						 * exclusively and go relookup the page since we
4502 						 * will have dropped the object lock and
4503 						 * a different thread could have inserted
4504 						 * a page at this offset
4505 						 * no need for a full retry since we're
4506 						 * at the top level of the object chain
4507 						 */
4508 						vm_object_lock(object);
4509 
4510 						continue;
4511 					}
4512 				}
4513 				if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4514 					/*
4515 					 * m->vmp_busy == TRUE and the object is locked exclusively.
4516 					 * if m->pageout_queue == TRUE after we acquire the
4517 					 * queues lock, we are guaranteed that it is stable on
4518 					 * the pageout queue and therefore reclaimable
4519 					 *
4520 					 * NOTE: this is only true for the internal pageout queue
4521 					 * in the compressor world
4522 					 */
4523 					assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4524 
4525 					vm_page_lock_queues();
4526 
4527 					if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4528 						vm_pageout_throttle_up(m);
4529 						vm_page_unlock_queues();
4530 
4531 						PAGE_WAKEUP_DONE(m);
4532 						goto reclaimed_from_pageout;
4533 					}
4534 					vm_page_unlock_queues();
4535 				}
4536 				if (object != cur_object) {
4537 					vm_object_unlock(object);
4538 				}
4539 
4540 				vm_map_unlock_read(map);
4541 				if (real_map != map) {
4542 					vm_map_unlock(real_map);
4543 				}
4544 
4545 				result = PAGE_ASSERT_WAIT(m, interruptible);
4546 
4547 				vm_object_unlock(cur_object);
4548 
4549 				if (result == THREAD_WAITING) {
4550 					result = thread_block(THREAD_CONTINUE_NULL);
4551 				}
4552 				if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4553 					goto RetryFault;
4554 				}
4555 
4556 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4557 				kr = KERN_ABORTED;
4558 				goto done;
4559 			}
4560 reclaimed_from_pageout:
4561 			if (m->vmp_laundry) {
4562 				if (object != cur_object) {
4563 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4564 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4565 
4566 						vm_object_unlock(object);
4567 						vm_object_unlock(cur_object);
4568 
4569 						vm_map_unlock_read(map);
4570 						if (real_map != map) {
4571 							vm_map_unlock(real_map);
4572 						}
4573 
4574 						goto RetryFault;
4575 					}
4576 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4577 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4578 
4579 					if (vm_object_lock_upgrade(object) == FALSE) {
4580 						/*
4581 						 * couldn't upgrade, so explicitly take the lock
4582 						 * exclusively and go relookup the page since we
4583 						 * will have dropped the object lock and
4584 						 * a different thread could have inserted
4585 						 * a page at this offset
4586 						 * no need for a full retry since we're
4587 						 * at the top level of the object chain
4588 						 */
4589 						vm_object_lock(object);
4590 
4591 						continue;
4592 					}
4593 				}
4594 				vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4595 				vm_pageout_steal_laundry(m, FALSE);
4596 			}
4597 
4598 			if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
4599 				/*
4600 				 * Guard page: let the slow path deal with it
4601 				 */
4602 				break;
4603 			}
4604 			if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) {
4605 				/*
4606 				 * Unusual case... let the slow path deal with it
4607 				 */
4608 				break;
4609 			}
4610 			if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4611 				if (object != cur_object) {
4612 					vm_object_unlock(object);
4613 				}
4614 				vm_map_unlock_read(map);
4615 				if (real_map != map) {
4616 					vm_map_unlock(real_map);
4617 				}
4618 				vm_object_unlock(cur_object);
4619 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4620 				kr = KERN_MEMORY_ERROR;
4621 				goto done;
4622 			}
4623 			assert(m_object == VM_PAGE_OBJECT(m));
4624 
4625 			if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4626 			    PAGE_SIZE, 0) ||
4627 			    (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4628 upgrade_lock_and_retry:
4629 				/*
4630 				 * We might need to validate this page
4631 				 * against its code signature, so we
4632 				 * want to hold the VM object exclusively.
4633 				 */
4634 				if (object != cur_object) {
4635 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4636 						vm_object_unlock(object);
4637 						vm_object_unlock(cur_object);
4638 
4639 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4640 
4641 						vm_map_unlock_read(map);
4642 						if (real_map != map) {
4643 							vm_map_unlock(real_map);
4644 						}
4645 
4646 						goto RetryFault;
4647 					}
4648 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4649 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4650 
4651 					if (vm_object_lock_upgrade(object) == FALSE) {
4652 						/*
4653 						 * couldn't upgrade, so explicitly take the lock
4654 						 * exclusively and go relookup the page since we
4655 						 * will have dropped the object lock and
4656 						 * a different thread could have inserted
4657 						 * a page at this offset
4658 						 * no need for a full retry since we're
4659 						 * at the top level of the object chain
4660 						 */
4661 						vm_object_lock(object);
4662 
4663 						continue;
4664 					}
4665 				}
4666 			}
4667 			/*
4668 			 *	Two cases of map in faults:
4669 			 *	    - At top level w/o copy object.
4670 			 *	    - Read fault anywhere.
4671 			 *		--> must disallow write.
4672 			 */
4673 
4674 			if (object == cur_object && object->copy == VM_OBJECT_NULL) {
4675 				goto FastPmapEnter;
4676 			}
4677 
4678 			if (!need_copy &&
4679 			    !fault_info.no_copy_on_read &&
4680 			    cur_object != object &&
4681 			    !cur_object->internal &&
4682 			    !cur_object->pager_trusted &&
4683 			    vm_protect_privileged_from_untrusted &&
4684 			    !cur_object->code_signed &&
4685 			    current_proc_is_privileged()) {
4686 				/*
4687 				 * We're faulting on a page in "object" and
4688 				 * went down the shadow chain to "cur_object"
4689 				 * to find out that "cur_object"'s pager
4690 				 * is not "trusted", i.e. we cannot trust it
4691 				 * to always return the same contents.
4692 				 * Since the target is a "privileged" process,
4693 				 * let's treat this as a copy-on-read fault, as
4694 				 * if it was a copy-on-write fault.
4695 				 * Once "object" gets a copy of this page, it
4696 				 * won't have to rely on "cur_object" to
4697 				 * provide the contents again.
4698 				 *
4699 				 * This is done by setting "need_copy" and
4700 				 * retrying the fault from the top with the
4701 				 * appropriate locking.
4702 				 *
4703 				 * Special case: if the mapping is executable
4704 				 * and the untrusted object is code-signed and
4705 				 * the process is "cs_enforced", we do not
4706 				 * copy-on-read because that would break
4707 				 * code-signing enforcement expectations (an
4708 				 * executable page must belong to a code-signed
4709 				 * object) and we can rely on code-signing
4710 				 * to re-validate the page if it gets evicted
4711 				 * and paged back in.
4712 				 */
4713 //				printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4714 				vm_copied_on_read++;
4715 				need_copy = TRUE;
4716 
4717 				vm_object_unlock(object);
4718 				vm_object_unlock(cur_object);
4719 				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4720 				vm_map_unlock_read(map);
4721 				if (real_map != map) {
4722 					vm_map_unlock(real_map);
4723 				}
4724 				goto RetryFault;
4725 			}
4726 
4727 			if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4728 				if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4729 					prot &= ~VM_PROT_WRITE;
4730 				} else {
4731 					/*
4732 					 * For a protection that the pmap cares
4733 					 * about, we must hand over the full
4734 					 * set of protections (so that the pmap
4735 					 * layer can apply any desired policy).
4736 					 * This means that cs_bypass must be
4737 					 * set, as this can force us to pass
4738 					 * RWX.
4739 					 */
4740 					assert(fault_info.cs_bypass);
4741 				}
4742 
4743 				if (object != cur_object) {
4744 					/*
4745 					 * We still need to hold the top object
4746 					 * lock here to prevent a race between
4747 					 * a read fault (taking only "shared"
4748 					 * locks) and a write fault (taking
4749 					 * an "exclusive" lock on the top
4750 					 * object).
4751 					 * Otherwise, as soon as we release the
4752 					 * top lock, the write fault could
4753 					 * proceed and actually complete before
4754 					 * the read fault, and the copied page's
4755 					 * translation could then be overwritten
4756 					 * by the read fault's translation for
4757 					 * the original page.
4758 					 *
4759 					 * Let's just record what the top object
4760 					 * is and we'll release it later.
4761 					 */
4762 					top_object = object;
4763 
4764 					/*
4765 					 * switch to the object that has the new page
4766 					 */
4767 					object = cur_object;
4768 					object_lock_type = cur_object_lock_type;
4769 				}
4770 FastPmapEnter:
4771 				assert(m_object == VM_PAGE_OBJECT(m));
4772 
4773 				/*
4774 				 * prepare for the pmap_enter...
4775 				 * object and map are both locked
4776 				 * m contains valid data
4777 				 * object == m->vmp_object
4778 				 * cur_object == NULL or it's been unlocked
4779 				 * no paging references on either object or cur_object
4780 				 */
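				/*
				 * If we are still holding the top object's lock, or we
				 * only hold a shared lock, ask vm_fault_enter to report
				 * a PMAP_ENTER it couldn't complete via "need_retry"
				 * rather than handling it here; the retry path below then
				 * pre-expands the page table with no locks held and
				 * re-drives the fault.
				 */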
4781 				if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4782 					need_retry_ptr = &need_retry;
4783 				} else {
4784 					need_retry_ptr = NULL;
4785 				}
4786 
4787 				if (fault_page_size < PAGE_SIZE) {
4788 					DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
4789 					assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
4790 					    fault_phys_offset < PAGE_SIZE),
4791 					    "0x%llx\n", (uint64_t)fault_phys_offset);
4792 				} else {
4793 					assertf(fault_phys_offset == 0,
4794 					    "0x%llx\n", (uint64_t)fault_phys_offset);
4795 				}
4796 
4797 				assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
4798 				assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
4799 				if (caller_pmap) {
4800 					kr = vm_fault_enter(m,
4801 					    caller_pmap,
4802 					    caller_pmap_addr,
4803 					    fault_page_size,
4804 					    fault_phys_offset,
4805 					    prot,
4806 					    caller_prot,
4807 					    wired,
4808 					    change_wiring,
4809 					    wire_tag,
4810 					    &fault_info,
4811 					    need_retry_ptr,
4812 					    &type_of_fault);
4813 				} else {
4814 					kr = vm_fault_enter(m,
4815 					    pmap,
4816 					    vaddr,
4817 					    fault_page_size,
4818 					    fault_phys_offset,
4819 					    prot,
4820 					    caller_prot,
4821 					    wired,
4822 					    change_wiring,
4823 					    wire_tag,
4824 					    &fault_info,
4825 					    need_retry_ptr,
4826 					    &type_of_fault);
4827 				}
4828 
4829 				vm_fault_complete(
4830 					map,
4831 					real_map,
4832 					object,
4833 					m_object,
4834 					m,
4835 					offset,
4836 					trace_real_vaddr,
4837 					&fault_info,
4838 					caller_prot,
4839 					real_vaddr,
4840 					vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
4841 					need_retry,
4842 					kr,
4843 					physpage_p,
4844 					prot,
4845 					top_object,
4846 					need_collapse,
4847 					cur_offset,
4848 					fault_type,
4849 					&written_on_object,
4850 					&written_on_pager,
4851 					&written_on_offset);
4852 				top_object = VM_OBJECT_NULL;
4853 				if (need_retry == TRUE) {
4854 					/*
4855 					 * vm_fault_enter couldn't complete the PMAP_ENTER...
4856 					 * at this point we don't hold any locks so it's safe
4857 					 * to ask the pmap layer to expand the page table to
4858 					 * accommodate this mapping... once expanded, we'll
4859 					 * re-drive the fault which should result in vm_fault_enter
4860 					 * being able to successfully enter the mapping this time around
4861 					 */
4862 					(void)pmap_enter_options(
4863 						pmap, vaddr, 0, 0, 0, 0, 0,
4864 						PMAP_OPTIONS_NOENTER, NULL);
4865 
4866 					need_retry = FALSE;
4867 					goto RetryFault;
4868 				}
4869 				goto done;
4870 			}
4871 			/*
4872 			 * COPY ON WRITE FAULT
4873 			 */
4874 			assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
4875 
4876 			/*
4877 			 * If objects match, then
4878 			 * object->copy must not be NULL (else control
4879 			 * would be in previous code block), and we
4880 			 * have a potential push into the copy object
4881 			 * with which we can't cope here.
4882 			 */
4883 			if (cur_object == object) {
4884 				/*
4885 				 * must take the slow path to
4886 				 * deal with the copy push
4887 				 */
4888 				break;
4889 			}
4890 
4891 			/*
4892 			 * This is now a shadow based copy on write
4893 			 * fault -- it requires a copy up the shadow
4894 			 * chain.
4895 			 */
4896 			assert(m_object == VM_PAGE_OBJECT(m));
4897 
4898 			if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
4899 			    vm_fault_cs_need_validation(NULL, m, m_object,
4900 			    PAGE_SIZE, 0)) {
4901 				goto upgrade_lock_and_retry;
4902 			}
4903 
4904 #if MACH_ASSERT
4905 			if (resilient_media_retry &&
4906 			    vm_fault_resilient_media_inject_error2_rate != 0 &&
4907 			    (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
4908 				/* inject an error */
4909 				cur_m = m;
4910 				m = VM_PAGE_NULL;
4911 				m_object = VM_OBJECT_NULL;
4912 				break;
4913 			}
4914 #endif /* MACH_ASSERT */
4915 			/*
4916 			 * Allocate a page in the original top level
4917 			 * object. Give up if the allocation fails.  Also
4918 			 * need to remember current page, as it's the
4919 			 * source of the copy.
4920 			 *
4921 			 * at this point we hold locks on both
4922 			 * object and cur_object... no need to take
4923 			 * paging refs or mark pages BUSY since
4924 			 * we don't drop either object lock until
4925 			 * the page has been copied and inserted
4926 			 */
4927 			cur_m = m;
4928 			m = vm_page_grab_options(grab_options);
4929 			m_object = NULL;
4930 
4931 			if (m == VM_PAGE_NULL) {
4932 				/*
4933 				 * no free page currently available...
4934 				 * must take the slow path
4935 				 */
4936 				break;
4937 			}
4938 			/*
4939 			 * Now do the copy.  Mark the source page busy...
4940 			 *
4941 			 *	NOTE: This code holds the map lock across
4942 			 *	the page copy.
4943 			 */
4944 			vm_page_copy(cur_m, m);
4945 			vm_page_insert(m, object, vm_object_trunc_page(offset));
4946 			if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
4947 				DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4948 			}
4949 			m_object = object;
4950 			SET_PAGE_DIRTY(m, FALSE);
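			/*
			 * The copied data exists only in this new page, so mark it
			 * dirty to make sure it gets pushed to the compressor or
			 * pager instead of being reclaimed as a clean page.
			 */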
4951 
4952 			/*
4953 			 * Now cope with the source page and object
4954 			 */
4955 			if (object->ref_count > 1 && cur_m->vmp_pmapped) {
4956 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
4957 			} else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
4958 				/*
4959 				 * We've copied the full 16K page but we're
4960 				 * about to call vm_fault_enter() only for
4961 				 * the 4K chunk we're faulting on.  The other
4962 				 * three 4K chunks in that page could still
4963 				 * be pmapped in this pmap.
4964 				 * Since the VM object layer thinks that the
4965 				 * entire page has been dealt with and the
4966 				 * original page might no longer be needed,
4967 				 * it might collapse/bypass the original VM
4968 				 * object and free its pages, which would be
4969 				 * bad (and would trigger pmap_verify_free()
4970 				 * assertions) if the other 4K chunks are still
4971 				 * pmapped.
4972 				 */
4973 				/*
4974 				 * XXX FBDP TODO4K: to be revisited
4975 				 * Technically, we need to pmap_disconnect()
4976 				 * only the target pmap's mappings for the 4K
4977 				 * chunks of this 16K VM page.  If other pmaps
4978 				 * have PTEs on these chunks, that means that
4979 				 * the associated VM map must have a reference
4980 				 * on the VM object, so no need to worry about
4981 				 * those.
4982 				 * pmap_protect() for each 4K chunk would be
4983 				 * better but we'd have to check which chunks
4984 				 * are actually mapped before and after this
4985 				 * one.
4986 				 * A full-blown pmap_disconnect() is easier
4987 				 * for now but not efficient.
4988 				 */
4989 				DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
4990 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
4991 			}
4992 
4993 			if (cur_m->vmp_clustered) {
4994 				VM_PAGE_COUNT_AS_PAGEIN(cur_m);
4995 				VM_PAGE_CONSUME_CLUSTERED(cur_m);
4996 				vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
4997 			}
4998 			need_collapse = TRUE;
4999 
5000 			if (!cur_object->internal &&
5001 			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5002 				/*
5003 				 * The object from which we've just
5004 				 * copied a page is most probably backed
5005 				 * by a vnode.  We don't want to waste too
5006 				 * much time trying to collapse the VM objects
5007 				 * and create a bottleneck when several tasks
5008 				 * map the same file.
5009 				 */
5010 				if (cur_object->copy == object) {
5011 					/*
5012 					 * Shared mapping or no COW yet.
5013 					 * We can never collapse a copy
5014 					 * object into its backing object.
5015 					 */
5016 					need_collapse = FALSE;
5017 				} else if (cur_object->copy == object->shadow &&
5018 				    object->shadow->resident_page_count == 0) {
5019 					/*
5020 					 * Shared mapping after a COW occurred.
5021 					 */
5022 					need_collapse = FALSE;
5023 				}
5024 			}
5025 			vm_object_unlock(cur_object);
5026 
5027 			if (need_collapse == FALSE) {
5028 				vm_fault_collapse_skipped++;
5029 			}
5030 			vm_fault_collapse_total++;
5031 
5032 			type_of_fault = DBG_COW_FAULT;
5033 			counter_inc(&vm_statistics_cow_faults);
5034 			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5035 			counter_inc(&current_task()->cow_faults);
5036 
5037 			goto FastPmapEnter;
5038 		} else {
5039 			/*
5040 			 * No page at cur_object, cur_offset... m == NULL
5041 			 */
5042 			if (cur_object->pager_created) {
5043 				vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5044 
5045 				if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5046 					int             my_fault_type;
5047 					uint8_t         c_flags = C_DONT_BLOCK;
5048 					bool            insert_cur_object = FALSE;
5049 
5050 					/*
5051 					 * May have to talk to a pager...
5052 					 * if so, take the slow path by
5053 					 * doing a 'break' from the while (TRUE) loop
5054 					 *
5055 					 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5056 					 * if the compressor is active and the page exists there
5057 					 */
5058 					if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5059 						break;
5060 					}
5061 
5062 					if (map == kernel_map || real_map == kernel_map) {
5063 						/*
5064 						 * can't call into the compressor with the kernel_map
5065 						 * lock held, since the compressor may try to operate
5066 						 * on the kernel map in order to return an empty c_segment
5067 						 */
5068 						break;
5069 					}
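					/*
					 * On a write fault through the shadow chain, keep the
					 * compressed copy in "cur_object" (C_KEEP): the
					 * decompressed data will land in a new page inserted
					 * into the top "object".  On a read fault, decompress
					 * directly into "cur_object" itself.
					 */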
5070 					if (object != cur_object) {
5071 						if (fault_type & VM_PROT_WRITE) {
5072 							c_flags |= C_KEEP;
5073 						} else {
5074 							insert_cur_object = TRUE;
5075 						}
5076 					}
5077 					if (insert_cur_object == TRUE) {
5078 						if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5079 							cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5080 
5081 							if (vm_object_lock_upgrade(cur_object) == FALSE) {
5082 								/*
5083 								 * couldn't upgrade so go do a full retry
5084 								 * immediately since we can no longer be
5085 								 * certain about cur_object (since we
5086 								 * don't hold a reference on it)...
5087 								 * first drop the top object lock
5088 								 */
5089 								vm_object_unlock(object);
5090 
5091 								vm_map_unlock_read(map);
5092 								if (real_map != map) {
5093 									vm_map_unlock(real_map);
5094 								}
5095 
5096 								goto RetryFault;
5097 							}
5098 						}
5099 					} else if (object_lock_type == OBJECT_LOCK_SHARED) {
5100 						object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5101 
5102 						if (object != cur_object) {
5103 							/*
5104 							 * we can't go for the upgrade on the top
5105 							 * lock since the upgrade may block waiting
5106 							 * for readers to drain... since we hold
5107 							 * cur_object locked at this point, waiting
5108 							 * for the readers to drain would represent
5109 							 * a lock order inversion since the lock order
5110 							 * for objects is the reference order in the
5111 							 * shadow chain
5112 							 */
5113 							vm_object_unlock(object);
5114 							vm_object_unlock(cur_object);
5115 
5116 							vm_map_unlock_read(map);
5117 							if (real_map != map) {
5118 								vm_map_unlock(real_map);
5119 							}
5120 
5121 							goto RetryFault;
5122 						}
5123 						if (vm_object_lock_upgrade(object) == FALSE) {
5124 							/*
5125 							 * couldn't upgrade, so explictly take the lock
5126 							 * couldn't upgrade, so explicitly take the lock
5127 							 * will have dropped the object lock and
5128 							 * a different thread could have inserted
5129 							 * a page at this offset
5130 							 * no need for a full retry since we're
5131 							 * at the top level of the object chain
5132 							 */
5133 							vm_object_lock(object);
5134 
5135 							continue;
5136 						}
5137 					}
5138 					m = vm_page_grab_options(grab_options);
5139 					m_object = NULL;
5140 
5141 					if (m == VM_PAGE_NULL) {
5142 						/*
5143 						 * no free page currently available...
5144 						 * must take the slow path
5145 						 */
5146 						break;
5147 					}
5148 
5149 					/*
5150 					 * The object is and remains locked
5151 					 * so no need to take a
5152 					 * "paging_in_progress" reference.
5153 					 */
5154 					bool      shared_lock;
5155 					if ((object == cur_object &&
5156 					    object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5157 					    (object != cur_object &&
5158 					    cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5159 						shared_lock = FALSE;
5160 					} else {
5161 						shared_lock = TRUE;
5162 					}
5163 
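					/*
					 * Decompress the slot straight into the physical page
					 * we just grabbed.  "compressed_count_delta" reports how
					 * the pager's count of compressed pages changed, and
					 * vm_compressor_pager_count() folds that delta into
					 * "cur_object"'s compressed-page accounting.
					 */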
5164 					kr = vm_compressor_pager_get(
5165 						cur_object->pager,
5166 						(vm_object_trunc_page(cur_offset)
5167 						+ cur_object->paging_offset),
5168 						VM_PAGE_GET_PHYS_PAGE(m),
5169 						&my_fault_type,
5170 						c_flags,
5171 						&compressed_count_delta);
5172 
5173 					vm_compressor_pager_count(
5174 						cur_object->pager,
5175 						compressed_count_delta,
5176 						shared_lock,
5177 						cur_object);
5178 
5179 					if (kr != KERN_SUCCESS) {
5180 						vm_page_release(m, FALSE);
5181 						m = VM_PAGE_NULL;
5182 					}
5183 					/*
5184 					 * If vm_compressor_pager_get() returns
5185 					 * KERN_MEMORY_FAILURE, then the
5186 					 * compressed data is permanently lost,
5187 					 * so return this error immediately.
5188 					 */
5189 					if (kr == KERN_MEMORY_FAILURE) {
5190 						if (object != cur_object) {
5191 							vm_object_unlock(cur_object);
5192 						}
5193 						vm_object_unlock(object);
5194 						vm_map_unlock_read(map);
5195 						if (real_map != map) {
5196 							vm_map_unlock(real_map);
5197 						}
5198 
5199 						goto done;
5200 					} else if (kr != KERN_SUCCESS) {
5201 						break;
5202 					}
5203 					m->vmp_dirty = TRUE;
5204 
5205 					/*
5206 					 * If the object is purgeable, its
5207 					 * owner's purgeable ledgers will be
5208 					 * updated in vm_page_insert() but the
5209 					 * page was also accounted for in a
5210 					 * "compressed purgeable" ledger, so
5211 					 * update that now.
5212 					 */
5213 					if (object != cur_object &&
5214 					    !insert_cur_object) {
5215 						/*
5216 						 * We're not going to insert
5217 						 * the decompressed page into
5218 						 * the object it came from.
5219 						 *
5220 						 * We're dealing with a
5221 						 * copy-on-write fault on
5222 						 * "object".
5223 						 * We're going to decompress
5224 						 * the page directly into the
5225 						 * target "object" while
5226 						 * keeping the compressed
5227 						 * page for "cur_object", so
5228 						 * no ledger update in that
5229 						 * case.
5230 						 */
5231 					} else if (((cur_object->purgable ==
5232 					    VM_PURGABLE_DENY) &&
5233 					    (!cur_object->vo_ledger_tag)) ||
5234 					    (cur_object->vo_owner ==
5235 					    NULL)) {
5236 						/*
5237 						 * "cur_object" is not purgeable
5238 						 * and is not ledger-tagged, or
5239 						 * there's no owner for it,
5240 						 * so no owner's ledgers to
5241 						 * update.
5242 						 */
5243 					} else {
5244 						/*
5245 						 * One less compressed
5246 						 * purgeable/tagged page for
5247 						 * cur_object's owner.
5248 						 */
5249 						vm_object_owner_compressed_update(
5250 							cur_object,
5251 							-1);
5252 					}
5253 
5254 					if (insert_cur_object) {
5255 						vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5256 						m_object = cur_object;
5257 					} else {
5258 						vm_page_insert(m, object, vm_object_trunc_page(offset));
5259 						m_object = object;
5260 					}
5261 
5262 					if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
5263 						/*
5264 						 * If the page is not cacheable,
5265 						 * we can't let its contents
5266 						 * linger in the data cache
5267 						 * after the decompression.
5268 						 */
5269 						pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5270 					}
5271 
5272 					type_of_fault = my_fault_type;
5273 
5274 					VM_STAT_DECOMPRESSIONS();
5275 
5276 					if (cur_object != object) {
5277 						if (insert_cur_object) {
5278 							top_object = object;
5279 							/*
5280 							 * switch to the object that has the new page
5281 							 */
5282 							object = cur_object;
5283 							object_lock_type = cur_object_lock_type;
5284 						} else {
5285 							vm_object_unlock(cur_object);
5286 							cur_object = object;
5287 						}
5288 					}
5289 					goto FastPmapEnter;
5290 				}
5291 				/*
5292 				 * existence map present and indicates
5293 				 * that the pager doesn't have this page
5294 				 */
5295 			}
5296 			if (cur_object->shadow == VM_OBJECT_NULL ||
5297 			    resilient_media_retry) {
5298 				/*
5299 				 * Zero fill fault.  Page gets
5300 				 * inserted into the original object.
5301 				 */
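				/*
				 * On a "resilient media" retry we deliberately zero-fill
				 * in the top object, even if a shadow chain exists, rather
				 * than going back to the pager whose backing media failed
				 * on the previous attempt.
				 */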
5302 				if (cur_object->shadow_severed ||
5303 				    VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5304 				    cur_object == compressor_object ||
5305 				    cur_object == kernel_object ||
5306 				    cur_object == vm_submap_object) {
5307 					if (object != cur_object) {
5308 						vm_object_unlock(cur_object);
5309 					}
5310 					vm_object_unlock(object);
5311 
5312 					vm_map_unlock_read(map);
5313 					if (real_map != map) {
5314 						vm_map_unlock(real_map);
5315 					}
5316 					if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5317 						kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5318 					}
5319 
5320 					if (cur_object->shadow_severed) {
5321 						kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5322 					}
5323 
5324 					kr = KERN_MEMORY_ERROR;
5325 					goto done;
5326 				}
5327 				if (cur_object != object) {
5328 					vm_object_unlock(cur_object);
5329 
5330 					cur_object = object;
5331 				}
5332 				if (object_lock_type == OBJECT_LOCK_SHARED) {
5333 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5334 
5335 					if (vm_object_lock_upgrade(object) == FALSE) {
5336 						/*
5337 						 * couldn't upgrade so do a full retry on the fault
5338 						 * since we dropped the object lock which
5339 						 * could allow another thread to insert
5340 						 * a page at this offset
5341 						 */
5342 						vm_map_unlock_read(map);
5343 						if (real_map != map) {
5344 							vm_map_unlock(real_map);
5345 						}
5346 
5347 						goto RetryFault;
5348 					}
5349 				}
5350 				if (!object->internal) {
5351 					panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5352 				}
5353 #if MACH_ASSERT
5354 				if (resilient_media_retry &&
5355 				    vm_fault_resilient_media_inject_error3_rate != 0 &&
5356 				    (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5357 					/* inject an error */
5358 					m_object = NULL;
5359 					break;
5360 				}
5361 #endif /* MACH_ASSERT */
5362 				m = vm_page_alloc(object, vm_object_trunc_page(offset));
5363 				m_object = NULL;
5364 
5365 				if (m == VM_PAGE_NULL) {
5366 					/*
5367 					 * no free page currently available...
5368 					 * must take the slow path
5369 					 */
5370 					break;
5371 				}
5372 				m_object = object;
5373 
5374 				/*
5375 				 * Zeroing the page and entering it into the pmap
5376 				 * represents a significant amount of the zero fill fault handler's work.
5377 				 *
5378 				 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5379 				 * now that we've inserted the page into the vm object.
5380 				 * Before dropping the lock, we need to check protection bits and set the
5381 				 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5382 				 * zero it, and do the pmap enter. We'll need to reacquire the lock
5383 				 * to clear the busy bit and wake up any waiters.
5384 				 */
5385 				vm_fault_cs_clear(m);
5386 				m->vmp_pmapped = TRUE;
5387 				if (map->no_zero_fill) {
5388 					type_of_fault = DBG_NZF_PAGE_FAULT;
5389 				} else {
5390 					type_of_fault = DBG_ZERO_FILL_FAULT;
5391 				}
5392 				{
5393 					pmap_t destination_pmap;
5394 					vm_map_offset_t destination_pmap_vaddr;
5395 					vm_prot_t enter_fault_type;
5396 					if (caller_pmap) {
5397 						destination_pmap = caller_pmap;
5398 						destination_pmap_vaddr = caller_pmap_addr;
5399 					} else {
5400 						destination_pmap = pmap;
5401 						destination_pmap_vaddr = vaddr;
5402 					}
5403 					if (change_wiring) {
5404 						enter_fault_type = VM_PROT_NONE;
5405 					} else {
5406 						enter_fault_type = caller_prot;
5407 					}
5408 					assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5409 					kr = vm_fault_enter_prepare(m,
5410 					    destination_pmap,
5411 					    destination_pmap_vaddr,
5412 					    &prot,
5413 					    caller_prot,
5414 					    fault_page_size,
5415 					    fault_phys_offset,
5416 					    change_wiring,
5417 					    enter_fault_type,
5418 					    &fault_info,
5419 					    &type_of_fault,
5420 					    &page_needs_data_sync);
5421 					if (kr != KERN_SUCCESS) {
5422 						goto zero_fill_cleanup;
5423 					}
5424 
5425 					if (object_is_contended) {
5426 						/*
5427 						 * At this point the page is in the vm object, but not on a paging queue.
5428 						 * Since it's accessible to another thread but its contents are invalid
5429 						 * (it hasn't been zeroed), mark it busy before dropping the object lock.
5430 						 */
5431 						m->vmp_busy = TRUE;
5432 						vm_object_unlock(object);
5433 					}
5434 					if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5435 						/*
5436 						 * Now zero fill page...
5437 						 * the page is probably going to
5438 						 * be written soon, so don't bother
5439 						 * to clear the modified bit
5440 						 *
5441 						 *   NOTE: This code holds the map
5442 						 *   lock across the zero fill.
5443 						 */
5444 						vm_page_zero_fill(m);
5445 						counter_inc(&vm_statistics_zero_fill_count);
5446 						DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5447 					}
5448 					if (page_needs_data_sync) {
5449 						pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5450 					}
5451 
5452 					if (top_object != VM_OBJECT_NULL) {
5453 						need_retry_ptr = &need_retry;
5454 					} else {
5455 						need_retry_ptr = NULL;
5456 					}
5457 					if (object_is_contended) {
5458 						kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5459 						    fault_page_size, fault_phys_offset,
5460 						    m, &prot, caller_prot, enter_fault_type, wired,
5461 						    fault_info.pmap_options, need_retry_ptr);
5462 						vm_object_lock(object);
5463 					} else {
5464 						kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5465 						    fault_page_size, fault_phys_offset,
5466 						    m, &prot, caller_prot, enter_fault_type, wired,
5467 						    fault_info.pmap_options, need_retry_ptr);
5468 					}
5469 				}
5470 zero_fill_cleanup:
5471 				if (!VM_DYNAMIC_PAGING_ENABLED() &&
5472 				    (object->purgable == VM_PURGABLE_DENY ||
5473 				    object->purgable == VM_PURGABLE_NONVOLATILE ||
5474 				    object->purgable == VM_PURGABLE_VOLATILE)) {
5475 					vm_page_lockspin_queues();
5476 					if (!VM_DYNAMIC_PAGING_ENABLED()) {
5477 						vm_fault_enqueue_throttled_locked(m);
5478 					}
5479 					vm_page_unlock_queues();
5480 				}
5481 				vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr);
5482 
5483 				vm_fault_complete(
5484 					map,
5485 					real_map,
5486 					object,
5487 					m_object,
5488 					m,
5489 					offset,
5490 					trace_real_vaddr,
5491 					&fault_info,
5492 					caller_prot,
5493 					real_vaddr,
5494 					type_of_fault,
5495 					need_retry,
5496 					kr,
5497 					physpage_p,
5498 					prot,
5499 					top_object,
5500 					need_collapse,
5501 					cur_offset,
5502 					fault_type,
5503 					&written_on_object,
5504 					&written_on_pager,
5505 					&written_on_offset);
5506 				top_object = VM_OBJECT_NULL;
5507 				if (need_retry == TRUE) {
5508 					/*
5509 					 * vm_fault_enter couldn't complete the PMAP_ENTER...
5510 					 * at this point we don't hold any locks so it's safe
5511 					 * to ask the pmap layer to expand the page table to
5512 					 * accommodate this mapping... once expanded, we'll
5513 					 * re-drive the fault which should result in vm_fault_enter
5514 					 * being able to successfully enter the mapping this time around
5515 					 */
5516 					(void)pmap_enter_options(
5517 						pmap, vaddr, 0, 0, 0, 0, 0,
5518 						PMAP_OPTIONS_NOENTER, NULL);
5519 
5520 					need_retry = FALSE;
5521 					goto RetryFault;
5522 				}
5523 				goto done;
5524 			}
5525 			/*
5526 			 * On to the next level in the shadow chain
5527 			 */
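			/*
			 * Rebase cur_offset into the shadow object;
			 * fault_phys_offset tracks the fault's sub-page offset
			 * within the VM page, for configurations where the map's
			 * page size is smaller than PAGE_SIZE (4K mappings of
			 * 16K pages).
			 */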
5528 			cur_offset += cur_object->vo_shadow_offset;
5529 			new_object = cur_object->shadow;
5530 			fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
5531 
5532 			/*
5533 			 * take the new_object's lock with the indicated state
5534 			 */
5535 			if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5536 				vm_object_lock_shared(new_object);
5537 			} else {
5538 				vm_object_lock(new_object);
5539 			}
5540 
5541 			if (cur_object != object) {
5542 				vm_object_unlock(cur_object);
5543 			}
5544 
5545 			cur_object = new_object;
5546 
5547 			continue;
5548 		}
5549 	}
5550 	/*
5551 	 * Cleanup from fast fault failure.  Drop any object
5552 	 * lock other than original and drop map lock.
5553 	 */
5554 	if (object != cur_object) {
5555 		vm_object_unlock(cur_object);
5556 	}
5557 
5558 	/*
5559 	 * must own the object lock exclusively at this point
5560 	 */
5561 	if (object_lock_type == OBJECT_LOCK_SHARED) {
5562 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5563 
5564 		if (vm_object_lock_upgrade(object) == FALSE) {
5565 			/*
5566 			 * couldn't upgrade, so explicitly
5567 			 * take the lock exclusively
5568 			 * no need to retry the fault at this
5569 			 * point since "vm_fault_page" will
5570 			 * completely re-evaluate the state
5571 			 */
5572 			vm_object_lock(object);
5573 		}
5574 	}
5575 
5576 handle_copy_delay:
5577 	vm_map_unlock_read(map);
5578 	if (real_map != map) {
5579 		vm_map_unlock(real_map);
5580 	}
5581 
5582 	if (__improbable(object == compressor_object ||
5583 	    object == kernel_object ||
5584 	    object == vm_submap_object)) {
5585 		/*
5586 		 * These objects are explicitly managed and populated by the
5587 		 * kernel.  The virtual ranges backed by these objects should
5588 		 * either have wired pages or "holes" that are not supposed to
5589 		 * be accessed at all until they get explicitly populated.
5590 		 * We should never have to resolve a fault on a mapping backed
5591 		 * by one of these VM objects and providing a zero-filled page
5592 		 * would be wrong here, so let's fail the fault and let the
5593 		 * caller crash or recover.
5594 		 */
5595 		vm_object_unlock(object);
5596 		kr = KERN_MEMORY_ERROR;
5597 		goto done;
5598 	}
5599 
5600 	assert(object != compressor_object);
5601 	assert(object != kernel_object);
5602 	assert(object != vm_submap_object);
5603 
5604 	resilient_media_ref_transfer = false;
5605 	if (resilient_media_retry) {
5606 		/*
5607 		 * We could get here if we failed to get a free page
5608 		 * to zero-fill and had to take the slow path again.
5609 		 * Reset our "recovery-from-failed-media" state.
5610 		 */
5611 		assert(resilient_media_object != VM_OBJECT_NULL);
5612 		assert(resilient_media_offset != (vm_object_offset_t)-1);
5613 		/* release our extra reference on failed object */
5614 //             printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
5615 		if (object == resilient_media_object) {
5616 			/*
5617 			 * We're holding "object"'s lock, so we can't release
5618 			 * our extra reference at this point.
5619 			 * We need an extra reference on "object" anyway
5620 			 * (see below), so let's just transfer this reference.
5621 			 */
5622 			resilient_media_ref_transfer = true;
5623 		} else {
5624 			vm_object_lock_assert_notheld(resilient_media_object);
5625 			vm_object_deallocate(resilient_media_object);
5626 		}
5627 		resilient_media_object = VM_OBJECT_NULL;
5628 		resilient_media_offset = (vm_object_offset_t)-1;
5629 		resilient_media_retry = false;
5630 		vm_fault_resilient_media_abort2++;
5631 	}
5632 
5633 	/*
5634 	 * Make a reference to this object to
5635 	 * prevent its disposal while we are messing with
5636 	 * it.  Once we have the reference, the map is free
5637 	 * to be diddled.  Since objects reference their
5638 	 * shadows (and copies), they will stay around as well.
5639 	 */
5640 	if (resilient_media_ref_transfer) {
5641 		/* we already have an extra reference on this object */
5642 		resilient_media_ref_transfer = false;
5643 	} else {
5644 		vm_object_reference_locked(object);
5645 	}
5646 	vm_object_paging_begin(object);
5647 
5648 	set_thread_pagein_error(cthread, 0);
5649 	error_code = 0;
5650 
5651 	result_page = VM_PAGE_NULL;
5652 	kr = vm_fault_page(object, offset, fault_type,
5653 	    (change_wiring && !wired),
5654 	    FALSE,                /* page not looked up */
5655 	    &prot, &result_page, &top_page,
5656 	    &type_of_fault,
5657 	    &error_code, map->no_zero_fill,
5658 	    FALSE, &fault_info);
5659 
5660 	/*
5661 	 * if kr != VM_FAULT_SUCCESS, then the paging reference
5662 	 * has been dropped and the object unlocked... the ref_count
5663 	 * is still held
5664 	 *
5665 	 * if kr == VM_FAULT_SUCCESS, then the paging reference
5666 	 * is still held along with the ref_count on the original object
5667 	 *
5668 	 *	the object is returned locked with a paging reference
5669 	 *
5670 	 *	if top_page != NULL, then it's BUSY and the
5671 	 *	object it belongs to has a paging reference
5672 	 *	but is returned unlocked
5673 	 */
5674 	if (kr != VM_FAULT_SUCCESS &&
5675 	    kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
5676 		if (kr == VM_FAULT_MEMORY_ERROR &&
5677 		    fault_info.resilient_media) {
5678 			assertf(object->internal, "object %p", object);
5679 			/*
5680 			 * This fault failed but the mapping was
5681 			 * "media resilient", so we'll retry the fault in
5682 			 * recovery mode to get a zero-filled page in the
5683 			 * top object.
5684 			 * Keep the reference on the failing object so
5685 			 * that we can check that the mapping is still
5686 			 * pointing to it when we retry the fault.
5687 			 */
5688 //                     printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
5689 			assert(!resilient_media_retry); /* no double retry */
5690 			assert(resilient_media_object == VM_OBJECT_NULL);
5691 			assert(resilient_media_offset == (vm_object_offset_t)-1);
5692 			resilient_media_retry = true;
5693 			resilient_media_object = object;
5694 			resilient_media_offset = offset;
5695 //                     printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
5696 			vm_fault_resilient_media_initiate++;
5697 			goto RetryFault;
5698 		} else {
5699 			/*
5700 			 * we didn't succeed, lose the object reference
5701 			 * immediately.
5702 			 */
5703 			vm_object_deallocate(object);
5704 			object = VM_OBJECT_NULL; /* no longer valid */
5705 		}
5706 
5707 		/*
5708 		 * See why we failed, and take corrective action.
5709 		 */
5710 		switch (kr) {
5711 		case VM_FAULT_MEMORY_SHORTAGE:
5712 			if (vm_page_wait((change_wiring) ?
5713 			    THREAD_UNINT :
5714 			    THREAD_ABORTSAFE)) {
5715 				goto RetryFault;
5716 			}
5717 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
5718 			OS_FALLTHROUGH;
5719 		case VM_FAULT_INTERRUPTED:
5720 			kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
5721 			kr = KERN_ABORTED;
5722 			goto done;
5723 		case VM_FAULT_RETRY:
5724 			goto RetryFault;
5725 		case VM_FAULT_MEMORY_ERROR:
5726 			if (error_code) {
5727 				kr = error_code;
5728 			} else {
5729 				kr = KERN_MEMORY_ERROR;
5730 			}
5731 			goto done;
5732 		default:
5733 			panic("vm_fault: unexpected error 0x%x from "
5734 			    "vm_fault_page()\n", kr);
5735 		}
5736 	}
5737 	m = result_page;
5738 	m_object = NULL;
5739 
5740 	if (m != VM_PAGE_NULL) {
5741 		m_object = VM_PAGE_OBJECT(m);
5742 		assert((change_wiring && !wired) ?
5743 		    (top_page == VM_PAGE_NULL) :
5744 		    ((top_page == VM_PAGE_NULL) == (m_object == object)));
5745 	}
5746 
5747 	/*
5748 	 * What to do with the resulting page from vm_fault_page
5749 	 * if it doesn't get entered into the physical map:
5750 	 */
5751 #define RELEASE_PAGE(m)                                 \
5752 	MACRO_BEGIN                                     \
5753 	PAGE_WAKEUP_DONE(m);                            \
5754 	if ( !VM_PAGE_PAGEABLE(m)) {                    \
5755 	        vm_page_lockspin_queues();              \
5756 	        if ( !VM_PAGE_PAGEABLE(m))              \
5757 	                vm_page_activate(m);            \
5758 	        vm_page_unlock_queues();                \
5759 	}                                               \
5760 	MACRO_END
5761 
5762 
5763 	object_locks_dropped = FALSE;
5764 	/*
5765 	 * We must verify that the maps have not changed
5766 	 * since our last lookup. vm_map_verify() needs the
5767 	 * map lock (shared) but we are holding object locks.
5768 	 * So we do a try_lock() first and, if that fails, we
5769 	 * drop the object locks and go in for the map lock again.
5770 	 */
5771 	if (!vm_map_try_lock_read(original_map)) {
5772 		if (m != VM_PAGE_NULL) {
5773 			old_copy_object = m_object->copy;
5774 			vm_object_unlock(m_object);
5775 		} else {
5776 			old_copy_object = VM_OBJECT_NULL;
5777 			vm_object_unlock(object);
5778 		}
5779 
5780 		object_locks_dropped = TRUE;
5781 
5782 		vm_map_lock_read(original_map);
5783 	}
5784 
5785 	if ((map != original_map) || !vm_map_verify(map, &version)) {
5786 		if (object_locks_dropped == FALSE) {
5787 			if (m != VM_PAGE_NULL) {
5788 				old_copy_object = m_object->copy;
5789 				vm_object_unlock(m_object);
5790 			} else {
5791 				old_copy_object = VM_OBJECT_NULL;
5792 				vm_object_unlock(object);
5793 			}
5794 
5795 			object_locks_dropped = TRUE;
5796 		}
5797 
5798 		/*
5799 		 * no object locks are held at this point
5800 		 */
5801 		vm_object_t             retry_object;
5802 		vm_object_offset_t      retry_offset;
5803 		vm_prot_t               retry_prot;
5804 
5805 		/*
5806 		 * To avoid trying to write_lock the map while another
5807 		 * thread has it read_locked (in vm_map_pageable), we
5808 		 * do not try for write permission.  If the page is
5809 		 * still writable, we will get write permission.  If it
5810 		 * is not, or has been marked needs_copy, we enter the
5811 		 * mapping without write permission, and will merely
5812 		 * take another fault.
5813 		 */
5814 		map = original_map;
5815 
5816 		kr = vm_map_lookup_locked(&map, vaddr,
5817 		    fault_type & ~VM_PROT_WRITE,
5818 		    OBJECT_LOCK_EXCLUSIVE, &version,
5819 		    &retry_object, &retry_offset, &retry_prot,
5820 		    &wired,
5821 		    &fault_info,
5822 		    &real_map,
5823 		    NULL);
5824 		pmap = real_map->pmap;
5825 
5826 		if (kr != KERN_SUCCESS) {
5827 			vm_map_unlock_read(map);
5828 
5829 			if (m != VM_PAGE_NULL) {
5830 				assert(VM_PAGE_OBJECT(m) == m_object);
5831 
5832 				/*
5833 				 * retake the lock so that
5834 				 * we can drop the paging reference
5835 				 * in vm_fault_cleanup and do the
5836 				 * PAGE_WAKEUP_DONE in RELEASE_PAGE
5837 				 */
5838 				vm_object_lock(m_object);
5839 
5840 				RELEASE_PAGE(m);
5841 
5842 				vm_fault_cleanup(m_object, top_page);
5843 			} else {
5844 				/*
5845 				 * retake the lock so that
5846 				 * we can drop the paging reference
5847 				 * in vm_fault_cleanup
5848 				 */
5849 				vm_object_lock(object);
5850 
5851 				vm_fault_cleanup(object, top_page);
5852 			}
5853 			vm_object_deallocate(object);
5854 
5855 			if (kr == KERN_INVALID_ADDRESS) {
5856 				kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
5857 			}
5858 			goto done;
5859 		}
5860 		vm_object_unlock(retry_object);
5861 
5862 		if ((retry_object != object) || (retry_offset != offset)) {
5863 			vm_map_unlock_read(map);
5864 			if (real_map != map) {
5865 				vm_map_unlock(real_map);
5866 			}
5867 
5868 			if (m != VM_PAGE_NULL) {
5869 				assert(VM_PAGE_OBJECT(m) == m_object);
5870 
5871 				/*
5872 				 * retake the lock so that
5873 				 * we can drop the paging reference
5874 				 * in vm_fault_cleanup and do the
5875 				 * PAGE_WAKEUP_DONE in RELEASE_PAGE
5876 				 */
5877 				vm_object_lock(m_object);
5878 
5879 				RELEASE_PAGE(m);
5880 
5881 				vm_fault_cleanup(m_object, top_page);
5882 			} else {
5883 				/*
5884 				 * retake the lock so that
5885 				 * we can drop the paging reference
5886 				 * in vm_fault_cleanup
5887 				 */
5888 				vm_object_lock(object);
5889 
5890 				vm_fault_cleanup(object, top_page);
5891 			}
5892 			vm_object_deallocate(object);
5893 
5894 			goto RetryFault;
5895 		}
5896 		/*
5897 		 * Check whether the protection has changed or the object
5898 		 * has been copied while we left the map unlocked.
5899 		 */
5900 		if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
5901 			/* If the pmap layer cares, pass the full set. */
5902 			prot = retry_prot;
5903 		} else {
5904 			prot &= retry_prot;
5905 		}
5906 	}
5907 
5908 	if (object_locks_dropped == TRUE) {
5909 		if (m != VM_PAGE_NULL) {
5910 			assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
5911 			assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
5912 			vm_object_lock(m_object);
5913 
5914 			if (m_object->copy != old_copy_object) {
5915 				/*
5916 				 * The copy object changed while the top-level object
5917 				 * was unlocked, so take away write permission.
5918 				 */
5919 				assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot));
5920 				prot &= ~VM_PROT_WRITE;
5921 			}
5922 		} else {
5923 			vm_object_lock(object);
5924 		}
5925 
5926 		object_locks_dropped = FALSE;
5927 	}
5928 
5929 	if (!need_copy &&
5930 	    !fault_info.no_copy_on_read &&
5931 	    m != VM_PAGE_NULL &&
5932 	    VM_PAGE_OBJECT(m) != object &&
5933 	    !VM_PAGE_OBJECT(m)->pager_trusted &&
5934 	    vm_protect_privileged_from_untrusted &&
5935 	    !VM_PAGE_OBJECT(m)->code_signed &&
5936 	    current_proc_is_privileged()) {
5937 		/*
5938 		 * We found the page we want in an "untrusted" VM object
5939 		 * down the shadow chain.  Since the target is "privileged"
5940 		 * we want to perform a copy-on-read of that page, so that the
5941 		 * mapped object gets a stable copy and does not have to
5942 		 * rely on the "untrusted" object to provide the same
5943 		 * contents if the page gets reclaimed and has to be paged
5944 		 * in again later on.
5945 		 *
5946 		 * Special case: if the mapping is executable and the untrusted
5947 		 * object is code-signed and the process is "cs_enforced", we
5948 		 * do not copy-on-read because that would break code-signing
5949 		 * enforcement expectations (an executable page must belong
5950 		 * to a code-signed object) and we can rely on code-signing
5951 		 * to re-validate the page if it gets evicted and paged back in.
5952 		 */
5953 //		printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5954 		vm_copied_on_read++;
5955 		need_copy_on_read = TRUE;
5956 		need_copy = TRUE;
5957 	} else {
5958 		need_copy_on_read = FALSE;
5959 	}
5960 
5961 	/*
5962 	 * If we want to wire down this page, but no longer have
5963 	 * adequate permissions, we must start all over.
5964 	 * If we decided to copy-on-read, we must also start all over.
5965 	 */
5966 	if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
5967 	    need_copy_on_read) {
5968 		vm_map_unlock_read(map);
5969 		if (real_map != map) {
5970 			vm_map_unlock(real_map);
5971 		}
5972 
5973 		if (m != VM_PAGE_NULL) {
5974 			assert(VM_PAGE_OBJECT(m) == m_object);
5975 
5976 			RELEASE_PAGE(m);
5977 
5978 			vm_fault_cleanup(m_object, top_page);
5979 		} else {
5980 			vm_fault_cleanup(object, top_page);
5981 		}
5982 
5983 		vm_object_deallocate(object);
5984 
5985 		goto RetryFault;
5986 	}
5987 	if (m != VM_PAGE_NULL) {
5988 		/*
5989 		 * Put this page into the physical map.
5990 		 * We had to do the unlock above because pmap_enter
5991 		 * may cause other faults.  The page may be on
5992 		 * the pageout queues.  If the pageout daemon comes
5993 		 * across the page, it will remove it from the queues.
5994 		 */
5995 		if (fault_page_size < PAGE_SIZE) {
5996 			DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
5997 			assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
5998 			    fault_phys_offset < PAGE_SIZE),
5999 			    "0x%llx\n", (uint64_t)fault_phys_offset);
6000 		} else {
6001 			assertf(fault_phys_offset == 0,
6002 			    "0x%llx\n", (uint64_t)fault_phys_offset);
6003 		}
6004 		assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6005 		assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6006 		if (caller_pmap) {
6007 			kr = vm_fault_enter(m,
6008 			    caller_pmap,
6009 			    caller_pmap_addr,
6010 			    fault_page_size,
6011 			    fault_phys_offset,
6012 			    prot,
6013 			    caller_prot,
6014 			    wired,
6015 			    change_wiring,
6016 			    wire_tag,
6017 			    &fault_info,
6018 			    NULL,
6019 			    &type_of_fault);
6020 		} else {
6021 			kr = vm_fault_enter(m,
6022 			    pmap,
6023 			    vaddr,
6024 			    fault_page_size,
6025 			    fault_phys_offset,
6026 			    prot,
6027 			    caller_prot,
6028 			    wired,
6029 			    change_wiring,
6030 			    wire_tag,
6031 			    &fault_info,
6032 			    NULL,
6033 			    &type_of_fault);
6034 		}
6035 		assert(VM_PAGE_OBJECT(m) == m_object);
6036 
6037 		{
6038 			int     event_code = 0;
6039 
6040 			if (m_object->internal) {
6041 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6042 			} else if (m_object->object_is_shared_cache) {
6043 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6044 			} else {
6045 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6046 			}
6047 
6048 			KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid(), 0);
6049 			KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid(), 0, 0, 0, 0);
6050 
6051 			DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
6052 		}
6053 		if (kr != KERN_SUCCESS) {
6054 			/* abort this page fault */
6055 			vm_map_unlock_read(map);
6056 			if (real_map != map) {
6057 				vm_map_unlock(real_map);
6058 			}
6059 			PAGE_WAKEUP_DONE(m);
6060 			vm_fault_cleanup(m_object, top_page);
6061 			vm_object_deallocate(object);
6062 			goto done;
6063 		}
6064 		if (physpage_p != NULL) {
6065 			/* for vm_map_wire_and_extract() */
6066 			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6067 			if (prot & VM_PROT_WRITE) {
6068 				vm_object_lock_assert_exclusive(m_object);
6069 				m->vmp_dirty = TRUE;
6070 			}
6071 		}
6072 	} else {
6073 		vm_map_entry_t          entry;
6074 		vm_map_offset_t         laddr;
6075 		vm_map_offset_t         ldelta, hdelta;
6076 
6077 		/*
6078 		 * do a pmap block mapping from the physical address
6079 		 * in the object
6080 		 */
6081 
6082 		if (real_map != map) {
6083 			vm_map_unlock(real_map);
6084 		}
6085 
6086 		if (original_map != map) {
6087 			vm_map_unlock_read(map);
6088 			vm_map_lock_read(original_map);
6089 			map = original_map;
6090 		}
6091 		real_map = map;
6092 
6093 		laddr = vaddr;
6094 		hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
6095 
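		/*
		 * Walk down through any submaps to the entry backing "laddr",
		 * shrinking ldelta/hdelta to the largest span around the fault
		 * address that stays within every entry traversed; the block
		 * mapping set up below covers [vaddr - ldelta, vaddr + hdelta).
		 */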
6096 		while (vm_map_lookup_entry(map, laddr, &entry)) {
6097 			if (ldelta > (laddr - entry->vme_start)) {
6098 				ldelta = laddr - entry->vme_start;
6099 			}
6100 			if (hdelta > (entry->vme_end - laddr)) {
6101 				hdelta = entry->vme_end - laddr;
6102 			}
6103 			if (entry->is_sub_map) {
6104 				laddr = ((laddr - entry->vme_start)
6105 				    + VME_OFFSET(entry));
6106 				vm_map_lock_read(VME_SUBMAP(entry));
6107 
6108 				if (map != real_map) {
6109 					vm_map_unlock_read(map);
6110 				}
6111 				if (entry->use_pmap) {
6112 					vm_map_unlock_read(real_map);
6113 					real_map = VME_SUBMAP(entry);
6114 				}
6115 				map = VME_SUBMAP(entry);
6116 			} else {
6117 				break;
6118 			}
6119 		}
6120 
6121 		if (vm_map_lookup_entry(map, laddr, &entry) &&
6122 		    (VME_OBJECT(entry) != NULL) &&
6123 		    (VME_OBJECT(entry) == object)) {
6124 			uint16_t superpage;
6125 
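			/*
			 * The mapping qualifies as a superpage only if the object
			 * is physically contiguous with no pager, is mapped from
			 * offset 0, and the entry covers the whole object at an
			 * address aligned to the object's size.
			 */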
6126 			if (!object->pager_created &&
6127 			    object->phys_contiguous &&
6128 			    VME_OFFSET(entry) == 0 &&
6129 			    (entry->vme_end - entry->vme_start == object->vo_size) &&
6130 			    VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6131 				superpage = VM_MEM_SUPERPAGE;
6132 			} else {
6133 				superpage = 0;
6134 			}
6135 
6136 			if (superpage && physpage_p) {
6137 				/* for vm_map_wire_and_extract() */
6138 				*physpage_p = (ppnum_t)
6139 				    ((((vm_map_offset_t)
6140 				    object->vo_shadow_offset)
6141 				    + VME_OFFSET(entry)
6142 				    + (laddr - entry->vme_start))
6143 				    >> PAGE_SHIFT);
6144 			}
6145 
6146 			if (caller_pmap) {
6147 				/*
6148 				 * Set up a block mapped area
6149 				 */
6150 				assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6151 				kr = pmap_map_block_addr(caller_pmap,
6152 				    (addr64_t)(caller_pmap_addr - ldelta),
6153 				    (pmap_paddr_t)(((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
6154 				    VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6155 				    (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6156 				    (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6157 
6158 				if (kr != KERN_SUCCESS) {
6159 					goto cleanup;
6160 				}
6161 			} else {
6162 				/*
6163 				 * Set up a block mapped area
6164 				 */
6165 				assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6166 				kr = pmap_map_block_addr(real_map->pmap,
6167 				    (addr64_t)(vaddr - ldelta),
6168 				    (pmap_paddr_t)(((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
6169 				    VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6170 				    (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6171 				    (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6172 
6173 				if (kr != KERN_SUCCESS) {
6174 					goto cleanup;
6175 				}
6176 			}
6177 		}
6178 	}
6179 
6180 	/*
6181 	 * Success
6182 	 */
6183 	kr = KERN_SUCCESS;
6184 
6185 	/*
6186 	 * TODO: could most of the done cases just use cleanup?
6187 	 */
6188 cleanup:
6189 	/*
6190 	 * Unlock everything, and return
6191 	 */
6192 	vm_map_unlock_read(map);
6193 	if (real_map != map) {
6194 		vm_map_unlock(real_map);
6195 	}
6196 
6197 	if (m != VM_PAGE_NULL) {
6198 		assert(VM_PAGE_OBJECT(m) == m_object);
6199 
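		/*
		 * For a write fault on a file-backed (external) object, take a
		 * paging reference and remember the pager and offset now; once
		 * all locks are dropped, the "done:" path calls
		 * vnode_pager_dirtied() so the filesystem learns that the page
		 * was modified, then drops the paging reference.
		 */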
6200 		if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6201 			vm_object_paging_begin(m_object);
6202 
6203 			assert(written_on_object == VM_OBJECT_NULL);
6204 			written_on_object = m_object;
6205 			written_on_pager = m_object->pager;
6206 			written_on_offset = m_object->paging_offset + m->vmp_offset;
6207 		}
6208 		PAGE_WAKEUP_DONE(m);
6209 
6210 		vm_fault_cleanup(m_object, top_page);
6211 	} else {
6212 		vm_fault_cleanup(object, top_page);
6213 	}
6214 
6215 	vm_object_deallocate(object);
6216 
6217 #undef  RELEASE_PAGE
6218 
6219 done:
6220 	thread_interrupt_level(interruptible_state);
6221 
6222 	if (resilient_media_object != VM_OBJECT_NULL) {
6223 		assert(resilient_media_retry);
6224 		assert(resilient_media_offset != (vm_object_offset_t)-1);
6225 		/* release extra reference on failed object */
6226 //             printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6227 		vm_object_lock_assert_notheld(resilient_media_object);
6228 		vm_object_deallocate(resilient_media_object);
6229 		resilient_media_object = VM_OBJECT_NULL;
6230 		resilient_media_offset = (vm_object_offset_t)-1;
6231 		resilient_media_retry = false;
6232 		vm_fault_resilient_media_release++;
6233 	}
6234 	assert(!resilient_media_retry);
6235 
6236 	/*
6237 	 * Only I/O throttle on faults which cause a pagein/swapin.
6238 	 */
6239 	if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6240 		throttle_lowpri_io(1);
6241 	} else {
6242 		if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6243 			if ((throttle_delay = vm_page_throttled(TRUE))) {
6244 				if (vm_debug_events) {
6245 					if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6246 						VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6247 					} else if (type_of_fault == DBG_COW_FAULT) {
6248 						VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6249 					} else {
6250 						VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6251 					}
6252 				}
6253 				delay(throttle_delay);
6254 			}
6255 		}
6256 	}
6257 
6258 	if (written_on_object) {
6259 		vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6260 
6261 		vm_object_lock(written_on_object);
6262 		vm_object_paging_end(written_on_object);
6263 		vm_object_unlock(written_on_object);
6264 
6265 		written_on_object = VM_OBJECT_NULL;
6266 	}
6267 
6268 	if (rtfault) {
6269 		vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6270 	}
6271 
6272 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6273 	    (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
6274 	    ((uint64_t)trace_vaddr >> 32),
6275 	    trace_vaddr,
6276 	    kr,
6277 	    vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
6278 	    0);
6279 
6280 	if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6281 		DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6282 	}
6283 
6284 	return kr;
6285 }
6286 
6287 /*
6288  *	vm_fault_wire:
6289  *
6290  *	Wire down a range of virtual addresses in a map.
6291  */
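/*
 * The entry must already be marked "in transition" by the caller.  The range
 * is first made non-pageable in the pmap, then each page is wired by
 * simulating a fault: the fast path (vm_fault_wire_fast) is tried first and,
 * if that fails, a full vm_fault_internal() is used.  On any hard failure the
 * pages wired so far are unwired again before the error is returned.
 */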
6292 kern_return_t
6293 vm_fault_wire(
6294 	vm_map_t        map,
6295 	vm_map_entry_t  entry,
6296 	vm_prot_t       prot,
6297 	vm_tag_t        wire_tag,
6298 	pmap_t          pmap,
6299 	vm_map_offset_t pmap_addr,
6300 	ppnum_t         *physpage_p)
6301 {
6302 	vm_map_offset_t va;
6303 	vm_map_offset_t end_addr = entry->vme_end;
6304 	kern_return_t   rc;
6305 	vm_map_size_t   effective_page_size;
6306 
6307 	assert(entry->in_transition);
6308 
6309 	if ((VME_OBJECT(entry) != NULL) &&
6310 	    !entry->is_sub_map &&
6311 	    VME_OBJECT(entry)->phys_contiguous) {
6312 		return KERN_SUCCESS;
6313 	}
6314 
6315 	/*
6316 	 *	Inform the physical mapping system that the
6317 	 *	range of addresses may not fault, so that
6318 	 *	page tables and such can be locked down as well.
6319 	 */
6320 
6321 	pmap_pageable(pmap, pmap_addr,
6322 	    pmap_addr + (end_addr - entry->vme_start), FALSE);
6323 
6324 	/*
6325 	 *	We simulate a fault to get the page and enter it
6326 	 *	in the physical map.
6327 	 */
6328 
6329 	effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6330 	for (va = entry->vme_start;
6331 	    va < end_addr;
6332 	    va += effective_page_size) {
6333 		rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6334 		    pmap_addr + (va - entry->vme_start),
6335 		    physpage_p);
6336 		if (rc != KERN_SUCCESS) {
6337 			rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
6338 			    ((pmap == kernel_pmap)
6339 			    ? THREAD_UNINT
6340 			    : THREAD_ABORTSAFE),
6341 			    pmap,
6342 			    (pmap_addr +
6343 			    (va - entry->vme_start)),
6344 			    physpage_p);
6345 			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6346 		}
6347 
6348 		if (rc != KERN_SUCCESS) {
6349 			struct vm_map_entry     tmp_entry = *entry;
6350 
6351 			/* unwire wired pages */
6352 			tmp_entry.vme_end = va;
6353 			vm_fault_unwire(map,
6354 			    &tmp_entry, FALSE, pmap, pmap_addr);
6355 
6356 			return rc;
6357 		}
6358 	}
6359 	return KERN_SUCCESS;
6360 }
6361 
6362 /*
6363  *	vm_fault_unwire:
6364  *
6365  *	Unwire a range of virtual addresses in a map.
6366  */
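/*
 * If "deallocate" is TRUE the underlying pages are disconnected from all
 * pmaps and freed outright; otherwise they are merely unwired (and zeroed
 * first if the entry requested zero_wired_pages).
 */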
6367 void
6368 vm_fault_unwire(
6369 	vm_map_t        map,
6370 	vm_map_entry_t  entry,
6371 	boolean_t       deallocate,
6372 	pmap_t          pmap,
6373 	vm_map_offset_t pmap_addr)
6374 {
6375 	vm_map_offset_t va;
6376 	vm_map_offset_t end_addr = entry->vme_end;
6377 	vm_object_t             object;
6378 	struct vm_object_fault_info fault_info = {};
6379 	unsigned int    unwired_pages;
6380 	vm_map_size_t   effective_page_size;
6381 
6382 	object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6383 
6384 	/*
6385 	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6386 	 * do anything since such memory is wired by default.  So we don't have
6387 	 * anything to undo here.
6388 	 */
6389 
6390 	if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6391 		return;
6392 	}
6393 
6394 	fault_info.interruptible = THREAD_UNINT;
6395 	fault_info.behavior = entry->behavior;
6396 	fault_info.user_tag = VME_ALIAS(entry);
6397 	if (entry->iokit_acct ||
6398 	    (!entry->is_sub_map && !entry->use_pmap)) {
6399 		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6400 	}
6401 	fault_info.lo_offset = VME_OFFSET(entry);
6402 	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6403 	fault_info.no_cache = entry->no_cache;
6404 	fault_info.stealth = TRUE;
6405 
6406 	unwired_pages = 0;
6407 
6408 	/*
6409 	 *	Since the pages are wired down, we must be able to
6410 	 *	get their mappings from the physical map system.
6411 	 */
6412 
6413 	effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6414 	for (va = entry->vme_start;
6415 	    va < end_addr;
6416 	    va += effective_page_size) {
6417 		if (object == VM_OBJECT_NULL) {
6418 			if (pmap) {
6419 				pmap_change_wiring(pmap,
6420 				    pmap_addr + (va - entry->vme_start), FALSE);
6421 			}
6422 			(void) vm_fault(map, va, VM_PROT_NONE,
6423 			    TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6424 		} else {
6425 			vm_prot_t       prot;
6426 			vm_page_t       result_page;
6427 			vm_page_t       top_page;
6428 			vm_object_t     result_object;
6429 			vm_fault_return_t result;
6430 
6431 			/* cap cluster size at maximum UPL size */
6432 			upl_size_t cluster_size;
6433 			if (os_sub_overflow(end_addr, va, &cluster_size)) {
6434 				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6435 			}
6436 			fault_info.cluster_size = cluster_size;
6437 
6438 			do {
6439 				prot = VM_PROT_NONE;
6440 
6441 				vm_object_lock(object);
6442 				vm_object_paging_begin(object);
6443 				result_page = VM_PAGE_NULL;
6444 				result = vm_fault_page(
6445 					object,
6446 					(VME_OFFSET(entry) +
6447 					(va - entry->vme_start)),
6448 					VM_PROT_NONE, TRUE,
6449 					FALSE, /* page not looked up */
6450 					&prot, &result_page, &top_page,
6451 					(int *)0,
6452 					NULL, map->no_zero_fill,
6453 					FALSE, &fault_info);
6454 			} while (result == VM_FAULT_RETRY);
6455 
6456 			/*
6457 			 * If this was a mapping to a file on a device that has been forcibly
6458 			 * unmounted, then we won't get a page back from vm_fault_page().  Just
6459 			 * move on to the next one in case the remaining pages are mapped from
6460 			 * different objects.  During a forced unmount, the object is terminated
6461 			 * so the alive flag will be false if this happens.  A forced unmount will
6462 			 * occur when an external disk is unplugged before the user does an
6463 			 * eject, so we don't want to panic in that situation.
6464 			 */
6465 
6466 			if (result == VM_FAULT_MEMORY_ERROR && !object->alive) {
6467 				continue;
6468 			}
6469 
6470 			if (result == VM_FAULT_MEMORY_ERROR &&
6471 			    object == kernel_object) {
6472 				/*
6473 				 * This must have been allocated with
6474 				 * KMA_KOBJECT and KMA_VAONLY and there's
6475 				 * no physical page at this offset.
6476 				 * We're done (no page to free).
6477 				 */
6478 				assert(deallocate);
6479 				continue;
6480 			}
6481 
6482 			if (result != VM_FAULT_SUCCESS) {
6483 				panic("vm_fault_unwire: failure");
6484 			}
6485 
6486 			result_object = VM_PAGE_OBJECT(result_page);
6487 
6488 			if (deallocate) {
6489 				assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6490 				    vm_page_fictitious_addr);
6491 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6492 				if (VM_PAGE_WIRED(result_page)) {
6493 					unwired_pages++;
6494 				}
6495 				VM_PAGE_FREE(result_page);
6496 			} else {
6497 				if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) {
6498 					pmap_change_wiring(pmap,
6499 					    pmap_addr + (va - entry->vme_start), FALSE);
6500 				}
6501 
6502 
6503 				if (VM_PAGE_WIRED(result_page)) {
6504 					vm_page_lockspin_queues();
6505 					vm_page_unwire(result_page, TRUE);
6506 					vm_page_unlock_queues();
6507 					unwired_pages++;
6508 				}
6509 				if (entry->zero_wired_pages) {
6510 					pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
6511 					entry->zero_wired_pages = FALSE;
6512 				}
6513 
6514 				PAGE_WAKEUP_DONE(result_page);
6515 			}
6516 			vm_fault_cleanup(result_object, top_page);
6517 		}
6518 	}
6519 
6520 	/*
6521 	 *	Inform the physical mapping system that the range
6522 	 *	of addresses may fault, so that page tables and
6523 	 *	such may be unwired themselves.
6524 	 */
6525 
6526 	pmap_pageable(pmap, pmap_addr,
6527 	    pmap_addr + (end_addr - entry->vme_start), TRUE);
6528 
6529 	if (kernel_object == object) {
6530 		/*
6531 		 * Would like to make user_tag in vm_object_fault_info
6532 		 * vm_tag_t (unsigned short) but user_tag derives its value from
6533 		 * VME_ALIAS(entry) at a few places and VME_ALIAS, in turn, casts
6534 		 * to an _unsigned int_ which is used by non-fault_info paths throughout the
6535 		 * code.
6536 		 *
6537 		 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
6538 		 */
6539 		assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
6540 		    "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
6541 		vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages));
6542 	}
6543 }
6544 
6545 /*
6546  *	vm_fault_wire_fast:
6547  *
6548  *	Handle common case of a wire down page fault at the given address.
6549  *	If successful, the page is inserted into the associated physical map.
6550  *	The map entry is passed in to avoid the overhead of a map lookup.
6551  *
6552  *	NOTE: the given address should be truncated to the
6553  *	proper page address.
6554  *
6555  *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
6556  *	a standard error specifying why the fault is fatal is returned.
6557  *
6558  *	The map in question must be referenced, and remains so.
6559  *	Caller has a read lock on the map.
6560  *
6561  *	This is a stripped version of vm_fault() for wiring pages.  Anything
6562  *	other than the common case will return KERN_FAILURE, and the caller
6563  *	is expected to call vm_fault().
6564  */
6565 static kern_return_t
6566 vm_fault_wire_fast(
6567 	__unused vm_map_t       map,
6568 	vm_map_offset_t va,
6569 	__unused vm_prot_t       caller_prot,
6570 	vm_tag_t        wire_tag,
6571 	vm_map_entry_t  entry,
6572 	pmap_t          pmap,
6573 	vm_map_offset_t pmap_addr,
6574 	ppnum_t         *physpage_p)
6575 {
6576 	vm_object_t             object;
6577 	vm_object_offset_t      offset;
6578 	vm_page_t               m;
6579 	vm_prot_t               prot;
6580 	thread_t                thread = current_thread();
6581 	int                     type_of_fault;
6582 	kern_return_t           kr;
6583 	vm_map_size_t           fault_page_size;
6584 	vm_map_offset_t         fault_phys_offset;
6585 	struct vm_object_fault_info fault_info = {};
6586 
6587 	counter_inc(&vm_statistics_faults);
6588 
6589 	if (thread != THREAD_NULL) {
6590 		counter_inc(&get_threadtask(thread)->faults);
6591 	}
6592 
6593 /*
6594  *	Recovery actions
6595  */
6596 
6597 #undef  RELEASE_PAGE
6598 #define RELEASE_PAGE(m) {                               \
6599 	PAGE_WAKEUP_DONE(m);                            \
6600 	vm_page_lockspin_queues();                      \
6601 	vm_page_unwire(m, TRUE);                        \
6602 	vm_page_unlock_queues();                        \
6603 }
6604 
6605 
6606 #undef  UNLOCK_THINGS
6607 #define UNLOCK_THINGS   {                               \
6608 	vm_object_paging_end(object);                      \
6609 	vm_object_unlock(object);                          \
6610 }
6611 
6612 #undef  UNLOCK_AND_DEALLOCATE
6613 #define UNLOCK_AND_DEALLOCATE   {                       \
6614 	UNLOCK_THINGS;                                  \
6615 	vm_object_deallocate(object);                   \
6616 }
6617 /*
6618  *	Give up and have caller do things the hard way.
6619  */
6620 
6621 #define GIVE_UP {                                       \
6622 	UNLOCK_AND_DEALLOCATE;                          \
6623 	return(KERN_FAILURE);                           \
6624 }
6625 
6626 
6627 	/*
6628 	 *	If this entry is not directly to a vm_object, bail out.
6629 	 */
6630 	if (entry->is_sub_map) {
6631 		assert(physpage_p == NULL);
6632 		return KERN_FAILURE;
6633 	}
6634 
6635 	/*
6636 	 *	Find the backing store object and offset into it.
6637 	 */
6638 
6639 	object = VME_OBJECT(entry);
6640 	offset = (va - entry->vme_start) + VME_OFFSET(entry);
6641 	prot = entry->protection;
6642 
6643 	/*
6644 	 *	Make a reference to this object to prevent its
6645 	 *	disposal while we are messing with it.
6646 	 */
6647 
6648 	vm_object_lock(object);
6649 	vm_object_reference_locked(object);
6650 	vm_object_paging_begin(object);
6651 
6652 	/*
6653 	 *	INVARIANTS (through entire routine):
6654 	 *
6655 	 *	1)	At all times, we must either have the object
6656 	 *		lock or a busy page in some object to prevent
6657 	 *		some other thread from trying to bring in
6658 	 *		the same page.
6659 	 *
6660 	 *	2)	Once we have a busy page, we must remove it from
6661 	 *		the pageout queues, so that the pageout daemon
6662 	 *		will not grab it away.
6663 	 *
6664 	 */
6665 
6666 	/*
6667 	 *	Look for page in top-level object.  If it's not there or
6668 	 *	there's something going on, give up.
6669 	 */
6670 	m = vm_page_lookup(object, vm_object_trunc_page(offset));
6671 	if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
6672 	    (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
6673 		GIVE_UP;
6674 	}
6675 	if (m->vmp_fictitious &&
6676 	    VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
6677 		/*
6678 		 * Guard pages are fictitious pages and are never
6679 		 * entered into a pmap, so let's say it's been wired...
6680 		 */
6681 		kr = KERN_SUCCESS;
6682 		goto done;
6683 	}
6684 
6685 	/*
6686 	 *	Wire the page down now.  All bail outs beyond this
6687 	 *	point must unwire the page.
6688 	 */
6689 
6690 	vm_page_lockspin_queues();
6691 	vm_page_wire(m, wire_tag, TRUE);
6692 	vm_page_unlock_queues();
6693 
6694 	/*
6695 	 *	Mark page busy for other threads.
6696 	 */
6697 	assert(!m->vmp_busy);
6698 	m->vmp_busy = TRUE;
6699 	assert(!m->vmp_absent);
6700 
6701 	/*
6702 	 *	Give up if the page is being written and there's a copy object
6703 	 */
6704 	if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
6705 		RELEASE_PAGE(m);
6706 		GIVE_UP;
6707 	}
6708 
6709 	fault_info.user_tag = VME_ALIAS(entry);
6710 	fault_info.pmap_options = 0;
6711 	if (entry->iokit_acct ||
6712 	    (!entry->is_sub_map && !entry->use_pmap)) {
6713 		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6714 	}
6715 
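	/*
	 * When the map uses a smaller page size than the kernel (e.g. 4K
	 * mappings on a 16K kernel), fault_phys_offset is the offset of the
	 * faulting sub-page within the kernel-sized VM page.
	 */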
6716 	fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6717 	fault_phys_offset = offset - vm_object_trunc_page(offset);
6718 
6719 	/*
6720 	 *	Put this page into the physical map.
6721 	 */
6722 	type_of_fault = DBG_CACHE_HIT_FAULT;
6723 	assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
6724 	assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6725 	kr = vm_fault_enter(m,
6726 	    pmap,
6727 	    pmap_addr,
6728 	    fault_page_size,
6729 	    fault_phys_offset,
6730 	    prot,
6731 	    prot,
6732 	    TRUE,                  /* wired */
6733 	    FALSE,                 /* change_wiring */
6734 	    wire_tag,
6735 	    &fault_info,
6736 	    NULL,
6737 	    &type_of_fault);
6738 	if (kr != KERN_SUCCESS) {
6739 		RELEASE_PAGE(m);
6740 		GIVE_UP;
6741 	}
6742 
6743 done:
6744 	/*
6745 	 *	Unlock everything, and return
6746 	 */
6747 
6748 	if (physpage_p) {
6749 		/* for vm_map_wire_and_extract() */
6750 		if (kr == KERN_SUCCESS) {
6751 			assert(object == VM_PAGE_OBJECT(m));
6752 			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6753 			if (prot & VM_PROT_WRITE) {
6754 				vm_object_lock_assert_exclusive(object);
6755 				m->vmp_dirty = TRUE;
6756 			}
6757 		} else {
6758 			*physpage_p = 0;
6759 		}
6760 	}
6761 
6762 	PAGE_WAKEUP_DONE(m);
6763 	UNLOCK_AND_DEALLOCATE;
6764 
6765 	return kr;
6766 }
6767 
6768 /*
6769  *	Routine:	vm_fault_copy_cleanup
6770  *	Purpose:
6771  *		Release a page used by vm_fault_copy.
6772  */
6773 
6774 static void
6775 vm_fault_copy_cleanup(
6776 	vm_page_t       page,
6777 	vm_page_t       top_page)
6778 {
6779 	vm_object_t     object = VM_PAGE_OBJECT(page);
6780 
6781 	vm_object_lock(object);
6782 	PAGE_WAKEUP_DONE(page);
6783 	if (!VM_PAGE_PAGEABLE(page)) {
6784 		vm_page_lockspin_queues();
6785 		if (!VM_PAGE_PAGEABLE(page)) {
6786 			vm_page_activate(page);
6787 		}
6788 		vm_page_unlock_queues();
6789 	}
6790 	vm_fault_cleanup(object, top_page);
6791 }
6792 
6793 static void
6794 vm_fault_copy_dst_cleanup(
6795 	vm_page_t       page)
6796 {
6797 	vm_object_t     object;
6798 
6799 	if (page != VM_PAGE_NULL) {
6800 		object = VM_PAGE_OBJECT(page);
6801 		vm_object_lock(object);
6802 		vm_page_lockspin_queues();
6803 		vm_page_unwire(page, TRUE);
6804 		vm_page_unlock_queues();
6805 		vm_object_paging_end(object);
6806 		vm_object_unlock(object);
6807 	}
6808 }
6809 
6810 /*
6811  *	Routine:	vm_fault_copy
6812  *
6813  *	Purpose:
6814  *		Copy pages from one virtual memory object to another --
6815  *		neither the source nor destination pages need be resident.
6816  *
6817  *		Before actually copying a page, the version associated with
6818  *		the destination address map will be verified.
6819  *
6820  *	In/out conditions:
6821  *		The caller must hold a reference, but not a lock, to
6822  *		each of the source and destination objects and to the
6823  *		destination map.
6824  *
6825  *	Results:
6826  *		Returns KERN_SUCCESS if no errors were encountered in
6827  *		reading or writing the data.  Returns KERN_INTERRUPTED if
6828  *		the operation was interrupted (only possible if the
6829  *		"interruptible" argument is asserted).  Other return values
6830  *		indicate a permanent error in copying the data.
6831  *
6832  *		The actual amount of data copied will be returned in the
6833  *		"copy_size" argument.  In the event that the destination map
6834  *		verification failed, this amount may be less than the amount
6835  *		requested.
6836  */
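/*
 * Each iteration of the copy loop: fault in (and temporarily wire) the
 * destination page, fault in the source page (or plan a zero-fill if there
 * is no source object), re-verify the destination map version, then copy
 * either a partial page or a whole page and advance the offsets.
 */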
6837 kern_return_t
6838 vm_fault_copy(
6839 	vm_object_t             src_object,
6840 	vm_object_offset_t      src_offset,
6841 	vm_map_size_t           *copy_size,             /* INOUT */
6842 	vm_object_t             dst_object,
6843 	vm_object_offset_t      dst_offset,
6844 	vm_map_t                dst_map,
6845 	vm_map_version_t         *dst_version,
6846 	int                     interruptible)
6847 {
6848 	vm_page_t               result_page;
6849 
6850 	vm_page_t               src_page;
6851 	vm_page_t               src_top_page;
6852 	vm_prot_t               src_prot;
6853 
6854 	vm_page_t               dst_page;
6855 	vm_page_t               dst_top_page;
6856 	vm_prot_t               dst_prot;
6857 
6858 	vm_map_size_t           amount_left;
6859 	vm_object_t             old_copy_object;
6860 	vm_object_t             result_page_object = NULL;
6861 	kern_return_t           error = 0;
6862 	vm_fault_return_t       result;
6863 
6864 	vm_map_size_t           part_size;
6865 	struct vm_object_fault_info fault_info_src = {};
6866 	struct vm_object_fault_info fault_info_dst = {};
6867 
6868 	/*
6869 	 * In order not to confuse the clustered pageins, align
6870 	 * the different offsets on a page boundary.
6871 	 */
6872 
6873 #define RETURN(x)                                       \
6874 	MACRO_BEGIN                                     \
6875 	*copy_size -= amount_left;                      \
6876 	MACRO_RETURN(x);                                \
6877 	MACRO_END
6878 
6879 	amount_left = *copy_size;
6880 
6881 	fault_info_src.interruptible = interruptible;
6882 	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
6883 	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
6884 	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
6885 	fault_info_src.stealth = TRUE;
6886 
6887 	fault_info_dst.interruptible = interruptible;
6888 	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
6889 	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
6890 	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
6891 	fault_info_dst.stealth = TRUE;
6892 
6893 	do { /* while (amount_left > 0) */
6894 		/*
6895 		 * There may be a deadlock if both source and destination
6896 		 * pages are the same. To avoid this deadlock, the copy must
6897 		 * start by getting the destination page in order to apply
6898 		 * COW semantics if any.
6899 		 */
6900 
6901 RetryDestinationFault:;
6902 
6903 		dst_prot = VM_PROT_WRITE | VM_PROT_READ;
6904 
6905 		vm_object_lock(dst_object);
6906 		vm_object_paging_begin(dst_object);
6907 
6908 		/* cap cluster size at maximum UPL size */
6909 		upl_size_t cluster_size;
6910 		if (os_convert_overflow(amount_left, &cluster_size)) {
6911 			cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6912 		}
6913 		fault_info_dst.cluster_size = cluster_size;
6914 
6915 		dst_page = VM_PAGE_NULL;
6916 		result = vm_fault_page(dst_object,
6917 		    vm_object_trunc_page(dst_offset),
6918 		    VM_PROT_WRITE | VM_PROT_READ,
6919 		    FALSE,
6920 		    FALSE,                    /* page not looked up */
6921 		    &dst_prot, &dst_page, &dst_top_page,
6922 		    (int *)0,
6923 		    &error,
6924 		    dst_map->no_zero_fill,
6925 		    FALSE, &fault_info_dst);
6926 		switch (result) {
6927 		case VM_FAULT_SUCCESS:
6928 			break;
6929 		case VM_FAULT_RETRY:
6930 			goto RetryDestinationFault;
6931 		case VM_FAULT_MEMORY_SHORTAGE:
6932 			if (vm_page_wait(interruptible)) {
6933 				goto RetryDestinationFault;
6934 			}
6935 			OS_FALLTHROUGH;
6936 		case VM_FAULT_INTERRUPTED:
6937 			RETURN(MACH_SEND_INTERRUPTED);
6938 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
6939 			/* success but no VM page: fail the copy */
6940 			vm_object_paging_end(dst_object);
6941 			vm_object_unlock(dst_object);
6942 			OS_FALLTHROUGH;
6943 		case VM_FAULT_MEMORY_ERROR:
6944 			if (error) {
6945 				return error;
6946 			} else {
6947 				return KERN_MEMORY_ERROR;
6948 			}
6949 		default:
6950 			panic("vm_fault_copy: unexpected error 0x%x from "
6951 			    "vm_fault_page()\n", result);
6952 		}
6953 		assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
6954 
6955 		assert(dst_object == VM_PAGE_OBJECT(dst_page));
6956 		old_copy_object = dst_object->copy;
6957 
6958 		/*
6959 		 * There exists the possibility that the source and
6960 		 * destination page are the same.  But we can't
6961 		 * easily determine that now.  If they are the
6962 		 * same, the call to vm_fault_page() for the
6963 		 * destination page will deadlock.  To prevent this we
6964 		 * wire the page so we can drop busy without having
6965 		 * the page daemon steal the page.  We clean up the
6966 		 * top page but keep the paging reference on the object
6967 		 * holding the dest page so it doesn't go away.
6968 		 */
6969 
6970 		vm_page_lockspin_queues();
6971 		vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
6972 		vm_page_unlock_queues();
6973 		PAGE_WAKEUP_DONE(dst_page);
6974 		vm_object_unlock(dst_object);
6975 
6976 		if (dst_top_page != VM_PAGE_NULL) {
6977 			vm_object_lock(dst_object);
6978 			VM_PAGE_FREE(dst_top_page);
6979 			vm_object_paging_end(dst_object);
6980 			vm_object_unlock(dst_object);
6981 		}
6982 
6983 RetrySourceFault:;
6984 
6985 		if (src_object == VM_OBJECT_NULL) {
6986 			/*
6987 			 *	No source object.  We will just
6988 			 *	zero-fill the page in dst_object.
6989 			 */
6990 			src_page = VM_PAGE_NULL;
6991 			result_page = VM_PAGE_NULL;
6992 		} else {
6993 			vm_object_lock(src_object);
6994 			src_page = vm_page_lookup(src_object,
6995 			    vm_object_trunc_page(src_offset));
6996 			if (src_page == dst_page) {
6997 				src_prot = dst_prot;
6998 				result_page = VM_PAGE_NULL;
6999 			} else {
7000 				src_prot = VM_PROT_READ;
7001 				vm_object_paging_begin(src_object);
7002 
7003 				/* cap cluster size at maximum UPL size */
7004 				if (os_convert_overflow(amount_left, &cluster_size)) {
7005 					cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7006 				}
7007 				fault_info_src.cluster_size = cluster_size;
7008 
7009 				result_page = VM_PAGE_NULL;
7010 				result = vm_fault_page(
7011 					src_object,
7012 					vm_object_trunc_page(src_offset),
7013 					VM_PROT_READ, FALSE,
7014 					FALSE, /* page not looked up */
7015 					&src_prot,
7016 					&result_page, &src_top_page,
7017 					(int *)0, &error, FALSE,
7018 					FALSE, &fault_info_src);
7019 
7020 				switch (result) {
7021 				case VM_FAULT_SUCCESS:
7022 					break;
7023 				case VM_FAULT_RETRY:
7024 					goto RetrySourceFault;
7025 				case VM_FAULT_MEMORY_SHORTAGE:
7026 					if (vm_page_wait(interruptible)) {
7027 						goto RetrySourceFault;
7028 					}
7029 					OS_FALLTHROUGH;
7030 				case VM_FAULT_INTERRUPTED:
7031 					vm_fault_copy_dst_cleanup(dst_page);
7032 					RETURN(MACH_SEND_INTERRUPTED);
7033 				case VM_FAULT_SUCCESS_NO_VM_PAGE:
7034 					/* success but no VM page: fail */
7035 					vm_object_paging_end(src_object);
7036 					vm_object_unlock(src_object);
7037 					OS_FALLTHROUGH;
7038 				case VM_FAULT_MEMORY_ERROR:
7039 					vm_fault_copy_dst_cleanup(dst_page);
7040 					if (error) {
7041 						return error;
7042 					} else {
7043 						return KERN_MEMORY_ERROR;
7044 					}
7045 				default:
7046 					panic("vm_fault_copy(2): unexpected "
7047 					    "error 0x%x from "
7048 					    "vm_fault_page()\n", result);
7049 				}
7050 
7051 				result_page_object = VM_PAGE_OBJECT(result_page);
7052 				assert((src_top_page == VM_PAGE_NULL) ==
7053 				    (result_page_object == src_object));
7054 			}
7055 			assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7056 			vm_object_unlock(result_page_object);
7057 		}
7058 
7059 		vm_map_lock_read(dst_map);
7060 
7061 		if (!vm_map_verify(dst_map, dst_version)) {
7062 			vm_map_unlock_read(dst_map);
7063 			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7064 				vm_fault_copy_cleanup(result_page, src_top_page);
7065 			}
7066 			vm_fault_copy_dst_cleanup(dst_page);
7067 			break;
7068 		}
7069 		assert(dst_object == VM_PAGE_OBJECT(dst_page));
7070 
7071 		vm_object_lock(dst_object);
7072 
7073 		if (dst_object->copy != old_copy_object) {
7074 			vm_object_unlock(dst_object);
7075 			vm_map_unlock_read(dst_map);
7076 			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7077 				vm_fault_copy_cleanup(result_page, src_top_page);
7078 			}
7079 			vm_fault_copy_dst_cleanup(dst_page);
7080 			break;
7081 		}
7082 		vm_object_unlock(dst_object);
7083 
7084 		/*
7085 		 *	Copy the page, and note that it is dirty
7086 		 *	immediately.
7087 		 */
7088 
7089 		if (!page_aligned(src_offset) ||
7090 		    !page_aligned(dst_offset) ||
7091 		    !page_aligned(amount_left)) {
7092 			vm_object_offset_t      src_po,
7093 			    dst_po;
7094 
7095 			src_po = src_offset - vm_object_trunc_page(src_offset);
7096 			dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7097 
7098 			if (dst_po > src_po) {
7099 				part_size = PAGE_SIZE - dst_po;
7100 			} else {
7101 				part_size = PAGE_SIZE - src_po;
7102 			}
7103 			if (part_size > (amount_left)) {
7104 				part_size = amount_left;
7105 			}
7106 
7107 			if (result_page == VM_PAGE_NULL) {
7108 				assert((vm_offset_t) dst_po == dst_po);
7109 				assert((vm_size_t) part_size == part_size);
7110 				vm_page_part_zero_fill(dst_page,
7111 				    (vm_offset_t) dst_po,
7112 				    (vm_size_t) part_size);
7113 			} else {
7114 				assert((vm_offset_t) src_po == src_po);
7115 				assert((vm_offset_t) dst_po == dst_po);
7116 				assert((vm_size_t) part_size == part_size);
7117 				vm_page_part_copy(result_page,
7118 				    (vm_offset_t) src_po,
7119 				    dst_page,
7120 				    (vm_offset_t) dst_po,
7121 				    (vm_size_t)part_size);
7122 				if (!dst_page->vmp_dirty) {
7123 					vm_object_lock(dst_object);
7124 					SET_PAGE_DIRTY(dst_page, TRUE);
7125 					vm_object_unlock(dst_object);
7126 				}
7127 			}
7128 		} else {
7129 			part_size = PAGE_SIZE;
7130 
7131 			if (result_page == VM_PAGE_NULL) {
7132 				vm_page_zero_fill(dst_page);
7133 			} else {
7134 				vm_object_lock(result_page_object);
7135 				vm_page_copy(result_page, dst_page);
7136 				vm_object_unlock(result_page_object);
7137 
7138 				if (!dst_page->vmp_dirty) {
7139 					vm_object_lock(dst_object);
7140 					SET_PAGE_DIRTY(dst_page, TRUE);
7141 					vm_object_unlock(dst_object);
7142 				}
7143 			}
7144 		}
7145 
7146 		/*
7147 		 *	Unlock everything, and return
7148 		 */
7149 
7150 		vm_map_unlock_read(dst_map);
7151 
7152 		if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7153 			vm_fault_copy_cleanup(result_page, src_top_page);
7154 		}
7155 		vm_fault_copy_dst_cleanup(dst_page);
7156 
7157 		amount_left -= part_size;
7158 		src_offset += part_size;
7159 		dst_offset += part_size;
7160 	} while (amount_left > 0);
7161 
7162 	RETURN(KERN_SUCCESS);
7163 #undef  RETURN
7164 
7165 	/*NOTREACHED*/
7166 }
7167 
7168 #if     VM_FAULT_CLASSIFY
7169 /*
7170  *	Temporary statistics gathering support.
7171  */
7172 
7173 /*
7174  *	Statistics arrays:
7175  */
7176 #define VM_FAULT_TYPES_MAX      5
7177 #define VM_FAULT_LEVEL_MAX      8
7178 
7179 int     vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7180 
7181 #define VM_FAULT_TYPE_ZERO_FILL 0
7182 #define VM_FAULT_TYPE_MAP_IN    1
7183 #define VM_FAULT_TYPE_PAGER     2
7184 #define VM_FAULT_TYPE_COPY      3
7185 #define VM_FAULT_TYPE_OTHER     4
7186 
7187 
7188 void
7189 vm_fault_classify(vm_object_t           object,
7190     vm_object_offset_t    offset,
7191     vm_prot_t             fault_type)
7192 {
7193 	int             type, level = 0;
7194 	vm_page_t       m;
7195 
7196 	while (TRUE) {
7197 		m = vm_page_lookup(object, offset);
7198 		if (m != VM_PAGE_NULL) {
7199 			if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7200 				type = VM_FAULT_TYPE_OTHER;
7201 				break;
7202 			}
7203 			if (((fault_type & VM_PROT_WRITE) == 0) ||
7204 			    ((level == 0) && object->copy == VM_OBJECT_NULL)) {
7205 				type = VM_FAULT_TYPE_MAP_IN;
7206 				break;
7207 			}
7208 			type = VM_FAULT_TYPE_COPY;
7209 			break;
7210 		} else {
7211 			if (object->pager_created) {
7212 				type = VM_FAULT_TYPE_PAGER;
7213 				break;
7214 			}
7215 			if (object->shadow == VM_OBJECT_NULL) {
7216 				type = VM_FAULT_TYPE_ZERO_FILL;
7217 				break;
7218 			}
7219 
7220 			offset += object->vo_shadow_offset;
7221 			object = object->shadow;
7222 			level++;
7223 			continue;
7224 		}
7225 	}
7226 
7227 	if (level > VM_FAULT_LEVEL_MAX) {
7228 		level = VM_FAULT_LEVEL_MAX;
7229 	}
7230 
7231 	vm_fault_stats[type][level] += 1;
7232 
7233 	return;
7234 }
7235 
7236 /* cleanup routine to call from debugger */
7237 
7238 void
7239 vm_fault_classify_init(void)
7240 {
7241 	int type, level;
7242 
7243 	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7244 		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7245 			vm_fault_stats[type][level] = 0;
7246 		}
7247 	}
7248 
7249 	return;
7250 }
7251 #endif  /* VM_FAULT_CLASSIFY */
7252 
7253 vm_offset_t
7254 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
7255 {
7256 	vm_map_entry_t  entry;
7257 	vm_object_t     object;
7258 	vm_offset_t     object_offset;
7259 	vm_page_t       m;
7260 	int             compressor_external_state, compressed_count_delta;
7261 	int             compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7262 	int             my_fault_type = VM_PROT_READ;
7263 	kern_return_t   kr;
7264 	int effective_page_mask, effective_page_size;
7265 
7266 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7267 		effective_page_mask = VM_MAP_PAGE_MASK(map);
7268 		effective_page_size = VM_MAP_PAGE_SIZE(map);
7269 	} else {
7270 		effective_page_mask = PAGE_MASK;
7271 		effective_page_size = PAGE_SIZE;
7272 	}
7273 
7274 	if (not_in_kdp) {
7275 		panic("kdp_lightweight_fault called from outside of debugger context");
7276 	}
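	/*
	 * We are running in debugger context and cannot block or take faults:
	 * any lock already held exclusively, any busy or in-flux page, simply
	 * makes us return 0 instead of waiting.
	 */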
7277 
7278 	assert(map != VM_MAP_NULL);
7279 
7280 	assert((cur_target_addr & effective_page_mask) == 0);
7281 	if ((cur_target_addr & effective_page_mask) != 0) {
7282 		return 0;
7283 	}
7284 
7285 	if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7286 		return 0;
7287 	}
7288 
7289 	if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7290 		return 0;
7291 	}
7292 
7293 	if (entry->is_sub_map) {
7294 		return 0;
7295 	}
7296 
7297 	object = VME_OBJECT(entry);
7298 	if (object == VM_OBJECT_NULL) {
7299 		return 0;
7300 	}
7301 
7302 	object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7303 
7304 	while (TRUE) {
7305 		if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7306 			return 0;
7307 		}
7308 
7309 		if (object->pager_created && (object->paging_in_progress ||
7310 		    object->activity_in_progress)) {
7311 			return 0;
7312 		}
7313 
7314 		m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7315 
7316 		if (m != VM_PAGE_NULL) {
7317 			if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
7318 				return 0;
7319 			}
7320 
7321 			if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || m->vmp_error || m->vmp_cleaning ||
7322 			    m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7323 				return 0;
7324 			}
7325 
7326 			assert(!m->vmp_private);
7327 			if (m->vmp_private) {
7328 				return 0;
7329 			}
7330 
7331 			assert(!m->vmp_fictitious);
7332 			if (m->vmp_fictitious) {
7333 				return 0;
7334 			}
7335 
7336 			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7337 			if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7338 				return 0;
7339 			}
7340 
7341 			return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7342 		}
7343 
7344 		compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7345 
7346 		if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7347 			if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7348 				kr = vm_compressor_pager_get(object->pager,
7349 				    vm_object_trunc_page(object_offset + object->paging_offset),
7350 				    kdp_compressor_decompressed_page_ppnum, &my_fault_type,
7351 				    compressor_flags, &compressed_count_delta);
7352 				if (kr == KERN_SUCCESS) {
7353 					return kdp_compressor_decompressed_page_paddr;
7354 				} else {
7355 					return 0;
7356 				}
7357 			}
7358 		}
7359 
7360 		if (object->shadow == VM_OBJECT_NULL) {
7361 			return 0;
7362 		}
7363 
7364 		object_offset += object->vo_shadow_offset;
7365 		object = object->shadow;
7366 	}
7367 }
7368 
7369 /*
7370  * vm_page_validate_cs_fast():
7371  * Performs a few quick checks to determine if the page's code signature
7372  * really needs to be fully validated.  It could:
7373  *	1. have been modified (i.e. automatically tainted),
7374  *	2. have already been validated,
7375  *	3. have already been found to be tainted,
7376  *	4. no longer have a backing store.
7377  * Returns FALSE if the page needs to be fully validated.
7378  */
7379 static boolean_t
7380 vm_page_validate_cs_fast(
7381 	vm_page_t       page,
7382 	vm_map_size_t   fault_page_size,
7383 	vm_map_offset_t fault_phys_offset)
7384 {
7385 	vm_object_t     object;
7386 
7387 	object = VM_PAGE_OBJECT(page);
7388 	vm_object_lock_assert_held(object);
7389 
7390 	if (page->vmp_wpmapped &&
7391 	    !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7392 		/*
7393 		 * This page was mapped for "write" access sometime in the
7394 		 * past and could still be modifiable in the future.
7395 		 * Consider it tainted.
7396 		 * [ If the page was already found to be "tainted", no
7397 		 * need to re-validate. ]
7398 		 */
7399 		vm_object_lock_assert_exclusive(object);
7400 		VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7401 		VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7402 		if (cs_debug) {
7403 			printf("CODESIGNING: %s: "
7404 			    "page %p obj %p off 0x%llx "
7405 			    "was modified\n",
7406 			    __FUNCTION__,
7407 			    page, object, page->vmp_offset);
7408 		}
7409 		vm_cs_validated_dirtied++;
7410 	}
7411 
7412 	if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7413 	    VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7414 		return TRUE;
7415 	}
7416 	vm_object_lock_assert_exclusive(object);
7417 
7418 #if CHECK_CS_VALIDATION_BITMAP
7419 	kern_return_t kr;
7420 
7421 	kr = vnode_pager_cs_check_validation_bitmap(
7422 		object->pager,
7423 		page->vmp_offset + object->paging_offset,
7424 		CS_BITMAP_CHECK);
7425 	if (kr == KERN_SUCCESS) {
7426 		page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7427 		page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7428 		vm_cs_bitmap_validated++;
7429 		return TRUE;
7430 	}
7431 #endif /* CHECK_CS_VALIDATION_BITMAP */
7432 
7433 	if (!object->alive || object->terminating || object->pager == NULL) {
7434 		/*
7435 		 * The object is terminating and we don't have its pager
7436 		 * so we can't validate the data...
7437 		 */
7438 		return TRUE;
7439 	}
7440 
7441 	/* we need to really validate this page */
7442 	vm_object_lock_assert_exclusive(object);
7443 	return FALSE;
7444 }
7445 
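/*
 * Validate the code signature of a page that is already mapped at "kaddr"
 * in the kernel, updating the page's cs_validated / cs_tainted / cs_nx bits.
 */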
7446 void
7447 vm_page_validate_cs_mapped_slow(
7448 	vm_page_t       page,
7449 	const void      *kaddr)
7450 {
7451 	vm_object_t             object;
7452 	memory_object_offset_t  mo_offset;
7453 	memory_object_t         pager;
7454 	struct vnode            *vnode;
7455 	int                     validated, tainted, nx;
7456 
7457 	assert(page->vmp_busy);
7458 	object = VM_PAGE_OBJECT(page);
7459 	vm_object_lock_assert_exclusive(object);
7460 
7461 	vm_cs_validates++;
7462 
7463 	/*
7464 	 * Since we get here to validate a page that was brought in by
7465 	 * the pager, we know that this pager is all setup and ready
7466 	 * by now.
7467 	 */
7468 	assert(object->code_signed);
7469 	assert(!object->internal);
7470 	assert(object->pager != NULL);
7471 	assert(object->pager_ready);
7472 
7473 	pager = object->pager;
7474 	assert(object->paging_in_progress);
7475 	vnode = vnode_pager_lookup_vnode(pager);
7476 	mo_offset = page->vmp_offset + object->paging_offset;
7477 
7478 	/* verify the SHA1 hash for this page */
7479 	validated = 0;
7480 	tainted = 0;
7481 	nx = 0;
7482 	cs_validate_page(vnode,
7483 	    pager,
7484 	    mo_offset,
7485 	    (const void *)((const char *)kaddr),
7486 	    &validated,
7487 	    &tainted,
7488 	    &nx);
7489 
7490 	page->vmp_cs_validated |= validated;
7491 	page->vmp_cs_tainted |= tainted;
7492 	page->vmp_cs_nx |= nx;
7493 
7494 #if CHECK_CS_VALIDATION_BITMAP
7495 	if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
7496 	    page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
7497 		vnode_pager_cs_check_validation_bitmap(object->pager,
7498 		    mo_offset,
7499 		    CS_BITMAP_SET);
7500 	}
7501 #endif /* CHECK_CS_VALIDATION_BITMAP */
7502 }
7503 
7504 void
7505 vm_page_validate_cs_mapped(
7506 	vm_page_t       page,
7507 	vm_map_size_t   fault_page_size,
7508 	vm_map_offset_t fault_phys_offset,
7509 	const void      *kaddr)
7510 {
7511 	if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7512 		vm_page_validate_cs_mapped_slow(page, kaddr);
7513 	}
7514 }
7515 
7516 static void
7517 vm_page_map_and_validate_cs(
7518 	vm_object_t     object,
7519 	vm_page_t       page)
7520 {
7521 	vm_object_offset_t      offset;
7522 	vm_map_offset_t         koffset;
7523 	vm_map_size_t           ksize;
7524 	vm_offset_t             kaddr;
7525 	kern_return_t           kr;
7526 	boolean_t               busy_page;
7527 	boolean_t               need_unmap;
7528 
7529 	vm_object_lock_assert_exclusive(object);
7530 
7531 	assert(object->code_signed);
7532 	offset = page->vmp_offset;
7533 
7534 	busy_page = page->vmp_busy;
7535 	if (!busy_page) {
7536 		/* keep page busy while we map (and unlock) the VM object */
7537 		page->vmp_busy = TRUE;
7538 	}
7539 
7540 	/*
7541 	 * Take a paging reference on the VM object
7542 	 * to protect it from collapse or bypass,
7543 	 * and keep it from disappearing too.
7544 	 */
7545 	vm_object_paging_begin(object);
7546 
7547 	/* map the page in the kernel address space */
7548 	ksize = PAGE_SIZE_64;
7549 	koffset = 0;
7550 	need_unmap = FALSE;
7551 	kr = vm_paging_map_object(page,
7552 	    object,
7553 	    offset,
7554 	    VM_PROT_READ,
7555 	    FALSE,                       /* can't unlock object ! */
7556 	    &ksize,
7557 	    &koffset,
7558 	    &need_unmap);
7559 	if (kr != KERN_SUCCESS) {
7560 		panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
7561 	}
7562 	kaddr = CAST_DOWN(vm_offset_t, koffset);
7563 
7564 	/* validate the mapped page */
7565 	vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
7566 
7567 	assert(page->vmp_busy);
7568 	assert(object == VM_PAGE_OBJECT(page));
7569 	vm_object_lock_assert_exclusive(object);
7570 
7571 	if (!busy_page) {
7572 		PAGE_WAKEUP_DONE(page);
7573 	}
7574 	if (need_unmap) {
7575 		/* unmap the map from the kernel address space */
7576 		vm_paging_unmap_object(object, koffset, koffset + ksize);
7577 		koffset = 0;
7578 		ksize = 0;
7579 		kaddr = 0;
7580 	}
7581 	vm_object_paging_end(object);
7582 }
7583 
7584 void
7585 vm_page_validate_cs(
7586 	vm_page_t       page,
7587 	vm_map_size_t   fault_page_size,
7588 	vm_map_offset_t fault_phys_offset)
7589 {
7590 	vm_object_t             object;
7591 
7592 	object = VM_PAGE_OBJECT(page);
7593 	vm_object_lock_assert_held(object);
7594 
7595 	if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7596 		return;
7597 	}
7598 	vm_page_map_and_validate_cs(object, page);
7599 }
7600 
7601 void
7602 vm_page_validate_cs_mapped_chunk(
7603 	vm_page_t       page,
7604 	const void      *kaddr,
7605 	vm_offset_t     chunk_offset,
7606 	vm_size_t       chunk_size,
7607 	boolean_t       *validated_p,
7608 	unsigned        *tainted_p)
7609 {
7610 	vm_object_t             object;
7611 	vm_object_offset_t      offset, offset_in_page;
7612 	memory_object_t         pager;
7613 	struct vnode            *vnode;
7614 	boolean_t               validated;
7615 	unsigned                tainted;
7616 
7617 	*validated_p = FALSE;
7618 	*tainted_p = 0;
7619 
7620 	assert(page->vmp_busy);
7621 	object = VM_PAGE_OBJECT(page);
7622 	vm_object_lock_assert_exclusive(object);
7623 
7624 	assert(object->code_signed);
7625 	offset = page->vmp_offset;
7626 
7627 	if (!object->alive || object->terminating || object->pager == NULL) {
7628 		/*
7629 		 * The object is terminating and we don't have its pager
7630 		 * so we can't validate the data...
7631 		 */
7632 		return;
7633 	}
7634 	/*
7635 	 * Since we get here to validate a page that was brought in by
7636 	 * the pager, we know that this pager is all setup and ready
7637 	 * by now.
7638 	 */
7639 	assert(!object->internal);
7640 	assert(object->pager != NULL);
7641 	assert(object->pager_ready);
7642 
7643 	pager = object->pager;
7644 	assert(object->paging_in_progress);
7645 	vnode = vnode_pager_lookup_vnode(pager);
7646 
7647 	/* verify the signature for this chunk */
7648 	offset_in_page = chunk_offset;
7649 	assert(offset_in_page < PAGE_SIZE);
7650 
7651 	tainted = 0;
7652 	validated = cs_validate_range(vnode,
7653 	    pager,
7654 	    (object->paging_offset +
7655 	    offset +
7656 	    offset_in_page),
7657 	    (const void *)((const char *)kaddr
7658 	    + offset_in_page),
7659 	    chunk_size,
7660 	    &tainted);
7661 	if (validated) {
7662 		*validated_p = TRUE;
7663 	}
7664 	if (tainted) {
7665 		*tainted_p = tainted;
7666 	}
7667 }
7668 
7669 static void
7670 vm_rtfrecord_lock(void)
7671 {
7672 	lck_spin_lock(&vm_rtfr_slock);
7673 }
7674 
7675 static void
7676 vm_rtfrecord_unlock(void)
7677 {
7678 	lck_spin_unlock(&vm_rtfr_slock);
7679 }
7680 
7681 unsigned int
7682 vmrtfaultinfo_bufsz(void)
7683 {
7684 	return vmrtf_num_records * sizeof(vm_rtfault_record_t);
7685 }
7686 
7687 #include <kern/backtrace.h>
7688 
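/*
 * Record one real-time fault: start timestamp and duration, faulting address,
 * user program counter, fault type, and the task/thread identifiers, stored
 * into the circular vm_rtf_records buffer.
 */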
7689 __attribute__((noinline))
7690 static void
7691 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
7692 {
7693 	uint64_t fend = mach_continuous_time();
7694 
7695 	uint64_t cfpc = 0;
7696 	uint64_t ctid = cthread->thread_id;
7697 	uint64_t cupid = get_current_unique_pid();
7698 
7699 	uintptr_t bpc = 0;
7700 	errno_t btr = 0;
7701 
7702 	/*
7703 	 * Capture a single-frame backtrace.  This extracts just the program
7704 	 * counter at the point of the fault, and should not use copyin to get
7705 	 * Rosetta save state.
7706 	 */
7707 	struct backtrace_control ctl = {
7708 		.btc_user_thread = cthread,
7709 		.btc_user_copy = backtrace_user_copy_error,
7710 	};
7711 	unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
7712 	if ((btr == 0) && (bfrs > 0)) {
7713 		cfpc = bpc;
7714 	}
7715 
7716 	assert((fstart != 0) && fend >= fstart);
7717 	vm_rtfrecord_lock();
7718 	assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
7719 
7720 	vmrtfrs.vmrtf_total++;
7721 	vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
7722 
7723 	cvmr->rtfabstime = fstart;
7724 	cvmr->rtfduration = fend - fstart;
7725 	cvmr->rtfaddr = fault_vaddr;
7726 	cvmr->rtfpc = cfpc;
7727 	cvmr->rtftype = type_of_fault;
7728 	cvmr->rtfupid = cupid;
7729 	cvmr->rtftid = ctid;
7730 
7731 	if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
7732 		vmrtfrs.vmrtfr_curi = 0;
7733 	}
7734 
7735 	vm_rtfrecord_unlock();
7736 }
7737 
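/*
 * Copy out recorded real-time fault records that match the given unique PID
 * (on DEVELOPMENT/DEBUG kernels a root caller sees every record).  Returns
 * TRUE if the supplied buffer was too small to hold all matching records.
 *
 * A rough sketch of a hypothetical in-kernel caller (not part of this file):
 *
 *	unsigned long bufsz = vmrtfaultinfo_bufsz();
 *	vm_rtfault_record_t *buf = kalloc_data(bufsz, Z_WAITOK);
 *	unsigned long nextracted = 0;
 *	(void) vmrtf_extract(get_current_unique_pid(), FALSE, bufsz, buf, &nextracted);
 */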
7738 int
7739 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
7740 {
7741 	vm_rtfault_record_t *cvmrd = vrecords;
7742 	size_t residue = vrecordsz;
7743 	size_t numextracted = 0;
7744 	boolean_t early_exit = FALSE;
7745 
7746 	vm_rtfrecord_lock();
7747 
7748 	for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
7749 		if (residue < sizeof(vm_rtfault_record_t)) {
7750 			early_exit = TRUE;
7751 			break;
7752 		}
7753 
7754 		if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
7755 #if     DEVELOPMENT || DEBUG
7756 			if (isroot == FALSE) {
7757 				continue;
7758 			}
7759 #else
7760 			continue;
7761 #endif /* DEVELOPMENT || DEBUG */
7762 		}
7763 
7764 		*cvmrd = vmrtfrs.vm_rtf_records[vmfi];
7765 		cvmrd++;
7766 		residue -= sizeof(vm_rtfault_record_t);
7767 		numextracted++;
7768 	}
7769 
7770 	vm_rtfrecord_unlock();
7771 
7772 	*vmrtfrv = numextracted;
7773 	return early_exit;
7774 }
7775 
7776 /*
7777  * Only allow one diagnosis to be in flight at a time, to avoid
7778  * creating too much additional memory usage.
7779  */
7780 static volatile uint_t vmtc_diagnosing;
7781 unsigned int vmtc_total = 0;
7782 
7783 /*
7784  * Type used to update telemetry for the diagnosis counts.
7785  */
7786 CA_EVENT(vmtc_telemetry,
7787     CA_INT, vmtc_num_byte,            /* number of corrupt bytes found */
7788     CA_BOOL, vmtc_undiagnosed,        /* undiagnosed because more than 1 at a time */
7789     CA_BOOL, vmtc_not_eligible,       /* the page didn't qualify */
7790     CA_BOOL, vmtc_copyin_fail,        /* unable to copy in the page */
7791     CA_BOOL, vmtc_not_found,          /* no corruption found even though CS failed */
7792     CA_BOOL, vmtc_one_bit_flip,       /* single bit flip */
7793     CA_BOOL, vmtc_testing);           /* caused on purpose by testing */
7794 
7795 #if DEVELOPMENT || DEBUG
7796 /*
7797  * Buffers used to compare before/after page contents.
7798  * Stashed to aid when debugging crashes.
7799  */
7800 static size_t vmtc_last_buffer_size = 0;
7801 static uint64_t *vmtc_last_before_buffer = NULL;
7802 static uint64_t *vmtc_last_after_buffer = NULL;
7803 
7804 /*
7805  * Needed to record corruptions due to testing.
7806  */
7807 static uintptr_t corruption_test_va = 0;
7808 #endif /* DEVELOPMENT || DEBUG */
7809 
7810 /*
7811  * Stash a copy of data from a possibly corrupt page.
7812  */
7813 static uint64_t *
7814 vmtc_get_page_data(
7815 	vm_map_offset_t code_addr,
7816 	vm_page_t       page)
7817 {
7818 	uint64_t        *buffer = NULL;
7819 	addr64_t        buffer_paddr;
7820 	addr64_t        page_paddr;
7821 	extern void     bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
7822 	uint_t          size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
7823 
7824 	/*
7825 	 * Need an aligned buffer to do a physical copy.
7826 	 */
7827 	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
7828 	    size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
7829 		return NULL;
7830 	}
7831 	buffer_paddr = kvtophys((vm_offset_t)buffer);
7832 	page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
7833 
7834 	/* adjust the page start address if we need only 4K of a 16K page */
7835 	if (size < PAGE_SIZE) {
7836 		uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
7837 		page_paddr += subpage_start;
7838 	}
7839 
7840 	bcopy_phys(page_paddr, buffer_paddr, size);
7841 	return buffer;
7842 }
7843 
7844 /*
7845  * Set things up so we can diagnose a potential text page corruption.
7846  */
7847 static uint64_t *
7848 vmtc_text_page_diagnose_setup(
7849 	vm_map_offset_t code_addr,
7850 	vm_page_t       page,
7851 	CA_EVENT_TYPE(vmtc_telemetry) *event)
7852 {
7853 	uint64_t        *buffer = NULL;
7854 
7855 	/*
7856 	 * If another is being diagnosed, skip this one.
7857 	 */
7858 	if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
7859 		event->vmtc_undiagnosed = true;
7860 		return NULL;
7861 	}
7862 
7863 	/*
7864 	 * Get the contents of the corrupt page.
7865 	 */
7866 	buffer = vmtc_get_page_data(code_addr, page);
7867 	if (buffer == NULL) {
7868 		event->vmtc_copyin_fail = true;
7869 		if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
7870 			panic("Bad compare and swap in setup!");
7871 		}
7872 		return NULL;
7873 	}
7874 	return buffer;
7875 }
7876 
7877 /*
7878  * Diagnose the text page by comparing its contents with
7879  * the one we've previously saved.
7880  */
7881 static void
7882 vmtc_text_page_diagnose(
7883 	vm_map_offset_t code_addr,
7884 	uint64_t        *old_code_buffer,
7885 	CA_EVENT_TYPE(vmtc_telemetry) *event)
7886 {
7887 	uint64_t        *new_code_buffer;
7888 	size_t          size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
7889 	uint_t          count = (uint_t)size / sizeof(uint64_t);
7890 	uint_t          diff_count = 0;
7891 	bool            bit_flip = false;
7892 	uint_t          b;
7893 	uint64_t        *new;
7894 	uint64_t        *old;
7895 
7896 	new_code_buffer = kalloc_data(size, Z_WAITOK);
7897 	assert(new_code_buffer != NULL);
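	/*
	 * Re-read the page via copyin so its current contents can be compared
	 * against the copy stashed earlier in old_code_buffer.
	 */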
7898 	if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
7899 		/* copyin error, so undo things */
7900 		event->vmtc_copyin_fail = true;
7901 		goto done;
7902 	}
7903 
7904 	new = new_code_buffer;
7905 	old = old_code_buffer;
7906 	for (; count-- > 0; ++new, ++old) {
7907 		if (*new == *old) {
7908 			continue;
7909 		}
7910 
7911 		/*
7912 		 * On first diff, check for a single bit flip
7913 		 */
7914 		if (diff_count == 0) {
7915 			uint64_t x = (*new ^ *old);
7916 			assert(x != 0);
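			/* a single bit flip leaves exactly one bit set in x, i.e. (x & (x - 1)) == 0 */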
7917 			if ((x & (x - 1)) == 0) {
7918 				bit_flip = true;
7919 				++diff_count;
7920 				continue;
7921 			}
7922 		}
7923 
7924 		/*
7925 		 * count up the number of different bytes.
7926 		 */
7927 		for (b = 0; b < sizeof(uint64_t); ++b) {
7928 			char *n = (char *)new;
7929 			char *o = (char *)old;
7930 			if (n[b] != o[b]) {
7931 				++diff_count;
7932 			}
7933 		}
7934 	}
7935 
7936 	if (diff_count > 1) {
7937 		bit_flip = false;
7938 	}
7939 
7940 	if (diff_count == 0) {
7941 		event->vmtc_not_found = true;
7942 	} else {
7943 		event->vmtc_num_byte = diff_count;
7944 	}
7945 	if (bit_flip) {
7946 		event->vmtc_one_bit_flip = true;
7947 	}
7948 
7949 done:
7950 	/*
7951 	 * Free up the code copy buffers, but save the last
7952 	 * set on development / debug kernels in case they
7953 	 * can provide evidence for debugging memory stomps.
7954 	 */
7955 #if DEVELOPMENT || DEBUG
7956 	if (vmtc_last_before_buffer != NULL) {
7957 		kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
7958 	}
7959 	if (vmtc_last_after_buffer != NULL) {
7960 		kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
7961 	}
7962 	vmtc_last_before_buffer = old_code_buffer;
7963 	vmtc_last_after_buffer = new_code_buffer;
7964 	vmtc_last_buffer_size = size;
7965 #else /* DEVELOPMENT || DEBUG */
7966 	kfree_data(new_code_buffer, size);
7967 	kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
7968 #endif /* DEVELOPMENT || DEBUG */
7969 
7970 	/*
7971 	 * We're finished, so clear the diagnosing flag.
7972 	 */
7973 	if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
7974 		panic("Bad compare and swap in diagnose!");
7975 	}
7976 }
7977 
7978 /*
7979  * For the given map, virt address, find the object, offset, and page.
7980  * This has to lookup the map entry, verify protections, walk any shadow chains.
7981  * If found, returns with the object locked.
7982  */
7983 static kern_return_t
7984 vmtc_revalidate_lookup(
7985 	vm_map_t               map,
7986 	vm_map_offset_t        vaddr,
7987 	vm_object_t            *ret_object,
7988 	vm_object_offset_t     *ret_offset,
7989 	vm_page_t              *ret_page)
7990 {
7991 	vm_object_t            object;
7992 	vm_object_offset_t     offset;
7993 	vm_page_t              page;
7994 	kern_return_t          kr = KERN_SUCCESS;
7995 	uint8_t                object_lock_type = OBJECT_LOCK_EXCLUSIVE;
7996 	vm_map_version_t       version;
7997 	boolean_t              wired;
7998 	struct vm_object_fault_info fault_info = {};
7999 	vm_map_t               real_map = NULL;
8000 	vm_prot_t              prot;
8001 	vm_object_t            shadow;
8002 
8003 	/*
8004 	 * Find the object/offset for the given location/map.
8005 	 * Note this returns with the object locked.
8006 	 */
8007 restart:
8008 	vm_map_lock_read(map);
8009 	object = VM_OBJECT_NULL;        /* in case we come around the restart path */
8010 	kr = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ,
8011 	    object_lock_type, &version, &object, &offset, &prot, &wired,
8012 	    &fault_info, &real_map, NULL);
8013 	vm_map_unlock_read(map);
8014 	if (real_map != NULL && real_map != map) {
8015 		vm_map_unlock(real_map);
8016 	}
8017 
8018 	/*
8019 	 * If there's no mapping here, or if we fail because the page
8020 	 * wasn't mapped executable, we can ignore this.
8021 	 */
8022 	if (kr != KERN_SUCCESS ||
8023 	    object == NULL ||
8024 	    !(prot & VM_PROT_EXECUTE)) {
8025 		kr = KERN_FAILURE;
8026 		goto done;
8027 	}
8028 
8029 	/*
8030 	 * Chase down any shadow chains to find the actual page.
8031 	 */
8032 	for (;;) {
8033 		/*
8034 		 * See if the page is on the current object.
8035 		 */
8036 		page = vm_page_lookup(object, vm_object_trunc_page(offset));
8037 		if (page != NULL) {
8038 			/* restart the lookup */
8039 			if (page->vmp_restart) {
8040 				vm_object_unlock(object);
8041 				goto restart;
8042 			}
8043 
8044 			/*
8045 			 * If this page is busy, we need to wait for it.
8046 			 */
8047 			if (page->vmp_busy) {
8048 				PAGE_SLEEP(object, page, TRUE);
8049 				vm_object_unlock(object);
8050 				goto restart;
8051 			}
8052 			break;
8053 		}
8054 
8055 		/*
8056 		 * If the object doesn't have the page and
8057 		 * has no shadow, then we can quit.
8058 		 */
8059 		shadow = object->shadow;
8060 		if (shadow == NULL) {
8061 			kr = KERN_FAILURE;
8062 			goto done;
8063 		}
8064 
8065 		/*
8066 		 * Move to the next object
8067 		 */
8068 		offset += object->vo_shadow_offset;
8069 		vm_object_lock(shadow);
8070 		vm_object_unlock(object);
8071 		object = shadow;
8072 		shadow = VM_OBJECT_NULL;
8073 	}
8074 	*ret_object = object;
8075 	*ret_offset = vm_object_trunc_page(offset);
8076 	*ret_page = page;
8077 
8078 done:
8079 	if (kr != KERN_SUCCESS && object != NULL) {
8080 		vm_object_unlock(object);
8081 	}
8082 	return kr;
8083 }
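
/*
 * Usage sketch (illustration only, not part of the original source): on
 * success the lookup returns with the object locked, so a caller must unlock
 * it once it is done with the page.  The function name below is hypothetical.
 */
#if 0 /* illustrative sketch */
static void
vmtc_lookup_usage_example(vm_map_t map, vm_map_offset_t vaddr)
{
	vm_object_t        object = VM_OBJECT_NULL;
	vm_object_offset_t offset = 0;
	vm_page_t          page = VM_PAGE_NULL;

	if (vmtc_revalidate_lookup(map, vaddr, &object, &offset, &page) == KERN_SUCCESS) {
		/* ... inspect page/offset while the object lock is held ... */
		vm_object_unlock(object);
	}
}
#endif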
8084 
8085 /*
8086  * Check if a page is wired, needs extra locking.
8087  */
8088 static bool
8089 is_page_wired(vm_page_t page)
8090 {
8091 	bool result;
8092 	vm_page_lock_queues();
8093 	result = VM_PAGE_WIRED(page);
8094 	vm_page_unlock_queues();
8095 	return result;
8096 }
8097 
8098 /*
8099  * A fatal process error has occurred in the given task.
8100  * Recheck the code signature of the text page at the given
8101  * address to detect text page corruption.
8102  *
8103  * Returns KERN_FAILURE if a page was found to be corrupt
8104  * by failing to match its code signature. KERN_SUCCESS
8105  * means the page is either valid or we don't have the
8106  * information to say it's corrupt.
8107  */
8108 kern_return_t
8109 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8110 {
8111 	kern_return_t          kr;
8112 	vm_map_t               map;
8113 	vm_object_t            object = NULL;
8114 	vm_object_offset_t     offset;
8115 	vm_page_t              page = NULL;
8116 	struct vnode           *vnode;
8117 	uint64_t               *diagnose_buffer = NULL;
8118 	CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8119 	ca_event_t ca_event = NULL;
8120 
8121 	map = task->map;
8122 	if (task->map == NULL) {
8123 		return KERN_SUCCESS;
8124 	}
8125 
8126 	kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page);
8127 	if (kr != KERN_SUCCESS) {
8128 		goto done;
8129 	}
8130 
8131 	/*
8132 	 * The object needs to have a pager.
8133 	 */
8134 	if (object->pager == NULL) {
8135 		goto done;
8136 	}
8137 
8138 	/*
8139 	 * Needs to be a vnode backed page to have a signature.
8140 	 */
8141 	vnode = vnode_pager_lookup_vnode(object->pager);
8142 	if (vnode == NULL) {
8143 		goto done;
8144 	}
8145 
8146 	/*
8147 	 * Checks on the object to see if we should proceed.
8148 	 */
8149 	if (!object->code_signed ||     /* no code signature to check */
8150 	    object->internal ||         /* internal objects aren't signed */
8151 	    object->terminating ||      /* the object and its pages are already going away */
8152 	    !object->pager_ready) {     /* this shouldn't happen, but checking doesn't hurt */
8153 		goto done;
8154 	}
8155 
8156 	/*
8157 	 * Check the code signature of the page in question.
8158 	 */
8159 	vm_page_map_and_validate_cs(object, page);
8160 
8161 	/*
8162 	 * At this point:
8163 	 * vmp_cs_validated |= validated (set if a code signature exists)
8164 	 * vmp_cs_tainted |= tainted (set if code signature violation)
8165 	 * vmp_cs_nx |= nx;  ??
8166 	 *
8167 	 * if vmp_pmapped then have to pmap_disconnect..
8168 	 * other flags to check on object or page?
8169 	 */
8170 	if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8171 #if DEBUG || DEVELOPMENT
8172 		/*
8173 		 * On development builds, a boot-arg can be used to cause
8174 		 * a panic, instead of a quiet repair.
8175 		 */
8176 		if (vmtc_panic_instead) {
8177 			panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8178 		}
8179 #endif /* DEBUG || DEVELOPMENT */
8180 
8181 		/*
8182 		 * We're going to invalidate this page. Grab a copy of it for comparison.
8183 		 */
8184 		ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8185 		event = ca_event->data;
8186 		diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8187 
8188 		/*
8189 		 * Invalidate, i.e. toss, the corrupted page.
8190 		 */
8191 		if (!page->vmp_cleaning &&
8192 		    !page->vmp_laundry &&
8193 		    !page->vmp_fictitious &&
8194 		    !page->vmp_precious &&
8195 		    !page->vmp_absent &&
8196 		    !page->vmp_error &&
8197 		    !page->vmp_dirty &&
8198 		    !is_page_wired(page)) {
8199 			if (page->vmp_pmapped) {
8200 				int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8201 				if (refmod & VM_MEM_MODIFIED) {
8202 					SET_PAGE_DIRTY(page, FALSE);
8203 				}
8204 				if (refmod & VM_MEM_REFERENCED) {
8205 					page->vmp_reference = TRUE;
8206 				}
8207 			}
8208 			/* If the page seems intentionally modified, don't trash it. */
8209 			if (!page->vmp_dirty) {
8210 				VM_PAGE_FREE(page);
8211 			} else {
8212 				event->vmtc_not_eligible = true;
8213 			}
8214 		} else {
8215 			event->vmtc_not_eligible = true;
8216 		}
8217 		vm_object_unlock(object);
8218 		object = VM_OBJECT_NULL;
8219 
8220 		/*
8221 		 * Now try to diagnose the type of failure by faulting
8222 		 * in a new copy and diff'ing it with what we saved.
8223 		 */
8224 		if (diagnose_buffer != NULL) {
8225 			vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8226 		}
8227 #if DEBUG || DEVELOPMENT
8228 		if (corruption_test_va != 0) {
8229 			corruption_test_va = 0;
8230 			event->vmtc_testing = true;
8231 		}
8232 #endif /* DEBUG || DEVELOPMENT */
8233 		kernel_triage_record(thread_tid(current_thread()),
8234 		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8235 		    0 /* arg */);
8236 		CA_EVENT_SEND(ca_event);
8237 		printf("Text page corruption detected for pid %d\n", proc_selfpid());
8238 		++vmtc_total;
8239 		return KERN_FAILURE;
8240 	}
8241 done:
8242 	if (object != NULL) {
8243 		vm_object_unlock(object);
8244 	}
8245 	return KERN_SUCCESS;
8246 }
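
/*
 * Caller sketch (hypothetical, not from this file): a fatal-fault path could
 * use the return value to record whether the faulting instruction page failed
 * its code-signature re-check.  The function name below is made up.
 */
#if 0 /* illustrative sketch */
static void
fatal_fault_check_example(task_t task, vm_map_offset_t pc)
{
	if (revalidate_text_page(task, pc) == KERN_FAILURE) {
		/* the text page failed re-validation and has been tossed */
		printf("fatal fault at 0x%llx involved a corrupted text page\n",
		    (unsigned long long)pc);
	}
}
#endif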
8247 
8248 #if DEBUG || DEVELOPMENT
8249 /*
8250  * For implementing unit tests: ask the pmap to corrupt a text page.
8251  * We have to find the page to get the physical address, then invoke
8252  * the pmap.
8253  */
8254 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8255 
8256 kern_return_t
8257 vm_corrupt_text_addr(uintptr_t va)
8258 {
8259 	task_t                 task = current_task();
8260 	vm_map_t               map;
8261 	kern_return_t          kr = KERN_SUCCESS;
8262 	vm_object_t            object = VM_OBJECT_NULL;
8263 	vm_object_offset_t     offset;
8264 	vm_page_t              page = NULL;
8265 	pmap_paddr_t           pa;
8266 
8267 	map = task->map;
8268 	if (task->map == NULL) {
8269 		printf("corrupt_text_addr: no map\n");
8270 		return KERN_FAILURE;
8271 	}
8272 
8273 	kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page);
8274 	if (kr != KERN_SUCCESS) {
8275 		printf("corrupt_text_addr: page lookup failed\n");
8276 		return kr;
8277 	}
8278 	/* get the physical address to use */
8279 	pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
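	/*
	 * For example, with 16K VM object pages, va 0x100007c8 truncates to
	 * 0x10000000, so the in-page offset 0x7c8 is added to the page's physical
	 * base to target the exact bytes to corrupt.
	 */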
8280 
8281 	/*
8282 	 * Check we have something we can work with.
8283 	 * Due to racing with pageout as we enter the sysctl,
8284 	 * it's theoretically possible for the page to disappear just
8285 	 * before the lookup.
8286 	 *
8287 	 * That's unlikely to happen often, but I've filed radar 72857482
8288 	 * to bubble up the error here to the sysctl result and have the
8289 	 * test not FAIL in that case.
8290 	 */
8291 	if (page->vmp_busy) {
8292 		printf("corrupt_text_addr: vmp_busy\n");
8293 		kr = KERN_FAILURE;
8294 	}
8295 	if (page->vmp_cleaning) {
8296 		printf("corrupt_text_addr: vmp_cleaning\n");
8297 		kr = KERN_FAILURE;
8298 	}
8299 	if (page->vmp_laundry) {
8300 		printf("corrupt_text_addr: vmp_cleaning\n");
8301 		kr = KERN_FAILURE;
8302 	}
8303 	if (page->vmp_fictitious) {
8304 		printf("corrupt_text_addr: vmp_fictitious\n");
8305 		kr = KERN_FAILURE;
8306 	}
8307 	if (page->vmp_precious) {
8308 		printf("corrupt_text_addr: vmp_precious\n");
8309 		kr = KERN_FAILURE;
8310 	}
8311 	if (page->vmp_absent) {
8312 		printf("corrupt_text_addr: vmp_absent\n");
8313 		kr = KERN_FAILURE;
8314 	}
8315 	if (page->vmp_error) {
8316 		printf("corrupt_text_addr: vmp_error\n");
8317 		kr = KERN_FAILURE;
8318 	}
8319 	if (page->vmp_dirty) {
8320 		printf("corrupt_text_addr: vmp_dirty\n");
8321 		kr = KERN_FAILURE;
8322 	}
8323 	if (is_page_wired(page)) {
8324 		printf("corrupt_text_addr: wired\n");
8325 		kr = KERN_FAILURE;
8326 	}
8327 	if (!page->vmp_pmapped) {
8328 		printf("corrupt_text_addr: !vmp_pmapped\n");
8329 		kr = KERN_FAILURE;
8330 	}
8331 
8332 	if (kr == KERN_SUCCESS) {
8333 		printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8334 		kr = pmap_test_text_corruption(pa);
8335 		if (kr != KERN_SUCCESS) {
8336 			printf("corrupt_text_addr: pmap error %d\n", kr);
8337 		} else {
8338 			corruption_test_va = va;
8339 		}
8340 	} else {
8341 		printf("corrupt_text_addr: object %p\n", object);
8342 		printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8343 		printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8344 		printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8345 		printf("corrupt_text_addr: vm_page_t %p\n", page);
8346 		printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8347 		printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8348 	}
8349 
8350 	if (object != VM_OBJECT_NULL) {
8351 		vm_object_unlock(object);
8352 	}
8353 	return kr;
8354 }
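
/*
 * Test-harness sketch (hypothetical, not part of the original source): a
 * DEVELOPMENT/DEBUG-only control, such as a sysctl handler, could hand a
 * user-supplied code address to vm_corrupt_text_addr() and report the result
 * back to the test.  The wrapper name below is made up.
 */
#if 0 /* illustrative sketch */
static int
vmtc_corruption_test_example(uintptr_t user_code_va)
{
	kern_return_t kr = vm_corrupt_text_addr(user_code_va);

	return (kr == KERN_SUCCESS) ? 0 : 1;
}
#endif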
8355 #endif /* DEBUG || DEVELOPMENT */
8356