xref: /xnu-11417.140.69/osfmk/vm/vm_fault.c (revision 43a90889846e00bfb5cf1d255cdc0a701a1e05a4)
1 /*
2  * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm_fault.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Page fault handling module.
63  */
64 
65 #include <libkern/OSAtomic.h>
66 
67 #include <mach/mach_types.h>
68 #include <mach/kern_return.h>
69 #include <mach/message.h>       /* for error codes */
70 #include <mach/vm_param.h>
71 #include <mach/vm_behavior.h>
72 #include <mach/memory_object.h>
73 /* For memory_object_data_{request,unlock} */
74 #include <mach/sdt.h>
75 
76 #include <kern/kern_types.h>
77 #include <kern/host_statistics.h>
78 #include <kern/counter.h>
79 #include <kern/task.h>
80 #include <kern/thread.h>
81 #include <kern/sched_prim.h>
82 #include <kern/host.h>
83 #include <kern/mach_param.h>
84 #include <kern/macro_help.h>
85 #include <kern/zalloc_internal.h>
86 #include <kern/misc_protos.h>
87 #include <kern/policy_internal.h>
88 #include <kern/exc_guard.h>
89 
90 #include <vm/vm_compressor_internal.h>
91 #include <vm/vm_compressor_pager_internal.h>
92 #include <vm/vm_fault_internal.h>
93 #include <vm/vm_map_internal.h>
94 #include <vm/vm_object_internal.h>
95 #include <vm/vm_page_internal.h>
96 #include <vm/vm_kern_internal.h>
97 #include <vm/pmap.h>
98 #include <vm/vm_pageout_internal.h>
99 #include <vm/vm_protos_internal.h>
100 #include <vm/vm_external.h>
101 #include <vm/memory_object.h>
102 #include <vm/vm_purgeable_internal.h>   /* Needed by some vm_page.h macros */
103 #include <vm/vm_shared_region.h>
105 
106 #include <sys/codesign.h>
107 #include <sys/code_signing.h>
108 #include <sys/kdebug.h>
109 #include <sys/kdebug_triage.h>
110 #include <sys/reason.h>
111 #include <sys/signalvar.h>
112 
113 #include <san/kasan.h>
114 #include <libkern/coreanalytics/coreanalytics.h>
115 
116 #define VM_FAULT_CLASSIFY       0
117 
118 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
119 
120 int vm_protect_privileged_from_untrusted = 1;
121 
122 /*
123  * Enforce a maximum number of concurrent PageIns per vm-object to prevent
124  * high-I/O-volume tasks from saturating storage and starving the rest of the
125  * system.
126  *
127  * TODO: This throttling mechanism may be more naturally done by the pager,
128  * filesystem, or storage layers, which will have better information about how
129  * much concurrency the backing store can reasonably support.
130  */
131 TUNABLE(uint16_t, vm_object_pagein_throttle, "vm_object_pagein_throttle", 16);
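/*
 * Illustrative note (not part of the build): a TUNABLE of this form is
 * seeded from a boot-arg of the same name, so the default limit of 16
 * concurrent pageins per object could be overridden at boot with e.g.
 * "vm_object_pagein_throttle=32".
 */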
132 
133 /*
134  * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control; it
135  * kicks in when swap space runs out.  64-bit programs have massive address spaces and can leak enormous amounts
136  * of memory if they're buggy and can run the system completely out of swap space.  If this happens, we
137  * impose a hard throttle on them to prevent them from taking the last bit of memory left.  This helps
138  * keep the UI active so that the user has a chance to kill the offending task before the system
139  * completely hangs.
140  *
141  * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
142  * to tasks that appear to be bloated.  When swap runs out, any task using more than vm_hard_throttle_threshold
143  * will be throttled.  The throttling is done by giving the thread that's trying to demand-zero a page a
144  * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
145  */
146 
147 extern void throttle_lowpri_io(int);
148 
149 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
150 
151 uint64_t vm_hard_throttle_threshold;
152 
153 #if DEBUG || DEVELOPMENT
154 static bool vmtc_panic_instead = false;
155 int panic_object_not_alive = 1;
156 #endif /* DEBUG || DEVELOPMENT */
157 
158 OS_ALWAYS_INLINE
159 boolean_t
160 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
161 {
162 	return vm_wants_task_throttled(current_task()) ||
163 	       ((vm_page_free_count < vm_page_throttle_limit ||
164 	       HARD_THROTTLE_LIMIT_REACHED()) &&
165 	       proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
166 }
167 
168 
169 /*
170  * XXX: For now, vm faults cannot be recursively disabled. If the need for
171  * nested code that disables faults arises, the implementation can be modified
172  * to track a disabled-count.
173  */
174 
175 OS_ALWAYS_INLINE
176 void
177 vm_fault_disable(void)
178 {
179 	thread_t t = current_thread();
180 	assert(!t->th_vm_faults_disabled);
181 	t->th_vm_faults_disabled = true;
182 	act_set_debug_assert();
183 }
184 
185 OS_ALWAYS_INLINE
186 void
187 vm_fault_enable(void)
188 {
189 	thread_t t = current_thread();
190 	assert(t->th_vm_faults_disabled);
191 	t->th_vm_faults_disabled = false;
192 }
193 
194 OS_ALWAYS_INLINE
195 bool
196 vm_fault_get_disabled(void)
197 {
198 	thread_t t = current_thread();
199 	return t->th_vm_faults_disabled;
200 }
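#if 0	/* Illustrative usage sketch only; not compiled, not part of the build */
	/*
	 * A code path that must not take a VM fault can be bracketed with the
	 * helpers above.  Per the comment above, the bracket cannot be nested.
	 */
	vm_fault_disable();
	/* ... work that must not fault ... */
	vm_fault_enable();
#endif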
201 
202 #define HARD_THROTTLE_DELAY     10000   /* 10000 us == 10 ms */
203 #define SOFT_THROTTLE_DELAY     200     /* 200 us == .2 ms */
204 
205 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS   6
206 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC  20000
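/*
 * Illustrative arithmetic (see vm_page_throttled() below): with the defaults
 * above, a thread may create up to
 *   VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
 *   = 6 * 20000 = 120,000 pages
 * before its zero-fill creation rate is compared against the 20,000
 * pages-per-second limit.
 */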
207 
208 
209 #define VM_STAT_DECOMPRESSIONS()        \
210 MACRO_BEGIN                             \
211 	counter_inc(&vm_statistics_decompressions); \
212 	current_thread()->decompressions++; \
213 MACRO_END
214 
215 boolean_t current_thread_aborted(void);
216 
217 /* Forward declarations of internal routines. */
218 static kern_return_t vm_fault_wire_fast(
219 	vm_map_t        map,
220 	vm_map_offset_t va,
221 	vm_prot_t       prot,
222 	vm_tag_t        wire_tag,
223 	vm_map_entry_t  entry,
224 	pmap_t          pmap,
225 	vm_map_offset_t pmap_addr,
226 	ppnum_t         *physpage_p);
227 
228 static kern_return_t vm_fault_internal(
229 	vm_map_t               map,
230 	vm_map_offset_t        vaddr,
231 	vm_prot_t              caller_prot,
232 	vm_tag_t               wire_tag,
233 	pmap_t                 pmap,
234 	vm_map_offset_t        pmap_addr,
235 	ppnum_t                *physpage_p,
236 	vm_object_fault_info_t fault_info);
237 
238 static void vm_fault_copy_cleanup(
239 	vm_page_t       page,
240 	vm_page_t       top_page);
241 
242 static void vm_fault_copy_dst_cleanup(
243 	vm_page_t       page);
244 
245 #if     VM_FAULT_CLASSIFY
246 extern void vm_fault_classify(vm_object_t       object,
247     vm_object_offset_t    offset,
248     vm_prot_t             fault_type);
249 
250 extern void vm_fault_classify_init(void);
251 #endif
252 
253 unsigned long vm_pmap_enter_blocked = 0;
254 unsigned long vm_pmap_enter_retried = 0;
255 
256 unsigned long vm_cs_validates = 0;
257 unsigned long vm_cs_revalidates = 0;
258 unsigned long vm_cs_query_modified = 0;
259 unsigned long vm_cs_validated_dirtied = 0;
260 unsigned long vm_cs_bitmap_validated = 0;
261 
262 #if CODE_SIGNING_MONITOR
263 uint64_t vm_cs_defer_to_csm = 0;
264 uint64_t vm_cs_defer_to_csm_not = 0;
265 #endif /* CODE_SIGNING_MONITOR */
266 
267 extern char *kdp_compressor_decompressed_page;
268 extern addr64_t kdp_compressor_decompressed_page_paddr;
269 extern ppnum_t  kdp_compressor_decompressed_page_ppnum;
270 
271 struct vmrtfr {
272 	int vmrtfr_maxi;
273 	int vmrtfr_curi;
274 	int64_t vmrtf_total;
275 	vm_rtfault_record_t *vm_rtf_records;
276 } vmrtfrs;
277 #define VMRTF_DEFAULT_BUFSIZE (4096)
278 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
279 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
280 
281 static void vm_rtfrecord_lock(void);
282 static void vm_rtfrecord_unlock(void);
283 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
284 
285 extern lck_grp_t vm_page_lck_grp_bucket;
286 extern lck_attr_t vm_page_lck_attr;
287 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
288 
289 #if DEVELOPMENT || DEBUG
290 extern int madvise_free_debug;
291 extern int madvise_free_debug_sometimes;
292 #endif /* DEVELOPMENT || DEBUG */
293 
294 extern int vm_pageout_protect_realtime;
295 
296 #if CONFIG_FREEZE
297 #endif /* CONFIG_FREEZE */
298 
299 /*
300  *	Routine:	vm_fault_init
301  *	Purpose:
302  *		Initialize our private data structures.
303  */
304 __startup_func
305 void
306 vm_fault_init(void)
307 {
308 	int i, vm_compressor_temp;
309 	boolean_t need_default_val = TRUE;
310 	/*
311 	 * Choose a value for the hard throttle threshold based on the amount of ram.  The threshold is
312 	 * computed as a percentage of available memory, and the percentage used is scaled inversely with
313 	 * the amount of memory.  The percentage runs between 10% and 35%.  We use 35% for small memory systems
314 	 * and reduce the value down to 10% for very large memory configurations.  This helps give us a
315 	 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
316 	 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
317 	 */
318 
319 	vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
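	/*
	 * Illustrative examples of the formula above:
	 *   sane_size =  4 GB -> (35 - 4)%  = 31% -> ~1.24 GB threshold
	 *   sane_size = 16 GB -> (35 - 16)% = 19% -> ~3.04 GB threshold
	 *   sane_size = 64 GB -> MIN clamps to 25, so 10% -> ~6.4 GB threshold
	 */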
320 
321 	/*
322 	 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
323 	 */
324 
325 	if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
326 		for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
327 			if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
328 				need_default_val = FALSE;
329 				vm_compressor_mode = vm_compressor_temp;
330 				break;
331 			}
332 		}
333 		if (need_default_val) {
334 			printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
335 		}
336 	}
337 #if CONFIG_FREEZE
338 	if (need_default_val) {
339 		if (osenvironment_is_diagnostics()) {
340 			printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
341 			vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
342 			need_default_val = false;
343 		}
344 	}
345 #endif /* CONFIG_FREEZE */
346 	if (need_default_val) {
347 		/* If no boot arg or incorrect boot arg, try device tree. */
348 		PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
349 	}
350 	printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
351 	vm_config_init();
352 
353 	PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
354 	    &vm_protect_privileged_from_untrusted,
355 	    sizeof(vm_protect_privileged_from_untrusted));
356 
357 #if DEBUG || DEVELOPMENT
358 	(void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
359 
360 	if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
361 		madvise_free_debug = 0;
362 		madvise_free_debug_sometimes = 0;
363 	}
364 
365 	PE_parse_boot_argn("panic_object_not_alive", &panic_object_not_alive, sizeof(panic_object_not_alive));
366 #endif /* DEBUG || DEVELOPMENT */
367 }
368 
369 __startup_func
370 static void
371 vm_rtfault_record_init(void)
372 {
373 	size_t size;
374 
375 	vmrtf_num_records = MAX(vmrtf_num_records, 1);
376 	size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
377 	vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
378 	    ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
379 	vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
380 }
381 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
382 
383 /*
384  *	Routine:	vm_fault_cleanup
385  *	Purpose:
386  *		Clean up the result of vm_fault_page.
387  *	Results:
388  *		The paging reference for "object" is released.
389  *		"object" is unlocked.
390  *		If "top_page" is not null,  "top_page" is
391  *		freed and the paging reference for the object
392  *		containing it is released.
393  *
394  *	In/out conditions:
395  *		"object" must be locked.
396  */
397 void
398 vm_fault_cleanup(
399 	vm_object_t     object,
400 	vm_page_t       top_page)
401 {
402 	vm_object_paging_end(object);
403 	vm_object_unlock(object);
404 
405 	if (top_page != VM_PAGE_NULL) {
406 		object = VM_PAGE_OBJECT(top_page);
407 
408 		vm_object_lock(object);
409 		VM_PAGE_FREE(top_page);
410 		vm_object_paging_end(object);
411 		vm_object_unlock(object);
412 	}
413 }
414 
415 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
416 
417 
418 TUNABLE(bool, vm_page_deactivate_behind, "vm_deactivate_behind", true);
419 TUNABLE(uint32_t, vm_page_deactivate_behind_min_resident_ratio, "vm_deactivate_behind_min_resident_ratio", 3);
420 /*
421  * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
422  */
423 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW     128
424 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER    16              /* don't make this too big... */
425                                                                 /* we use it to size an array on the stack */
426 
427 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
428 
429 #define MAX_SEQUENTIAL_RUN      (1024 * 1024 * 1024)
430 
431 /*
432  * vm_page_is_sequential
433  *
434  * Determine if sequential access is in progress
435  * in accordance with the behavior specified.
436  * Update state to indicate current access pattern.
437  *
438  * object must have at least the shared lock held
439  */
440 static
441 void
442 vm_fault_is_sequential(
443 	vm_object_t             object,
444 	vm_object_offset_t      offset,
445 	vm_behavior_t           behavior)
446 {
447 	vm_object_offset_t      last_alloc;
448 	int                     sequential;
449 	int                     orig_sequential;
450 
451 	last_alloc = object->last_alloc;
452 	sequential = object->sequential;
453 	orig_sequential = sequential;
454 
455 	offset = vm_object_trunc_page(offset);
456 	if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
457 		/* re-faulting in the same page: no change in behavior */
458 		return;
459 	}
460 
461 	switch (behavior) {
462 	case VM_BEHAVIOR_RANDOM:
463 		/*
464 		 * reset indicator of sequential behavior
465 		 */
466 		sequential = 0;
467 		break;
468 
469 	case VM_BEHAVIOR_SEQUENTIAL:
470 		if (offset && last_alloc == offset - PAGE_SIZE_64) {
471 			/*
472 			 * advance indicator of sequential behavior
473 			 */
474 			if (sequential < MAX_SEQUENTIAL_RUN) {
475 				sequential += PAGE_SIZE;
476 			}
477 		} else {
478 			/*
479 			 * reset indicator of sequential behavior
480 			 */
481 			sequential = 0;
482 		}
483 		break;
484 
485 	case VM_BEHAVIOR_RSEQNTL:
486 		if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
487 			/*
488 			 * advance indicator of sequential behavior
489 			 */
490 			if (sequential > -MAX_SEQUENTIAL_RUN) {
491 				sequential -= PAGE_SIZE;
492 			}
493 		} else {
494 			/*
495 			 * reset indicator of sequential behavior
496 			 */
497 			sequential = 0;
498 		}
499 		break;
500 
501 	case VM_BEHAVIOR_DEFAULT:
502 	default:
503 		if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
504 			/*
505 			 * advance indicator of sequential behavior
506 			 */
507 			if (sequential < 0) {
508 				sequential = 0;
509 			}
510 			if (sequential < MAX_SEQUENTIAL_RUN) {
511 				sequential += PAGE_SIZE;
512 			}
513 		} else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
514 			/*
515 			 * advance indicator of sequential behavior
516 			 */
517 			if (sequential > 0) {
518 				sequential = 0;
519 			}
520 			if (sequential > -MAX_SEQUENTIAL_RUN) {
521 				sequential -= PAGE_SIZE;
522 			}
523 		} else {
524 			/*
525 			 * reset indicator of sequential behavior
526 			 */
527 			sequential = 0;
528 		}
529 		break;
530 	}
531 	if (sequential != orig_sequential) {
532 		if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
533 			/*
534 			 * if someone else has already updated object->sequential
535 			 * don't bother trying to update it or object->last_alloc
536 			 */
537 			return;
538 		}
539 	}
540 	/*
541 	 * I'd like to do this with an OSCompareAndSwap64, but that
542 	 * doesn't exist for PPC...  however, it shouldn't matter
543 	 * that much... last_alloc is maintained so that we can determine
544 	 * if a sequential access pattern is taking place... if only
545 	 * one thread is banging on this object, no problem with the unprotected
546 	 * update... if 2 or more threads are banging away, we run the risk of
547 	 * someone seeing a mangled update... however, in the face of multiple
548 	 * accesses, no sequential access pattern can develop anyway, so we
549 	 * haven't lost any real info.
550 	 */
551 	object->last_alloc = offset;
552 }
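/*
 * Illustrative trace of the detector above: with VM_BEHAVIOR_DEFAULT, each
 * fault exactly one page above (or below) the previous one grows the
 * "sequential" counter by PAGE_SIZE in the corresponding direction, any
 * non-adjacent fault resets it to 0, and the counter saturates at
 * +/- MAX_SEQUENTIAL_RUN (1 GB).  vm_fault_deactivate_behind() below consumes
 * this counter to decide when to start deactivating pages behind the run.
 */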
553 
554 #if DEVELOPMENT || DEBUG
555 SCALABLE_COUNTER_DEFINE(vm_page_deactivate_behind_count);
556 #endif /* DEVELOPMENT || DEBUG */
557 
558 /*
559  * @func vm_fault_deactivate_behind
560  *
561  * @description
562  * Determine if sequential access is in progress
563  * in accordance with the behavior specified.  If
564  * so, compute a potential page to deactivate and
565  * deactivate it.
566  *
567  * object must be locked.
568  *
569  * @returns the number of deactivated pages
570  */
571 static
572 uint32_t
573 vm_fault_deactivate_behind(
574 	vm_object_t             object,
575 	vm_object_offset_t      offset,
576 	vm_behavior_t           behavior)
577 {
578 	uint32_t        pages_in_run = 0;
579 	uint32_t        max_pages_in_run = 0;
580 	int32_t         sequential_run;
581 	vm_behavior_t   sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
582 	vm_object_offset_t      run_offset = 0;
583 	vm_object_offset_t      pg_offset = 0;
584 	vm_page_t       m;
585 	vm_page_t       page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
586 
587 #if TRACEFAULTPAGE
588 	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
589 #endif
590 	if (is_kernel_object(object) ||
591 	    !vm_page_deactivate_behind ||
592 	    (vm_object_trunc_page(offset) != offset) ||
593 	    (object->resident_page_count <
594 	    vm_page_active_count / vm_page_deactivate_behind_min_resident_ratio)) {
595 		/*
596 		 * Do not deactivate pages from the kernel object: they
597 		 * are not intended to become pageable.  We also bail out
598 		 * if we've disabled the deactivate-behind mechanism,
599 		 * or if we are dealing with an offset that is not aligned to
600 		 * the system's PAGE_SIZE because in that case we will
601 		 * handle the deactivation on the aligned offset and, thus,
602 		 * the full PAGE_SIZE page once. This helps us avoid the redundant
603 		 * deactivates and the extra faults.
604 		 *
605 		 * Objects need only participate in backwards
606 		 * deactivation if they are exceedingly large (i.e. their
607 		 * resident pages are liable to comprise a substantially large
608 		 * portion of the active queue and push out the rest of the
609 		 * system's working set).
610 		 */
611 		return 0;
612 	}
613 
614 	KDBG_FILTERED(VMDBG_CODE(DBG_VM_FAULT_DEACTIVATE_BEHIND) | DBG_FUNC_START,
615 	    VM_KERNEL_ADDRHIDE(object), offset, behavior);
616 
617 	if ((sequential_run = object->sequential)) {
618 		if (sequential_run < 0) {
619 			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
620 			sequential_run = 0 - sequential_run;
621 		} else {
622 			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
623 		}
624 	}
625 	switch (behavior) {
626 	case VM_BEHAVIOR_RANDOM:
627 		break;
628 	case VM_BEHAVIOR_SEQUENTIAL:
629 		if (sequential_run >= (int)PAGE_SIZE) {
630 			run_offset = 0 - PAGE_SIZE_64;
631 			max_pages_in_run = 1;
632 		}
633 		break;
634 	case VM_BEHAVIOR_RSEQNTL:
635 		if (sequential_run >= (int)PAGE_SIZE) {
636 			run_offset = PAGE_SIZE_64;
637 			max_pages_in_run = 1;
638 		}
639 		break;
640 	case VM_BEHAVIOR_DEFAULT:
641 	default:
642 	{       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
643 
644 		/*
645 		 * determine if the run of sequential access has been
646 		 * long enough on an object with default access behavior
647 		 * to consider it for deactivation
648 		 */
649 		if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
650 			/*
651 			 * the comparisons between offset and behind are done
652 			 * in this kind of odd fashion in order to prevent wrap around
653 			 * at the end points
654 			 */
655 			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
656 				if (offset >= behind) {
657 					run_offset = 0 - behind;
658 					pg_offset = PAGE_SIZE_64;
659 					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
660 				}
661 			} else {
662 				if (offset < -behind) {
663 					run_offset = behind;
664 					pg_offset = 0 - PAGE_SIZE_64;
665 					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
666 				}
667 			}
668 		}
669 		break;}
670 	}
671 	for (unsigned n = 0; n < max_pages_in_run; n++) {
672 		m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
673 
674 		if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache &&
675 		    (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
676 		    !vm_page_is_fictitious(m) && !m->vmp_absent) {
677 			page_run[pages_in_run++] = m;
678 
679 			/*
680 			 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
681 			 *
682 			 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
683 			 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
684 			 * new reference happens. If no further references happen on the page after that remote TLB flushes,
685 			 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
686 			 * by pageout_scan, which is just fine since the last reference would have happened quite far
687 			 * in the past (TLB caches don't hang around for very long), and of course could just as easily
688 			 * have happened before we did the deactivate_behind.
689 			 */
690 			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
691 		}
692 	}
693 
694 	if (pages_in_run) {
695 		vm_page_lockspin_queues();
696 
697 		for (unsigned n = 0; n < pages_in_run; n++) {
698 			m = page_run[n];
699 
700 			vm_page_deactivate_internal(m, FALSE);
701 
702 #if DEVELOPMENT || DEBUG
703 			counter_inc(&vm_page_deactivate_behind_count);
704 #endif /* DEVELOPMENT || DEBUG */
705 
706 #if TRACEFAULTPAGE
707 			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
708 #endif
709 		}
710 		vm_page_unlock_queues();
711 	}
712 
713 	KDBG_FILTERED(VMDBG_CODE(DBG_VM_FAULT_DEACTIVATE_BEHIND) | DBG_FUNC_END,
714 	    pages_in_run);
715 
716 	return pages_in_run;
717 }
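/*
 * Illustrative behavior of the routine above: with VM_BEHAVIOR_DEFAULT, once
 * a forward sequential run reaches VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW (128)
 * pages, every further VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER (16) pages of the
 * run deactivate a 16-page cluster starting 128 pages behind the faulting
 * offset, so the trailing edge of a large streaming read is pushed toward the
 * inactive queue instead of diluting the active queue.
 */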
718 
719 
720 #if (DEVELOPMENT || DEBUG)
721 uint32_t        vm_page_creation_throttled_hard = 0;
722 uint32_t        vm_page_creation_throttled_soft = 0;
723 uint64_t        vm_page_creation_throttle_avoided = 0;
724 #endif /* DEVELOPMENT || DEBUG */
725 
726 static int
727 vm_page_throttled(boolean_t page_kept)
728 {
729 	clock_sec_t     elapsed_sec;
730 	clock_sec_t     tv_sec;
731 	clock_usec_t    tv_usec;
732 	task_t          curtask = current_task_early();
733 
734 	thread_t thread = current_thread();
735 
736 	if (thread->options & TH_OPT_VMPRIV) {
737 		return 0;
738 	}
739 
740 	if (curtask && !curtask->active) {
741 		return 0;
742 	}
743 
744 	if (thread->t_page_creation_throttled) {
745 		thread->t_page_creation_throttled = 0;
746 
747 		if (page_kept == FALSE) {
748 			goto no_throttle;
749 		}
750 	}
751 	if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
752 #if (DEVELOPMENT || DEBUG)
753 		thread->t_page_creation_throttled_hard++;
754 		OSAddAtomic(1, &vm_page_creation_throttled_hard);
755 #endif /* DEVELOPMENT || DEBUG */
756 		return HARD_THROTTLE_DELAY;
757 	}
758 
759 	if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
760 	    thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
761 		if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
762 #if (DEVELOPMENT || DEBUG)
763 			OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
764 #endif
765 			goto no_throttle;
766 		}
767 		clock_get_system_microtime(&tv_sec, &tv_usec);
768 
769 		elapsed_sec = tv_sec - thread->t_page_creation_time;
770 
771 		if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
772 		    (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
773 			if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
774 				/*
775 				 * we'll reset our stats to give a well behaved app
776 				 * that was unlucky enough to accumulate a bunch of pages
777 				 * over a long period of time a chance to get out of
778 				 * the throttled state... we reset the counter and timestamp
779 				 * so that if it stays under the rate limit for the next second
780 				 * it will be back in our good graces... if it exceeds it, it
781 				 * will remain in the throttled state
782 				 */
783 				thread->t_page_creation_time = tv_sec;
784 				thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
785 			}
786 			VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
787 
788 			thread->t_page_creation_throttled = 1;
789 
790 			if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
791 #if (DEVELOPMENT || DEBUG)
792 				thread->t_page_creation_throttled_hard++;
793 				OSAddAtomic(1, &vm_page_creation_throttled_hard);
794 #endif /* DEVELOPMENT || DEBUG */
795 				return HARD_THROTTLE_DELAY;
796 			} else {
797 #if (DEVELOPMENT || DEBUG)
798 				thread->t_page_creation_throttled_soft++;
799 				OSAddAtomic(1, &vm_page_creation_throttled_soft);
800 #endif /* DEVELOPMENT || DEBUG */
801 				return SOFT_THROTTLE_DELAY;
802 			}
803 		}
804 		thread->t_page_creation_time = tv_sec;
805 		thread->t_page_creation_count = 0;
806 	}
807 no_throttle:
808 	thread->t_page_creation_count++;
809 
810 	return 0;
811 }
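/*
 * Illustrative summary of the return values above: 0 means no throttle,
 * while SOFT_THROTTLE_DELAY (200 us) and HARD_THROTTLE_DELAY (10 ms) are the
 * delays, in microseconds, that the caller applies via
 * __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__() below before retrying the fault.
 */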
812 
813 extern boolean_t vm_pageout_running;
814 static __attribute__((noinline, not_tail_called)) void
815 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(
816 	int throttle_delay)
817 {
818 	/* make sure vm_pageout_scan() gets to work while we're throttled */
819 	if (!vm_pageout_running) {
820 		thread_wakeup((event_t)&vm_page_free_wanted);
821 	}
822 	delay(throttle_delay);
823 }
824 
825 
826 /*
827  * check for various conditions that would
828  * prevent us from creating a ZF page...
829  * cleanup is based on being called from vm_fault_page
830  *
831  * object must be locked
832  * object == m->vmp_object
833  */
834 static vm_fault_return_t
835 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
836 {
837 	int throttle_delay;
838 
839 	if (object->shadow_severed ||
840 	    VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
841 		/*
842 		 * Either:
843 		 * 1. the shadow chain was severed,
844 		 * 2. the purgeable object is volatile or empty and is marked
845 		 *    to fault on access while volatile.
846 		 * Just have to return an error at this point
847 		 */
848 		if (m != VM_PAGE_NULL) {
849 			VM_PAGE_FREE(m);
850 		}
851 		vm_fault_cleanup(object, first_m);
852 
853 		thread_interrupt_level(interruptible_state);
854 
855 		if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
856 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
857 		}
858 
859 		if (object->shadow_severed) {
860 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
861 		}
862 		return VM_FAULT_MEMORY_ERROR;
863 	}
864 	if (page_throttle == TRUE) {
865 		if ((throttle_delay = vm_page_throttled(FALSE))) {
866 			/*
867 			 * we're throttling zero-fills...
868 			 * treat this as if we couldn't grab a page
869 			 */
870 			if (m != VM_PAGE_NULL) {
871 				VM_PAGE_FREE(m);
872 			}
873 			vm_fault_cleanup(object, first_m);
874 
875 			VM_DEBUG_EVENT(vmf_check_zfdelay, DBG_VM_FAULT_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
876 
877 			__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
878 
879 			if (current_thread_aborted()) {
880 				thread_interrupt_level(interruptible_state);
881 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
882 				return VM_FAULT_INTERRUPTED;
883 			}
884 			thread_interrupt_level(interruptible_state);
885 
886 			return VM_FAULT_MEMORY_SHORTAGE;
887 		}
888 	}
889 	return VM_FAULT_SUCCESS;
890 }
891 
892 /*
893  * Clear the code signing bits on the given page_t
894  */
895 static void
896 vm_fault_cs_clear(vm_page_t m)
897 {
898 	m->vmp_cs_validated = VMP_CS_ALL_FALSE;
899 	m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
900 	m->vmp_cs_nx = VMP_CS_ALL_FALSE;
901 }
902 
903 /*
904  * Enqueues the given page on the throttled queue.
905  * The caller must hold the vm_page_queue_lock and it will be held on return.
906  */
907 static void
908 vm_fault_enqueue_throttled_locked(vm_page_t m)
909 {
910 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
911 	assert(!VM_PAGE_WIRED(m));
912 
913 	/*
914 	 * can't be on the pageout queue since we don't
915 	 * have a pager to try and clean to
916 	 */
917 	vm_page_queues_remove(m, TRUE);
918 	vm_page_check_pageable_safe(m);
919 	vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
920 	m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
921 	vm_page_throttled_count++;
922 }
923 
924 /*
925  * do the work to zero fill a page and
926  * inject it into the correct paging queue
927  *
928  * m->vmp_object must be locked
929  * page queue lock must NOT be held
930  */
931 static int
932 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
933 {
934 	int my_fault = DBG_ZERO_FILL_FAULT;
935 	vm_object_t     object;
936 
937 	object = VM_PAGE_OBJECT(m);
938 
939 	/*
940 	 * This is a zero-fill page fault...
941 	 *
942 	 * Checking the page lock is a waste of
943 	 * time;  this page was absent, so
944 	 * it can't be page locked by a pager.
945 	 *
946 	 * we also consider it undefined
947 	 * with respect to instruction
948 	 * execution.  i.e. it is the responsibility
949 	 * of higher layers to call for an instruction
950 	 * sync after changing the contents and before
951 	 * sending a program into this area.  We
952 	 * choose this approach for performance
953 	 */
954 	vm_fault_cs_clear(m);
955 	m->vmp_pmapped = TRUE;
956 
957 	if (no_zero_fill == TRUE) {
958 		my_fault = DBG_NZF_PAGE_FAULT;
959 
960 		if (m->vmp_absent && m->vmp_busy) {
961 			return my_fault;
962 		}
963 	} else {
964 		vm_page_zero_fill(
965 			m
966 			);
967 
968 		counter_inc(&vm_statistics_zero_fill_count);
969 		DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
970 	}
971 	assert(!m->vmp_laundry);
972 	assert(!is_kernel_object(object));
973 	//assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
974 	if (!VM_DYNAMIC_PAGING_ENABLED() &&
975 	    (object->purgable == VM_PURGABLE_DENY ||
976 	    object->purgable == VM_PURGABLE_NONVOLATILE ||
977 	    object->purgable == VM_PURGABLE_VOLATILE)) {
978 		vm_page_lockspin_queues();
979 		if (!VM_DYNAMIC_PAGING_ENABLED()) {
980 			vm_fault_enqueue_throttled_locked(m);
981 		}
982 		vm_page_unlock_queues();
983 	}
984 	return my_fault;
985 }
986 
987 /*
988  * Recovery actions for vm_fault_page
989  */
990 __attribute__((always_inline))
991 static void
992 vm_fault_page_release_page(
993 	vm_page_t m,                    /* Page to release */
994 	bool *clear_absent_on_error /* IN/OUT */)
995 {
996 	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);
997 	if (!VM_PAGE_PAGEABLE(m)) {
998 		vm_page_lockspin_queues();
999 		if (*clear_absent_on_error && m->vmp_absent) {
1000 			vm_page_zero_fill(
1001 				m
1002 				);
1003 			counter_inc(&vm_statistics_zero_fill_count);
1004 			DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
1005 			m->vmp_absent = false;
1006 		}
1007 		if (!VM_PAGE_PAGEABLE(m)) {
1008 			if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1009 				vm_page_deactivate(m);
1010 			} else {
1011 				vm_page_activate(m);
1012 			}
1013 		}
1014 		vm_page_unlock_queues();
1015 	}
1016 	*clear_absent_on_error = false;
1017 }
1018 /*
1019  *	Routine:	vm_fault_page
1020  *	Purpose:
1021  *		Find the resident page for the virtual memory
1022  *		specified by the given virtual memory object
1023  *		and offset.
1024  *	Additional arguments:
1025  *		The required permissions for the page are given
1026  *		in "fault_type".  Desired permissions are included
1027  *		in "protection".
1028  *		fault_info is passed along to determine pagein cluster
1029  *		limits... it contains the expected reference pattern,
1030  *		cluster size if available, etc...
1031  *
1032  *		If the desired page is known to be resident (for
1033  *		example, because it was previously wired down), asserting
1034  *		the "unwiring" parameter will speed the search.
1035  *
1036  *		If the operation can be interrupted (by thread_abort
1037  *		or thread_terminate), then the "interruptible"
1038  *		parameter should be asserted.
1039  *
1040  *	Results:
1041  *		The page containing the proper data is returned
1042  *		in "result_page".
1043  *
1044  *	In/out conditions:
1045  *		The source object must be locked and referenced,
1046  *		and must donate one paging reference.  The reference
1047  *		is not affected.  The paging reference and lock are
1048  *		consumed.
1049  *
1050  *		If the call succeeds, the object in which "result_page"
1051  *		resides is left locked and holding a paging reference.
1052  *		If this is not the original object, a busy page in the
1053  *		original object is returned in "top_page", to prevent other
1054  *		callers from pursuing this same data, along with a paging
1055  *		reference for the original object.  The "top_page" should
1056  *		be destroyed when this guarantee is no longer required.
1057  *		The "result_page" is also left busy.  It is not removed
1058  *		from the pageout queues.
1059  *	Special Case:
1060  *		A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
1061  *		fault succeeded but there's no VM page (i.e. the VM object
1062  *              does not actually hold VM pages, but device memory or
1063  *		large pages).  The object is still locked and we still hold a
1064  *		paging_in_progress reference.
1065  */
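/*
 * Minimal caller sketch for the protocol described above (illustrative only;
 * "object", "offset", "prot" and "fault_info" are hypothetical locals, and
 * retry/error handling, including VM_FAULT_SUCCESS_NO_PAGE, is omitted):
 */
#if 0	/* not compiled, not part of the build */
	vm_page_t         result_page, top_page;
	kern_return_t     error_code;
	vm_fault_return_t fr;

	vm_object_lock(object);
	vm_object_paging_begin(object);         /* donate one paging reference */
	fr = vm_fault_page(object, offset, VM_PROT_READ,
	    FALSE,                              /* must_be_resident */
	    FALSE,                              /* caller_lookup */
	    &prot, &result_page, &top_page,
	    NULL,                               /* type_of_fault */
	    &error_code, FALSE, &fault_info);
	if (fr == VM_FAULT_SUCCESS && result_page != VM_PAGE_NULL) {
		/* result_page is busy; its object is locked with a paging ref */
		/* ... use the page ... */
		vm_page_wakeup_done(VM_PAGE_OBJECT(result_page), result_page);
		vm_fault_cleanup(VM_PAGE_OBJECT(result_page), top_page);
	}
#endif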
1066 unsigned int vm_fault_page_blocked_access = 0;
1067 unsigned int vm_fault_page_forced_retry = 0;
1068 
1069 vm_fault_return_t
1070 vm_fault_page(
1071 	/* Arguments: */
1072 	vm_object_t     first_object,   /* Object to begin search */
1073 	vm_object_offset_t first_offset,        /* Offset into object */
1074 	vm_prot_t       fault_type,     /* What access is requested */
1075 	boolean_t       must_be_resident,/* Must page be resident? */
1076 	boolean_t       caller_lookup,  /* caller looked up page */
1077 	/* Modifies in place: */
1078 	vm_prot_t       *protection,    /* Protection for mapping */
1079 	vm_page_t       *result_page,   /* Page found, if successful */
1080 	/* Returns: */
1081 	vm_page_t       *top_page,      /* Page in top object, if
1082                                          * not result_page.  */
1083 	int             *type_of_fault, /* if non-null, fill in with type of fault
1084                                          * COW, zero-fill, etc... returned in trace point */
1085 	/* More arguments: */
1086 	kern_return_t   *error_code,    /* code if page is in error */
1087 	boolean_t       no_zero_fill,   /* don't zero fill absent pages */
1088 	vm_object_fault_info_t fault_info)
1089 {
1090 	vm_page_t               m;
1091 	vm_object_t             object;
1092 	vm_object_offset_t      offset;
1093 	vm_page_t               first_m;
1094 	vm_object_t             next_object;
1095 	vm_object_t             copy_object;
1096 	boolean_t               look_for_page;
1097 	boolean_t               force_fault_retry = FALSE;
1098 	vm_prot_t               access_required = fault_type;
1099 	vm_prot_t               wants_copy_flag;
1100 	kern_return_t           wait_result;
1101 	wait_interrupt_t        interruptible_state;
1102 	boolean_t               data_already_requested = FALSE;
1103 	vm_behavior_t           orig_behavior;
1104 	vm_size_t               orig_cluster_size;
1105 	vm_fault_return_t       error;
1106 	int                     my_fault;
1107 	uint32_t                try_failed_count;
1108 	wait_interrupt_t        interruptible; /* how may fault be interrupted? */
1109 	int                     external_state = VM_EXTERNAL_STATE_UNKNOWN;
1110 	memory_object_t         pager;
1111 	vm_fault_return_t       retval;
1112 	vm_grab_options_t       grab_options;
1113 	bool                    clear_absent_on_error = false;
1114 
1115 /*
1116  * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1117  * marked as paged out in the compressor pager or the pager doesn't exist.
1118  * Note also that if the pager for an internal object
1119  * has not been created, the pager is not invoked regardless of the value
1120  * of MUST_ASK_PAGER().
1121  *
1122  * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1123  * is marked as paged out in the compressor pager.
1124  * PAGED_OUT() is used to determine if a page has already been pushed
1125  * into a copy object in order to avoid a redundant page out operation.
1126  */
1127 #define MUST_ASK_PAGER(o, f, s)                                 \
1128 	((s = vm_object_compressor_pager_state_get((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1129 
1130 #define PAGED_OUT(o, f) \
1131 	(vm_object_compressor_pager_state_get((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
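/*
 * Illustrative truth table for the two macros above:
 *   VM_EXTERNAL_STATE_ABSENT   -> MUST_ASK_PAGER == FALSE, PAGED_OUT == FALSE
 *   VM_EXTERNAL_STATE_UNKNOWN  -> MUST_ASK_PAGER == TRUE,  PAGED_OUT == FALSE
 *   VM_EXTERNAL_STATE_EXISTS   -> MUST_ASK_PAGER == TRUE,  PAGED_OUT == TRUE
 */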
1132 
1133 #if TRACEFAULTPAGE
1134 	dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1135 #endif
1136 
1137 	interruptible = fault_info->interruptible;
1138 	interruptible_state = thread_interrupt_level(interruptible);
1139 
1140 	/*
1141 	 *	INVARIANTS (through entire routine):
1142 	 *
1143 	 *	1)	At all times, we must either have the object
1144 	 *		lock or a busy page in some object to prevent
1145 	 *		some other thread from trying to bring in
1146 	 *		the same page.
1147 	 *
1148 	 *		Note that we cannot hold any locks during the
1149 	 *		pager access or when waiting for memory, so
1150 	 *		we use a busy page then.
1151 	 *
1152 	 *	2)	To prevent another thread from racing us down the
1153 	 *		shadow chain and entering a new page in the top
1154 	 *		object before we do, we must keep a busy page in
1155 	 *		the top object while following the shadow chain.
1156 	 *
1157 	 *	3)	We must increment paging_in_progress on any object
1158 	 *		for which we have a busy page before dropping
1159 	 *		the object lock
1160 	 *
1161 	 *	4)	We leave busy pages on the pageout queues.
1162 	 *		If the pageout daemon comes across a busy page,
1163 	 *		it will remove the page from the pageout queues.
1164 	 */
1165 
1166 	object = first_object;
1167 	offset = first_offset;
1168 	first_m = VM_PAGE_NULL;
1169 	access_required = fault_type;
1170 
1171 	/*
1172 	 * default type of fault
1173 	 */
1174 	my_fault = DBG_CACHE_HIT_FAULT;
1175 	thread_pri_floor_t token;
1176 	bool    drop_floor = false;
1177 
1178 	while (TRUE) {
1179 #if TRACEFAULTPAGE
1180 		dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
1181 #endif
1182 
1183 		grab_options = vm_page_grab_options_for_object(object);
1184 
1185 		if (!object->alive) {
1186 			/*
1187 			 * object is no longer valid
1188 			 * clean up and return error
1189 			 */
1190 #if DEVELOPMENT || DEBUG
1191 			printf("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
1192 			if (panic_object_not_alive) {
1193 				panic("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
1194 			}
1195 #endif /* DEVELOPMENT || DEBUG */
1196 			vm_fault_cleanup(object, first_m);
1197 			thread_interrupt_level(interruptible_state);
1198 
1199 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1200 			return VM_FAULT_MEMORY_ERROR;
1201 		}
1202 
1203 		if (!object->pager_created && object->phys_contiguous) {
1204 			/*
1205 			 * A physically-contiguous object without a pager:
1206 			 * must be a "large page" object.  We do not deal
1207 			 * with VM pages for this object.
1208 			 */
1209 			caller_lookup = FALSE;
1210 			m = VM_PAGE_NULL;
1211 			goto phys_contig_object;
1212 		}
1213 
1214 		if (object->blocked_access) {
1215 			/*
1216 			 * Access to this VM object has been blocked.
1217 			 * Replace our "paging_in_progress" reference with
1218 			 * an "activity_in_progress" reference and wait for
1219 			 * access to be unblocked.
1220 			 */
1221 			caller_lookup = FALSE; /* no longer valid after sleep */
1222 			vm_object_activity_begin(object);
1223 			vm_object_paging_end(object);
1224 			while (object->blocked_access) {
1225 				vm_object_sleep(object,
1226 				    VM_OBJECT_EVENT_UNBLOCKED,
1227 				    THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
1228 			}
1229 			vm_fault_page_blocked_access++;
1230 			vm_object_paging_begin(object);
1231 			vm_object_activity_end(object);
1232 		}
1233 
1234 		/*
1235 		 * See whether the page at 'offset' is resident
1236 		 */
1237 		if (caller_lookup == TRUE) {
1238 			/*
1239 			 * The caller has already looked up the page
1240 			 * and gave us the result in "result_page".
1241 			 * We can use this for the first lookup but
1242 			 * it loses its validity as soon as we unlock
1243 			 * the object.
1244 			 */
1245 			m = *result_page;
1246 			caller_lookup = FALSE; /* no longer valid after that */
1247 		} else {
1248 			m = vm_page_lookup(object, vm_object_trunc_page(offset));
1249 		}
1250 #if TRACEFAULTPAGE
1251 		dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1252 #endif
1253 		if (m != VM_PAGE_NULL) {
1254 			if (m->vmp_busy) {
1255 				/*
1256 				 * The page is being brought in,
1257 				 * wait for it and then retry.
1258 				 */
1259 #if TRACEFAULTPAGE
1260 				dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1261 #endif
1262 				if (fault_info->fi_no_sleep) {
1263 					/* Caller has requested not to sleep on busy pages */
1264 					vm_fault_cleanup(object, first_m);
1265 					thread_interrupt_level(interruptible_state);
1266 					return VM_FAULT_BUSY;
1267 				}
1268 
1269 				wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_DEFAULT);
1270 
1271 				if (wait_result != THREAD_AWAKENED) {
1272 					vm_fault_cleanup(object, first_m);
1273 					thread_interrupt_level(interruptible_state);
1274 
1275 					if (wait_result == THREAD_RESTART) {
1276 						return VM_FAULT_RETRY;
1277 					} else {
1278 						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1279 						return VM_FAULT_INTERRUPTED;
1280 					}
1281 				}
1282 				continue;
1283 			}
1284 			if (m->vmp_laundry) {
1285 				m->vmp_free_when_done = FALSE;
1286 
1287 				if (!m->vmp_cleaning) {
1288 					vm_pageout_steal_laundry(m, FALSE);
1289 				}
1290 			}
1291 			vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
1292 			if (vm_page_is_guard(m)) {
1293 				/*
1294 				 * Guard page: off limits !
1295 				 */
1296 				if (fault_type == VM_PROT_NONE) {
1297 					/*
1298 					 * The fault is not requesting any
1299 					 * access to the guard page, so it must
1300 					 * be just to wire or unwire it.
1301 					 * Let's pretend it succeeded...
1302 					 */
1303 					m->vmp_busy = TRUE;
1304 					*result_page = m;
1305 					assert(first_m == VM_PAGE_NULL);
1306 					*top_page = first_m;
1307 					if (type_of_fault) {
1308 						*type_of_fault = DBG_GUARD_FAULT;
1309 					}
1310 					thread_interrupt_level(interruptible_state);
1311 					return VM_FAULT_SUCCESS;
1312 				} else {
1313 					/*
1314 					 * The fault requests access to the
1315 					 * guard page: let's deny that !
1316 					 */
1317 					vm_fault_cleanup(object, first_m);
1318 					thread_interrupt_level(interruptible_state);
1319 					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1320 					return VM_FAULT_MEMORY_ERROR;
1321 				}
1322 			}
1323 
1324 
1325 			if (m->vmp_error) {
1326 				/*
1327 				 * The page is in error, give up now.
1328 				 */
1329 #if TRACEFAULTPAGE
1330 				dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code);      /* (TEST/DEBUG) */
1331 #endif
1332 				if (error_code) {
1333 					*error_code = KERN_MEMORY_ERROR;
1334 				}
1335 				VM_PAGE_FREE(m);
1336 
1337 				vm_fault_cleanup(object, first_m);
1338 				thread_interrupt_level(interruptible_state);
1339 
1340 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1341 				return VM_FAULT_MEMORY_ERROR;
1342 			}
1343 			if (m->vmp_restart) {
1344 				/*
1345 				 * If the pager wants us to restart
1346 				 * at the top of the chain,
1347 				 * typically because it has moved the
1348 				 * page to another pager, then do so.
1349 				 */
1350 #if TRACEFAULTPAGE
1351 				dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1352 #endif
1353 				VM_PAGE_FREE(m);
1354 
1355 				vm_fault_cleanup(object, first_m);
1356 				thread_interrupt_level(interruptible_state);
1357 
1358 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1359 				return VM_FAULT_RETRY;
1360 			}
1361 			if (m->vmp_absent) {
1362 				/*
1363 				 * The page isn't busy, but is absent,
1364 				 * therefore it's deemed "unavailable".
1365 				 *
1366 				 * Remove the non-existent page (unless it's
1367 				 * in the top object) and move on down to the
1368 				 * next object (if there is one).
1369 				 */
1370 #if TRACEFAULTPAGE
1371 				dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow);  /* (TEST/DEBUG) */
1372 #endif
1373 				next_object = object->shadow;
1374 
1375 				if (next_object == VM_OBJECT_NULL) {
1376 					/*
1377 					 * Absent page at bottom of shadow
1378 					 * chain; zero fill the page we left
1379 					 * busy in the first object, and free
1380 					 * the absent page.
1381 					 */
1382 					assert(!must_be_resident);
1383 
1384 					/*
1385 					 * check for any conditions that prevent
1386 					 * us from creating a new zero-fill page
1387 					 * vm_fault_check will do all of the
1388 					 * fault cleanup in the case of an error condition
1389 					 * including resetting the thread_interrupt_level
1390 					 */
1391 					error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1392 
1393 					if (error != VM_FAULT_SUCCESS) {
1394 						return error;
1395 					}
1396 
1397 					if (object != first_object) {
1398 						/*
1399 						 * free the absent page we just found
1400 						 */
1401 						VM_PAGE_FREE(m);
1402 
1403 						/*
1404 						 * drop reference and lock on current object
1405 						 */
1406 						vm_object_paging_end(object);
1407 						vm_object_unlock(object);
1408 
1409 						/*
1410 						 * grab the original page we
1411 						 * 'soldered' in place and
1412 						 * retake lock on 'first_object'
1413 						 */
1414 						m = first_m;
1415 						first_m = VM_PAGE_NULL;
1416 
1417 						object = first_object;
1418 						offset = first_offset;
1419 
1420 						vm_object_lock(object);
1421 					} else {
1422 						/*
1423 						 * we're going to use the absent page we just found
1424 						 * so convert it to a 'busy' page
1425 						 */
1426 						m->vmp_absent = FALSE;
1427 						m->vmp_busy = TRUE;
1428 					}
1429 					if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1430 						m->vmp_absent = TRUE;
1431 						clear_absent_on_error = true;
1432 					}
1433 					/*
1434 					 * zero-fill the page and put it on
1435 					 * the correct paging queue
1436 					 */
1437 					my_fault = vm_fault_zero_page(m, no_zero_fill);
1438 
1439 					break;
1440 				} else {
1441 					if (must_be_resident) {
1442 						vm_object_paging_end(object);
1443 					} else if (object != first_object) {
1444 						vm_object_paging_end(object);
1445 						VM_PAGE_FREE(m);
1446 					} else {
1447 						first_m = m;
1448 						m->vmp_absent = FALSE;
1449 						m->vmp_busy = TRUE;
1450 
1451 						vm_page_lockspin_queues();
1452 						vm_page_queues_remove(m, FALSE);
1453 						vm_page_unlock_queues();
1454 					}
1455 
1456 					offset += object->vo_shadow_offset;
1457 					fault_info->lo_offset += object->vo_shadow_offset;
1458 					fault_info->hi_offset += object->vo_shadow_offset;
1459 					access_required = VM_PROT_READ;
1460 
1461 					vm_object_lock(next_object);
1462 					vm_object_unlock(object);
1463 					object = next_object;
1464 					vm_object_paging_begin(object);
1465 
1466 					/*
1467 					 * reset to default type of fault
1468 					 */
1469 					my_fault = DBG_CACHE_HIT_FAULT;
1470 
1471 					continue;
1472 				}
1473 			}
1474 			if ((m->vmp_cleaning)
1475 			    && ((object != first_object) || (object->vo_copy != VM_OBJECT_NULL))
1476 			    && (fault_type & VM_PROT_WRITE)) {
1477 				/*
1478 				 * This is a copy-on-write fault that will
1479 				 * cause us to revoke access to this page, but
1480 				 * this page is in the process of being cleaned
1481 				 * in a clustered pageout. We must wait until
1482 				 * the cleaning operation completes before
1483 				 * revoking access to the original page,
1484 				 * otherwise we might attempt to remove a
1485 				 * wired mapping.
1486 				 */
1487 #if TRACEFAULTPAGE
1488 				dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset);  /* (TEST/DEBUG) */
1489 #endif
1490 				/*
1491 				 * take an extra ref so that object won't die
1492 				 */
1493 				vm_object_reference_locked(object);
1494 
1495 				vm_fault_cleanup(object, first_m);
1496 
1497 				vm_object_lock(object);
1498 				assert(os_ref_get_count_raw(&object->ref_count) > 0);
1499 
1500 				m = vm_page_lookup(object, vm_object_trunc_page(offset));
1501 
1502 				if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1503 					wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_UNLOCK);
1504 					vm_object_deallocate(object);
1505 					goto backoff;
1506 				} else {
1507 					vm_object_unlock(object);
1508 
1509 					vm_object_deallocate(object);
1510 					thread_interrupt_level(interruptible_state);
1511 
1512 					return VM_FAULT_RETRY;
1513 				}
1514 			}
1515 			if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1516 			    !(fault_info != NULL && fault_info->stealth)) {
1517 				/*
1518 				 * If we were passed a non-NULL pointer for
1519 				 * "type_of_fault", then we came from
1520 				 * vm_fault... we'll let it deal with
1521 				 * this condition, since it
1522 				 * needs to see m->vmp_speculative to correctly
1523 				 * account the pageins, otherwise...
1524 				 * take it off the speculative queue, we'll
1525 				 * let the caller of vm_fault_page deal
1526 				 * with getting it onto the correct queue
1527 				 *
1528 				 * If the caller specified in fault_info that
1529 				 * it wants a "stealth" fault, we also leave
1530 				 * the page in the speculative queue.
1531 				 */
1532 				vm_page_lockspin_queues();
1533 				if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1534 					vm_page_queues_remove(m, FALSE);
1535 				}
1536 				vm_page_unlock_queues();
1537 			}
1538 			assert(object == VM_PAGE_OBJECT(m));
1539 
1540 			if (object->code_signed) {
1541 				/*
1542 				 * CODE SIGNING:
1543 				 * We just paged in a page from a signed
1544 				 * memory object but we don't need to
1545 				 * validate it now.  We'll validate it
1546 				 * when it gets mapped into a user address
1547 				 * space for the first time or when the page
1548 				 * gets copied to another object as a result
1549 				 * of a copy-on-write.
1550 				 */
1551 			}
1552 
1553 			/*
1554 			 * We mark the page busy and leave it on
1555 			 * the pageout queues.  If the pageout
1556 			 * daemon comes across it, then it will
1557 			 * remove the page from the queue, but not the object
1558 			 */
1559 #if TRACEFAULTPAGE
1560 			dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1561 #endif
1562 			assert(!m->vmp_busy);
1563 			assert(!m->vmp_absent);
1564 
1565 			m->vmp_busy = TRUE;
1566 			break;
1567 		}
1568 
1569 		/*
1570 		 * we get here when there is no page present in the object at
1571 		 * the offset we're interested in... we'll allocate a page
1572 		 * at this point if the pager associated with
1573 		 * this object can provide the data or we're the top object...
1574 		 * object is locked;  m == NULL
1575 		 */
1576 
1577 		if (must_be_resident) {
1578 			if (fault_type == VM_PROT_NONE &&
1579 			    is_kernel_object(object)) {
1580 				/*
1581 				 * We've been called from vm_fault_unwire()
1582 				 * while removing a map entry that was allocated
1583 				 * with KMA_KOBJECT and KMA_VAONLY.  This page
1584 				 * is not present and there's nothing more to
1585 				 * do here (nothing to unwire).
1586 				 */
1587 				vm_fault_cleanup(object, first_m);
1588 				thread_interrupt_level(interruptible_state);
1589 
1590 				return VM_FAULT_MEMORY_ERROR;
1591 			}
1592 
1593 			goto dont_look_for_page;
1594 		}
1595 
1596 		/* Don't expect to fault pages into the kernel object. */
1597 		assert(!is_kernel_object(object));
1598 
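		/*
		 * Only ask the pager if one has been created for this object and
		 * MUST_ASK_PAGER() indicates it may actually have data for this
		 * offset, so we don't waste a round trip for offsets the pager
		 * has never seen.
		 */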
1599 		look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE));
1600 
1601 #if TRACEFAULTPAGE
1602 		dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object);      /* (TEST/DEBUG) */
1603 #endif
1604 		if (!look_for_page && object == first_object && !object->phys_contiguous) {
1605 			/*
1606 			 * Allocate a new page for this object/offset pair as a placeholder
1607 			 */
1608 			m = vm_page_grab_options(grab_options);
1609 #if TRACEFAULTPAGE
1610 			dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1611 #endif
1612 			if (m == VM_PAGE_NULL) {
1613 				vm_fault_cleanup(object, first_m);
1614 				thread_interrupt_level(interruptible_state);
1615 
1616 				return VM_FAULT_MEMORY_SHORTAGE;
1617 			}
1618 
1619 			if (fault_info && fault_info->batch_pmap_op == TRUE) {
1620 				vm_page_insert_internal(m, object,
1621 				    vm_object_trunc_page(offset),
1622 				    VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1623 			} else {
1624 				vm_page_insert(m, object, vm_object_trunc_page(offset));
1625 			}
1626 		}
1627 		if (look_for_page) {
1628 			kern_return_t   rc;
1629 			int             my_fault_type;
1630 
1631 			/*
1632 			 *	If the memory manager is not ready, we
1633 			 *	cannot make requests.
1634 			 */
1635 			if (!object->pager_ready) {
1636 #if TRACEFAULTPAGE
1637 				dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0);       /* (TEST/DEBUG) */
1638 #endif
1639 				if (m != VM_PAGE_NULL) {
1640 					VM_PAGE_FREE(m);
1641 				}
1642 
1643 				/*
1644 				 * take an extra ref so object won't die
1645 				 */
1646 				vm_object_reference_locked(object);
1647 				vm_fault_cleanup(object, first_m);
1648 
1649 				vm_object_lock(object);
1650 				assert(os_ref_get_count_raw(&object->ref_count) > 0);
1651 
1652 				if (!object->pager_ready) {
1653 					wait_result = vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY, interruptible, LCK_SLEEP_UNLOCK);
1654 					vm_object_deallocate(object);
1655 
1656 					goto backoff;
1657 				} else {
1658 					vm_object_unlock(object);
1659 					vm_object_deallocate(object);
1660 					thread_interrupt_level(interruptible_state);
1661 
1662 					return VM_FAULT_RETRY;
1663 				}
1664 			}
1665 			if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1666 				/*
1667 				 * If there are too many outstanding page
1668 				 * requests pending on this external object, we
1669 				 * wait for them to be resolved now.
1670 				 */
1671 #if TRACEFAULTPAGE
1672 				dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0);       /* (TEST/DEBUG) */
1673 #endif
1674 				if (m != VM_PAGE_NULL) {
1675 					VM_PAGE_FREE(m);
1676 				}
1677 				/*
1678 				 * take an extra ref so object won't die
1679 				 */
1680 				vm_object_reference_locked(object);
1681 
1682 				vm_fault_cleanup(object, first_m);
1683 
1684 				vm_object_lock(object);
1685 				assert(os_ref_get_count_raw(&object->ref_count) > 0);
1686 
1687 				if (object->paging_in_progress >= vm_object_pagein_throttle) {
1688 					wait_result = vm_object_paging_throttle_wait(object, interruptible);
1689 					vm_object_unlock(object);
1690 					vm_object_deallocate(object);
1691 					goto backoff;
1692 				} else {
1693 					vm_object_unlock(object);
1694 					vm_object_deallocate(object);
1695 					thread_interrupt_level(interruptible_state);
1696 
1697 					return VM_FAULT_RETRY;
1698 				}
1699 			}
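			/*
			 * Internal (anonymous) objects are backed by the compressor
			 * pager: decompress directly into a freshly grabbed page here
			 * instead of going through memory_object_data_request() below.
			 */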
1700 			if (object->internal) {
1701 				int compressed_count_delta;
1702 				vm_compressor_options_t c_flags = 0;
1703 
1704 				assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1705 
1706 				if (m == VM_PAGE_NULL) {
1707 					/*
1708 					 * Allocate a new page for this object/offset pair as a placeholder
1709 					 */
1710 					m = vm_page_grab_options(grab_options);
1711 #if TRACEFAULTPAGE
1712 					dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object);  /* (TEST/DEBUG) */
1713 #endif
1714 					if (m == VM_PAGE_NULL) {
1715 						vm_fault_cleanup(object, first_m);
1716 						thread_interrupt_level(interruptible_state);
1717 
1718 						return VM_FAULT_MEMORY_SHORTAGE;
1719 					}
1720 
1721 					m->vmp_absent = TRUE;
1722 					if (fault_info && fault_info->batch_pmap_op == TRUE) {
1723 						vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1724 					} else {
1725 						vm_page_insert(m, object, vm_object_trunc_page(offset));
1726 					}
1727 				}
1728 				assert(m->vmp_busy);
1729 
1730 				m->vmp_absent = TRUE;
1731 				pager = object->pager;
1732 
1733 				assert(object->paging_in_progress > 0);
1734 
1735 				page_worker_token_t pw_token;
1736 #if PAGE_SLEEP_WITH_INHERITOR
1737 				page_worker_register_worker((event_t)m, &pw_token);
1738 #endif /* PAGE_SLEEP_WITH_INHERITOR */
1739 
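				/*
				 * Drop the object lock across the decompression; the
				 * busy/absent placeholder page keeps any other thread
				 * faulting on this offset waiting until we're done.
				 */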
1740 				vm_object_unlock(object);
1741 				rc = vm_compressor_pager_get(
1742 					pager,
1743 					offset + object->paging_offset,
1744 					VM_PAGE_GET_PHYS_PAGE(m),
1745 					&my_fault_type,
1746 					c_flags,
1747 					&compressed_count_delta);
1748 
1749 				if (type_of_fault == NULL) {
1750 					int     throttle_delay;
1751 
1752 					/*
1753 					 * we weren't called from vm_fault, so we
1754 					 * need to apply page creation throttling;
1755 					 * do it before we re-acquire any locks
1756 					 */
1757 					if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1758 						if ((throttle_delay = vm_page_throttled(TRUE))) {
1759 							VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1760 							__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
1761 						}
1762 					}
1763 				}
1764 				vm_object_lock(object);
1765 				assert(object->paging_in_progress > 0);
1766 
1767 				vm_compressor_pager_count(
1768 					pager,
1769 					compressed_count_delta,
1770 					FALSE, /* shared_lock */
1771 					object);
1772 
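				/*
				 * KERN_SUCCESS: the page now holds the decompressed data.
				 * KERN_MEMORY_FAILURE: decompression failed, mark the page
				 * as an error page.  KERN_MEMORY_ERROR: the compressor has
				 * no data for this offset, so the page stays absent.
				 */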
1773 				switch (rc) {
1774 				case KERN_SUCCESS:
1775 					m->vmp_absent = FALSE;
1776 					m->vmp_dirty = TRUE;
1777 					if (!HAS_DEFAULT_CACHEABILITY(object->wimg_bits &
1778 					    VM_WIMG_MASK)) {
1779 						/*
1780 						 * If the page is not cacheable,
1781 						 * we can't let its contents
1782 						 * linger in the data cache
1783 						 * after the decompression.
1784 						 */
1785 						pmap_sync_page_attributes_phys(
1786 							VM_PAGE_GET_PHYS_PAGE(m));
1787 					} else {
1788 						m->vmp_written_by_kernel = TRUE;
1789 					}
1790 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
1791 					if ((fault_type & VM_PROT_WRITE) == 0) {
1792 						vm_object_lock_assert_exclusive(object);
1793 						vm_page_lockspin_queues();
1794 						m->vmp_unmodified_ro = true;
1795 						vm_page_unlock_queues();
1796 						os_atomic_inc(&compressor_ro_uncompressed, relaxed);
1797 						*protection &= ~VM_PROT_WRITE;
1798 					}
1799 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
1800 
1801 					/*
1802 					 * If the object is purgeable, its
1803 					 * owner's purgeable ledgers have been
1804 					 * updated in vm_page_insert() but the
1805 					 * page was also accounted for in a
1806 					 * "compressed purgeable" ledger, so
1807 					 * update that now.
1808 					 */
1809 					if (((object->purgable !=
1810 					    VM_PURGABLE_DENY) ||
1811 					    object->vo_ledger_tag) &&
1812 					    (object->vo_owner !=
1813 					    NULL)) {
1814 						/*
1815 						 * One less compressed
1816 						 * purgeable/tagged page.
1817 						 */
1818 						if (compressed_count_delta) {
1819 							vm_object_owner_compressed_update(
1820 								object,
1821 								-1);
1822 						}
1823 					}
1824 
1825 					break;
1826 				case KERN_MEMORY_FAILURE:
1827 					m->vmp_unusual = TRUE;
1828 					m->vmp_error = TRUE;
1829 					m->vmp_absent = FALSE;
1830 					break;
1831 				case KERN_MEMORY_ERROR:
1832 					assert(m->vmp_absent);
1833 					break;
1834 				default:
1835 					panic("vm_fault_page(): unexpected "
1836 					    "error %d from "
1837 					    "vm_compressor_pager_get()\n",
1838 					    rc);
1839 				}
1840 				vm_page_wakeup_done_with_inheritor(object, m, &pw_token);
1841 
1842 				rc = KERN_SUCCESS;
1843 				goto data_requested;
1844 			}
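			/*
			 * External (file-backed) object: the pager supplies the data
			 * itself, so free any placeholder page grabbed above before
			 * calling memory_object_data_request().
			 */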
1845 			my_fault_type = DBG_PAGEIN_FAULT;
1846 
1847 			if (m != VM_PAGE_NULL) {
1848 				VM_PAGE_FREE(m);
1849 				m = VM_PAGE_NULL;
1850 			}
1851 
1852 #if TRACEFAULTPAGE
1853 			dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0);  /* (TEST/DEBUG) */
1854 #endif
1855 
1856 			/*
1857 			 * It's possible someone called vm_object_destroy while we weren't
1858 			 * holding the object lock.  If that has happened, then bail out
1859 			 * here.
1860 			 */
1861 
1862 			pager = object->pager;
1863 
1864 			if (pager == MEMORY_OBJECT_NULL) {
1865 				vm_fault_cleanup(object, first_m);
1866 				thread_interrupt_level(interruptible_state);
1867 
1868 				static const enum vm_subsys_error_codes object_destroy_errors[VM_OBJECT_DESTROY_MAX + 1] = {
1869 					[VM_OBJECT_DESTROY_UNKNOWN_REASON] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER,
1870 					[VM_OBJECT_DESTROY_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNMOUNT,
1871 					[VM_OBJECT_DESTROY_FORCED_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_FORCED_UNMOUNT,
1872 					[VM_OBJECT_DESTROY_UNGRAFT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNGRAFT,
1873 					[VM_OBJECT_DESTROY_PAGER] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_DEALLOC_PAGER,
1874 					[VM_OBJECT_DESTROY_RECLAIM] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_RECLAIM,
1875 				};
1876 				enum vm_subsys_error_codes kdbg_code = object_destroy_errors[(vm_object_destroy_reason_t)object->no_pager_reason];
1877 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, kdbg_code), 0 /* arg */);
1878 				return VM_FAULT_MEMORY_ERROR;
1879 			}
1880 
1881 			/*
1882 			 * We have an absent page in place for the faulting offset,
1883 			 * so we can release the object lock.
1884 			 */
1885 
1886 			if (object->object_is_shared_cache) {
1887 				token = thread_priority_floor_start();
1888 				/*
1889 				 * A non-native shared cache object might
1890 				 * be getting set up in parallel with this
1891 				 * fault and so we can't assume that this
1892 				 * check will be valid after we drop the
1893 				 * object lock below.
1894 				 */
1895 				drop_floor = true;
1896 			}
1897 
1898 			vm_object_unlock(object);
1899 
1900 			/*
1901 			 * If this object uses a copy_call strategy,
1902 			 * and we are interested in a copy of this object
1903 			 * (having gotten here only by following a
1904 			 * shadow chain), then tell the memory manager
1905 			 * via a flag added to the desired_access
1906 			 * parameter, so that it can detect a race
1907 			 * between our walking down the shadow chain
1908 			 * and its pushing pages up into a copy of
1909 			 * the object that it manages.
1910 			 */
1911 			if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1912 				wants_copy_flag = VM_PROT_WANTS_COPY;
1913 			} else {
1914 				wants_copy_flag = VM_PROT_NONE;
1915 			}
1916 
1917 			if (object->vo_copy == first_object) {
1918 				/*
1919 				 * if we issue the memory_object_data_request in
1920 				 * this state, we are subject to a deadlock with
1921 				 * the underlying filesystem if it is trying to
1922 				 * shrink the file resulting in a push of pages
1923 				 * into the copy object...  that push will stall
1924 				 * on the placeholder page, and if the pushing thread
1925 				 * is holding a lock that is required on the pagein
1926 				 * path (such as a truncate lock), we'll deadlock...
1927 				 * to avoid this potential deadlock, we throw away
1928 				 * our placeholder page before calling memory_object_data_request
1929 				 * and force this thread to retry the vm_fault_page after
1930 				 * we have issued the I/O.  the second time through this path
1931 				 * we will find the page already in the cache (presumably still
1932 				 * busy waiting for the I/O to complete) and then complete
1933 				 * the fault w/o having to go through memory_object_data_request again
1934 				 */
1935 				assert(first_m != VM_PAGE_NULL);
1936 				assert(VM_PAGE_OBJECT(first_m) == first_object);
1937 
1938 				vm_object_lock(first_object);
1939 				VM_PAGE_FREE(first_m);
1940 				vm_object_paging_end(first_object);
1941 				vm_object_unlock(first_object);
1942 
1943 				first_m = VM_PAGE_NULL;
1944 				force_fault_retry = TRUE;
1945 
1946 				vm_fault_page_forced_retry++;
1947 			}
1948 
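			/*
			 * If we already issued a data request on an earlier trip through
			 * this loop, clamp this one to a single page with random behavior
			 * so the pager isn't asked to re-read the whole cluster; the
			 * original settings are restored right after the call.
			 */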
1949 			if (data_already_requested == TRUE) {
1950 				orig_behavior = fault_info->behavior;
1951 				orig_cluster_size = fault_info->cluster_size;
1952 
1953 				fault_info->behavior = VM_BEHAVIOR_RANDOM;
1954 				fault_info->cluster_size = PAGE_SIZE;
1955 			}
1956 			/*
1957 			 * Call the memory manager to retrieve the data.
1958 			 */
1959 			rc = memory_object_data_request(
1960 				pager,
1961 				vm_object_trunc_page(offset) + object->paging_offset,
1962 				PAGE_SIZE,
1963 				access_required | wants_copy_flag,
1964 				(memory_object_fault_info_t)fault_info);
1965 
1966 			if (data_already_requested == TRUE) {
1967 				fault_info->behavior = orig_behavior;
1968 				fault_info->cluster_size = orig_cluster_size;
1969 			} else {
1970 				data_already_requested = TRUE;
1971 			}
1972 
1973 			DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1974 #if TRACEFAULTPAGE
1975 			dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1976 #endif
1977 			vm_object_lock(object);
1978 
1979 			if (drop_floor && object->object_is_shared_cache) {
1980 				thread_priority_floor_end(&token);
1981 				drop_floor = false;
1982 			}
1983 
1984 data_requested:
1985 			if (rc != ERR_SUCCESS) {
1986 				vm_fault_cleanup(object, first_m);
1987 				thread_interrupt_level(interruptible_state);
1988 
1989 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1990 
1991 				if (rc == MACH_SEND_INTERRUPTED) {
1992 					return VM_FAULT_INTERRUPTED;
1993 				} else if (rc == KERN_ALREADY_WAITING) {
1994 					return VM_FAULT_BUSY;
1995 				} else {
1996 					return VM_FAULT_MEMORY_ERROR;
1997 				}
1998 			} else {
1999 				clock_sec_t     tv_sec;
2000 				clock_usec_t    tv_usec;
2001 
2002 				if (my_fault_type == DBG_PAGEIN_FAULT) {
2003 					clock_get_system_microtime(&tv_sec, &tv_usec);
2004 					current_thread()->t_page_creation_time = tv_sec;
2005 					current_thread()->t_page_creation_count = 0;
2006 				}
2007 			}
2008 			if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
2009 				vm_fault_cleanup(object, first_m);
2010 				thread_interrupt_level(interruptible_state);
2011 
2012 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2013 				return VM_FAULT_INTERRUPTED;
2014 			}
2015 			if (force_fault_retry == TRUE) {
2016 				vm_fault_cleanup(object, first_m);
2017 				thread_interrupt_level(interruptible_state);
2018 
2019 				return VM_FAULT_RETRY;
2020 			}
2021 			if (m == VM_PAGE_NULL && object->phys_contiguous) {
2022 				/*
2023 				 * No page here means that the object we
2024 				 * initially looked up was "physically
2025 				 * contiguous" (i.e. device memory).  However,
2026 				 * with Virtual VRAM, the object might not
2027 				 * be backed by that device memory anymore,
2028 				 * so we're done here only if the object is
2029 				 * still "phys_contiguous".
2030 				 * Otherwise, if the object is no longer
2031 				 * "phys_contiguous", we need to retry the
2032 				 * page fault against the object's new backing
2033 				 * store (different memory object).
2034 				 */
2035 phys_contig_object:
2036 				assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
2037 				assert(object == first_object);
2038 				goto done;
2039 			}
2040 			/*
2041 			 * potentially a pagein fault
2042 			 * if we make it through the state checks
2043 			 * above, then we'll count it as such
2044 			 */
2045 			my_fault = my_fault_type;
2046 
2047 			/*
2048 			 * Retry with same object/offset, since new data may
2049 			 * be in a different page (i.e., m is meaningless at
2050 			 * this point).
2051 			 */
2052 			continue;
2053 		}
2054 dont_look_for_page:
2055 		/*
2056 		 * We get here if the object has no pager, or an existence map
2057 		 * exists and indicates the page isn't present on the pager
2058 		 * or we're unwiring a page.  If a pager exists, but there
2059 		 * is no existence map, then the m->vmp_absent case above handles
2060 		 * the ZF case when the pager can't provide the page
2061 		 */
2062 #if TRACEFAULTPAGE
2063 		dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
2064 #endif
2065 		if (object == first_object) {
2066 			first_m = m;
2067 		} else {
2068 			assert(m == VM_PAGE_NULL);
2069 		}
2070 
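		/*
		 * Nothing resident here and the pager (if any) couldn't help:
		 * descend to the shadow object, or zero-fill in the top object
		 * if we've reached the end of the shadow chain.
		 */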
2071 		next_object = object->shadow;
2072 
2073 		if (next_object == VM_OBJECT_NULL) {
2074 			/*
2075 			 * we've hit the bottom of the shadow chain,
2076 			 * fill the page in the top object with zeros.
2077 			 */
2078 			assert(!must_be_resident);
2079 
2080 			if (object != first_object) {
2081 				vm_object_paging_end(object);
2082 				vm_object_unlock(object);
2083 
2084 				object = first_object;
2085 				offset = first_offset;
2086 				vm_object_lock(object);
2087 			}
2088 			m = first_m;
2089 			assert(VM_PAGE_OBJECT(m) == object);
2090 			first_m = VM_PAGE_NULL;
2091 
2092 			/*
2093 			 * check for any conditions that prevent
2094 			 * us from creating a new zero-fill page.
2095 			 * vm_fault_check will do all of the
2096 			 * fault cleanup in the case of an error condition,
2097 			 * including resetting the thread_interrupt_level
2098 			 */
2099 			error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2100 
2101 			if (error != VM_FAULT_SUCCESS) {
2102 				return error;
2103 			}
2104 
2105 			if (m == VM_PAGE_NULL) {
2106 				m = vm_page_grab_options(grab_options);
2107 
2108 				if (m == VM_PAGE_NULL) {
2109 					vm_fault_cleanup(object, VM_PAGE_NULL);
2110 					thread_interrupt_level(interruptible_state);
2111 
2112 					return VM_FAULT_MEMORY_SHORTAGE;
2113 				}
2114 				vm_page_insert(m, object, vm_object_trunc_page(offset));
2115 			}
2116 			if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2117 				m->vmp_absent = TRUE;
2118 				clear_absent_on_error = true;
2119 			}
2120 
2121 			my_fault = vm_fault_zero_page(m, no_zero_fill);
2122 
2123 			break;
2124 		} else {
2125 			/*
2126 			 * Move on to the next object.  Lock the next
2127 			 * object before unlocking the current one.
2128 			 */
2129 			if ((object != first_object) || must_be_resident) {
2130 				vm_object_paging_end(object);
2131 			}
2132 
2133 			offset += object->vo_shadow_offset;
2134 			fault_info->lo_offset += object->vo_shadow_offset;
2135 			fault_info->hi_offset += object->vo_shadow_offset;
2136 			access_required = VM_PROT_READ;
2137 
2138 			vm_object_lock(next_object);
2139 			vm_object_unlock(object);
2140 
2141 			object = next_object;
2142 			vm_object_paging_begin(object);
2143 		}
2144 	}
2145 
2146 	/*
2147 	 *	PAGE HAS BEEN FOUND.
2148 	 *
2149 	 *	This page (m) is:
2150 	 *		busy, so that we can play with it;
2151 	 *		not absent, so that nobody else will fill it;
2152 	 *		possibly eligible for pageout;
2153 	 *
2154 	 *	The top-level page (first_m) is:
2155 	 *		VM_PAGE_NULL if the page was found in the
2156 	 *		 top-level object;
2157 	 *		busy, not absent, and ineligible for pageout.
2158 	 *
2159 	 *	The current object (object) is locked.  A paging
2160 	 *	reference is held for the current and top-level
2161 	 *	objects.
2162 	 */
2163 
2164 #if TRACEFAULTPAGE
2165 	dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m);  /* (TEST/DEBUG) */
2166 #endif
2167 #if     EXTRA_ASSERTIONS
2168 	assert(m->vmp_busy && !m->vmp_absent);
2169 	assert((first_m == VM_PAGE_NULL) ||
2170 	    (first_m->vmp_busy && !first_m->vmp_absent &&
2171 	    !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2172 #endif  /* EXTRA_ASSERTIONS */
2173 
2174 	/*
2175 	 * If the page is being written, but isn't
2176 	 * already owned by the top-level object,
2177 	 * we have to copy it into a new page owned
2178 	 * by the top-level object.
2179 	 */
2180 	if (object != first_object) {
2181 #if TRACEFAULTPAGE
2182 		dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2183 #endif
2184 		if (fault_type & VM_PROT_WRITE) {
2185 			vm_page_t copy_m;
2186 
2187 			/*
2188 			 * We only really need to copy if we
2189 			 * want to write it.
2190 			 */
2191 			assert(!must_be_resident);
2192 
2193 			/*
2194 			 * If we try to collapse first_object at this
2195 			 * point, we may deadlock when we try to get
2196 			 * the lock on an intermediate object (since we
2197 			 * have the bottom object locked).  We can't
2198 			 * unlock the bottom object, because the page
2199 			 * we found may move (by collapse) if we do.
2200 			 *
2201 			 * Instead, we first copy the page.  Then, when
2202 			 * we have no more use for the bottom object,
2203 			 * we unlock it and try to collapse.
2204 			 *
2205 			 * Note that we copy the page even if we didn't
2206 			 * need to... that's the breaks.
2207 			 */
2208 
2209 			/*
2210 			 * Allocate a page for the copy
2211 			 */
2212 			copy_m = vm_page_grab_options(grab_options);
2213 
2214 			if (copy_m == VM_PAGE_NULL) {
2215 				vm_fault_page_release_page(m, &clear_absent_on_error);
2216 
2217 				vm_fault_cleanup(object, first_m);
2218 				thread_interrupt_level(interruptible_state);
2219 
2220 				return VM_FAULT_MEMORY_SHORTAGE;
2221 			}
2222 
2223 			vm_page_copy(m, copy_m);
2224 
2225 			/*
2226 			 * If another map is truly sharing this
2227 			 * page with us, we have to flush all
2228 			 * uses of the original page, since we
2229 			 * can't distinguish those which want the
2230 			 * original from those which need the
2231 			 * new copy.
2232 			 *
2233 			 * XXXO If we know that only one map has
2234 			 * access to this page, then we could
2235 			 * avoid the pmap_disconnect() call.
2236 			 */
2237 			if (m->vmp_pmapped) {
2238 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2239 			}
2240 
2241 			if (m->vmp_clustered) {
2242 				VM_PAGE_COUNT_AS_PAGEIN(m);
2243 				VM_PAGE_CONSUME_CLUSTERED(m);
2244 			}
2245 			assert(!m->vmp_cleaning);
2246 
2247 			/*
2248 			 * We no longer need the old page or object.
2249 			 */
2250 			vm_fault_page_release_page(m, &clear_absent_on_error);
2251 
2252 			/*
2253 			 * This check helps with marking the object as having a sequential pattern.
2254 			 * Normally we'd miss doing this below because this fault is a COW into
2255 			 * first_object: we bring the page in from disk and push it to the object
2256 			 * above, without updating the file object's sequential pattern.
2257 			 */
2258 			if (object->internal == FALSE) {
2259 				vm_fault_is_sequential(object, offset, fault_info->behavior);
2260 			}
2261 
2262 			vm_object_paging_end(object);
2263 			vm_object_unlock(object);
2264 
2265 			my_fault = DBG_COW_FAULT;
2266 			counter_inc(&vm_statistics_cow_faults);
2267 			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2268 			counter_inc(&current_task()->cow_faults);
2269 
2270 			object = first_object;
2271 			offset = first_offset;
2272 
2273 			vm_object_lock(object);
2274 			/*
2275 			 * get rid of the place holder
2276 			 * page that we soldered in earlier
2277 			 */
2278 			VM_PAGE_FREE(first_m);
2279 			first_m = VM_PAGE_NULL;
2280 
2281 			/*
2282 			 * and replace it with the
2283 			 * page we just copied into
2284 			 */
2285 			assert(copy_m->vmp_busy);
2286 			vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2287 			SET_PAGE_DIRTY(copy_m, TRUE);
2288 
2289 			m = copy_m;
2290 			/*
2291 			 * Now that we've gotten the copy out of the
2292 			 * way, let's try to collapse the top object.
2293 			 * But we have to play ugly games with
2294 			 * paging_in_progress to do that...
2295 			 */
2296 			vm_object_paging_end(object);
2297 			vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2298 			vm_object_paging_begin(object);
2299 		} else {
2300 			*protection &= (~VM_PROT_WRITE);
2301 		}
2302 	}
2303 	/*
2304 	 * Now check whether the page needs to be pushed into the
2305 	 * copy object.  The use of asymmetric copy on write for
2306 	 * shared temporary objects means that we may do two copies to
2307 	 * satisfy the fault; one above to get the page from a
2308 	 * shadowed object, and one here to push it into the copy.
2309 	 */
2310 	try_failed_count = 0;
2311 
2312 	while ((copy_object = first_object->vo_copy) != VM_OBJECT_NULL) {
2313 		vm_object_offset_t      copy_offset;
2314 		vm_page_t               copy_m;
2315 
2316 #if TRACEFAULTPAGE
2317 		dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type);    /* (TEST/DEBUG) */
2318 #endif
2319 		/*
2320 		 * If the page is being written, but hasn't been
2321 		 * copied to the copy-object, we have to copy it there.
2322 		 */
2323 		if ((fault_type & VM_PROT_WRITE) == 0) {
2324 			*protection &= ~VM_PROT_WRITE;
2325 			break;
2326 		}
2327 
2328 		/*
2329 		 * If the page was guaranteed to be resident,
2330 		 * we must have already performed the copy.
2331 		 */
2332 		if (must_be_resident) {
2333 			break;
2334 		}
2335 
2336 		/*
2337 		 * Try to get the lock on the copy_object.
2338 		 */
2339 		if (!vm_object_lock_try(copy_object)) {
2340 			vm_object_unlock(object);
2341 			try_failed_count++;
2342 
2343 			mutex_pause(try_failed_count);  /* wait a bit */
2344 			vm_object_lock(object);
2345 
2346 			continue;
2347 		}
2348 		try_failed_count = 0;
2349 
2350 		/*
2351 		 * Make another reference to the copy-object,
2352 		 * to keep it from disappearing during the
2353 		 * copy.
2354 		 */
2355 		vm_object_reference_locked(copy_object);
2356 
2357 		/*
2358 		 * Does the page exist in the copy?
2359 		 */
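		/*
		 * copy_object shadows first_object, so translate first_offset
		 * into the copy object's offset space before looking it up.
		 */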
2360 		copy_offset = first_offset - copy_object->vo_shadow_offset;
2361 		copy_offset = vm_object_trunc_page(copy_offset);
2362 
2363 		if (copy_object->vo_size <= copy_offset) {
2364 			/*
2365 			 * Copy object doesn't cover this page -- do nothing.
2366 			 */
2367 			;
2368 		} else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2369 			/*
2370 			 * Page currently exists in the copy object
2371 			 */
2372 			if (copy_m->vmp_busy) {
2373 				/*
2374 				 * If the page is being brought
2375 				 * in, wait for it and then retry.
2376 				 */
2377 				vm_fault_page_release_page(m, &clear_absent_on_error);
2378 
2379 				/*
2380 				 * take an extra ref so object won't die
2381 				 */
2382 				vm_object_reference_locked(copy_object);
2383 				vm_object_unlock(copy_object);
2384 				vm_fault_cleanup(object, first_m);
2385 
2386 				vm_object_lock(copy_object);
2387 				vm_object_lock_assert_exclusive(copy_object);
2388 				os_ref_release_live_locked_raw(&copy_object->ref_count,
2389 				    &vm_object_refgrp);
2390 				copy_m = vm_page_lookup(copy_object, copy_offset);
2391 
2392 				if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2393 					wait_result = vm_page_sleep(copy_object, copy_m, interruptible, LCK_SLEEP_UNLOCK);
2394 					vm_object_deallocate(copy_object);
2395 
2396 					goto backoff;
2397 				} else {
2398 					vm_object_unlock(copy_object);
2399 					vm_object_deallocate(copy_object);
2400 					thread_interrupt_level(interruptible_state);
2401 
2402 					return VM_FAULT_RETRY;
2403 				}
2404 			}
2405 		} else if (!PAGED_OUT(copy_object, copy_offset)) {
2406 			/*
2407 			 * If PAGED_OUT is TRUE, then the page used to exist
2408 			 * in the copy-object, and has already been paged out.
2409 			 * We don't need to repeat this. If PAGED_OUT is
2410 			 * FALSE, then either we don't know (!pager_created,
2411 			 * for example) or it hasn't been paged out.
2412 			 * (VM_EXTERNAL_STATE_UNKNOWN || VM_EXTERNAL_STATE_ABSENT)
2413 			 * We must copy the page to the copy object.
2414 			 *
2415 			 * Allocate a page for the copy
2416 			 */
2417 			copy_m = vm_page_grab_options(grab_options);
2418 
2419 			if (copy_m == VM_PAGE_NULL) {
2420 				vm_fault_page_release_page(m, &clear_absent_on_error);
2421 
2422 				vm_object_lock_assert_exclusive(copy_object);
2423 				os_ref_release_live_locked_raw(&copy_object->ref_count,
2424 				    &vm_object_refgrp);
2425 
2426 				vm_object_unlock(copy_object);
2427 				vm_fault_cleanup(object, first_m);
2428 				thread_interrupt_level(interruptible_state);
2429 
2430 				return VM_FAULT_MEMORY_SHORTAGE;
2431 			}
2432 
2433 			/*
2434 			 * Must copy page into copy-object.
2435 			 */
2436 			vm_page_insert(copy_m, copy_object, copy_offset);
2437 			vm_page_copy(m, copy_m);
2438 
2439 			/*
2440 			 * If the old page was in use by any users
2441 			 * of the copy-object, it must be removed
2442 			 * from all pmaps.  (We can't know which
2443 			 * pmaps use it.)
2444 			 */
2445 			if (m->vmp_pmapped) {
2446 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2447 			}
2448 
2449 			if (m->vmp_clustered) {
2450 				VM_PAGE_COUNT_AS_PAGEIN(m);
2451 				VM_PAGE_CONSUME_CLUSTERED(m);
2452 			}
2453 			/*
2454 			 * If there's a pager, then immediately
2455 			 * page out this page, using the "initialize"
2456 			 * option.  Else, we use the copy.
2457 			 */
2458 			if ((!copy_object->pager_ready)
2459 			    || vm_object_compressor_pager_state_get(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2460 			    ) {
2461 				vm_page_lockspin_queues();
2462 				assert(!m->vmp_cleaning);
2463 				vm_page_activate(copy_m);
2464 				vm_page_unlock_queues();
2465 
2466 				SET_PAGE_DIRTY(copy_m, TRUE);
2467 				vm_page_wakeup_done(copy_object, copy_m);
2468 			} else {
2469 				assert(copy_m->vmp_busy == TRUE);
2470 				assert(!m->vmp_cleaning);
2471 
2472 				/*
2473 				 * dirty is protected by the object lock
2474 				 */
2475 				SET_PAGE_DIRTY(copy_m, TRUE);
2476 
2477 				/*
2478 				 * The page is already ready for pageout:
2479 				 * not on pageout queues and busy.
2480 				 * Unlock everything except the
2481 				 * copy_object itself.
2482 				 */
2483 				vm_object_unlock(object);
2484 
2485 				/*
2486 				 * Write the page to the copy-object,
2487 				 * flushing it from the kernel.
2488 				 */
2489 				vm_pageout_initialize_page(copy_m);
2490 
2491 				/*
2492 				 * Since the pageout may have
2493 				 * temporarily dropped the
2494 				 * copy_object's lock, we
2495 				 * check whether we'll have
2496 				 * to deallocate the hard way.
2497 				 */
2498 				if ((copy_object->shadow != object) ||
2499 				    (os_ref_get_count_raw(&copy_object->ref_count) == 1)) {
2500 					vm_object_unlock(copy_object);
2501 					vm_object_deallocate(copy_object);
2502 					vm_object_lock(object);
2503 
2504 					continue;
2505 				}
2506 				/*
2507 				 * Pick back up the old object's
2508 				 * lock.  [It is safe to do so,
2509 				 * since it must be deeper in the
2510 				 * object tree.]
2511 				 */
2512 				vm_object_lock(object);
2513 			}
2514 
2515 			/*
2516 			 * Because we're pushing a page upward
2517 			 * in the object tree, we must restart
2518 			 * any faults that are waiting here.
2519 			 * [Note that this is an expansion of
2520 			 * vm_page_wakeup() that uses the THREAD_RESTART
2521 			 * wait result].  Can't turn off the page's
2522 			 * busy bit because we're not done with it.
2523 			 */
2524 			if (m->vmp_wanted) {
2525 				m->vmp_wanted = FALSE;
2526 				thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2527 			}
2528 		}
2529 		/*
2530 		 * The reference count on copy_object must be
2531 		 * at least 2: one for our extra reference,
2532 		 * and at least one from the outside world
2533 		 * (we checked that when we last locked
2534 		 * copy_object).
2535 		 */
2536 		vm_object_lock_assert_exclusive(copy_object);
2537 		os_ref_release_live_locked_raw(&copy_object->ref_count,
2538 		    &vm_object_refgrp);
2539 
2540 		vm_object_unlock(copy_object);
2541 
2542 		break;
2543 	}
2544 
2545 done:
2546 	*result_page = m;
2547 	*top_page = first_m;
2548 
2549 	if (m != VM_PAGE_NULL) {
2550 		assert(VM_PAGE_OBJECT(m) == object);
2551 
2552 		retval = VM_FAULT_SUCCESS;
2553 
2554 		if (my_fault == DBG_PAGEIN_FAULT) {
2555 			VM_PAGE_COUNT_AS_PAGEIN(m);
2556 
2557 			if (object->internal) {
2558 				my_fault = DBG_PAGEIND_FAULT;
2559 			} else {
2560 				my_fault = DBG_PAGEINV_FAULT;
2561 			}
2562 
2563 			/*
2564 			 * evaluate access pattern and update state
2565 			 * vm_fault_deactivate_behind depends on the
2566 			 * state being up to date
2567 			 */
2568 			vm_fault_is_sequential(object, offset, fault_info->behavior);
2569 			vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2570 		} else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2571 			/*
2572 			 * we weren't called from vm_fault, so handle the
2573 			 * accounting here for hits in the cache
2574 			 */
2575 			if (m->vmp_clustered) {
2576 				VM_PAGE_COUNT_AS_PAGEIN(m);
2577 				VM_PAGE_CONSUME_CLUSTERED(m);
2578 			}
2579 			vm_fault_is_sequential(object, offset, fault_info->behavior);
2580 			vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2581 		} else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2582 			VM_STAT_DECOMPRESSIONS();
2583 		}
2584 		if (type_of_fault) {
2585 			*type_of_fault = my_fault;
2586 		}
2587 	} else {
2588 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2589 		retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2590 		assert(first_m == VM_PAGE_NULL);
2591 		assert(object == first_object);
2592 	}
2593 
2594 	thread_interrupt_level(interruptible_state);
2595 
2596 #if TRACEFAULTPAGE
2597 	dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0);       /* (TEST/DEBUG) */
2598 #endif
2599 	return retval;
2600 
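	/*
	 * backoff: we dropped our locks and slept waiting for some event
	 * (busy page, pager becoming ready, paging throttle); tell the caller
	 * to either give up (interrupted) or re-drive the fault from scratch.
	 */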
2601 backoff:
2602 	thread_interrupt_level(interruptible_state);
2603 
2604 	if (wait_result == THREAD_INTERRUPTED) {
2605 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2606 		return VM_FAULT_INTERRUPTED;
2607 	}
2608 	return VM_FAULT_RETRY;
2609 }
2610 
2611 #if MACH_ASSERT && (XNU_PLATFORM_WatchOS || __x86_64__)
2612 #define PANIC_ON_CS_KILLED_DEFAULT true
2613 #else
2614 #define PANIC_ON_CS_KILLED_DEFAULT false
2615 #endif
2616 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2617     PANIC_ON_CS_KILLED_DEFAULT);
2618 
2619 extern int proc_selfpid(void);
2620 extern char *proc_name_address(struct proc *p);
2621 extern const char *proc_best_name(struct proc *);
2622 unsigned long cs_enter_tainted_rejected = 0;
2623 unsigned long cs_enter_tainted_accepted = 0;
2624 
2625 /*
2626  * CODE SIGNING:
2627  * When soft faulting a page, we have to validate the page if:
2628  * 1. the page is being mapped in user space
2629  * 2. the page hasn't already been found to be "tainted"
2630  * 3. the page belongs to a code-signed object
2631  * 4. the page has not been validated yet or has been mapped for write.
2632  */
2633 static bool
2634 vm_fault_cs_need_validation(
2635 	pmap_t pmap,
2636 	vm_page_t page,
2637 	vm_object_t page_obj,
2638 	vm_map_size_t fault_page_size,
2639 	vm_map_offset_t fault_phys_offset)
2640 {
2641 	if (pmap == kernel_pmap) {
2642 		/* 1 - not user space */
2643 		return false;
2644 	}
2645 	if (!page_obj->code_signed) {
2646 		/* 3 - page does not belong to a code-signed object */
2647 		return false;
2648 	}
2649 	if (fault_page_size == PAGE_SIZE) {
2650 		/* looking at the whole page */
2651 		assertf(fault_phys_offset == 0,
2652 		    "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2653 		    (uint64_t)fault_page_size,
2654 		    (uint64_t)fault_phys_offset);
2655 		if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2656 			/* 2 - page is all tainted */
2657 			return false;
2658 		}
2659 		if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2660 		    !page->vmp_wpmapped) {
2661 			/* 4 - already fully validated and never mapped writable */
2662 			return false;
2663 		}
2664 	} else {
2665 		/* looking at a specific sub-page */
2666 		if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2667 			/* 2 - sub-page was already marked as tainted */
2668 			return false;
2669 		}
2670 		if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2671 		    !page->vmp_wpmapped) {
2672 			/* 4 - already validated and never mapped writable */
2673 			return false;
2674 		}
2675 	}
2676 	/* page needs to be validated */
2677 	return true;
2678 }
2679 
2680 
2681 static bool
2682 vm_fault_cs_page_immutable(
2683 	vm_page_t m,
2684 	vm_map_size_t fault_page_size,
2685 	vm_map_offset_t fault_phys_offset,
2686 	vm_prot_t prot __unused)
2687 {
2688 	if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2689 	    /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2690 		return true;
2691 	}
2692 	return false;
2693 }
2694 
2695 static bool
2696 vm_fault_cs_page_nx(
2697 	vm_page_t m,
2698 	vm_map_size_t fault_page_size,
2699 	vm_map_offset_t fault_phys_offset)
2700 {
2701 	return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2702 }
2703 
2704 /*
2705  * Check if the page being entered into the pmap violates code signing.
2706  */
2707 static kern_return_t
2708 vm_fault_cs_check_violation(
2709 	bool cs_bypass,
2710 	vm_object_t object,
2711 	vm_page_t m,
2712 	pmap_t pmap,
2713 	vm_prot_t prot,
2714 	vm_prot_t caller_prot,
2715 	vm_map_size_t fault_page_size,
2716 	vm_map_offset_t fault_phys_offset,
2717 	vm_object_fault_info_t fault_info,
2718 	bool map_is_switched,
2719 	bool map_is_switch_protected,
2720 	bool *cs_violation)
2721 {
2722 #if !CODE_SIGNING_MONITOR
2723 #pragma unused(caller_prot)
2724 #pragma unused(fault_info)
2725 #endif /* !CODE_SIGNING_MONITOR */
2726 
2727 	int             cs_enforcement_enabled;
2728 	if (!cs_bypass &&
2729 	    vm_fault_cs_need_validation(pmap, m, object,
2730 	    fault_page_size, fault_phys_offset)) {
2731 		vm_object_lock_assert_exclusive(object);
2732 
2733 		if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2734 			vm_cs_revalidates++;
2735 		}
2736 
2737 		/* VM map is locked, so 1 ref will remain on VM object -
2738 		 * so no harm if vm_page_validate_cs drops the object lock */
2739 
2740 #if CODE_SIGNING_MONITOR
2741 		if (fault_info->csm_associated &&
2742 		    csm_enabled() &&
2743 		    !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2744 		    !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
2745 		    !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
2746 		    (prot & VM_PROT_EXECUTE) &&
2747 		    (caller_prot & VM_PROT_EXECUTE)) {
2748 			/*
2749 			 * When we have a code signing monitor, the monitor will evaluate the code signature
2750 			 * for any executable page mapping. No need for the VM to also validate the page.
2751 			 * In the code signing monitor we trust :)
2752 			 */
2753 			vm_cs_defer_to_csm++;
2754 		} else {
2755 			vm_cs_defer_to_csm_not++;
2756 			vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2757 		}
2758 #else /* CODE_SIGNING_MONITOR */
2759 		vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2760 #endif /* CODE_SIGNING_MONITOR */
2761 	}
2762 
2763 	/* If the map is switched, and is switch-protected, we must protect
2764 	 * some pages from being write-faulted: immutable pages because by
2765 	 * definition they may not be written, and executable pages because that
2766 	 * would provide a way to inject unsigned code.
2767 	 * If the page is immutable, we can simply return. However, we can't
2768 	 * immediately determine whether a page is executable anywhere. But,
2769 	 * we can disconnect it everywhere and remove the executable protection
2770 	 * from the current map. We do that below right before we do the
2771 	 * PMAP_ENTER.
2772 	 */
2773 	if (pmap == kernel_pmap) {
2774 		/* kernel fault: cs_enforcement does not apply */
2775 		cs_enforcement_enabled = 0;
2776 	} else {
2777 		cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2778 	}
2779 
2780 	if (cs_enforcement_enabled && map_is_switched &&
2781 	    map_is_switch_protected &&
2782 	    vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2783 	    (prot & VM_PROT_WRITE)) {
2784 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2785 		return KERN_CODESIGN_ERROR;
2786 	}
2787 
2788 	if (cs_enforcement_enabled &&
2789 	    vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2790 	    (prot & VM_PROT_EXECUTE)) {
2791 		if (cs_debug) {
2792 			printf("page marked to be NX, not letting it be mapped EXEC\n");
2793 		}
2794 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2795 		return KERN_CODESIGN_ERROR;
2796 	}
2797 
2798 	/* A page could be tainted, or pose a risk of being tainted later.
2799 	 * Check whether the receiving process wants it, and make it feel
2800 	 * the consequences (that happens in cs_invalid_page()).
2801 	 * For CS Enforcement, two other conditions will
2802 	 * cause that page to be tainted as well:
2803 	 * - pmapping an unsigned page executable - this means unsigned code;
2804 	 * - writeable mapping of a validated page - the content of that page
2805 	 *   can be changed without the kernel noticing, therefore unsigned
2806 	 *   code can be created
2807 	 */
2808 	if (cs_bypass) {
2809 		/* code-signing is bypassed */
2810 		*cs_violation = FALSE;
2811 	} else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2812 		/* tainted page */
2813 		*cs_violation = TRUE;
2814 	} else if (!cs_enforcement_enabled) {
2815 		/* no further code-signing enforcement */
2816 		*cs_violation = FALSE;
2817 	} else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2818 	    ((prot & VM_PROT_WRITE) ||
2819 	    m->vmp_wpmapped)) {
2820 		/*
2821 		 * The page should be immutable, but is in danger of being
2822 		 * modified.
2823 		 * This is the case where we want policy from the code
2824 		 * directory - is the page immutable or not? For now we have
2825 		 * to assume that code pages will be immutable, data pages not.
2826 		 * We'll assume a page is a code page if it has a code directory
2827 		 * and we fault for execution.
2828 		 * That is good enough since if we faulted the code page for
2829 		 * writing in another map before, it is wpmapped; if we fault
2830 		 * it for writing in this map later it will also be faulted for
2831 		 * executing at the same time; and if we fault for writing in
2832 		 * another map later, we will disconnect it from this pmap so
2833 		 * we'll notice the change.
2834 		 */
2835 		*cs_violation = TRUE;
2836 	} else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2837 	    (prot & VM_PROT_EXECUTE)
2838 #if CODE_SIGNING_MONITOR
2839 	    /*
2840 	     * Executable pages will be validated by the code signing monitor. If the
2841 	     * code signing monitor is turned off, then this is a code-signing violation.
2842 	     */
2843 	    && !csm_enabled()
2844 #endif /* CODE_SIGNING_MONITOR */
2845 	    ) {
2846 		*cs_violation = TRUE;
2847 	} else {
2848 		*cs_violation = FALSE;
2849 	}
2850 	return KERN_SUCCESS;
2851 }
2852 
2853 /*
2854  * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2855  * @param must_disconnect This value will be set to true if the caller must disconnect
2856  * this page.
2857  * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2858  */
2859 static kern_return_t
2860 vm_fault_cs_handle_violation(
2861 	vm_object_t object,
2862 	vm_page_t m,
2863 	pmap_t pmap,
2864 	vm_prot_t prot,
2865 	vm_map_offset_t vaddr,
2866 	vm_map_size_t fault_page_size,
2867 	vm_map_offset_t fault_phys_offset,
2868 	bool map_is_switched,
2869 	bool map_is_switch_protected,
2870 	bool *must_disconnect)
2871 {
2872 #if !MACH_ASSERT
2873 #pragma unused(pmap)
2874 #pragma unused(map_is_switch_protected)
2875 #endif /* !MACH_ASSERT */
2876 	/*
2877 	 * We will have a tainted page. Have to handle the special case
2878 	 * of a switched map now. If the map is not switched, standard
2879 	 * procedure applies - call cs_invalid_page().
2880 	 * If the map is switched, the real owner is invalid already.
2881 	 * There is no point in invalidating the switching process since
2882 	 * it will not be executing from the map. So we don't call
2883 	 * cs_invalid_page() in that case.
2884 	 */
2885 	boolean_t reject_page, cs_killed;
2886 	kern_return_t kr;
2887 	if (map_is_switched) {
2888 		assert(pmap == vm_map_pmap(current_thread()->map));
2889 		assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2890 		reject_page = FALSE;
2891 	} else {
2892 		if (cs_debug > 5) {
2893 			printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2894 			    object->code_signed ? "yes" : "no",
2895 			    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2896 			    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2897 			    m->vmp_wpmapped ? "yes" : "no",
2898 			    (int)prot);
2899 		}
2900 		reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2901 	}
2902 
2903 	if (reject_page) {
2904 		/* reject the invalid page: abort the page fault */
2905 		int                     pid;
2906 		const char              *procname;
2907 		task_t                  task;
2908 		vm_object_t             file_object, shadow;
2909 		vm_object_offset_t      file_offset;
2910 		char                    *pathname, *filename;
2911 		vm_size_t               pathname_len, filename_len;
2912 		boolean_t               truncated_path;
2913 #define __PATH_MAX 1024
2914 		struct timespec         mtime, cs_mtime;
2915 		int                     shadow_depth;
2916 		os_reason_t             codesigning_exit_reason = OS_REASON_NULL;
2917 
2918 		kr = KERN_CODESIGN_ERROR;
2919 		cs_enter_tainted_rejected++;
2920 
2921 		/* get process name and pid */
2922 		procname = "?";
2923 		task = current_task();
2924 		pid = proc_selfpid();
2925 		if (get_bsdtask_info(task) != NULL) {
2926 			procname = proc_name_address(get_bsdtask_info(task));
2927 		}
2928 
2929 		/* get file's VM object */
2930 		file_object = object;
2931 		file_offset = m->vmp_offset;
2932 		for (shadow = file_object->shadow,
2933 		    shadow_depth = 0;
2934 		    shadow != VM_OBJECT_NULL;
2935 		    shadow = file_object->shadow,
2936 		    shadow_depth++) {
2937 			vm_object_lock_shared(shadow);
2938 			if (file_object != object) {
2939 				vm_object_unlock(file_object);
2940 			}
2941 			file_offset += file_object->vo_shadow_offset;
2942 			file_object = shadow;
2943 		}
2944 
2945 		mtime.tv_sec = 0;
2946 		mtime.tv_nsec = 0;
2947 		cs_mtime.tv_sec = 0;
2948 		cs_mtime.tv_nsec = 0;
2949 
2950 		/* get file's pathname and/or filename */
2951 		pathname = NULL;
2952 		filename = NULL;
2953 		pathname_len = 0;
2954 		filename_len = 0;
2955 		truncated_path = FALSE;
2956 		/* no pager -> no file -> no pathname, use "<nil>" in that case */
2957 		if (file_object->pager != NULL) {
2958 			pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2959 			if (pathname) {
2960 				pathname[0] = '\0';
2961 				pathname_len = __PATH_MAX;
2962 				filename = pathname + pathname_len;
2963 				filename_len = __PATH_MAX;
2964 
2965 				if (vnode_pager_get_object_name(file_object->pager,
2966 				    pathname,
2967 				    pathname_len,
2968 				    filename,
2969 				    filename_len,
2970 				    &truncated_path) == KERN_SUCCESS) {
2971 					/* safety first... */
2972 					pathname[__PATH_MAX - 1] = '\0';
2973 					filename[__PATH_MAX - 1] = '\0';
2974 
2975 					vnode_pager_get_object_mtime(file_object->pager,
2976 					    &mtime,
2977 					    &cs_mtime);
2978 				} else {
2979 					kfree_data(pathname, __PATH_MAX * 2);
2980 					pathname = NULL;
2981 					filename = NULL;
2982 					pathname_len = 0;
2983 					filename_len = 0;
2984 					truncated_path = FALSE;
2985 				}
2986 			}
2987 		}
2988 		printf("CODE SIGNING: process %d[%s]: "
2989 		    "rejecting invalid page at address 0x%llx "
2990 		    "from offset 0x%llx in file \"%s%s%s\" "
2991 		    "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2992 		    "(signed:%d validated:%d tainted:%d nx:%d "
2993 		    "wpmapped:%d dirty:%d depth:%d)\n",
2994 		    pid, procname, (addr64_t) vaddr,
2995 		    file_offset,
2996 		    (pathname ? pathname : "<nil>"),
2997 		    (truncated_path ? "/.../" : ""),
2998 		    (truncated_path ? filename : ""),
2999 		    cs_mtime.tv_sec, cs_mtime.tv_nsec,
3000 		    ((cs_mtime.tv_sec == mtime.tv_sec &&
3001 		    cs_mtime.tv_nsec == mtime.tv_nsec)
3002 		    ? "=="
3003 		    : "!="),
3004 		    mtime.tv_sec, mtime.tv_nsec,
3005 		    object->code_signed,
3006 		    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3007 		    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3008 		    VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3009 		    m->vmp_wpmapped,
3010 		    m->vmp_dirty,
3011 		    shadow_depth);
3012 
3013 		/*
3014 		 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
3015 		 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
3016 		 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
3017 		 * will deal with the segmentation fault.
3018 		 */
3019 		if (cs_killed) {
3020 			KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
3021 			    pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3022 
3023 			codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3024 			if (codesigning_exit_reason == NULL) {
3025 				printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
3026 			} else {
3027 				mach_vm_address_t data_addr = 0;
3028 				struct codesigning_exit_reason_info *ceri = NULL;
3029 				uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
3030 
3031 				if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
3032 					printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
3033 				} else {
3034 					if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
3035 					    EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
3036 						ceri = (struct codesigning_exit_reason_info *)data_addr;
3037 						static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
3038 
3039 						ceri->ceri_virt_addr = vaddr;
3040 						ceri->ceri_file_offset = file_offset;
3041 						if (pathname) {
3042 							strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
3043 						} else {
3044 							ceri->ceri_pathname[0] = '\0';
3045 						}
3046 						if (filename) {
3047 							strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
3048 						} else {
3049 							ceri->ceri_filename[0] = '\0';
3050 						}
3051 						ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
3052 						ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
3053 						ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
3054 						ceri->ceri_page_modtime_secs = mtime.tv_sec;
3055 						ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
3056 						ceri->ceri_object_codesigned = (object->code_signed);
3057 						ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
3058 						ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
3059 						ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
3060 						ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
3061 						ceri->ceri_page_slid = 0;
3062 						ceri->ceri_page_dirty = (m->vmp_dirty);
3063 						ceri->ceri_page_shadow_depth = shadow_depth;
3064 					} else {
3065 #if DEBUG || DEVELOPMENT
3066 						panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
3067 #else
3068 						printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
3069 #endif /* DEBUG || DEVELOPMENT */
3070 						/* Free the buffer */
3071 						os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
3072 					}
3073 				}
3074 			}
3075 
3076 			set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
3077 		}
3078 		if (panic_on_cs_killed &&
3079 		    object->object_is_shared_cache) {
3080 			char *tainted_contents;
3081 			vm_map_offset_t src_vaddr;
3082 			src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
3083 			tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
3084 			bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
3085 			printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
3086 			panic("CODE SIGNING: process %d[%s]: "
3087 			    "rejecting invalid page (phys#0x%x) at address 0x%llx "
3088 			    "from offset 0x%llx in file \"%s%s%s\" "
3089 			    "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3090 			    "(signed:%d validated:%d tainted:%d nx:%d"
3091 			    "wpmapped:%d dirty:%d depth:%d)\n",
3092 			    pid, procname,
3093 			    VM_PAGE_GET_PHYS_PAGE(m),
3094 			    (addr64_t) vaddr,
3095 			    file_offset,
3096 			    (pathname ? pathname : "<nil>"),
3097 			    (truncated_path ? "/.../" : ""),
3098 			    (truncated_path ? filename : ""),
3099 			    cs_mtime.tv_sec, cs_mtime.tv_nsec,
3100 			    ((cs_mtime.tv_sec == mtime.tv_sec &&
3101 			    cs_mtime.tv_nsec == mtime.tv_nsec)
3102 			    ? "=="
3103 			    : "!="),
3104 			    mtime.tv_sec, mtime.tv_nsec,
3105 			    object->code_signed,
3106 			    VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3107 			    VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3108 			    VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3109 			    m->vmp_wpmapped,
3110 			    m->vmp_dirty,
3111 			    shadow_depth);
3112 		}
3113 
3114 		if (file_object != object) {
3115 			vm_object_unlock(file_object);
3116 		}
3117 		if (pathname_len != 0) {
3118 			kfree_data(pathname, __PATH_MAX * 2);
3119 			pathname = NULL;
3120 			filename = NULL;
3121 		}
3122 	} else {
3123 		/* proceed with the invalid page */
3124 		kr = KERN_SUCCESS;
3125 		if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3126 		    !object->code_signed) {
3127 			/*
3128 			 * This page has not been (fully) validated but
3129 			 * does not belong to a code-signed object
3130 			 * so it should not be forcefully considered
3131 			 * as tainted.
3132 			 * We're just concerned about it here because
3133 			 * we've been asked to "execute" it but that
3134 			 * does not mean that it should cause other
3135 			 * accesses to fail.
3136 			 * This happens when a debugger sets a
3137 			 * breakpoint and we then execute code in
3138 			 * that page.  Marking the page as "tainted"
3139 			 * would cause any inspection tool ("leaks",
3140 			 * "vmmap", "CrashReporter", ...) to get killed
3141 			 * due to code-signing violation on that page,
3142 			 * even though they're just reading it and not
3143 			 * executing from it.
3144 			 */
3145 		} else {
3146 			/*
3147 			 * Page might have been tainted before or not;
3148 			 * now it definitively is. If the page wasn't
3149 			 * tainted, we must disconnect it from all
3150 			 * pmaps later, to force existing mappings
3151 			 * through that code path for re-consideration
3152 			 * of the validity of that page.
3153 			 */
3154 			if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3155 				*must_disconnect = TRUE;
3156 				VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3157 			}
3158 		}
3159 		cs_enter_tainted_accepted++;
3160 	}
3161 	if (kr != KERN_SUCCESS) {
3162 		if (cs_debug) {
3163 			printf("CODESIGNING: vm_fault_enter(0x%llx): "
3164 			    "*** INVALID PAGE ***\n",
3165 			    (long long)vaddr);
3166 		}
3167 #if !SECURE_KERNEL
3168 		if (cs_enforcement_panic) {
3169 			panic("CODESIGNING: panicking on invalid page");
3170 		}
3171 #endif
3172 	}
3173 	return kr;
3174 }
3175 
3176 /*
3177  * Check that the code signature is valid for the given page being inserted into
3178  * the pmap.
3179  *
3180  * @param must_disconnect This value will be set to true if the caller must disconnect
3181  * this page.
3182  * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3183  */
3184 static kern_return_t
3185 vm_fault_validate_cs(
3186 	bool cs_bypass,
3187 	vm_object_t object,
3188 	vm_page_t m,
3189 	pmap_t pmap,
3190 	vm_map_offset_t vaddr,
3191 	vm_prot_t prot,
3192 	vm_prot_t caller_prot,
3193 	vm_map_size_t fault_page_size,
3194 	vm_map_offset_t fault_phys_offset,
3195 	vm_object_fault_info_t fault_info,
3196 	bool *must_disconnect)
3197 {
3198 	bool map_is_switched, map_is_switch_protected, cs_violation;
3199 	kern_return_t kr;
3200 	/* Validate code signature if necessary. */
3201 	map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3202 	    (pmap == vm_map_pmap(current_thread()->map)));
3203 	map_is_switch_protected = current_thread()->map->switch_protect;
3204 	kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3205 	    prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3206 	    map_is_switched, map_is_switch_protected, &cs_violation);
3207 	if (kr != KERN_SUCCESS) {
3208 		return kr;
3209 	}
3210 	if (cs_violation) {
3211 		kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3212 		    fault_page_size, fault_phys_offset,
3213 		    map_is_switched, map_is_switch_protected, must_disconnect);
3214 	}
3215 	return kr;
3216 }
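
/*
 * Illustrative sketch (not part of the original source): how a caller of
 * vm_fault_validate_cs() is expected to honor its contract -- abort the
 * fault on a non-KERN_SUCCESS return and disconnect the page from all pmaps
 * when must_disconnect comes back true.  The surrounding variables are
 * assumed to be set up as in vm_fault_enter_prepare() later in this file;
 * the snippet is kept under "#if 0" so it is not compiled.
 */
#if 0
	bool must_disconnect = false;

	kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
	    prot, caller_prot, fault_page_size, fault_phys_offset,
	    fault_info, &must_disconnect);
	if (kr != KERN_SUCCESS) {
		return kr;              /* abort the page fault */
	}
	if (must_disconnect) {
		/* force other mappings to re-validate this page */
		pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
	}
#endif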
3217 
3218 /*
3219  * Enqueue the page on the appropriate paging queue.
3220  */
3221 static void
3222 vm_fault_enqueue_page(
3223 	vm_object_t object,
3224 	vm_page_t m,
3225 	bool wired,
3226 	bool change_wiring,
3227 	vm_tag_t wire_tag,
3228 	bool no_cache,
3229 	int *type_of_fault,
3230 	kern_return_t kr)
3231 {
3232 	assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3233 	boolean_t       page_queues_locked = FALSE;
3234 	boolean_t       previously_pmapped = m->vmp_pmapped;
3235 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED()   \
3236 MACRO_BEGIN                                     \
3237 	if (! page_queues_locked) {             \
3238 	        page_queues_locked = TRUE;      \
3239 	        vm_page_lockspin_queues();      \
3240 	}                                       \
3241 MACRO_END
3242 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()     \
3243 MACRO_BEGIN                                     \
3244 	if (page_queues_locked) {               \
3245 	        page_queues_locked = FALSE;     \
3246 	        vm_page_unlock_queues();        \
3247 	}                                       \
3248 MACRO_END
3249 
3250 	vm_page_update_special_state(m);
3251 	if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3252 		/*
3253 		 * Compressor pages are neither wired
3254 		 * nor pageable and should never change.
3255 		 */
3256 		assert(object == compressor_object);
3257 	} else if (change_wiring) {
3258 		__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3259 
3260 		if (wired) {
3261 			if (kr == KERN_SUCCESS) {
3262 				vm_page_wire(m, wire_tag, TRUE);
3263 			}
3264 		} else {
3265 			vm_page_unwire(m, TRUE);
3266 		}
3267 		/* we keep the page queues lock, if we need it later */
3268 	} else {
3269 		if (object->internal == TRUE) {
3270 			/*
3271 			 * don't allow anonymous pages on
3272 			 * the speculative queues
3273 			 */
3274 			no_cache = FALSE;
3275 		}
3276 		if (kr != KERN_SUCCESS) {
3277 			__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3278 			vm_page_deactivate(m);
3279 			/* we keep the page queues lock, if we need it later */
3280 		} else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3281 		    (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3282 		    (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3283 		    ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3284 		    !VM_PAGE_WIRED(m)) {
3285 			if (vm_page_local_q &&
3286 			    (*type_of_fault == DBG_COW_FAULT ||
3287 			    *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3288 				struct vpl      *lq;
3289 				uint32_t        lid;
3290 
3291 				assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3292 
3293 				__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3294 				vm_object_lock_assert_exclusive(object);
3295 
3296 				/*
3297 				 * we got a local queue to stuff this
3298 				 * new page on...
3299 				 * it's safe to manipulate local and
3300 				 * local_id at this point since we're
3301 				 * behind an exclusive object lock and
3302 				 * the page is not on any global queue.
3303 				 *
3304 				 * we'll use the current cpu number to
3305 				 * select the queue... note that we don't
3306 				 * need to disable preemption... we're
3307 				 * going to be behind the local queue's
3308 				 * lock to do the real work
3309 				 */
3310 				lid = cpu_number();
3311 
3312 				lq = zpercpu_get_cpu(vm_page_local_q, lid);
3313 
3314 				VPL_LOCK(&lq->vpl_lock);
3315 
3316 				vm_page_check_pageable_safe(m);
3317 				vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3318 				m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3319 				m->vmp_local_id = (uint16_t)lid;
3320 				lq->vpl_count++;
3321 
3322 				if (object->internal) {
3323 					lq->vpl_internal_count++;
3324 				} else {
3325 					lq->vpl_external_count++;
3326 				}
3327 
3328 				VPL_UNLOCK(&lq->vpl_lock);
3329 
3330 				if (lq->vpl_count > vm_page_local_q_soft_limit) {
3331 					/*
3332 					 * we're beyond the soft limit
3333 					 * for the local queue
3334 					 * vm_page_reactivate_local will
3335 					 * 'try' to take the global page
3336 					 * queue lock... if it can't
3337 					 * that's ok... we'll let the
3338 					 * queue continue to grow up
3339 					 * to the hard limit... at that
3340 					 * point we'll wait for the
3341 					 * lock... once we've got the
3342 					 * lock, we'll transfer all of
3343 					 * the pages from the local
3344 					 * queue to the global active
3345 					 * queue
3346 					 */
3347 					vm_page_reactivate_local(lid, FALSE, FALSE);
3348 				}
3349 			} else {
3350 				__VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3351 
3352 				/*
3353 				 * test again now that we hold the
3354 				 * page queue lock
3355 				 */
3356 				if (!VM_PAGE_WIRED(m)) {
3357 					if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3358 						vm_page_queues_remove(m, FALSE);
3359 
3360 						VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3361 						VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3362 					}
3363 
3364 					if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3365 					    no_cache) {
3366 						/*
3367 						 * If this is a no_cache mapping
3368 						 * and the page has never been
3369 						 * mapped before or was
3370 						 * previously a no_cache page,
3371 						 * then we want to leave pages
3372 						 * in the speculative state so
3373 						 * that they can be readily
3374 						 * recycled if free memory runs
3375 						 * low.  Otherwise the page is
3376 						 * activated as normal.
3377 						 */
3378 
3379 						if (no_cache &&
3380 						    (!previously_pmapped ||
3381 						    m->vmp_no_cache)) {
3382 							m->vmp_no_cache = TRUE;
3383 
3384 							if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3385 								vm_page_speculate(m, FALSE);
3386 							}
3387 						} else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3388 							vm_page_activate(m);
3389 						}
3390 					}
3391 				}
3392 				/* we keep the page queues lock, if we need it later */
3393 			}
3394 		}
3395 	}
3396 	/* we're done with the page queues lock, if we ever took it */
3397 	__VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3398 }
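
/*
 * Illustrative sketch (not part of the original source): the "take the page
 * queues lock only if we actually need it" pattern implemented by the
 * __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() / __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED()
 * macros above, written out as a hypothetical standalone helper.  Kept under
 * "#if 0" so it is not compiled.
 */
#if 0
static void
example_lazy_queues_lock(vm_page_t m, bool deactivate)
{
	boolean_t queues_locked = FALSE;

	if (deactivate) {
		if (!queues_locked) {
			queues_locked = TRUE;
			vm_page_lockspin_queues();
		}
		vm_page_deactivate(m);
	}
	/* drop the lock only if we actually took it */
	if (queues_locked) {
		vm_page_unlock_queues();
	}
}
#endif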
3399 
3400 /*
3401  * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3402  * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys()
3403  * before being inserted into the pmap.
3404  */
3405 static bool
3406 vm_fault_enter_set_mapped(
3407 	vm_object_t object,
3408 	vm_page_t m,
3409 	vm_prot_t prot,
3410 	vm_prot_t fault_type)
3411 {
3412 	bool page_needs_sync = false;
3413 	/*
3414 	 * NOTE: we may only hold the vm_object lock SHARED
3415 	 * at this point, so we need the phys_page lock to
3416 	 * properly serialize updating the pmapped and
3417 	 * xpmapped bits
3418 	 */
3419 	if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3420 		ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3421 
3422 		pmap_lock_phys_page(phys_page);
3423 		m->vmp_pmapped = TRUE;
3424 
3425 		if (!m->vmp_xpmapped) {
3426 			m->vmp_xpmapped = TRUE;
3427 
3428 			pmap_unlock_phys_page(phys_page);
3429 
3430 			if (!object->internal) {
3431 				OSAddAtomic(1, &vm_page_xpmapped_external_count);
3432 			}
3433 
3434 #if defined(__arm64__)
3435 			page_needs_sync = true;
3436 #else
3437 			if (object->internal &&
3438 			    object->pager != NULL) {
3439 				/*
3440 				 * This page could have been
3441 				 * uncompressed by the
3442 				 * compressor pager and its
3443 				 * contents might be only in
3444 				 * the data cache.
3445 				 * Since it's being mapped for
3446 			 * "execute" for the first time,
3447 				 * make sure the icache is in
3448 				 * sync.
3449 				 */
3450 				assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3451 				page_needs_sync = true;
3452 			}
3453 #endif
3454 		} else {
3455 			pmap_unlock_phys_page(phys_page);
3456 		}
3457 	} else {
3458 		if (m->vmp_pmapped == FALSE) {
3459 			ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3460 
3461 			pmap_lock_phys_page(phys_page);
3462 			m->vmp_pmapped = TRUE;
3463 			pmap_unlock_phys_page(phys_page);
3464 		}
3465 	}
3466 
3467 	if (fault_type & VM_PROT_WRITE) {
3468 		if (m->vmp_wpmapped == FALSE) {
3469 			vm_object_lock_assert_exclusive(object);
3470 			if (!object->internal && object->pager) {
3471 				task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3472 			}
3473 			m->vmp_wpmapped = TRUE;
3474 		}
3475 	}
3476 	return page_needs_sync;
3477 }
3478 
3479 /*
3480  * wrappers for pmap_enter_options()
3481  */
3482 kern_return_t
3483 pmap_enter_object_options_check(
3484 	pmap_t           pmap,
3485 	vm_map_address_t virtual_address,
3486 	vm_map_offset_t  fault_phys_offset,
3487 	vm_object_t      obj,
3488 	ppnum_t          pn,
3489 	vm_prot_t        protection,
3490 	vm_prot_t        fault_type,
3491 	boolean_t        wired,
3492 	unsigned int     options)
3493 {
3494 	unsigned int flags = 0;
3495 	unsigned int extra_options = 0;
3496 
3497 	if (obj->internal) {
3498 		extra_options |= PMAP_OPTIONS_INTERNAL;
3499 	}
3500 	pmap_paddr_t physical_address = (pmap_paddr_t)ptoa(pn) + fault_phys_offset;
3501 
3502 
3503 	return pmap_enter_options_addr(pmap,
3504 	           virtual_address,
3505 	           physical_address,
3506 	           protection,
3507 	           fault_type,
3508 	           flags,
3509 	           wired,
3510 	           options | extra_options,
3511 	           NULL,
3512 	           PMAP_MAPPING_TYPE_INFER);
3513 }
3514 
3515 kern_return_t
3516 pmap_enter_options_check(
3517 	pmap_t           pmap,
3518 	vm_map_address_t virtual_address,
3519 	vm_map_offset_t  fault_phys_offset,
3520 	vm_page_t        page,
3521 	vm_prot_t        protection,
3522 	vm_prot_t        fault_type,
3523 	boolean_t        wired,
3524 	unsigned int     options)
3525 {
3526 	if (page->vmp_error) {
3527 		return KERN_MEMORY_FAILURE;
3528 	}
3529 	vm_object_t obj = VM_PAGE_OBJECT(page);
3530 	if (page->vmp_reusable || obj->all_reusable) {
3531 		options |= PMAP_OPTIONS_REUSABLE;
3532 	}
3533 	return pmap_enter_object_options_check(
3534 		pmap,
3535 		virtual_address,
3536 		fault_phys_offset,
3537 		obj,
3538 		VM_PAGE_GET_PHYS_PAGE(page),
3539 		protection,
3540 		fault_type,
3541 		wired,
3542 		options);
3543 }
3544 
3545 kern_return_t
3546 pmap_enter_check(
3547 	pmap_t           pmap,
3548 	vm_map_address_t virtual_address,
3549 	vm_page_t        page,
3550 	vm_prot_t        protection,
3551 	vm_prot_t        fault_type,
3552 	boolean_t        wired)
3553 {
3554 	return pmap_enter_options_check(pmap,
3555 	           virtual_address,
3556 	           0 /* fault_phys_offset */,
3557 	           page,
3558 	           protection,
3559 	           fault_type,
3560 	           wired,
3561 	           0 /* options */);
3562 }
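
/*
 * Illustrative sketch (not part of the original source): using the
 * pmap_enter_check() wrapper above to establish a simple, non-wired,
 * read-only mapping for a page the caller already holds.  The helper name
 * and its arguments are hypothetical; kept under "#if 0" so it is not
 * compiled.
 */
#if 0
static kern_return_t
example_enter_readonly_page(
	pmap_t           pmap,
	vm_map_address_t vaddr,
	vm_page_t        page)
{
	/* read-only protection, read fault, not wired */
	return pmap_enter_check(pmap, vaddr, page,
	           VM_PROT_READ,        /* protection */
	           VM_PROT_READ,        /* fault_type */
	           FALSE);              /* wired */
}
#endif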
3563 
3564 /*
3565  * Try to enter the given page into the pmap.
3566  * Will retry without execute permission if the code signing monitor is enabled and
3567  * we encounter a codesigning failure on a non-execute fault.
3568  */
3569 static kern_return_t
3570 vm_fault_attempt_pmap_enter(
3571 	pmap_t pmap,
3572 	vm_map_offset_t vaddr,
3573 	vm_map_size_t fault_page_size,
3574 	vm_map_offset_t fault_phys_offset,
3575 	vm_page_t m,
3576 	vm_prot_t *prot,
3577 	vm_prot_t caller_prot,
3578 	vm_prot_t fault_type,
3579 	bool wired,
3580 	int pmap_options)
3581 {
3582 #if !CODE_SIGNING_MONITOR
3583 #pragma unused(caller_prot)
3584 #endif /* !CODE_SIGNING_MONITOR */
3585 
3586 	kern_return_t kr;
3587 	if (fault_page_size != PAGE_SIZE) {
3588 		DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3589 		assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3590 		    fault_phys_offset < PAGE_SIZE),
3591 		    "0x%llx\n", (uint64_t)fault_phys_offset);
3592 	} else {
3593 		assertf(fault_phys_offset == 0,
3594 		    "0x%llx\n", (uint64_t)fault_phys_offset);
3595 	}
3596 
3597 	kr = pmap_enter_options_check(pmap, vaddr,
3598 	    fault_phys_offset,
3599 	    m, *prot, fault_type,
3600 	    wired, pmap_options);
3601 
3602 #if CODE_SIGNING_MONITOR
3603 	/*
3604 	 * Retry without execute permission if we encountered a codesigning
3605 	 * failure on a non-execute fault.  This allows applications which
3606 	 * don't actually need to execute code to still map it for read access.
3607 	 */
3608 	if (kr == KERN_CODESIGN_ERROR &&
3609 	    csm_enabled() &&
3610 	    (*prot & VM_PROT_EXECUTE) &&
3611 	    !(caller_prot & VM_PROT_EXECUTE)) {
3612 		*prot &= ~VM_PROT_EXECUTE;
3613 		kr = pmap_enter_options_check(pmap, vaddr,
3614 		    fault_phys_offset,
3615 		    m, *prot, fault_type,
3616 		    wired, pmap_options);
3617 	}
3618 #endif /* CODE_SIGNING_MONITOR */
3619 
3620 	return kr;
3621 }
3622 
3623 /*
3624  * Enter the given page into the pmap.
3625  * The map must be locked shared.
3626  * The vm object must NOT be locked.
3627  *
3628  * @param need_retry if not null, avoid making a (potentially) blocking call into
3629  * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3630  */
3631 static kern_return_t
3632 vm_fault_pmap_enter(
3633 	pmap_t pmap,
3634 	vm_map_offset_t vaddr,
3635 	vm_map_size_t fault_page_size,
3636 	vm_map_offset_t fault_phys_offset,
3637 	vm_page_t m,
3638 	vm_prot_t *prot,
3639 	vm_prot_t caller_prot,
3640 	vm_prot_t fault_type,
3641 	bool wired,
3642 	int pmap_options,
3643 	boolean_t *need_retry)
3644 {
3645 	kern_return_t kr;
3646 	if (need_retry != NULL) {
3647 		/*
3648 		 * Although we don't hold a lock on this object, we hold a lock
3649 		 * on the top object in the chain. To prevent a deadlock, we
3650 		 * can't allow the pmap layer to block.
3651 		 */
3652 		pmap_options |= PMAP_OPTIONS_NOWAIT;
3653 	}
3654 	kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3655 	    fault_page_size, fault_phys_offset,
3656 	    m, prot, caller_prot, fault_type, wired, pmap_options);
3657 	if (kr == KERN_RESOURCE_SHORTAGE) {
3658 		if (need_retry) {
3659 			/*
3660 			 * There's nothing we can do here since we hold the
3661 			 * lock on the top object in the chain. The caller
3662 			 * will need to deal with this by dropping that lock and retrying.
3663 			 */
3664 			*need_retry = TRUE;
3665 			vm_pmap_enter_retried++;
3666 		}
3667 	}
3668 	return kr;
3669 }
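
/*
 * Illustrative sketch (not part of the original source): the caller-side
 * pattern for the "need_retry" contract described above.  When the pmap
 * layer would have to block, vm_fault_pmap_enter() returns
 * KERN_RESOURCE_SHORTAGE and sets need_retry, and the caller must drop the
 * lock it holds on the top object and redrive the fault.  Variable names
 * mirror vm_fault_internal() but the snippet itself is hypothetical; kept
 * under "#if 0" so it is not compiled.
 */
#if 0
	boolean_t need_retry = FALSE;

	kr = vm_fault_pmap_enter(pmap, vaddr,
	    fault_page_size, fault_phys_offset,
	    m, &prot, caller_prot, fault_type, wired,
	    pmap_options, &need_retry);
	if (kr == KERN_RESOURCE_SHORTAGE && need_retry) {
		vm_object_unlock(top_object);
		goto RetryFault;        /* drop locks and retry the fault */
	}
#endif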
3670 
3671 /*
3672  * Enter the given page into the pmap.
3673  * The vm map must be locked shared.
3674  * The vm object must be locked exclusive, unless this is a soft fault.
3675  * For a soft fault, the object must be locked shared or exclusive.
3676  *
3677  * @param need_retry if not null, avoid making a (potentially) blocking call into
3678  * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3679  */
3680 static kern_return_t
3681 vm_fault_pmap_enter_with_object_lock(
3682 	vm_object_t object,
3683 	pmap_t pmap,
3684 	vm_map_offset_t vaddr,
3685 	vm_map_size_t fault_page_size,
3686 	vm_map_offset_t fault_phys_offset,
3687 	vm_page_t m,
3688 	vm_prot_t *prot,
3689 	vm_prot_t caller_prot,
3690 	vm_prot_t fault_type,
3691 	bool wired,
3692 	int pmap_options,
3693 	boolean_t *need_retry,
3694 	uint8_t *object_lock_type)
3695 {
3696 	kern_return_t kr;
3697 	/*
3698 	 * Prevent a deadlock by not
3699 	 * holding the object lock if we need to wait for a page in
3700 	 * pmap_enter() - <rdar://problem/7138958>
3701 	 */
3702 	kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3703 	    fault_page_size, fault_phys_offset,
3704 	    m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3705 #if __x86_64__
3706 	if (kr == KERN_INVALID_ARGUMENT &&
3707 	    pmap == PMAP_NULL &&
3708 	    wired) {
3709 		/*
3710 		 * Wiring a page in a pmap-less VM map:
3711 		 * VMware's "vmmon" kernel extension does this
3712 		 * to grab pages.
3713 		 * Let it proceed even though the PMAP_ENTER() failed.
3714 		 */
3715 		kr = KERN_SUCCESS;
3716 	}
3717 #endif /* __x86_64__ */
3718 
3719 	if (kr == KERN_RESOURCE_SHORTAGE) {
3720 		if (need_retry) {
3721 			/*
3722 			 * this will be non-null in the case where we hold the lock
3723 			 * on the top-object in this chain... we can't just drop
3724 			 * the lock on the object we're inserting the page into
3725 			 * and recall the PMAP_ENTER since we can still cause
3726 			 * a deadlock if one of the critical paths tries to
3727 			 * acquire the lock on the top-object and we're blocked
3728 			 * in PMAP_ENTER waiting for memory... our only recourse
3729 			 * is to deal with it at a higher level where we can
3730 			 * drop both locks.
3731 			 */
3732 			*need_retry = TRUE;
3733 			vm_pmap_enter_retried++;
3734 			goto done;
3735 		}
3736 		/*
3737 		 * The nonblocking version of pmap_enter did not succeed,
3738 		 * and we don't need to drop other locks and retry
3739 		 * at the level above us, so
3740 		 * use the blocking version instead.  This requires marking
3741 		 * the page busy and unlocking the object.
3742 		 */
3743 		boolean_t was_busy = m->vmp_busy;
3744 
3745 		vm_object_lock_assert_exclusive(object);
3746 
3747 		m->vmp_busy = TRUE;
3748 		vm_object_unlock(object);
3749 
3750 		kr = pmap_enter_options_check(pmap, vaddr,
3751 		    fault_phys_offset,
3752 		    m, *prot, fault_type,
3753 		    wired, pmap_options);
3754 
3755 		assert(VM_PAGE_OBJECT(m) == object);
3756 
3757 		/* Take the object lock again. */
3758 		vm_object_lock(object);
3759 
3760 		/* If the page was busy, someone else will wake it up.
3761 		 * Otherwise, we have to do it now. */
3762 		assert(m->vmp_busy);
3763 		if (!was_busy) {
3764 			vm_page_wakeup_done(object, m);
3765 		}
3766 		vm_pmap_enter_blocked++;
3767 	}
3768 
3769 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
3770 	if ((*prot & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
3771 		if (*object_lock_type == OBJECT_LOCK_SHARED) {
3772 			boolean_t was_busy = m->vmp_busy;
3773 			m->vmp_busy = TRUE;
3774 
3775 			*object_lock_type = OBJECT_LOCK_EXCLUSIVE;
3776 
3777 			if (vm_object_lock_upgrade(object) == FALSE) {
3778 				vm_object_lock(object);
3779 			}
3780 
3781 			if (!was_busy) {
3782 				vm_page_wakeup_done(object, m);
3783 			}
3784 		}
3785 		vm_object_lock_assert_exclusive(object);
3786 		vm_page_lockspin_queues();
3787 		m->vmp_unmodified_ro = false;
3788 		vm_page_unlock_queues();
3789 		os_atomic_dec(&compressor_ro_uncompressed, relaxed);
3790 
3791 		vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(m), m->vmp_offset);
3792 	}
3793 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3794 #pragma unused(object_lock_type)
3795 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3796 
3797 done:
3798 	return kr;
3799 }
3800 
3801 /*
3802  * Prepare to enter a page into the pmap by checking CS, protection bits,
3803  * and setting mapped bits on the page_t.
3804  * Does not modify the page's paging queue.
3805  *
3806  * page queue lock must NOT be held
3807  * m->vmp_object must be locked
3808  *
3809  * NOTE: m->vmp_object could be locked "shared" only if we are called
3810  * from vm_fault() as part of a soft fault.
3811  */
3812 static kern_return_t
3813 vm_fault_enter_prepare(
3814 	vm_page_t m,
3815 	pmap_t pmap,
3816 	vm_map_offset_t vaddr,
3817 	vm_prot_t *prot,
3818 	vm_prot_t caller_prot,
3819 	vm_map_size_t fault_page_size,
3820 	vm_map_offset_t fault_phys_offset,
3821 	vm_prot_t fault_type,
3822 	vm_object_fault_info_t fault_info,
3823 	int *type_of_fault,
3824 	bool *page_needs_data_sync)
3825 {
3826 	kern_return_t   kr;
3827 	bool            is_tainted = false;
3828 	vm_object_t     object;
3829 	boolean_t       cs_bypass = fault_info->cs_bypass;
3830 
3831 	object = VM_PAGE_OBJECT(m);
3832 
3833 	vm_object_lock_assert_held(object);
3834 
3835 #if KASAN
3836 	if (pmap == kernel_pmap) {
3837 		kasan_notify_address(vaddr, PAGE_SIZE);
3838 	}
3839 #endif
3840 
3841 #if CODE_SIGNING_MONITOR
3842 	if (csm_address_space_exempt(pmap) == KERN_SUCCESS) {
3843 		cs_bypass = TRUE;
3844 	}
3845 #endif
3846 
3847 	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3848 
3849 	if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3850 		vm_object_lock_assert_exclusive(object);
3851 	} else if ((fault_type & VM_PROT_WRITE) == 0 &&
3852 	    !fault_info->fi_change_wiring &&
3853 	    (!m->vmp_wpmapped
3854 #if VM_OBJECT_ACCESS_TRACKING
3855 	    || object->access_tracking
3856 #endif /* VM_OBJECT_ACCESS_TRACKING */
3857 	    )) {
3858 		/*
3859 		 * This is not a "write" fault, so we
3860 		 * might not have taken the object lock
3861 		 * exclusively and we might not be able
3862 		 * to update the "wpmapped" bit in
3863 		 * vm_fault_enter().
3864 		 * Let's just grant read access to
3865 		 * the page for now and we'll
3866 		 * soft-fault again if we need write
3867 		 * access later...
3868 		 */
3869 
3870 		/* This had better not be a JIT page. */
3871 		if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3872 			/*
3873 			 * This pmap enforces extra constraints for this set of
3874 			 * protections, so we can't modify them.
3875 			 */
3876 			if (!cs_bypass) {
3877 				panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x !cs_bypass",
3878 				    __FUNCTION__, pmap, (uint64_t)vaddr,
3879 				    *prot, fault_info->pmap_options);
3880 			}
3881 		} else {
3882 			*prot &= ~VM_PROT_WRITE;
3883 		}
3884 	}
3885 	if (m->vmp_pmapped == FALSE) {
3886 		if (m->vmp_clustered) {
3887 			if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3888 				/*
3889 				 * found it in the cache, but this
3890 				 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3891 				 * so it must have come in as part of
3892 				 * a cluster... account 1 pagein against it
3893 				 */
3894 				if (object->internal) {
3895 					*type_of_fault = DBG_PAGEIND_FAULT;
3896 				} else {
3897 					*type_of_fault = DBG_PAGEINV_FAULT;
3898 				}
3899 
3900 				VM_PAGE_COUNT_AS_PAGEIN(m);
3901 			}
3902 			VM_PAGE_CONSUME_CLUSTERED(m);
3903 		}
3904 	}
3905 
3906 	if (*type_of_fault != DBG_COW_FAULT) {
3907 		DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3908 
3909 		if (pmap == kernel_pmap) {
3910 			DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3911 		}
3912 	}
3913 
3914 	kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3915 	    *prot, caller_prot, fault_page_size, fault_phys_offset,
3916 	    fault_info, &is_tainted);
3917 	if (kr == KERN_SUCCESS) {
3918 		/*
3919 		 * We either have a good page, or a tainted page that has been accepted by the process.
3920 		 * In both cases the page will be entered into the pmap.
3921 		 */
3922 		*page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3923 		if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3924 			/*
3925 			 * This page is tainted but we're inserting it anyways.
3926 			 * Since it's writeable, we need to disconnect it from other pmaps
3927 			 * now so those processes can take note.
3928 			 */
3929 
3930 			/*
3931 			 * We can only get here
3932 			 * because of the CSE logic
3933 			 */
3934 			assert(pmap_get_vm_map_cs_enforced(pmap));
3935 			pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3936 			/*
3937 			 * If we are faulting for a write, we can clear
3938 			 * the execute bit - that will ensure the page is
3939 			 * checked again before being executable, which
3940 			 * protects against a map switch.
3941 			 * This only happens the first time the page
3942 			 * gets tainted, so we won't get stuck here
3943 			 * to make an already writeable page executable.
3944 			 */
3945 			if (!cs_bypass) {
3946 				if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3947 					/*
3948 					 * This pmap enforces extra constraints
3949 					 * for this set of protections, so we
3950 					 * can't change the protections.
3951 					 */
3952 					panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
3953 					    __FUNCTION__, pmap,
3954 					    (uint64_t)vaddr, *prot,
3955 					    fault_info->pmap_options);
3956 				}
3957 				*prot &= ~VM_PROT_EXECUTE;
3958 			}
3959 		}
3960 		assert(VM_PAGE_OBJECT(m) == object);
3961 
3962 #if VM_OBJECT_ACCESS_TRACKING
3963 		if (object->access_tracking) {
3964 			DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3965 			if (fault_type & VM_PROT_WRITE) {
3966 				object->access_tracking_writes++;
3967 				vm_object_access_tracking_writes++;
3968 			} else {
3969 				object->access_tracking_reads++;
3970 				vm_object_access_tracking_reads++;
3971 			}
3972 		}
3973 #endif /* VM_OBJECT_ACCESS_TRACKING */
3974 	}
3975 
3976 	return kr;
3977 }
3978 
3979 /*
3980  * page queue lock must NOT be held
3981  * m->vmp_object must be locked
3982  *
3983  * NOTE: m->vmp_object could be locked "shared" only if we are called
3984  * from vm_fault() as part of a soft fault.  If so, we must be
3985  * careful not to modify the VM object in any way that is not
3986  * legal under a shared lock...
3987  */
3988 kern_return_t
3989 vm_fault_enter(
3990 	vm_page_t m,
3991 	pmap_t pmap,
3992 	vm_map_offset_t vaddr,
3993 	vm_map_size_t fault_page_size,
3994 	vm_map_offset_t fault_phys_offset,
3995 	vm_prot_t prot,
3996 	vm_prot_t caller_prot,
3997 	boolean_t wired,
3998 	vm_tag_t  wire_tag,
3999 	vm_object_fault_info_t fault_info,
4000 	boolean_t *need_retry,
4001 	int *type_of_fault,
4002 	uint8_t *object_lock_type)
4003 {
4004 	kern_return_t   kr;
4005 	vm_object_t     object;
4006 	bool            page_needs_data_sync;
4007 	vm_prot_t       fault_type;
4008 	int             pmap_options = fault_info->pmap_options;
4009 
4010 	if (vm_page_is_guard(m)) {
4011 		return KERN_SUCCESS;
4012 	}
4013 
4014 	fault_type = fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot;
4015 
4016 	assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
4017 	kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
4018 	    fault_page_size, fault_phys_offset, fault_type,
4019 	    fault_info, type_of_fault, &page_needs_data_sync);
4020 	object = VM_PAGE_OBJECT(m);
4021 
4022 	vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
4023 
4024 	if (kr == KERN_SUCCESS) {
4025 		if (page_needs_data_sync) {
4026 			pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
4027 		}
4028 
4029 		if (fault_info->fi_xnu_user_debug && !object->code_signed) {
4030 			pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
4031 		}
4032 
4033 
4034 		kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
4035 		    fault_page_size, fault_phys_offset, m,
4036 		    &prot, caller_prot, fault_type, wired, pmap_options, need_retry, object_lock_type);
4037 	}
4038 
4039 	return kr;
4040 }
4041 
4042 kern_return_t
4043 vm_pre_fault_with_info(
4044 	vm_map_t                map,
4045 	vm_map_offset_t         vaddr,
4046 	vm_prot_t               prot,
4047 	vm_object_fault_info_t  fault_info)
4048 {
4049 	assert(fault_info != NULL);
4050 	if (pmap_find_phys(map->pmap, vaddr) == 0) {
4051 		return vm_fault_internal(map,
4052 		           vaddr,               /* vaddr */
4053 		           prot,                /* fault_type */
4054 		           VM_KERN_MEMORY_NONE, /* tag - not wiring */
4055 		           NULL,                /* caller_pmap */
4056 		           0,                   /* caller_pmap_addr */
4057 		           NULL,
4058 		           fault_info);
4059 	}
4060 	return KERN_SUCCESS;
4061 }
4062 
4063 /*
4064  * Fault on the given vaddr iff the page is not already entered in the pmap.
4065  */
4066 kern_return_t
4067 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
4068 {
4069 	struct vm_object_fault_info fault_info = {
4070 		.interruptible = THREAD_UNINT,
4071 	};
4072 	return vm_pre_fault_with_info(current_map(), vaddr, prot, &fault_info);
4073 }
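
/*
 * Illustrative sketch (not part of the original source): pre-faulting a
 * caller-supplied range one page at a time with vm_pre_fault(), so that
 * later accesses do not have to take the fault path.  "start" and "size"
 * are hypothetical; kept under "#if 0" so it is not compiled.
 */
#if 0
	vm_map_offset_t addr;

	for (addr = vm_map_trunc_page(start, PAGE_MASK);
	    addr < start + size;
	    addr += PAGE_SIZE) {
		(void) vm_pre_fault(addr, VM_PROT_READ);
	}
#endif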
4074 
4075 /*
4076  *	Routine:	vm_fault
4077  *	Purpose:
4078  *		Handle page faults, including pseudo-faults
4079  *		used to change the wiring status of pages.
4080  *	Returns:
4081  *		Explicit continuations have been removed.
4082  *	Implementation:
4083  *		vm_fault and vm_fault_page save mucho state
4084  *		in the moral equivalent of a closure.  The state
4085  *		structure is allocated when first entering vm_fault
4086  *		and deallocated when leaving vm_fault.
4087  */
4088 
4089 extern uint64_t get_current_unique_pid(void);
4090 
4091 unsigned long vm_fault_collapse_total = 0;
4092 unsigned long vm_fault_collapse_skipped = 0;
4093 
4094 
4095 kern_return_t
4096 vm_fault_external(
4097 	vm_map_t        map,
4098 	vm_map_offset_t vaddr,
4099 	vm_prot_t       fault_type,
4100 	boolean_t       change_wiring,
4101 	int             interruptible,
4102 	pmap_t          caller_pmap,
4103 	vm_map_offset_t caller_pmap_addr)
4104 {
4105 	struct vm_object_fault_info fault_info = {
4106 		.interruptible = interruptible,
4107 		.fi_change_wiring = change_wiring,
4108 	};
4109 
4110 	return vm_fault_internal(map, vaddr, fault_type,
4111 	           change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
4112 	           caller_pmap, caller_pmap_addr,
4113 	           NULL, &fault_info);
4114 }
4115 
4116 kern_return_t
4117 vm_fault(
4118 	vm_map_t        map,
4119 	vm_map_offset_t vaddr,
4120 	vm_prot_t       fault_type,
4121 	boolean_t       change_wiring,
4122 	vm_tag_t        wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4123 	int             interruptible,
4124 	pmap_t          caller_pmap,
4125 	vm_map_offset_t caller_pmap_addr)
4126 {
4127 	struct vm_object_fault_info fault_info = {
4128 		.interruptible = interruptible,
4129 		.fi_change_wiring = change_wiring,
4130 	};
4131 
4132 	return vm_fault_internal(map, vaddr, fault_type, wire_tag,
4133 	           caller_pmap, caller_pmap_addr,
4134 	           NULL, &fault_info);
4135 }
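
/*
 * Illustrative sketch (not part of the original source): resolving a write
 * fault on behalf of the current task with the vm_fault() wrapper above.
 * "fault_addr" is hypothetical; kept under "#if 0" so it is not compiled.
 */
#if 0
	kern_return_t kr;

	kr = vm_fault(current_map(),
	    vm_map_trunc_page(fault_addr, PAGE_MASK),
	    VM_PROT_READ | VM_PROT_WRITE,
	    FALSE,                      /* change_wiring */
	    VM_KERN_MEMORY_NONE,        /* wire_tag: not wiring */
	    THREAD_UNINT,               /* interruptible */
	    PMAP_NULL, 0);              /* no caller pmap override */
#endif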
4136 
4137 static boolean_t
4138 current_proc_is_privileged(void)
4139 {
4140 	return csproc_get_platform_binary(current_proc());
4141 }
4142 
4143 uint64_t vm_copied_on_read = 0;
4144 
4145 /*
4146  * Cleanup after a vm_fault_enter.
4147  * At this point, the fault should either have failed (kr != KERN_SUCCESS)
4148  * or the page should be in the pmap and on the correct paging queue.
4149  *
4150  * Precondition:
4151  * map must be locked shared.
4152  * m_object must be locked.
4153  * If top_object != VM_OBJECT_NULL, it must be locked.
4154  * real_map must be locked.
4155  *
4156  * Postcondition:
4157  * map will be unlocked
4158  * m_object will be unlocked
4159  * top_object will be unlocked
4160  * If real_map != map, it will be unlocked
4161  */
4162 static void
4163 vm_fault_complete(
4164 	vm_map_t map,
4165 	vm_map_t real_map,
4166 	vm_object_t object,
4167 	vm_object_t m_object,
4168 	vm_page_t m,
4169 	vm_map_offset_t offset,
4170 	vm_map_offset_t trace_real_vaddr,
4171 	vm_object_fault_info_t fault_info,
4172 	vm_prot_t caller_prot,
4173 #if CONFIG_DTRACE
4174 	vm_map_offset_t real_vaddr,
4175 #else
4176 	__unused vm_map_offset_t real_vaddr,
4177 #endif /* CONFIG_DTRACE */
4178 	int type_of_fault,
4179 	boolean_t need_retry,
4180 	kern_return_t kr,
4181 	ppnum_t *physpage_p,
4182 	vm_prot_t prot,
4183 	vm_object_t top_object,
4184 	boolean_t need_collapse,
4185 	vm_map_offset_t cur_offset,
4186 	vm_prot_t fault_type,
4187 	vm_object_t *written_on_object,
4188 	memory_object_t *written_on_pager,
4189 	vm_object_offset_t *written_on_offset)
4190 {
4191 	int     event_code = 0;
4192 	vm_map_lock_assert_shared(map);
4193 	vm_object_lock_assert_held(m_object);
4194 	if (top_object != VM_OBJECT_NULL) {
4195 		vm_object_lock_assert_held(top_object);
4196 	}
4197 	vm_map_lock_assert_held(real_map);
4198 
4199 	if (m_object->internal) {
4200 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
4201 	} else if (m_object->object_is_shared_cache) {
4202 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
4203 	} else {
4204 		event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
4205 	}
4206 	KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid());
4207 	if (need_retry == FALSE) {
4208 		KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid());
4209 	}
4210 	DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
4211 	if (kr == KERN_SUCCESS &&
4212 	    physpage_p != NULL) {
4213 		/* for vm_map_wire_and_extract() */
4214 		*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
4215 		if (prot & VM_PROT_WRITE) {
4216 			vm_object_lock_assert_exclusive(m_object);
4217 			m->vmp_dirty = TRUE;
4218 		}
4219 	}
4220 
4221 	if (top_object != VM_OBJECT_NULL) {
4222 		/*
4223 		 * It's safe to drop the top object
4224 		 * now that we've done our
4225 		 * vm_fault_enter().  Any other fault
4226 		 * in progress for that virtual
4227 		 * address will either find our page
4228 		 * and translation or put in a new page
4229 		 * and translation.
4230 		 */
4231 		vm_object_unlock(top_object);
4232 		top_object = VM_OBJECT_NULL;
4233 	}
4234 
4235 	if (need_collapse == TRUE) {
4236 		vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
4237 	}
4238 
4239 	if (need_retry == FALSE &&
4240 	    (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
4241 		/*
4242 		 * evaluate access pattern and update state
4243 		 * vm_fault_deactivate_behind depends on the
4244 		 * state being up to date
4245 		 */
4246 		vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
4247 
4248 		vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
4249 	}
4250 	/*
4251 	 * That's it, clean up and return.
4252 	 */
4253 	if (m->vmp_busy) {
4254 		vm_object_lock_assert_exclusive(m_object);
4255 		vm_page_wakeup_done(m_object, m);
4256 	}
4257 
4258 	if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
4259 		vm_object_paging_begin(m_object);
4260 
4261 		assert(*written_on_object == VM_OBJECT_NULL);
4262 		*written_on_object = m_object;
4263 		*written_on_pager = m_object->pager;
4264 		*written_on_offset = m_object->paging_offset + m->vmp_offset;
4265 	}
4266 	vm_object_unlock(object);
4267 
4268 	vm_map_unlock_read(map);
4269 	if (real_map != map) {
4270 		vm_map_unlock(real_map);
4271 	}
4272 }
4273 
4274 static inline int
4275 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
4276 {
4277 	if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4278 		return DBG_COR_FAULT;
4279 	}
4280 	return type_of_fault;
4281 }
4282 
4283 uint64_t vm_fault_resilient_media_initiate = 0;
4284 uint64_t vm_fault_resilient_media_retry = 0;
4285 uint64_t vm_fault_resilient_media_proceed = 0;
4286 uint64_t vm_fault_resilient_media_release = 0;
4287 uint64_t vm_fault_resilient_media_abort1 = 0;
4288 uint64_t vm_fault_resilient_media_abort2 = 0;
4289 
4290 #if MACH_ASSERT
4291 int vm_fault_resilient_media_inject_error1_rate = 0;
4292 int vm_fault_resilient_media_inject_error1 = 0;
4293 int vm_fault_resilient_media_inject_error2_rate = 0;
4294 int vm_fault_resilient_media_inject_error2 = 0;
4295 int vm_fault_resilient_media_inject_error3_rate = 0;
4296 int vm_fault_resilient_media_inject_error3 = 0;
4297 #endif /* MACH_ASSERT */
4298 
4299 kern_return_t
4300 vm_fault_internal(
4301 	vm_map_t           map,
4302 	vm_map_offset_t    vaddr,
4303 	vm_prot_t          caller_prot,
4304 	vm_tag_t           wire_tag,               /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4305 	pmap_t             caller_pmap,
4306 	vm_map_offset_t    caller_pmap_addr,
4307 	ppnum_t            *physpage_p,
4308 	vm_object_fault_info_t fault_info)
4309 {
4310 	vm_map_version_t        version;        /* Map version for verification */
4311 	boolean_t               wired;          /* Should mapping be wired down? */
4312 	vm_object_t             object;         /* Top-level object */
4313 	vm_object_offset_t      offset;         /* Top-level offset */
4314 	vm_prot_t               prot;           /* Protection for mapping */
4315 	vm_object_t             old_copy_object; /* Saved copy object */
4316 	uint32_t                old_copy_version;
4317 	vm_page_t               result_page;    /* Result of vm_fault_page */
4318 	vm_page_t               top_page;       /* Placeholder page */
4319 	kern_return_t           kr;
4320 
4321 	vm_page_t               m;      /* Fast access to result_page */
4322 	kern_return_t           error_code;
4323 	vm_object_t             cur_object;
4324 	vm_object_t             m_object = NULL;
4325 	vm_object_offset_t      cur_offset;
4326 	vm_page_t               cur_m;
4327 	vm_object_t             new_object;
4328 	int                     type_of_fault;
4329 	pmap_t                  pmap;
4330 	wait_interrupt_t        interruptible_state;
4331 	vm_map_t                real_map = map;
4332 	vm_map_t                original_map = map;
4333 	bool                    object_locks_dropped = FALSE;
4334 	vm_prot_t               fault_type;
4335 	vm_prot_t               original_fault_type;
4336 	bool                    need_collapse = FALSE;
4337 	boolean_t               need_retry = FALSE;
4338 	boolean_t               *need_retry_ptr = NULL;
4339 	uint8_t                 object_lock_type = 0;
4340 	uint8_t                 cur_object_lock_type;
4341 	vm_object_t             top_object = VM_OBJECT_NULL;
4342 	vm_object_t             written_on_object = VM_OBJECT_NULL;
4343 	memory_object_t         written_on_pager = NULL;
4344 	vm_object_offset_t      written_on_offset = 0;
4345 	int                     throttle_delay;
4346 	int                     compressed_count_delta;
4347 	vm_grab_options_t       grab_options;
4348 	bool                    need_copy;
4349 	bool                    need_copy_on_read;
4350 	vm_map_offset_t         trace_vaddr;
4351 	vm_map_offset_t         trace_real_vaddr;
4352 	vm_map_size_t           fault_page_size;
4353 	vm_map_size_t           fault_page_mask;
4354 	int                     fault_page_shift;
4355 	vm_map_offset_t         fault_phys_offset;
4356 	vm_map_offset_t         real_vaddr;
4357 	bool                    resilient_media_retry = false;
4358 	bool                    resilient_media_ref_transfer = false;
4359 	vm_object_t             resilient_media_object = VM_OBJECT_NULL;
4360 	vm_object_offset_t      resilient_media_offset = (vm_object_offset_t)-1;
4361 	bool                    page_needs_data_sync = false;
4362 	/*
4363 	 * Was the VM object contended when vm_map_lookup_and_lock_object locked it?
4364 	 * If so, the zero fill path will drop the lock
4365 	 * NB: Ideally we would always drop the lock rather than rely on
4366 	 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4367 	 */
4368 	bool                    object_is_contended = false;
4369 
4370 
4371 	real_vaddr = vaddr;
4372 	trace_real_vaddr = vaddr;
4373 
4374 	/*
4375 	 * Some (kernel) submaps are marked with "should never fault".
4376 	 *
4377 	 * We do this for two reasons:
4378 	 * - PGZ which is inside the zone map range can't go down the normal
4379 	 *   lookup path (vm_map_lookup_entry() would panic).
4380 	 *
4381 	 * - we want guard pages to not have to use fictitious pages at all,
4382 	 *   to prevent ZFOD pages from being created.
4383 	 *
4384 	 * We also want to capture the fault address easily so that the zone
4385 	 * allocator might present an enhanced panic log.
4386 	 */
4387 	if (map->never_faults || (pgz_owned(vaddr) && map->pmap == kernel_pmap)) {
4388 		assert(map->pmap == kernel_pmap);
4389 		return KERN_INVALID_ADDRESS;
4390 	}
4391 
4392 	if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4393 		fault_phys_offset = (vm_map_offset_t)-1;
4394 		fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4395 		fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4396 		fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4397 		if (fault_page_size < PAGE_SIZE) {
4398 			DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4399 			vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4400 		}
4401 	} else {
4402 		fault_phys_offset = 0;
4403 		fault_page_size = PAGE_SIZE;
4404 		fault_page_mask = PAGE_MASK;
4405 		fault_page_shift = PAGE_SHIFT;
4406 		vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4407 	}
4408 
4409 	if (map == kernel_map) {
4410 		trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4411 		trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4412 	} else {
4413 		trace_vaddr = vaddr;
4414 	}
4415 
4416 	KDBG_RELEASE(
4417 		(VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_START,
4418 		((uint64_t)trace_vaddr >> 32),
4419 		trace_vaddr,
4420 		(map == kernel_map));
4421 
4422 	if (get_preemption_level() != 0) {
4423 		KDBG_RELEASE(
4424 			(VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
4425 			((uint64_t)trace_vaddr >> 32),
4426 			trace_vaddr,
4427 			KERN_FAILURE);
4428 
4429 		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4430 		return KERN_FAILURE;
4431 	}
4432 
4433 	thread_t cthread = current_thread();
4434 
4435 	if (cthread->th_vm_faults_disabled) {
4436 		KDBG_RELEASE(
4437 			(MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4438 			((uint64_t)trace_vaddr >> 32),
4439 			trace_vaddr,
4440 			KERN_FAILURE);
4441 		ktriage_record(thread_tid(cthread),
4442 		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
4443 		    KDBG_TRIAGE_RESERVED,
4444 		    KDBG_TRIAGE_VM_FAULTS_DISABLED),
4445 		    0 /* arg */);
4446 		return KERN_FAILURE;
4447 	}
4448 
4449 	bool     rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4450 	uint64_t fstart = 0;
4451 
4452 	if (rtfault) {
4453 		fstart = mach_continuous_time();
4454 	}
4455 
4456 	assert(fault_info != NULL);
4457 	interruptible_state = thread_interrupt_level(fault_info->interruptible);
4458 
4459 	fault_type = (fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot);
4460 
4461 	counter_inc(&vm_statistics_faults);
4462 	counter_inc(&current_task()->faults);
4463 	original_fault_type = fault_type;
4464 
4465 	need_copy = FALSE;
4466 	if (fault_type & VM_PROT_WRITE) {
4467 		need_copy = TRUE;
4468 	}
4469 
4470 	if (need_copy || fault_info->fi_change_wiring) {
4471 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4472 	} else {
4473 		object_lock_type = OBJECT_LOCK_SHARED;
4474 	}
4475 
4476 	cur_object_lock_type = OBJECT_LOCK_SHARED;
4477 
4478 	if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4479 		if (compressor_map) {
4480 			if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4481 				panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4482 			}
4483 		}
4484 	}
4485 RetryFault:
4486 	assert(written_on_object == VM_OBJECT_NULL);
4487 
4488 	/*
4489 	 * assume we will hit a page in the cache
4490 	 * otherwise, explicitly override with
4491 	 * the real fault type once we determine it
4492 	 */
4493 	type_of_fault = DBG_CACHE_HIT_FAULT;
4494 
4495 	/*
4496 	 *	Find the backing store object and offset into
4497 	 *	it to begin the search.
4498 	 */
4499 	fault_type = original_fault_type;
4500 	map = original_map;
4501 	vm_map_lock_read(map);
4502 
4503 	if (resilient_media_retry) {
4504 		/*
4505 		 * If we have to insert a fake zero-filled page to hide
4506 		 * a media failure to provide the real page, we need to
4507 		 * resolve any pending copy-on-write on this mapping.
4508 		 * VM_PROT_COPY tells vm_map_lookup_and_lock_object() to deal
4509 		 * with that even if this is not a "write" fault.
4510 		 */
4511 		need_copy = TRUE;
4512 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4513 		vm_fault_resilient_media_retry++;
4514 	}
4515 
4516 	kr = vm_map_lookup_and_lock_object(&map, vaddr,
4517 	    (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4518 	    object_lock_type, &version,
4519 	    &object, &offset, &prot, &wired,
4520 	    fault_info,
4521 	    &real_map,
4522 	    &object_is_contended);
4523 	object_is_contended = false; /* avoid unsafe optimization */
4524 
4525 	if (kr != KERN_SUCCESS) {
4526 		vm_map_unlock_read(map);
4527 		/*
4528 		 * This can be seen in a crash report if indeed the
4529 		 * thread is crashing due to an invalid access in a non-existent
4530 		 * range.
4531 		 * Turning this OFF for now because it is noisy and not always fatal
4532 		 * Turning this OFF for now because it is noisy and not always fatal,
4533 		 * e.g. prefaulting.
4534 		 * if (kr == KERN_INVALID_ADDRESS) {
4535 		 *	ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4536 		 * }
4537 		 */
4538 		goto done;
4539 	}
4540 
4541 
4542 	pmap = real_map->pmap;
4543 	fault_info->io_sync = FALSE;
4544 	fault_info->mark_zf_absent = FALSE;
4545 	fault_info->batch_pmap_op = FALSE;
4546 
4547 	if (resilient_media_retry) {
4548 		/*
4549 		 * We're retrying this fault after having detected a media
4550 		 * failure from a "resilient_media" mapping.
4551 		 * Check that the mapping is still pointing at the object
4552 		 * that just failed to provide a page.
4553 		 */
4554 		assert(resilient_media_object != VM_OBJECT_NULL);
4555 		assert(resilient_media_offset != (vm_object_offset_t)-1);
4556 		if ((object != VM_OBJECT_NULL &&
4557 		    object == resilient_media_object &&
4558 		    offset == resilient_media_offset &&
4559 		    fault_info->resilient_media)
4560 #if MACH_ASSERT
4561 		    && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4562 		    (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4563 #endif /* MACH_ASSERT */
4564 		    ) {
4565 			/*
4566 			 * This mapping still points at the same object
4567 			 * and is still "resilient_media": proceed in
4568 			 * "recovery-from-media-failure" mode, where we'll
4569 			 * insert a zero-filled page in the top object.
4570 			 */
4571 //                     printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4572 			vm_fault_resilient_media_proceed++;
4573 		} else {
4574 			/* not recovering: reset state and retry fault */
4575 //                     printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info->resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4576 			vm_object_unlock(object);
4577 			if (real_map != map) {
4578 				vm_map_unlock(real_map);
4579 			}
4580 			vm_map_unlock_read(map);
4581 			/* release our extra reference on failed object */
4582 //                     printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4583 			vm_object_deallocate(resilient_media_object);
4584 			resilient_media_object = VM_OBJECT_NULL;
4585 			resilient_media_offset = (vm_object_offset_t)-1;
4586 			resilient_media_retry = false;
4587 			vm_fault_resilient_media_abort1++;
4588 			goto RetryFault;
4589 		}
4590 	} else {
4591 		assert(resilient_media_object == VM_OBJECT_NULL);
4592 		resilient_media_offset = (vm_object_offset_t)-1;
4593 	}
4594 
4595 	/*
4596 	 * If the page is wired, we must fault for the current protection
4597 	 * value, to avoid further faults.
4598 	 */
4599 	if (wired) {
4600 		fault_type = prot | VM_PROT_WRITE;
4601 	}
4602 	if (wired || need_copy) {
4603 		/*
4604 		 * since we're treating this fault as a 'write'
4605 		 * we must hold the top object lock exclusively
4606 		 */
4607 		if (object_lock_type == OBJECT_LOCK_SHARED) {
4608 			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4609 
4610 			if (vm_object_lock_upgrade(object) == FALSE) {
4611 				/*
4612 				 * couldn't upgrade, so explicitly
4613 				 * take the lock exclusively
4614 				 */
4615 				vm_object_lock(object);
4616 			}
4617 		}
4618 	}
4619 
4620 #if     VM_FAULT_CLASSIFY
4621 	/*
4622 	 *	Temporary data gathering code
4623 	 */
4624 	vm_fault_classify(object, offset, fault_type);
4625 #endif
4626 	/*
4627 	 *	Fast fault code.  The basic idea is to do as much as
4628 	 *	possible while holding the map lock and object locks.
4629 	 *      Busy pages are not used until the object lock has to
4630 	 *	be dropped to do something (copy, zero fill, pmap enter).
4631 	 *	Similarly, paging references aren't acquired until that
4632 	 *	point, and object references aren't used.
4633 	 *
4634 	 *	If we can figure out what to do
4635 	 *	(zero fill, copy on write, pmap enter) while holding
4636 	 *	the locks, then it gets done.  Otherwise, we give up,
4637 	 *	and use the original fault path (which doesn't hold
4638 	 *	the map lock, and relies on busy pages).
4639 	 *	The give up cases include:
4640 	 *              - Have to talk to pager.
4641 	 *		- Page is busy, absent or in error.
4642 	 *		- Pager has locked out desired access.
4643 	 *		- Fault needs to be restarted.
4644 	 *		- Have to push page into copy object.
4645 	 *
4646 	 *	The code is an infinite loop that moves one level down
4647 	 *	the shadow chain each time.  cur_object and cur_offset
4648 	 *      refer to the current object being examined. object and offset
4649 	 *	are the original object and offset from the map.  The loop is at the
4650 	 *	top level if and only if object and cur_object are the same.
4651 	 *
4652 	 *	Invariants:  Map lock is held throughout.  Lock is held on
4653 	 *		original object and cur_object (if different) when
4654 	 *		continuing or exiting loop.
4655 	 *
4656 	 */
4657 
4658 #if defined(__arm64__)
4659 	/*
4660 	 * Fail if reading an execute-only page in a
4661 	 * pmap that enforces execute-only protection.
4662 	 */
4663 	if (fault_type == VM_PROT_READ &&
4664 	    (prot & VM_PROT_EXECUTE) &&
4665 	    !(prot & VM_PROT_READ) &&
4666 	    pmap_enforces_execute_only(pmap)) {
4667 		vm_object_unlock(object);
4668 		vm_map_unlock_read(map);
4669 		if (real_map != map) {
4670 			vm_map_unlock(real_map);
4671 		}
4672 		kr = KERN_PROTECTION_FAILURE;
4673 		goto done;
4674 	}
4675 #endif
4676 
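	/*
	 * Distance from the faulting offset to the start of the enclosing
	 * kernel-sized page: non-zero only when the map uses a smaller page
	 * size than the kernel (e.g. 4K mappings on a 16K kernel), in which
	 * case it selects the sub-page chunk being faulted on.
	 */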
4677 	fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
4678 
4679 	/*
4680 	 * If this page is to be inserted in a copy delay object
4681 	 * for writing, and if the object has a copy, then the
4682 	 * copy delay strategy is implemented in the slow fault path.
4683 	 */
4684 	if ((object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
4685 	    object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) &&
4686 	    object->vo_copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4687 		if (resilient_media_retry && object && object->internal) {
4688 			/*
4689 			 * We're handling a "resilient media retry" and we
4690 			 * just want to insert a zero-filled page in this
4691 			 * top object (if there's not already a page there),
4692 			 * so this is not a real "write" and we want to stay
4693 			 * on this code path.
4694 			 */
4695 		} else {
4696 			goto handle_copy_delay;
4697 		}
4698 	}
4699 
4700 	cur_object = object;
4701 	cur_offset = offset;
4702 
4703 	grab_options = vm_page_grab_options_for_object(object);
4704 
4705 	while (TRUE) {
4706 		if (!cur_object->pager_created &&
4707 		    cur_object->phys_contiguous) { /* superpage */
4708 			break;
4709 		}
4710 
4711 		if (cur_object->blocked_access) {
4712 			/*
4713 			 * Access to this VM object has been blocked.
4714 			 * Let the slow path handle it.
4715 			 */
4716 			break;
4717 		}
4718 
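		/*
		 * Pages are kept in the VM object at kernel page granularity,
		 * so look the page up at the page-aligned offset even for
		 * sub-page (e.g. 4K) mappings.
		 */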
4719 		m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4720 		m_object = NULL;
4721 
4722 		if (m != VM_PAGE_NULL) {
4723 			m_object = cur_object;
4724 
4725 			if (m->vmp_busy) {
4726 				wait_result_t   result;
4727 
4728 				/*
4729 				 * in order to vm_page_sleep(), we must
4730 				 * have object that 'm' belongs to locked exclusively
4731 				 * have the object that 'm' belongs to locked exclusively
4732 				if (object != cur_object) {
4733 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4734 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4735 
4736 						if (vm_object_lock_upgrade(cur_object) == FALSE) {
4737 							/*
4738 							 * couldn't upgrade so go do a full retry
4739 							 * immediately since we can no longer be
4740 							 * certain about cur_object (since we
4741 							 * don't hold a reference on it)...
4742 							 * first drop the top object lock
4743 							 */
4744 							vm_object_unlock(object);
4745 
4746 							vm_map_unlock_read(map);
4747 							if (real_map != map) {
4748 								vm_map_unlock(real_map);
4749 							}
4750 
4751 							goto RetryFault;
4752 						}
4753 					}
4754 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4755 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4756 
4757 					if (vm_object_lock_upgrade(object) == FALSE) {
4758 						/*
4759 						 * couldn't upgrade, so explicitly take the lock
4760 						 * exclusively and go relookup the page since we
4761 						 * will have dropped the object lock and
4762 						 * a different thread could have inserted
4763 						 * a page at this offset
4764 						 * no need for a full retry since we're
4765 						 * at the top level of the object chain
4766 						 */
4767 						vm_object_lock(object);
4768 
4769 						continue;
4770 					}
4771 				}
4772 				if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4773 					/*
4774 					 * m->vmp_busy == TRUE and the object is locked exclusively
4775 					 * if m->pageout_queue == TRUE after we acquire the
4776 					 * queues lock, we are guaranteed that it is stable on
4777 					 * the pageout queue and therefore reclaimable
4778 					 *
4779 					 * NOTE: this is only true for the internal pageout queue
4780 					 * in the compressor world
4781 					 */
4782 					assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4783 
4784 					vm_page_lock_queues();
4785 
4786 					if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4787 						vm_pageout_throttle_up(m);
4788 						vm_page_unlock_queues();
4789 
4790 						vm_page_wakeup_done(m_object, m);
4791 						goto reclaimed_from_pageout;
4792 					}
4793 					vm_page_unlock_queues();
4794 				}
4795 				if (object != cur_object) {
4796 					vm_object_unlock(object);
4797 				}
4798 
4799 				vm_map_unlock_read(map);
4800 				if (real_map != map) {
4801 					vm_map_unlock(real_map);
4802 				}
4803 
4804 				result = vm_page_sleep(cur_object, m, fault_info->interruptible, LCK_SLEEP_UNLOCK);
4805 				if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4806 					goto RetryFault;
4807 				}
4808 
4809 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4810 				kr = KERN_ABORTED;
4811 				goto done;
4812 			}
4813 reclaimed_from_pageout:
4814 			if (m->vmp_laundry) {
4815 				if (object != cur_object) {
4816 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4817 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4818 
4819 						vm_object_unlock(object);
4820 						vm_object_unlock(cur_object);
4821 
4822 						vm_map_unlock_read(map);
4823 						if (real_map != map) {
4824 							vm_map_unlock(real_map);
4825 						}
4826 
4827 						goto RetryFault;
4828 					}
4829 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4830 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4831 
4832 					if (vm_object_lock_upgrade(object) == FALSE) {
4833 						/*
4834 						 * couldn't upgrade, so explicitly take the lock
4835 						 * exclusively and go relookup the page since we
4836 						 * will have dropped the object lock and
4837 						 * a different thread could have inserted
4838 						 * a page at this offset
4839 						 * no need for a full retry since we're
4840 						 * at the top level of the object chain
4841 						 */
4842 						vm_object_lock(object);
4843 
4844 						continue;
4845 					}
4846 				}
4847 				vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4848 				vm_pageout_steal_laundry(m, FALSE);
4849 			}
4850 
4851 
4852 			if (vm_page_is_guard(m)) {
4853 				/*
4854 				 * Guard page: let the slow path deal with it
4855 				 */
4856 				break;
4857 			}
4858 			if (m->vmp_unusual && (m->vmp_error || m->vmp_restart ||
4859 			    vm_page_is_private(m) || m->vmp_absent)) {
4860 				/*
4861 				 * Unusual case... let the slow path deal with it
4862 				 */
4863 				break;
4864 			}
4865 			if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4866 				if (object != cur_object) {
4867 					vm_object_unlock(object);
4868 				}
4869 				vm_map_unlock_read(map);
4870 				if (real_map != map) {
4871 					vm_map_unlock(real_map);
4872 				}
4873 				vm_object_unlock(cur_object);
4874 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4875 				kr = KERN_MEMORY_ERROR;
4876 				goto done;
4877 			}
4878 			assert(m_object == VM_PAGE_OBJECT(m));
4879 
4880 			if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4881 			    PAGE_SIZE, 0) ||
4882 			    (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4883 upgrade_lock_and_retry:
4884 				/*
4885 				 * We might need to validate this page
4886 				 * against its code signature, so we
4887 				 * want to hold the VM object exclusively.
4888 				 */
4889 				if (object != cur_object) {
4890 					if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4891 						vm_object_unlock(object);
4892 						vm_object_unlock(cur_object);
4893 
4894 						cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4895 
4896 						vm_map_unlock_read(map);
4897 						if (real_map != map) {
4898 							vm_map_unlock(real_map);
4899 						}
4900 
4901 						goto RetryFault;
4902 					}
4903 				} else if (object_lock_type == OBJECT_LOCK_SHARED) {
4904 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4905 
4906 					if (vm_object_lock_upgrade(object) == FALSE) {
4907 						/*
4908 						 * couldn't upgrade, so explicitly take the lock
4909 						 * exclusively and go relookup the page since we
4910 						 * will have dropped the object lock and
4911 						 * a different thread could have inserted
4912 						 * a page at this offset
4913 						 * no need for a full retry since we're
4914 						 * at the top level of the object chain
4915 						 */
4916 						vm_object_lock(object);
4917 
4918 						continue;
4919 					}
4920 				}
4921 			}
4922 			/*
4923 			 *	Two cases of map in faults:
4924 			 *	    - At top level w/o copy object.
4925 			 *	    - Read fault anywhere.
4926 			 *		--> must disallow write.
4927 			 */
4928 
4929 			if (object == cur_object && object->vo_copy == VM_OBJECT_NULL) {
4930 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4931 				if ((fault_type & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
4932 					assert(cur_object == VM_PAGE_OBJECT(m));
4933 					assert(cur_object->internal);
4934 					vm_object_lock_assert_exclusive(cur_object);
4935 					vm_page_lockspin_queues();
4936 					m->vmp_unmodified_ro = false;
4937 					vm_page_unlock_queues();
4938 					os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4939 					vm_object_compressor_pager_state_clr(cur_object, m->vmp_offset);
4940 				}
4941 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4942 				goto FastPmapEnter;
4943 			}
4944 
4945 			if (!need_copy &&
4946 			    !fault_info->no_copy_on_read &&
4947 			    cur_object != object &&
4948 			    !cur_object->internal &&
4949 			    !cur_object->pager_trusted &&
4950 			    vm_protect_privileged_from_untrusted &&
4951 			    !cur_object->code_signed &&
4952 			    current_proc_is_privileged()) {
4953 				/*
4954 				 * We're faulting on a page in "object" and
4955 				 * went down the shadow chain to "cur_object"
4956 				 * to find out that "cur_object"'s pager
4957 				 * is not "trusted", i.e. we can not trust it
4958 				 * to always return the same contents.
4959 				 * Since the target is a "privileged" process,
4960 				 * let's treat this as a copy-on-read fault, as
4961 				 * if it was a copy-on-write fault.
4962 				 * Once "object" gets a copy of this page, it
4963 				 * won't have to rely on "cur_object" to
4964 				 * provide the contents again.
4965 				 *
4966 				 * This is done by setting "need_copy" and
4967 				 * retrying the fault from the top with the
4968 				 * appropriate locking.
4969 				 *
4970 				 * Special case: if the mapping is executable
4971 				 * and the untrusted object is code-signed and
4972 				 * the process is "cs_enforced", we do not
4973 				 * copy-on-read because that would break
4974 				 * code-signing enforcement expectations (an
4975 				 * executable page must belong to a code-signed
4976 				 * object) and we can rely on code-signing
4977 				 * to re-validate the page if it gets evicted
4978 				 * and paged back in.
4979 				 */
4980 //				printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4981 				vm_copied_on_read++;
4982 				need_copy = TRUE;
4983 
4984 				vm_object_unlock(object);
4985 				vm_object_unlock(cur_object);
4986 				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4987 				vm_map_unlock_read(map);
4988 				if (real_map != map) {
4989 					vm_map_unlock(real_map);
4990 				}
4991 				goto RetryFault;
4992 			}
4993 
4994 			if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4995 				if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4996 					/*
4997 					 * For a protection that the pmap cares
4998 					 * about, we must hand over the full
4999 					 * set of protections (so that the pmap
5000 					 * layer can apply any desired policy).
5001 					 * This means that cs_bypass must be
5002 					 * set, as this can force us to pass
5003 					 * RWX.
5004 					 */
5005 					if (!fault_info->cs_bypass) {
5006 						panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5007 						    __FUNCTION__, pmap,
5008 						    (uint64_t)vaddr, prot,
5009 						    fault_info->pmap_options);
5010 					}
5011 				} else {
5012 					prot &= ~VM_PROT_WRITE;
5013 				}
5014 
5015 				if (object != cur_object) {
5016 					/*
5017 					 * We still need to hold the top object
5018 					 * lock here to prevent a race between
5019 					 * a read fault (taking only "shared"
5020 					 * locks) and a write fault (taking
5021 					 * an "exclusive" lock on the top
5022 					 * object).
5023 					 * Otherwise, as soon as we release the
5024 					 * top lock, the write fault could
5025 					 * proceed and actually complete before
5026 					 * the read fault, and the copied page's
5027 					 * translation could then be overwritten
5028 					 * by the read fault's translation for
5029 					 * the original page.
5030 					 *
5031 					 * Let's just record what the top object
5032 					 * is and we'll release it later.
5033 					 */
5034 					top_object = object;
5035 
5036 					/*
5037 					 * switch to the object that has the new page
5038 					 */
5039 					object = cur_object;
5040 					object_lock_type = cur_object_lock_type;
5041 				}
5042 FastPmapEnter:
5043 				assert(m_object == VM_PAGE_OBJECT(m));
5044 
5045 				if (resilient_media_retry && (prot & VM_PROT_WRITE)) {
5046 					/*
5047 					 * We might have bypassed some copy-on-write
5048 					 * mechanism to get here (theoretically inserting
5049 					 * a zero-filled page in the top object to avoid
5050 					 * raising an exception on an unavailable page at
5051 					 * the bottom of the shadow chain).
5052 					 * So let's not grant write access to this page yet.
5053 					 * If write access is needed, the next fault should
5054 					 * handle any copy-on-write obligations.
5055 					 */
5056 					if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5057 						/*
5058 						 * For a protection that the pmap cares
5059 						 * about, we must hand over the full
5060 						 * set of protections (so that the pmap
5061 						 * layer can apply any desired policy).
5062 						 * This means that cs_bypass must be
5063 						 * set, as this can force us to pass
5064 						 * RWX.
5065 						 */
5066 						if (!fault_info->cs_bypass) {
5067 							panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5068 							    __FUNCTION__, pmap,
5069 							    (uint64_t)vaddr, prot,
5070 							    fault_info->pmap_options);
5071 						}
5072 					} else {
5073 						prot &= ~VM_PROT_WRITE;
5074 					}
5075 				}
5076 
5077 				/*
5078 				 * prepare for the pmap_enter...
5079 				 * object and map are both locked
5080 				 * m contains valid data
5081 				 * object == m->vmp_object
5082 				 * cur_object == NULL or it's been unlocked
5083 				 * no paging references on either object or cur_object
5084 				 */
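				/*
				 * Give vm_fault_enter() a way to request a full
				 * retry ("need_retry") when we are still holding the
				 * top object's lock or only hold this object's lock
				 * shared; the retry path below pre-expands the page
				 * table and then re-drives the fault.
				 */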
5085 				if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5086 					need_retry_ptr = &need_retry;
5087 				} else {
5088 					need_retry_ptr = NULL;
5089 				}
5090 
5091 				if (fault_page_size < PAGE_SIZE) {
5092 					DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
5093 					assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
5094 					    fault_phys_offset < PAGE_SIZE),
5095 					    "0x%llx\n", (uint64_t)fault_phys_offset);
5096 				} else {
5097 					assertf(fault_phys_offset == 0,
5098 					    "0x%llx\n", (uint64_t)fault_phys_offset);
5099 				}
5100 
5101 				if (__improbable(rtfault &&
5102 				    !m->vmp_realtime &&
5103 				    vm_pageout_protect_realtime)) {
5104 					vm_page_lock_queues();
5105 					if (!m->vmp_realtime) {
5106 						m->vmp_realtime = true;
5107 						VM_COUNTER_INC(&vm_page_realtime_count);
5108 					}
5109 					vm_page_unlock_queues();
5110 				}
5111 				assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
5112 				assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
5113 				if (caller_pmap) {
5114 					kr = vm_fault_enter(m,
5115 					    caller_pmap,
5116 					    caller_pmap_addr,
5117 					    fault_page_size,
5118 					    fault_phys_offset,
5119 					    prot,
5120 					    caller_prot,
5121 					    wired,
5122 					    wire_tag,
5123 					    fault_info,
5124 					    need_retry_ptr,
5125 					    &type_of_fault,
5126 					    &object_lock_type);
5127 				} else {
5128 					kr = vm_fault_enter(m,
5129 					    pmap,
5130 					    vaddr,
5131 					    fault_page_size,
5132 					    fault_phys_offset,
5133 					    prot,
5134 					    caller_prot,
5135 					    wired,
5136 					    wire_tag,
5137 					    fault_info,
5138 					    need_retry_ptr,
5139 					    &type_of_fault,
5140 					    &object_lock_type);
5141 				}
5142 
5143 				vm_fault_complete(
5144 					map,
5145 					real_map,
5146 					object,
5147 					m_object,
5148 					m,
5149 					offset,
5150 					trace_real_vaddr,
5151 					fault_info,
5152 					caller_prot,
5153 					real_vaddr,
5154 					vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
5155 					need_retry,
5156 					kr,
5157 					physpage_p,
5158 					prot,
5159 					top_object,
5160 					need_collapse,
5161 					cur_offset,
5162 					fault_type,
5163 					&written_on_object,
5164 					&written_on_pager,
5165 					&written_on_offset);
5166 				top_object = VM_OBJECT_NULL;
5167 				if (need_retry == TRUE) {
5168 					/*
5169 					 * vm_fault_enter couldn't complete the PMAP_ENTER...
5170 					 * at this point we don't hold any locks so it's safe
5171 					 * to ask the pmap layer to expand the page table to
5172 					 * accommodate this mapping... once expanded, we'll
5173 					 * re-drive the fault which should result in vm_fault_enter
5174 					 * being able to successfully enter the mapping this time around
5175 					 */
5176 					(void)pmap_enter_options(
5177 						pmap, vaddr, 0, 0, 0, 0, 0,
5178 						PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5179 
5180 					need_retry = FALSE;
5181 					goto RetryFault;
5182 				}
5183 				goto done;
5184 			}
5185 			/*
5186 			 * COPY ON WRITE FAULT
5187 			 */
5188 			assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
5189 
5190 			/*
5191 			 * If objects match, then
5192 			 * object->vo_copy must not be NULL (else control
5193 			 * would be in the previous code block), and we
5194 			 * have a potential push into the copy object
5195 			 * which we can't cope with here.
5196 			 */
5197 			if (cur_object == object) {
5198 				/*
5199 				 * must take the slow path to
5200 				 * deal with the copy push
5201 				 */
5202 				break;
5203 			}
5204 
5205 			/*
5206 			 * This is now a shadow based copy on write
5207 			 * fault -- it requires a copy up the shadow
5208 			 * chain.
5209 			 */
5210 			assert(m_object == VM_PAGE_OBJECT(m));
5211 
5212 			if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
5213 			    vm_fault_cs_need_validation(NULL, m, m_object,
5214 			    PAGE_SIZE, 0)) {
5215 				goto upgrade_lock_and_retry;
5216 			}
5217 
5218 #if MACH_ASSERT
5219 			if (resilient_media_retry &&
5220 			    vm_fault_resilient_media_inject_error2_rate != 0 &&
5221 			    (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
5222 				/* inject an error */
5223 				cur_m = m;
5224 				m = VM_PAGE_NULL;
5225 				m_object = VM_OBJECT_NULL;
5226 				break;
5227 			}
5228 #endif /* MACH_ASSERT */
5229 			/*
5230 			 * Allocate a page in the original top level
5231 			 * object. Give up if the allocation fails.  Also
5232 			 * need to remember current page, as it's the
5233 			 * source of the copy.
5234 			 *
5235 			 * at this point we hold locks on both
5236 			 * object and cur_object... no need to take
5237 			 * paging refs or mark pages BUSY since
5238 			 * we don't drop either object lock until
5239 			 * the page has been copied and inserted
5240 			 */
5241 			cur_m = m;
5242 			m = vm_page_grab_options(grab_options);
5243 			m_object = NULL;
5244 
5245 			if (m == VM_PAGE_NULL) {
5246 				/*
5247 				 * no free page currently available...
5248 				 * must take the slow path
5249 				 */
5250 				break;
5251 			}
5252 
5253 			/*
5254 			 * Now do the copy.  Mark the source page busy...
5255 			 *
5256 			 *	NOTE: This code holds the map lock across
5257 			 *	the page copy.
5258 			 */
5259 			vm_page_copy(cur_m, m);
5260 			vm_page_insert(m, object, vm_object_trunc_page(offset));
5261 			if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
5262 				DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5263 			}
5264 			m_object = object;
5265 			SET_PAGE_DIRTY(m, FALSE);
5266 
5267 			/*
5268 			 * Now cope with the source page and object
5269 			 */
5270 			if (os_ref_get_count_raw(&object->ref_count) > 1 &&
5271 			    cur_m->vmp_pmapped) {
5272 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5273 			} else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
5274 				/*
5275 				 * We've copied the full 16K page but we're
5276 				 * about to call vm_fault_enter() only for
5277 				 * the 4K chunk we're faulting on.  The other
5278 				 * three 4K chunks in that page could still
5279 				 * be pmapped in this pmap.
5280 				 * Since the VM object layer thinks that the
5281 				 * entire page has been dealt with and the
5282 				 * original page might no longer be needed,
5283 				 * it might collapse/bypass the original VM
5284 				 * object and free its pages, which would be
5285 				 * bad (and would trigger pmap_verify_free()
5286 				 * assertions) if the other 4K chunks are still
5287 				 * pmapped.
5288 				 */
5289 				/*
5290 				 * XXX FBDP TODO4K: to be revisited
5291 				 * Technically, we need to pmap_disconnect()
5292 				 * only the target pmap's mappings for the 4K
5293 				 * chunks of this 16K VM page.  If other pmaps
5294 				 * have PTEs on these chunks, that means that
5295 				 * the associated VM map must have a reference
5296 				 * on the VM object, so no need to worry about
5297 				 * those.
5298 				 * pmap_protect() for each 4K chunk would be
5299 				 * better but we'd have to check which chunks
5300 				 * are actually mapped before and after this
5301 				 * one.
5302 				 * A full-blown pmap_disconnect() is easier
5303 				 * for now but not efficient.
5304 				 */
5305 				DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
5306 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5307 			}
5308 
5309 			if (cur_m->vmp_clustered) {
5310 				VM_PAGE_COUNT_AS_PAGEIN(cur_m);
5311 				VM_PAGE_CONSUME_CLUSTERED(cur_m);
5312 				vm_fault_is_sequential(cur_object, cur_offset, fault_info->behavior);
5313 			}
5314 			need_collapse = TRUE;
5315 
5316 			if (!cur_object->internal &&
5317 			    cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5318 				/*
5319 				 * The object from which we've just
5320 				 * copied a page is most probably backed
5321 				 * by a vnode.  We don't want to waste too
5322 				 * much time trying to collapse the VM objects
5323 				 * and create a bottleneck when several tasks
5324 				 * map the same file.
5325 				 */
5326 				if (cur_object->vo_copy == object) {
5327 					/*
5328 					 * Shared mapping or no COW yet.
5329 					 * We can never collapse a copy
5330 					 * object into its backing object.
5331 					 */
5332 					need_collapse = FALSE;
5333 				} else if (cur_object->vo_copy == object->shadow &&
5334 				    object->shadow->resident_page_count == 0) {
5335 					/*
5336 					 * Shared mapping after a COW occurred.
5337 					 */
5338 					need_collapse = FALSE;
5339 				}
5340 			}
5341 			vm_object_unlock(cur_object);
5342 
5343 			if (need_collapse == FALSE) {
5344 				vm_fault_collapse_skipped++;
5345 			}
5346 			vm_fault_collapse_total++;
5347 
5348 			type_of_fault = DBG_COW_FAULT;
5349 			counter_inc(&vm_statistics_cow_faults);
5350 			DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5351 			counter_inc(&current_task()->cow_faults);
5352 
5353 			goto FastPmapEnter;
5354 		} else {
5355 			/*
5356 			 * No page at cur_object, cur_offset... m == NULL
5357 			 */
5358 			if (cur_object->pager_created) {
5359 				vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5360 
5361 				if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5362 					int             my_fault_type;
5363 					vm_compressor_options_t         c_flags = C_DONT_BLOCK;
5364 					bool            insert_cur_object = FALSE;
5365 
5366 					/*
5367 					 * May have to talk to a pager...
5368 					 * if so, take the slow path by
5369 					 * doing a 'break' from the while (TRUE) loop
5370 					 *
5371 					 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5372 					 * if the compressor is active and the page exists there
5373 					 */
5374 					if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5375 						break;
5376 					}
5377 
5378 					if (map == kernel_map || real_map == kernel_map) {
5379 						/*
5380 						 * can't call into the compressor with the kernel_map
5381 						 * lock held, since the compressor may try to operate
5382 						 * on the kernel map in order to return an empty c_segment
5383 						 */
5384 						break;
5385 					}
5386 					if (object != cur_object) {
5387 						if (fault_type & VM_PROT_WRITE) {
5388 							c_flags |= C_KEEP;
5389 						} else {
5390 							insert_cur_object = TRUE;
5391 						}
5392 					}
5393 					if (insert_cur_object == TRUE) {
5394 						if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5395 							cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5396 
5397 							if (vm_object_lock_upgrade(cur_object) == FALSE) {
5398 								/*
5399 								 * couldn't upgrade so go do a full retry
5400 								 * immediately since we can no longer be
5401 								 * certain about cur_object (since we
5402 								 * don't hold a reference on it)...
5403 								 * first drop the top object lock
5404 								 */
5405 								vm_object_unlock(object);
5406 
5407 								vm_map_unlock_read(map);
5408 								if (real_map != map) {
5409 									vm_map_unlock(real_map);
5410 								}
5411 
5412 								goto RetryFault;
5413 							}
5414 						}
5415 					} else if (object_lock_type == OBJECT_LOCK_SHARED) {
5416 						object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5417 
5418 						if (object != cur_object) {
5419 							/*
5420 							 * we can't go for the upgrade on the top
5421 							 * lock since the upgrade may block waiting
5422 							 * for readers to drain... since we hold
5423 							 * cur_object locked at this point, waiting
5424 							 * for the readers to drain would represent
5425 							 * a lock order inversion since the lock order
5426 							 * for objects is the reference order in the
5427 							 * shadow chain
5428 							 */
5429 							vm_object_unlock(object);
5430 							vm_object_unlock(cur_object);
5431 
5432 							vm_map_unlock_read(map);
5433 							if (real_map != map) {
5434 								vm_map_unlock(real_map);
5435 							}
5436 
5437 							goto RetryFault;
5438 						}
5439 						if (vm_object_lock_upgrade(object) == FALSE) {
5440 							/*
5441 							 * couldn't upgrade, so explicitly take the lock
5442 							 * exclusively and go relookup the page since we
5443 							 * will have dropped the object lock and
5444 							 * a different thread could have inserted
5445 							 * a page at this offset
5446 							 * no need for a full retry since we're
5447 							 * at the top level of the object chain
5448 							 */
5449 							vm_object_lock(object);
5450 
5451 							continue;
5452 						}
5453 					}
5454 
5455 					m = vm_page_grab_options(grab_options);
5456 					m_object = NULL;
5457 
5458 					if (m == VM_PAGE_NULL) {
5459 						/*
5460 						 * no free page currently available...
5461 						 * must take the slow path
5462 						 */
5463 						break;
5464 					}
5465 
5466 					/*
5467 					 * The object is and remains locked
5468 					 * so no need to take a
5469 					 * "paging_in_progress" reference.
5470 					 */
5471 					bool      shared_lock;
5472 					if ((object == cur_object &&
5473 					    object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5474 					    (object != cur_object &&
5475 					    cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5476 						shared_lock = FALSE;
5477 					} else {
5478 						shared_lock = TRUE;
5479 					}
5480 
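					/*
					 * Decompress directly into the freshly
					 * grabbed page.  compressed_count_delta
					 * reports how many compressed pages the
					 * pager gave up, so the compressed-page
					 * counts and any purgeable/tagged owner
					 * ledgers can be adjusted below.
					 */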
5481 					kr = vm_compressor_pager_get(
5482 						cur_object->pager,
5483 						(vm_object_trunc_page(cur_offset)
5484 						+ cur_object->paging_offset),
5485 						VM_PAGE_GET_PHYS_PAGE(m),
5486 						&my_fault_type,
5487 						c_flags,
5488 						&compressed_count_delta);
5489 
5490 					vm_compressor_pager_count(
5491 						cur_object->pager,
5492 						compressed_count_delta,
5493 						shared_lock,
5494 						cur_object);
5495 
5496 					if (kr != KERN_SUCCESS) {
5497 						vm_page_release(m,
5498 						    VMP_RELEASE_NONE);
5499 						m = VM_PAGE_NULL;
5500 					}
5501 					/*
5502 					 * If vm_compressor_pager_get() returns
5503 					 * KERN_MEMORY_FAILURE, then the
5504 					 * compressed data is permanently lost,
5505 					 * so return this error immediately.
5506 					 */
5507 					if (kr == KERN_MEMORY_FAILURE) {
5508 						if (object != cur_object) {
5509 							vm_object_unlock(cur_object);
5510 						}
5511 						vm_object_unlock(object);
5512 						vm_map_unlock_read(map);
5513 						if (real_map != map) {
5514 							vm_map_unlock(real_map);
5515 						}
5516 
5517 						goto done;
5518 					} else if (kr != KERN_SUCCESS) {
5519 						break;
5520 					}
5521 					m->vmp_dirty = TRUE;
5522 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5523 					if ((fault_type & VM_PROT_WRITE) == 0) {
5524 						prot &= ~VM_PROT_WRITE;
5525 						/*
5526 						 * The page, m, has yet to be inserted
5527 						 * into an object. So we are fine with
5528 						 * the object/cur_object lock being held
5529 						 * shared.
5530 						 */
5531 						vm_page_lockspin_queues();
5532 						m->vmp_unmodified_ro = true;
5533 						vm_page_unlock_queues();
5534 						os_atomic_inc(&compressor_ro_uncompressed, relaxed);
5535 					}
5536 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5537 
5538 					/*
5539 					 * If the object is purgeable, its
5540 					 * owner's purgeable ledgers will be
5541 					 * updated in vm_page_insert() but the
5542 					 * page was also accounted for in a
5543 					 * "compressed purgeable" ledger, so
5544 					 * update that now.
5545 					 */
5546 					if (object != cur_object &&
5547 					    !insert_cur_object) {
5548 						/*
5549 						 * We're not going to insert
5550 						 * the decompressed page into
5551 						 * the object it came from.
5552 						 *
5553 						 * We're dealing with a
5554 						 * copy-on-write fault on
5555 						 * "object".
5556 						 * We're going to decompress
5557 						 * the page directly into the
5558 						 * target "object" while
5559 						 * keeping the compressed
5560 						 * page for "cur_object", so
5561 						 * no ledger update in that
5562 						 * case.
5563 						 */
5564 					} else if (((cur_object->purgable ==
5565 					    VM_PURGABLE_DENY) &&
5566 					    (!cur_object->vo_ledger_tag)) ||
5567 					    (cur_object->vo_owner ==
5568 					    NULL)) {
5569 						/*
5570 						 * "cur_object" is not purgeable
5571 						 * and is not ledger-tagged, or
5572 						 * there's no owner for it,
5573 						 * so no owner's ledgers to
5574 						 * update.
5575 						 */
5576 					} else {
5577 						/*
5578 						 * One less compressed
5579 						 * purgeable/tagged page for
5580 						 * cur_object's owner.
5581 						 */
5582 						if (compressed_count_delta) {
5583 							vm_object_owner_compressed_update(
5584 								cur_object,
5585 								-1);
5586 						}
5587 					}
5588 
5589 					if (insert_cur_object) {
5590 						vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5591 						m_object = cur_object;
5592 					} else {
5593 						vm_page_insert(m, object, vm_object_trunc_page(offset));
5594 						m_object = object;
5595 					}
5596 
5597 					if (!HAS_DEFAULT_CACHEABILITY(m_object->wimg_bits & VM_WIMG_MASK)) {
5598 						/*
5599 						 * If the page is not cacheable,
5600 						 * we can't let its contents
5601 						 * linger in the data cache
5602 						 * after the decompression.
5603 						 */
5604 						pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5605 					}
5606 
5607 					type_of_fault = my_fault_type;
5608 
5609 					VM_STAT_DECOMPRESSIONS();
5610 
5611 					if (cur_object != object) {
5612 						if (insert_cur_object) {
5613 							top_object = object;
5614 							/*
5615 							 * switch to the object that has the new page
5616 							 */
5617 							object = cur_object;
5618 							object_lock_type = cur_object_lock_type;
5619 						} else {
5620 							vm_object_unlock(cur_object);
5621 							cur_object = object;
5622 						}
5623 					}
5624 					goto FastPmapEnter;
5625 				}
5626 				/*
5627 				 * existence map present and indicates
5628 				 * that the pager doesn't have this page
5629 				 */
5630 			}
5631 			if (cur_object->shadow == VM_OBJECT_NULL ||
5632 			    resilient_media_retry) {
5633 				/*
5634 				 * Zero fill fault.  Page gets
5635 				 * inserted into the original object.
5636 				 */
5637 				if (cur_object->shadow_severed ||
5638 				    VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5639 				    cur_object == compressor_object ||
5640 				    is_kernel_object(cur_object)) {
5641 					if (object != cur_object) {
5642 						vm_object_unlock(cur_object);
5643 					}
5644 					vm_object_unlock(object);
5645 
5646 					vm_map_unlock_read(map);
5647 					if (real_map != map) {
5648 						vm_map_unlock(real_map);
5649 					}
5650 					if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5651 						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5652 					}
5653 
5654 					if (cur_object->shadow_severed) {
5655 						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5656 					}
5657 
5658 					kr = KERN_MEMORY_ERROR;
5659 					goto done;
5660 				}
5661 				if (cur_object != object) {
5662 					vm_object_unlock(cur_object);
5663 
5664 					cur_object = object;
5665 				}
5666 				if (object_lock_type == OBJECT_LOCK_SHARED) {
5667 					object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5668 
5669 					if (vm_object_lock_upgrade(object) == FALSE) {
5670 						/*
5671 						 * couldn't upgrade so do a full retry on the fault
5672 						 * since we dropped the object lock which
5673 						 * could allow another thread to insert
5674 						 * a page at this offset
5675 						 */
5676 						vm_map_unlock_read(map);
5677 						if (real_map != map) {
5678 							vm_map_unlock(real_map);
5679 						}
5680 
5681 						goto RetryFault;
5682 					}
5683 				}
5684 				if (!object->internal) {
5685 					panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5686 				}
5687 #if MACH_ASSERT
5688 				if (resilient_media_retry &&
5689 				    vm_fault_resilient_media_inject_error3_rate != 0 &&
5690 				    (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5691 					/* inject an error */
5692 					m_object = NULL;
5693 					break;
5694 				}
5695 #endif /* MACH_ASSERT */
5696 
5697 				m = vm_page_grab_options(grab_options);
5698 				m_object = NULL;
5699 
5700 				if (m == VM_PAGE_NULL) {
5701 					/*
5702 					 * no free page currently available...
5703 					 * must take the slow path
5704 					 */
5705 					break;
5706 				}
5707 				m_object = object;
5708 				vm_page_insert(m, m_object, vm_object_trunc_page(offset));
5709 
5710 				if ((prot & VM_PROT_WRITE) &&
5711 				    !(fault_type & VM_PROT_WRITE) &&
5712 				    object->vo_copy != VM_OBJECT_NULL) {
5713 					/*
5714 					 * This is not a write fault and
5715 					 * we might have a copy-on-write
5716 					 * obligation to honor (copy object or
5717 					 * "needs_copy" map entry), so do not
5718 					 * give write access yet.
5719 					 * We'll need to catch the first write
5720 					 * to resolve the copy-on-write by
5721 					 * pushing this page to a copy object
5722 					 * or making a shadow object.
5723 					 */
5724 					if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5725 						/*
5726 						 * This pmap enforces extra
5727 						 * constraints for this set of
5728 						 * protections, so we can't
5729 						 * change the protections.
5730 						 * We would expect code-signing
5731 						 * to be bypassed in this case.
5732 						 */
5733 						if (!fault_info->cs_bypass) {
5734 							panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5735 							    __FUNCTION__,
5736 							    pmap,
5737 							    (uint64_t)vaddr,
5738 							    prot,
5739 							    fault_info->pmap_options);
5740 						}
5741 					} else {
5742 						prot &= ~VM_PROT_WRITE;
5743 					}
5744 				}
5745 				if (resilient_media_retry) {
5746 					/*
5747 					 * Not a real write, so no reason to assert.
5748 					 * We've just allocated a new page for this
5749 					 * <object,offset> so we know nobody has any
5750 					 * PTE pointing at any previous version of this
5751 					 * page and no copy-on-write is involved here.
5752 					 * We're just inserting a page of zeroes at this
5753 					 * stage of the shadow chain because the pager
5754 					 * for the lowest object in the shadow chain
5755 					 * said it could not provide that page and we
5756 					 * want to avoid failing the fault and causing
5757 					 * a crash on this "resilient_media" mapping.
5758 					 */
5759 				} else {
5760 					assertf(!((fault_type & VM_PROT_WRITE) && object->vo_copy),
5761 					    "map %p va 0x%llx wrong path for write fault (fault_type 0x%x) on object %p with copy %p\n",
5762 					    map, (uint64_t)vaddr, fault_type, object, object->vo_copy);
5763 				}
5764 
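				/*
				 * Remember the current copy object and its version so
				 * that, if the object lock is dropped below, we can
				 * detect a copy-on-write situation that appeared in
				 * the meantime and withhold write access accordingly.
				 */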
5765 				vm_object_t saved_copy_object;
5766 				uint32_t saved_copy_version;
5767 				saved_copy_object = object->vo_copy;
5768 				saved_copy_version = object->vo_copy_version;
5769 
5770 				/*
5771 				 * Zeroing the page and entering it into the pmap
5772 				 * represents a significant amount of the zero fill fault handler's work.
5773 				 *
5774 				 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5775 				 * now that we've inserted the page into the vm object.
5776 				 * Before dropping the lock, we need to check protection bits and set the
5777 				 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5778 				 * zero it, and do the pmap enter. We'll need to reacquire the lock
5779 				 * to clear the busy bit and wake up any waiters.
5780 				 */
5781 				vm_fault_cs_clear(m);
5782 				m->vmp_pmapped = TRUE;
5783 				if (map->no_zero_fill) {
5784 					type_of_fault = DBG_NZF_PAGE_FAULT;
5785 				} else {
5786 					type_of_fault = DBG_ZERO_FILL_FAULT;
5787 				}
5788 				{
5789 					pmap_t destination_pmap;
5790 					vm_map_offset_t destination_pmap_vaddr;
5791 					vm_prot_t enter_fault_type;
5792 					if (caller_pmap) {
5793 						destination_pmap = caller_pmap;
5794 						destination_pmap_vaddr = caller_pmap_addr;
5795 					} else {
5796 						destination_pmap = pmap;
5797 						destination_pmap_vaddr = vaddr;
5798 					}
5799 					if (fault_info->fi_change_wiring) {
5800 						enter_fault_type = VM_PROT_NONE;
5801 					} else {
5802 						enter_fault_type = caller_prot;
5803 					}
5804 					assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5805 					kr = vm_fault_enter_prepare(m,
5806 					    destination_pmap,
5807 					    destination_pmap_vaddr,
5808 					    &prot,
5809 					    caller_prot,
5810 					    fault_page_size,
5811 					    fault_phys_offset,
5812 					    enter_fault_type,
5813 					    fault_info,
5814 					    &type_of_fault,
5815 					    &page_needs_data_sync);
5816 					if (kr != KERN_SUCCESS) {
5817 						goto zero_fill_cleanup;
5818 					}
5819 
5820 					if (object_is_contended) {
5821 						/*
5822 						 * At this point the page is in the vm object, but not on a paging queue.
5823 						 * Since it's accessible to another thread but its contents are invalid
5824 						 * (it hasn't been zeroed) mark it busy before dropping the object lock.
5825 						 */
5826 						m->vmp_busy = TRUE;
5827 						vm_object_paging_begin(object); /* keep object alive */
5828 						vm_object_unlock(object);
5829 					}
5830 					if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5831 						/*
5832 						 * Now zero fill page...
5833 						 * the page is probably going to
5834 						 * be written soon, so don't bother
5835 						 * to clear the modified bit
5836 						 *
5837 						 *   NOTE: This code holds the map
5838 						 *   lock across the zero fill.
5839 						 */
5840 						vm_page_zero_fill(
5841 							m
5842 							);
5843 						counter_inc(&vm_statistics_zero_fill_count);
5844 						DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5845 					}
5846 
5847 					if (object_is_contended) {
5848 						/*
5849 						 * It's not safe to do the pmap_enter() without holding
5850 						 * the object lock because its "vo_copy" could change.
5851 						 */
5852 						object_is_contended = false; /* get out of that code path */
5853 
5854 						vm_object_lock(object);
5855 						vm_object_paging_end(object);
5856 						if (object->vo_copy != saved_copy_object ||
5857 						    object->vo_copy_version != saved_copy_version) {
5858 							/*
5859 							 * The COPY_DELAY copy-on-write situation for
5860 							 * this VM object has changed while it was
5861 							 * unlocked, so do not grant write access to
5862 							 * this page.
5863 							 * The write access will fault again and we'll
5864 							 * resolve the copy-on-write then.
5865 							 */
5866 							if (pmap_has_prot_policy(pmap,
5867 							    fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE,
5868 							    prot)) {
5869 								/* we should not do CoW on pmap_has_prot_policy mappings */
5870 								panic("%s: map %p va 0x%llx obj %p,%u saved %p,%u: unexpected CoW",
5871 								    __FUNCTION__,
5872 								    map, (uint64_t)vaddr,
5873 								    object, object->vo_copy_version,
5874 								    saved_copy_object, saved_copy_version);
5875 							} else {
5876 								/* the pmap layer is OK with changing the PTE's prot */
5877 								prot &= ~VM_PROT_WRITE;
5878 							}
5879 						}
5880 					}
5881 
5882 					if (page_needs_data_sync) {
5883 						pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5884 					}
5885 
5886 					if (top_object != VM_OBJECT_NULL) {
5887 						need_retry_ptr = &need_retry;
5888 					} else {
5889 						need_retry_ptr = NULL;
5890 					}
5891 					if (fault_info->fi_xnu_user_debug &&
5892 					    !object->code_signed) {
5893 						fault_info->pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
5894 					}
5895 					if (object_is_contended) {
5896 						panic("object_is_contended");
5897 						kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5898 						    fault_page_size, fault_phys_offset,
5899 						    m, &prot, caller_prot, enter_fault_type, wired,
5900 						    fault_info->pmap_options, need_retry_ptr);
5901 						vm_object_lock(object);
5902 						assertf(!((prot & VM_PROT_WRITE) && object->vo_copy),
5903 						    "prot 0x%x object %p copy %p\n",
5904 						    prot, object, object->vo_copy);
5905 					} else {
5906 						kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5907 						    fault_page_size, fault_phys_offset,
5908 						    m, &prot, caller_prot, enter_fault_type, wired,
5909 						    fault_info->pmap_options, need_retry_ptr, &object_lock_type);
5910 					}
5911 				}
5912 zero_fill_cleanup:
5913 				if (!VM_DYNAMIC_PAGING_ENABLED() &&
5914 				    (object->purgable == VM_PURGABLE_DENY ||
5915 				    object->purgable == VM_PURGABLE_NONVOLATILE ||
5916 				    object->purgable == VM_PURGABLE_VOLATILE)) {
5917 					vm_page_lockspin_queues();
5918 					if (!VM_DYNAMIC_PAGING_ENABLED()) {
5919 						vm_fault_enqueue_throttled_locked(m);
5920 					}
5921 					vm_page_unlock_queues();
5922 				}
5923 				vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, &type_of_fault, kr);
5924 
5925 				if (__improbable(rtfault &&
5926 				    !m->vmp_realtime &&
5927 				    vm_pageout_protect_realtime)) {
5928 					vm_page_lock_queues();
5929 					if (!m->vmp_realtime) {
5930 						m->vmp_realtime = true;
5931 						VM_COUNTER_INC(&vm_page_realtime_count);
5932 					}
5933 					vm_page_unlock_queues();
5934 				}
5935 				vm_fault_complete(
5936 					map,
5937 					real_map,
5938 					object,
5939 					m_object,
5940 					m,
5941 					offset,
5942 					trace_real_vaddr,
5943 					fault_info,
5944 					caller_prot,
5945 					real_vaddr,
5946 					type_of_fault,
5947 					need_retry,
5948 					kr,
5949 					physpage_p,
5950 					prot,
5951 					top_object,
5952 					need_collapse,
5953 					cur_offset,
5954 					fault_type,
5955 					&written_on_object,
5956 					&written_on_pager,
5957 					&written_on_offset);
5958 				top_object = VM_OBJECT_NULL;
5959 				if (need_retry == TRUE) {
5960 					/*
5961 					 * vm_fault_enter couldn't complete the PMAP_ENTER...
5962 					 * at this point we don't hold any locks so it's safe
5963 					 * to ask the pmap layer to expand the page table to
5964 					 * accommodate this mapping... once expanded, we'll
5965 					 * re-drive the fault which should result in vm_fault_enter
5966 					 * being able to successfully enter the mapping this time around
5967 					 */
5968 					(void)pmap_enter_options(
5969 						pmap, vaddr, 0, 0, 0, 0, 0,
5970 						PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5971 
5972 					need_retry = FALSE;
5973 					goto RetryFault;
5974 				}
5975 				goto done;
5976 			}
5977 			/*
5978 			 * On to the next level in the shadow chain
5979 			 */
5980 			cur_offset += cur_object->vo_shadow_offset;
5981 			new_object = cur_object->shadow;
5982 			fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
5983 
5984 			/*
5985 			 * take the new_object's lock with the indicated state
5986 			 */
5987 			if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5988 				vm_object_lock_shared(new_object);
5989 			} else {
5990 				vm_object_lock(new_object);
5991 			}
5992 
5993 			if (cur_object != object) {
5994 				vm_object_unlock(cur_object);
5995 			}
5996 
5997 			cur_object = new_object;
5998 
5999 			continue;
6000 		}
6001 	}
6002 	/*
6003 	 * Cleanup from fast fault failure.  Drop any object
6004 	 * lock other than original and drop map lock.
6005 	 */
6006 	if (object != cur_object) {
6007 		vm_object_unlock(cur_object);
6008 	}
6009 
6010 	/*
6011 	 * must own the object lock exclusively at this point
6012 	 */
6013 	if (object_lock_type == OBJECT_LOCK_SHARED) {
6014 		object_lock_type = OBJECT_LOCK_EXCLUSIVE;
6015 
6016 		if (vm_object_lock_upgrade(object) == FALSE) {
6017 			/*
6018 			 * couldn't upgrade, so explicitly
6019 			 * take the lock exclusively
6020 			 * no need to retry the fault at this
6021 			 * point since "vm_fault_page" will
6022 			 * completely re-evaluate the state
6023 			 */
6024 			vm_object_lock(object);
6025 		}
6026 	}
6027 
6028 handle_copy_delay:
6029 	vm_map_unlock_read(map);
6030 	if (real_map != map) {
6031 		vm_map_unlock(real_map);
6032 	}
6033 
6034 	if (__improbable(object == compressor_object ||
6035 	    is_kernel_object(object))) {
6036 		/*
6037 		 * These objects are explicitly managed and populated by the
6038 		 * kernel.  The virtual ranges backed by these objects should
6039 		 * either have wired pages or "holes" that are not supposed to
6040 		 * be accessed at all until they get explicitly populated.
6041 		 * We should never have to resolve a fault on a mapping backed
6042 		 * by one of these VM objects and providing a zero-filled page
6043 		 * would be wrong here, so let's fail the fault and let the
6044 		 * caller crash or recover.
6045 		 */
6046 		vm_object_unlock(object);
6047 		kr = KERN_MEMORY_ERROR;
6048 		goto done;
6049 	}
6050 
6051 	resilient_media_ref_transfer = false;
6052 	if (resilient_media_retry) {
6053 		/*
6054 		 * We could get here if we failed to get a free page
6055 		 * to zero-fill and had to take the slow path again.
6056 		 * Reset our "recovery-from-failed-media" state.
6057 		 */
6058 		assert(resilient_media_object != VM_OBJECT_NULL);
6059 		assert(resilient_media_offset != (vm_object_offset_t)-1);
6060 		/* release our extra reference on failed object */
6061 //             printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6062 		if (object == resilient_media_object) {
6063 			/*
6064 			 * We're holding "object"'s lock, so we can't release
6065 			 * our extra reference at this point.
6066 			 * We need an extra reference on "object" anyway
6067 			 * (see below), so let's just transfer this reference.
6068 			 */
6069 			resilient_media_ref_transfer = true;
6070 		} else {
6071 			vm_object_deallocate(resilient_media_object);
6072 		}
6073 		resilient_media_object = VM_OBJECT_NULL;
6074 		resilient_media_offset = (vm_object_offset_t)-1;
6075 		resilient_media_retry = false;
6076 		vm_fault_resilient_media_abort2++;
6077 	}
6078 
6079 	/*
6080 	 * Make a reference to this object to
6081 	 * prevent its disposal while we are messing with
6082 	 * it.  Once we have the reference, the map is free
6083 	 * to be diddled.  Since objects reference their
6084 	 * shadows (and copies), they will stay around as well.
6085 	 */
6086 	if (resilient_media_ref_transfer) {
6087 		/* we already have an extra reference on this object */
6088 		resilient_media_ref_transfer = false;
6089 	} else {
6090 		vm_object_reference_locked(object);
6091 	}
6092 	vm_object_paging_begin(object);
6093 
6094 	set_thread_pagein_error(cthread, 0);
6095 	error_code = 0;
6096 
6097 	result_page = VM_PAGE_NULL;
6098 	vm_fault_return_t err = vm_fault_page(object, offset, fault_type,
6099 	    (fault_info->fi_change_wiring && !wired),
6100 	    FALSE,                /* page not looked up */
6101 	    &prot, &result_page, &top_page,
6102 	    &type_of_fault,
6103 	    &error_code, map->no_zero_fill,
6104 	    fault_info);
6105 
6106 	/*
6107 	 * if err != VM_FAULT_SUCCESS, then the paging reference
6108 	 * has been dropped and the object unlocked... the ref_count
6109 	 * is still held
6110 	 *
6111 	 * if err == VM_FAULT_SUCCESS, then the paging reference
6112 	 * is still held along with the ref_count on the original object
6113 	 *
6114 	 *	the object is returned locked with a paging reference
6115 	 *
6116 	 *	if top_page != NULL, then it's BUSY and the
6117 	 *	object it belongs to has a paging reference
6118 	 *	but is returned unlocked
6119 	 */
6120 	if (err != VM_FAULT_SUCCESS &&
6121 	    err != VM_FAULT_SUCCESS_NO_VM_PAGE) {
6122 		if (err == VM_FAULT_MEMORY_ERROR &&
6123 		    fault_info->resilient_media) {
6124 			assertf(object->internal, "object %p", object);
6125 			/*
6126 			 * This fault failed but the mapping was
6127 			 * "media resilient", so we'll retry the fault in
6128 			 * recovery mode to get a zero-filled page in the
6129 			 * top object.
6130 			 * Keep the reference on the failing object so
6131 			 * that we can check that the mapping is still
6132 			 * pointing to it when we retry the fault.
6133 			 */
6134 //                     printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
6135 			assert(!resilient_media_retry); /* no double retry */
6136 			assert(resilient_media_object == VM_OBJECT_NULL);
6137 			assert(resilient_media_offset == (vm_object_offset_t)-1);
6138 			resilient_media_retry = true;
6139 			resilient_media_object = object;
6140 			resilient_media_offset = offset;
6141 //                     printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
6142 			vm_fault_resilient_media_initiate++;
6143 			goto RetryFault;
6144 		} else {
6145 			/*
6146 			 * we didn't succeed, lose the object reference
6147 			 * immediately.
6148 			 */
6149 			vm_object_deallocate(object);
6150 			object = VM_OBJECT_NULL; /* no longer valid */
6151 		}
6152 
6153 		/*
6154 		 * See why we failed, and take corrective action.
6155 		 */
6156 		switch (err) {
6157 		case VM_FAULT_SUCCESS:
6158 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
6159 			/* These aren't possible but needed to make the switch exhaustive */
6160 			break;
6161 		case VM_FAULT_MEMORY_SHORTAGE:
6162 			if (vm_page_wait((fault_info->fi_change_wiring) ?
6163 			    THREAD_UNINT :
6164 			    THREAD_ABORTSAFE)) {
6165 				goto RetryFault;
6166 			}
6167 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_MEMORY_SHORTAGE), 0 /* arg */);
6168 			OS_FALLTHROUGH;
6169 		case VM_FAULT_INTERRUPTED:
6170 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
6171 			kr = KERN_ABORTED;
6172 			goto done;
6173 		case VM_FAULT_RETRY:
6174 			goto RetryFault;
6175 		case VM_FAULT_MEMORY_ERROR:
6176 			if (error_code) {
6177 				kr = error_code;
6178 			} else {
6179 				kr = KERN_MEMORY_ERROR;
6180 			}
6181 			goto done;
6182 		case VM_FAULT_BUSY:
6183 			kr = KERN_ALREADY_WAITING;
6184 			goto done;
6185 		}
6186 	}
6187 	m = result_page;
6188 	m_object = NULL;
6189 
6190 	if (m != VM_PAGE_NULL) {
6191 		m_object = VM_PAGE_OBJECT(m);
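		/*
		 * vm_fault_page() only leaves a busy "top_page" behind when the
		 * resulting page came from an object further down the shadow
		 * chain (and this is not a wiring-change fault).
		 */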
6192 		assert((fault_info->fi_change_wiring && !wired) ?
6193 		    (top_page == VM_PAGE_NULL) :
6194 		    ((top_page == VM_PAGE_NULL) == (m_object == object)));
6195 	}
6196 
6197 	/*
6198 	 * What to do with the resulting page from vm_fault_page
6199 	 * if it doesn't get entered into the physical map:
6200 	 */
6201 #define RELEASE_PAGE(m)                                 \
6202 	MACRO_BEGIN                                     \
6203 	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);                            \
6204 	if ( !VM_PAGE_PAGEABLE(m)) {                    \
6205 	        vm_page_lockspin_queues();              \
6206 	        if ( !VM_PAGE_PAGEABLE(m))              \
6207 	                vm_page_activate(m);            \
6208 	        vm_page_unlock_queues();                \
6209 	}                                               \
6210 	MACRO_END
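	/*
	 * Illustrative use of RELEASE_PAGE (a sketch only, not additional code
	 * in this path): the macro assumes the page's object is locked by the
	 * caller, which is why the error paths below re-take the lock first:
	 *
	 *	vm_object_lock(m_object);
	 *	RELEASE_PAGE(m);
	 *	vm_fault_cleanup(m_object, top_page);
	 */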
6211 
6212 
6213 	object_locks_dropped = FALSE;
6214 	/*
6215 	 * We must verify that the maps have not changed
6216 	 * since our last lookup. vm_map_verify() needs the
6217 	 * map lock (shared) but we are holding object locks.
6218 	 * So we do a try_lock() first and, if that fails, we
6219 	 * drop the object locks and go in for the map lock again.
6220 	 */
6221 	if (m != VM_PAGE_NULL) {
6222 		old_copy_object = m_object->vo_copy;
6223 		old_copy_version = m_object->vo_copy_version;
6224 	} else {
6225 		old_copy_object = VM_OBJECT_NULL;
6226 		old_copy_version = 0;
6227 	}
6228 	if (!vm_map_try_lock_read(original_map)) {
6229 		if (m != VM_PAGE_NULL) {
6230 			vm_object_unlock(m_object);
6231 		} else {
6232 			vm_object_unlock(object);
6233 		}
6234 
6235 		object_locks_dropped = TRUE;
6236 
6237 		vm_map_lock_read(original_map);
6238 	}
6239 
6240 	if ((map != original_map) || !vm_map_verify(map, &version)) {
6241 		if (object_locks_dropped == FALSE) {
6242 			if (m != VM_PAGE_NULL) {
6243 				vm_object_unlock(m_object);
6244 			} else {
6245 				vm_object_unlock(object);
6246 			}
6247 
6248 			object_locks_dropped = TRUE;
6249 		}
6250 
6251 		/*
6252 		 * no object locks are held at this point
6253 		 */
6254 		vm_object_t             retry_object;
6255 		vm_object_offset_t      retry_offset;
6256 		vm_prot_t               retry_prot;
6257 
6258 		/*
6259 		 * To avoid trying to write_lock the map while another
6260 		 * thread has it read_locked (in vm_map_pageable), we
6261 		 * do not try for write permission.  If the page is
6262 		 * still writable, we will get write permission.  If it
6263 		 * is not, or has been marked needs_copy, we enter the
6264 		 * mapping without write permission, and will merely
6265 		 * take another fault.
6266 		 */
6267 		map = original_map;
6268 
6269 		kr = vm_map_lookup_and_lock_object(&map, vaddr,
6270 		    fault_type & ~VM_PROT_WRITE,
6271 		    OBJECT_LOCK_EXCLUSIVE, &version,
6272 		    &retry_object, &retry_offset, &retry_prot,
6273 		    &wired,
6274 		    fault_info,
6275 		    &real_map,
6276 		    NULL);
6277 		pmap = real_map->pmap;
6278 
6279 		if (kr != KERN_SUCCESS) {
6280 			vm_map_unlock_read(map);
6281 
6282 			if (m != VM_PAGE_NULL) {
6283 				assert(VM_PAGE_OBJECT(m) == m_object);
6284 
6285 				/*
6286 				 * retake the lock so that
6287 				 * we can drop the paging reference
6288 				 * in vm_fault_cleanup and do the
6289 				 * vm_page_wakeup_done() in RELEASE_PAGE
6290 				 */
6291 				vm_object_lock(m_object);
6292 
6293 				RELEASE_PAGE(m);
6294 
6295 				vm_fault_cleanup(m_object, top_page);
6296 			} else {
6297 				/*
6298 				 * retake the lock so that
6299 				 * we can drop the paging reference
6300 				 * in vm_fault_cleanup
6301 				 */
6302 				vm_object_lock(object);
6303 
6304 				vm_fault_cleanup(object, top_page);
6305 			}
6306 			vm_object_deallocate(object);
6307 
6308 			if (kr == KERN_INVALID_ADDRESS) {
6309 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
6310 			}
6311 			goto done;
6312 		}
6313 		vm_object_unlock(retry_object);
6314 
6315 		if ((retry_object != object) || (retry_offset != offset)) {
6316 			vm_map_unlock_read(map);
6317 			if (real_map != map) {
6318 				vm_map_unlock(real_map);
6319 			}
6320 
6321 			if (m != VM_PAGE_NULL) {
6322 				assert(VM_PAGE_OBJECT(m) == m_object);
6323 
6324 				/*
6325 				 * retake the lock so that
6326 				 * we can drop the paging reference
6327 				 * in vm_fault_cleanup and do the
6328 				 * vm_page_wakeup_done() in RELEASE_PAGE
6329 				 */
6330 				vm_object_lock(m_object);
6331 
6332 				RELEASE_PAGE(m);
6333 
6334 				vm_fault_cleanup(m_object, top_page);
6335 			} else {
6336 				/*
6337 				 * retake the lock so that
6338 				 * we can drop the paging reference
6339 				 * in vm_fault_cleanup
6340 				 */
6341 				vm_object_lock(object);
6342 
6343 				vm_fault_cleanup(object, top_page);
6344 			}
6345 			vm_object_deallocate(object);
6346 
6347 			goto RetryFault;
6348 		}
6349 		/*
6350 		 * Check whether the protection has changed or the object
6351 		 * has been copied while we left the map unlocked.
6352 		 */
6353 		if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
6354 			/* If the pmap layer cares, pass the full set. */
6355 			prot = retry_prot;
6356 		} else {
6357 			prot &= retry_prot;
6358 		}
6359 	}
6360 
6361 	if (object_locks_dropped == TRUE) {
6362 		if (m != VM_PAGE_NULL) {
6363 			assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6364 			assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6365 			vm_object_lock(m_object);
6366 		} else {
6367 			vm_object_lock(object);
6368 		}
6369 
6370 		object_locks_dropped = FALSE;
6371 	}
6372 
6373 	if ((prot & VM_PROT_WRITE) &&
6374 	    m != VM_PAGE_NULL &&
6375 	    (m_object->vo_copy != old_copy_object ||
6376 	    m_object->vo_copy_version != old_copy_version)) {
6377 		/*
6378 		 * The copy object changed while the top-level object
6379 		 * was unlocked, so take away write permission.
6380 		 */
6381 		if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
6382 			/*
6383 			 * This pmap enforces extra constraints for this set
6384 			 * of protections, so we can't change the protections.
6385 			 * This mapping should have been setup to avoid
6386 			 * copy-on-write since that requires removing write
6387 			 * access.
6388 			 */
6389 			panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x m=%p obj %p copyobj %p",
6390 			    __FUNCTION__, pmap, (uint64_t)vaddr, prot,
6391 			    fault_info->pmap_options,
6392 			    m, m_object, m_object->vo_copy);
6393 		}
6394 		prot &= ~VM_PROT_WRITE;
6395 	}
6396 
6397 	if (!need_copy &&
6398 	    !fault_info->no_copy_on_read &&
6399 	    m != VM_PAGE_NULL &&
6400 	    VM_PAGE_OBJECT(m) != object &&
6401 	    !VM_PAGE_OBJECT(m)->pager_trusted &&
6402 	    vm_protect_privileged_from_untrusted &&
6403 	    !VM_PAGE_OBJECT(m)->code_signed &&
6404 	    current_proc_is_privileged()) {
6405 		/*
6406 		 * We found the page we want in an "untrusted" VM object
6407 		 * down the shadow chain.  Since the target is "privileged"
6408 		 * we want to perform a copy-on-read of that page, so that the
6409 		 * mapped object gets a stable copy and does not have to
6410 		 * rely on the "untrusted" object to provide the same
6411 		 * contents if the page gets reclaimed and has to be paged
6412 		 * in again later on.
6413 		 *
6414 		 * Special case: if the mapping is executable and the untrusted
6415 		 * object is code-signed and the process is "cs_enforced", we
6416 		 * do not copy-on-read because that would break code-signing
6417 		 * enforcement expectations (an executable page must belong
6418 		 * to a code-signed object) and we can rely on code-signing
6419 		 * to re-validate the page if it gets evicted and paged back in.
6420 		 */
6421 //		printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
6422 		vm_copied_on_read++;
6423 		need_copy_on_read = TRUE;
6424 		need_copy = TRUE;
6425 	} else {
6426 		need_copy_on_read = FALSE;
6427 	}
6428 
6429 	/*
6430 	 * If we want to wire down this page, but no longer have
6431 	 * adequate permissions, we must start all over.
6432 	 * If we decided to copy-on-read, we must also start all over.
6433 	 */
6434 	if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
6435 	    need_copy_on_read) {
6436 		vm_map_unlock_read(map);
6437 		if (real_map != map) {
6438 			vm_map_unlock(real_map);
6439 		}
6440 
6441 		if (m != VM_PAGE_NULL) {
6442 			assert(VM_PAGE_OBJECT(m) == m_object);
6443 
6444 			RELEASE_PAGE(m);
6445 
6446 			vm_fault_cleanup(m_object, top_page);
6447 		} else {
6448 			vm_fault_cleanup(object, top_page);
6449 		}
6450 
6451 		vm_object_deallocate(object);
6452 
6453 		goto RetryFault;
6454 	}
6455 	if (m != VM_PAGE_NULL) {
6456 		/*
6457 		 * Put this page into the physical map.
6458 		 * We had to do the unlock above because pmap_enter
6459 		 * may cause other faults.  The page may be on
6460 		 * the pageout queues.  If the pageout daemon comes
6461 		 * across the page, it will remove it from the queues.
6462 		 */
6463 		if (fault_page_size < PAGE_SIZE) {
6464 			DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
6465 			assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
6466 			    fault_phys_offset < PAGE_SIZE),
6467 			    "0x%llx\n", (uint64_t)fault_phys_offset);
6468 		} else {
6469 			assertf(fault_phys_offset == 0,
6470 			    "0x%llx\n", (uint64_t)fault_phys_offset);
6471 		}
6472 		assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6473 		assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6474 		if (caller_pmap) {
6475 			kr = vm_fault_enter(m,
6476 			    caller_pmap,
6477 			    caller_pmap_addr,
6478 			    fault_page_size,
6479 			    fault_phys_offset,
6480 			    prot,
6481 			    caller_prot,
6482 			    wired,
6483 			    wire_tag,
6484 			    fault_info,
6485 			    NULL,
6486 			    &type_of_fault,
6487 			    &object_lock_type);
6488 		} else {
6489 			kr = vm_fault_enter(m,
6490 			    pmap,
6491 			    vaddr,
6492 			    fault_page_size,
6493 			    fault_phys_offset,
6494 			    prot,
6495 			    caller_prot,
6496 			    wired,
6497 			    wire_tag,
6498 			    fault_info,
6499 			    NULL,
6500 			    &type_of_fault,
6501 			    &object_lock_type);
6502 		}
6503 		assert(VM_PAGE_OBJECT(m) == m_object);
6504 
6505 		{
6506 			int     event_code = 0;
6507 
6508 			if (m_object->internal) {
6509 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6510 			} else if (m_object->object_is_shared_cache) {
6511 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6512 			} else {
6513 				event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6514 			}
6515 
6516 			KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid());
6517 			KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid());
6518 
6519 			DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
6520 		}
6521 		if (kr != KERN_SUCCESS) {
6522 			/* abort this page fault */
6523 			vm_map_unlock_read(map);
6524 			if (real_map != map) {
6525 				vm_map_unlock(real_map);
6526 			}
6527 			vm_page_wakeup_done(m_object, m);
6528 			vm_fault_cleanup(m_object, top_page);
6529 			vm_object_deallocate(object);
6530 			goto done;
6531 		}
6532 		if (physpage_p != NULL) {
6533 			/* for vm_map_wire_and_extract() */
6534 			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6535 			if (prot & VM_PROT_WRITE) {
6536 				vm_object_lock_assert_exclusive(m_object);
6537 				m->vmp_dirty = TRUE;
6538 			}
6539 		}
6540 	} else {
6541 		vm_map_entry_t          entry;
6542 		vm_map_offset_t         laddr;
6543 		vm_map_offset_t         ldelta, hdelta;
6544 
6545 		/*
6546 		 * do a pmap block mapping from the physical address
6547 		 * in the object
6548 		 */
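		/*
		 * Note (descriptive only): "ldelta" and "hdelta" start out
		 * effectively unbounded and are trimmed at each map level to the
		 * distance from "laddr" to the enclosing entry's start and end,
		 * so the block mapping set up below never extends past any of
		 * the containing map entries.
		 */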
6549 
6550 		if (real_map != map) {
6551 			vm_map_unlock(real_map);
6552 		}
6553 
6554 		if (original_map != map) {
6555 			vm_map_unlock_read(map);
6556 			vm_map_lock_read(original_map);
6557 			map = original_map;
6558 		}
6559 		real_map = map;
6560 
6561 		laddr = vaddr;
6562 		hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
6563 
6564 		while (vm_map_lookup_entry(map, laddr, &entry)) {
6565 			if (ldelta > (laddr - entry->vme_start)) {
6566 				ldelta = laddr - entry->vme_start;
6567 			}
6568 			if (hdelta > (entry->vme_end - laddr)) {
6569 				hdelta = entry->vme_end - laddr;
6570 			}
6571 			if (entry->is_sub_map) {
6572 				vm_map_t sub_map;
6573 				bool use_pmap;
6574 
6575 				laddr = ((laddr - entry->vme_start)
6576 				    + VME_OFFSET(entry));
6577 				vm_map_lock_read(VME_SUBMAP(entry));
6578 				sub_map = VME_SUBMAP(entry);
6579 				use_pmap = entry->use_pmap;
6580 				entry = VM_MAP_ENTRY_NULL; /* not valid after unlock */
6581 				if (map != real_map) {
6582 					vm_map_unlock_read(map);
6583 				}
6584 				if (use_pmap) {
6585 					vm_map_unlock_read(real_map);
6586 					real_map = sub_map;
6587 				}
6588 				map = sub_map;
6589 			} else {
6590 				break;
6591 			}
6592 		}
6593 
6594 		if (vm_map_lookup_entry(map, laddr, &entry) &&
6595 		    (!entry->is_sub_map) &&
6596 		    (object != VM_OBJECT_NULL) &&
6597 		    (VME_OBJECT(entry) == object)) {
6598 			uint16_t superpage;
6599 
6600 			if (!object->pager_created &&
6601 			    object->phys_contiguous &&
6602 			    VME_OFFSET(entry) == 0 &&
6603 			    (entry->vme_end - entry->vme_start == object->vo_size) &&
6604 			    VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6605 				superpage = VM_MEM_SUPERPAGE;
6606 			} else {
6607 				superpage = 0;
6608 			}
6609 
6610 			if (superpage && physpage_p) {
6611 				/* for vm_map_wire_and_extract() */
6612 				*physpage_p = (ppnum_t)
6613 				    ((((vm_map_offset_t)
6614 				    object->vo_shadow_offset)
6615 				    + VME_OFFSET(entry)
6616 				    + (laddr - entry->vme_start))
6617 				    >> PAGE_SHIFT);
6618 			}
6619 
6620 			/*
6621 			 * Set up a block mapped area
6622 			 */
6623 			assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6624 			pmap_t block_map_pmap;
6625 			addr64_t block_map_va;
6626 			pmap_paddr_t block_map_pa = (pmap_paddr_t)(((vm_map_offset_t)(object->vo_shadow_offset)) +
6627 			    VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta);
6628 			int block_map_wimg = VM_WIMG_MASK & (int)object->wimg_bits;
6629 			if (caller_pmap) {
6630 				block_map_pmap = caller_pmap;
6631 				block_map_va = (addr64_t)(caller_pmap_addr - ldelta);
6632 			} else {
6633 				block_map_pmap = real_map->pmap;
6634 				block_map_va = (addr64_t)(vaddr - ldelta);
6635 			}
6636 			kr = pmap_map_block_addr(block_map_pmap,
6637 			    block_map_va,
6638 			    block_map_pa,
6639 			    (uint32_t)((ldelta + hdelta) >> fault_page_shift),
6640 			    prot,
6641 			    block_map_wimg | superpage,
6642 			    0);
6643 
6644 			if (kr != KERN_SUCCESS) {
6645 				goto cleanup;
6646 			}
6647 		}
6648 	}
6649 
6650 	/*
6651 	 * Success
6652 	 */
6653 	kr = KERN_SUCCESS;
6654 
6655 	/*
6656 	 * TODO: could most of the done cases just use cleanup?
6657 	 */
6658 cleanup:
6659 	/*
6660 	 * Unlock everything, and return
6661 	 */
6662 	vm_map_unlock_read(map);
6663 	if (real_map != map) {
6664 		vm_map_unlock(real_map);
6665 	}
6666 
6667 	if (m != VM_PAGE_NULL) {
6668 		if (__improbable(rtfault &&
6669 		    !m->vmp_realtime &&
6670 		    vm_pageout_protect_realtime)) {
6671 			vm_page_lock_queues();
6672 			if (!m->vmp_realtime) {
6673 				m->vmp_realtime = true;
6674 				VM_COUNTER_INC(&vm_page_realtime_count);
6675 			}
6676 			vm_page_unlock_queues();
6677 		}
6678 		assert(VM_PAGE_OBJECT(m) == m_object);
6679 
6680 		if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6681 			vm_object_paging_begin(m_object);
6682 
6683 			assert(written_on_object == VM_OBJECT_NULL);
6684 			written_on_object = m_object;
6685 			written_on_pager = m_object->pager;
6686 			written_on_offset = m_object->paging_offset + m->vmp_offset;
6687 		}
6688 		vm_page_wakeup_done(m_object, m);
6689 
6690 		vm_fault_cleanup(m_object, top_page);
6691 	} else {
6692 		vm_fault_cleanup(object, top_page);
6693 	}
6694 
6695 	vm_object_deallocate(object);
6696 
6697 #undef  RELEASE_PAGE
6698 
6699 done:
6700 	thread_interrupt_level(interruptible_state);
6701 
6702 	if (resilient_media_object != VM_OBJECT_NULL) {
6703 		assert(resilient_media_retry);
6704 		assert(resilient_media_offset != (vm_object_offset_t)-1);
6705 		/* release extra reference on failed object */
6706 //             printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6707 		vm_object_deallocate(resilient_media_object);
6708 		resilient_media_object = VM_OBJECT_NULL;
6709 		resilient_media_offset = (vm_object_offset_t)-1;
6710 		resilient_media_retry = false;
6711 		vm_fault_resilient_media_release++;
6712 	}
6713 	assert(!resilient_media_retry);
6714 
6715 	/*
6716 	 * Only I/O throttle on faults which cause a pagein/swapin.
6717 	 */
6718 	if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6719 		throttle_lowpri_io(1);
6720 	} else {
6721 		if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6722 			if ((throttle_delay = vm_page_throttled(TRUE))) {
6723 				if (vm_debug_events) {
6724 					if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6725 						VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6726 					} else if (type_of_fault == DBG_COW_FAULT) {
6727 						VM_DEBUG_EVENT(vmf_cowdelay, DBG_VM_FAULT_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6728 					} else {
6729 						VM_DEBUG_EVENT(vmf_zfdelay, DBG_VM_FAULT_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6730 					}
6731 				}
6732 				__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
6733 			}
6734 		}
6735 	}
6736 
6737 	if (written_on_object) {
6738 		vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6739 
6740 		vm_object_lock(written_on_object);
6741 		vm_object_paging_end(written_on_object);
6742 		vm_object_unlock(written_on_object);
6743 
6744 		written_on_object = VM_OBJECT_NULL;
6745 	}
6746 
6747 	if (rtfault) {
6748 		vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6749 	}
6750 
6751 	KDBG_RELEASE(
6752 		(VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
6753 		((uint64_t)trace_vaddr >> 32),
6754 		trace_vaddr,
6755 		kr,
6756 		vm_fault_type_for_tracing(need_copy_on_read, type_of_fault));
6757 
6758 	if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6759 		DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6760 	}
6761 
6762 	return kr;
6763 }
6764 
6765 /*
6766  *	vm_fault_wire:
6767  *
6768  *	Wire down a range of virtual addresses in a map.
6769  */
6770 kern_return_t
6771 vm_fault_wire(
6772 	vm_map_t        map,
6773 	vm_map_entry_t  entry,
6774 	vm_prot_t       prot,
6775 	vm_tag_t        wire_tag,
6776 	pmap_t          pmap,
6777 	vm_map_offset_t pmap_addr,
6778 	ppnum_t         *physpage_p)
6779 {
6780 	vm_map_offset_t va;
6781 	vm_map_offset_t end_addr = entry->vme_end;
6782 	kern_return_t   rc;
6783 	vm_map_size_t   effective_page_size;
6784 
6785 	assert(entry->in_transition);
6786 
6787 	if (!entry->is_sub_map &&
6788 	    VME_OBJECT(entry) != VM_OBJECT_NULL &&
6789 	    VME_OBJECT(entry)->phys_contiguous) {
6790 		return KERN_SUCCESS;
6791 	}
6792 
6793 	/*
6794 	 *	Inform the physical mapping system that the
6795 	 *	range of addresses may not fault, so that
6796 	 *	page tables and such can be locked down as well.
6797 	 */
6798 
6799 	pmap_pageable(pmap, pmap_addr,
6800 	    pmap_addr + (end_addr - entry->vme_start), FALSE);
6801 
6802 	/*
6803 	 *	We simulate a fault to get the page and enter it
6804 	 *	in the physical map.
6805 	 */
6806 
6807 	effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6808 	for (va = entry->vme_start;
6809 	    va < end_addr;
6810 	    va += effective_page_size) {
6811 		rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6812 		    pmap_addr + (va - entry->vme_start),
6813 		    physpage_p);
6814 		if (rc != KERN_SUCCESS) {
6815 			struct vm_object_fault_info fault_info = {
6816 				.interruptible = (pmap == kernel_pmap) ? THREAD_UNINT : THREAD_ABORTSAFE,
6817 				.behavior = VM_BEHAVIOR_SEQUENTIAL,
6818 				.fi_change_wiring = true,
6819 			};
6820 			if (os_sub_overflow(end_addr, va, &fault_info.cluster_size)) {
6821 				fault_info.cluster_size = UPL_SIZE_MAX;
6822 			}
6823 			rc = vm_fault_internal(map, va, prot, wire_tag,
6824 			    pmap,
6825 			    (pmap_addr +
6826 			    (va - entry->vme_start)),
6827 			    physpage_p,
6828 			    &fault_info);
6829 			DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6830 		}
6831 
6832 		if (rc != KERN_SUCCESS) {
6833 			struct vm_map_entry     tmp_entry = *entry;
6834 
6835 			/* unwire wired pages */
6836 			tmp_entry.vme_end = va;
6837 			vm_fault_unwire(map, &tmp_entry, FALSE,
6838 			    pmap, pmap_addr, tmp_entry.vme_end);
6839 
6840 			return rc;
6841 		}
6842 	}
6843 	return KERN_SUCCESS;
6844 }
6845 
6846 /*
6847  *	vm_fault_unwire:
6848  *
6849  *	Unwire a range of virtual addresses in a map.
6850  */
6851 void
6852 vm_fault_unwire(
6853 	vm_map_t        map,
6854 	vm_map_entry_t  entry,
6855 	boolean_t       deallocate,
6856 	pmap_t          pmap,
6857 	vm_map_offset_t pmap_addr,
6858 	vm_map_offset_t end_addr)
6859 {
6860 	vm_map_offset_t va;
6861 	vm_object_t     object;
6862 	struct vm_object_fault_info fault_info = {
6863 		.interruptible = THREAD_UNINT,
6864 	};
6865 	unsigned int    unwired_pages;
6866 	vm_map_size_t   effective_page_size;
6867 
6868 	object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6869 
6870 	/*
6871 	 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6872 	 * do anything since such memory is wired by default.  So we don't have
6873 	 * anything to undo here.
6874 	 */
6875 
6876 	if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6877 		return;
6878 	}
6879 
6880 	fault_info.interruptible = THREAD_UNINT;
6881 	fault_info.behavior = entry->behavior;
6882 	fault_info.user_tag = VME_ALIAS(entry);
6883 	if (entry->iokit_acct ||
6884 	    (!entry->is_sub_map && !entry->use_pmap)) {
6885 		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6886 	}
6887 	fault_info.lo_offset = VME_OFFSET(entry);
6888 	fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6889 	fault_info.no_cache = entry->no_cache;
6890 	fault_info.stealth = TRUE;
6891 	if (entry->vme_xnu_user_debug) {
6892 		/*
6893 		 * Modified code-signed executable region: wired pages must
6894 		 * have been copied, so they should be XNU_USER_DEBUG rather
6895 		 * than XNU_USER_EXEC.
6896 		 */
6897 		fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6898 	}
6899 
6900 	unwired_pages = 0;
6901 
6902 	/*
6903 	 *	Since the pages are wired down, we must be able to
6904 	 *	get their mappings from the physical map system.
6905 	 */
6906 
6907 	effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6908 	for (va = entry->vme_start;
6909 	    va < end_addr;
6910 	    va += effective_page_size) {
6911 		if (object == VM_OBJECT_NULL) {
6912 			if (pmap) {
6913 				pmap_change_wiring(pmap,
6914 				    pmap_addr + (va - entry->vme_start), FALSE);
6915 			}
6916 			(void) vm_fault(map, va, VM_PROT_NONE,
6917 			    TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6918 		} else {
6919 			vm_prot_t       prot;
6920 			vm_page_t       result_page;
6921 			vm_page_t       top_page;
6922 			vm_object_t     result_object;
6923 			vm_fault_return_t result;
6924 
6925 			/* cap cluster size at maximum UPL size */
6926 			upl_size_t cluster_size;
6927 			if (os_sub_overflow(end_addr, va, &cluster_size)) {
6928 				cluster_size = UPL_SIZE_MAX;
6929 			}
6930 			fault_info.cluster_size = cluster_size;
6931 
6932 			do {
6933 				prot = VM_PROT_NONE;
6934 
6935 				vm_object_lock(object);
6936 				vm_object_paging_begin(object);
6937 				result_page = VM_PAGE_NULL;
6938 				result = vm_fault_page(
6939 					object,
6940 					(VME_OFFSET(entry) +
6941 					(va - entry->vme_start)),
6942 					VM_PROT_NONE, TRUE,
6943 					FALSE, /* page not looked up */
6944 					&prot, &result_page, &top_page,
6945 					(int *)0,
6946 					NULL, map->no_zero_fill,
6947 					&fault_info);
6948 			} while (result == VM_FAULT_RETRY);
6949 
6950 			/*
6951 			 * If this was a mapping to a file on a device that has been forcibly
6952 			 * unmounted, then we won't get a page back from vm_fault_page().  Just
6953 			 * move on to the next one in case the remaining pages are mapped from
6954 			 * different objects.  During a forced unmount, the object is terminated
6955 			 * so the alive flag will be false if this happens.  A forced unmount will
6956 			 * occur when an external disk is unplugged before the user does an
6957 			 * eject, so we don't want to panic in that situation.
6958 			 */
6959 
6960 			if (result == VM_FAULT_MEMORY_ERROR) {
6961 				if (!object->alive) {
6962 					continue;
6963 				}
6964 				if (!object->internal && object->pager == NULL) {
6965 					continue;
6966 				}
6967 			}
6968 
6969 			if (result == VM_FAULT_MEMORY_ERROR &&
6970 			    is_kernel_object(object)) {
6971 				/*
6972 				 * This must have been allocated with
6973 				 * KMA_KOBJECT and KMA_VAONLY and there's
6974 				 * no physical page at this offset.
6975 				 * We're done (no page to free).
6976 				 */
6977 				assert(deallocate);
6978 				continue;
6979 			}
6980 
6981 			if (result != VM_FAULT_SUCCESS) {
6982 				panic("vm_fault_unwire: failure");
6983 			}
6984 
6985 			result_object = VM_PAGE_OBJECT(result_page);
6986 
6987 			if (deallocate) {
6988 				assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6989 				    vm_page_fictitious_addr);
6990 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6991 				if (VM_PAGE_WIRED(result_page)) {
6992 					unwired_pages++;
6993 				}
6994 				VM_PAGE_FREE(result_page);
6995 			} else {
6996 				if (pmap && !vm_page_is_guard(result_page)) {
6997 					pmap_change_wiring(pmap,
6998 					    pmap_addr + (va - entry->vme_start), FALSE);
6999 				}
7000 
7001 
7002 				if (VM_PAGE_WIRED(result_page)) {
7003 					vm_page_lockspin_queues();
7004 					vm_page_unwire(result_page, TRUE);
7005 					vm_page_unlock_queues();
7006 					unwired_pages++;
7007 				}
7008 				if (entry->zero_wired_pages &&
7009 				    (entry->protection & VM_PROT_WRITE) &&
7010 #if __arm64e__
7011 				    !entry->used_for_tpro &&
7012 #endif /* __arm64e__ */
7013 				    !entry->used_for_jit) {
7014 					pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
7015 				}
7016 
7017 				vm_page_wakeup_done(result_object, result_page);
7018 			}
7019 			vm_fault_cleanup(result_object, top_page);
7020 		}
7021 	}
7022 
7023 	/*
7024 	 *	Inform the physical mapping system that the range
7025 	 *	of addresses may fault, so that page tables and
7026 	 *	such may be unwired themselves.
7027 	 */
7028 
7029 	pmap_pageable(pmap, pmap_addr,
7030 	    pmap_addr + (end_addr - entry->vme_start), TRUE);
7031 
7032 	if (is_kernel_object(object)) {
7033 		/*
7034 		 * Would like to make user_tag in vm_object_fault_info
7035 		 * vm_tag_t (unsigned short) but user_tag derives its value from
7036 		 * VME_ALIAS(entry) in a few places and VME_ALIAS, in turn, casts
7037 		 * to an _unsigned int_, which is used by non-fault_info paths
7038 		 * throughout the code in many places.
7039 		 *
7040 		 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
7041 		 */
7042 		assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
7043 		    "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
7044 		vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages), NULL);
7045 	}
7046 }
7047 
7048 /*
7049  *	vm_fault_wire_fast:
7050  *
7051  *	Handle common case of a wire down page fault at the given address.
7052  *	If successful, the page is inserted into the associated physical map.
7053  *	The map entry is passed in to avoid the overhead of a map lookup.
7054  *
7055  *	NOTE: the given address should be truncated to the
7056  *	proper page address.
7057  *
7058  *	KERN_SUCCESS is returned if the page fault is handled; otherwise,
7059  *	a standard error specifying why the fault is fatal is returned.
7060  *
7061  *	The map in question must be referenced, and remains so.
7062  *	Caller has a read lock on the map.
7063  *
7064  *	This is a stripped version of vm_fault() for wiring pages.  Anything
7065  *	other than the common case will return KERN_FAILURE, and the caller
7066  *	is expected to call vm_fault().
7067  */
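/*
 *	Sketch of the expected fallback pattern (illustrative only; see the
 *	loop in vm_fault_wire() above for the real thing):
 *
 *		kr = vm_fault_wire_fast(map, va, prot, wire_tag, entry,
 *		    pmap, pmap_addr, physpage_p);
 *		if (kr != KERN_SUCCESS) {
 *			// uncommon case: take the full fault path instead
 *			kr = vm_fault_internal(map, va, prot, wire_tag,
 *			    pmap, pmap_addr, physpage_p, &fault_info);
 *		}
 */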
7068 static kern_return_t
7069 vm_fault_wire_fast(
7070 	__unused vm_map_t       map,
7071 	vm_map_offset_t va,
7072 	__unused vm_prot_t       caller_prot,
7073 	vm_tag_t        wire_tag,
7074 	vm_map_entry_t  entry,
7075 	pmap_t          pmap,
7076 	vm_map_offset_t pmap_addr,
7077 	ppnum_t         *physpage_p)
7078 {
7079 	vm_object_t             object;
7080 	vm_object_offset_t      offset;
7081 	vm_page_t               m;
7082 	vm_prot_t               prot;
7083 	thread_t                thread = current_thread();
7084 	int                     type_of_fault;
7085 	kern_return_t           kr;
7086 	vm_map_size_t           fault_page_size;
7087 	vm_map_offset_t         fault_phys_offset;
7088 	struct vm_object_fault_info fault_info = {
7089 		.interruptible = THREAD_UNINT,
7090 	};
7091 	uint8_t                 object_lock_type = 0;
7092 
7093 	counter_inc(&vm_statistics_faults);
7094 
7095 	if (thread != THREAD_NULL) {
7096 		counter_inc(&get_threadtask(thread)->faults);
7097 	}
7098 
7099 /*
7100  *	Recovery actions
7101  */
7102 
7103 #undef  RELEASE_PAGE
7104 #define RELEASE_PAGE(m) {                               \
7105 	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);                            \
7106 	vm_page_lockspin_queues();                      \
7107 	vm_page_unwire(m, TRUE);                        \
7108 	vm_page_unlock_queues();                        \
7109 }
7110 
7111 
7112 #undef  UNLOCK_THINGS
7113 #define UNLOCK_THINGS   {                               \
7114 	vm_object_paging_end(object);                      \
7115 	vm_object_unlock(object);                          \
7116 }
7117 
7118 #undef  UNLOCK_AND_DEALLOCATE
7119 #define UNLOCK_AND_DEALLOCATE   {                       \
7120 	UNLOCK_THINGS;                                  \
7121 	vm_object_deallocate(object);                   \
7122 }
7123 /*
7124  *	Give up and have caller do things the hard way.
7125  */
7126 
7127 #define GIVE_UP {                                       \
7128 	UNLOCK_AND_DEALLOCATE;                          \
7129 	return(KERN_FAILURE);                           \
7130 }
7131 
7132 
7133 	/*
7134 	 *	If this entry is not directly to a vm_object, bail out.
7135 	 */
7136 	if (entry->is_sub_map) {
7137 		assert(physpage_p == NULL);
7138 		return KERN_FAILURE;
7139 	}
7140 
7141 	/*
7142 	 *	Find the backing store object and offset into it.
7143 	 */
7144 
7145 	object = VME_OBJECT(entry);
7146 	offset = (va - entry->vme_start) + VME_OFFSET(entry);
7147 	prot = entry->protection;
7148 
7149 	/*
7150 	 *	Make a reference to this object to prevent its
7151 	 *	disposal while we are messing with it.
7152 	 */
7153 
7154 	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
7155 	vm_object_lock(object);
7156 	vm_object_reference_locked(object);
7157 	vm_object_paging_begin(object);
7158 
7159 	/*
7160 	 *	INVARIANTS (through entire routine):
7161 	 *
7162 	 *	1)	At all times, we must either have the object
7163 	 *		lock or a busy page in some object to prevent
7164 	 *		some other thread from trying to bring in
7165 	 *		the same page.
7166 	 *
7167 	 *	2)	Once we have a busy page, we must remove it from
7168 	 *		the pageout queues, so that the pageout daemon
7169 	 *		will not grab it away.
7170 	 *
7171 	 */
7172 
7173 	/*
7174 	 *	Look for page in top-level object.  If it's not there or
7175 	 *	there's something going on, give up.
7176 	 */
7177 	m = vm_page_lookup(object, vm_object_trunc_page(offset));
7178 	if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
7179 	    (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
7180 		GIVE_UP;
7181 	}
7182 	if (vm_page_is_guard(m)) {
7183 		/*
7184 		 * Guard pages are fictitious pages and are never
7185 		 * entered into a pmap, so let's say it's been wired...
7186 		 */
7187 		kr = KERN_SUCCESS;
7188 		goto done;
7189 	}
7190 
7191 	/*
7192 	 *	Wire the page down now.  All bail outs beyond this
7193 	 *	point must unwire the page.
7194 	 */
7195 
7196 	vm_page_lockspin_queues();
7197 	vm_page_wire(m, wire_tag, TRUE);
7198 	vm_page_unlock_queues();
7199 
7200 	/*
7201 	 *	Mark page busy for other threads.
7202 	 */
7203 	assert(!m->vmp_busy);
7204 	m->vmp_busy = TRUE;
7205 	assert(!m->vmp_absent);
7206 
7207 	/*
7208 	 *	Give up if the page is being written and there's a copy object
7209 	 */
7210 	if ((object->vo_copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
7211 		RELEASE_PAGE(m);
7212 		GIVE_UP;
7213 	}
7214 
7215 	fault_info.user_tag = VME_ALIAS(entry);
7216 	fault_info.pmap_options = 0;
7217 	if (entry->iokit_acct ||
7218 	    (!entry->is_sub_map && !entry->use_pmap)) {
7219 		fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
7220 	}
7221 	if (entry->vme_xnu_user_debug) {
7222 		/*
7223 		 * Modified code-signed executable region: wiring will
7224 		 * copy the pages, so they should be XNU_USER_DEBUG rather
7225 		 * than XNU_USER_EXEC.
7226 		 */
7227 		fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
7228 	}
7229 
7230 	if (entry->translated_allow_execute) {
7231 		fault_info.pmap_options |= PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE;
7232 	}
7233 
7234 	fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
7235 	fault_phys_offset = offset - vm_object_trunc_page(offset);
7236 
7237 	/*
7238 	 *	Put this page into the physical map.
7239 	 */
7240 	type_of_fault = DBG_CACHE_HIT_FAULT;
7241 	assert3p(VM_PAGE_OBJECT(m), ==, object);
7242 	kr = vm_fault_enter(m,
7243 	    pmap,
7244 	    pmap_addr,
7245 	    fault_page_size,
7246 	    fault_phys_offset,
7247 	    prot,
7248 	    prot,
7249 	    TRUE,                  /* wired */
7250 	    wire_tag,
7251 	    &fault_info,
7252 	    NULL,
7253 	    &type_of_fault,
7254 	    &object_lock_type); /* Exclusive lock mode. Will remain unchanged.*/
7255 	if (kr != KERN_SUCCESS) {
7256 		RELEASE_PAGE(m);
7257 		GIVE_UP;
7258 	}
7259 
7260 
7261 done:
7262 	/*
7263 	 *	Unlock everything, and return
7264 	 */
7265 
7266 	if (physpage_p) {
7267 		/* for vm_map_wire_and_extract() */
7268 		if (kr == KERN_SUCCESS) {
7269 			assert3p(object, ==, VM_PAGE_OBJECT(m));
7270 			*physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
7271 			if (prot & VM_PROT_WRITE) {
7272 				vm_object_lock_assert_exclusive(object);
7273 				m->vmp_dirty = TRUE;
7274 			}
7275 		} else {
7276 			*physpage_p = 0;
7277 		}
7278 	}
7279 
7280 	if (m->vmp_busy) {
7281 		vm_page_wakeup_done(object, m);
7282 	}
7283 
7284 	UNLOCK_AND_DEALLOCATE;
7285 
7286 	return kr;
7287 }
7288 
7289 /*
7290  *	Routine:	vm_fault_copy_cleanup
7291  *	Purpose:
7292  *		Release a page used by vm_fault_copy.
7293  */
7294 
7295 static void
7296 vm_fault_copy_cleanup(
7297 	vm_page_t       page,
7298 	vm_page_t       top_page)
7299 {
7300 	vm_object_t     object = VM_PAGE_OBJECT(page);
7301 
7302 	vm_object_lock(object);
7303 	vm_page_wakeup_done(object, page);
7304 	if (!VM_PAGE_PAGEABLE(page)) {
7305 		vm_page_lockspin_queues();
7306 		if (!VM_PAGE_PAGEABLE(page)) {
7307 			vm_page_activate(page);
7308 		}
7309 		vm_page_unlock_queues();
7310 	}
7311 	vm_fault_cleanup(object, top_page);
7312 }
7313 
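/*
 *	Routine:	vm_fault_copy_dst_cleanup
 *	Purpose:
 *		Release a destination page used by vm_fault_copy: drop the
 *		wiring taken while the copy was in progress and end the
 *		paging reference on the page's object.
 */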
7314 static void
7315 vm_fault_copy_dst_cleanup(
7316 	vm_page_t       page)
7317 {
7318 	vm_object_t     object;
7319 
7320 	if (page != VM_PAGE_NULL) {
7321 		object = VM_PAGE_OBJECT(page);
7322 		vm_object_lock(object);
7323 		vm_page_lockspin_queues();
7324 		vm_page_unwire(page, TRUE);
7325 		vm_page_unlock_queues();
7326 		vm_object_paging_end(object);
7327 		vm_object_unlock(object);
7328 	}
7329 }
7330 
7331 /*
7332  *	Routine:	vm_fault_copy
7333  *
7334  *	Purpose:
7335  *		Copy pages from one virtual memory object to another --
7336  *		neither the source nor destination pages need be resident.
7337  *
7338  *		Before actually copying a page, the version associated with
7339  *		the destination address map will be verified.
7340  *
7341  *	In/out conditions:
7342  *		The caller must hold a reference, but not a lock, to
7343  *		each of the source and destination objects and to the
7344  *		destination map.
7345  *
7346  *	Results:
7347  *		Returns KERN_SUCCESS if no errors were encountered in
7348  *		reading or writing the data.  Returns KERN_INTERRUPTED if
7349  *		the operation was interrupted (only possible if the
7350  *		"interruptible" argument is asserted).  Other return values
7351  *		indicate a permanent error in copying the data.
7352  *
7353  *		The actual amount of data copied will be returned in the
7354  *		"copy_size" argument.  In the event that the destination map
7355  *		verification failed, this amount may be less than the amount
7356  *		requested.
7357  */
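/*
 *	Minimal usage sketch (illustrative; the identifiers are placeholders,
 *	not callers in this file).  "copy_size" is IN/OUT, so the caller must
 *	re-check it on return to learn how much was actually copied:
 *
 *		vm_map_size_t copied = len;
 *		kr = vm_fault_copy(src_object, src_offset, &copied,
 *		    dst_object, dst_offset, dst_map, &dst_version,
 *		    THREAD_UNINT);
 *		// "copied" may be less than "len" if dst_map verification failed
 */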
7358 kern_return_t
7359 vm_fault_copy(
7360 	vm_object_t             src_object,
7361 	vm_object_offset_t      src_offset,
7362 	vm_map_size_t           *copy_size,             /* INOUT */
7363 	vm_object_t             dst_object,
7364 	vm_object_offset_t      dst_offset,
7365 	vm_map_t                dst_map,
7366 	vm_map_version_t         *dst_version,
7367 	int                     interruptible)
7368 {
7369 	vm_page_t               result_page;
7370 
7371 	vm_page_t               src_page;
7372 	vm_page_t               src_top_page;
7373 	vm_prot_t               src_prot;
7374 
7375 	vm_page_t               dst_page;
7376 	vm_page_t               dst_top_page;
7377 	vm_prot_t               dst_prot;
7378 
7379 	vm_map_size_t           amount_left;
7380 	vm_object_t             old_copy_object;
7381 	uint32_t                old_copy_version;
7382 	vm_object_t             result_page_object = NULL;
7383 	kern_return_t           error = 0;
7384 	vm_fault_return_t       result;
7385 
7386 	vm_map_size_t           part_size;
7387 	struct vm_object_fault_info fault_info_src = {};
7388 	struct vm_object_fault_info fault_info_dst = {};
7389 
7390 	/*
7391 	 * In order not to confuse the clustered pageins, align
7392 	 * the different offsets on a page boundary.
7393 	 */
7394 
7395 #define RETURN(x)                                       \
7396 	MACRO_BEGIN                                     \
7397 	*copy_size -= amount_left;                      \
7398 	MACRO_RETURN(x);                                \
7399 	MACRO_END
7400 
7401 	amount_left = *copy_size;
7402 
7403 	fault_info_src.interruptible = interruptible;
7404 	fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
7405 	fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
7406 	fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
7407 	fault_info_src.stealth = TRUE;
7408 
7409 	fault_info_dst.interruptible = interruptible;
7410 	fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
7411 	fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
7412 	fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
7413 	fault_info_dst.stealth = TRUE;
7414 
7415 	do { /* while (amount_left > 0) */
7416 		/*
7417 		 * There may be a deadlock if both source and destination
7418 		 * pages are the same. To avoid this deadlock, the copy must
7419 		 * start by getting the destination page in order to apply
7420 		 * COW semantics if any.
7421 		 */
7422 
7423 RetryDestinationFault:;
7424 
7425 		dst_prot = VM_PROT_WRITE | VM_PROT_READ;
7426 
7427 		vm_object_lock(dst_object);
7428 		vm_object_paging_begin(dst_object);
7429 
7430 		/* cap cluster size at maximum UPL size */
7431 		upl_size_t cluster_size;
7432 		if (os_convert_overflow(amount_left, &cluster_size)) {
7433 			cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7434 		}
7435 		fault_info_dst.cluster_size = cluster_size;
7436 
7437 		dst_page = VM_PAGE_NULL;
7438 		result = vm_fault_page(dst_object,
7439 		    vm_object_trunc_page(dst_offset),
7440 		    VM_PROT_WRITE | VM_PROT_READ,
7441 		    FALSE,
7442 		    FALSE,                    /* page not looked up */
7443 		    &dst_prot, &dst_page, &dst_top_page,
7444 		    (int *)0,
7445 		    &error,
7446 		    dst_map->no_zero_fill,
7447 		    &fault_info_dst);
7448 		switch (result) {
7449 		case VM_FAULT_SUCCESS:
7450 			break;
7451 		case VM_FAULT_RETRY:
7452 			goto RetryDestinationFault;
7453 		case VM_FAULT_MEMORY_SHORTAGE:
7454 			if (vm_page_wait(interruptible)) {
7455 				goto RetryDestinationFault;
7456 			}
7457 			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_COPY_MEMORY_SHORTAGE), 0 /* arg */);
7458 			OS_FALLTHROUGH;
7459 		case VM_FAULT_INTERRUPTED:
7460 			RETURN(MACH_SEND_INTERRUPTED);
7461 		case VM_FAULT_SUCCESS_NO_VM_PAGE:
7462 			/* success but no VM page: fail the copy */
7463 			vm_object_paging_end(dst_object);
7464 			vm_object_unlock(dst_object);
7465 			OS_FALLTHROUGH;
7466 		case VM_FAULT_MEMORY_ERROR:
7467 			if (error) {
7468 				return error;
7469 			} else {
7470 				return KERN_MEMORY_ERROR;
7471 			}
7472 		default:
7473 			panic("vm_fault_copy: unexpected error 0x%x from "
7474 			    "vm_fault_page()\n", result);
7475 		}
7476 		assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
7477 
7478 		assert(dst_object == VM_PAGE_OBJECT(dst_page));
7479 		old_copy_object = dst_object->vo_copy;
7480 		old_copy_version = dst_object->vo_copy_version;
7481 
7482 		/*
7483 		 * There exists the possibility that the source and
7484 		 * destination page are the same.  But we can't
7485 		 * easily determine that now.  If they are the
7486 		 * same, the call to vm_fault_page() for the
7487 		 * destination page will deadlock.  To prevent this we
7488 		 * wire the page so we can drop busy without having
7489 		 * the page daemon steal the page.  We clean up the
7490 		 * top page but keep the paging reference on the object
7491 		 * holding the dest page so it doesn't go away.
7492 		 */
7493 
7494 		vm_page_lockspin_queues();
7495 		vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
7496 		vm_page_unlock_queues();
7497 		vm_page_wakeup_done(dst_object, dst_page);
7498 		vm_object_unlock(dst_object);
7499 
7500 		if (dst_top_page != VM_PAGE_NULL) {
7501 			vm_object_lock(dst_object);
7502 			VM_PAGE_FREE(dst_top_page);
7503 			vm_object_paging_end(dst_object);
7504 			vm_object_unlock(dst_object);
7505 		}
7506 
7507 RetrySourceFault:;
7508 
7509 		if (src_object == VM_OBJECT_NULL) {
7510 			/*
7511 			 *	No source object.  We will just
7512 			 *	zero-fill the page in dst_object.
7513 			 */
7514 			src_page = VM_PAGE_NULL;
7515 			result_page = VM_PAGE_NULL;
7516 		} else {
7517 			vm_object_lock(src_object);
7518 			src_page = vm_page_lookup(src_object,
7519 			    vm_object_trunc_page(src_offset));
7520 			if (src_page == dst_page) {
7521 				src_prot = dst_prot;
7522 				result_page = VM_PAGE_NULL;
7523 			} else {
7524 				src_prot = VM_PROT_READ;
7525 				vm_object_paging_begin(src_object);
7526 
7527 				/* cap cluster size at maximum UPL size */
7528 				if (os_convert_overflow(amount_left, &cluster_size)) {
7529 					cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7530 				}
7531 				fault_info_src.cluster_size = cluster_size;
7532 
7533 				result_page = VM_PAGE_NULL;
7534 				result = vm_fault_page(
7535 					src_object,
7536 					vm_object_trunc_page(src_offset),
7537 					VM_PROT_READ, FALSE,
7538 					FALSE, /* page not looked up */
7539 					&src_prot,
7540 					&result_page, &src_top_page,
7541 					(int *)0, &error, FALSE,
7542 					&fault_info_src);
7543 
7544 				switch (result) {
7545 				case VM_FAULT_SUCCESS:
7546 					break;
7547 				case VM_FAULT_RETRY:
7548 					goto RetrySourceFault;
7549 				case VM_FAULT_MEMORY_SHORTAGE:
7550 					if (vm_page_wait(interruptible)) {
7551 						goto RetrySourceFault;
7552 					}
7553 					OS_FALLTHROUGH;
7554 				case VM_FAULT_INTERRUPTED:
7555 					vm_fault_copy_dst_cleanup(dst_page);
7556 					RETURN(MACH_SEND_INTERRUPTED);
7557 				case VM_FAULT_SUCCESS_NO_VM_PAGE:
7558 					/* success but no VM page: fail */
7559 					vm_object_paging_end(src_object);
7560 					vm_object_unlock(src_object);
7561 					OS_FALLTHROUGH;
7562 				case VM_FAULT_MEMORY_ERROR:
7563 					vm_fault_copy_dst_cleanup(dst_page);
7564 					if (error) {
7565 						return error;
7566 					} else {
7567 						return KERN_MEMORY_ERROR;
7568 					}
7569 				default:
7570 					panic("vm_fault_copy(2): unexpected "
7571 					    "error 0x%x from "
7572 					    "vm_fault_page()\n", result);
7573 				}
7574 
7575 				result_page_object = VM_PAGE_OBJECT(result_page);
7576 				assert((src_top_page == VM_PAGE_NULL) ==
7577 				    (result_page_object == src_object));
7578 			}
7579 			assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7580 			vm_object_unlock(result_page_object);
7581 		}
7582 
7583 		vm_map_lock_read(dst_map);
7584 
7585 		if (!vm_map_verify(dst_map, dst_version)) {
7586 			vm_map_unlock_read(dst_map);
7587 			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7588 				vm_fault_copy_cleanup(result_page, src_top_page);
7589 			}
7590 			vm_fault_copy_dst_cleanup(dst_page);
7591 			break;
7592 		}
7593 		assert(dst_object == VM_PAGE_OBJECT(dst_page));
7594 
7595 		vm_object_lock(dst_object);
7596 
7597 		if ((dst_object->vo_copy != old_copy_object ||
7598 		    dst_object->vo_copy_version != old_copy_version)) {
7599 			vm_object_unlock(dst_object);
7600 			vm_map_unlock_read(dst_map);
7601 			if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7602 				vm_fault_copy_cleanup(result_page, src_top_page);
7603 			}
7604 			vm_fault_copy_dst_cleanup(dst_page);
7605 			break;
7606 		}
7607 		vm_object_unlock(dst_object);
7608 
7609 		/*
7610 		 *	Copy the page, and note that it is dirty
7611 		 *	immediately.
7612 		 */
7613 
7614 		if (!page_aligned(src_offset) ||
7615 		    !page_aligned(dst_offset) ||
7616 		    !page_aligned(amount_left)) {
7617 			vm_object_offset_t      src_po,
7618 			    dst_po;
7619 
7620 			src_po = src_offset - vm_object_trunc_page(src_offset);
7621 			dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7622 
7623 			if (dst_po > src_po) {
7624 				part_size = PAGE_SIZE - dst_po;
7625 			} else {
7626 				part_size = PAGE_SIZE - src_po;
7627 			}
7628 			if (part_size > (amount_left)) {
7629 				part_size = amount_left;
7630 			}
7631 
7632 			if (result_page == VM_PAGE_NULL) {
7633 				assert((vm_offset_t) dst_po == dst_po);
7634 				assert((vm_size_t) part_size == part_size);
7635 				vm_page_part_zero_fill(dst_page,
7636 				    (vm_offset_t) dst_po,
7637 				    (vm_size_t) part_size);
7638 			} else {
7639 				assert((vm_offset_t) src_po == src_po);
7640 				assert((vm_offset_t) dst_po == dst_po);
7641 				assert((vm_size_t) part_size == part_size);
7642 				vm_page_part_copy(result_page,
7643 				    (vm_offset_t) src_po,
7644 				    dst_page,
7645 				    (vm_offset_t) dst_po,
7646 				    (vm_size_t)part_size);
7647 				if (!dst_page->vmp_dirty) {
7648 					vm_object_lock(dst_object);
7649 					SET_PAGE_DIRTY(dst_page, TRUE);
7650 					vm_object_unlock(dst_object);
7651 				}
7652 			}
7653 		} else {
7654 			part_size = PAGE_SIZE;
7655 
7656 			if (result_page == VM_PAGE_NULL) {
7657 				vm_page_zero_fill(
7658 					dst_page
7659 					);
7660 			} else {
7661 				vm_object_lock(result_page_object);
7662 				vm_page_copy(result_page, dst_page);
7663 				vm_object_unlock(result_page_object);
7664 
7665 				if (!dst_page->vmp_dirty) {
7666 					vm_object_lock(dst_object);
7667 					SET_PAGE_DIRTY(dst_page, TRUE);
7668 					vm_object_unlock(dst_object);
7669 				}
7670 			}
7671 		}
7672 
7673 		/*
7674 		 *	Unlock everything, and return
7675 		 */
7676 
7677 		vm_map_unlock_read(dst_map);
7678 
7679 		if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7680 			vm_fault_copy_cleanup(result_page, src_top_page);
7681 		}
7682 		vm_fault_copy_dst_cleanup(dst_page);
7683 
7684 		amount_left -= part_size;
7685 		src_offset += part_size;
7686 		dst_offset += part_size;
7687 	} while (amount_left > 0);
7688 
7689 	RETURN(KERN_SUCCESS);
7690 #undef  RETURN
7691 
7692 	/*NOTREACHED*/
7693 }
7694 
7695 #if     VM_FAULT_CLASSIFY
7696 /*
7697  *	Temporary statistics gathering support.
7698  */
7699 
7700 /*
7701  *	Statistics arrays:
7702  */
7703 #define VM_FAULT_TYPES_MAX      5
7704 #define VM_FAULT_LEVEL_MAX      8
7705 
7706 int     vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7707 
7708 #define VM_FAULT_TYPE_ZERO_FILL 0
7709 #define VM_FAULT_TYPE_MAP_IN    1
7710 #define VM_FAULT_TYPE_PAGER     2
7711 #define VM_FAULT_TYPE_COPY      3
7712 #define VM_FAULT_TYPE_OTHER     4
7713 
7714 
7715 void
7716 vm_fault_classify(vm_object_t           object,
7717     vm_object_offset_t    offset,
7718     vm_prot_t             fault_type)
7719 {
7720 	int             type, level = 0;
7721 	vm_page_t       m;
7722 
7723 	while (TRUE) {
7724 		m = vm_page_lookup(object, offset);
7725 		if (m != VM_PAGE_NULL) {
7726 			if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7727 				type = VM_FAULT_TYPE_OTHER;
7728 				break;
7729 			}
7730 			if (((fault_type & VM_PROT_WRITE) == 0) ||
7731 			    ((level == 0) && object->vo_copy == VM_OBJECT_NULL)) {
7732 				type = VM_FAULT_TYPE_MAP_IN;
7733 				break;
7734 			}
7735 			type = VM_FAULT_TYPE_COPY;
7736 			break;
7737 		} else {
7738 			if (object->pager_created) {
7739 				type = VM_FAULT_TYPE_PAGER;
7740 				break;
7741 			}
7742 			if (object->shadow == VM_OBJECT_NULL) {
7743 				type = VM_FAULT_TYPE_ZERO_FILL;
7744 				break;
7745 			}
7746 
7747 			offset += object->vo_shadow_offset;
7748 			object = object->shadow;
7749 			level++;
7750 			continue;
7751 		}
7752 	}
7753 
7754 	if (level > VM_FAULT_LEVEL_MAX) {
7755 		level = VM_FAULT_LEVEL_MAX;
7756 	}
7757 
7758 	vm_fault_stats[type][level] += 1;
7759 
7760 	return;
7761 }
7762 
7763 /* cleanup routine to call from debugger */
7764 
7765 void
7766 vm_fault_classify_init(void)
7767 {
7768 	int type, level;
7769 
7770 	for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7771 		for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7772 			vm_fault_stats[type][level] = 0;
7773 		}
7774 	}
7775 
7776 	return;
7777 }
7778 #endif  /* VM_FAULT_CLASSIFY */
7779 
7780 static inline bool
7781 object_supports_coredump(const vm_object_t object)
7782 {
7783 	switch (object->wimg_bits & VM_WIMG_MASK) {
7784 	case VM_WIMG_DEFAULT:
7785 		return true;
7786 	default:
7787 		return false;
7788 	}
7789 }
7790 
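/*
 *	kdp_lightweight_fault:
 *
 *	Debugger-context translation of "cur_target_addr" in "map" to a
 *	physical address without blocking: the map entry and shadow chain are
 *	walked lock-free, compressed pages are decompressed into a per-CPU
 *	(or panic-time) scratch page, and 0 is returned whenever the result
 *	cannot be determined safely (locks held, page busy, unusual state...).
 */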
7791 vm_offset_t
7792 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, bool multi_cpu)
7793 {
7794 	vm_map_entry_t  entry;
7795 	vm_object_t     object;
7796 	vm_offset_t     object_offset;
7797 	vm_page_t       m;
7798 	int             compressor_external_state, compressed_count_delta;
7799 	vm_compressor_options_t             compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7800 	int             my_fault_type = VM_PROT_READ;
7801 	kern_return_t   kr;
7802 	int effective_page_mask, effective_page_size;
7803 	int             my_cpu_no = cpu_number();
7804 	ppnum_t         decomp_ppnum;
7805 	addr64_t        decomp_paddr;
7806 
7807 	if (multi_cpu) {
7808 		compressor_flags |= C_KDP_MULTICPU;
7809 	}
7810 
7811 	if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7812 		effective_page_mask = VM_MAP_PAGE_MASK(map);
7813 		effective_page_size = VM_MAP_PAGE_SIZE(map);
7814 	} else {
7815 		effective_page_mask = PAGE_MASK;
7816 		effective_page_size = PAGE_SIZE;
7817 	}
7818 
7819 	if (not_in_kdp) {
7820 		panic("kdp_lightweight_fault called from outside of debugger context");
7821 	}
7822 
7823 	assert(map != VM_MAP_NULL);
7824 
7825 	assert((cur_target_addr & effective_page_mask) == 0);
7826 	if ((cur_target_addr & effective_page_mask) != 0) {
7827 		return 0;
7828 	}
7829 
7830 	if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7831 		return 0;
7832 	}
7833 
7834 	if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7835 		return 0;
7836 	}
7837 
7838 	if (entry->is_sub_map) {
7839 		return 0;
7840 	}
7841 
7842 	object = VME_OBJECT(entry);
7843 	if (object == VM_OBJECT_NULL) {
7844 		return 0;
7845 	}
7846 
7847 	object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7848 
7849 	while (TRUE) {
7850 		if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7851 			return 0;
7852 		}
7853 
7854 		if (object->pager_created && (object->paging_in_progress ||
7855 		    object->activity_in_progress)) {
7856 			return 0;
7857 		}
7858 
7859 		m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7860 
7861 		if (m != VM_PAGE_NULL) {
7862 			if (!object_supports_coredump(object)) {
7863 				return 0;
7864 			}
7865 
7866 			if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done ||
7867 			    m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_cleaning ||
7868 			    m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7869 				return 0;
7870 			}
7871 
7872 			assert(!vm_page_is_private(m));
7873 			if (vm_page_is_private(m)) {
7874 				return 0;
7875 			}
7876 
7877 			assert(!vm_page_is_fictitious(m));
7878 			if (vm_page_is_fictitious(m)) {
7879 				return 0;
7880 			}
7881 
7882 			assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7883 			if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7884 				return 0;
7885 			}
7886 
7887 			return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7888 		}
7889 
7890 		compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7891 
7892 		if (multi_cpu) {
7893 			assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum != NULL);
7894 			assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr != NULL);
7895 			decomp_ppnum = vm_compressor_kdp_state.kc_decompressed_pages_ppnum[my_cpu_no];
7896 			decomp_paddr = vm_compressor_kdp_state.kc_decompressed_pages_paddr[my_cpu_no];
7897 		} else {
7898 			decomp_ppnum = vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum;
7899 			decomp_paddr = vm_compressor_kdp_state.kc_panic_decompressed_page_paddr;
7900 		}
7901 
7902 		if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7903 			if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7904 				kr = vm_compressor_pager_get(object->pager,
7905 				    vm_object_trunc_page(object_offset + object->paging_offset),
7906 				    decomp_ppnum, &my_fault_type,
7907 				    compressor_flags, &compressed_count_delta);
7908 				if (kr == KERN_SUCCESS) {
7909 					return decomp_paddr;
7910 				} else {
7911 					return 0;
7912 				}
7913 			}
7914 		}
7915 
7916 		if (object->shadow == VM_OBJECT_NULL) {
7917 			return 0;
7918 		}
7919 
7920 		object_offset += object->vo_shadow_offset;
7921 		object = object->shadow;
7922 	}
7923 }
7924 
7925 /*
7926  * vm_page_validate_cs_fast():
7927  * Performs a few quick checks to determine if the page's code signature
7928  * really needs to be fully validated.  It could:
7929  *	1. have been modified (i.e. automatically tainted),
7930  *	2. have already been validated,
7931  *	3. have already been found to be tainted,
7932  *	4. no longer have a backing store.
7933  * Returns FALSE if the page needs to be fully validated.
7934  */
7935 static boolean_t
7936 vm_page_validate_cs_fast(
7937 	vm_page_t       page,
7938 	vm_map_size_t   fault_page_size,
7939 	vm_map_offset_t fault_phys_offset)
7940 {
7941 	vm_object_t     object;
7942 
7943 	object = VM_PAGE_OBJECT(page);
7944 	vm_object_lock_assert_held(object);
7945 
7946 	if (page->vmp_wpmapped &&
7947 	    !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7948 		/*
7949 		 * This page was mapped for "write" access sometime in the
7950 		 * past and could still be modifiable in the future.
7951 		 * Consider it tainted.
7952 		 * [ If the page was already found to be "tainted", no
7953 		 * need to re-validate. ]
7954 		 */
7955 		vm_object_lock_assert_exclusive(object);
7956 		VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7957 		VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7958 		if (cs_debug) {
7959 			printf("CODESIGNING: %s: "
7960 			    "page %p obj %p off 0x%llx "
7961 			    "was modified\n",
7962 			    __FUNCTION__,
7963 			    page, object, page->vmp_offset);
7964 		}
7965 		vm_cs_validated_dirtied++;
7966 	}
7967 
7968 	if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7969 	    VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7970 		return TRUE;
7971 	}
7972 	vm_object_lock_assert_exclusive(object);
7973 
7974 #if CHECK_CS_VALIDATION_BITMAP
7975 	kern_return_t kr;
7976 
7977 	kr = vnode_pager_cs_check_validation_bitmap(
7978 		object->pager,
7979 		page->vmp_offset + object->paging_offset,
7980 		CS_BITMAP_CHECK);
7981 	if (kr == KERN_SUCCESS) {
7982 		page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7983 		page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7984 		vm_cs_bitmap_validated++;
7985 		return TRUE;
7986 	}
7987 #endif /* CHECK_CS_VALIDATION_BITMAP */
7988 
7989 	if (!object->alive || object->terminating || object->pager == NULL) {
7990 		/*
7991 		 * The object is terminating and we don't have its pager
7992 		 * so we can't validate the data...
7993 		 */
7994 		return TRUE;
7995 	}
7996 
7997 	/* we need to really validate this page */
7998 	vm_object_lock_assert_exclusive(object);
7999 	return FALSE;
8000 }
8001 
8002 void
8003 vm_page_validate_cs_mapped_slow(
8004 	vm_page_t       page,
8005 	const void      *kaddr)
8006 {
8007 	vm_object_t             object;
8008 	memory_object_offset_t  mo_offset;
8009 	memory_object_t         pager;
8010 	struct vnode            *vnode;
8011 	int                     validated, tainted, nx;
8012 
8013 	assert(page->vmp_busy);
8014 	object = VM_PAGE_OBJECT(page);
8015 	vm_object_lock_assert_exclusive(object);
8016 
8017 	vm_cs_validates++;
8018 
8019 	/*
8020 	 * Since we get here to validate a page that was brought in by
8021 	 * the pager, we know that this pager is all set up and ready
8022 	 * by now.
8023 	 */
8024 	assert(object->code_signed);
8025 	assert(!object->internal);
8026 	assert(object->pager != NULL);
8027 	assert(object->pager_ready);
8028 
8029 	pager = object->pager;
8030 	assert(object->paging_in_progress);
8031 	vnode = vnode_pager_lookup_vnode(pager);
8032 	mo_offset = page->vmp_offset + object->paging_offset;
8033 
8034 	/* verify the SHA1 hash for this page */
8035 	validated = 0;
8036 	tainted = 0;
8037 	nx = 0;
8038 	cs_validate_page(vnode,
8039 	    pager,
8040 	    mo_offset,
8041 	    (const void *)((const char *)kaddr),
8042 	    &validated,
8043 	    &tainted,
8044 	    &nx);
8045 
8046 	page->vmp_cs_validated |= validated;
8047 	page->vmp_cs_tainted |= tainted;
8048 	page->vmp_cs_nx |= nx;
8049 
8050 #if CHECK_CS_VALIDATION_BITMAP
8051 	if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
8052 	    page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
8053 		vnode_pager_cs_check_validation_bitmap(object->pager,
8054 		    mo_offset,
8055 		    CS_BITMAP_SET);
8056 	}
8057 #endif /* CHECK_CS_VALIDATION_BITMAP */
8058 }
8059 
8060 void
8061 vm_page_validate_cs_mapped(
8062 	vm_page_t       page,
8063 	vm_map_size_t   fault_page_size,
8064 	vm_map_offset_t fault_phys_offset,
8065 	const void      *kaddr)
8066 {
8067 	if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8068 		vm_page_validate_cs_mapped_slow(page, kaddr);
8069 	}
8070 }
8071 
8072 static void
8073 vm_page_map_and_validate_cs(
8074 	vm_object_t     object,
8075 	vm_page_t       page)
8076 {
8077 	vm_object_offset_t      offset;
8078 	vm_map_offset_t         koffset;
8079 	vm_map_size_t           ksize;
8080 	vm_offset_t             kaddr;
8081 	kern_return_t           kr;
8082 	boolean_t               busy_page;
8083 	boolean_t               need_unmap;
8084 
8085 	vm_object_lock_assert_exclusive(object);
8086 
8087 	assert(object->code_signed);
8088 	offset = page->vmp_offset;
8089 
8090 	busy_page = page->vmp_busy;
8091 	if (!busy_page) {
8092 		/* keep page busy while we map (and unlock) the VM object */
8093 		page->vmp_busy = TRUE;
8094 	}
8095 
8096 	/*
8097 	 * Take a paging reference on the VM object
8098 	 * to protect it from collapse or bypass,
8099 	 * and keep it from disappearing too.
8100 	 */
8101 	vm_object_paging_begin(object);
8102 
8103 	/* map the page in the kernel address space */
8104 	ksize = PAGE_SIZE_64;
8105 	koffset = 0;
8106 	need_unmap = FALSE;
8107 	kr = vm_paging_map_object(page,
8108 	    object,
8109 	    offset,
8110 	    VM_PROT_READ,
8111 	    FALSE,                       /* can't unlock object ! */
8112 	    &ksize,
8113 	    &koffset,
8114 	    &need_unmap);
8115 	if (kr != KERN_SUCCESS) {
8116 		panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
8117 	}
8118 	kaddr = CAST_DOWN(vm_offset_t, koffset);
8119 
8120 	/* validate the mapped page */
8121 	vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
8122 
8123 	assert(page->vmp_busy);
8124 	assert(object == VM_PAGE_OBJECT(page));
8125 	vm_object_lock_assert_exclusive(object);
8126 
8127 	if (!busy_page) {
8128 		vm_page_wakeup_done(object, page);
8129 	}
8130 	if (need_unmap) {
8131 		/* unmap the map from the kernel address space */
8132 		/* unmap the page from the kernel address space */
8133 		koffset = 0;
8134 		ksize = 0;
8135 		kaddr = 0;
8136 	}
8137 	vm_object_paging_end(object);
8138 }
8139 
8140 void
8141 vm_page_validate_cs(
8142 	vm_page_t       page,
8143 	vm_map_size_t   fault_page_size,
8144 	vm_map_offset_t fault_phys_offset)
8145 {
8146 	vm_object_t             object;
8147 
8148 	object = VM_PAGE_OBJECT(page);
8149 	vm_object_lock_assert_held(object);
8150 
8151 	if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8152 		return;
8153 	}
8154 	vm_page_map_and_validate_cs(object, page);
8155 }
8156 
8157 void
8158 vm_page_validate_cs_mapped_chunk(
8159 	vm_page_t       page,
8160 	const void      *kaddr,
8161 	vm_offset_t     chunk_offset,
8162 	vm_size_t       chunk_size,
8163 	boolean_t       *validated_p,
8164 	unsigned        *tainted_p)
8165 {
8166 	vm_object_t             object;
8167 	vm_object_offset_t      offset, offset_in_page;
8168 	memory_object_t         pager;
8169 	struct vnode            *vnode;
8170 	boolean_t               validated;
8171 	unsigned                tainted;
8172 
8173 	*validated_p = FALSE;
8174 	*tainted_p = 0;
8175 
8176 	assert(page->vmp_busy);
8177 	object = VM_PAGE_OBJECT(page);
8178 	vm_object_lock_assert_exclusive(object);
8179 
8180 	assert(object->code_signed);
8181 	offset = page->vmp_offset;
8182 
8183 	if (!object->alive || object->terminating || object->pager == NULL) {
8184 		/*
8185 		 * The object is terminating and we don't have its pager
8186 		 * so we can't validate the data...
8187 		 */
8188 		return;
8189 	}
8190 	/*
8191 	 * Since we get here to validate a page that was brought in by
8192 	 * the pager, we know that this pager is all set up and ready
8193 	 * by now.
8194 	 */
8195 	assert(!object->internal);
8196 	assert(object->pager != NULL);
8197 	assert(object->pager_ready);
8198 
8199 	pager = object->pager;
8200 	assert(object->paging_in_progress);
8201 	vnode = vnode_pager_lookup_vnode(pager);
8202 
8203 	/* verify the signature for this chunk */
8204 	offset_in_page = chunk_offset;
8205 	assert(offset_in_page < PAGE_SIZE);
8206 
8207 	tainted = 0;
8208 	validated = cs_validate_range(vnode,
8209 	    pager,
8210 	    (object->paging_offset +
8211 	    offset +
8212 	    offset_in_page),
8213 	    (const void *)((const char *)kaddr
8214 	    + offset_in_page),
8215 	    chunk_size,
8216 	    &tainted);
8217 	if (validated) {
8218 		*validated_p = TRUE;
8219 	}
8220 	if (tainted) {
8221 		*tainted_p = tainted;
8222 	}
8223 }
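
/*
 * Illustrative sketch (not part of this file): a hypothetical caller that
 * validates a mapped page in 4K chunks could drive the routine above like
 * this, accumulating the per-chunk results.  The 4096 chunk size and the
 * helper name are assumptions for illustration only.
 */
#if 0
static void
example_validate_page_in_chunks(
	vm_page_t       m,
	const void      *kaddr)
{
	boolean_t       all_valid = TRUE;
	unsigned        tainted_bits = 0;
	vm_offset_t     off;

	for (off = 0; off < PAGE_SIZE; off += 4096) {
		boolean_t       chunk_valid = FALSE;
		unsigned        chunk_tainted = 0;

		vm_page_validate_cs_mapped_chunk(m, kaddr, off, 4096,
		    &chunk_valid, &chunk_tainted);
		all_valid = all_valid && chunk_valid;
		tainted_bits |= chunk_tainted;
	}
	/* ... the caller would record all_valid / tainted_bits on the page ... */
}
#endif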
8224 
8225 static void
8226 vm_rtfrecord_lock(void)
8227 {
8228 	lck_spin_lock(&vm_rtfr_slock);
8229 }
8230 
8231 static void
8232 vm_rtfrecord_unlock(void)
8233 {
8234 	lck_spin_unlock(&vm_rtfr_slock);
8235 }
8236 
8237 unsigned int
8238 vmrtfaultinfo_bufsz(void)
8239 {
8240 	return vmrtf_num_records * sizeof(vm_rtfault_record_t);
8241 }
8242 
8243 #include <kern/backtrace.h>
8244 
8245 __attribute__((noinline))
8246 static void
8247 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
8248 {
8249 	uint64_t fend = mach_continuous_time();
8250 
8251 	uint64_t cfpc = 0;
8252 	uint64_t ctid = cthread->thread_id;
8253 	uint64_t cupid = get_current_unique_pid();
8254 
8255 	uintptr_t bpc = 0;
8256 	errno_t btr = 0;
8257 
8258 	/*
8259 	 * Capture a single-frame backtrace.  This extracts just the program
8260 	 * counter at the point of the fault, and should not use copyin to get
8261 	 * Rosetta save state.
8262 	 */
8263 	struct backtrace_control ctl = {
8264 		.btc_user_thread = cthread,
8265 		.btc_user_copy = backtrace_user_copy_error,
8266 	};
8267 	unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
8268 	if ((btr == 0) && (bfrs > 0)) {
8269 		cfpc = bpc;
8270 	}
8271 
8272 	assert((fstart != 0) && fend >= fstart);
8273 	vm_rtfrecord_lock();
8274 	assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
8275 
8276 	vmrtfrs.vmrtf_total++;
8277 	vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
8278 
8279 	cvmr->rtfabstime = fstart;
8280 	cvmr->rtfduration = fend - fstart;
8281 	cvmr->rtfaddr = fault_vaddr;
8282 	cvmr->rtfpc = cfpc;
8283 	cvmr->rtftype = type_of_fault;
8284 	cvmr->rtfupid = cupid;
8285 	cvmr->rtftid = ctid;
8286 
8287 	if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
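	/* wrap the ring index so the oldest record is overwritten next */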
8288 		vmrtfrs.vmrtfr_curi = 0;
8289 	}
8290 
8291 	vm_rtfrecord_unlock();
8292 }
8293 
8294 int
8295 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
8296 {
8297 	vm_rtfault_record_t *cvmrd = vrecords;
8298 	size_t residue = vrecordsz;
8299 	size_t numextracted = 0;
8300 	boolean_t early_exit = FALSE;
8301 
8302 	vm_rtfrecord_lock();
8303 
8304 	for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
8305 		if (residue < sizeof(vm_rtfault_record_t)) {
8306 			early_exit = TRUE;
8307 			break;
8308 		}
8309 
8310 		if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
8311 #if     DEVELOPMENT || DEBUG
8312 			if (isroot == FALSE) {
8313 				continue;
8314 			}
8315 #else
8316 			continue;
8317 #endif /* DEVELOPMENT || DEBUG */
8318 		}
8319 
8320 		*cvmrd = vmrtfrs.vm_rtf_records[vmfi];
8321 		cvmrd++;
8322 		residue -= sizeof(vm_rtfault_record_t);
8323 		numextracted++;
8324 	}
8325 
8326 	vm_rtfrecord_unlock();
8327 
8328 	*vmrtfrv = numextracted;
8329 	return early_exit;
8330 }
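
/*
 * Illustrative sketch (not part of this file): a hypothetical consumer, e.g.
 * a sysctl handler, could size its buffer with vmrtfaultinfo_bufsz() and then
 * pull the records for one unique pid with vmrtf_extract().  The helper name
 * and the copy-to-caller step are assumptions for illustration only.
 */
#if 0
static unsigned long
example_vmrtf_snapshot(uint64_t target_upid, boolean_t is_root, void *buf, unsigned long bufsz)
{
	unsigned long nextracted = 0;

	/* a buffer of vmrtfaultinfo_bufsz() bytes can hold every record */
	if (bufsz > vmrtfaultinfo_bufsz()) {
		bufsz = vmrtfaultinfo_bufsz();
	}
	(void)vmrtf_extract(target_upid, is_root, bufsz, buf, &nextracted);
	return nextracted;	/* number of vm_rtfault_record_t entries copied */
}
#endif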
8331 
8332 /*
8333  * Only allow one diagnosis to be in flight at a time, to avoid
8334  * creating too much additional memory usage.
8335  */
8336 static volatile uint_t vmtc_diagnosing;
8337 unsigned int vmtc_total = 0;
8338 
8339 /*
8340  * Type used to update telemetry for the diagnosis counts.
8341  */
8342 CA_EVENT(vmtc_telemetry,
8343     CA_INT, vmtc_num_byte,            /* number of corrupt bytes found */
8344     CA_BOOL, vmtc_undiagnosed,        /* undiagnosed because more than 1 at a time */
8345     CA_BOOL, vmtc_not_eligible,       /* the page didn't qualify */
8346     CA_BOOL, vmtc_copyin_fail,        /* unable to copy in the page */
8347     CA_BOOL, vmtc_not_found,          /* no corruption found even though CS failed */
8348     CA_BOOL, vmtc_one_bit_flip,       /* single bit flip */
8349     CA_BOOL, vmtc_testing);           /* caused on purpose by testing */
8350 
8351 #if DEVELOPMENT || DEBUG
8352 /*
8353  * Buffers used to compare before/after page contents.
8354  * Stashed to aid when debugging crashes.
8355  */
8356 static size_t vmtc_last_buffer_size = 0;
8357 static uint64_t *vmtc_last_before_buffer = NULL;
8358 static uint64_t *vmtc_last_after_buffer = NULL;
8359 
8360 /*
8361  * Needed to record corruptions due to testing.
8362  */
8363 static uintptr_t corruption_test_va = 0;
8364 #endif /* DEVELOPMENT || DEBUG */
8365 
8366 /*
8367  * Stash a copy of data from a possibly corrupt page.
8368  */
8369 static uint64_t *
8370 vmtc_get_page_data(
8371 	vm_map_offset_t code_addr,
8372 	vm_page_t       page)
8373 {
8374 	uint64_t        *buffer = NULL;
8375 	addr64_t        buffer_paddr;
8376 	addr64_t        page_paddr;
8377 	extern void     bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
8378 	uint_t          size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8379 
8380 	/*
8381 	 * Need an aligned buffer to do a physical copy.
8382 	 */
8383 	if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
8384 	    size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
8385 		return NULL;
8386 	}
8387 	buffer_paddr = kvtophys((vm_offset_t)buffer);
8388 	page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
8389 
8390 	/* adjust the page start address if we need only 4K of a 16K page */
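	/*
	 * For example (illustrative): with a 16K kernel PAGE_SIZE and a 4K map
	 * page size, a code_addr ending in 0x5000 gives subpage_start 0x1000,
	 * i.e. we copy only the second 4K quarter of the 16K physical page.
	 */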
8391 	if (size < PAGE_SIZE) {
8392 		uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
8393 		page_paddr += subpage_start;
8394 	}
8395 
8396 	bcopy_phys(page_paddr, buffer_paddr, size);
8397 	return buffer;
8398 }
8399 
8400 /*
8401  * Set things up so we can diagnose a potential text page corruption.
8402  */
8403 static uint64_t *
8404 vmtc_text_page_diagnose_setup(
8405 	vm_map_offset_t code_addr,
8406 	vm_page_t       page,
8407 	CA_EVENT_TYPE(vmtc_telemetry) *event)
8408 {
8409 	uint64_t        *buffer = NULL;
8410 
8411 	/*
8412 	 * If another is being diagnosed, skip this one.
8413 	 */
8414 	if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
8415 		event->vmtc_undiagnosed = true;
8416 		return NULL;
8417 	}
8418 
8419 	/*
8420 	 * Get the contents of the corrupt page.
8421 	 */
8422 	buffer = vmtc_get_page_data(code_addr, page);
8423 	if (buffer == NULL) {
8424 		event->vmtc_copyin_fail = true;
8425 		if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8426 			panic("Bad compare and swap in setup!");
8427 		}
8428 		return NULL;
8429 	}
8430 	return buffer;
8431 }
8432 
8433 /*
8434  * Diagnose the text page by comparing its contents with
8435  * the one we've previously saved.
8436  */
8437 static void
8438 vmtc_text_page_diagnose(
8439 	vm_map_offset_t code_addr,
8440 	uint64_t        *old_code_buffer,
8441 	CA_EVENT_TYPE(vmtc_telemetry) *event)
8442 {
8443 	uint64_t        *new_code_buffer;
8444 	size_t          size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8445 	uint_t          count = (uint_t)size / sizeof(uint64_t);
8446 	uint_t          diff_count = 0;
8447 	bool            bit_flip = false;
8448 	uint_t          b;
8449 	uint64_t        *new;
8450 	uint64_t        *old;
8451 
8452 	new_code_buffer = kalloc_data(size, Z_WAITOK);
8453 	assert(new_code_buffer != NULL);
8454 	if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
8455 		/* copyin error, so undo things */
8456 		event->vmtc_copyin_fail = true;
8457 		goto done;
8458 	}
8459 
8460 	new = new_code_buffer;
8461 	old = old_code_buffer;
8462 	for (; count-- > 0; ++new, ++old) {
8463 		if (*new == *old) {
8464 			continue;
8465 		}
8466 
8467 		/*
8468 		 * On first diff, check for a single bit flip
8469 		 */
8470 		if (diff_count == 0) {
8471 			uint64_t x = (*new ^ *old);
8472 			assert(x != 0);
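			/*
			 * x is the XOR of the two 64-bit words, so it has exactly one
			 * bit set iff a single bit differs; clearing the lowest set bit
			 * with (x & (x - 1)) yields zero exactly in that case.
			 */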
8473 			if ((x & (x - 1)) == 0) {
8474 				bit_flip = true;
8475 				++diff_count;
8476 				continue;
8477 			}
8478 		}
8479 
8480 		/*
8481 		 * count up the number of different bytes.
8482 		 */
8483 		for (b = 0; b < sizeof(uint64_t); ++b) {
8484 			char *n = (char *)new;
8485 			char *o = (char *)old;
8486 			if (n[b] != o[b]) {
8487 				++diff_count;
8488 			}
8489 		}
8490 	}
8491 
8492 	if (diff_count > 1) {
8493 		bit_flip = false;
8494 	}
8495 
8496 	if (diff_count == 0) {
8497 		event->vmtc_not_found = true;
8498 	} else {
8499 		event->vmtc_num_byte = diff_count;
8500 	}
8501 	if (bit_flip) {
8502 		event->vmtc_one_bit_flip = true;
8503 	}
8504 
8505 done:
8506 	/*
8507 	 * Free up the code copy buffers, but save the last
8508 	 * set on development / debug kernels in case they
8509 	 * can provide evidence for debugging memory stomps.
8510 	 */
8511 #if DEVELOPMENT || DEBUG
8512 	if (vmtc_last_before_buffer != NULL) {
8513 		kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
8514 	}
8515 	if (vmtc_last_after_buffer != NULL) {
8516 		kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
8517 	}
8518 	vmtc_last_before_buffer = old_code_buffer;
8519 	vmtc_last_after_buffer = new_code_buffer;
8520 	vmtc_last_buffer_size = size;
8521 #else /* DEVELOPMENT || DEBUG */
8522 	kfree_data(new_code_buffer, size);
8523 	kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
8524 #endif /* DEVELOPMENT || DEBUG */
8525 
8526 	/*
8527 	 * We're finished, so clear the diagnosing flag.
8528 	 */
8529 	if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8530 		panic("Bad compare and swap in diagnose!");
8531 	}
8532 }
8533 
8534 /*
8535  * For the given map, virt address, find the object, offset, and page.
8536  * This has to look up the map entry, verify protections, and walk any shadow chains.
8537  * If found, returns with the object locked.
8538  */
8539 static kern_return_t
8540 vmtc_revalidate_lookup(
8541 	vm_map_t               map,
8542 	vm_map_offset_t        vaddr,
8543 	vm_object_t            *ret_object,
8544 	vm_object_offset_t     *ret_offset,
8545 	vm_page_t              *ret_page,
8546 	vm_prot_t              *ret_prot)
8547 {
8548 	vm_object_t            object;
8549 	vm_object_offset_t     offset;
8550 	vm_page_t              page;
8551 	kern_return_t          kr = KERN_SUCCESS;
8552 	uint8_t                object_lock_type = OBJECT_LOCK_EXCLUSIVE;
8553 	vm_map_version_t       version;
8554 	boolean_t              wired;
8555 	struct vm_object_fault_info fault_info = {
8556 		.interruptible = THREAD_UNINT
8557 	};
8558 	vm_map_t               real_map = NULL;
8559 	vm_prot_t              prot;
8560 	vm_object_t            shadow;
8561 
8562 	/*
8563 	 * Find the object/offset for the given location/map.
8564 	 * Note this returns with the object locked.
8565 	 */
8566 restart:
8567 	vm_map_lock_read(map);
8568 	object = VM_OBJECT_NULL;        /* in case we come around the restart path */
8569 	kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
8570 	    object_lock_type, &version, &object, &offset, &prot, &wired,
8571 	    &fault_info, &real_map, NULL);
8572 	vm_map_unlock_read(map);
8573 	if (real_map != NULL && real_map != map) {
8574 		vm_map_unlock(real_map);
8575 	}
8576 
8577 	/*
8578 	 * If there's no page here, fail.
8579 	 */
8580 	if (kr != KERN_SUCCESS || object == NULL) {
8581 		kr = KERN_FAILURE;
8582 		goto done;
8583 	}
8584 
8585 	/*
8586 	 * Chase down any shadow chains to find the actual page.
8587 	 */
8588 	for (;;) {
8589 		/*
8590 		 * See if the page is on the current object.
8591 		 */
8592 		page = vm_page_lookup(object, vm_object_trunc_page(offset));
8593 		if (page != NULL) {
8594 			/* restart the lookup */
8595 			if (page->vmp_restart) {
8596 				vm_object_unlock(object);
8597 				goto restart;
8598 			}
8599 
8600 			/*
8601 			 * If this page is busy, we need to wait for it.
8602 			 */
8603 			if (page->vmp_busy) {
8604 				vm_page_sleep(object, page, THREAD_INTERRUPTIBLE, LCK_SLEEP_UNLOCK);
8605 				goto restart;
8606 			}
8607 			break;
8608 		}
8609 
8610 		/*
8611 		 * If the object doesn't have the page and
8612 		 * has no shadow, then we can quit.
8613 		 */
8614 		shadow = object->shadow;
8615 		if (shadow == NULL) {
8616 			kr = KERN_FAILURE;
8617 			goto done;
8618 		}
8619 
8620 		/*
8621 		 * Move to the next object
8622 		 */
8623 		offset += object->vo_shadow_offset;
8624 		vm_object_lock(shadow);
8625 		vm_object_unlock(object);
8626 		object = shadow;
8627 		shadow = VM_OBJECT_NULL;
8628 	}
8629 	*ret_object = object;
8630 	*ret_offset = vm_object_trunc_page(offset);
8631 	*ret_page = page;
8632 	*ret_prot = prot;
8633 
8634 done:
8635 	if (kr != KERN_SUCCESS && object != NULL) {
8636 		vm_object_unlock(object);
8637 	}
8638 	return kr;
8639 }
8640 
8641 /*
8642  * Check if a page is wired, needs extra locking.
8643  */
8644 static bool
8645 is_page_wired(vm_page_t page)
8646 {
8647 	bool result;
8648 	vm_page_lock_queues();
8649 	result = VM_PAGE_WIRED(page);
8650 	vm_page_unlock_queues();
8651 	return result;
8652 }
8653 
8654 /*
8655  * A fatal process error has occurred in the given task.
8656  * Recheck the code signing of the text page at the given
8657  * address to check for text page corruption.
8658  *
8659  * Returns KERN_FAILURE if a page was found to be corrupt
8660  * by failing to match its code signature. KERN_SUCCESS
8661  * means the page is either valid or we don't have the
8662  * information to say it's corrupt.
8663  */
8664 kern_return_t
8665 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8666 {
8667 	kern_return_t          kr;
8668 	vm_map_t               map;
8669 	vm_object_t            object = NULL;
8670 	vm_object_offset_t     offset;
8671 	vm_page_t              page = NULL;
8672 	struct vnode           *vnode;
8673 	uint64_t               *diagnose_buffer = NULL;
8674 	CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8675 	ca_event_t             ca_event = NULL;
8676 	vm_prot_t              prot;
8677 
8678 	map = task->map;
8679 	if (task->map == NULL) {
8680 		return KERN_SUCCESS;
8681 	}
8682 
8683 	kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page, &prot);
8684 	if (kr != KERN_SUCCESS) {
8685 		goto done;
8686 	}
8687 
8688 	/*
8689 	 * The page must be executable.
8690 	 */
8691 	if (!(prot & VM_PROT_EXECUTE)) {
8692 		goto done;
8693 	}
8694 
8695 	/*
8696 	 * The object needs to have a pager.
8697 	 */
8698 	if (object->pager == NULL) {
8699 		goto done;
8700 	}
8701 
8702 	/*
8703 	 * Needs to be a vnode backed page to have a signature.
8704 	 */
8705 	vnode = vnode_pager_lookup_vnode(object->pager);
8706 	if (vnode == NULL) {
8707 		goto done;
8708 	}
8709 
8710 	/*
8711 	 * Object checks to see if we should proceed.
8712 	 */
8713 	if (!object->code_signed ||     /* no code signature to check */
8714 	    object->internal ||         /* internal objects aren't signed */
8715 	    object->terminating ||      /* the object and its pages are already going away */
8716 	    !object->pager_ready) {     /* this shouldn't happen, but the check doesn't hurt */
8717 		goto done;
8718 	}
8719 
8720 
8721 	/*
8722 	 * Check the code signature of the page in question.
8723 	 */
8724 	vm_page_map_and_validate_cs(object, page);
8725 
8726 	/*
8727 	 * At this point:
8728 	 * vmp_cs_validated |= validated (set if a code signature exists)
8729 	 * vmp_cs_tainted |= tainted (set if code signature violation)
8730 	 * vmp_cs_nx |= nx;  ??
8731 	 *
8732 	 * if vmp_pmapped then have to pmap_disconnect..
8733 	 * other flags to check on object or page?
8734 	 */
8735 	if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8736 #if DEBUG || DEVELOPMENT
8737 		/*
8738 		 * On development builds, a boot-arg can be used to cause
8739 		 * a panic, instead of a quiet repair.
8740 		 */
8741 		if (vmtc_panic_instead) {
8742 			panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8743 		}
8744 #endif /* DEBUG || DEVELOPMENT */
8745 
8746 		/*
8747 		 * We're going to invalidate this page. Grab a copy of it for comparison.
8748 		 */
8749 		ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8750 		event = ca_event->data;
8751 		diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8752 
8753 		/*
8754 		 * Invalidate, i.e. toss, the corrupted page.
8755 		 */
8756 		if (!page->vmp_cleaning &&
8757 		    !page->vmp_laundry &&
8758 		    !vm_page_is_fictitious(page) &&
8759 		    !page->vmp_precious &&
8760 		    !page->vmp_absent &&
8761 		    !VMP_ERROR_GET(page) &&
8762 		    !page->vmp_dirty &&
8763 		    !is_page_wired(page)) {
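			/*
			 * Disconnect all pmap mappings first; pmap_disconnect() returns
			 * the accumulated referenced/modified bits so that state is
			 * carried over onto the vm_page before we decide to free it.
			 */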
8764 			if (page->vmp_pmapped) {
8765 				int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8766 				if (refmod & VM_MEM_MODIFIED) {
8767 					SET_PAGE_DIRTY(page, FALSE);
8768 				}
8769 				if (refmod & VM_MEM_REFERENCED) {
8770 					page->vmp_reference = TRUE;
8771 				}
8772 			}
8773 			/* If the page seems intentionally modified, don't trash it. */
8774 			if (!page->vmp_dirty) {
8775 				VM_PAGE_FREE(page);
8776 			} else {
8777 				event->vmtc_not_eligible = true;
8778 			}
8779 		} else {
8780 			event->vmtc_not_eligible = true;
8781 		}
8782 		vm_object_unlock(object);
8783 		object = VM_OBJECT_NULL;
8784 
8785 		/*
8786 		 * Now try to diagnose the type of failure by faulting
8787 		 * in a new copy and diff'ing it with what we saved.
8788 		 */
8789 		if (diagnose_buffer != NULL) {
8790 			vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8791 		}
8792 #if DEBUG || DEVELOPMENT
8793 		if (corruption_test_va != 0) {
8794 			corruption_test_va = 0;
8795 			event->vmtc_testing = true;
8796 		}
8797 #endif /* DEBUG || DEVELOPMENT */
8798 		ktriage_record(thread_tid(current_thread()),
8799 		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8800 		    0 /* arg */);
8801 		CA_EVENT_SEND(ca_event);
8802 		printf("Text page corruption detected for pid %d\n", proc_selfpid());
8803 		++vmtc_total;
8804 		return KERN_FAILURE; /* failure means we definitely found a corrupt page */
8805 	}
8806 done:
8807 	if (object != NULL) {
8808 		vm_object_unlock(object);
8809 	}
8810 	return KERN_SUCCESS;
8811 }
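
/*
 * Illustrative sketch (not part of this file): a hypothetical fatal-exception
 * path could use revalidate_text_page() to decide whether a crash should be
 * attributed to text corruption; KERN_FAILURE means a page definitely failed
 * its code signature re-check.  The helper name is an assumption.
 */
#if 0
static bool
example_crash_was_text_corruption(task_t task, vm_map_offset_t fault_pc)
{
	return revalidate_text_page(task, fault_pc) == KERN_FAILURE;
}
#endif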
8812 
8813 #if DEBUG || DEVELOPMENT
8814 /*
8815  * For implementing unit tests - ask the pmap to corrupt a text page.
8816  * We have to find the page, to get the physical address, then invoke
8817  * We have to find the page to get its physical address, then invoke
8818  */
8819 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8820 
8821 kern_return_t
8822 vm_corrupt_text_addr(uintptr_t va)
8823 {
8824 	task_t                 task = current_task();
8825 	vm_map_t               map;
8826 	kern_return_t          kr = KERN_SUCCESS;
8827 	vm_object_t            object = VM_OBJECT_NULL;
8828 	vm_object_offset_t     offset;
8829 	vm_page_t              page = NULL;
8830 	pmap_paddr_t           pa;
8831 	vm_prot_t              prot;
8832 
8833 	map = task->map;
8834 	if (task->map == NULL) {
8835 		printf("corrupt_text_addr: no map\n");
8836 		return KERN_FAILURE;
8837 	}
8838 
8839 	kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page, &prot);
8840 	if (kr != KERN_SUCCESS) {
8841 		printf("corrupt_text_addr: page lookup failed\n");
8842 		return kr;
8843 	}
8844 	if (!(prot & VM_PROT_EXECUTE)) {
8845 		printf("corrupt_text_addr: page not executable\n");
8846 		printf("corrupt_text_addr: page not executable\n");
		vm_object_unlock(object);	/* the lookup returned with the object locked */
8847 		return KERN_FAILURE;
8848 
8849 	/* get the physical address to use */
8850 	pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
8851 
8852 	/*
8853 	 * Check we have something we can work with.
8854 	 * Due to racing with pageout as we enter the sysctl,
8855 	 * it's theoretically possible to have the page disappear, just
8856 	 * before the lookup.
8857 	 *
8858 	 * That's unlikely to happen often, but I've filed radar 72857482
8859 	 * to bubble up the error here to the sysctl result and have the
8860 	 * test not FAIL in that case.
8861 	 */
8862 	if (page->vmp_busy) {
8863 		printf("corrupt_text_addr: vmp_busy\n");
8864 		kr = KERN_FAILURE;
8865 	}
8866 	if (page->vmp_cleaning) {
8867 		printf("corrupt_text_addr: vmp_cleaning\n");
8868 		kr = KERN_FAILURE;
8869 	}
8870 	if (page->vmp_laundry) {
8871 		printf("corrupt_text_addr: vmp_laundry\n");
8872 		kr = KERN_FAILURE;
8873 	}
8874 	if (vm_page_is_fictitious(page)) {
8875 		printf("corrupt_text_addr: vmp_fictitious\n");
8876 		kr = KERN_FAILURE;
8877 	}
8878 	if (page->vmp_precious) {
8879 		printf("corrupt_text_addr: vmp_precious\n");
8880 		kr = KERN_FAILURE;
8881 	}
8882 	if (page->vmp_absent) {
8883 		printf("corrupt_text_addr: vmp_absent\n");
8884 		kr = KERN_FAILURE;
8885 	}
8886 	if (VMP_ERROR_GET(page)) {
8887 		printf("corrupt_text_addr: vmp_error\n");
8888 		kr = KERN_FAILURE;
8889 	}
8890 	if (page->vmp_dirty) {
8891 		printf("corrupt_text_addr: vmp_dirty\n");
8892 		kr = KERN_FAILURE;
8893 	}
8894 	if (is_page_wired(page)) {
8895 		printf("corrupt_text_addr: wired\n");
8896 		kr = KERN_FAILURE;
8897 	}
8898 	if (!page->vmp_pmapped) {
8899 		printf("corrupt_text_addr: !vmp_pmapped\n");
8900 		kr = KERN_FAILURE;
8901 	}
8902 
8903 	if (kr == KERN_SUCCESS) {
8904 		printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8905 		kr = pmap_test_text_corruption(pa);
8906 		if (kr != KERN_SUCCESS) {
8907 			printf("corrupt_text_addr: pmap error %d\n", kr);
8908 		} else {
8909 			corruption_test_va = va;
8910 		}
8911 	} else {
8912 		printf("corrupt_text_addr: object %p\n", object);
8913 		printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8914 		printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8915 		printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8916 		printf("corrupt_text_addr: vm_page_t %p\n", page);
8917 		printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8918 		printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8919 	}
8920 
8921 	if (object != VM_OBJECT_NULL) {
8922 		vm_object_unlock(object);
8923 	}
8924 	return kr;
8925 }
8926 
8927 #endif /* DEBUG || DEVELOPMENT */
8928