1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm_fault.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Page fault handling module.
63 */
64
65 #include <libkern/OSAtomic.h>
66
67 #include <mach/mach_types.h>
68 #include <mach/kern_return.h>
69 #include <mach/message.h> /* for error codes */
70 #include <mach/vm_param.h>
71 #include <mach/vm_behavior.h>
72 #include <mach/memory_object.h>
73 /* For memory_object_data_{request,unlock} */
74 #include <mach/sdt.h>
75
76 #include <kern/kern_types.h>
77 #include <kern/host_statistics.h>
78 #include <kern/counter.h>
79 #include <kern/task.h>
80 #include <kern/thread.h>
81 #include <kern/sched_prim.h>
82 #include <kern/host.h>
83 #include <kern/mach_param.h>
84 #include <kern/macro_help.h>
85 #include <kern/zalloc_internal.h>
86 #include <kern/misc_protos.h>
87 #include <kern/policy_internal.h>
88
89 #include <vm/vm_compressor.h>
90 #include <vm/vm_compressor_pager.h>
91 #include <vm/vm_fault.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_kern.h>
96 #include <vm/pmap.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_protos.h>
99 #include <vm/vm_external.h>
100 #include <vm/memory_object.h>
101 #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
102 #include <vm/vm_shared_region.h>
103
104 #include <sys/codesign.h>
105 #include <sys/code_signing.h>
106 #include <sys/reason.h>
107 #include <sys/signalvar.h>
108
109 #include <sys/kdebug_triage.h>
110
111 #include <san/kasan.h>
112 #include <libkern/coreanalytics/coreanalytics.h>
113
114 #define VM_FAULT_CLASSIFY 0
115
116 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
117
118 int vm_protect_privileged_from_untrusted = 1;
119
120 unsigned int vm_object_pagein_throttle = 16;
121
122 /*
123 * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control; it
124 * kicks in when swap space runs out. 64-bit programs have massive address spaces and, if buggy, can leak
125 * enormous amounts of memory and run the system completely out of swap space. When that happens, we
126 * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps
127 * keep the UI active so that the user has a chance to kill the offending task before the system
128 * completely hangs.
129 *
130 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
131 * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold
132 * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a
133 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
134 */
135
136 extern void throttle_lowpri_io(int);
137
138 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
139
140 uint64_t vm_hard_throttle_threshold;
141
142 #if DEBUG || DEVELOPMENT
143 static bool vmtc_panic_instead = false;
144 int panic_object_not_alive = 1;
145 #endif /* DEBUG || DEVELOPMENT */
146
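/*
 * Summary of the check below: a task needs hard throttling either when the VM
 * system explicitly wants it throttled, or when free memory has dropped below
 * the throttle limit (or the compressor has hit its hard limit) and the
 * faulting thread is already running at a throttled I/O tier.
 */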
147 OS_ALWAYS_INLINE
148 boolean_t
149 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
150 {
151 return vm_wants_task_throttled(current_task()) ||
152 ((vm_page_free_count < vm_page_throttle_limit ||
153 HARD_THROTTLE_LIMIT_REACHED()) &&
154 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
155 }
156
157 #define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
158 #define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */
159
160 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6
161 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000
162
163
164 #define VM_STAT_DECOMPRESSIONS() \
165 MACRO_BEGIN \
166 counter_inc(&vm_statistics_decompressions); \
167 current_thread()->decompressions++; \
168 MACRO_END
169
170 boolean_t current_thread_aborted(void);
171
172 /* Forward declarations of internal routines. */
173 static kern_return_t vm_fault_wire_fast(
174 vm_map_t map,
175 vm_map_offset_t va,
176 vm_prot_t prot,
177 vm_tag_t wire_tag,
178 vm_map_entry_t entry,
179 pmap_t pmap,
180 vm_map_offset_t pmap_addr,
181 ppnum_t *physpage_p);
182
183 static kern_return_t vm_fault_internal(
184 vm_map_t map,
185 vm_map_offset_t vaddr,
186 vm_prot_t caller_prot,
187 boolean_t change_wiring,
188 vm_tag_t wire_tag,
189 int interruptible,
190 pmap_t pmap,
191 vm_map_offset_t pmap_addr,
192 ppnum_t *physpage_p);
193
194 static void vm_fault_copy_cleanup(
195 vm_page_t page,
196 vm_page_t top_page);
197
198 static void vm_fault_copy_dst_cleanup(
199 vm_page_t page);
200
201 #if VM_FAULT_CLASSIFY
202 extern void vm_fault_classify(vm_object_t object,
203 vm_object_offset_t offset,
204 vm_prot_t fault_type);
205
206 extern void vm_fault_classify_init(void);
207 #endif
208
209 unsigned long vm_pmap_enter_blocked = 0;
210 unsigned long vm_pmap_enter_retried = 0;
211
212 unsigned long vm_cs_validates = 0;
213 unsigned long vm_cs_revalidates = 0;
214 unsigned long vm_cs_query_modified = 0;
215 unsigned long vm_cs_validated_dirtied = 0;
216 unsigned long vm_cs_bitmap_validated = 0;
217
218 #if CODE_SIGNING_MONITOR
219 uint64_t vm_cs_defer_to_csm = 0;
220 uint64_t vm_cs_defer_to_csm_not = 0;
221 #endif /* CODE_SIGNING_MONITOR */
222
223 void vm_pre_fault(vm_map_offset_t, vm_prot_t);
224
225 extern char *kdp_compressor_decompressed_page;
226 extern addr64_t kdp_compressor_decompressed_page_paddr;
227 extern ppnum_t kdp_compressor_decompressed_page_ppnum;
228
229 struct vmrtfr {
230 int vmrtfr_maxi;
231 int vmrtfr_curi;
232 int64_t vmrtf_total;
233 vm_rtfault_record_t *vm_rtf_records;
234 } vmrtfrs;
235 #define VMRTF_DEFAULT_BUFSIZE (4096)
236 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
237 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
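/*
 * By default the record buffer holds as many vm_rtfault_record_t entries as fit
 * in VMRTF_DEFAULT_BUFSIZE (4KB); the count can be overridden with the
 * "vm_rtfault_records" boot-arg.
 */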
238
239 static void vm_rtfrecord_lock(void);
240 static void vm_rtfrecord_unlock(void);
241 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
242
243 extern lck_grp_t vm_page_lck_grp_bucket;
244 extern lck_attr_t vm_page_lck_attr;
245 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
246
247 #if DEVELOPMENT || DEBUG
248 extern int madvise_free_debug;
249 extern int madvise_free_debug_sometimes;
250 #endif /* DEVELOPMENT || DEBUG */
251
252 extern int vm_pageout_protect_realtime;
253
254 #if CONFIG_FREEZE
255 #endif /* CONFIG_FREEZE */
256
257 /*
258 * Routine: vm_fault_init
259 * Purpose:
260 * Initialize our private data structures.
261 */
262 __startup_func
263 void
264 vm_fault_init(void)
265 {
266 int i, vm_compressor_temp;
267 boolean_t need_default_val = TRUE;
268 /*
269 * Choose a value for the hard throttle threshold based on the amount of RAM. The threshold is
270 * computed as a percentage of available memory, and the percentage used scales inversely with
271 * the amount of memory: it runs between 10% and 35%. We use 35% for small-memory systems
272 * and reduce it to 10% for very large memory configurations. This gives us a definition of a
273 * memory hog that makes more sense relative to the amount of RAM in the machine.
274 * The formula below simply uses the number of gigabytes of RAM to adjust the percentage.
275 */
276
277 vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
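	/*
	 * Worked example of the formula above: a 4GB system uses 35 - 4 = 31% of RAM
	 * (~1.2GB), a 16GB system uses 35 - 16 = 19% (~3GB), and systems with 25GB or
	 * more of RAM bottom out at 35 - 25 = 10%.
	 */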
278
279 /*
280 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
281 */
282
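	/*
	 * Sanity-check the "vm_compressor" boot-arg: the value must fit within a
	 * single supported pager mode bit, otherwise it is ignored and we fall back
	 * to the device tree (or built-in) default below.
	 */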
283 if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
284 for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
285 if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
286 need_default_val = FALSE;
287 vm_compressor_mode = vm_compressor_temp;
288 break;
289 }
290 }
291 if (need_default_val) {
292 printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
293 }
294 }
295 #if CONFIG_FREEZE
296 if (need_default_val) {
297 if (osenvironment_is_diagnostics()) {
298 printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
299 vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
300 need_default_val = false;
301 }
302 }
303 #endif /* CONFIG_FREEZE */
304 if (need_default_val) {
305 /* If no boot arg or incorrect boot arg, try device tree. */
306 PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
307 }
308 printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
309 vm_config_init();
310
311 PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
312 &vm_protect_privileged_from_untrusted,
313 sizeof(vm_protect_privileged_from_untrusted));
314
315 #if DEBUG || DEVELOPMENT
316 (void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
317
318 if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
319 madvise_free_debug = 0;
320 madvise_free_debug_sometimes = 0;
321 }
322
323 PE_parse_boot_argn("panic_object_not_alive", &panic_object_not_alive, sizeof(panic_object_not_alive));
324 #endif /* DEBUG || DEVELOPMENT */
325 }
326
327 __startup_func
328 static void
329 vm_rtfault_record_init(void)
330 {
331 size_t size;
332
333 vmrtf_num_records = MAX(vmrtf_num_records, 1);
334 size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
335 vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
336 ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
337 vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
338 }
339 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
340
341 /*
342 * Routine: vm_fault_cleanup
343 * Purpose:
344 * Clean up the result of vm_fault_page.
345 * Results:
346 * The paging reference for "object" is released.
347 * "object" is unlocked.
348 * If "top_page" is not null, "top_page" is
349 * freed and the paging reference for the object
350 * containing it is released.
351 *
352 * In/out conditions:
353 * "object" must be locked.
354 */
355 void
356 vm_fault_cleanup(
357 vm_object_t object,
358 vm_page_t top_page)
359 {
360 vm_object_paging_end(object);
361 vm_object_unlock(object);
362
363 if (top_page != VM_PAGE_NULL) {
364 object = VM_PAGE_OBJECT(top_page);
365
366 vm_object_lock(object);
367 VM_PAGE_FREE(top_page);
368 vm_object_paging_end(object);
369 vm_object_unlock(object);
370 }
371 }
372
373 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
374
375
376 boolean_t vm_page_deactivate_behind = TRUE;
377 /*
378 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
379 */
380 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128
381 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */
382 /* we use it to size an array on the stack */
383
384 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
385
386 #define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
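/*
 * object->sequential tracks the length, in bytes, of the current run of
 * sequential faults against an object: it grows positive for a forward run,
 * negative for a reverse run, stops growing at +/- MAX_SEQUENTIAL_RUN, and is
 * reset to 0 whenever the access pattern breaks.
 */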
387
388 /*
389 * vm_page_is_sequential
390 *
391 * Determine if sequential access is in progress
392 * in accordance with the behavior specified.
393 * Update state to indicate current access pattern.
394 *
395 * object must have at least the shared lock held
396 */
397 static
398 void
399 vm_fault_is_sequential(
400 vm_object_t object,
401 vm_object_offset_t offset,
402 vm_behavior_t behavior)
403 {
404 vm_object_offset_t last_alloc;
405 int sequential;
406 int orig_sequential;
407
408 last_alloc = object->last_alloc;
409 sequential = object->sequential;
410 orig_sequential = sequential;
411
412 offset = vm_object_trunc_page(offset);
413 if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
414 /* re-faulting in the same page: no change in behavior */
415 return;
416 }
417
418 switch (behavior) {
419 case VM_BEHAVIOR_RANDOM:
420 /*
421 * reset indicator of sequential behavior
422 */
423 sequential = 0;
424 break;
425
426 case VM_BEHAVIOR_SEQUENTIAL:
427 if (offset && last_alloc == offset - PAGE_SIZE_64) {
428 /*
429 * advance indicator of sequential behavior
430 */
431 if (sequential < MAX_SEQUENTIAL_RUN) {
432 sequential += PAGE_SIZE;
433 }
434 } else {
435 /*
436 * reset indicator of sequential behavior
437 */
438 sequential = 0;
439 }
440 break;
441
442 case VM_BEHAVIOR_RSEQNTL:
443 if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
444 /*
445 * advance indicator of sequential behavior
446 */
447 if (sequential > -MAX_SEQUENTIAL_RUN) {
448 sequential -= PAGE_SIZE;
449 }
450 } else {
451 /*
452 * reset indicator of sequential behavior
453 */
454 sequential = 0;
455 }
456 break;
457
458 case VM_BEHAVIOR_DEFAULT:
459 default:
460 if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
461 /*
462 * advance indicator of sequential behavior
463 */
464 if (sequential < 0) {
465 sequential = 0;
466 }
467 if (sequential < MAX_SEQUENTIAL_RUN) {
468 sequential += PAGE_SIZE;
469 }
470 } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
471 /*
472 * advance indicator of sequential behavior
473 */
474 if (sequential > 0) {
475 sequential = 0;
476 }
477 if (sequential > -MAX_SEQUENTIAL_RUN) {
478 sequential -= PAGE_SIZE;
479 }
480 } else {
481 /*
482 * reset indicator of sequential behavior
483 */
484 sequential = 0;
485 }
486 break;
487 }
488 if (sequential != orig_sequential) {
489 if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
490 /*
491 * if someone else has already updated object->sequential
492 * don't bother trying to update it or object->last_alloc
493 */
494 return;
495 }
496 }
497 /*
498 * I'd like to do this with an OSCompareAndSwap64, but that
499 * doesn't exist for PPC... however, it shouldn't matter
500 * that much... last_alloc is maintained so that we can determine
501 * if a sequential access pattern is taking place... if only
502 * one thread is banging on this object, no problem with the unprotected
503 * update... if 2 or more threads are banging away, we run the risk of
504 * someone seeing a mangled update... however, in the face of multiple
505 * accesses, no sequential access pattern can develop anyway, so we
506 * haven't lost any real info.
507 */
508 object->last_alloc = offset;
509 }
510
511 #if DEVELOPMENT || DEBUG
512 uint64_t vm_page_deactivate_behind_count = 0;
513 #endif /* DEVELOPMENT || DEBUG */
514
515 /*
516 * vm_page_deactivate_behind
517 *
518 * Determine if sequential access is in progress
519 * in accordance with the behavior specified. If
520 * so, compute a potential page to deactivate and
521 * deactivate it.
522 *
523 * object must be locked.
524 *
525 * return TRUE if we actually deactivate a page
526 */
527 static
528 boolean_t
529 vm_fault_deactivate_behind(
530 vm_object_t object,
531 vm_object_offset_t offset,
532 vm_behavior_t behavior)
533 {
534 int n;
535 int pages_in_run = 0;
536 int max_pages_in_run = 0;
537 int sequential_run;
538 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
539 vm_object_offset_t run_offset = 0;
540 vm_object_offset_t pg_offset = 0;
541 vm_page_t m;
542 vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
543
544 pages_in_run = 0;
545 #if TRACEFAULTPAGE
546 dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
547 #endif
548 if (is_kernel_object(object) || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
549 /*
550 * Skip the deactivate-behind work if:
551 * - the object is the kernel object (its pages are not intended
552 * to become pageable),
553 * - the deactivate-behind mechanism has been disabled, or
554 * - the offset is not aligned to the system's PAGE_SIZE; in that
555 * case we handle the deactivation once, on the aligned offset,
556 * for the full PAGE_SIZE page, which avoids redundant
557 * deactivations and extra faults.
558 */
559 return FALSE;
560 }
561 if ((sequential_run = object->sequential)) {
562 if (sequential_run < 0) {
563 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
564 sequential_run = 0 - sequential_run;
565 } else {
566 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
567 }
568 }
569 switch (behavior) {
570 case VM_BEHAVIOR_RANDOM:
571 break;
572 case VM_BEHAVIOR_SEQUENTIAL:
573 if (sequential_run >= (int)PAGE_SIZE) {
574 run_offset = 0 - PAGE_SIZE_64;
575 max_pages_in_run = 1;
576 }
577 break;
578 case VM_BEHAVIOR_RSEQNTL:
579 if (sequential_run >= (int)PAGE_SIZE) {
580 run_offset = PAGE_SIZE_64;
581 max_pages_in_run = 1;
582 }
583 break;
584 case VM_BEHAVIOR_DEFAULT:
585 default:
586 { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
587
588 /*
589 * determine if the run of sequential access has been
590 * long enough on an object with default access behavior
591 * to consider it for deactivation
592 */
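			/*
			 * With the default tunables this means: once a run reaches
			 * VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW (128) pages, deactivate a
			 * trailing cluster of VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER (16)
			 * pages, and repeat every 16 pages of further sequential access.
			 */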
593 if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
594 /*
595 * the comparisons between offset and behind are done
596 * in this kind of odd fashion in order to prevent wrap around
597 * at the end points
598 */
599 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
600 if (offset >= behind) {
601 run_offset = 0 - behind;
602 pg_offset = PAGE_SIZE_64;
603 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
604 }
605 } else {
606 if (offset < -behind) {
607 run_offset = behind;
608 pg_offset = 0 - PAGE_SIZE_64;
609 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
610 }
611 }
612 }
613 break;}
614 }
615 for (n = 0; n < max_pages_in_run; n++) {
616 m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
617
618 if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) {
619 page_run[pages_in_run++] = m;
620
621 /*
622 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
623 *
624 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
625 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
626 * new reference happens. If no further references happen on the page after that remote TLB flushes
627 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
628 * by pageout_scan, which is just fine since the last reference would have happened quite far
629 * in the past (TLB caches don't hang around for very long), and of course could just as easily
630 * have happened before we did the deactivate_behind.
631 */
632 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
633 }
634 }
635 if (pages_in_run) {
636 vm_page_lockspin_queues();
637
638 for (n = 0; n < pages_in_run; n++) {
639 m = page_run[n];
640
641 vm_page_deactivate_internal(m, FALSE);
642
643 #if DEVELOPMENT || DEBUG
644 vm_page_deactivate_behind_count++;
645 #endif /* DEVELOPMENT || DEBUG */
646
647 #if TRACEFAULTPAGE
648 dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
649 #endif
650 }
651 vm_page_unlock_queues();
652
653 return TRUE;
654 }
655 return FALSE;
656 }
657
658
659 #if (DEVELOPMENT || DEBUG)
660 uint32_t vm_page_creation_throttled_hard = 0;
661 uint32_t vm_page_creation_throttled_soft = 0;
662 uint64_t vm_page_creation_throttle_avoided = 0;
663 #endif /* DEVELOPMENT || DEBUG */
664
665 static int
666 vm_page_throttled(boolean_t page_kept)
667 {
668 clock_sec_t elapsed_sec;
669 clock_sec_t tv_sec;
670 clock_usec_t tv_usec;
671 task_t curtask = current_task_early();
672
673 thread_t thread = current_thread();
674
675 if (thread->options & TH_OPT_VMPRIV) {
676 return 0;
677 }
678
679 if (curtask && !curtask->active) {
680 return 0;
681 }
682
683 if (thread->t_page_creation_throttled) {
684 thread->t_page_creation_throttled = 0;
685
686 if (page_kept == FALSE) {
687 goto no_throttle;
688 }
689 }
690 if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
691 #if (DEVELOPMENT || DEBUG)
692 thread->t_page_creation_throttled_hard++;
693 OSAddAtomic(1, &vm_page_creation_throttled_hard);
694 #endif /* DEVELOPMENT || DEBUG */
695 return HARD_THROTTLE_DELAY;
696 }
697
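	/*
	 * The rate check below only engages once the thread has created more than
	 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
	 * (6 * 20000 = 120000) pages and free pages are below the throttle limit
	 * (or the compressor/swapper needs relief).
	 */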
698 if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
699 thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
700 if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
701 #if (DEVELOPMENT || DEBUG)
702 OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
703 #endif
704 goto no_throttle;
705 }
706 clock_get_system_microtime(&tv_sec, &tv_usec);
707
708 elapsed_sec = tv_sec - thread->t_page_creation_time;
709
710 if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
711 (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
712 if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
713 /*
714 * we'll reset our stats to give a well behaved app
715 * that was unlucky enough to accumulate a bunch of pages
716 * over a long period of time a chance to get out of
717 * the throttled state... we reset the counter and timestamp
718 * so that if it stays under the rate limit for the next second
719 * it will be back in our good graces... if it exceeds it, it
720 * will remain in the throttled state
721 */
722 thread->t_page_creation_time = tv_sec;
723 thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
724 }
725 VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
726
727 thread->t_page_creation_throttled = 1;
728
729 if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
730 #if (DEVELOPMENT || DEBUG)
731 thread->t_page_creation_throttled_hard++;
732 OSAddAtomic(1, &vm_page_creation_throttled_hard);
733 #endif /* DEVELOPMENT || DEBUG */
734 return HARD_THROTTLE_DELAY;
735 } else {
736 #if (DEVELOPMENT || DEBUG)
737 thread->t_page_creation_throttled_soft++;
738 OSAddAtomic(1, &vm_page_creation_throttled_soft);
739 #endif /* DEVELOPMENT || DEBUG */
740 return SOFT_THROTTLE_DELAY;
741 }
742 }
743 thread->t_page_creation_time = tv_sec;
744 thread->t_page_creation_count = 0;
745 }
746 no_throttle:
747 thread->t_page_creation_count++;
748
749 return 0;
750 }
751
752 extern boolean_t vm_pageout_running;
753 static __attribute__((noinline, not_tail_called)) void
754 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(
755 int throttle_delay)
756 {
757 /* make sure vm_pageout_scan() gets to work while we're throttled */
758 if (!vm_pageout_running) {
759 thread_wakeup((event_t)&vm_page_free_wanted);
760 }
761 delay(throttle_delay);
762 }
763
764
765 /*
766 * check for various conditions that would
767 * prevent us from creating a ZF page...
768 * cleanup is based on being called from vm_fault_page
769 *
770 * object must be locked
771 * object == m->vmp_object
772 */
773 static vm_fault_return_t
774 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
775 {
776 int throttle_delay;
777
778 if (object->shadow_severed ||
779 VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
780 /*
781 * Either:
782 * 1. the shadow chain was severed,
783 * 2. the purgeable object is volatile or empty and is marked
784 * to fault on access while volatile.
785 * Just have to return an error at this point
786 */
787 if (m != VM_PAGE_NULL) {
788 VM_PAGE_FREE(m);
789 }
790 vm_fault_cleanup(object, first_m);
791
792 thread_interrupt_level(interruptible_state);
793
794 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
795 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
796 }
797
798 if (object->shadow_severed) {
799 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
800 }
801 return VM_FAULT_MEMORY_ERROR;
802 }
803 if (page_throttle == TRUE) {
804 if ((throttle_delay = vm_page_throttled(FALSE))) {
805 /*
806 * we're throttling zero-fills...
807 * treat this as if we couldn't grab a page
808 */
809 if (m != VM_PAGE_NULL) {
810 VM_PAGE_FREE(m);
811 }
812 vm_fault_cleanup(object, first_m);
813
814 VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
815
816 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
817
818 if (current_thread_aborted()) {
819 thread_interrupt_level(interruptible_state);
820 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
821 return VM_FAULT_INTERRUPTED;
822 }
823 thread_interrupt_level(interruptible_state);
824
825 return VM_FAULT_MEMORY_SHORTAGE;
826 }
827 }
828 return VM_FAULT_SUCCESS;
829 }
830
831 /*
832 * Clear the code signing bits on the given page_t
833 */
834 static void
835 vm_fault_cs_clear(vm_page_t m)
836 {
837 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
838 m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
839 m->vmp_cs_nx = VMP_CS_ALL_FALSE;
840 }
841
842 /*
843 * Enqueues the given page on the throttled queue.
844 * The caller must hold the vm_page_queue_lock and it will be held on return.
845 */
846 static void
847 vm_fault_enqueue_throttled_locked(vm_page_t m)
848 {
849 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
850 assert(!VM_PAGE_WIRED(m));
851
852 /*
853 * can't be on the pageout queue since we don't
854 * have a pager to try and clean to
855 */
856 vm_page_queues_remove(m, TRUE);
857 vm_page_check_pageable_safe(m);
858 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
859 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
860 vm_page_throttled_count++;
861 }
862
863 /*
864 * do the work to zero fill a page and
865 * inject it into the correct paging queue
866 *
867 * m->vmp_object must be locked
868 * page queue lock must NOT be held
869 */
870 static int
871 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
872 {
873 int my_fault = DBG_ZERO_FILL_FAULT;
874 vm_object_t object;
875
876 object = VM_PAGE_OBJECT(m);
877
878 /*
879 * This is a zero-fill page fault...
880 *
881 * Checking the page lock is a waste of
882 * time; this page was absent, so
883 * it can't be page locked by a pager.
884 *
885 * we also consider it undefined
886 * with respect to instruction
887 * execution. i.e. it is the responsibility
888 * of higher layers to call for an instruction
889 * sync after changing the contents and before
890 * sending a program into this area. We
891 * choose this approach for performance
892 */
893 vm_fault_cs_clear(m);
894 m->vmp_pmapped = TRUE;
895
896 if (no_zero_fill == TRUE) {
897 my_fault = DBG_NZF_PAGE_FAULT;
898
899 if (m->vmp_absent && m->vmp_busy) {
900 return my_fault;
901 }
902 } else {
903 vm_page_zero_fill(m);
904
905 counter_inc(&vm_statistics_zero_fill_count);
906 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
907 }
908 assert(!m->vmp_laundry);
909 assert(!is_kernel_object(object));
910 //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
911 if (!VM_DYNAMIC_PAGING_ENABLED() &&
912 (object->purgable == VM_PURGABLE_DENY ||
913 object->purgable == VM_PURGABLE_NONVOLATILE ||
914 object->purgable == VM_PURGABLE_VOLATILE)) {
915 vm_page_lockspin_queues();
916 if (!VM_DYNAMIC_PAGING_ENABLED()) {
917 vm_fault_enqueue_throttled_locked(m);
918 }
919 vm_page_unlock_queues();
920 }
921 return my_fault;
922 }
923
924
925 /*
926 * Routine: vm_fault_page
927 * Purpose:
928 * Find the resident page for the virtual memory
929 * specified by the given virtual memory object
930 * and offset.
931 * Additional arguments:
932 * The required permissions for the page are given
933 * in "fault_type". Desired permissions are included
934 * in "protection".
935 * fault_info is passed along to determine pagein cluster
936 * limits... it contains the expected reference pattern,
937 * cluster size if available, etc...
938 *
939 * If the desired page is known to be resident (for
940 * example, because it was previously wired down), asserting
941 * the "must_be_resident" parameter will speed the search.
942 *
943 * If the operation can be interrupted (by thread_abort
944 * or thread_terminate), then the "interruptible"
945 * parameter should be asserted.
946 *
947 * Results:
948 * The page containing the proper data is returned
949 * in "result_page".
950 *
951 * In/out conditions:
952 * The source object must be locked and referenced,
953 * and must donate one paging reference. The reference
954 * is not affected. The paging reference and lock are
955 * consumed.
956 *
957 * If the call succeeds, the object in which "result_page"
958 * resides is left locked and holding a paging reference.
959 * If this is not the original object, a busy page in the
960 * original object is returned in "top_page", to prevent other
961 * callers from pursuing this same data, along with a paging
962 * reference for the original object. The "top_page" should
963 * be destroyed when this guarantee is no longer required.
964 * The "result_page" is also left busy. It is not removed
965 * from the pageout queues.
966 * Special Case:
967 * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
968 * fault succeeded but there's no VM page (i.e. the VM object
969 * does not actually hold VM pages, but device memory or
970 * large pages). The object is still locked and we still hold a
971 * paging_in_progress reference.
972 */
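/*
 * Rough caller pattern implied by the contract above (a sketch only, not
 * copied verbatim from any call site; local variable names are illustrative):
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	result = vm_fault_page(object, offset, fault_type,
 *	                       FALSE, FALSE,	(must_be_resident, caller_lookup)
 *	                       &prot, &result_page, &top_page,
 *	                       &type_of_fault, &error_code,
 *	                       no_zero_fill, &fault_info);
 *	if (result == VM_FAULT_SUCCESS && result_page != VM_PAGE_NULL) {
 *		(use the busy result_page, e.g. enter it in the pmap, then:)
 *		PAGE_WAKEUP_DONE(result_page);
 *		vm_fault_cleanup(VM_PAGE_OBJECT(result_page), top_page);
 *	}
 */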
973 unsigned int vm_fault_page_blocked_access = 0;
974 unsigned int vm_fault_page_forced_retry = 0;
975
976 vm_fault_return_t
977 vm_fault_page(
978 /* Arguments: */
979 vm_object_t first_object, /* Object to begin search */
980 vm_object_offset_t first_offset, /* Offset into object */
981 vm_prot_t fault_type, /* What access is requested */
982 boolean_t must_be_resident,/* Must page be resident? */
983 boolean_t caller_lookup, /* caller looked up page */
984 /* Modifies in place: */
985 vm_prot_t *protection, /* Protection for mapping */
986 vm_page_t *result_page, /* Page found, if successful */
987 /* Returns: */
988 vm_page_t *top_page, /* Page in top object, if
989 * not result_page. */
990 int *type_of_fault, /* if non-null, fill in with type of fault
991 * COW, zero-fill, etc... returned in trace point */
992 /* More arguments: */
993 kern_return_t *error_code, /* code if page is in error */
994 boolean_t no_zero_fill, /* don't zero fill absent pages */
995 vm_object_fault_info_t fault_info)
996 {
997 vm_page_t m;
998 vm_object_t object;
999 vm_object_offset_t offset;
1000 vm_page_t first_m;
1001 vm_object_t next_object;
1002 vm_object_t copy_object;
1003 boolean_t look_for_page;
1004 boolean_t force_fault_retry = FALSE;
1005 vm_prot_t access_required = fault_type;
1006 vm_prot_t wants_copy_flag;
1007 kern_return_t wait_result;
1008 wait_interrupt_t interruptible_state;
1009 boolean_t data_already_requested = FALSE;
1010 vm_behavior_t orig_behavior;
1011 vm_size_t orig_cluster_size;
1012 vm_fault_return_t error;
1013 int my_fault;
1014 uint32_t try_failed_count;
1015 int interruptible; /* how may fault be interrupted? */
1016 int external_state = VM_EXTERNAL_STATE_UNKNOWN;
1017 memory_object_t pager;
1018 vm_fault_return_t retval;
1019 int grab_options;
1020 bool clear_absent_on_error = false;
1021
1022 /*
1023 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1024 * marked as paged out in the compressor pager or the pager doesn't exist.
1025 * Note also that if the pager for an internal object
1026 * has not been created, the pager is not invoked regardless of the value
1027 * of MUST_ASK_PAGER().
1028 *
1029 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1030 * is marked as paged out in the compressor pager.
1031 * PAGED_OUT() is used to determine if a page has already been pushed
1032 * into a copy object in order to avoid a redundant page out operation.
1033 */
1034 #define MUST_ASK_PAGER(o, f, s) \
1035 ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1036
1037 #define PAGED_OUT(o, f) \
1038 (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
1039
1040 /*
1041 * Recovery actions
1042 */
1043 #define RELEASE_PAGE(m) \
1044 MACRO_BEGIN \
1045 PAGE_WAKEUP_DONE(m); \
1046 if ( !VM_PAGE_PAGEABLE(m)) { \
1047 vm_page_lockspin_queues(); \
1048 if (clear_absent_on_error && m->vmp_absent) {\
1049 vm_page_zero_fill(m); \
1050 counter_inc(&vm_statistics_zero_fill_count);\
1051 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);\
1052 m->vmp_absent = false; \
1053 } \
1054 if ( !VM_PAGE_PAGEABLE(m)) { \
1055 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \
1056 vm_page_deactivate(m); \
1057 else \
1058 vm_page_activate(m); \
1059 } \
1060 vm_page_unlock_queues(); \
1061 } \
1062 clear_absent_on_error = false; \
1063 MACRO_END
1064
1065 #if TRACEFAULTPAGE
1066 dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1067 #endif
1068
1069 interruptible = fault_info->interruptible;
1070 interruptible_state = thread_interrupt_level(interruptible);
1071
1072 /*
1073 * INVARIANTS (through entire routine):
1074 *
1075 * 1) At all times, we must either have the object
1076 * lock or a busy page in some object to prevent
1077 * some other thread from trying to bring in
1078 * the same page.
1079 *
1080 * Note that we cannot hold any locks during the
1081 * pager access or when waiting for memory, so
1082 * we use a busy page then.
1083 *
1084 * 2) To prevent another thread from racing us down the
1085 * shadow chain and entering a new page in the top
1086 * object before we do, we must keep a busy page in
1087 * the top object while following the shadow chain.
1088 *
1089 * 3) We must increment paging_in_progress on any object
1090 * for which we have a busy page before dropping
1091 * the object lock
1092 *
1093 * 4) We leave busy pages on the pageout queues.
1094 * If the pageout daemon comes across a busy page,
1095 * it will remove the page from the pageout queues.
1096 */
1097
1098 object = first_object;
1099 offset = first_offset;
1100 first_m = VM_PAGE_NULL;
1101 access_required = fault_type;
1102
1103 /*
1104 * default type of fault
1105 */
1106 my_fault = DBG_CACHE_HIT_FAULT;
1107 thread_pri_floor_t token;
1108 bool drop_floor = false;
1109
1110 while (TRUE) {
1111 #if TRACEFAULTPAGE
1112 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1113 #endif
1114
1115 grab_options = 0;
1116 #if CONFIG_SECLUDED_MEMORY
1117 if (object->can_grab_secluded) {
1118 grab_options |= VM_PAGE_GRAB_SECLUDED;
1119 }
1120 #endif /* CONFIG_SECLUDED_MEMORY */
1121
1122 if (!object->alive) {
1123 /*
1124 * object is no longer valid
1125 * clean up and return error
1126 */
1127 #if DEVELOPMENT || DEBUG
1128 printf("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, object->ref_count, object->shadow_severed);
1129 if (panic_object_not_alive) {
1130 panic("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, object->ref_count, object->shadow_severed);
1131 }
1132 #endif /* DEVELOPMENT || DEBUG */
1133 vm_fault_cleanup(object, first_m);
1134 thread_interrupt_level(interruptible_state);
1135
1136 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1137 return VM_FAULT_MEMORY_ERROR;
1138 }
1139
1140 if (!object->pager_created && object->phys_contiguous) {
1141 /*
1142 * A physically-contiguous object without a pager:
1143 * must be a "large page" object. We do not deal
1144 * with VM pages for this object.
1145 */
1146 caller_lookup = FALSE;
1147 m = VM_PAGE_NULL;
1148 goto phys_contig_object;
1149 }
1150
1151 if (object->blocked_access) {
1152 /*
1153 * Access to this VM object has been blocked.
1154 * Replace our "paging_in_progress" reference with
1155 * a "activity_in_progress" reference and wait for
1156 * access to be unblocked.
1157 */
1158 caller_lookup = FALSE; /* no longer valid after sleep */
1159 vm_object_activity_begin(object);
1160 vm_object_paging_end(object);
1161 while (object->blocked_access) {
1162 vm_object_sleep(object,
1163 VM_OBJECT_EVENT_UNBLOCKED,
1164 THREAD_UNINT);
1165 }
1166 vm_fault_page_blocked_access++;
1167 vm_object_paging_begin(object);
1168 vm_object_activity_end(object);
1169 }
1170
1171 /*
1172 * See whether the page at 'offset' is resident
1173 */
1174 if (caller_lookup == TRUE) {
1175 /*
1176 * The caller has already looked up the page
1177 * and gave us the result in "result_page".
1178 * We can use this for the first lookup but
1179 * it loses its validity as soon as we unlock
1180 * the object.
1181 */
1182 m = *result_page;
1183 caller_lookup = FALSE; /* no longer valid after that */
1184 } else {
1185 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1186 }
1187 #if TRACEFAULTPAGE
1188 dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1189 #endif
1190 if (m != VM_PAGE_NULL) {
1191 if (m->vmp_busy) {
1192 /*
1193 * The page is being brought in,
1194 * wait for it and then retry.
1195 */
1196 #if TRACEFAULTPAGE
1197 dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1198 #endif
1199 wait_result = PAGE_SLEEP(object, m, interruptible);
1200
1201 if (wait_result != THREAD_AWAKENED) {
1202 vm_fault_cleanup(object, first_m);
1203 thread_interrupt_level(interruptible_state);
1204
1205 if (wait_result == THREAD_RESTART) {
1206 return VM_FAULT_RETRY;
1207 } else {
1208 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1209 return VM_FAULT_INTERRUPTED;
1210 }
1211 }
1212 continue;
1213 }
1214 if (m->vmp_laundry) {
1215 m->vmp_free_when_done = FALSE;
1216
1217 if (!m->vmp_cleaning) {
1218 vm_pageout_steal_laundry(m, FALSE);
1219 }
1220 }
1221 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
1222 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
1223 /*
1224 * Guard page: off limits !
1225 */
1226 if (fault_type == VM_PROT_NONE) {
1227 /*
1228 * The fault is not requesting any
1229 * access to the guard page, so it must
1230 * be just to wire or unwire it.
1231 * Let's pretend it succeeded...
1232 */
1233 m->vmp_busy = TRUE;
1234 *result_page = m;
1235 assert(first_m == VM_PAGE_NULL);
1236 *top_page = first_m;
1237 if (type_of_fault) {
1238 *type_of_fault = DBG_GUARD_FAULT;
1239 }
1240 thread_interrupt_level(interruptible_state);
1241 return VM_FAULT_SUCCESS;
1242 } else {
1243 /*
1244 * The fault requests access to the
1245 * guard page: let's deny that !
1246 */
1247 vm_fault_cleanup(object, first_m);
1248 thread_interrupt_level(interruptible_state);
1249 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1250 return VM_FAULT_MEMORY_ERROR;
1251 }
1252 }
1253
1254
1255 if (m->vmp_error) {
1256 /*
1257 * The page is in error, give up now.
1258 */
1259 #if TRACEFAULTPAGE
1260 dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */
1261 #endif
1262 if (error_code) {
1263 *error_code = KERN_MEMORY_ERROR;
1264 }
1265 VM_PAGE_FREE(m);
1266
1267 vm_fault_cleanup(object, first_m);
1268 thread_interrupt_level(interruptible_state);
1269
1270 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1271 return VM_FAULT_MEMORY_ERROR;
1272 }
1273 if (m->vmp_restart) {
1274 /*
1275 * The pager wants us to restart
1276 * at the top of the chain,
1277 * typically because it has moved the
1278 * page to another pager, then do so.
1279 */
1280 #if TRACEFAULTPAGE
1281 dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1282 #endif
1283 VM_PAGE_FREE(m);
1284
1285 vm_fault_cleanup(object, first_m);
1286 thread_interrupt_level(interruptible_state);
1287
1288 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1289 return VM_FAULT_RETRY;
1290 }
1291 if (m->vmp_absent) {
1292 /*
1293 * The page isn't busy, but is absent,
1294 * therefore it's deemed "unavailable".
1295 *
1296 * Remove the non-existent page (unless it's
1297 * in the top object) and move on down to the
1298 * next object (if there is one).
1299 */
1300 #if TRACEFAULTPAGE
1301 dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */
1302 #endif
1303 next_object = object->shadow;
1304
1305 if (next_object == VM_OBJECT_NULL) {
1306 /*
1307 * Absent page at bottom of shadow
1308 * chain; zero fill the page we left
1309 * busy in the first object, and free
1310 * the absent page.
1311 */
1312 assert(!must_be_resident);
1313
1314 /*
1315 * check for any conditions that prevent
1316 * us from creating a new zero-fill page
1317 * vm_fault_check will do all of the
1318 * fault cleanup in the case of an error condition
1319 * including resetting the thread_interrupt_level
1320 */
1321 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1322
1323 if (error != VM_FAULT_SUCCESS) {
1324 return error;
1325 }
1326
1327 if (object != first_object) {
1328 /*
1329 * free the absent page we just found
1330 */
1331 VM_PAGE_FREE(m);
1332
1333 /*
1334 * drop reference and lock on current object
1335 */
1336 vm_object_paging_end(object);
1337 vm_object_unlock(object);
1338
1339 /*
1340 * grab the original page we
1341 * 'soldered' in place and
1342 * retake lock on 'first_object'
1343 */
1344 m = first_m;
1345 first_m = VM_PAGE_NULL;
1346
1347 object = first_object;
1348 offset = first_offset;
1349
1350 vm_object_lock(object);
1351 } else {
1352 /*
1353 * we're going to use the absent page we just found
1354 * so convert it to a 'busy' page
1355 */
1356 m->vmp_absent = FALSE;
1357 m->vmp_busy = TRUE;
1358 }
1359 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1360 m->vmp_absent = TRUE;
1361 clear_absent_on_error = true;
1362 }
1363 /*
1364 * zero-fill the page and put it on
1365 * the correct paging queue
1366 */
1367 my_fault = vm_fault_zero_page(m, no_zero_fill);
1368
1369 break;
1370 } else {
1371 if (must_be_resident) {
1372 vm_object_paging_end(object);
1373 } else if (object != first_object) {
1374 vm_object_paging_end(object);
1375 VM_PAGE_FREE(m);
1376 } else {
1377 first_m = m;
1378 m->vmp_absent = FALSE;
1379 m->vmp_busy = TRUE;
1380
1381 vm_page_lockspin_queues();
1382 vm_page_queues_remove(m, FALSE);
1383 vm_page_unlock_queues();
1384 }
1385
1386 offset += object->vo_shadow_offset;
1387 fault_info->lo_offset += object->vo_shadow_offset;
1388 fault_info->hi_offset += object->vo_shadow_offset;
1389 access_required = VM_PROT_READ;
1390
1391 vm_object_lock(next_object);
1392 vm_object_unlock(object);
1393 object = next_object;
1394 vm_object_paging_begin(object);
1395
1396 /*
1397 * reset to default type of fault
1398 */
1399 my_fault = DBG_CACHE_HIT_FAULT;
1400
1401 continue;
1402 }
1403 }
1404 if ((m->vmp_cleaning)
1405 && ((object != first_object) || (object->vo_copy != VM_OBJECT_NULL))
1406 && (fault_type & VM_PROT_WRITE)) {
1407 /*
1408 * This is a copy-on-write fault that will
1409 * cause us to revoke access to this page, but
1410 * this page is in the process of being cleaned
1411 * in a clustered pageout. We must wait until
1412 * the cleaning operation completes before
1413 * revoking access to the original page,
1414 * otherwise we might attempt to remove a
1415 * wired mapping.
1416 */
1417 #if TRACEFAULTPAGE
1418 dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */
1419 #endif
1420 /*
1421 * take an extra ref so that object won't die
1422 */
1423 vm_object_reference_locked(object);
1424
1425 vm_fault_cleanup(object, first_m);
1426
1427 vm_object_lock(object);
1428 assert(object->ref_count > 0);
1429
1430 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1431
1432 if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1433 PAGE_ASSERT_WAIT(m, interruptible);
1434
1435 vm_object_unlock(object);
1436 wait_result = thread_block(THREAD_CONTINUE_NULL);
1437 vm_object_deallocate(object);
1438
1439 goto backoff;
1440 } else {
1441 vm_object_unlock(object);
1442
1443 vm_object_deallocate(object);
1444 thread_interrupt_level(interruptible_state);
1445
1446 return VM_FAULT_RETRY;
1447 }
1448 }
1449 if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1450 !(fault_info != NULL && fault_info->stealth)) {
1451 /*
1452 * If we were passed a non-NULL pointer for
1453 * "type_of_fault", then we came from
1454 * vm_fault... we'll let it deal with
1455 * this condition, since it
1456 * needs to see m->vmp_speculative to correctly
1457 * account the pageins, otherwise...
1458 * take it off the speculative queue, we'll
1459 * let the caller of vm_fault_page deal
1460 * with getting it onto the correct queue
1461 *
1462 * If the caller specified in fault_info that
1463 * it wants a "stealth" fault, we also leave
1464 * the page in the speculative queue.
1465 */
1466 vm_page_lockspin_queues();
1467 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1468 vm_page_queues_remove(m, FALSE);
1469 }
1470 vm_page_unlock_queues();
1471 }
1472 assert(object == VM_PAGE_OBJECT(m));
1473
1474 if (object->code_signed) {
1475 /*
1476 * CODE SIGNING:
1477 * We just paged in a page from a signed
1478 * memory object but we don't need to
1479 * validate it now. We'll validate it if
1480 * when it gets mapped into a user address
1481 * space for the first time or when the page
1482 * gets copied to another object as a result
1483 * of a copy-on-write.
1484 */
1485 }
1486
1487 /*
1488 * We mark the page busy and leave it on
1489 * the pageout queues. If the pageout
1490 * daemon comes across it, then it will
1491 * remove the page from the queue, but not the object
1492 */
1493 #if TRACEFAULTPAGE
1494 dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1495 #endif
1496 assert(!m->vmp_busy);
1497 assert(!m->vmp_absent);
1498
1499 m->vmp_busy = TRUE;
1500 break;
1501 }
1502
1503 /*
1504 * we get here when there is no page present in the object at
1505 * the offset we're interested in... we'll allocate a page
1506 * at this point if the pager associated with
1507 * this object can provide the data or we're the top object...
1508 * object is locked; m == NULL
1509 */
1510
1511 if (must_be_resident) {
1512 if (fault_type == VM_PROT_NONE &&
1513 is_kernel_object(object)) {
1514 /*
1515 * We've been called from vm_fault_unwire()
1516 * while removing a map entry that was allocated
1517 * with KMA_KOBJECT and KMA_VAONLY. This page
1518 * is not present and there's nothing more to
1519 * do here (nothing to unwire).
1520 */
1521 vm_fault_cleanup(object, first_m);
1522 thread_interrupt_level(interruptible_state);
1523
1524 return VM_FAULT_MEMORY_ERROR;
1525 }
1526
1527 goto dont_look_for_page;
1528 }
1529
1530 /* Don't expect to fault pages into the kernel object. */
1531 assert(!is_kernel_object(object));
1532
1533 look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE));
1534
1535 #if TRACEFAULTPAGE
1536 dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
1537 #endif
1538 if (!look_for_page && object == first_object && !object->phys_contiguous) {
1539 /*
1540 * Allocate a new page for this object/offset pair as a placeholder
1541 */
1542 m = vm_page_grab_options(grab_options);
1543 #if TRACEFAULTPAGE
1544 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1545 #endif
1546 if (m == VM_PAGE_NULL) {
1547 vm_fault_cleanup(object, first_m);
1548 thread_interrupt_level(interruptible_state);
1549
1550 return VM_FAULT_MEMORY_SHORTAGE;
1551 }
1552
1553 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1554 vm_page_insert_internal(m, object,
1555 vm_object_trunc_page(offset),
1556 VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1557 } else {
1558 vm_page_insert(m, object, vm_object_trunc_page(offset));
1559 }
1560 }
1561 if (look_for_page) {
1562 kern_return_t rc;
1563 int my_fault_type;
1564
1565 /*
1566 * If the memory manager is not ready, we
1567 * cannot make requests.
1568 */
1569 if (!object->pager_ready) {
1570 #if TRACEFAULTPAGE
1571 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1572 #endif
1573 if (m != VM_PAGE_NULL) {
1574 VM_PAGE_FREE(m);
1575 }
1576
1577 /*
1578 * take an extra ref so object won't die
1579 */
1580 vm_object_reference_locked(object);
1581 vm_fault_cleanup(object, first_m);
1582
1583 vm_object_lock(object);
1584 assert(object->ref_count > 0);
1585
1586 if (!object->pager_ready) {
1587 wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible);
1588
1589 vm_object_unlock(object);
1590 if (wait_result == THREAD_WAITING) {
1591 wait_result = thread_block(THREAD_CONTINUE_NULL);
1592 }
1593 vm_object_deallocate(object);
1594
1595 goto backoff;
1596 } else {
1597 vm_object_unlock(object);
1598 vm_object_deallocate(object);
1599 thread_interrupt_level(interruptible_state);
1600
1601 return VM_FAULT_RETRY;
1602 }
1603 }
1604 if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1605 /*
1606 * If there are too many outstanding page
1607 * requests pending on this external object, we
1608 * wait for them to be resolved now.
1609 */
1610 #if TRACEFAULTPAGE
1611 dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1612 #endif
1613 if (m != VM_PAGE_NULL) {
1614 VM_PAGE_FREE(m);
1615 }
1616 /*
1617 * take an extra ref so object won't die
1618 */
1619 vm_object_reference_locked(object);
1620
1621 vm_fault_cleanup(object, first_m);
1622
1623 vm_object_lock(object);
1624 assert(object->ref_count > 0);
1625
1626 if (object->paging_in_progress >= vm_object_pagein_throttle) {
1627 vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
1628
1629 vm_object_unlock(object);
1630 wait_result = thread_block(THREAD_CONTINUE_NULL);
1631 vm_object_deallocate(object);
1632
1633 goto backoff;
1634 } else {
1635 vm_object_unlock(object);
1636 vm_object_deallocate(object);
1637 thread_interrupt_level(interruptible_state);
1638
1639 return VM_FAULT_RETRY;
1640 }
1641 }
1642 if (object->internal) {
1643 int compressed_count_delta;
1644
1645 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1646
1647 if (m == VM_PAGE_NULL) {
1648 /*
1649 * Allocate a new page for this object/offset pair as a placeholder
1650 */
1651 m = vm_page_grab_options(grab_options);
1652 #if TRACEFAULTPAGE
1653 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1654 #endif
1655 if (m == VM_PAGE_NULL) {
1656 vm_fault_cleanup(object, first_m);
1657 thread_interrupt_level(interruptible_state);
1658
1659 return VM_FAULT_MEMORY_SHORTAGE;
1660 }
1661
1662 m->vmp_absent = TRUE;
1663 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1664 vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1665 } else {
1666 vm_page_insert(m, object, vm_object_trunc_page(offset));
1667 }
1668 }
1669 assert(m->vmp_busy);
1670
1671 m->vmp_absent = TRUE;
1672 pager = object->pager;
1673
1674 assert(object->paging_in_progress > 0);
1675 vm_object_unlock(object);
1676
1677 rc = vm_compressor_pager_get(
1678 pager,
1679 offset + object->paging_offset,
1680 VM_PAGE_GET_PHYS_PAGE(m),
1681 &my_fault_type,
1682 0,
1683 &compressed_count_delta);
1684
1685 if (type_of_fault == NULL) {
1686 int throttle_delay;
1687
1688 /*
1689 * we weren't called from vm_fault, so we
1690 * need to apply page creation throttling
1691 * do it before we re-acquire any locks
1692 */
1693 if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1694 if ((throttle_delay = vm_page_throttled(TRUE))) {
1695 VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1696 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
1697 }
1698 }
1699 }
1700 vm_object_lock(object);
1701 assert(object->paging_in_progress > 0);
1702
1703 vm_compressor_pager_count(
1704 pager,
1705 compressed_count_delta,
1706 FALSE, /* shared_lock */
1707 object);
1708
1709 switch (rc) {
1710 case KERN_SUCCESS:
1711 m->vmp_absent = FALSE;
1712 m->vmp_dirty = TRUE;
1713 if ((object->wimg_bits &
1714 VM_WIMG_MASK) !=
1715 VM_WIMG_USE_DEFAULT) {
1716 /*
1717 * If the page is not cacheable,
1718 * we can't let its contents
1719 * linger in the data cache
1720 * after the decompression.
1721 */
1722 pmap_sync_page_attributes_phys(
1723 VM_PAGE_GET_PHYS_PAGE(m));
1724 } else {
1725 m->vmp_written_by_kernel = TRUE;
1726 }
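			/*
			 * when unmodified anonymous pages are being tracked,
			 * a page decompressed for a read-only fault is marked
			 * unmodified_ro and handed out without write
			 * permission, so a later write has to fault and can
			 * be noticed
			 */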
1727 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
1728 if ((fault_type & VM_PROT_WRITE) == 0) {
1729 vm_object_lock_assert_exclusive(object);
1730 vm_page_lockspin_queues();
1731 m->vmp_unmodified_ro = true;
1732 vm_page_unlock_queues();
1733 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
1734 *protection &= ~VM_PROT_WRITE;
1735 }
1736 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
1737
1738 /*
1739 * If the object is purgeable, its
1740 * owner's purgeable ledgers have been
1741 * updated in vm_page_insert() but the
1742 * page was also accounted for in a
1743 * "compressed purgeable" ledger, so
1744 * update that now.
1745 */
1746 if (((object->purgable !=
1747 VM_PURGABLE_DENY) ||
1748 object->vo_ledger_tag) &&
1749 (object->vo_owner !=
1750 NULL)) {
1751 /*
1752 * One less compressed
1753 * purgeable/tagged page.
1754 */
1755 if (compressed_count_delta) {
1756 vm_object_owner_compressed_update(
1757 object,
1758 -1);
1759 }
1760 }
1761
1762 break;
1763 case KERN_MEMORY_FAILURE:
1764 m->vmp_unusual = TRUE;
1765 m->vmp_error = TRUE;
1766 m->vmp_absent = FALSE;
1767 break;
1768 case KERN_MEMORY_ERROR:
1769 assert(m->vmp_absent);
1770 break;
1771 default:
1772 panic("vm_fault_page(): unexpected "
1773 "error %d from "
1774 "vm_compressor_pager_get()\n",
1775 rc);
1776 }
1777 PAGE_WAKEUP_DONE(m);
1778
1779 rc = KERN_SUCCESS;
1780 goto data_requested;
1781 }
1782 my_fault_type = DBG_PAGEIN_FAULT;
1783
1784 if (m != VM_PAGE_NULL) {
1785 VM_PAGE_FREE(m);
1786 m = VM_PAGE_NULL;
1787 }
1788
1789 #if TRACEFAULTPAGE
1790 dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */
1791 #endif
1792
1793 /*
1794 * It's possible someone called vm_object_destroy while we weren't
1795 * holding the object lock. If that has happened, then bail out
1796 * here.
1797 */
1798
1799 pager = object->pager;
1800
1801 if (pager == MEMORY_OBJECT_NULL) {
1802 vm_fault_cleanup(object, first_m);
1803 thread_interrupt_level(interruptible_state);
1804
1805 static const enum vm_subsys_error_codes object_destroy_errors[VM_OBJECT_DESTROY_MAX + 1] = {
1806 [VM_OBJECT_DESTROY_UNKNOWN_REASON] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER,
1807 [VM_OBJECT_DESTROY_FORCED_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_FORCED_UNMOUNT,
1808 [VM_OBJECT_DESTROY_UNGRAFT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNGRAFT,
1809 };
1810 enum vm_subsys_error_codes kdbg_code = object_destroy_errors[(vm_object_destroy_reason_t)object->no_pager_reason];
1811 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, kdbg_code), 0 /* arg */);
1812 return VM_FAULT_MEMORY_ERROR;
1813 }
1814
1815 /*
1816 * We have an absent page in place for the faulting offset,
1817 * so we can release the object lock.
1818 */
1819
1820 if (object->object_is_shared_cache) {
1821 token = thread_priority_floor_start();
1822 /*
1823 * A non-native shared cache object might
1824 * be getting set up in parallel with this
1825 * fault and so we can't assume that this
1826 * check will be valid after we drop the
1827 * object lock below.
1828 */
1829 drop_floor = true;
1830 }
1831
1832 vm_object_unlock(object);
1833
1834 /*
1835 * If this object uses a copy_call strategy,
1836 * and we are interested in a copy of this object
1837 * (having gotten here only by following a
1838 * shadow chain), then tell the memory manager
1839 * via a flag added to the desired_access
1840 * parameter, so that it can detect a race
1841 * between our walking down the shadow chain
1842 * and its pushing pages up into a copy of
1843 * the object that it manages.
1844 */
1845 if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1846 wants_copy_flag = VM_PROT_WANTS_COPY;
1847 } else {
1848 wants_copy_flag = VM_PROT_NONE;
1849 }
1850
1851 if (object->vo_copy == first_object) {
1852 /*
1853 * if we issue the memory_object_data_request in
1854 * this state, we are subject to a deadlock with
1855 * the underlying filesystem if it is trying to
1856 * shrink the file resulting in a push of pages
1857 * into the copy object... that push will stall
1858 * on the placeholder page, and if the pushing thread
1859 * is holding a lock that is required on the pagein
1860 * path (such as a truncate lock), we'll deadlock...
1861 * to avoid this potential deadlock, we throw away
1862 * our placeholder page before calling memory_object_data_request
1863 * and force this thread to retry the vm_fault_page after
1864 * we have issued the I/O. the second time through this path
1865 * we will find the page already in the cache (presumably still
1866 * busy waiting for the I/O to complete) and then complete
1867 * the fault w/o having to go through memory_object_data_request again
1868 */
1869 assert(first_m != VM_PAGE_NULL);
1870 assert(VM_PAGE_OBJECT(first_m) == first_object);
1871
1872 vm_object_lock(first_object);
1873 VM_PAGE_FREE(first_m);
1874 vm_object_paging_end(first_object);
1875 vm_object_unlock(first_object);
1876
1877 first_m = VM_PAGE_NULL;
1878 force_fault_retry = TRUE;
1879
1880 vm_fault_page_forced_retry++;
1881 }
1882
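		/*
		 * if we already issued a memory_object_data_request for this
		 * fault, limit any retry to a single page with random
		 * behavior; the original behavior and cluster size are
		 * restored once the request has been issued
		 */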
1883 if (data_already_requested == TRUE) {
1884 orig_behavior = fault_info->behavior;
1885 orig_cluster_size = fault_info->cluster_size;
1886
1887 fault_info->behavior = VM_BEHAVIOR_RANDOM;
1888 fault_info->cluster_size = PAGE_SIZE;
1889 }
1890 /*
1891 * Call the memory manager to retrieve the data.
1892 */
1893 rc = memory_object_data_request(
1894 pager,
1895 vm_object_trunc_page(offset) + object->paging_offset,
1896 PAGE_SIZE,
1897 access_required | wants_copy_flag,
1898 (memory_object_fault_info_t)fault_info);
1899
1900 if (data_already_requested == TRUE) {
1901 fault_info->behavior = orig_behavior;
1902 fault_info->cluster_size = orig_cluster_size;
1903 } else {
1904 data_already_requested = TRUE;
1905 }
1906
1907 DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1908 #if TRACEFAULTPAGE
1909 dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1910 #endif
1911 vm_object_lock(object);
1912
1913 if (drop_floor && object->object_is_shared_cache) {
1914 thread_priority_floor_end(&token);
1915 drop_floor = false;
1916 }
1917
1918 data_requested:
1919 if (rc != KERN_SUCCESS) {
1920 vm_fault_cleanup(object, first_m);
1921 thread_interrupt_level(interruptible_state);
1922
1923 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1924
1925 return (rc == MACH_SEND_INTERRUPTED) ?
1926 VM_FAULT_INTERRUPTED :
1927 VM_FAULT_MEMORY_ERROR;
1928 } else {
1929 clock_sec_t tv_sec;
1930 clock_usec_t tv_usec;
1931
1932 if (my_fault_type == DBG_PAGEIN_FAULT) {
1933 clock_get_system_microtime(&tv_sec, &tv_usec);
1934 current_thread()->t_page_creation_time = tv_sec;
1935 current_thread()->t_page_creation_count = 0;
1936 }
1937 }
1938 if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
1939 vm_fault_cleanup(object, first_m);
1940 thread_interrupt_level(interruptible_state);
1941
1942 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
1943 return VM_FAULT_INTERRUPTED;
1944 }
1945 if (force_fault_retry == TRUE) {
1946 vm_fault_cleanup(object, first_m);
1947 thread_interrupt_level(interruptible_state);
1948
1949 return VM_FAULT_RETRY;
1950 }
1951 if (m == VM_PAGE_NULL && object->phys_contiguous) {
1952 /*
1953 * No page here means that the object we
1954 * initially looked up was "physically
1955 * contiguous" (i.e. device memory). However,
1956 * with Virtual VRAM, the object might not
1957 * be backed by that device memory anymore,
1958 * so we're done here only if the object is
1959 * still "phys_contiguous".
1960 * Otherwise, if the object is no longer
1961 * "phys_contiguous", we need to retry the
1962 * page fault against the object's new backing
1963 * store (different memory object).
1964 */
1965 phys_contig_object:
1966 goto done;
1967 }
1968 /*
1969 * potentially a pagein fault
1970 * if we make it through the state checks
1971	 * above, then we'll count it as such
1972 */
1973 my_fault = my_fault_type;
1974
1975 /*
1976 * Retry with same object/offset, since new data may
1977 * be in a different page (i.e., m is meaningless at
1978 * this point).
1979 */
1980 continue;
1981 }
1982 dont_look_for_page:
1983 /*
1984 * We get here if the object has no pager, or an existence map
1985 * exists and indicates the page isn't present on the pager
1986 * or we're unwiring a page. If a pager exists, but there
1987 * is no existence map, then the m->vmp_absent case above handles
1988 * the ZF case when the pager can't provide the page
1989 */
1990 #if TRACEFAULTPAGE
1991 dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
1992 #endif
1993 if (object == first_object) {
1994 first_m = m;
1995 } else {
1996 assert(m == VM_PAGE_NULL);
1997 }
1998
1999 next_object = object->shadow;
2000
2001 if (next_object == VM_OBJECT_NULL) {
2002 /*
2003	 * we've hit the bottom of the shadow chain,
2004 * fill the page in the top object with zeros.
2005 */
2006 assert(!must_be_resident);
2007
2008 if (object != first_object) {
2009 vm_object_paging_end(object);
2010 vm_object_unlock(object);
2011
2012 object = first_object;
2013 offset = first_offset;
2014 vm_object_lock(object);
2015 }
2016 m = first_m;
2017 assert(VM_PAGE_OBJECT(m) == object);
2018 first_m = VM_PAGE_NULL;
2019
2020 /*
2021 * check for any conditions that prevent
2022 * us from creating a new zero-fill page
2023 * vm_fault_check will do all of the
2024 * fault cleanup in the case of an error condition
2025 * including resetting the thread_interrupt_level
2026 */
2027 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2028
2029 if (error != VM_FAULT_SUCCESS) {
2030 return error;
2031 }
2032
2033 if (m == VM_PAGE_NULL) {
2034 m = vm_page_grab_options(grab_options);
2035
2036 if (m == VM_PAGE_NULL) {
2037 vm_fault_cleanup(object, VM_PAGE_NULL);
2038 thread_interrupt_level(interruptible_state);
2039
2040 return VM_FAULT_MEMORY_SHORTAGE;
2041 }
2042 vm_page_insert(m, object, vm_object_trunc_page(offset));
2043 }
2044 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2045 m->vmp_absent = TRUE;
2046 clear_absent_on_error = true;
2047 }
2048
2049 my_fault = vm_fault_zero_page(m, no_zero_fill);
2050
2051 break;
2052 } else {
2053 /*
2054 * Move on to the next object. Lock the next
2055 * object before unlocking the current one.
2056 */
2057 if ((object != first_object) || must_be_resident) {
2058 vm_object_paging_end(object);
2059 }
2060
2061 offset += object->vo_shadow_offset;
2062 fault_info->lo_offset += object->vo_shadow_offset;
2063 fault_info->hi_offset += object->vo_shadow_offset;
2064 access_required = VM_PROT_READ;
2065
2066 vm_object_lock(next_object);
2067 vm_object_unlock(object);
2068
2069 object = next_object;
2070 vm_object_paging_begin(object);
2071 }
2072 }
2073
2074 /*
2075 * PAGE HAS BEEN FOUND.
2076 *
2077 * This page (m) is:
2078 * busy, so that we can play with it;
2079 * not absent, so that nobody else will fill it;
2080 * possibly eligible for pageout;
2081 *
2082 * The top-level page (first_m) is:
2083 * VM_PAGE_NULL if the page was found in the
2084 * top-level object;
2085 * busy, not absent, and ineligible for pageout.
2086 *
2087 * The current object (object) is locked. A paging
2088 * reference is held for the current and top-level
2089 * objects.
2090 */
2091
2092 #if TRACEFAULTPAGE
2093 dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2094 #endif
2095 #if EXTRA_ASSERTIONS
2096 assert(m->vmp_busy && !m->vmp_absent);
2097 assert((first_m == VM_PAGE_NULL) ||
2098 (first_m->vmp_busy && !first_m->vmp_absent &&
2099 !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2100 #endif /* EXTRA_ASSERTIONS */
2101
2102 /*
2103 * If the page is being written, but isn't
2104 * already owned by the top-level object,
2105 * we have to copy it into a new page owned
2106 * by the top-level object.
2107 */
2108 if (object != first_object) {
2109 #if TRACEFAULTPAGE
2110 dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2111 #endif
2112 if (fault_type & VM_PROT_WRITE) {
2113 vm_page_t copy_m;
2114
2115 /*
2116 * We only really need to copy if we
2117 * want to write it.
2118 */
2119 assert(!must_be_resident);
2120
2121 /*
2122 * If we try to collapse first_object at this
2123 * point, we may deadlock when we try to get
2124 * the lock on an intermediate object (since we
2125 * have the bottom object locked). We can't
2126 * unlock the bottom object, because the page
2127 * we found may move (by collapse) if we do.
2128 *
2129 * Instead, we first copy the page. Then, when
2130 * we have no more use for the bottom object,
2131 * we unlock it and try to collapse.
2132 *
2133 * Note that we copy the page even if we didn't
2134 * need to... that's the breaks.
2135 */
2136
2137 /*
2138 * Allocate a page for the copy
2139 */
2140 copy_m = vm_page_grab_options(grab_options);
2141
2142 if (copy_m == VM_PAGE_NULL) {
2143 RELEASE_PAGE(m);
2144
2145 vm_fault_cleanup(object, first_m);
2146 thread_interrupt_level(interruptible_state);
2147
2148 return VM_FAULT_MEMORY_SHORTAGE;
2149 }
2150
2151 vm_page_copy(m, copy_m);
2152
2153 /*
2154 * If another map is truly sharing this
2155 * page with us, we have to flush all
2156 * uses of the original page, since we
2157 * can't distinguish those which want the
2158 * original from those which need the
2159 * new copy.
2160 *
2161 * XXXO If we know that only one map has
2162 * access to this page, then we could
2163 * avoid the pmap_disconnect() call.
2164 */
2165 if (m->vmp_pmapped) {
2166 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2167 }
2168
2169 if (m->vmp_clustered) {
2170 VM_PAGE_COUNT_AS_PAGEIN(m);
2171 VM_PAGE_CONSUME_CLUSTERED(m);
2172 }
2173 assert(!m->vmp_cleaning);
2174
2175 /*
2176 * We no longer need the old page or object.
2177 */
2178 RELEASE_PAGE(m);
2179
2180 /*
2181	 * This check helps with marking the object as having a sequential pattern.
2182	 * Normally we'll miss doing this below because this fault is about COW to
2183	 * the first_object, i.e. bring the page in from disk, push to the object above, but
2184	 * don't update the file object's sequential pattern.
2185 */
2186 if (object->internal == FALSE) {
2187 vm_fault_is_sequential(object, offset, fault_info->behavior);
2188 }
2189
2190 vm_object_paging_end(object);
2191 vm_object_unlock(object);
2192
2193 my_fault = DBG_COW_FAULT;
2194 counter_inc(&vm_statistics_cow_faults);
2195 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2196	counter_inc(&current_task()->cow_faults);
2197
2198 object = first_object;
2199 offset = first_offset;
2200
2201 vm_object_lock(object);
2202 /*
2203 * get rid of the place holder
2204 * page that we soldered in earlier
2205 */
2206 VM_PAGE_FREE(first_m);
2207 first_m = VM_PAGE_NULL;
2208
2209 /*
2210 * and replace it with the
2211 * page we just copied into
2212 */
2213 assert(copy_m->vmp_busy);
2214 vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2215 SET_PAGE_DIRTY(copy_m, TRUE);
2216
2217 m = copy_m;
2218 /*
2219 * Now that we've gotten the copy out of the
2220 * way, let's try to collapse the top object.
2221 * But we have to play ugly games with
2222 * paging_in_progress to do that...
2223 */
2224 vm_object_paging_end(object);
2225 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2226 vm_object_paging_begin(object);
2227 } else {
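				/*
				 * read fault on a page that lives below the
				 * top object: hand it out read-only so that a
				 * later write will fault again and take the
				 * copy-on-write path above
				 */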
2228 *protection &= (~VM_PROT_WRITE);
2229 }
2230 }
2231 /*
2232 * Now check whether the page needs to be pushed into the
2233 * copy object. The use of asymmetric copy on write for
2234 * shared temporary objects means that we may do two copies to
2235 * satisfy the fault; one above to get the page from a
2236 * shadowed object, and one here to push it into the copy.
2237 */
2238 try_failed_count = 0;
2239
2240 while ((copy_object = first_object->vo_copy) != VM_OBJECT_NULL) {
2241 vm_object_offset_t copy_offset;
2242 vm_page_t copy_m;
2243
2244 #if TRACEFAULTPAGE
2245 dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2246 #endif
2247 /*
2248 * If the page is being written, but hasn't been
2249 * copied to the copy-object, we have to copy it there.
2250 */
2251 if ((fault_type & VM_PROT_WRITE) == 0) {
2252 *protection &= ~VM_PROT_WRITE;
2253 break;
2254 }
2255
2256 /*
2257 * If the page was guaranteed to be resident,
2258 * we must have already performed the copy.
2259 */
2260 if (must_be_resident) {
2261 break;
2262 }
2263
2264 /*
2265 * Try to get the lock on the copy_object.
2266 */
2267 if (!vm_object_lock_try(copy_object)) {
2268 vm_object_unlock(object);
2269 try_failed_count++;
2270
2271 mutex_pause(try_failed_count); /* wait a bit */
2272 vm_object_lock(object);
2273
2274 continue;
2275 }
2276 try_failed_count = 0;
2277
2278 /*
2279 * Make another reference to the copy-object,
2280 * to keep it from disappearing during the
2281 * copy.
2282 */
2283 vm_object_reference_locked(copy_object);
2284
2285 /*
2286 * Does the page exist in the copy?
2287 */
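		/*
		 * first_offset is expressed in first_object's space;
		 * subtracting the copy object's vo_shadow_offset translates
		 * it into the corresponding offset inside the copy object
		 * (which shadows first_object)
		 */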
2288 copy_offset = first_offset - copy_object->vo_shadow_offset;
2289 copy_offset = vm_object_trunc_page(copy_offset);
2290
2291 if (copy_object->vo_size <= copy_offset) {
2292 /*
2293 * Copy object doesn't cover this page -- do nothing.
2294 */
2295 ;
2296 } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2297 /*
2298 * Page currently exists in the copy object
2299 */
2300 if (copy_m->vmp_busy) {
2301 /*
2302 * If the page is being brought
2303 * in, wait for it and then retry.
2304 */
2305 RELEASE_PAGE(m);
2306
2307 /*
2308 * take an extra ref so object won't die
2309 */
2310 vm_object_reference_locked(copy_object);
2311 vm_object_unlock(copy_object);
2312 vm_fault_cleanup(object, first_m);
2313
2314 vm_object_lock(copy_object);
2315 assert(copy_object->ref_count > 0);
2316 vm_object_lock_assert_exclusive(copy_object);
2317 copy_object->ref_count--;
2318 assert(copy_object->ref_count > 0);
2319 copy_m = vm_page_lookup(copy_object, copy_offset);
2320
2321 if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2322 PAGE_ASSERT_WAIT(copy_m, interruptible);
2323
2324 vm_object_unlock(copy_object);
2325 wait_result = thread_block(THREAD_CONTINUE_NULL);
2326 vm_object_deallocate(copy_object);
2327
2328 goto backoff;
2329 } else {
2330 vm_object_unlock(copy_object);
2331 vm_object_deallocate(copy_object);
2332 thread_interrupt_level(interruptible_state);
2333
2334 return VM_FAULT_RETRY;
2335 }
2336 }
2337 } else if (!PAGED_OUT(copy_object, copy_offset)) {
2338 /*
2339 * If PAGED_OUT is TRUE, then the page used to exist
2340 * in the copy-object, and has already been paged out.
2341 * We don't need to repeat this. If PAGED_OUT is
2342 * FALSE, then either we don't know (!pager_created,
2343 * for example) or it hasn't been paged out.
2344 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2345 * We must copy the page to the copy object.
2346 *
2347 * Allocate a page for the copy
2348 */
2349 copy_m = vm_page_alloc(copy_object, copy_offset);
2350
2351 if (copy_m == VM_PAGE_NULL) {
2352 RELEASE_PAGE(m);
2353
2354 vm_object_lock_assert_exclusive(copy_object);
2355 copy_object->ref_count--;
2356 assert(copy_object->ref_count > 0);
2357
2358 vm_object_unlock(copy_object);
2359 vm_fault_cleanup(object, first_m);
2360 thread_interrupt_level(interruptible_state);
2361
2362 return VM_FAULT_MEMORY_SHORTAGE;
2363 }
2364 /*
2365 * Must copy page into copy-object.
2366 */
2367 vm_page_copy(m, copy_m);
2368
2369 /*
2370 * If the old page was in use by any users
2371 * of the copy-object, it must be removed
2372 * from all pmaps. (We can't know which
2373 * pmaps use it.)
2374 */
2375 if (m->vmp_pmapped) {
2376 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2377 }
2378
2379 if (m->vmp_clustered) {
2380 VM_PAGE_COUNT_AS_PAGEIN(m);
2381 VM_PAGE_CONSUME_CLUSTERED(m);
2382 }
2383 /*
2384 * If there's a pager, then immediately
2385 * page out this page, using the "initialize"
2386 * option. Else, we use the copy.
2387 */
2388 if ((!copy_object->pager_ready)
2389 || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2390 ) {
2391 vm_page_lockspin_queues();
2392 assert(!m->vmp_cleaning);
2393 vm_page_activate(copy_m);
2394 vm_page_unlock_queues();
2395
2396 SET_PAGE_DIRTY(copy_m, TRUE);
2397 PAGE_WAKEUP_DONE(copy_m);
2398 } else {
2399 assert(copy_m->vmp_busy == TRUE);
2400 assert(!m->vmp_cleaning);
2401
2402 /*
2403 * dirty is protected by the object lock
2404 */
2405 SET_PAGE_DIRTY(copy_m, TRUE);
2406
2407 /*
2408 * The page is already ready for pageout:
2409 * not on pageout queues and busy.
2410 * Unlock everything except the
2411 * copy_object itself.
2412 */
2413 vm_object_unlock(object);
2414
2415 /*
2416 * Write the page to the copy-object,
2417 * flushing it from the kernel.
2418 */
2419 vm_pageout_initialize_page(copy_m);
2420
2421 /*
2422 * Since the pageout may have
2423 * temporarily dropped the
2424 * copy_object's lock, we
2425 * check whether we'll have
2426 * to deallocate the hard way.
2427 */
2428 if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
2429 vm_object_unlock(copy_object);
2430 vm_object_deallocate(copy_object);
2431 vm_object_lock(object);
2432
2433 continue;
2434 }
2435 /*
2436 * Pick back up the old object's
2437 * lock. [It is safe to do so,
2438 * since it must be deeper in the
2439 * object tree.]
2440 */
2441 vm_object_lock(object);
2442 }
2443
2444 /*
2445 * Because we're pushing a page upward
2446 * in the object tree, we must restart
2447 * any faults that are waiting here.
2448 * [Note that this is an expansion of
2449 * PAGE_WAKEUP that uses the THREAD_RESTART
2450 * wait result]. Can't turn off the page's
2451 * busy bit because we're not done with it.
2452 */
2453 if (m->vmp_wanted) {
2454 m->vmp_wanted = FALSE;
2455 thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2456 }
2457 }
2458 /*
2459 * The reference count on copy_object must be
2460 * at least 2: one for our extra reference,
2461 * and at least one from the outside world
2462 * (we checked that when we last locked
2463 * copy_object).
2464 */
2465 vm_object_lock_assert_exclusive(copy_object);
2466 copy_object->ref_count--;
2467 assert(copy_object->ref_count > 0);
2468
2469 vm_object_unlock(copy_object);
2470
2471 break;
2472 }
2473
2474 done:
2475 *result_page = m;
2476 *top_page = first_m;
2477
2478 if (m != VM_PAGE_NULL) {
2479 assert(VM_PAGE_OBJECT(m) == object);
2480
2481 retval = VM_FAULT_SUCCESS;
2482
2483 if (my_fault == DBG_PAGEIN_FAULT) {
2484 VM_PAGE_COUNT_AS_PAGEIN(m);
2485
2486 if (object->internal) {
2487 my_fault = DBG_PAGEIND_FAULT;
2488 } else {
2489 my_fault = DBG_PAGEINV_FAULT;
2490 }
2491
2492 /*
2493 * evaluate access pattern and update state
2494 * vm_fault_deactivate_behind depends on the
2495 * state being up to date
2496 */
2497 vm_fault_is_sequential(object, offset, fault_info->behavior);
2498 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2499 } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2500 /*
2501 * we weren't called from vm_fault, so handle the
2502 * accounting here for hits in the cache
2503 */
2504 if (m->vmp_clustered) {
2505 VM_PAGE_COUNT_AS_PAGEIN(m);
2506 VM_PAGE_CONSUME_CLUSTERED(m);
2507 }
2508 vm_fault_is_sequential(object, offset, fault_info->behavior);
2509 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2510 } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2511 VM_STAT_DECOMPRESSIONS();
2512 }
2513 if (type_of_fault) {
2514 *type_of_fault = my_fault;
2515 }
2516 } else {
2517 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2518 retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2519 assert(first_m == VM_PAGE_NULL);
2520 assert(object == first_object);
2521 }
2522
2523 thread_interrupt_level(interruptible_state);
2524
2525 #if TRACEFAULTPAGE
2526 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
2527 #endif
2528 return retval;
2529
2530 backoff:
2531 thread_interrupt_level(interruptible_state);
2532
2533 if (wait_result == THREAD_INTERRUPTED) {
2534 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2535 return VM_FAULT_INTERRUPTED;
2536 }
2537 return VM_FAULT_RETRY;
2538
2539 #undef RELEASE_PAGE
2540 }
2541
2542 #if MACH_ASSERT && (XNU_PLATFORM_WatchOS || __x86_64__)
2543 #define PANIC_ON_CS_KILLED_DEFAULT true
2544 #else
2545 #define PANIC_ON_CS_KILLED_DEFAULT false
2546 #endif
2547 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2548 PANIC_ON_CS_KILLED_DEFAULT);
2549
2550 extern int proc_selfpid(void);
2551 extern char *proc_name_address(struct proc *p);
2552 extern char *proc_best_name(struct proc *);
2553 unsigned long cs_enter_tainted_rejected = 0;
2554 unsigned long cs_enter_tainted_accepted = 0;
2555
2556 /*
2557 * CODE SIGNING:
2558 * When soft faulting a page, we have to validate the page if:
2559 * 1. the page is being mapped in user space
2560 * 2. the page hasn't already been found to be "tainted"
2561 * 3. the page belongs to a code-signed object
2562 * 4. the page has not been validated yet or has been mapped for write.
2563 */
2564 static bool
2565 vm_fault_cs_need_validation(
2566 pmap_t pmap,
2567 vm_page_t page,
2568 vm_object_t page_obj,
2569 vm_map_size_t fault_page_size,
2570 vm_map_offset_t fault_phys_offset)
2571 {
2572 if (pmap == kernel_pmap) {
2573 /* 1 - not user space */
2574 return false;
2575 }
2576 if (!page_obj->code_signed) {
2577 /* 3 - page does not belong to a code-signed object */
2578 return false;
2579 }
2580 if (fault_page_size == PAGE_SIZE) {
2581 /* looking at the whole page */
2582 assertf(fault_phys_offset == 0,
2583 "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2584 (uint64_t)fault_page_size,
2585 (uint64_t)fault_phys_offset);
2586 if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2587 /* 2 - page is all tainted */
2588 return false;
2589 }
2590 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2591 !page->vmp_wpmapped) {
2592 /* 4 - already fully validated and never mapped writable */
2593 return false;
2594 }
2595 } else {
2596 /* looking at a specific sub-page */
2597 if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2598 /* 2 - sub-page was already marked as tainted */
2599 return false;
2600 }
2601 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2602 !page->vmp_wpmapped) {
2603 /* 4 - already validated and never mapped writable */
2604 return false;
2605 }
2606 }
2607 /* page needs to be validated */
2608 return true;
2609 }
2610
2611
2612 static bool
2613 vm_fault_cs_page_immutable(
2614 vm_page_t m,
2615 vm_map_size_t fault_page_size,
2616 vm_map_offset_t fault_phys_offset,
2617 vm_prot_t prot __unused)
2618 {
2619 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2620 /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2621 return true;
2622 }
2623 return false;
2624 }
2625
2626 static bool
2627 vm_fault_cs_page_nx(
2628 vm_page_t m,
2629 vm_map_size_t fault_page_size,
2630 vm_map_offset_t fault_phys_offset)
2631 {
2632 return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2633 }
2634
2635 /*
2636 * Check if the page being entered into the pmap violates code signing.
2637 */
2638 static kern_return_t
2639 vm_fault_cs_check_violation(
2640 bool cs_bypass,
2641 vm_object_t object,
2642 vm_page_t m,
2643 pmap_t pmap,
2644 vm_prot_t prot,
2645 vm_prot_t caller_prot,
2646 vm_map_size_t fault_page_size,
2647 vm_map_offset_t fault_phys_offset,
2648 vm_object_fault_info_t fault_info,
2649 bool map_is_switched,
2650 bool map_is_switch_protected,
2651 bool *cs_violation)
2652 {
2653 #if !CODE_SIGNING_MONITOR
2654 #pragma unused(caller_prot)
2655 #pragma unused(fault_info)
2656 #endif /* !CODE_SIGNING_MONITOR */
2657
2658 int cs_enforcement_enabled;
2659 if (!cs_bypass &&
2660 vm_fault_cs_need_validation(pmap, m, object,
2661 fault_page_size, fault_phys_offset)) {
2662 vm_object_lock_assert_exclusive(object);
2663
2664 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2665 vm_cs_revalidates++;
2666 }
2667
2668 /* VM map is locked, so 1 ref will remain on VM object -
2669 * so no harm if vm_page_validate_cs drops the object lock */
2670
2671 #if CODE_SIGNING_MONITOR
2672 if (fault_info->csm_associated &&
2673 csm_enabled() &&
2674 !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2675 !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
2676 !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
2677 (prot & VM_PROT_EXECUTE) &&
2678 (caller_prot & VM_PROT_EXECUTE)) {
2679 /*
2680 * When we have a code signing monitor, the monitor will evaluate the code signature
2681 * for any executable page mapping. No need for the VM to also validate the page.
2682 * In the code signing monitor we trust :)
2683 */
2684 vm_cs_defer_to_csm++;
2685 } else {
2686 vm_cs_defer_to_csm_not++;
2687 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2688 }
2689 #else /* CODE_SIGNING_MONITOR */
2690 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2691 #endif /* CODE_SIGNING_MONITOR */
2692 }
2693
2694 /* If the map is switched, and is switch-protected, we must protect
2695 * some pages from being write-faulted: immutable pages because by
2696 * definition they may not be written, and executable pages because that
2697 * would provide a way to inject unsigned code.
2698 * If the page is immutable, we can simply return. However, we can't
2699 * immediately determine whether a page is executable anywhere. But,
2700 * we can disconnect it everywhere and remove the executable protection
2701 * from the current map. We do that below right before we do the
2702 * PMAP_ENTER.
2703 */
2704 if (pmap == kernel_pmap) {
2705 /* kernel fault: cs_enforcement does not apply */
2706 cs_enforcement_enabled = 0;
2707 } else {
2708 cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2709 }
2710
2711 if (cs_enforcement_enabled && map_is_switched &&
2712 map_is_switch_protected &&
2713 vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2714 (prot & VM_PROT_WRITE)) {
2715 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2716 return KERN_CODESIGN_ERROR;
2717 }
2718
2719 if (cs_enforcement_enabled &&
2720 vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2721 (prot & VM_PROT_EXECUTE)) {
2722 if (cs_debug) {
2723 printf("page marked to be NX, not letting it be mapped EXEC\n");
2724 }
2725 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2726 return KERN_CODESIGN_ERROR;
2727 }
2728
2729 /* A page could be tainted, or pose a risk of being tainted later.
2730 * Check whether the receiving process wants it, and make it feel
2731	 * the consequences (that happens in cs_invalid_page()).
2732 * For CS Enforcement, two other conditions will
2733 * cause that page to be tainted as well:
2734 * - pmapping an unsigned page executable - this means unsigned code;
2735 * - writeable mapping of a validated page - the content of that page
2736 * can be changed without the kernel noticing, therefore unsigned
2737 * code can be created
2738 */
2739 if (cs_bypass) {
2740 /* code-signing is bypassed */
2741 *cs_violation = FALSE;
2742 } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2743 /* tainted page */
2744 *cs_violation = TRUE;
2745 } else if (!cs_enforcement_enabled) {
2746 /* no further code-signing enforcement */
2747 *cs_violation = FALSE;
2748 } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2749 ((prot & VM_PROT_WRITE) ||
2750 m->vmp_wpmapped)) {
2751 /*
2752 * The page should be immutable, but is in danger of being
2753 * modified.
2754 * This is the case where we want policy from the code
2755 * directory - is the page immutable or not? For now we have
2756 * to assume that code pages will be immutable, data pages not.
2757 * We'll assume a page is a code page if it has a code directory
2758 * and we fault for execution.
2759 * That is good enough since if we faulted the code page for
2760 * writing in another map before, it is wpmapped; if we fault
2761 * it for writing in this map later it will also be faulted for
2762 * executing at the same time; and if we fault for writing in
2763 * another map later, we will disconnect it from this pmap so
2764 * we'll notice the change.
2765 */
2766 *cs_violation = TRUE;
2767 } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2768 (prot & VM_PROT_EXECUTE)
2769 #if CODE_SIGNING_MONITOR
2770 /*
2771 * Executable pages will be validated by the code signing monitor. If the
2772 * code signing monitor is turned off, then this is a code-signing violation.
2773 */
2774 && !csm_enabled()
2775 #endif /* CODE_SIGNING_MONITOR */
2776 ) {
2777 *cs_violation = TRUE;
2778 } else {
2779 *cs_violation = FALSE;
2780 }
2781 return KERN_SUCCESS;
2782 }
2783
2784 /*
2785 * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2786 * @param must_disconnect This value will be set to true if the caller must disconnect
2787 * this page.
2788 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2789 */
2790 static kern_return_t
2791 vm_fault_cs_handle_violation(
2792 vm_object_t object,
2793 vm_page_t m,
2794 pmap_t pmap,
2795 vm_prot_t prot,
2796 vm_map_offset_t vaddr,
2797 vm_map_size_t fault_page_size,
2798 vm_map_offset_t fault_phys_offset,
2799 bool map_is_switched,
2800 bool map_is_switch_protected,
2801 bool *must_disconnect)
2802 {
2803 #if !MACH_ASSERT
2804 #pragma unused(pmap)
2805 #pragma unused(map_is_switch_protected)
2806 #endif /* !MACH_ASSERT */
2807 /*
2808 * We will have a tainted page. Have to handle the special case
2809 * of a switched map now. If the map is not switched, standard
2810 * procedure applies - call cs_invalid_page().
2811 * If the map is switched, the real owner is invalid already.
2812 * There is no point in invalidating the switching process since
2813 * it will not be executing from the map. So we don't call
2814 * cs_invalid_page() in that case.
2815 */
2816 boolean_t reject_page, cs_killed;
2817 kern_return_t kr;
2818 if (map_is_switched) {
2819 assert(pmap == vm_map_pmap(current_thread()->map));
2820 assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2821 reject_page = FALSE;
2822 } else {
2823 if (cs_debug > 5) {
2824 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2825 object->code_signed ? "yes" : "no",
2826 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2827 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2828 m->vmp_wpmapped ? "yes" : "no",
2829 (int)prot);
2830 }
2831 reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2832 }
2833
2834 if (reject_page) {
2835 /* reject the invalid page: abort the page fault */
2836 int pid;
2837 const char *procname;
2838 task_t task;
2839 vm_object_t file_object, shadow;
2840 vm_object_offset_t file_offset;
2841 char *pathname, *filename;
2842 vm_size_t pathname_len, filename_len;
2843 boolean_t truncated_path;
2844 #define __PATH_MAX 1024
2845 struct timespec mtime, cs_mtime;
2846 int shadow_depth;
2847 os_reason_t codesigning_exit_reason = OS_REASON_NULL;
2848
2849 kr = KERN_CODESIGN_ERROR;
2850 cs_enter_tainted_rejected++;
2851
2852 /* get process name and pid */
2853 procname = "?";
2854 task = current_task();
2855 pid = proc_selfpid();
2856 if (get_bsdtask_info(task) != NULL) {
2857 procname = proc_name_address(get_bsdtask_info(task));
2858 }
2859
2860 /* get file's VM object */
2861 file_object = object;
2862 file_offset = m->vmp_offset;
2863 for (shadow = file_object->shadow,
2864 shadow_depth = 0;
2865 shadow != VM_OBJECT_NULL;
2866 shadow = file_object->shadow,
2867 shadow_depth++) {
2868 vm_object_lock_shared(shadow);
2869 if (file_object != object) {
2870 vm_object_unlock(file_object);
2871 }
2872 file_offset += file_object->vo_shadow_offset;
2873 file_object = shadow;
2874 }
2875
2876 mtime.tv_sec = 0;
2877 mtime.tv_nsec = 0;
2878 cs_mtime.tv_sec = 0;
2879 cs_mtime.tv_nsec = 0;
2880
2881 /* get file's pathname and/or filename */
2882 pathname = NULL;
2883 filename = NULL;
2884 pathname_len = 0;
2885 filename_len = 0;
2886 truncated_path = FALSE;
2887 /* no pager -> no file -> no pathname, use "<nil>" in that case */
2888 if (file_object->pager != NULL) {
2889 pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2890 if (pathname) {
2891 pathname[0] = '\0';
2892 pathname_len = __PATH_MAX;
2893 filename = pathname + pathname_len;
2894 filename_len = __PATH_MAX;
2895
2896 if (vnode_pager_get_object_name(file_object->pager,
2897 pathname,
2898 pathname_len,
2899 filename,
2900 filename_len,
2901 &truncated_path) == KERN_SUCCESS) {
2902 /* safety first... */
2903 pathname[__PATH_MAX - 1] = '\0';
2904 filename[__PATH_MAX - 1] = '\0';
2905
2906 vnode_pager_get_object_mtime(file_object->pager,
2907 &mtime,
2908 &cs_mtime);
2909 } else {
2910 kfree_data(pathname, __PATH_MAX * 2);
2911 pathname = NULL;
2912 filename = NULL;
2913 pathname_len = 0;
2914 filename_len = 0;
2915 truncated_path = FALSE;
2916 }
2917 }
2918 }
2919 printf("CODE SIGNING: process %d[%s]: "
2920 "rejecting invalid page at address 0x%llx "
2921 "from offset 0x%llx in file \"%s%s%s\" "
2922 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2923 "(signed:%d validated:%d tainted:%d nx:%d "
2924 "wpmapped:%d dirty:%d depth:%d)\n",
2925 pid, procname, (addr64_t) vaddr,
2926 file_offset,
2927 (pathname ? pathname : "<nil>"),
2928 (truncated_path ? "/.../" : ""),
2929 (truncated_path ? filename : ""),
2930 cs_mtime.tv_sec, cs_mtime.tv_nsec,
2931 ((cs_mtime.tv_sec == mtime.tv_sec &&
2932 cs_mtime.tv_nsec == mtime.tv_nsec)
2933 ? "=="
2934 : "!="),
2935 mtime.tv_sec, mtime.tv_nsec,
2936 object->code_signed,
2937 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
2938 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
2939 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
2940 m->vmp_wpmapped,
2941 m->vmp_dirty,
2942 shadow_depth);
2943
2944 /*
2945 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
2946 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
2947 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
2948 * will deal with the segmentation fault.
2949 */
2950 if (cs_killed) {
2951 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2952 pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
2953
2954 codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
2955 if (codesigning_exit_reason == NULL) {
2956 printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
2957 } else {
2958 mach_vm_address_t data_addr = 0;
2959 struct codesigning_exit_reason_info *ceri = NULL;
2960 uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
2961
2962 if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
2963 printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
2964 } else {
2965 if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
2966 EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
2967 ceri = (struct codesigning_exit_reason_info *)data_addr;
2968 static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
2969
2970 ceri->ceri_virt_addr = vaddr;
2971 ceri->ceri_file_offset = file_offset;
2972 if (pathname) {
2973 strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
2974 } else {
2975 ceri->ceri_pathname[0] = '\0';
2976 }
2977 if (filename) {
2978 strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
2979 } else {
2980 ceri->ceri_filename[0] = '\0';
2981 }
2982 ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
2983 ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
2984 ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
2985 ceri->ceri_page_modtime_secs = mtime.tv_sec;
2986 ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
2987 ceri->ceri_object_codesigned = (object->code_signed);
2988 ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
2989 ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
2990 ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2991 ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
2992 ceri->ceri_page_slid = 0;
2993 ceri->ceri_page_dirty = (m->vmp_dirty);
2994 ceri->ceri_page_shadow_depth = shadow_depth;
2995 } else {
2996 #if DEBUG || DEVELOPMENT
2997 panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
2998 #else
2999 printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
3000 #endif /* DEBUG || DEVELOPMENT */
3001 /* Free the buffer */
3002 os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
3003 }
3004 }
3005 }
3006
3007 set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
3008 }
3009 if (panic_on_cs_killed &&
3010 object->object_is_shared_cache) {
3011 char *tainted_contents;
3012 vm_map_offset_t src_vaddr;
3013 src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
3014 tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
3015 bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
3016 printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
3017 panic("CODE SIGNING: process %d[%s]: "
3018 "rejecting invalid page (phys#0x%x) at address 0x%llx "
3019 "from offset 0x%llx in file \"%s%s%s\" "
3020 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3021 "(signed:%d validated:%d tainted:%d nx:%d"
3022 "wpmapped:%d dirty:%d depth:%d)\n",
3023 pid, procname,
3024 VM_PAGE_GET_PHYS_PAGE(m),
3025 (addr64_t) vaddr,
3026 file_offset,
3027 (pathname ? pathname : "<nil>"),
3028 (truncated_path ? "/.../" : ""),
3029 (truncated_path ? filename : ""),
3030 cs_mtime.tv_sec, cs_mtime.tv_nsec,
3031 ((cs_mtime.tv_sec == mtime.tv_sec &&
3032 cs_mtime.tv_nsec == mtime.tv_nsec)
3033 ? "=="
3034 : "!="),
3035 mtime.tv_sec, mtime.tv_nsec,
3036 object->code_signed,
3037 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3038 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3039 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3040 m->vmp_wpmapped,
3041 m->vmp_dirty,
3042 shadow_depth);
3043 }
3044
3045 if (file_object != object) {
3046 vm_object_unlock(file_object);
3047 }
3048 if (pathname_len != 0) {
3049 kfree_data(pathname, __PATH_MAX * 2);
3050 pathname = NULL;
3051 filename = NULL;
3052 }
3053 } else {
3054 /* proceed with the invalid page */
3055 kr = KERN_SUCCESS;
3056 if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3057 !object->code_signed) {
3058 /*
3059 * This page has not been (fully) validated but
3060 * does not belong to a code-signed object
3061 * so it should not be forcefully considered
3062 * as tainted.
3063 * We're just concerned about it here because
3064 * we've been asked to "execute" it but that
3065 * does not mean that it should cause other
3066 * accesses to fail.
3067 * This happens when a debugger sets a
3068 * breakpoint and we then execute code in
3069 * that page. Marking the page as "tainted"
3070 * would cause any inspection tool ("leaks",
3071 * "vmmap", "CrashReporter", ...) to get killed
3072 * due to code-signing violation on that page,
3073 * even though they're just reading it and not
3074 * executing from it.
3075 */
3076 } else {
3077 /*
3078 * Page might have been tainted before or not;
3079 * now it definitively is. If the page wasn't
3080 * tainted, we must disconnect it from all
3081 * pmaps later, to force existing mappings
3082 * through that code path for re-consideration
3083 * of the validity of that page.
3084 */
3085 if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3086 *must_disconnect = TRUE;
3087 VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3088 }
3089 }
3090 cs_enter_tainted_accepted++;
3091 }
3092 if (kr != KERN_SUCCESS) {
3093 if (cs_debug) {
3094 printf("CODESIGNING: vm_fault_enter(0x%llx): "
3095 "*** INVALID PAGE ***\n",
3096 (long long)vaddr);
3097 }
3098 #if !SECURE_KERNEL
3099 if (cs_enforcement_panic) {
3100 panic("CODESIGNING: panicking on invalid page");
3101 }
3102 #endif
3103 }
3104 return kr;
3105 }
3106
3107 /*
3108 * Check that the code signature is valid for the given page being inserted into
3109 * the pmap.
3110 *
3111 * @param must_disconnect This value will be set to true if the caller must disconnect
3112 * this page.
3113 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3114 */
3115 static kern_return_t
3116 vm_fault_validate_cs(
3117 bool cs_bypass,
3118 vm_object_t object,
3119 vm_page_t m,
3120 pmap_t pmap,
3121 vm_map_offset_t vaddr,
3122 vm_prot_t prot,
3123 vm_prot_t caller_prot,
3124 vm_map_size_t fault_page_size,
3125 vm_map_offset_t fault_phys_offset,
3126 vm_object_fault_info_t fault_info,
3127 bool *must_disconnect)
3128 {
3129 bool map_is_switched, map_is_switch_protected, cs_violation;
3130 kern_return_t kr;
3131 /* Validate code signature if necessary. */
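	/*
	 * the map is considered "switched" when this pmap belongs to the
	 * thread's current map but not to its task's map, i.e. the thread has
	 * temporarily switched into another address space
	 */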
3132 map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3133 (pmap == vm_map_pmap(current_thread()->map)));
3134 map_is_switch_protected = current_thread()->map->switch_protect;
3135 kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3136 prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3137 map_is_switched, map_is_switch_protected, &cs_violation);
3138 if (kr != KERN_SUCCESS) {
3139 return kr;
3140 }
3141 if (cs_violation) {
3142 kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3143 fault_page_size, fault_phys_offset,
3144 map_is_switched, map_is_switch_protected, must_disconnect);
3145 }
3146 return kr;
3147 }
3148
3149 /*
3150 * Enqueue the page on the appropriate paging queue.
3151 */
3152 static void
3153 vm_fault_enqueue_page(
3154 vm_object_t object,
3155 vm_page_t m,
3156 bool wired,
3157 bool change_wiring,
3158 vm_tag_t wire_tag,
3159 bool no_cache,
3160 int *type_of_fault,
3161 kern_return_t kr)
3162 {
3163 assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3164 boolean_t page_queues_locked = FALSE;
3165 boolean_t previously_pmapped = m->vmp_pmapped;
3166 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
3167 MACRO_BEGIN \
3168 if (! page_queues_locked) { \
3169 page_queues_locked = TRUE; \
3170 vm_page_lockspin_queues(); \
3171 } \
3172 MACRO_END
3173 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
3174 MACRO_BEGIN \
3175 if (page_queues_locked) { \
3176 page_queues_locked = FALSE; \
3177 vm_page_unlock_queues(); \
3178 } \
3179 MACRO_END
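	/*
	 * these helpers take the page queues lock lazily, only when a queue
	 * manipulation actually requires it; the matching unlock at the end
	 * of the function drops it if it was ever taken
	 */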
3180
3181 vm_page_update_special_state(m);
3182 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3183 /*
3184 * Compressor pages are neither wired
3185 * nor pageable and should never change.
3186 */
3187 assert(object == compressor_object);
3188 } else if (change_wiring) {
3189 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3190
3191 if (wired) {
3192 if (kr == KERN_SUCCESS) {
3193 vm_page_wire(m, wire_tag, TRUE);
3194 }
3195 } else {
3196 vm_page_unwire(m, TRUE);
3197 }
3198 /* we keep the page queues lock, if we need it later */
3199 } else {
3200 if (object->internal == TRUE) {
3201 /*
3202 * don't allow anonymous pages on
3203 * the speculative queues
3204 */
3205 no_cache = FALSE;
3206 }
3207 if (kr != KERN_SUCCESS) {
3208 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3209 vm_page_deactivate(m);
3210 /* we keep the page queues lock, if we need it later */
3211 } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3212 (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3213 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3214 ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3215 !VM_PAGE_WIRED(m)) {
3216 if (vm_page_local_q &&
3217 (*type_of_fault == DBG_COW_FAULT ||
3218 *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3219 struct vpl *lq;
3220 uint32_t lid;
3221
3222 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3223
3224 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3225 vm_object_lock_assert_exclusive(object);
3226
3227 /*
3228 * we got a local queue to stuff this
3229 * new page on...
3230	 * it's safe to manipulate local and
3231 * local_id at this point since we're
3232 * behind an exclusive object lock and
3233 * the page is not on any global queue.
3234 *
3235 * we'll use the current cpu number to
3236	 * select the queue... note that we don't
3237 * need to disable preemption... we're
3238 * going to be behind the local queue's
3239 * lock to do the real work
3240 */
3241 lid = cpu_number();
3242
3243 lq = zpercpu_get_cpu(vm_page_local_q, lid);
3244
3245 VPL_LOCK(&lq->vpl_lock);
3246
3247 vm_page_check_pageable_safe(m);
3248 vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3249 m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3250 m->vmp_local_id = lid;
3251 lq->vpl_count++;
3252
3253 if (object->internal) {
3254 lq->vpl_internal_count++;
3255 } else {
3256 lq->vpl_external_count++;
3257 }
3258
3259 VPL_UNLOCK(&lq->vpl_lock);
3260
3261 if (lq->vpl_count > vm_page_local_q_soft_limit) {
3262 /*
3263 * we're beyond the soft limit
3264 * for the local queue
3265 * vm_page_reactivate_local will
3266 * 'try' to take the global page
3267 * queue lock... if it can't
3268 * that's ok... we'll let the
3269 * queue continue to grow up
3270 * to the hard limit... at that
3271 * point we'll wait for the
3272 * lock... once we've got the
3273 * lock, we'll transfer all of
3274 * the pages from the local
3275 * queue to the global active
3276 * queue
3277 */
3278 vm_page_reactivate_local(lid, FALSE, FALSE);
3279 }
3280 } else {
3281 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3282
3283 /*
3284 * test again now that we hold the
3285 * page queue lock
3286 */
3287 if (!VM_PAGE_WIRED(m)) {
3288 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3289 vm_page_queues_remove(m, FALSE);
3290
3291 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3292 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3293 }
3294
3295 if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3296 no_cache) {
3297 /*
3298 * If this is a no_cache mapping
3299 * and the page has never been
3300 * mapped before or was
3301 * previously a no_cache page,
3302 * then we want to leave pages
3303 * in the speculative state so
3304 * that they can be readily
3305 * recycled if free memory runs
3306 * low. Otherwise the page is
3307 * activated as normal.
3308 */
3309
3310 if (no_cache &&
3311 (!previously_pmapped ||
3312 m->vmp_no_cache)) {
3313 m->vmp_no_cache = TRUE;
3314
3315 if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3316 vm_page_speculate(m, FALSE);
3317 }
3318 } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3319 vm_page_activate(m);
3320 }
3321 }
3322 }
3323 /* we keep the page queues lock, if we need it later */
3324 }
3325 }
3326 }
3327 /* we're done with the page queues lock, if we ever took it */
3328 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3329 }
3330
3331 /*
3332	 * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3333	 * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys
3334 * before being inserted into the pmap.
3335 */
3336 static bool
3337 vm_fault_enter_set_mapped(
3338 vm_object_t object,
3339 vm_page_t m,
3340 vm_prot_t prot,
3341 vm_prot_t fault_type)
3342 {
3343 bool page_needs_sync = false;
3344 /*
3345 * NOTE: we may only hold the vm_object lock SHARED
3346 * at this point, so we need the phys_page lock to
3347 * properly serialize updating the pmapped and
3348 * xpmapped bits
3349 */
3350 if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3351 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3352
3353 pmap_lock_phys_page(phys_page);
3354 m->vmp_pmapped = TRUE;
3355
3356 if (!m->vmp_xpmapped) {
3357 m->vmp_xpmapped = TRUE;
3358
3359 pmap_unlock_phys_page(phys_page);
3360
3361 if (!object->internal) {
3362 OSAddAtomic(1, &vm_page_xpmapped_external_count);
3363 }
3364
3365 #if defined(__arm64__)
3366 page_needs_sync = true;
3367 #else
3368 if (object->internal &&
3369 object->pager != NULL) {
3370 /*
3371 * This page could have been
3372 * uncompressed by the
3373 * compressor pager and its
3374 * contents might be only in
3375 * the data cache.
3376 * Since it's being mapped for
3377	 * "execute" for the first time,
3378 * make sure the icache is in
3379 * sync.
3380 */
3381 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3382 page_needs_sync = true;
3383 }
3384 #endif
3385 } else {
3386 pmap_unlock_phys_page(phys_page);
3387 }
3388 } else {
3389 if (m->vmp_pmapped == FALSE) {
3390 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3391
3392 pmap_lock_phys_page(phys_page);
3393 m->vmp_pmapped = TRUE;
3394 pmap_unlock_phys_page(phys_page);
3395 }
3396 }
3397
3398 if (fault_type & VM_PROT_WRITE) {
3399 if (m->vmp_wpmapped == FALSE) {
3400 vm_object_lock_assert_exclusive(object);
3401 if (!object->internal && object->pager) {
3402 task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3403 }
3404 m->vmp_wpmapped = TRUE;
3405 }
3406 }
3407 return page_needs_sync;
3408 }
3409
3410 /*
3411	 * wrapper for pmap_enter_options(): refuses to map a page flagged with an
3411	 * error and passes the object's internal/reusable attributes along as
3411	 * extra pmap options
3412 */
3413 static kern_return_t
3414 pmap_enter_options_check(
3415 pmap_t pmap,
3416 vm_map_address_t virtual_address,
3417 vm_map_offset_t fault_phys_offset,
3418 vm_page_t page,
3419 vm_prot_t protection,
3420 vm_prot_t fault_type,
3421 unsigned int flags,
3422 boolean_t wired,
3423 unsigned int options)
3424 {
3425 int extra_options = 0;
3426 vm_object_t obj;
3427
3428 if (page->vmp_error) {
3429 return KERN_MEMORY_FAILURE;
3430 }
3431 obj = VM_PAGE_OBJECT(page);
3432 if (obj->internal) {
3433 extra_options |= PMAP_OPTIONS_INTERNAL;
3434 }
3435 if (page->vmp_reusable || obj->all_reusable) {
3436 extra_options |= PMAP_OPTIONS_REUSABLE;
3437 }
3438 return pmap_enter_options_addr(pmap,
3439 virtual_address,
3440 (pmap_paddr_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + fault_phys_offset,
3441 protection,
3442 fault_type,
3443 flags,
3444 wired,
3445 options | extra_options,
3446 NULL,
3447 PMAP_MAPPING_TYPE_INFER);
3448 }
3449
3450 /*
3451 * Try to enter the given page into the pmap.
3452 * Will retry without execute permission if the code signing monitor is enabled and
3453 * we encounter a codesigning failure on a non-execute fault.
3454 */
3455 static kern_return_t
3456 vm_fault_attempt_pmap_enter(
3457 pmap_t pmap,
3458 vm_map_offset_t vaddr,
3459 vm_map_size_t fault_page_size,
3460 vm_map_offset_t fault_phys_offset,
3461 vm_page_t m,
3462 vm_prot_t *prot,
3463 vm_prot_t caller_prot,
3464 vm_prot_t fault_type,
3465 bool wired,
3466 int pmap_options)
3467 {
3468 #if !CODE_SIGNING_MONITOR
3469 #pragma unused(caller_prot)
3470 #endif /* !CODE_SIGNING_MONITOR */
3471
3472 kern_return_t kr;
3473 if (fault_page_size != PAGE_SIZE) {
3474 DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3475 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3476 fault_phys_offset < PAGE_SIZE),
3477 "0x%llx\n", (uint64_t)fault_phys_offset);
3478 } else {
3479 assertf(fault_phys_offset == 0,
3480 "0x%llx\n", (uint64_t)fault_phys_offset);
3481 }
3482
3483 kr = pmap_enter_options_check(pmap, vaddr,
3484 fault_phys_offset,
3485 m, *prot, fault_type, 0,
3486 wired,
3487 pmap_options);
3488
3489 #if CODE_SIGNING_MONITOR
3490 /*
3491 * Retry without execute permission if we encountered a codesigning
3492 * failure on a non-execute fault. This allows applications which
3493 * don't actually need to execute code to still map it for read access.
3494 */
3495 if (kr == KERN_CODESIGN_ERROR &&
3496 csm_enabled() &&
3497 (*prot & VM_PROT_EXECUTE) &&
3498 !(caller_prot & VM_PROT_EXECUTE)) {
3499 *prot &= ~VM_PROT_EXECUTE;
3500 kr = pmap_enter_options_check(pmap, vaddr,
3501 fault_phys_offset,
3502 m, *prot, fault_type, 0,
3503 wired,
3504 pmap_options);
3505 }
3506 #endif /* CODE_SIGNING_MONITOR */
3507
3508 return kr;
3509 }
3510
3511 /*
3512 * Enter the given page into the pmap.
3513 * The map must be locked shared.
3514 * The vm object must NOT be locked.
3515 *
3516 * @param need_retry if not null, avoid making a (potentially) blocking call into
3517 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3518 */
3519 static kern_return_t
3520 vm_fault_pmap_enter(
3521 pmap_t pmap,
3522 vm_map_offset_t vaddr,
3523 vm_map_size_t fault_page_size,
3524 vm_map_offset_t fault_phys_offset,
3525 vm_page_t m,
3526 vm_prot_t *prot,
3527 vm_prot_t caller_prot,
3528 vm_prot_t fault_type,
3529 bool wired,
3530 int pmap_options,
3531 boolean_t *need_retry)
3532 {
3533 kern_return_t kr;
3534 if (need_retry != NULL) {
3535 /*
3536 * Although we don't hold a lock on this object, we hold a lock
3537 * on the top object in the chain. To prevent a deadlock, we
3538 * can't allow the pmap layer to block.
3539 */
3540 pmap_options |= PMAP_OPTIONS_NOWAIT;
3541 }
3542 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3543 fault_page_size, fault_phys_offset,
3544 m, prot, caller_prot, fault_type, wired, pmap_options);
3545 if (kr == KERN_RESOURCE_SHORTAGE) {
3546 if (need_retry) {
3547 /*
3548 * There's nothing we can do here since we hold the
3549 * lock on the top object in the chain. The caller
3550 * will need to deal with this by dropping that lock and retrying.
3551 */
3552 *need_retry = TRUE;
3553 vm_pmap_enter_retried++;
3554 }
3555 }
3556 return kr;
3557 }
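/*
 * Caller-side sketch (hedged; this mirrors how vm_fault_internal() further
 * down reacts to "need_retry"): a KERN_RESOURCE_SHORTAGE surfaced through
 * *need_retry is handled by dropping all locks, pre-expanding the page table
 * with a PMAP_OPTIONS_NOENTER call, and re-driving the fault:
 *
 *	if (need_retry) {
 *		(void)pmap_enter_options(pmap, vaddr, 0, 0, 0, 0, 0,
 *		    PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
 *		goto RetryFault;
 *	}
 */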
3558
3559 /*
3560 * Enter the given page into the pmap.
3561 * The vm map must be locked shared.
3562 * The vm object must be locked exclusive, unless this is a soft fault.
3563 * For a soft fault, the object must be locked shared or exclusive.
3564 *
3565 * @param need_retry if not null, avoid making a (potentially) blocking call into
3566 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3567 */
3568 static kern_return_t
3569 vm_fault_pmap_enter_with_object_lock(
3570 vm_object_t object,
3571 pmap_t pmap,
3572 vm_map_offset_t vaddr,
3573 vm_map_size_t fault_page_size,
3574 vm_map_offset_t fault_phys_offset,
3575 vm_page_t m,
3576 vm_prot_t *prot,
3577 vm_prot_t caller_prot,
3578 vm_prot_t fault_type,
3579 bool wired,
3580 int pmap_options,
3581 boolean_t *need_retry,
3582 uint8_t *object_lock_type)
3583 {
3584 kern_return_t kr;
3585 /*
3586 * Prevent a deadlock by not
3587 * holding the object lock if we need to wait for a page in
3588 * pmap_enter() - <rdar://problem/7138958>
3589 */
3590 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3591 fault_page_size, fault_phys_offset,
3592 m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3593 #if __x86_64__
3594 if (kr == KERN_INVALID_ARGUMENT &&
3595 pmap == PMAP_NULL &&
3596 wired) {
3597 /*
3598 * Wiring a page in a pmap-less VM map:
3599 * VMware's "vmmon" kernel extension does this
3600 * to grab pages.
3601 * Let it proceed even though the PMAP_ENTER() failed.
3602 */
3603 kr = KERN_SUCCESS;
3604 }
3605 #endif /* __x86_64__ */
3606
3607 if (kr == KERN_RESOURCE_SHORTAGE) {
3608 if (need_retry) {
3609 /*
3610 * this will be non-null in the case where we hold the lock
3611 * on the top-object in this chain... we can't just drop
3612 * the lock on the object we're inserting the page into
3613 * and recall the PMAP_ENTER since we can still cause
3614 * a deadlock if one of the critical paths tries to
3615 * acquire the lock on the top-object and we're blocked
3616 * in PMAP_ENTER waiting for memory... our only recourse
3617 * is to deal with it at a higher level where we can
3618 * drop both locks.
3619 */
3620 *need_retry = TRUE;
3621 vm_pmap_enter_retried++;
3622 goto done;
3623 }
3624 /*
3625 * The nonblocking version of pmap_enter did not succeed,
3626 * and we don't need to drop other locks and retry
3627 * at the level above us, so
3628 * use the blocking version instead. This requires marking
3629 * the page busy and unlocking the object.
3630 */
3631 boolean_t was_busy = m->vmp_busy;
3632
3633 vm_object_lock_assert_exclusive(object);
3634
3635 m->vmp_busy = TRUE;
3636 vm_object_unlock(object);
3637
3638 kr = pmap_enter_options_check(pmap, vaddr,
3639 fault_phys_offset,
3640 m, *prot, fault_type,
3641 0, wired,
3642 pmap_options);
3643
3644 assert(VM_PAGE_OBJECT(m) == object);
3645
3646 /* Take the object lock again. */
3647 vm_object_lock(object);
3648
3649 /* If the page was busy, someone else will wake it up.
3650 * Otherwise, we have to do it now. */
3651 assert(m->vmp_busy);
3652 if (!was_busy) {
3653 PAGE_WAKEUP_DONE(m);
3654 }
3655 vm_pmap_enter_blocked++;
3656 }
3657
3658 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
3659 if ((*prot & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
3660 if (*object_lock_type == OBJECT_LOCK_SHARED) {
3661 boolean_t was_busy = m->vmp_busy;
3662 m->vmp_busy = TRUE;
3663
3664 *object_lock_type = OBJECT_LOCK_EXCLUSIVE;
3665
3666 if (vm_object_lock_upgrade(object) == FALSE) {
3667 vm_object_lock(object);
3668 }
3669
3670 if (!was_busy) {
3671 PAGE_WAKEUP_DONE(m);
3672 }
3673 }
3674 vm_object_lock_assert_exclusive(object);
3675 vm_page_lockspin_queues();
3676 m->vmp_unmodified_ro = false;
3677 vm_page_unlock_queues();
3678 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
3679
3680 VM_COMPRESSOR_PAGER_STATE_CLR(VM_PAGE_OBJECT(m), m->vmp_offset);
3681 }
3682 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3683 #pragma unused(object_lock_type)
3684 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3685
3686 done:
3687 return kr;
3688 }
3689
3690 /*
3691 * Prepare to enter a page into the pmap by checking CS, protection bits,
3692 * and setting mapped bits on the page_t.
3693 * Does not modify the page's paging queue.
3694 *
3695 * page queue lock must NOT be held
3696 * m->vmp_object must be locked
3697 *
3698 * NOTE: m->vmp_object could be locked "shared" only if we are called
3699 * from vm_fault() as part of a soft fault.
3700 */
3701 static kern_return_t
3702 vm_fault_enter_prepare(
3703 vm_page_t m,
3704 pmap_t pmap,
3705 vm_map_offset_t vaddr,
3706 vm_prot_t *prot,
3707 vm_prot_t caller_prot,
3708 vm_map_size_t fault_page_size,
3709 vm_map_offset_t fault_phys_offset,
3710 boolean_t change_wiring,
3711 vm_prot_t fault_type,
3712 vm_object_fault_info_t fault_info,
3713 int *type_of_fault,
3714 bool *page_needs_data_sync)
3715 {
3716 kern_return_t kr;
3717 bool is_tainted = false;
3718 vm_object_t object;
3719 boolean_t cs_bypass = fault_info->cs_bypass;
3720
3721 object = VM_PAGE_OBJECT(m);
3722
3723 vm_object_lock_assert_held(object);
3724
3725 #if KASAN
3726 if (pmap == kernel_pmap) {
3727 kasan_notify_address(vaddr, PAGE_SIZE);
3728 }
3729 #endif
3730
3731 #if CODE_SIGNING_MONITOR
3732 if (csm_address_space_exempt(pmap) == KERN_SUCCESS) {
3733 cs_bypass = TRUE;
3734 }
3735 #endif
3736
3737 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3738
3739 if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3740 vm_object_lock_assert_exclusive(object);
3741 } else if ((fault_type & VM_PROT_WRITE) == 0 &&
3742 !change_wiring &&
3743 (!m->vmp_wpmapped
3744 #if VM_OBJECT_ACCESS_TRACKING
3745 || object->access_tracking
3746 #endif /* VM_OBJECT_ACCESS_TRACKING */
3747 )) {
3748 /*
3749 * This is not a "write" fault, so we
3750 * might not have taken the object lock
3751 * exclusively and we might not be able
3752 * to update the "wpmapped" bit in
3753 * vm_fault_enter().
3754 * Let's just grant read access to
3755 * the page for now and we'll
3756 * soft-fault again if we need write
3757 * access later...
3758 */
3759
3760 /* This had better not be a JIT page. */
3761 if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3762 *prot &= ~VM_PROT_WRITE;
3763 } else {
3764 assert(cs_bypass);
3765 }
3766 }
3767 if (m->vmp_pmapped == FALSE) {
3768 if (m->vmp_clustered) {
3769 if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3770 /*
3771 * found it in the cache, but this
3772 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3773 * so it must have come in as part of
3774 * a cluster... account 1 pagein against it
3775 */
3776 if (object->internal) {
3777 *type_of_fault = DBG_PAGEIND_FAULT;
3778 } else {
3779 *type_of_fault = DBG_PAGEINV_FAULT;
3780 }
3781
3782 VM_PAGE_COUNT_AS_PAGEIN(m);
3783 }
3784 VM_PAGE_CONSUME_CLUSTERED(m);
3785 }
3786 }
3787
3788 if (*type_of_fault != DBG_COW_FAULT) {
3789 DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3790
3791 if (pmap == kernel_pmap) {
3792 DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3793 }
3794 }
3795
3796 kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3797 *prot, caller_prot, fault_page_size, fault_phys_offset,
3798 fault_info, &is_tainted);
3799 if (kr == KERN_SUCCESS) {
3800 /*
3801 * We either have a good page, or a tainted page that has been accepted by the process.
3802 * In both cases the page will be entered into the pmap.
3803 */
3804 *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3805 if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3806 /*
3807 * This page is tainted but we're inserting it anyways.
3808 * Since it's writeable, we need to disconnect it from other pmaps
3809 * now so those processes can take note.
3810 */
3811
3812 /*
3813 * We can only get here
3814 * because of the CSE logic
3815 */
3816 assert(pmap_get_vm_map_cs_enforced(pmap));
3817 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3818 /*
3819 * If we are faulting for a write, we can clear
3820 * the execute bit - that will ensure the page is
3821 * checked again before being executable, which
3822 * protects against a map switch.
3823 * This only happens the first time the page
3824 * gets tainted, so we won't get stuck here
3825 * to make an already writeable page executable.
3826 */
3827 if (!cs_bypass) {
3828 assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot));
3829 *prot &= ~VM_PROT_EXECUTE;
3830 }
3831 }
3832 assert(VM_PAGE_OBJECT(m) == object);
3833
3834 #if VM_OBJECT_ACCESS_TRACKING
3835 if (object->access_tracking) {
3836 DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3837 if (fault_type & VM_PROT_WRITE) {
3838 object->access_tracking_writes++;
3839 vm_object_access_tracking_writes++;
3840 } else {
3841 object->access_tracking_reads++;
3842 vm_object_access_tracking_reads++;
3843 }
3844 }
3845 #endif /* VM_OBJECT_ACCESS_TRACKING */
3846 }
3847
3848 return kr;
3849 }
3850
3851 /*
3852 * page queue lock must NOT be held
3853 * m->vmp_object must be locked
3854 *
3855 * NOTE: m->vmp_object could be locked "shared" only if we are called
3856 * from vm_fault() as part of a soft fault. If so, we must be
3857 * careful not to modify the VM object in any way that is not
3858 * legal under a shared lock...
3859 */
3860 kern_return_t
3861 vm_fault_enter(
3862 vm_page_t m,
3863 pmap_t pmap,
3864 vm_map_offset_t vaddr,
3865 vm_map_size_t fault_page_size,
3866 vm_map_offset_t fault_phys_offset,
3867 vm_prot_t prot,
3868 vm_prot_t caller_prot,
3869 boolean_t wired,
3870 boolean_t change_wiring,
3871 vm_tag_t wire_tag,
3872 vm_object_fault_info_t fault_info,
3873 boolean_t *need_retry,
3874 int *type_of_fault,
3875 uint8_t *object_lock_type)
3876 {
3877 kern_return_t kr;
3878 vm_object_t object;
3879 bool page_needs_data_sync;
3880 vm_prot_t fault_type;
3881 int pmap_options = fault_info->pmap_options;
3882
3883 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
3884 assert(m->vmp_fictitious);
3885 return KERN_SUCCESS;
3886 }
3887
3888 fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
3889
3890 assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
3891 kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
3892 fault_page_size, fault_phys_offset, change_wiring, fault_type,
3893 fault_info, type_of_fault, &page_needs_data_sync);
3894 object = VM_PAGE_OBJECT(m);
3895
3896 vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
3897
3898 if (kr == KERN_SUCCESS) {
3899 if (page_needs_data_sync) {
3900 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
3901 }
3902
3903 if (fault_info->fi_xnu_user_debug && !object->code_signed) {
3904 pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
3905 }
3906
3907
3908 kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
3909 fault_page_size, fault_phys_offset, m,
3910 &prot, caller_prot, fault_type, wired, pmap_options, need_retry, object_lock_type);
3911 }
3912
3913 return kr;
3914 }
3915
3916 void
3917 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
3918 {
3919 if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
3920 vm_fault(current_map(), /* map */
3921 vaddr, /* vaddr */
3922 prot, /* fault_type */
3923 FALSE, /* change_wiring */
3924 VM_KERN_MEMORY_NONE, /* tag - not wiring */
3925 THREAD_UNINT, /* interruptible */
3926 NULL, /* caller_pmap */
3927 0 /* caller_pmap_addr */);
3928 }
3929 }
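/*
 * Usage sketch (hedged; exact call sites vary): a caller about to write
 * through a user address can warm up the translation so the later access
 * does not fault in a less convenient context:
 *
 *	vm_pre_fault(vm_map_trunc_page(dst_addr, PAGE_MASK), VM_PROT_WRITE);
 *
 * "dst_addr" is a hypothetical destination address; the call is a no-op if
 * the translation already exists in the current map's pmap.
 */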
3930
3931
3932 /*
3933 * Routine: vm_fault
3934 * Purpose:
3935 * Handle page faults, including pseudo-faults
3936 * used to change the wiring status of pages.
3937 * Returns:
3938 * Explicit continuations have been removed.
3939 * Implementation:
3940 * vm_fault and vm_fault_page save mucho state
3941 * in the moral equivalent of a closure. The state
3942 * structure is allocated when first entering vm_fault
3943 * and deallocated when leaving vm_fault.
3944 */
3945
3946 extern uint64_t get_current_unique_pid(void);
3947
3948 unsigned long vm_fault_collapse_total = 0;
3949 unsigned long vm_fault_collapse_skipped = 0;
3950
3951
3952 kern_return_t
3953 vm_fault_external(
3954 vm_map_t map,
3955 vm_map_offset_t vaddr,
3956 vm_prot_t fault_type,
3957 boolean_t change_wiring,
3958 int interruptible,
3959 pmap_t caller_pmap,
3960 vm_map_offset_t caller_pmap_addr)
3961 {
3962 return vm_fault_internal(map, vaddr, fault_type, change_wiring,
3963 change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
3964 interruptible, caller_pmap, caller_pmap_addr,
3965 NULL);
3966 }
3967
3968 kern_return_t
3969 vm_fault(
3970 vm_map_t map,
3971 vm_map_offset_t vaddr,
3972 vm_prot_t fault_type,
3973 boolean_t change_wiring,
3974 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
3975 int interruptible,
3976 pmap_t caller_pmap,
3977 vm_map_offset_t caller_pmap_addr)
3978 {
3979 return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag,
3980 interruptible, caller_pmap, caller_pmap_addr,
3981 NULL);
3982 }
3983
3984 static boolean_t
3985 current_proc_is_privileged(void)
3986 {
3987 return csproc_get_platform_binary(current_proc());
3988 }
3989
3990 uint64_t vm_copied_on_read = 0;
3991
3992 /*
3993 * Cleanup after a vm_fault_enter.
3994 * At this point, the fault should either have failed (kr != KERN_SUCCESS)
3995 * or the page should be in the pmap and on the correct paging queue.
3996 *
3997 * Precondition:
3998 * map must be locked shared.
3999 * m_object must be locked.
4000 * If top_object != VM_OBJECT_NULL, it must be locked.
4001 * real_map must be locked.
4002 *
4003 * Postcondition:
4004 * map will be unlocked
4005 * m_object will be unlocked
4006 * top_object will be unlocked
4007 * If real_map != map, it will be unlocked
4008 */
4009 static void
4010 vm_fault_complete(
4011 vm_map_t map,
4012 vm_map_t real_map,
4013 vm_object_t object,
4014 vm_object_t m_object,
4015 vm_page_t m,
4016 vm_map_offset_t offset,
4017 vm_map_offset_t trace_real_vaddr,
4018 vm_object_fault_info_t fault_info,
4019 vm_prot_t caller_prot,
4020 #if CONFIG_DTRACE
4021 vm_map_offset_t real_vaddr,
4022 #else
4023 __unused vm_map_offset_t real_vaddr,
4024 #endif /* CONFIG_DTRACE */
4025 int type_of_fault,
4026 boolean_t need_retry,
4027 kern_return_t kr,
4028 ppnum_t *physpage_p,
4029 vm_prot_t prot,
4030 vm_object_t top_object,
4031 boolean_t need_collapse,
4032 vm_map_offset_t cur_offset,
4033 vm_prot_t fault_type,
4034 vm_object_t *written_on_object,
4035 memory_object_t *written_on_pager,
4036 vm_object_offset_t *written_on_offset)
4037 {
4038 int event_code = 0;
4039 vm_map_lock_assert_shared(map);
4040 vm_object_lock_assert_held(m_object);
4041 if (top_object != VM_OBJECT_NULL) {
4042 vm_object_lock_assert_held(top_object);
4043 }
4044 vm_map_lock_assert_held(real_map);
4045
4046 if (m_object->internal) {
4047 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
4048 } else if (m_object->object_is_shared_cache) {
4049 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
4050 } else {
4051 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
4052 }
4053 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid());
4054 if (need_retry == FALSE) {
4055 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid());
4056 }
4057 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
4058 if (kr == KERN_SUCCESS &&
4059 physpage_p != NULL) {
4060 /* for vm_map_wire_and_extract() */
4061 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
4062 if (prot & VM_PROT_WRITE) {
4063 vm_object_lock_assert_exclusive(m_object);
4064 m->vmp_dirty = TRUE;
4065 }
4066 }
4067
4068 if (top_object != VM_OBJECT_NULL) {
4069 /*
4070 * It's safe to drop the top object
4071 * now that we've done our
4072 * vm_fault_enter(). Any other fault
4073 * in progress for that virtual
4074 * address will either find our page
4075 * and translation or put in a new page
4076 * and translation.
4077 */
4078 vm_object_unlock(top_object);
4079 top_object = VM_OBJECT_NULL;
4080 }
4081
4082 if (need_collapse == TRUE) {
4083 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
4084 }
4085
4086 if (need_retry == FALSE &&
4087 (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
4088 /*
4089 * evaluate access pattern and update state
4090 * vm_fault_deactivate_behind depends on the
4091 * state being up to date
4092 */
4093 vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
4094
4095 vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
4096 }
4097 /*
4098 * That's it, clean up and return.
4099 */
4100 if (m->vmp_busy) {
4101 vm_object_lock_assert_exclusive(m_object);
4102 PAGE_WAKEUP_DONE(m);
4103 }
4104
4105 if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
4106 vm_object_paging_begin(m_object);
4107
4108 assert(*written_on_object == VM_OBJECT_NULL);
4109 *written_on_object = m_object;
4110 *written_on_pager = m_object->pager;
4111 *written_on_offset = m_object->paging_offset + m->vmp_offset;
4112 }
4113 vm_object_unlock(object);
4114
4115 vm_map_unlock_read(map);
4116 if (real_map != map) {
4117 vm_map_unlock(real_map);
4118 }
4119 }
4120
4121 static inline int
4122 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
4123 {
4124 if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4125 return DBG_COR_FAULT;
4126 }
4127 return type_of_fault;
4128 }
4129
4130 uint64_t vm_fault_resilient_media_initiate = 0;
4131 uint64_t vm_fault_resilient_media_retry = 0;
4132 uint64_t vm_fault_resilient_media_proceed = 0;
4133 uint64_t vm_fault_resilient_media_release = 0;
4134 uint64_t vm_fault_resilient_media_abort1 = 0;
4135 uint64_t vm_fault_resilient_media_abort2 = 0;
4136
4137 #if MACH_ASSERT
4138 int vm_fault_resilient_media_inject_error1_rate = 0;
4139 int vm_fault_resilient_media_inject_error1 = 0;
4140 int vm_fault_resilient_media_inject_error2_rate = 0;
4141 int vm_fault_resilient_media_inject_error2 = 0;
4142 int vm_fault_resilient_media_inject_error3_rate = 0;
4143 int vm_fault_resilient_media_inject_error3 = 0;
4144 #endif /* MACH_ASSERT */
4145
4146 kern_return_t
4147 vm_fault_internal(
4148 vm_map_t map,
4149 vm_map_offset_t vaddr,
4150 vm_prot_t caller_prot,
4151 boolean_t change_wiring,
4152 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4153 int interruptible,
4154 pmap_t caller_pmap,
4155 vm_map_offset_t caller_pmap_addr,
4156 ppnum_t *physpage_p)
4157 {
4158 vm_map_version_t version; /* Map version for verification */
4159 boolean_t wired; /* Should mapping be wired down? */
4160 vm_object_t object; /* Top-level object */
4161 vm_object_offset_t offset; /* Top-level offset */
4162 vm_prot_t prot; /* Protection for mapping */
4163 vm_object_t old_copy_object; /* Saved copy object */
4164 uint32_t old_copy_version;
4165 vm_page_t result_page; /* Result of vm_fault_page */
4166 vm_page_t top_page; /* Placeholder page */
4167 kern_return_t kr;
4168
4169 vm_page_t m; /* Fast access to result_page */
4170 kern_return_t error_code;
4171 vm_object_t cur_object;
4172 vm_object_t m_object = NULL;
4173 vm_object_offset_t cur_offset;
4174 vm_page_t cur_m;
4175 vm_object_t new_object;
4176 int type_of_fault;
4177 pmap_t pmap;
4178 wait_interrupt_t interruptible_state;
4179 vm_map_t real_map = map;
4180 vm_map_t original_map = map;
4181 bool object_locks_dropped = FALSE;
4182 vm_prot_t fault_type;
4183 vm_prot_t original_fault_type;
4184 struct vm_object_fault_info fault_info = {};
4185 bool need_collapse = FALSE;
4186 boolean_t need_retry = FALSE;
4187 boolean_t *need_retry_ptr = NULL;
4188 uint8_t object_lock_type = 0;
4189 uint8_t cur_object_lock_type;
4190 vm_object_t top_object = VM_OBJECT_NULL;
4191 vm_object_t written_on_object = VM_OBJECT_NULL;
4192 memory_object_t written_on_pager = NULL;
4193 vm_object_offset_t written_on_offset = 0;
4194 int throttle_delay;
4195 int compressed_count_delta;
4196 uint8_t grab_options;
4197 bool need_copy;
4198 bool need_copy_on_read;
4199 vm_map_offset_t trace_vaddr;
4200 vm_map_offset_t trace_real_vaddr;
4201 vm_map_size_t fault_page_size;
4202 vm_map_size_t fault_page_mask;
4203 int fault_page_shift;
4204 vm_map_offset_t fault_phys_offset;
4205 vm_map_offset_t real_vaddr;
4206 bool resilient_media_retry = false;
4207 bool resilient_media_ref_transfer = false;
4208 vm_object_t resilient_media_object = VM_OBJECT_NULL;
4209 vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1;
4210 bool page_needs_data_sync = false;
4211 /*
4212 * Was the VM object contended when vm_map_lookup_and_lock_object locked it?
4213 * If so, the zero fill path will drop the lock.
4214 * NB: Ideally we would always drop the lock rather than rely on
4215 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4216 */
4217 bool object_is_contended = false;
4218
4219
4220 real_vaddr = vaddr;
4221 trace_real_vaddr = vaddr;
4222
4223 /*
4224 * Some (kernel) submaps are marked with "should never fault".
4225 *
4226 * We do this for two reasons:
4227 * - PGZ which is inside the zone map range can't go down the normal
4228 * lookup path (vm_map_lookup_entry() would panic).
4229 *
4230 * - we want guard pages to avoid using fictitious pages at all,
4231 * to prevent ZFOD pages from being created.
4232 *
4233 * We also want to capture the fault address easily so that the zone
4234 * allocator might present an enhanced panic log.
4235 */
4236 if (map->never_faults || (pgz_owned(vaddr) && map->pmap == kernel_pmap)) {
4237 assert(map->pmap == kernel_pmap);
4238 return KERN_INVALID_ADDRESS;
4239 }
4240
4241 if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4242 fault_phys_offset = (vm_map_offset_t)-1;
4243 fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4244 fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4245 fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4246 if (fault_page_size < PAGE_SIZE) {
4247 DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4248 vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4249 }
4250 } else {
4251 fault_phys_offset = 0;
4252 fault_page_size = PAGE_SIZE;
4253 fault_page_mask = PAGE_MASK;
4254 fault_page_shift = PAGE_SHIFT;
4255 vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4256 }
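	/*
	 * Worked example (assumption: a 16K-page kernel servicing a 4K-page
	 * user map): VM_MAP_PAGE_SIZE(original_map) == 4096, so fault_page_size
	 * is 4096, fault_page_mask is 0xfff, fault_page_shift is 12, and vaddr
	 * is truncated to a 4K boundary while PAGE_SIZE stays 16K for the
	 * underlying vm_page_t.
	 */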
4257
4258 if (map == kernel_map) {
4259 trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4260 trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4261 } else {
4262 trace_vaddr = vaddr;
4263 }
4264
4265 KDBG_RELEASE(
4266 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
4267 ((uint64_t)trace_vaddr >> 32),
4268 trace_vaddr,
4269 (map == kernel_map));
4270
4271 if (get_preemption_level() != 0) {
4272 KDBG_RELEASE(
4273 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4274 ((uint64_t)trace_vaddr >> 32),
4275 trace_vaddr,
4276 KERN_FAILURE);
4277
4278 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4279 return KERN_FAILURE;
4280 }
4281
4282 thread_t cthread = current_thread();
4283 bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4284 uint64_t fstart = 0;
4285
4286 if (rtfault) {
4287 fstart = mach_continuous_time();
4288 }
4289
4290 interruptible_state = thread_interrupt_level(interruptible);
4291
4292 fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
4293
4294 counter_inc(&vm_statistics_faults);
4295 counter_inc(¤t_task()->faults);
4296 original_fault_type = fault_type;
4297
4298 need_copy = FALSE;
4299 if (fault_type & VM_PROT_WRITE) {
4300 need_copy = TRUE;
4301 }
4302
4303 if (need_copy || change_wiring) {
4304 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4305 } else {
4306 object_lock_type = OBJECT_LOCK_SHARED;
4307 }
4308
4309 cur_object_lock_type = OBJECT_LOCK_SHARED;
4310
4311 if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4312 if (compressor_map) {
4313 if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4314 panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4315 }
4316 }
4317 }
4318 RetryFault:
4319 assert(written_on_object == VM_OBJECT_NULL);
4320
4321 /*
4322 * assume we will hit a page in the cache
4323 * otherwise, explicitly override with
4324 * the real fault type once we determine it
4325 */
4326 type_of_fault = DBG_CACHE_HIT_FAULT;
4327
4328 /*
4329 * Find the backing store object and offset into
4330 * it to begin the search.
4331 */
4332 fault_type = original_fault_type;
4333 map = original_map;
4334 vm_map_lock_read(map);
4335
4336 if (resilient_media_retry) {
4337 /*
4338 * If we have to insert a fake zero-filled page to hide
4339 * a media failure to provide the real page, we need to
4340 * resolve any pending copy-on-write on this mapping.
4341 * VM_PROT_COPY tells vm_map_lookup_and_lock_object() to deal
4342 * with that even if this is not a "write" fault.
4343 */
4344 need_copy = TRUE;
4345 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4346 vm_fault_resilient_media_retry++;
4347 }
4348
4349 kr = vm_map_lookup_and_lock_object(&map, vaddr,
4350 (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4351 object_lock_type, &version,
4352 &object, &offset, &prot, &wired,
4353 &fault_info,
4354 &real_map,
4355 &object_is_contended);
4356 object_is_contended = false; /* avoid unsafe optimization */
4357
4358 if (kr != KERN_SUCCESS) {
4359 vm_map_unlock_read(map);
4360 /*
4361 * This can be seen in a crash report if indeed the
4362 * thread is crashing due to an invalid access in a non-existent
4363 * range.
4364 * Turning this OFF for now because it is noisy and not always fatal
4365 * eg prefaulting.
4366 *
4367 * if (kr == KERN_INVALID_ADDRESS) {
4368 * ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4369 * }
4370 */
4371 goto done;
4372 }
4373
4374
4375 pmap = real_map->pmap;
4376 fault_info.interruptible = interruptible;
4377 fault_info.stealth = FALSE;
4378 fault_info.io_sync = FALSE;
4379 fault_info.mark_zf_absent = FALSE;
4380 fault_info.batch_pmap_op = FALSE;
4381
4382 if (resilient_media_retry) {
4383 /*
4384 * We're retrying this fault after having detected a media
4385 * failure from a "resilient_media" mapping.
4386 * Check that the mapping is still pointing at the object
4387 * that just failed to provide a page.
4388 */
4389 assert(resilient_media_object != VM_OBJECT_NULL);
4390 assert(resilient_media_offset != (vm_object_offset_t)-1);
4391 if ((object != VM_OBJECT_NULL &&
4392 object == resilient_media_object &&
4393 offset == resilient_media_offset &&
4394 fault_info.resilient_media)
4395 #if MACH_ASSERT
4396 && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4397 (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4398 #endif /* MACH_ASSERT */
4399 ) {
4400 /*
4401 * This mapping still points at the same object
4402 * and is still "resilient_media": proceed in
4403 * "recovery-from-media-failure" mode, where we'll
4404 * insert a zero-filled page in the top object.
4405 */
4406 // printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4407 vm_fault_resilient_media_proceed++;
4408 } else {
4409 /* not recovering: reset state and retry fault */
4410 // printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4411 vm_object_unlock(object);
4412 if (real_map != map) {
4413 vm_map_unlock(real_map);
4414 }
4415 vm_map_unlock_read(map);
4416 /* release our extra reference on failed object */
4417 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4418 vm_object_deallocate(resilient_media_object);
4419 resilient_media_object = VM_OBJECT_NULL;
4420 resilient_media_offset = (vm_object_offset_t)-1;
4421 resilient_media_retry = false;
4422 vm_fault_resilient_media_abort1++;
4423 goto RetryFault;
4424 }
4425 } else {
4426 assert(resilient_media_object == VM_OBJECT_NULL);
4427 resilient_media_offset = (vm_object_offset_t)-1;
4428 }
4429
4430 /*
4431 * If the page is wired, we must fault for the current protection
4432 * value, to avoid further faults.
4433 */
4434 if (wired) {
4435 fault_type = prot | VM_PROT_WRITE;
4436 }
4437 if (wired || need_copy) {
4438 /*
4439 * since we're treating this fault as a 'write'
4440 * we must hold the top object lock exclusively
4441 */
4442 if (object_lock_type == OBJECT_LOCK_SHARED) {
4443 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4444
4445 if (vm_object_lock_upgrade(object) == FALSE) {
4446 /*
4447 * couldn't upgrade, so explicitly
4448 * take the lock exclusively
4449 */
4450 vm_object_lock(object);
4451 }
4452 }
4453 }
4454
4455 #if VM_FAULT_CLASSIFY
4456 /*
4457 * Temporary data gathering code
4458 */
4459 vm_fault_classify(object, offset, fault_type);
4460 #endif
4461 /*
4462 * Fast fault code. The basic idea is to do as much as
4463 * possible while holding the map lock and object locks.
4464 * Busy pages are not used until the object lock has to
4465 * be dropped to do something (copy, zero fill, pmap enter).
4466 * Similarly, paging references aren't acquired until that
4467 * point, and object references aren't used.
4468 *
4469 * If we can figure out what to do
4470 * (zero fill, copy on write, pmap enter) while holding
4471 * the locks, then it gets done. Otherwise, we give up,
4472 * and use the original fault path (which doesn't hold
4473 * the map lock, and relies on busy pages).
4474 * The give up cases include:
4475 * - Have to talk to pager.
4476 * - Page is busy, absent or in error.
4477 * - Pager has locked out desired access.
4478 * - Fault needs to be restarted.
4479 * - Have to push page into copy object.
4480 *
4481 * The code is an infinite loop that moves one level down
4482 * the shadow chain each time. cur_object and cur_offset
4483 * refer to the current object being examined. object and offset
4484 * are the original object from the map. The loop is at the
4485 * top level if and only if object and cur_object are the same.
4486 *
4487 * Invariants: Map lock is held throughout. Lock is held on
4488 * original object and cur_object (if different) when
4489 * continuing or exiting loop.
4490 *
4491 */
4492
4493 #if defined(__arm64__)
4494 /*
4495 * Fail if reading an execute-only page in a
4496 * pmap that enforces execute-only protection.
4497 */
4498 if (fault_type == VM_PROT_READ &&
4499 (prot & VM_PROT_EXECUTE) &&
4500 !(prot & VM_PROT_READ) &&
4501 pmap_enforces_execute_only(pmap)) {
4502 vm_object_unlock(object);
4503 vm_map_unlock_read(map);
4504 if (real_map != map) {
4505 vm_map_unlock(real_map);
4506 }
4507 kr = KERN_PROTECTION_FAILURE;
4508 goto done;
4509 }
4510 #endif
4511
4512 fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
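	/*
	 * Worked example (assuming the 4K-on-16K configuration sketched above):
	 * an offset of 0x5000 truncates to 0x4000 against PAGE_MASK, leaving
	 * fault_phys_offset == 0x1000, i.e. the second 4K chunk of the 16K
	 * physical page. When the map's page size matches PAGE_SIZE this is
	 * expected to be 0 (see the assertions in vm_fault_attempt_pmap_enter()).
	 */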
4513
4514 /*
4515 * If this page is to be inserted in a copy delay object
4516 * for writing, and if the object has a copy, then the
4517 * copy delay strategy is implemented in the slow fault page.
4518 */
4519 if ((object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
4520 object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) &&
4521 object->vo_copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4522 goto handle_copy_delay;
4523 }
4524
4525 cur_object = object;
4526 cur_offset = offset;
4527
4528 grab_options = 0;
4529 #if CONFIG_SECLUDED_MEMORY
4530 if (object->can_grab_secluded) {
4531 grab_options |= VM_PAGE_GRAB_SECLUDED;
4532 }
4533 #endif /* CONFIG_SECLUDED_MEMORY */
4534
4535 while (TRUE) {
4536 if (!cur_object->pager_created &&
4537 cur_object->phys_contiguous) { /* superpage */
4538 break;
4539 }
4540
4541 if (cur_object->blocked_access) {
4542 /*
4543 * Access to this VM object has been blocked.
4544 * Let the slow path handle it.
4545 */
4546 break;
4547 }
4548
4549 m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4550 m_object = NULL;
4551
4552 if (m != VM_PAGE_NULL) {
4553 m_object = cur_object;
4554
4555 if (m->vmp_busy) {
4556 wait_result_t result;
4557
4558 /*
4559 * in order to do the PAGE_ASSERT_WAIT, we must
4560 * have object that 'm' belongs to locked exclusively
4561 */
4562 if (object != cur_object) {
4563 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4564 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4565
4566 if (vm_object_lock_upgrade(cur_object) == FALSE) {
4567 /*
4568 * couldn't upgrade so go do a full retry
4569 * immediately since we can no longer be
4570 * certain about cur_object (since we
4571 * don't hold a reference on it)...
4572 * first drop the top object lock
4573 */
4574 vm_object_unlock(object);
4575
4576 vm_map_unlock_read(map);
4577 if (real_map != map) {
4578 vm_map_unlock(real_map);
4579 }
4580
4581 goto RetryFault;
4582 }
4583 }
4584 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4585 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4586
4587 if (vm_object_lock_upgrade(object) == FALSE) {
4588 /*
4589 * couldn't upgrade, so explicitly take the lock
4590 * exclusively and go relookup the page since we
4591 * will have dropped the object lock and
4592 * a different thread could have inserted
4593 * a page at this offset
4594 * no need for a full retry since we're
4595 * at the top level of the object chain
4596 */
4597 vm_object_lock(object);
4598
4599 continue;
4600 }
4601 }
4602 if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4603 /*
4604 * m->vmp_busy == TRUE and the object is locked exclusively
4605 * if m->pageout_queue == TRUE after we acquire the
4606 * queues lock, we are guaranteed that it is stable on
4607 * the pageout queue and therefore reclaimable
4608 *
4609 * NOTE: this is only true for the internal pageout queue
4610 * in the compressor world
4611 */
4612 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4613
4614 vm_page_lock_queues();
4615
4616 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4617 vm_pageout_throttle_up(m);
4618 vm_page_unlock_queues();
4619
4620 PAGE_WAKEUP_DONE(m);
4621 goto reclaimed_from_pageout;
4622 }
4623 vm_page_unlock_queues();
4624 }
4625 if (object != cur_object) {
4626 vm_object_unlock(object);
4627 }
4628
4629 vm_map_unlock_read(map);
4630 if (real_map != map) {
4631 vm_map_unlock(real_map);
4632 }
4633
4634 result = PAGE_ASSERT_WAIT(m, interruptible);
4635
4636 vm_object_unlock(cur_object);
4637
4638 if (result == THREAD_WAITING) {
4639 result = thread_block(THREAD_CONTINUE_NULL);
4640 }
4641 if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4642 goto RetryFault;
4643 }
4644
4645 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4646 kr = KERN_ABORTED;
4647 goto done;
4648 }
4649 reclaimed_from_pageout:
4650 if (m->vmp_laundry) {
4651 if (object != cur_object) {
4652 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4653 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4654
4655 vm_object_unlock(object);
4656 vm_object_unlock(cur_object);
4657
4658 vm_map_unlock_read(map);
4659 if (real_map != map) {
4660 vm_map_unlock(real_map);
4661 }
4662
4663 goto RetryFault;
4664 }
4665 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4666 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4667
4668 if (vm_object_lock_upgrade(object) == FALSE) {
4669 /*
4670 * couldn't upgrade, so explicitly take the lock
4671 * exclusively and go relookup the page since we
4672 * will have dropped the object lock and
4673 * a different thread could have inserted
4674 * a page at this offset
4675 * no need for a full retry since we're
4676 * at the top level of the object chain
4677 */
4678 vm_object_lock(object);
4679
4680 continue;
4681 }
4682 }
4683 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4684 vm_pageout_steal_laundry(m, FALSE);
4685 }
4686
4687
4688 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
4689 /*
4690 * Guard page: let the slow path deal with it
4691 */
4692 break;
4693 }
4694 if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) {
4695 /*
4696 * Unusual case... let the slow path deal with it
4697 */
4698 break;
4699 }
4700 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4701 if (object != cur_object) {
4702 vm_object_unlock(object);
4703 }
4704 vm_map_unlock_read(map);
4705 if (real_map != map) {
4706 vm_map_unlock(real_map);
4707 }
4708 vm_object_unlock(cur_object);
4709 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4710 kr = KERN_MEMORY_ERROR;
4711 goto done;
4712 }
4713 assert(m_object == VM_PAGE_OBJECT(m));
4714
4715 if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4716 PAGE_SIZE, 0) ||
4717 (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4718 upgrade_lock_and_retry:
4719 /*
4720 * We might need to validate this page
4721 * against its code signature, so we
4722 * want to hold the VM object exclusively.
4723 */
4724 if (object != cur_object) {
4725 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4726 vm_object_unlock(object);
4727 vm_object_unlock(cur_object);
4728
4729 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4730
4731 vm_map_unlock_read(map);
4732 if (real_map != map) {
4733 vm_map_unlock(real_map);
4734 }
4735
4736 goto RetryFault;
4737 }
4738 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4739 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4740
4741 if (vm_object_lock_upgrade(object) == FALSE) {
4742 /*
4743 * couldn't upgrade, so explicitly take the lock
4744 * exclusively and go relookup the page since we
4745 * will have dropped the object lock and
4746 * a different thread could have inserted
4747 * a page at this offset
4748 * no need for a full retry since we're
4749 * at the top level of the object chain
4750 */
4751 vm_object_lock(object);
4752
4753 continue;
4754 }
4755 }
4756 }
4757 /*
4758 * Two cases of map in faults:
4759 * - At top level w/o copy object.
4760 * - Read fault anywhere.
4761 * --> must disallow write.
4762 */
4763
4764 if (object == cur_object && object->vo_copy == VM_OBJECT_NULL) {
4765 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4766 if ((fault_type & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
4767 assert(cur_object == VM_PAGE_OBJECT(m));
4768 assert(cur_object->internal);
4769 vm_object_lock_assert_exclusive(cur_object);
4770 vm_page_lockspin_queues();
4771 m->vmp_unmodified_ro = false;
4772 vm_page_unlock_queues();
4773 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4774 VM_COMPRESSOR_PAGER_STATE_CLR(cur_object, m->vmp_offset);
4775 }
4776 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4777 goto FastPmapEnter;
4778 }
4779
4780 if (!need_copy &&
4781 !fault_info.no_copy_on_read &&
4782 cur_object != object &&
4783 !cur_object->internal &&
4784 !cur_object->pager_trusted &&
4785 vm_protect_privileged_from_untrusted &&
4786 !cur_object->code_signed &&
4787 current_proc_is_privileged()) {
4788 /*
4789 * We're faulting on a page in "object" and
4790 * went down the shadow chain to "cur_object"
4791 * to find out that "cur_object"'s pager
4792 * is not "trusted", i.e. we can not trust it
4793 * to always return the same contents.
4794 * Since the target is a "privileged" process,
4795 * let's treat this as a copy-on-read fault, as
4796 * if it was a copy-on-write fault.
4797 * Once "object" gets a copy of this page, it
4798 * won't have to rely on "cur_object" to
4799 * provide the contents again.
4800 *
4801 * This is done by setting "need_copy" and
4802 * retrying the fault from the top with the
4803 * appropriate locking.
4804 *
4805 * Special case: if the mapping is executable
4806 * and the untrusted object is code-signed and
4807 * the process is "cs_enforced", we do not
4808 * copy-on-read because that would break
4809 * code-signing enforcement expectations (an
4810 * executable page must belong to a code-signed
4811 * object) and we can rely on code-signing
4812 * to re-validate the page if it gets evicted
4813 * and paged back in.
4814 */
4815 // printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4816 vm_copied_on_read++;
4817 need_copy = TRUE;
4818
4819 vm_object_unlock(object);
4820 vm_object_unlock(cur_object);
4821 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4822 vm_map_unlock_read(map);
4823 if (real_map != map) {
4824 vm_map_unlock(real_map);
4825 }
4826 goto RetryFault;
4827 }
4828
4829 if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4830 if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4831 prot &= ~VM_PROT_WRITE;
4832 } else {
4833 /*
4834 * For a protection that the pmap cares
4835 * about, we must hand over the full
4836 * set of protections (so that the pmap
4837 * layer can apply any desired policy).
4838 * This means that cs_bypass must be
4839 * set, as this can force us to pass
4840 * RWX.
4841 */
4842 assert(fault_info.cs_bypass);
4843 }
4844
4845 if (object != cur_object) {
4846 /*
4847 * We still need to hold the top object
4848 * lock here to prevent a race between
4849 * a read fault (taking only "shared"
4850 * locks) and a write fault (taking
4851 * an "exclusive" lock on the top
4852 * object.
4853 * Otherwise, as soon as we release the
4854 * top lock, the write fault could
4855 * proceed and actually complete before
4856 * the read fault, and the copied page's
4857 * translation could then be overwritten
4858 * by the read fault's translation for
4859 * the original page.
4860 *
4861 * Let's just record what the top object
4862 * is and we'll release it later.
4863 */
4864 top_object = object;
4865
4866 /*
4867 * switch to the object that has the new page
4868 */
4869 object = cur_object;
4870 object_lock_type = cur_object_lock_type;
4871 }
4872 FastPmapEnter:
4873 assert(m_object == VM_PAGE_OBJECT(m));
4874
4875 /*
4876 * prepare for the pmap_enter...
4877 * object and map are both locked
4878 * m contains valid data
4879 * object == m->vmp_object
4880 * cur_object == NULL or it's been unlocked
4881 * no paging references on either object or cur_object
4882 */
4883 if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4884 need_retry_ptr = &need_retry;
4885 } else {
4886 need_retry_ptr = NULL;
4887 }
4888
4889 if (fault_page_size < PAGE_SIZE) {
4890 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
4891 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
4892 fault_phys_offset < PAGE_SIZE),
4893 "0x%llx\n", (uint64_t)fault_phys_offset);
4894 } else {
4895 assertf(fault_phys_offset == 0,
4896 "0x%llx\n", (uint64_t)fault_phys_offset);
4897 }
4898
4899 if (__improbable(rtfault &&
4900 !m->vmp_realtime &&
4901 vm_pageout_protect_realtime)) {
4902 vm_page_lock_queues();
4903 if (!m->vmp_realtime) {
4904 m->vmp_realtime = true;
4905 vm_page_realtime_count++;
4906 }
4907 vm_page_unlock_queues();
4908 }
4909 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
4910 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
4911 if (caller_pmap) {
4912 kr = vm_fault_enter(m,
4913 caller_pmap,
4914 caller_pmap_addr,
4915 fault_page_size,
4916 fault_phys_offset,
4917 prot,
4918 caller_prot,
4919 wired,
4920 change_wiring,
4921 wire_tag,
4922 &fault_info,
4923 need_retry_ptr,
4924 &type_of_fault,
4925 &object_lock_type);
4926 } else {
4927 kr = vm_fault_enter(m,
4928 pmap,
4929 vaddr,
4930 fault_page_size,
4931 fault_phys_offset,
4932 prot,
4933 caller_prot,
4934 wired,
4935 change_wiring,
4936 wire_tag,
4937 &fault_info,
4938 need_retry_ptr,
4939 &type_of_fault,
4940 &object_lock_type);
4941 }
4942
4943 vm_fault_complete(
4944 map,
4945 real_map,
4946 object,
4947 m_object,
4948 m,
4949 offset,
4950 trace_real_vaddr,
4951 &fault_info,
4952 caller_prot,
4953 real_vaddr,
4954 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
4955 need_retry,
4956 kr,
4957 physpage_p,
4958 prot,
4959 top_object,
4960 need_collapse,
4961 cur_offset,
4962 fault_type,
4963 &written_on_object,
4964 &written_on_pager,
4965 &written_on_offset);
4966 top_object = VM_OBJECT_NULL;
4967 if (need_retry == TRUE) {
4968 /*
4969 * vm_fault_enter couldn't complete the PMAP_ENTER...
4970 * at this point we don't hold any locks so it's safe
4971 * to ask the pmap layer to expand the page table to
4972 * accommodate this mapping... once expanded, we'll
4973 * re-drive the fault which should result in vm_fault_enter
4974 * being able to successfully enter the mapping this time around
4975 */
4976 (void)pmap_enter_options(
4977 pmap, vaddr, 0, 0, 0, 0, 0,
4978 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
4979
4980 need_retry = FALSE;
4981 goto RetryFault;
4982 }
4983 goto done;
4984 }
4985 /*
4986 * COPY ON WRITE FAULT
4987 */
4988 assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
4989
4990 /*
4991 * If objects match, then
4992 * object->vo_copy must not be NULL (else control
4993 * would be in previous code block), and we
4994 * have a potential push into the copy object
4995 * which we can't cope with here.
4996 */
4997 if (cur_object == object) {
4998 /*
4999 * must take the slow path to
5000 * deal with the copy push
5001 */
5002 break;
5003 }
5004
5005 /*
5006 * This is now a shadow based copy on write
5007 * fault -- it requires a copy up the shadow
5008 * chain.
5009 */
5010 assert(m_object == VM_PAGE_OBJECT(m));
5011
5012 if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
5013 vm_fault_cs_need_validation(NULL, m, m_object,
5014 PAGE_SIZE, 0)) {
5015 goto upgrade_lock_and_retry;
5016 }
5017
5018 #if MACH_ASSERT
5019 if (resilient_media_retry &&
5020 vm_fault_resilient_media_inject_error2_rate != 0 &&
5021 (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
5022 /* inject an error */
5023 cur_m = m;
5024 m = VM_PAGE_NULL;
5025 m_object = VM_OBJECT_NULL;
5026 break;
5027 }
5028 #endif /* MACH_ASSERT */
5029 /*
5030 * Allocate a page in the original top level
5031 * object. Give up if allocate fails. Also
5032 * need to remember current page, as it's the
5033 * source of the copy.
5034 *
5035 * at this point we hold locks on both
5036 * object and cur_object... no need to take
5037 * paging refs or mark pages BUSY since
5038 * we don't drop either object lock until
5039 * the page has been copied and inserted
5040 */
5041 cur_m = m;
5042 m = vm_page_grab_options(grab_options);
5043 m_object = NULL;
5044
5045 if (m == VM_PAGE_NULL) {
5046 /*
5047 * no free page currently available...
5048 * must take the slow path
5049 */
5050 break;
5051 }
5052
5053 /*
5054 * Now do the copy. Mark the source page busy...
5055 *
5056 * NOTE: This code holds the map lock across
5057 * the page copy.
5058 */
5059 vm_page_copy(cur_m, m);
5060 vm_page_insert(m, object, vm_object_trunc_page(offset));
5061 if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
5062 DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5063 }
5064 m_object = object;
5065 SET_PAGE_DIRTY(m, FALSE);
5066
5067 /*
5068 * Now cope with the source page and object
5069 */
5070 if (object->ref_count > 1 && cur_m->vmp_pmapped) {
5071 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5072 } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
5073 /*
5074 * We've copied the full 16K page but we're
5075 * about to call vm_fault_enter() only for
5076 * the 4K chunk we're faulting on. The other
5077 * three 4K chunks in that page could still
5078 * be pmapped in this pmap.
5079 * Since the VM object layer thinks that the
5080 * entire page has been dealt with and the
5081 * original page might no longer be needed,
5082 * it might collapse/bypass the original VM
5083 * object and free its pages, which would be
5084 * bad (and would trigger pmap_verify_free()
5085 * assertions) if the other 4K chunks are still
5086 * pmapped.
5087 */
5088 /*
5089 * XXX FBDP TODO4K: to be revisited
5090 * Technically, we need to pmap_disconnect()
5091 * only the target pmap's mappings for the 4K
5092 * chunks of this 16K VM page. If other pmaps
5093 * have PTEs on these chunks, that means that
5094 * the associated VM map must have a reference
5095 * on the VM object, so no need to worry about
5096 * those.
5097 * pmap_protect() for each 4K chunk would be
5098 * better but we'd have to check which chunks
5099 * are actually mapped before and after this
5100 * one.
5101 * A full-blown pmap_disconnect() is easier
5102 * for now but not efficient.
5103 */
5104 DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
5105 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5106 }
5107
5108 if (cur_m->vmp_clustered) {
5109 VM_PAGE_COUNT_AS_PAGEIN(cur_m);
5110 VM_PAGE_CONSUME_CLUSTERED(cur_m);
5111 vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
5112 }
5113 need_collapse = TRUE;
5114
5115 if (!cur_object->internal &&
5116 cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5117 /*
5118 * The object from which we've just
5119 * copied a page is most probably backed
5120 * by a vnode. We don't want to waste too
5121 * much time trying to collapse the VM objects
5122 * and create a bottleneck when several tasks
5123 * map the same file.
5124 */
5125 if (cur_object->vo_copy == object) {
5126 /*
5127 * Shared mapping or no COW yet.
5128 * We can never collapse a copy
5129 * object into its backing object.
5130 */
5131 need_collapse = FALSE;
5132 } else if (cur_object->vo_copy == object->shadow &&
5133 object->shadow->resident_page_count == 0) {
5134 /*
5135 * Shared mapping after a COW occurred.
5136 */
5137 need_collapse = FALSE;
5138 }
5139 }
5140 vm_object_unlock(cur_object);
5141
5142 if (need_collapse == FALSE) {
5143 vm_fault_collapse_skipped++;
5144 }
5145 vm_fault_collapse_total++;
5146
5147 type_of_fault = DBG_COW_FAULT;
5148 counter_inc(&vm_statistics_cow_faults);
5149 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5150 counter_inc(&current_task()->cow_faults);
5151
5152 goto FastPmapEnter;
5153 } else {
5154 /*
5155 * No page at cur_object, cur_offset... m == NULL
5156 */
5157 if (cur_object->pager_created) {
5158 vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5159
5160 if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5161 int my_fault_type;
5162 vm_compressor_options_t c_flags = C_DONT_BLOCK;
5163 bool insert_cur_object = FALSE;
5164
5165 /*
5166 * May have to talk to a pager...
5167 * if so, take the slow path by
5168 * doing a 'break' from the while (TRUE) loop
5169 *
5170 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5171 * if the compressor is active and the page exists there
5172 */
5173 if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5174 break;
5175 }
5176
5177 if (map == kernel_map || real_map == kernel_map) {
5178 /*
5179 * can't call into the compressor with the kernel_map
5180 * lock held, since the compressor may try to operate
5181 * on the kernel map in order to return an empty c_segment
5182 */
5183 break;
5184 }
5185 if (object != cur_object) {
5186 if (fault_type & VM_PROT_WRITE) {
5187 c_flags |= C_KEEP;
5188 } else {
5189 insert_cur_object = TRUE;
5190 }
5191 }
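/*
 * When faulting through a shadow chain (object != cur_object), a
 * write fault will push the decompressed page into the top object
 * as its copy-on-write copy, so keep the compressed copy for
 * cur_object (C_KEEP); on a read fault we can instead insert the
 * decompressed page directly into cur_object.
 */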
5192 if (insert_cur_object == TRUE) {
5193 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5194 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5195
5196 if (vm_object_lock_upgrade(cur_object) == FALSE) {
5197 /*
5198 * couldn't upgrade so go do a full retry
5199 * immediately since we can no longer be
5200 * certain about cur_object (since we
5201 * don't hold a reference on it)...
5202 * first drop the top object lock
5203 */
5204 vm_object_unlock(object);
5205
5206 vm_map_unlock_read(map);
5207 if (real_map != map) {
5208 vm_map_unlock(real_map);
5209 }
5210
5211 goto RetryFault;
5212 }
5213 }
5214 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
5215 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5216
5217 if (object != cur_object) {
5218 /*
5219 * we can't go for the upgrade on the top
5220 * lock since the upgrade may block waiting
5221 * for readers to drain... since we hold
5222 * cur_object locked at this point, waiting
5223 * for the readers to drain would represent
5224 * a lock order inversion since the lock order
5225 * for objects is the reference order in the
5226 * shadow chain
5227 */
5228 vm_object_unlock(object);
5229 vm_object_unlock(cur_object);
5230
5231 vm_map_unlock_read(map);
5232 if (real_map != map) {
5233 vm_map_unlock(real_map);
5234 }
5235
5236 goto RetryFault;
5237 }
5238 if (vm_object_lock_upgrade(object) == FALSE) {
5239 /*
5240 * couldn't upgrade, so explicitly take the lock
5241 * exclusively and go relookup the page since we
5242 * will have dropped the object lock and
5243 * a different thread could have inserted
5244 * a page at this offset
5245 * no need for a full retry since we're
5246 * at the top level of the object chain
5247 */
5248 vm_object_lock(object);
5249
5250 continue;
5251 }
5252 }
5253 m = vm_page_grab_options(grab_options);
5254 m_object = NULL;
5255
5256 if (m == VM_PAGE_NULL) {
5257 /*
5258 * no free page currently available...
5259 * must take the slow path
5260 */
5261 break;
5262 }
5263
5264 /*
5265 * The object is and remains locked
5266 * so no need to take a
5267 * "paging_in_progress" reference.
5268 */
5269 bool shared_lock;
5270 if ((object == cur_object &&
5271 object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5272 (object != cur_object &&
5273 cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5274 shared_lock = FALSE;
5275 } else {
5276 shared_lock = TRUE;
5277 }
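/*
 * Note whether the lock on the object we'll charge the compressed
 * count change against is held exclusive or only shared;
 * vm_compressor_pager_count() takes that into account when
 * adjusting the pager's compressed page accounting.
 */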
5278
5279 kr = vm_compressor_pager_get(
5280 cur_object->pager,
5281 (vm_object_trunc_page(cur_offset)
5282 + cur_object->paging_offset),
5283 VM_PAGE_GET_PHYS_PAGE(m),
5284 &my_fault_type,
5285 c_flags,
5286 &compressed_count_delta);
5287
5288 vm_compressor_pager_count(
5289 cur_object->pager,
5290 compressed_count_delta,
5291 shared_lock,
5292 cur_object);
5293
5294 if (kr != KERN_SUCCESS) {
5295 vm_page_release(m, FALSE);
5296 m = VM_PAGE_NULL;
5297 }
5298 /*
5299 * If vm_compressor_pager_get() returns
5300 * KERN_MEMORY_FAILURE, then the
5301 * compressed data is permanently lost,
5302 * so return this error immediately.
5303 */
5304 if (kr == KERN_MEMORY_FAILURE) {
5305 if (object != cur_object) {
5306 vm_object_unlock(cur_object);
5307 }
5308 vm_object_unlock(object);
5309 vm_map_unlock_read(map);
5310 if (real_map != map) {
5311 vm_map_unlock(real_map);
5312 }
5313
5314 goto done;
5315 } else if (kr != KERN_SUCCESS) {
5316 break;
5317 }
5318 m->vmp_dirty = TRUE;
5319 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5320 if ((fault_type & VM_PROT_WRITE) == 0) {
5321 prot &= ~VM_PROT_WRITE;
5322 /*
5323 * The page, m, has yet to be inserted
5324 * into an object. So we are fine with
5325 * the object/cur_object lock being held
5326 * shared.
5327 */
5328 vm_page_lockspin_queues();
5329 m->vmp_unmodified_ro = true;
5330 vm_page_unlock_queues();
5331 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
5332 }
5333 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5334
5335 /*
5336 * If the object is purgeable, its
5337 * owner's purgeable ledgers will be
5338 * updated in vm_page_insert() but the
5339 * page was also accounted for in a
5340 * "compressed purgeable" ledger, so
5341 * update that now.
5342 */
5343 if (object != cur_object &&
5344 !insert_cur_object) {
5345 /*
5346 * We're not going to insert
5347 * the decompressed page into
5348 * the object it came from.
5349 *
5350 * We're dealing with a
5351 * copy-on-write fault on
5352 * "object".
5353 * We're going to decompress
5354 * the page directly into the
5355 * target "object" while
5356 * keeping the compressed
5357 * page for "cur_object", so
5358 * no ledger update in that
5359 * case.
5360 */
5361 } else if (((cur_object->purgable ==
5362 VM_PURGABLE_DENY) &&
5363 (!cur_object->vo_ledger_tag)) ||
5364 (cur_object->vo_owner ==
5365 NULL)) {
5366 /*
5367 * "cur_object" is not purgeable
5368 * and is not ledger-tagged, or
5369 * there's no owner for it,
5370 * so no owner's ledgers to
5371 * update.
5372 */
5373 } else {
5374 /*
5375 * One less compressed
5376 * purgeable/tagged page for
5377 * cur_object's owner.
5378 */
5379 if (compressed_count_delta) {
5380 vm_object_owner_compressed_update(
5381 cur_object,
5382 -1);
5383 }
5384 }
5385
5386 if (insert_cur_object) {
5387 vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5388 m_object = cur_object;
5389 } else {
5390 vm_page_insert(m, object, vm_object_trunc_page(offset));
5391 m_object = object;
5392 }
5393
5394 if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
5395 /*
5396 * If the page is not cacheable,
5397 * we can't let its contents
5398 * linger in the data cache
5399 * after the decompression.
5400 */
5401 pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5402 }
5403
5404 type_of_fault = my_fault_type;
5405
5406 VM_STAT_DECOMPRESSIONS();
5407
5408 if (cur_object != object) {
5409 if (insert_cur_object) {
5410 top_object = object;
5411 /*
5412 * switch to the object that has the new page
5413 */
5414 object = cur_object;
5415 object_lock_type = cur_object_lock_type;
5416 } else {
5417 vm_object_unlock(cur_object);
5418 cur_object = object;
5419 }
5420 }
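/*
 * At this point "object" is the object that now holds the freshly
 * decompressed page "m" (and "m_object" matches), which is what
 * the FastPmapEnter path expects.
 */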
5421 goto FastPmapEnter;
5422 }
5423 /*
5424 * existence map present and indicates
5425 * that the pager doesn't have this page
5426 */
5427 }
5428 if (cur_object->shadow == VM_OBJECT_NULL ||
5429 resilient_media_retry) {
5430 /*
5431 * Zero fill fault. Page gets
5432 * inserted into the original object.
5433 */
5434 if (cur_object->shadow_severed ||
5435 VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5436 cur_object == compressor_object ||
5437 is_kernel_object(cur_object)) {
5438 if (object != cur_object) {
5439 vm_object_unlock(cur_object);
5440 }
5441 vm_object_unlock(object);
5442
5443 vm_map_unlock_read(map);
5444 if (real_map != map) {
5445 vm_map_unlock(real_map);
5446 }
5447 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5448 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5449 }
5450
5451 if (cur_object->shadow_severed) {
5452 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5453 }
5454
5455 kr = KERN_MEMORY_ERROR;
5456 goto done;
5457 }
5458 if (cur_object != object) {
5459 vm_object_unlock(cur_object);
5460
5461 cur_object = object;
5462 }
5463 if (object_lock_type == OBJECT_LOCK_SHARED) {
5464 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5465
5466 if (vm_object_lock_upgrade(object) == FALSE) {
5467 /*
5468 * couldn't upgrade so do a full retry on the fault
5469 * since we dropped the object lock which
5470 * could allow another thread to insert
5471 * a page at this offset
5472 */
5473 vm_map_unlock_read(map);
5474 if (real_map != map) {
5475 vm_map_unlock(real_map);
5476 }
5477
5478 goto RetryFault;
5479 }
5480 }
5481 if (!object->internal) {
5482 panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5483 }
5484 #if MACH_ASSERT
5485 if (resilient_media_retry &&
5486 vm_fault_resilient_media_inject_error3_rate != 0 &&
5487 (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5488 /* inject an error */
5489 m_object = NULL;
5490 break;
5491 }
5492 #endif /* MACH_ASSERT */
5493 m = vm_page_alloc(object, vm_object_trunc_page(offset));
5494 m_object = NULL;
5495
5496 if (m == VM_PAGE_NULL) {
5497 /*
5498 * no free page currently available...
5499 * must take the slow path
5500 */
5501 break;
5502 }
5503 m_object = object;
5504
5505 if ((prot & VM_PROT_WRITE) &&
5506 !(fault_type & VM_PROT_WRITE) &&
5507 object->vo_copy != VM_OBJECT_NULL) {
5508 /*
5509 * This is not a write fault and
5510 * we might have a copy-on-write
5511 * obligation to honor (copy object or
5512 * "needs_copy" map entry), so do not
5513 * give write access yet.
5514 * We'll need to catch the first write
5515 * to resolve the copy-on-write by
5516 * pushing this page to a copy object
5517 * or making a shadow object.
5518 */
5519 if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5520 prot &= ~VM_PROT_WRITE;
5521 } else {
5522 assert(fault_info.cs_bypass);
5523 }
5524 }
5525 assertf(!((fault_type & VM_PROT_WRITE) && object->vo_copy),
5526 "map %p va 0x%llx wrong path for write fault (fault_type 0x%x) on object %p with copy %p\n",
5527 map, (uint64_t)vaddr, fault_type, object, object->vo_copy);
5528
5529 vm_object_t saved_copy_object;
5530 uint32_t saved_copy_version;
5531 saved_copy_object = object->vo_copy;
5532 saved_copy_version = object->vo_copy_version;
5533
5534 /*
5535 * Zeroing the page and entering it into the pmap
5536 * represents a significant amount of the zero fill fault handler's work.
5537 *
5538 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5539 * now that we've inserted the page into the vm object.
5540 * Before dropping the lock, we need to check protection bits and set the
5541 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5542 * zero it, and do the pmap enter. We'll need to reacquire the lock
5543 * to clear the busy bit and wake up any waiters.
5544 */
5545 vm_fault_cs_clear(m);
5546 m->vmp_pmapped = TRUE;
5547 if (map->no_zero_fill) {
5548 type_of_fault = DBG_NZF_PAGE_FAULT;
5549 } else {
5550 type_of_fault = DBG_ZERO_FILL_FAULT;
5551 }
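/*
 * For a "no_zero_fill" map, account for this as a non-zero-fill
 * (NZF) fault and deliberately skip the vm_page_zero_fill() call
 * below: the freshly grabbed page is left unzeroed.
 */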
5552 {
5553 pmap_t destination_pmap;
5554 vm_map_offset_t destination_pmap_vaddr;
5555 vm_prot_t enter_fault_type;
5556 if (caller_pmap) {
5557 destination_pmap = caller_pmap;
5558 destination_pmap_vaddr = caller_pmap_addr;
5559 } else {
5560 destination_pmap = pmap;
5561 destination_pmap_vaddr = vaddr;
5562 }
5563 if (change_wiring) {
5564 enter_fault_type = VM_PROT_NONE;
5565 } else {
5566 enter_fault_type = caller_prot;
5567 }
5568 assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5569 kr = vm_fault_enter_prepare(m,
5570 destination_pmap,
5571 destination_pmap_vaddr,
5572 &prot,
5573 caller_prot,
5574 fault_page_size,
5575 fault_phys_offset,
5576 change_wiring,
5577 enter_fault_type,
5578 &fault_info,
5579 &type_of_fault,
5580 &page_needs_data_sync);
5581 if (kr != KERN_SUCCESS) {
5582 goto zero_fill_cleanup;
5583 }
5584
5585 if (object_is_contended) {
5586 /*
5587 * At this point the page is in the vm object, but not on a paging queue.
5588 * Since it's accessible to another thread but its contents are invalid
5589 * (it hasn't been zeroed) mark it busy before dropping the object lock.
5590 */
5591 m->vmp_busy = TRUE;
5592 vm_object_paging_begin(object); /* keep object alive */
5593 vm_object_unlock(object);
5594 }
5595 if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5596 /*
5597 * Now zero fill page...
5598 * the page is probably going to
5599 * be written soon, so don't bother
5600 * to clear the modified bit
5601 *
5602 * NOTE: This code holds the map
5603 * lock across the zero fill.
5604 */
5605 vm_page_zero_fill(m);
5606 counter_inc(&vm_statistics_zero_fill_count);
5607 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5608 }
5609
5610 if (object_is_contended) {
5611 /*
5612 * It's not safe to do the pmap_enter() without holding
5613 * the object lock because its "vo_copy" could change.
5614 */
5615 object_is_contended = false; /* get out of that code path */
5616
5617 vm_object_lock(object);
5618 vm_object_paging_end(object);
5619 if (object->vo_copy != saved_copy_object ||
5620 object->vo_copy_version != saved_copy_version) {
5621 /*
5622 * The COPY_DELAY copy-on-write situation for
5623 * this VM object has changed while it was
5624 * unlocked, so do not grant write access to
5625 * this page.
5626 * The write access will fault again and we'll
5627 * resolve the copy-on-write then.
5628 */
5629 if (!pmap_has_prot_policy(pmap,
5630 fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE,
5631 prot)) {
5632 /* the pmap layer is OK with changing the PTE's prot */
5633 prot &= ~VM_PROT_WRITE;
5634 } else {
5635 /* we should not do CoW on pmap_has_prot_policy mappings */
5636 panic("map %p va 0x%llx obj %p,%u saved %p,%u: unexpected CoW",
5637 map, (uint64_t)vaddr,
5638 object, object->vo_copy_version,
5639 saved_copy_object, saved_copy_version);
5640 }
5641 }
5642 }
5643
5644 if (page_needs_data_sync) {
5645 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5646 }
5647
5648 if (top_object != VM_OBJECT_NULL) {
5649 need_retry_ptr = &need_retry;
5650 } else {
5651 need_retry_ptr = NULL;
5652 }
5653 if (fault_info.fi_xnu_user_debug &&
5654 !object->code_signed) {
5655 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
5656 }
5657 if (object_is_contended) {
5658 panic("object_is_contended");
5659 kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5660 fault_page_size, fault_phys_offset,
5661 m, &prot, caller_prot, enter_fault_type, wired,
5662 fault_info.pmap_options, need_retry_ptr);
5663 vm_object_lock(object);
5664 assertf(!((prot & VM_PROT_WRITE) && object->vo_copy),
5665 "prot 0x%x object %p copy %p\n",
5666 prot, object, object->vo_copy);
5667 } else {
5668 kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5669 fault_page_size, fault_phys_offset,
5670 m, &prot, caller_prot, enter_fault_type, wired,
5671 fault_info.pmap_options, need_retry_ptr, &object_lock_type);
5672 }
5673 }
5674 zero_fill_cleanup:
5675 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5676 (object->purgable == VM_PURGABLE_DENY ||
5677 object->purgable == VM_PURGABLE_NONVOLATILE ||
5678 object->purgable == VM_PURGABLE_VOLATILE)) {
5679 vm_page_lockspin_queues();
5680 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5681 vm_fault_enqueue_throttled_locked(m);
5682 }
5683 vm_page_unlock_queues();
5684 }
5685 vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr);
5686
5687 if (__improbable(rtfault &&
5688 !m->vmp_realtime &&
5689 vm_pageout_protect_realtime)) {
5690 vm_page_lock_queues();
5691 if (!m->vmp_realtime) {
5692 m->vmp_realtime = true;
5693 vm_page_realtime_count++;
5694 }
5695 vm_page_unlock_queues();
5696 }
5697 vm_fault_complete(
5698 map,
5699 real_map,
5700 object,
5701 m_object,
5702 m,
5703 offset,
5704 trace_real_vaddr,
5705 &fault_info,
5706 caller_prot,
5707 real_vaddr,
5708 type_of_fault,
5709 need_retry,
5710 kr,
5711 physpage_p,
5712 prot,
5713 top_object,
5714 need_collapse,
5715 cur_offset,
5716 fault_type,
5717 &written_on_object,
5718 &written_on_pager,
5719 &written_on_offset);
5720 top_object = VM_OBJECT_NULL;
5721 if (need_retry == TRUE) {
5722 /*
5723 * vm_fault_enter couldn't complete the PMAP_ENTER...
5724 * at this point we don't hold any locks so it's safe
5725 * to ask the pmap layer to expand the page table to
5726 * accommodate this mapping... once expanded, we'll
5727 * re-drive the fault which should result in vm_fault_enter
5728 * being able to successfully enter the mapping this time around
5729 */
5730 (void)pmap_enter_options(
5731 pmap, vaddr, 0, 0, 0, 0, 0,
5732 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5733
5734 need_retry = FALSE;
5735 goto RetryFault;
5736 }
5737 goto done;
5738 }
5739 /*
5740 * On to the next level in the shadow chain
5741 */
5742 cur_offset += cur_object->vo_shadow_offset;
5743 new_object = cur_object->shadow;
5744 fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
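/*
 * fault_phys_offset is the offset of the faulting sub-page chunk
 * within its PAGE_SIZE VM page; it can only be non-zero when the
 * map's page size is smaller than PAGE_SIZE (4K mappings of 16K
 * VM pages).
 */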
5745
5746 /*
5747 * take the new_object's lock with the indicated state
5748 */
5749 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5750 vm_object_lock_shared(new_object);
5751 } else {
5752 vm_object_lock(new_object);
5753 }
5754
5755 if (cur_object != object) {
5756 vm_object_unlock(cur_object);
5757 }
5758
5759 cur_object = new_object;
5760
5761 continue;
5762 }
5763 }
5764 /*
5765 * Cleanup from fast fault failure. Drop any object
5766 * lock other than original and drop map lock.
5767 */
5768 if (object != cur_object) {
5769 vm_object_unlock(cur_object);
5770 }
5771
5772 /*
5773 * must own the object lock exclusively at this point
5774 */
5775 if (object_lock_type == OBJECT_LOCK_SHARED) {
5776 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5777
5778 if (vm_object_lock_upgrade(object) == FALSE) {
5779 /*
5780 * couldn't upgrade, so explicitly
5781 * take the lock exclusively
5782 * no need to retry the fault at this
5783 * point since "vm_fault_page" will
5784 * completely re-evaluate the state
5785 */
5786 vm_object_lock(object);
5787 }
5788 }
5789
5790 handle_copy_delay:
5791 vm_map_unlock_read(map);
5792 if (real_map != map) {
5793 vm_map_unlock(real_map);
5794 }
5795
5796 if (__improbable(object == compressor_object ||
5797 is_kernel_object(object))) {
5798 /*
5799 * These objects are explicitly managed and populated by the
5800 * kernel. The virtual ranges backed by these objects should
5801 * either have wired pages or "holes" that are not supposed to
5802 * be accessed at all until they get explicitly populated.
5803 * We should never have to resolve a fault on a mapping backed
5804 * by one of these VM objects and providing a zero-filled page
5805 * would be wrong here, so let's fail the fault and let the
5806 * caller crash or recover.
5807 */
5808 vm_object_unlock(object);
5809 kr = KERN_MEMORY_ERROR;
5810 goto done;
5811 }
5812
5813 resilient_media_ref_transfer = false;
5814 if (resilient_media_retry) {
5815 /*
5816 * We could get here if we failed to get a free page
5817 * to zero-fill and had to take the slow path again.
5818 * Reset our "recovery-from-failed-media" state.
5819 */
5820 assert(resilient_media_object != VM_OBJECT_NULL);
5821 assert(resilient_media_offset != (vm_object_offset_t)-1);
5822 /* release our extra reference on failed object */
5823 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
5824 if (object == resilient_media_object) {
5825 /*
5826 * We're holding "object"'s lock, so we can't release
5827 * our extra reference at this point.
5828 * We need an extra reference on "object" anyway
5829 * (see below), so let's just transfer this reference.
5830 */
5831 resilient_media_ref_transfer = true;
5832 } else {
5833 vm_object_deallocate(resilient_media_object);
5834 }
5835 resilient_media_object = VM_OBJECT_NULL;
5836 resilient_media_offset = (vm_object_offset_t)-1;
5837 resilient_media_retry = false;
5838 vm_fault_resilient_media_abort2++;
5839 }
5840
5841 /*
5842 * Make a reference to this object to
5843 * prevent its disposal while we are messing with
5844 * it. Once we have the reference, the map is free
5845 * to be diddled. Since objects reference their
5846 * shadows (and copies), they will stay around as well.
5847 */
5848 if (resilient_media_ref_transfer) {
5849 /* we already have an extra reference on this object */
5850 resilient_media_ref_transfer = false;
5851 } else {
5852 vm_object_reference_locked(object);
5853 }
5854 vm_object_paging_begin(object);
5855
5856 set_thread_pagein_error(cthread, 0);
5857 error_code = 0;
5858
5859 result_page = VM_PAGE_NULL;
5860 kr = vm_fault_page(object, offset, fault_type,
5861 (change_wiring && !wired),
5862 FALSE, /* page not looked up */
5863 &prot, &result_page, &top_page,
5864 &type_of_fault,
5865 &error_code, map->no_zero_fill,
5866 &fault_info);
5867
5868 /*
5869 * if kr != VM_FAULT_SUCCESS, then the paging reference
5870 * has been dropped and the object unlocked... the ref_count
5871 * is still held
5872 *
5873 * if kr == VM_FAULT_SUCCESS, then the paging reference
5874 * is still held along with the ref_count on the original object
5875 *
5876 * the object is returned locked with a paging reference
5877 *
5878 * if top_page != NULL, then it's BUSY and the
5879 * object it belongs to has a paging reference
5880 * but is returned unlocked
5881 */
5882 if (kr != VM_FAULT_SUCCESS &&
5883 kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
5884 if (kr == VM_FAULT_MEMORY_ERROR &&
5885 fault_info.resilient_media) {
5886 assertf(object->internal, "object %p", object);
5887 /*
5888 * This fault failed but the mapping was
5889 * "media resilient", so we'll retry the fault in
5890 * recovery mode to get a zero-filled page in the
5891 * top object.
5892 * Keep the reference on the failing object so
5893 * that we can check that the mapping is still
5894 * pointing to it when we retry the fault.
5895 */
5896 // printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
5897 assert(!resilient_media_retry); /* no double retry */
5898 assert(resilient_media_object == VM_OBJECT_NULL);
5899 assert(resilient_media_offset == (vm_object_offset_t)-1);
5900 resilient_media_retry = true;
5901 resilient_media_object = object;
5902 resilient_media_offset = offset;
5903 // printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
5904 vm_fault_resilient_media_initiate++;
5905 goto RetryFault;
5906 } else {
5907 /*
5908 * we didn't succeed, lose the object reference
5909 * immediately.
5910 */
5911 vm_object_deallocate(object);
5912 object = VM_OBJECT_NULL; /* no longer valid */
5913 }
5914
5915 /*
5916 * See why we failed, and take corrective action.
5917 */
5918 switch (kr) {
5919 case VM_FAULT_MEMORY_SHORTAGE:
5920 if (vm_page_wait((change_wiring) ?
5921 THREAD_UNINT :
5922 THREAD_ABORTSAFE)) {
5923 goto RetryFault;
5924 }
5925 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_MEMORY_SHORTAGE), 0 /* arg */);
5926 OS_FALLTHROUGH;
5927 case VM_FAULT_INTERRUPTED:
5928 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
5929 kr = KERN_ABORTED;
5930 goto done;
5931 case VM_FAULT_RETRY:
5932 goto RetryFault;
5933 case VM_FAULT_MEMORY_ERROR:
5934 if (error_code) {
5935 kr = error_code;
5936 } else {
5937 kr = KERN_MEMORY_ERROR;
5938 }
5939 goto done;
5940 default:
5941 panic("vm_fault: unexpected error 0x%x from "
5942 "vm_fault_page()\n", kr);
5943 }
5944 }
5945 m = result_page;
5946 m_object = NULL;
5947
5948 if (m != VM_PAGE_NULL) {
5949 m_object = VM_PAGE_OBJECT(m);
5950 assert((change_wiring && !wired) ?
5951 (top_page == VM_PAGE_NULL) :
5952 ((top_page == VM_PAGE_NULL) == (m_object == object)));
5953 }
5954
5955 /*
5956 * What to do with the resulting page from vm_fault_page
5957 * if it doesn't get entered into the physical map:
5958 */
5959 #define RELEASE_PAGE(m) \
5960 MACRO_BEGIN \
5961 PAGE_WAKEUP_DONE(m); \
5962 if ( !VM_PAGE_PAGEABLE(m)) { \
5963 vm_page_lockspin_queues(); \
5964 if ( !VM_PAGE_PAGEABLE(m)) \
5965 vm_page_activate(m); \
5966 vm_page_unlock_queues(); \
5967 } \
5968 MACRO_END
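/*
 * RELEASE_PAGE() wakes up any waiters on the busy page and, if the
 * page is not already on a pageable queue, activates it so it is
 * not left stranded off the paging queues.
 */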
5969
5970
5971 object_locks_dropped = FALSE;
5972 /*
5973 * We must verify that the maps have not changed
5974 * since our last lookup. vm_map_verify() needs the
5975 * map lock (shared) but we are holding object locks.
5976 * So we do a try_lock() first and, if that fails, we
5977 * drop the object locks and go in for the map lock again.
5978 */
5979 if (m != VM_PAGE_NULL) {
5980 old_copy_object = m_object->vo_copy;
5981 old_copy_version = m_object->vo_copy_version;
5982 } else {
5983 old_copy_object = VM_OBJECT_NULL;
5984 old_copy_version = 0;
5985 }
5986 if (!vm_map_try_lock_read(original_map)) {
5987 if (m != VM_PAGE_NULL) {
5988 vm_object_unlock(m_object);
5989 } else {
5990 vm_object_unlock(object);
5991 }
5992
5993 object_locks_dropped = TRUE;
5994
5995 vm_map_lock_read(original_map);
5996 }
5997
5998 if ((map != original_map) || !vm_map_verify(map, &version)) {
5999 if (object_locks_dropped == FALSE) {
6000 if (m != VM_PAGE_NULL) {
6001 vm_object_unlock(m_object);
6002 } else {
6003 vm_object_unlock(object);
6004 }
6005
6006 object_locks_dropped = TRUE;
6007 }
6008
6009 /*
6010 * no object locks are held at this point
6011 */
6012 vm_object_t retry_object;
6013 vm_object_offset_t retry_offset;
6014 vm_prot_t retry_prot;
6015
6016 /*
6017 * To avoid trying to write_lock the map while another
6018 * thread has it read_locked (in vm_map_pageable), we
6019 * do not try for write permission. If the page is
6020 * still writable, we will get write permission. If it
6021 * is not, or has been marked needs_copy, we enter the
6022 * mapping without write permission, and will merely
6023 * take another fault.
6024 */
6025 map = original_map;
6026
6027 kr = vm_map_lookup_and_lock_object(&map, vaddr,
6028 fault_type & ~VM_PROT_WRITE,
6029 OBJECT_LOCK_EXCLUSIVE, &version,
6030 &retry_object, &retry_offset, &retry_prot,
6031 &wired,
6032 &fault_info,
6033 &real_map,
6034 NULL);
6035 pmap = real_map->pmap;
6036
6037 if (kr != KERN_SUCCESS) {
6038 vm_map_unlock_read(map);
6039
6040 if (m != VM_PAGE_NULL) {
6041 assert(VM_PAGE_OBJECT(m) == m_object);
6042
6043 /*
6044 * retake the lock so that
6045 * we can drop the paging reference
6046 * in vm_fault_cleanup and do the
6047 * PAGE_WAKEUP_DONE in RELEASE_PAGE
6048 */
6049 vm_object_lock(m_object);
6050
6051 RELEASE_PAGE(m);
6052
6053 vm_fault_cleanup(m_object, top_page);
6054 } else {
6055 /*
6056 * retake the lock so that
6057 * we can drop the paging reference
6058 * in vm_fault_cleanup
6059 */
6060 vm_object_lock(object);
6061
6062 vm_fault_cleanup(object, top_page);
6063 }
6064 vm_object_deallocate(object);
6065
6066 if (kr == KERN_INVALID_ADDRESS) {
6067 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
6068 }
6069 goto done;
6070 }
6071 vm_object_unlock(retry_object);
6072
6073 if ((retry_object != object) || (retry_offset != offset)) {
6074 vm_map_unlock_read(map);
6075 if (real_map != map) {
6076 vm_map_unlock(real_map);
6077 }
6078
6079 if (m != VM_PAGE_NULL) {
6080 assert(VM_PAGE_OBJECT(m) == m_object);
6081
6082 /*
6083 * retake the lock so that
6084 * we can drop the paging reference
6085 * in vm_fault_cleanup and do the
6086 * PAGE_WAKEUP_DONE in RELEASE_PAGE
6087 */
6088 vm_object_lock(m_object);
6089
6090 RELEASE_PAGE(m);
6091
6092 vm_fault_cleanup(m_object, top_page);
6093 } else {
6094 /*
6095 * retake the lock so that
6096 * we can drop the paging reference
6097 * in vm_fault_cleanup
6098 */
6099 vm_object_lock(object);
6100
6101 vm_fault_cleanup(object, top_page);
6102 }
6103 vm_object_deallocate(object);
6104
6105 goto RetryFault;
6106 }
6107 /*
6108 * Check whether the protection has changed or the object
6109 * has been copied while we left the map unlocked.
6110 */
6111 if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
6112 /* If the pmap layer cares, pass the full set. */
6113 prot = retry_prot;
6114 } else {
6115 prot &= retry_prot;
6116 }
6117 }
6118
6119 if (object_locks_dropped == TRUE) {
6120 if (m != VM_PAGE_NULL) {
6121 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6122 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6123 vm_object_lock(m_object);
6124 } else {
6125 vm_object_lock(object);
6126 }
6127
6128 object_locks_dropped = FALSE;
6129 }
6130
6131 if ((prot & VM_PROT_WRITE) &&
6132 m != VM_PAGE_NULL &&
6133 (m_object->vo_copy != old_copy_object ||
6134 m_object->vo_copy_version != old_copy_version)) {
6135 /*
6136 * The copy object changed while the top-level object
6137 * was unlocked, so take away write permission.
6138 */
6139 assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot));
6140 prot &= ~VM_PROT_WRITE;
6141 }
6142
6143 if (!need_copy &&
6144 !fault_info.no_copy_on_read &&
6145 m != VM_PAGE_NULL &&
6146 VM_PAGE_OBJECT(m) != object &&
6147 !VM_PAGE_OBJECT(m)->pager_trusted &&
6148 vm_protect_privileged_from_untrusted &&
6149 !VM_PAGE_OBJECT(m)->code_signed &&
6150 current_proc_is_privileged()) {
6151 /*
6152 * We found the page we want in an "untrusted" VM object
6153 * down the shadow chain. Since the target is "privileged"
6154 * we want to perform a copy-on-read of that page, so that the
6155 * mapped object gets a stable copy and does not have to
6156 * rely on the "untrusted" object to provide the same
6157 * contents if the page gets reclaimed and has to be paged
6158 * in again later on.
6159 *
6160 * Special case: if the mapping is executable and the untrusted
6161 * object is code-signed and the process is "cs_enforced", we
6162 * do not copy-on-read because that would break code-signing
6163 * enforcement expectations (an executable page must belong
6164 * to a code-signed object) and we can rely on code-signing
6165 * to re-validate the page if it gets evicted and paged back in.
6166 */
6167 // printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
6168 vm_copied_on_read++;
6169 need_copy_on_read = TRUE;
6170 need_copy = TRUE;
6171 } else {
6172 need_copy_on_read = FALSE;
6173 }
6174
6175 /*
6176 * If we want to wire down this page, but no longer have
6177 * adequate permissions, we must start all over.
6178 * If we decided to copy-on-read, we must also start all over.
6179 */
6180 if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
6181 need_copy_on_read) {
6182 vm_map_unlock_read(map);
6183 if (real_map != map) {
6184 vm_map_unlock(real_map);
6185 }
6186
6187 if (m != VM_PAGE_NULL) {
6188 assert(VM_PAGE_OBJECT(m) == m_object);
6189
6190 RELEASE_PAGE(m);
6191
6192 vm_fault_cleanup(m_object, top_page);
6193 } else {
6194 vm_fault_cleanup(object, top_page);
6195 }
6196
6197 vm_object_deallocate(object);
6198
6199 goto RetryFault;
6200 }
6201 if (m != VM_PAGE_NULL) {
6202 /*
6203 * Put this page into the physical map.
6204 * We had to do the unlock above because pmap_enter
6205 * may cause other faults. The page may be on
6206 * the pageout queues. If the pageout daemon comes
6207 * across the page, it will remove it from the queues.
6208 */
6209 if (fault_page_size < PAGE_SIZE) {
6210 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
6211 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
6212 fault_phys_offset < PAGE_SIZE),
6213 "0x%llx\n", (uint64_t)fault_phys_offset);
6214 } else {
6215 assertf(fault_phys_offset == 0,
6216 "0x%llx\n", (uint64_t)fault_phys_offset);
6217 }
6218 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6219 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6220 if (caller_pmap) {
6221 kr = vm_fault_enter(m,
6222 caller_pmap,
6223 caller_pmap_addr,
6224 fault_page_size,
6225 fault_phys_offset,
6226 prot,
6227 caller_prot,
6228 wired,
6229 change_wiring,
6230 wire_tag,
6231 &fault_info,
6232 NULL,
6233 &type_of_fault,
6234 &object_lock_type);
6235 } else {
6236 kr = vm_fault_enter(m,
6237 pmap,
6238 vaddr,
6239 fault_page_size,
6240 fault_phys_offset,
6241 prot,
6242 caller_prot,
6243 wired,
6244 change_wiring,
6245 wire_tag,
6246 &fault_info,
6247 NULL,
6248 &type_of_fault,
6249 &object_lock_type);
6250 }
6251 assert(VM_PAGE_OBJECT(m) == m_object);
6252
6253 {
6254 int event_code = 0;
6255
6256 if (m_object->internal) {
6257 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6258 } else if (m_object->object_is_shared_cache) {
6259 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6260 } else {
6261 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6262 }
6263
6264 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid());
6265 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid());
6266
6267 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
6268 }
6269 if (kr != KERN_SUCCESS) {
6270 /* abort this page fault */
6271 vm_map_unlock_read(map);
6272 if (real_map != map) {
6273 vm_map_unlock(real_map);
6274 }
6275 PAGE_WAKEUP_DONE(m);
6276 vm_fault_cleanup(m_object, top_page);
6277 vm_object_deallocate(object);
6278 goto done;
6279 }
6280 if (physpage_p != NULL) {
6281 /* for vm_map_wire_and_extract() */
6282 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6283 if (prot & VM_PROT_WRITE) {
6284 vm_object_lock_assert_exclusive(m_object);
6285 m->vmp_dirty = TRUE;
6286 }
6287 }
6288 } else {
6289 vm_map_entry_t entry;
6290 vm_map_offset_t laddr;
6291 vm_map_offset_t ldelta, hdelta;
6292
6293 /*
6294 * do a pmap block mapping from the physical address
6295 * in the object
6296 */
6297
6298 if (real_map != map) {
6299 vm_map_unlock(real_map);
6300 }
6301
6302 if (original_map != map) {
6303 vm_map_unlock_read(map);
6304 vm_map_lock_read(original_map);
6305 map = original_map;
6306 }
6307 real_map = map;
6308
6309 laddr = vaddr;
6310 hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
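/*
 * ldelta/hdelta start out effectively unbounded and get clipped
 * below to the distance from "laddr" to the start/end of each map
 * entry we traverse, so the block mapping can never extend past
 * the smallest enclosing entry.
 */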
6311
6312 while (vm_map_lookup_entry(map, laddr, &entry)) {
6313 if (ldelta > (laddr - entry->vme_start)) {
6314 ldelta = laddr - entry->vme_start;
6315 }
6316 if (hdelta > (entry->vme_end - laddr)) {
6317 hdelta = entry->vme_end - laddr;
6318 }
6319 if (entry->is_sub_map) {
6320 laddr = ((laddr - entry->vme_start)
6321 + VME_OFFSET(entry));
6322 vm_map_lock_read(VME_SUBMAP(entry));
6323
6324 if (map != real_map) {
6325 vm_map_unlock_read(map);
6326 }
6327 if (entry->use_pmap) {
6328 vm_map_unlock_read(real_map);
6329 real_map = VME_SUBMAP(entry);
6330 }
6331 map = VME_SUBMAP(entry);
6332 } else {
6333 break;
6334 }
6335 }
6336
6337 if (vm_map_lookup_entry(map, laddr, &entry) &&
6338 (!entry->is_sub_map) &&
6339 (object != VM_OBJECT_NULL) &&
6340 (VME_OBJECT(entry) == object)) {
6341 uint16_t superpage;
6342
6343 if (!object->pager_created &&
6344 object->phys_contiguous &&
6345 VME_OFFSET(entry) == 0 &&
6346 (entry->vme_end - entry->vme_start == object->vo_size) &&
6347 VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6348 superpage = VM_MEM_SUPERPAGE;
6349 } else {
6350 superpage = 0;
6351 }
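/*
 * "superpage" is set only when the physically contiguous object
 * exactly backs the whole map entry and the entry is suitably
 * aligned, in which case the range below can be entered as a
 * single block mapping with the VM_MEM_SUPERPAGE attribute.
 */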
6352
6353 if (superpage && physpage_p) {
6354 /* for vm_map_wire_and_extract() */
6355 *physpage_p = (ppnum_t)
6356 ((((vm_map_offset_t)
6357 object->vo_shadow_offset)
6358 + VME_OFFSET(entry)
6359 + (laddr - entry->vme_start))
6360 >> PAGE_SHIFT);
6361 }
6362
6363 if (caller_pmap) {
6364 /*
6365 * Set up a block mapped area
6366 */
6367 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6368 kr = pmap_map_block_addr(caller_pmap,
6369 (addr64_t)(caller_pmap_addr - ldelta),
6370 (pmap_paddr_t)(((vm_map_offset_t) (object->vo_shadow_offset)) +
6371 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6372 (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6373 (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6374
6375 if (kr != KERN_SUCCESS) {
6376 goto cleanup;
6377 }
6378 } else {
6379 /*
6380 * Set up a block mapped area
6381 */
6382 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6383 kr = pmap_map_block_addr(real_map->pmap,
6384 (addr64_t)(vaddr - ldelta),
6385 (pmap_paddr_t)(((vm_map_offset_t)(object->vo_shadow_offset)) +
6386 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6387 (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6388 (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6389
6390 if (kr != KERN_SUCCESS) {
6391 goto cleanup;
6392 }
6393 }
6394 }
6395 }
6396
6397 /*
6398 * Success
6399 */
6400 kr = KERN_SUCCESS;
6401
6402 /*
6403 * TODO: could most of the done cases just use cleanup?
6404 */
6405 cleanup:
6406 /*
6407 * Unlock everything, and return
6408 */
6409 vm_map_unlock_read(map);
6410 if (real_map != map) {
6411 vm_map_unlock(real_map);
6412 }
6413
6414 if (m != VM_PAGE_NULL) {
6415 if (__improbable(rtfault &&
6416 !m->vmp_realtime &&
6417 vm_pageout_protect_realtime)) {
6418 vm_page_lock_queues();
6419 if (!m->vmp_realtime) {
6420 m->vmp_realtime = true;
6421 vm_page_realtime_count++;
6422 }
6423 vm_page_unlock_queues();
6424 }
6425 assert(VM_PAGE_OBJECT(m) == m_object);
6426
6427 if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6428 vm_object_paging_begin(m_object);
6429
6430 assert(written_on_object == VM_OBJECT_NULL);
6431 written_on_object = m_object;
6432 written_on_pager = m_object->pager;
6433 written_on_offset = m_object->paging_offset + m->vmp_offset;
6434 }
6435 PAGE_WAKEUP_DONE(m);
6436
6437 vm_fault_cleanup(m_object, top_page);
6438 } else {
6439 vm_fault_cleanup(object, top_page);
6440 }
6441
6442 vm_object_deallocate(object);
6443
6444 #undef RELEASE_PAGE
6445
6446 done:
6447 thread_interrupt_level(interruptible_state);
6448
6449 if (resilient_media_object != VM_OBJECT_NULL) {
6450 assert(resilient_media_retry);
6451 assert(resilient_media_offset != (vm_object_offset_t)-1);
6452 /* release extra reference on failed object */
6453 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6454 vm_object_deallocate(resilient_media_object);
6455 resilient_media_object = VM_OBJECT_NULL;
6456 resilient_media_offset = (vm_object_offset_t)-1;
6457 resilient_media_retry = false;
6458 vm_fault_resilient_media_release++;
6459 }
6460 assert(!resilient_media_retry);
6461
6462 /*
6463 * Only I/O throttle on faults which cause a pagein/swapin.
6464 */
6465 if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6466 throttle_lowpri_io(1);
6467 } else {
6468 if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6469 if ((throttle_delay = vm_page_throttled(TRUE))) {
6470 if (vm_debug_events) {
6471 if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6472 VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6473 } else if (type_of_fault == DBG_COW_FAULT) {
6474 VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6475 } else {
6476 VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6477 }
6478 }
6479 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
6480 }
6481 }
6482 }
6483
6484 if (written_on_object) {
6485 vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6486
6487 vm_object_lock(written_on_object);
6488 vm_object_paging_end(written_on_object);
6489 vm_object_unlock(written_on_object);
6490
6491 written_on_object = VM_OBJECT_NULL;
6492 }
6493
6494 if (rtfault) {
6495 vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6496 }
6497
6498 KDBG_RELEASE(
6499 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
6500 ((uint64_t)trace_vaddr >> 32),
6501 trace_vaddr,
6502 kr,
6503 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault));
6504
6505 if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6506 DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6507 }
6508
6509 return kr;
6510 }
6511
6512 /*
6513 * vm_fault_wire:
6514 *
6515 * Wire down a range of virtual addresses in a map.
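 * The entry is expected to be marked "in_transition" by the caller.
 * Each page in the range is wired via vm_fault_wire_fast(), falling
 * back to vm_fault_internal() for anything but the common case; on
 * failure, the pages wired so far are unwired before returning.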
6516 */
6517 kern_return_t
6518 vm_fault_wire(
6519 vm_map_t map,
6520 vm_map_entry_t entry,
6521 vm_prot_t prot,
6522 vm_tag_t wire_tag,
6523 pmap_t pmap,
6524 vm_map_offset_t pmap_addr,
6525 ppnum_t *physpage_p)
6526 {
6527 vm_map_offset_t va;
6528 vm_map_offset_t end_addr = entry->vme_end;
6529 kern_return_t rc;
6530 vm_map_size_t effective_page_size;
6531
6532 assert(entry->in_transition);
6533
6534 if (!entry->is_sub_map &&
6535 VME_OBJECT(entry) != VM_OBJECT_NULL &&
6536 VME_OBJECT(entry)->phys_contiguous) {
6537 return KERN_SUCCESS;
6538 }
6539
6540 /*
6541 * Inform the physical mapping system that the
6542 * range of addresses may not fault, so that
6543 * page tables and such can be locked down as well.
6544 */
6545
6546 pmap_pageable(pmap, pmap_addr,
6547 pmap_addr + (end_addr - entry->vme_start), FALSE);
6548
6549 /*
6550 * We simulate a fault to get the page and enter it
6551 * in the physical map.
6552 */
6553
6554 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6555 for (va = entry->vme_start;
6556 va < end_addr;
6557 va += effective_page_size) {
6558 rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6559 pmap_addr + (va - entry->vme_start),
6560 physpage_p);
6561 if (rc != KERN_SUCCESS) {
6562 rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
6563 ((pmap == kernel_pmap)
6564 ? THREAD_UNINT
6565 : THREAD_ABORTSAFE),
6566 pmap,
6567 (pmap_addr +
6568 (va - entry->vme_start)),
6569 physpage_p);
6570 DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6571 }
6572
6573 if (rc != KERN_SUCCESS) {
6574 struct vm_map_entry tmp_entry = *entry;
6575
6576 /* unwire wired pages */
6577 tmp_entry.vme_end = va;
6578 vm_fault_unwire(map, &tmp_entry, FALSE,
6579 pmap, pmap_addr, tmp_entry.vme_end);
6580
6581 return rc;
6582 }
6583 }
6584 return KERN_SUCCESS;
6585 }
6586
6587 /*
6588 * vm_fault_unwire:
6589 *
6590 * Unwire a range of virtual addresses in a map.
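 * If "deallocate" is TRUE the underlying pages are disconnected and
 * freed; otherwise they are merely unwired. Entries backed by
 * phys_contiguous objects are skipped, since vm_fault_wire() never
 * wired them in the first place.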
6591 */
6592 void
6593 vm_fault_unwire(
6594 vm_map_t map,
6595 vm_map_entry_t entry,
6596 boolean_t deallocate,
6597 pmap_t pmap,
6598 vm_map_offset_t pmap_addr,
6599 vm_map_offset_t end_addr)
6600 {
6601 vm_map_offset_t va;
6602 vm_object_t object;
6603 struct vm_object_fault_info fault_info = {};
6604 unsigned int unwired_pages;
6605 vm_map_size_t effective_page_size;
6606
6607 object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6608
6609 /*
6610 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6611 * do anything since such memory is wired by default. So we don't have
6612 * anything to undo here.
6613 */
6614
6615 if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6616 return;
6617 }
6618
6619 fault_info.interruptible = THREAD_UNINT;
6620 fault_info.behavior = entry->behavior;
6621 fault_info.user_tag = VME_ALIAS(entry);
6622 if (entry->iokit_acct ||
6623 (!entry->is_sub_map && !entry->use_pmap)) {
6624 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6625 }
6626 fault_info.lo_offset = VME_OFFSET(entry);
6627 fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6628 fault_info.no_cache = entry->no_cache;
6629 fault_info.stealth = TRUE;
6630 if (entry->vme_xnu_user_debug) {
6631 /*
6632 * Modified code-signed executable region: wired pages must
6633 * have been copied, so they should be XNU_USER_DEBUG rather
6634 * than XNU_USER_EXEC.
6635 */
6636 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6637 }
6638
6639 unwired_pages = 0;
6640
6641 /*
6642 * Since the pages are wired down, we must be able to
6643 * get their mappings from the physical map system.
6644 */
6645
6646 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6647 for (va = entry->vme_start;
6648 va < end_addr;
6649 va += effective_page_size) {
6650 if (object == VM_OBJECT_NULL) {
6651 if (pmap) {
6652 pmap_change_wiring(pmap,
6653 pmap_addr + (va - entry->vme_start), FALSE);
6654 }
6655 (void) vm_fault(map, va, VM_PROT_NONE,
6656 TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6657 } else {
6658 vm_prot_t prot;
6659 vm_page_t result_page;
6660 vm_page_t top_page;
6661 vm_object_t result_object;
6662 vm_fault_return_t result;
6663
6664 /* cap cluster size at maximum UPL size */
6665 upl_size_t cluster_size;
6666 if (os_sub_overflow(end_addr, va, &cluster_size)) {
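/*
 * (end_addr - va) doesn't fit in 32 bits, so fall back to
 * the largest page-aligned upl_size_t value.
 */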
6667 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6668 }
6669 fault_info.cluster_size = cluster_size;
6670
6671 do {
6672 prot = VM_PROT_NONE;
6673
6674 vm_object_lock(object);
6675 vm_object_paging_begin(object);
6676 result_page = VM_PAGE_NULL;
6677 result = vm_fault_page(
6678 object,
6679 (VME_OFFSET(entry) +
6680 (va - entry->vme_start)),
6681 VM_PROT_NONE, TRUE,
6682 FALSE, /* page not looked up */
6683 &prot, &result_page, &top_page,
6684 (int *)0,
6685 NULL, map->no_zero_fill,
6686 &fault_info);
6687 } while (result == VM_FAULT_RETRY);
6688
6689 /*
6690 * If this was a mapping to a file on a device that has been forcibly
6691 * unmounted, then we won't get a page back from vm_fault_page(). Just
6692 * move on to the next one in case the remaining pages are mapped from
6693 * different objects. During a forced unmount, the object is terminated
6694 * so the alive flag will be false if this happens. A forced unmount will
6695 * occur when an external disk is unplugged before the user does an
6696 * eject, so we don't want to panic in that situation.
6697 */
6698
6699 if (result == VM_FAULT_MEMORY_ERROR) {
6700 if (!object->alive) {
6701 continue;
6702 }
6703 if (!object->internal && object->pager == NULL) {
6704 continue;
6705 }
6706 }
6707
6708 if (result == VM_FAULT_MEMORY_ERROR &&
6709 is_kernel_object(object)) {
6710 /*
6711 * This must have been allocated with
6712 * KMA_KOBJECT and KMA_VAONLY and there's
6713 * no physical page at this offset.
6714 * We're done (no page to free).
6715 */
6716 assert(deallocate);
6717 continue;
6718 }
6719
6720 if (result != VM_FAULT_SUCCESS) {
6721 panic("vm_fault_unwire: failure");
6722 }
6723
6724 result_object = VM_PAGE_OBJECT(result_page);
6725
6726 if (deallocate) {
6727 assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6728 vm_page_fictitious_addr);
6729 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6730 if (VM_PAGE_WIRED(result_page)) {
6731 unwired_pages++;
6732 }
6733 VM_PAGE_FREE(result_page);
6734 } else {
6735 if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) {
6736 pmap_change_wiring(pmap,
6737 pmap_addr + (va - entry->vme_start), FALSE);
6738 }
6739
6740
6741 if (VM_PAGE_WIRED(result_page)) {
6742 vm_page_lockspin_queues();
6743 vm_page_unwire(result_page, TRUE);
6744 vm_page_unlock_queues();
6745 unwired_pages++;
6746 }
6747 if (entry->zero_wired_pages) {
6748 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
6749 entry->zero_wired_pages = FALSE;
6750 }
6751
6752 PAGE_WAKEUP_DONE(result_page);
6753 }
6754 vm_fault_cleanup(result_object, top_page);
6755 }
6756 }
6757
6758 /*
6759 * Inform the physical mapping system that the range
6760 * of addresses may fault, so that page tables and
6761 * such may be unwired themselves.
6762 */
6763
6764 pmap_pageable(pmap, pmap_addr,
6765 pmap_addr + (end_addr - entry->vme_start), TRUE);
6766
6767 if (is_kernel_object(object)) {
6768 /*
6769 * Would like to make user_tag in vm_object_fault_info
6770 * vm_tag_t (unsigned short) but user_tag derives its value from
6771 * VME_ALIAS(entry) at a few places and VME_ALIAS, in turn, casts
6772 * to an _unsigned int_ which is used by non-fault_info paths throughout the
6773 * code at many places.
6774 *
6775 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
6776 */
6777 assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
6778 "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
6779 vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages), NULL);
6780 }
6781 }
6782
6783 /*
6784 * vm_fault_wire_fast:
6785 *
6786 * Handle common case of a wire down page fault at the given address.
6787 * If successful, the page is inserted into the associated physical map.
6788 * The map entry is passed in to avoid the overhead of a map lookup.
6789 *
6790 * NOTE: the given address should be truncated to the
6791 * proper page address.
6792 *
6793 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
6794 * a standard error specifying why the fault is fatal is returned.
6795 *
6796 * The map in question must be referenced, and remains so.
6797 * Caller has a read lock on the map.
6798 *
6799 * This is a stripped version of vm_fault() for wiring pages. Anything
6800 * other than the common case will return KERN_FAILURE, and the caller
6801 * is expected to call vm_fault().
6802 */
6803 static kern_return_t
6804 vm_fault_wire_fast(
6805 __unused vm_map_t map,
6806 vm_map_offset_t va,
6807 __unused vm_prot_t caller_prot,
6808 vm_tag_t wire_tag,
6809 vm_map_entry_t entry,
6810 pmap_t pmap,
6811 vm_map_offset_t pmap_addr,
6812 ppnum_t *physpage_p)
6813 {
6814 vm_object_t object;
6815 vm_object_offset_t offset;
6816 vm_page_t m;
6817 vm_prot_t prot;
6818 thread_t thread = current_thread();
6819 int type_of_fault;
6820 kern_return_t kr;
6821 vm_map_size_t fault_page_size;
6822 vm_map_offset_t fault_phys_offset;
6823 struct vm_object_fault_info fault_info = {};
6824 uint8_t object_lock_type = 0;
6825
6826 counter_inc(&vm_statistics_faults);
6827
6828 if (thread != THREAD_NULL) {
6829 counter_inc(&get_threadtask(thread)->faults);
6830 }
6831
6832 /*
6833 * Recovery actions
6834 */
6835
6836 #undef RELEASE_PAGE
6837 #define RELEASE_PAGE(m) { \
6838 PAGE_WAKEUP_DONE(m); \
6839 vm_page_lockspin_queues(); \
6840 vm_page_unwire(m, TRUE); \
6841 vm_page_unlock_queues(); \
6842 }
6843
6844
6845 #undef UNLOCK_THINGS
6846 #define UNLOCK_THINGS { \
6847 vm_object_paging_end(object); \
6848 vm_object_unlock(object); \
6849 }
6850
6851 #undef UNLOCK_AND_DEALLOCATE
6852 #define UNLOCK_AND_DEALLOCATE { \
6853 UNLOCK_THINGS; \
6854 vm_object_deallocate(object); \
6855 }
6856 /*
6857 * Give up and have caller do things the hard way.
6858 */
6859
6860 #define GIVE_UP { \
6861 UNLOCK_AND_DEALLOCATE; \
6862 return(KERN_FAILURE); \
6863 }
6864
6865
6866 /*
6867 * If this entry is not directly to a vm_object, bail out.
6868 */
6869 if (entry->is_sub_map) {
6870 assert(physpage_p == NULL);
6871 return KERN_FAILURE;
6872 }
6873
6874 /*
6875 * Find the backing store object and offset into it.
6876 */
6877
6878 object = VME_OBJECT(entry);
6879 offset = (va - entry->vme_start) + VME_OFFSET(entry);
6880 prot = entry->protection;
6881
6882 /*
6883 * Make a reference to this object to prevent its
6884 * disposal while we are messing with it.
6885 */
6886
6887 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
6888 vm_object_lock(object);
6889 vm_object_reference_locked(object);
6890 vm_object_paging_begin(object);
6891
6892 /*
6893 * INVARIANTS (through entire routine):
6894 *
6895 * 1) At all times, we must either have the object
6896 * lock or a busy page in some object to prevent
6897 * some other thread from trying to bring in
6898 * the same page.
6899 *
6900 * 2) Once we have a busy page, we must remove it from
6901 * the pageout queues, so that the pageout daemon
6902 * will not grab it away.
6903 *
6904 */
6905
6906 /*
6907 * Look for page in top-level object. If it's not there or
6908 * there's something going on, give up.
6909 */
6910 m = vm_page_lookup(object, vm_object_trunc_page(offset));
6911 if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
6912 (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
6913 GIVE_UP;
6914 }
6915 if (m->vmp_fictitious &&
6916 VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
6917 /*
6918 * Guard pages are fictitious pages and are never
6919 * entered into a pmap, so let's say it's been wired...
6920 */
6921 kr = KERN_SUCCESS;
6922 goto done;
6923 }
6924
6925 /*
6926 * Wire the page down now. All bail outs beyond this
6927 * point must unwire the page.
6928 */
6929
6930 vm_page_lockspin_queues();
6931 vm_page_wire(m, wire_tag, TRUE);
6932 vm_page_unlock_queues();
6933
6934 /*
6935 * Mark page busy for other threads.
6936 */
6937 assert(!m->vmp_busy);
6938 m->vmp_busy = TRUE;
6939 assert(!m->vmp_absent);
6940
6941 /*
6942 * Give up if the page is being written and there's a copy object
6943 */
6944 if ((object->vo_copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
6945 RELEASE_PAGE(m);
6946 GIVE_UP;
6947 }
6948
6949 fault_info.user_tag = VME_ALIAS(entry);
6950 fault_info.pmap_options = 0;
6951 if (entry->iokit_acct ||
6952 (!entry->is_sub_map && !entry->use_pmap)) {
6953 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6954 }
6955 if (entry->vme_xnu_user_debug) {
6956 /*
6957 * Modified code-signed executable region: wiring will
6958 * copy the pages, so they should be XNU_USER_DEBUG rather
6959 * than XNU_USER_EXEC.
6960 */
6961 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6962 }
6963
6964 fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6965 fault_phys_offset = offset - vm_object_trunc_page(offset);
6966
6967 /*
6968 * Put this page into the physical map.
6969 */
6970 type_of_fault = DBG_CACHE_HIT_FAULT;
6971 assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
6972 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6973 kr = vm_fault_enter(m,
6974 pmap,
6975 pmap_addr,
6976 fault_page_size,
6977 fault_phys_offset,
6978 prot,
6979 prot,
6980 TRUE, /* wired */
6981 FALSE, /* change_wiring */
6982 wire_tag,
6983 &fault_info,
6984 NULL,
6985 &type_of_fault,
6986 &object_lock_type); /* Exclusive lock mode. Will remain unchanged.*/
6987 if (kr != KERN_SUCCESS) {
6988 RELEASE_PAGE(m);
6989 GIVE_UP;
6990 }
6991
6992 done:
6993 /*
6994 * Unlock everything, and return
6995 */
6996
6997 if (physpage_p) {
6998 /* for vm_map_wire_and_extract() */
6999 if (kr == KERN_SUCCESS) {
7000 assert(object == VM_PAGE_OBJECT(m));
7001 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
7002 if (prot & VM_PROT_WRITE) {
7003 vm_object_lock_assert_exclusive(object);
7004 m->vmp_dirty = TRUE;
7005 }
7006 } else {
7007 *physpage_p = 0;
7008 }
7009 }
7010
7011 PAGE_WAKEUP_DONE(m);
7012 UNLOCK_AND_DEALLOCATE;
7013
7014 return kr;
7015 }
7016
7017 /*
7018 * Routine: vm_fault_copy_cleanup
7019 * Purpose:
7020 * Release a page used by vm_fault_copy.
7021 */
7022
7023 static void
7024 vm_fault_copy_cleanup(
7025 vm_page_t page,
7026 vm_page_t top_page)
7027 {
7028 vm_object_t object = VM_PAGE_OBJECT(page);
7029
7030 vm_object_lock(object);
7031 PAGE_WAKEUP_DONE(page);
7032 if (!VM_PAGE_PAGEABLE(page)) {
7033 vm_page_lockspin_queues();
7034 if (!VM_PAGE_PAGEABLE(page)) {
7035 vm_page_activate(page);
7036 }
7037 vm_page_unlock_queues();
7038 }
7039 vm_fault_cleanup(object, top_page);
7040 }
7041
7042 static void
7043 vm_fault_copy_dst_cleanup(
7044 vm_page_t page)
7045 {
7046 vm_object_t object;
7047
7048 if (page != VM_PAGE_NULL) {
7049 object = VM_PAGE_OBJECT(page);
7050 vm_object_lock(object);
7051 vm_page_lockspin_queues();
7052 vm_page_unwire(page, TRUE);
7053 vm_page_unlock_queues();
7054 vm_object_paging_end(object);
7055 vm_object_unlock(object);
7056 }
7057 }
7058
7059 /*
7060 * Routine: vm_fault_copy
7061 *
7062 * Purpose:
7063 * Copy pages from one virtual memory object to another --
7064 * neither the source nor destination pages need be resident.
7065 *
7066 * Before actually copying a page, the version associated with
7067 * the destination address map will be verified.
7068 *
7069 * In/out conditions:
7070 * The caller must hold a reference, but not a lock, to
7071 * each of the source and destination objects and to the
7072 * destination map.
7073 *
7074 * Results:
7075 * Returns KERN_SUCCESS if no errors were encountered in
7076 * reading or writing the data. Returns KERN_INTERRUPTED if
7077 * the operation was interrupted (only possible if the
7078 * "interruptible" argument is asserted). Other return values
7079 * indicate a permanent error in copying the data.
7080 *
7081 * The actual amount of data copied will be returned in the
7082 * "copy_size" argument. In the event that the destination map
7083 * verification failed, this amount may be less than the amount
7084 * requested.
7085 */
7086 kern_return_t
7087 vm_fault_copy(
7088 vm_object_t src_object,
7089 vm_object_offset_t src_offset,
7090 vm_map_size_t *copy_size, /* INOUT */
7091 vm_object_t dst_object,
7092 vm_object_offset_t dst_offset,
7093 vm_map_t dst_map,
7094 vm_map_version_t *dst_version,
7095 int interruptible)
7096 {
7097 vm_page_t result_page;
7098
7099 vm_page_t src_page;
7100 vm_page_t src_top_page;
7101 vm_prot_t src_prot;
7102
7103 vm_page_t dst_page;
7104 vm_page_t dst_top_page;
7105 vm_prot_t dst_prot;
7106
7107 vm_map_size_t amount_left;
7108 vm_object_t old_copy_object;
7109 uint32_t old_copy_version;
7110 vm_object_t result_page_object = NULL;
7111 kern_return_t error = 0;
7112 vm_fault_return_t result;
7113
7114 vm_map_size_t part_size;
7115 struct vm_object_fault_info fault_info_src = {};
7116 struct vm_object_fault_info fault_info_dst = {};
7117
7118 /*
7119 * In order not to confuse the clustered pageins, align
7120 * the different offsets on a page boundary.
7121 */
7122
7123 #define RETURN(x) \
7124 MACRO_BEGIN \
7125 *copy_size -= amount_left; \
7126 MACRO_RETURN(x); \
7127 MACRO_END
7128
7129 amount_left = *copy_size;
7130
7131 fault_info_src.interruptible = interruptible;
7132 fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
7133 fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
7134 fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
7135 fault_info_src.stealth = TRUE;
7136
7137 fault_info_dst.interruptible = interruptible;
7138 fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
7139 fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
7140 fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
7141 fault_info_dst.stealth = TRUE;
7142
7143 do { /* while (amount_left > 0) */
7144 /*
7145 * There may be a deadlock if both source and destination
7146 * pages are the same. To avoid this deadlock, the copy must
7147 * start by getting the destination page in order to apply
7148 * COW semantics if any.
7149 */
7150
7151 RetryDestinationFault:;
7152
7153 dst_prot = VM_PROT_WRITE | VM_PROT_READ;
7154
7155 vm_object_lock(dst_object);
7156 vm_object_paging_begin(dst_object);
7157
7158 /* cap cluster size at maximum UPL size */
7159 upl_size_t cluster_size;
7160 if (os_convert_overflow(amount_left, &cluster_size)) {
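/* amount_left doesn't fit in upl_size_t: 0 - PAGE_SIZE wraps to the largest page-aligned value */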
7161 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7162 }
7163 fault_info_dst.cluster_size = cluster_size;
7164
7165 dst_page = VM_PAGE_NULL;
7166 result = vm_fault_page(dst_object,
7167 vm_object_trunc_page(dst_offset),
7168 VM_PROT_WRITE | VM_PROT_READ,
7169 FALSE,
7170 FALSE, /* page not looked up */
7171 &dst_prot, &dst_page, &dst_top_page,
7172 (int *)0,
7173 &error,
7174 dst_map->no_zero_fill,
7175 &fault_info_dst);
7176 switch (result) {
7177 case VM_FAULT_SUCCESS:
7178 break;
7179 case VM_FAULT_RETRY:
7180 goto RetryDestinationFault;
7181 case VM_FAULT_MEMORY_SHORTAGE:
7182 if (vm_page_wait(interruptible)) {
7183 goto RetryDestinationFault;
7184 }
7185 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_COPY_MEMORY_SHORTAGE), 0 /* arg */);
7186 OS_FALLTHROUGH;
7187 case VM_FAULT_INTERRUPTED:
7188 RETURN(MACH_SEND_INTERRUPTED);
7189 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7190 /* success but no VM page: fail the copy */
7191 vm_object_paging_end(dst_object);
7192 vm_object_unlock(dst_object);
7193 OS_FALLTHROUGH;
7194 case VM_FAULT_MEMORY_ERROR:
7195 if (error) {
7196 return error;
7197 } else {
7198 return KERN_MEMORY_ERROR;
7199 }
7200 default:
7201 panic("vm_fault_copy: unexpected error 0x%x from "
7202 "vm_fault_page()\n", result);
7203 }
7204 assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
7205
7206 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7207 old_copy_object = dst_object->vo_copy;
7208 old_copy_version = dst_object->vo_copy_version;
7209
7210 /*
7211 * There exists the possibility that the source and
7212 * destination page are the same. But we can't
7213 * easily determine that now. If they are the
7214 * same, the call to vm_fault_page() for the
7215 * destination page will deadlock. To prevent this we
7216 * wire the page so we can drop busy without having
7217 * the page daemon steal the page. We clean up the
7218 * top page but keep the paging reference on the object
7219 * holding the dest page so it doesn't go away.
7220 */
7221
7222 vm_page_lockspin_queues();
7223 vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
7224 vm_page_unlock_queues();
7225 PAGE_WAKEUP_DONE(dst_page);
7226 vm_object_unlock(dst_object);
7227
7228 if (dst_top_page != VM_PAGE_NULL) {
7229 vm_object_lock(dst_object);
7230 VM_PAGE_FREE(dst_top_page);
7231 vm_object_paging_end(dst_object);
7232 vm_object_unlock(dst_object);
7233 }
7234
7235 RetrySourceFault:;
7236
7237 if (src_object == VM_OBJECT_NULL) {
7238 /*
7239 * No source object. We will just
7240 * zero-fill the page in dst_object.
7241 */
7242 src_page = VM_PAGE_NULL;
7243 result_page = VM_PAGE_NULL;
7244 } else {
7245 vm_object_lock(src_object);
7246 src_page = vm_page_lookup(src_object,
7247 vm_object_trunc_page(src_offset));
7248 if (src_page == dst_page) {
7249 src_prot = dst_prot;
7250 result_page = VM_PAGE_NULL;
7251 } else {
7252 src_prot = VM_PROT_READ;
7253 vm_object_paging_begin(src_object);
7254
7255 /* cap cluster size at maximum UPL size */
7256 if (os_convert_overflow(amount_left, &cluster_size)) {
7257 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7258 }
7259 fault_info_src.cluster_size = cluster_size;
7260
7261 result_page = VM_PAGE_NULL;
7262 result = vm_fault_page(
7263 src_object,
7264 vm_object_trunc_page(src_offset),
7265 VM_PROT_READ, FALSE,
7266 FALSE, /* page not looked up */
7267 &src_prot,
7268 &result_page, &src_top_page,
7269 (int *)0, &error, FALSE,
7270 &fault_info_src);
7271
7272 switch (result) {
7273 case VM_FAULT_SUCCESS:
7274 break;
7275 case VM_FAULT_RETRY:
7276 goto RetrySourceFault;
7277 case VM_FAULT_MEMORY_SHORTAGE:
7278 if (vm_page_wait(interruptible)) {
7279 goto RetrySourceFault;
7280 }
7281 OS_FALLTHROUGH;
7282 case VM_FAULT_INTERRUPTED:
7283 vm_fault_copy_dst_cleanup(dst_page);
7284 RETURN(MACH_SEND_INTERRUPTED);
7285 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7286 /* success but no VM page: fail */
7287 vm_object_paging_end(src_object);
7288 vm_object_unlock(src_object);
7289 OS_FALLTHROUGH;
7290 case VM_FAULT_MEMORY_ERROR:
7291 vm_fault_copy_dst_cleanup(dst_page);
7292 if (error) {
7293 return error;
7294 } else {
7295 return KERN_MEMORY_ERROR;
7296 }
7297 default:
7298 panic("vm_fault_copy(2): unexpected "
7299 "error 0x%x from "
7300 "vm_fault_page()\n", result);
7301 }
7302
7303 result_page_object = VM_PAGE_OBJECT(result_page);
7304 assert((src_top_page == VM_PAGE_NULL) ==
7305 (result_page_object == src_object));
7306 }
7307 assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7308 vm_object_unlock(result_page_object);
7309 }
7310
7311 vm_map_lock_read(dst_map);
7312
7313 if (!vm_map_verify(dst_map, dst_version)) {
7314 vm_map_unlock_read(dst_map);
7315 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7316 vm_fault_copy_cleanup(result_page, src_top_page);
7317 }
7318 vm_fault_copy_dst_cleanup(dst_page);
7319 break;
7320 }
7321 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7322
7323 vm_object_lock(dst_object);
7324
7325 if ((dst_object->vo_copy != old_copy_object ||
7326 dst_object->vo_copy_version != old_copy_version)) {
7327 vm_object_unlock(dst_object);
7328 vm_map_unlock_read(dst_map);
7329 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7330 vm_fault_copy_cleanup(result_page, src_top_page);
7331 }
7332 vm_fault_copy_dst_cleanup(dst_page);
7333 break;
7334 }
7335 vm_object_unlock(dst_object);
7336
7337 /*
7338 * Copy the page, and note that it is dirty
7339 * immediately.
7340 */
7341
7342 if (!page_aligned(src_offset) ||
7343 !page_aligned(dst_offset) ||
7344 !page_aligned(amount_left)) {
7345 vm_object_offset_t src_po,
7346 dst_po;
7347
7348 src_po = src_offset - vm_object_trunc_page(src_offset);
7349 dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7350
7351 if (dst_po > src_po) {
7352 part_size = PAGE_SIZE - dst_po;
7353 } else {
7354 part_size = PAGE_SIZE - src_po;
7355 }
7356 if (part_size > (amount_left)) {
7357 part_size = amount_left;
7358 }
7359
7360 if (result_page == VM_PAGE_NULL) {
7361 assert((vm_offset_t) dst_po == dst_po);
7362 assert((vm_size_t) part_size == part_size);
7363 vm_page_part_zero_fill(dst_page,
7364 (vm_offset_t) dst_po,
7365 (vm_size_t) part_size);
7366 } else {
7367 assert((vm_offset_t) src_po == src_po);
7368 assert((vm_offset_t) dst_po == dst_po);
7369 assert((vm_size_t) part_size == part_size);
7370 vm_page_part_copy(result_page,
7371 (vm_offset_t) src_po,
7372 dst_page,
7373 (vm_offset_t) dst_po,
7374 (vm_size_t)part_size);
7375 if (!dst_page->vmp_dirty) {
7376 vm_object_lock(dst_object);
7377 SET_PAGE_DIRTY(dst_page, TRUE);
7378 vm_object_unlock(dst_object);
7379 }
7380 }
7381 } else {
7382 part_size = PAGE_SIZE;
7383
7384 if (result_page == VM_PAGE_NULL) {
7385 vm_page_zero_fill(dst_page);
7386 } else {
7387 vm_object_lock(result_page_object);
7388 vm_page_copy(result_page, dst_page);
7389 vm_object_unlock(result_page_object);
7390
7391 if (!dst_page->vmp_dirty) {
7392 vm_object_lock(dst_object);
7393 SET_PAGE_DIRTY(dst_page, TRUE);
7394 vm_object_unlock(dst_object);
7395 }
7396 }
7397 }
7398
7399 /*
7400 * Unlock everything, and return
7401 */
7402
7403 vm_map_unlock_read(dst_map);
7404
7405 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7406 vm_fault_copy_cleanup(result_page, src_top_page);
7407 }
7408 vm_fault_copy_dst_cleanup(dst_page);
7409
7410 amount_left -= part_size;
7411 src_offset += part_size;
7412 dst_offset += part_size;
7413 } while (amount_left > 0);
7414
7415 RETURN(KERN_SUCCESS);
7416 #undef RETURN
7417
7418 /*NOTREACHED*/
7419 }
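
/*
 * Usage sketch (illustrative only, not a call site in this file): a caller
 * that already holds references on both objects and on dst_map would record
 * the map version under the map lock, then loop on short copies, e.g.:
 *
 *	vm_map_size_t    sz = bytes_to_copy;
 *	vm_map_version_t ver;
 *	... snapshot "ver" while dst_map is locked ...
 *	kr = vm_fault_copy(src_object, src_offset, &sz,
 *	                   dst_object, dst_offset, dst_map, &ver,
 *	                   THREAD_UNINT);
 *	// On return "sz" is the amount actually copied; if it is short, the
 *	// map version check failed and the caller must re-validate and retry.
 *
 * All names above are placeholders.
 */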
7420
7421 #if VM_FAULT_CLASSIFY
7422 /*
7423 * Temporary statistics gathering support.
7424 */
7425
7426 /*
7427 * Statistics arrays:
7428 */
7429 #define VM_FAULT_TYPES_MAX 5
7430 #define VM_FAULT_LEVEL_MAX 8
7431
7432 int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7433
7434 #define VM_FAULT_TYPE_ZERO_FILL 0
7435 #define VM_FAULT_TYPE_MAP_IN 1
7436 #define VM_FAULT_TYPE_PAGER 2
7437 #define VM_FAULT_TYPE_COPY 3
7438 #define VM_FAULT_TYPE_OTHER 4
7439
7440
7441 void
7442 vm_fault_classify(vm_object_t object,
7443 vm_object_offset_t offset,
7444 vm_prot_t fault_type)
7445 {
7446 int type, level = 0;
7447 vm_page_t m;
7448
7449 while (TRUE) {
7450 m = vm_page_lookup(object, offset);
7451 if (m != VM_PAGE_NULL) {
7452 if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7453 type = VM_FAULT_TYPE_OTHER;
7454 break;
7455 }
7456 if (((fault_type & VM_PROT_WRITE) == 0) ||
7457 ((level == 0) && object->vo_copy == VM_OBJECT_NULL)) {
7458 type = VM_FAULT_TYPE_MAP_IN;
7459 break;
7460 }
7461 type = VM_FAULT_TYPE_COPY;
7462 break;
7463 } else {
7464 if (object->pager_created) {
7465 type = VM_FAULT_TYPE_PAGER;
7466 break;
7467 }
7468 if (object->shadow == VM_OBJECT_NULL) {
7469 type = VM_FAULT_TYPE_ZERO_FILL;
7470 break;
7471 }
7472
7473 offset += object->vo_shadow_offset;
7474 object = object->shadow;
7475 level++;
7476 continue;
7477 }
7478 }
7479
7480 if (level >= VM_FAULT_LEVEL_MAX) {
7481 level = VM_FAULT_LEVEL_MAX - 1;
7482 }
7483
7484 vm_fault_stats[type][level] += 1;
7485
7486 return;
7487 }
7488
7489 /* cleanup routine to call from debugger */
7490
7491 void
7492 vm_fault_classify_init(void)
7493 {
7494 int type, level;
7495
7496 for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7497 for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7498 vm_fault_stats[type][level] = 0;
7499 }
7500 }
7501
7502 return;
7503 }
7504 #endif /* VM_FAULT_CLASSIFY */
7505
7506 vm_offset_t
7507 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
7508 {
7509 vm_map_entry_t entry;
7510 vm_object_t object;
7511 vm_offset_t object_offset;
7512 vm_page_t m;
7513 int compressor_external_state, compressed_count_delta;
7514 vm_compressor_options_t compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7515 int my_fault_type = VM_PROT_READ;
7516 kern_return_t kr;
7517 int effective_page_mask, effective_page_size;
7518
7519 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7520 effective_page_mask = VM_MAP_PAGE_MASK(map);
7521 effective_page_size = VM_MAP_PAGE_SIZE(map);
7522 } else {
7523 effective_page_mask = PAGE_MASK;
7524 effective_page_size = PAGE_SIZE;
7525 }
7526
7527 if (not_in_kdp) {
7528 panic("kdp_lightweight_fault called from outside of debugger context");
7529 }
7530
7531 assert(map != VM_MAP_NULL);
7532
7533 assert((cur_target_addr & effective_page_mask) == 0);
7534 if ((cur_target_addr & effective_page_mask) != 0) {
7535 return 0;
7536 }
7537
7538 if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7539 return 0;
7540 }
7541
7542 if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7543 return 0;
7544 }
7545
7546 if (entry->is_sub_map) {
7547 return 0;
7548 }
7549
7550 object = VME_OBJECT(entry);
7551 if (object == VM_OBJECT_NULL) {
7552 return 0;
7553 }
7554
7555 object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7556
7557 while (TRUE) {
7558 if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7559 return 0;
7560 }
7561
7562 if (object->pager_created && (object->paging_in_progress ||
7563 object->activity_in_progress)) {
7564 return 0;
7565 }
7566
7567 m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7568
7569 if (m != VM_PAGE_NULL) {
7570 if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
7571 return 0;
7572 }
7573
7574 if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_cleaning ||
7575 m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7576 return 0;
7577 }
7578
7579 assert(!m->vmp_private);
7580 if (m->vmp_private) {
7581 return 0;
7582 }
7583
7584 assert(!m->vmp_fictitious);
7585 if (m->vmp_fictitious) {
7586 return 0;
7587 }
7588
7589 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7590 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7591 return 0;
7592 }
7593
7594 return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7595 }
7596
7597 compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7598
7599 if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7600 if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7601 kr = vm_compressor_pager_get(object->pager,
7602 vm_object_trunc_page(object_offset + object->paging_offset),
7603 kdp_compressor_decompressed_page_ppnum, &my_fault_type,
7604 compressor_flags, &compressed_count_delta);
7605 if (kr == KERN_SUCCESS) {
7606 return kdp_compressor_decompressed_page_paddr;
7607 } else {
7608 return 0;
7609 }
7610 }
7611 }
7612
7613 if (object->shadow == VM_OBJECT_NULL) {
7614 return 0;
7615 }
7616
7617 object_offset += object->vo_shadow_offset;
7618 object = object->shadow;
7619 }
7620 }
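
/*
 * Illustrative only: a debugger-context consumer (e.g. a stackshot-style
 * walker) would call this with a page-aligned address and, on a non-zero
 * return, read the page through its physical address without taking faults:
 *
 *	vm_offset_t pa = kdp_lightweight_fault(map,
 *	    vm_map_trunc_page(addr, VM_MAP_PAGE_MASK(map)));
 *	if (pa != 0) {
 *		... copy the bytes via the physical aperture ...
 *	}
 *
 * A return of 0 only means "cannot be resolved safely right now".
 */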
7621
7622 /*
7623 * vm_page_validate_cs_fast():
7624 * Performs a few quick checks to determine if the page's code signature
7625 * really needs to be fully validated. It could:
7626 * 1. have been modified (i.e. automatically tainted),
7627 * 2. have already been validated,
7628 * 3. have already been found to be tainted,
7629 * 4. no longer have a backing store.
7630 * Returns FALSE if the page needs to be fully validated.
7631 */
7632 static boolean_t
7633 vm_page_validate_cs_fast(
7634 vm_page_t page,
7635 vm_map_size_t fault_page_size,
7636 vm_map_offset_t fault_phys_offset)
7637 {
7638 vm_object_t object;
7639
7640 object = VM_PAGE_OBJECT(page);
7641 vm_object_lock_assert_held(object);
7642
7643 if (page->vmp_wpmapped &&
7644 !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7645 /*
7646 * This page was mapped for "write" access sometime in the
7647 * past and could still be modifiable in the future.
7648 * Consider it tainted.
7649 * [ If the page was already found to be "tainted", no
7650 * need to re-validate. ]
7651 */
7652 vm_object_lock_assert_exclusive(object);
7653 VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7654 VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7655 if (cs_debug) {
7656 printf("CODESIGNING: %s: "
7657 "page %p obj %p off 0x%llx "
7658 "was modified\n",
7659 __FUNCTION__,
7660 page, object, page->vmp_offset);
7661 }
7662 vm_cs_validated_dirtied++;
7663 }
7664
7665 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7666 VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7667 return TRUE;
7668 }
7669 vm_object_lock_assert_exclusive(object);
7670
7671 #if CHECK_CS_VALIDATION_BITMAP
7672 kern_return_t kr;
7673
7674 kr = vnode_pager_cs_check_validation_bitmap(
7675 object->pager,
7676 page->vmp_offset + object->paging_offset,
7677 CS_BITMAP_CHECK);
7678 if (kr == KERN_SUCCESS) {
7679 page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7680 page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7681 vm_cs_bitmap_validated++;
7682 return TRUE;
7683 }
7684 #endif /* CHECK_CS_VALIDATION_BITMAP */
7685
7686 if (!object->alive || object->terminating || object->pager == NULL) {
7687 /*
7688 * The object is terminating and we don't have its pager
7689 * so we can't validate the data...
7690 */
7691 return TRUE;
7692 }
7693
7694 /* we need to really validate this page */
7695 vm_object_lock_assert_exclusive(object);
7696 return FALSE;
7697 }
7698
7699 void
7700 vm_page_validate_cs_mapped_slow(
7701 vm_page_t page,
7702 const void *kaddr)
7703 {
7704 vm_object_t object;
7705 memory_object_offset_t mo_offset;
7706 memory_object_t pager;
7707 struct vnode *vnode;
7708 int validated, tainted, nx;
7709
7710 assert(page->vmp_busy);
7711 object = VM_PAGE_OBJECT(page);
7712 vm_object_lock_assert_exclusive(object);
7713
7714 vm_cs_validates++;
7715
7716 /*
7717 * Since we get here to validate a page that was brought in by
7718 * the pager, we know that this pager is all setup and ready
7719 * by now.
7720 */
7721 assert(object->code_signed);
7722 assert(!object->internal);
7723 assert(object->pager != NULL);
7724 assert(object->pager_ready);
7725
7726 pager = object->pager;
7727 assert(object->paging_in_progress);
7728 vnode = vnode_pager_lookup_vnode(pager);
7729 mo_offset = page->vmp_offset + object->paging_offset;
7730
7731 /* verify the SHA1 hash for this page */
7732 validated = 0;
7733 tainted = 0;
7734 nx = 0;
7735 cs_validate_page(vnode,
7736 pager,
7737 mo_offset,
7738 (const void *)((const char *)kaddr),
7739 &validated,
7740 &tainted,
7741 &nx);
7742
7743 page->vmp_cs_validated |= validated;
7744 page->vmp_cs_tainted |= tainted;
7745 page->vmp_cs_nx |= nx;
7746
7747 #if CHECK_CS_VALIDATION_BITMAP
7748 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
7749 page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
7750 vnode_pager_cs_check_validation_bitmap(object->pager,
7751 mo_offset,
7752 CS_BITMAP_SET);
7753 }
7754 #endif /* CHECK_CS_VALIDATION_BITMAP */
7755 }
7756
7757 void
7758 vm_page_validate_cs_mapped(
7759 vm_page_t page,
7760 vm_map_size_t fault_page_size,
7761 vm_map_offset_t fault_phys_offset,
7762 const void *kaddr)
7763 {
7764 if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7765 vm_page_validate_cs_mapped_slow(page, kaddr);
7766 }
7767 }
7768
7769 static void
7770 vm_page_map_and_validate_cs(
7771 vm_object_t object,
7772 vm_page_t page)
7773 {
7774 vm_object_offset_t offset;
7775 vm_map_offset_t koffset;
7776 vm_map_size_t ksize;
7777 vm_offset_t kaddr;
7778 kern_return_t kr;
7779 boolean_t busy_page;
7780 boolean_t need_unmap;
7781
7782 vm_object_lock_assert_exclusive(object);
7783
7784 assert(object->code_signed);
7785 offset = page->vmp_offset;
7786
7787 busy_page = page->vmp_busy;
7788 if (!busy_page) {
7789 /* keep page busy while we map (and unlock) the VM object */
7790 page->vmp_busy = TRUE;
7791 }
7792
7793 /*
7794 * Take a paging reference on the VM object
7795 * to protect it from collapse or bypass,
7796 * and keep it from disappearing too.
7797 */
7798 vm_object_paging_begin(object);
7799
7800 /* map the page in the kernel address space */
7801 ksize = PAGE_SIZE_64;
7802 koffset = 0;
7803 need_unmap = FALSE;
7804 kr = vm_paging_map_object(page,
7805 object,
7806 offset,
7807 VM_PROT_READ,
7808 FALSE, /* can't unlock object ! */
7809 &ksize,
7810 &koffset,
7811 &need_unmap);
7812 if (kr != KERN_SUCCESS) {
7813 panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
7814 }
7815 kaddr = CAST_DOWN(vm_offset_t, koffset);
7816
7817 /* validate the mapped page */
7818 vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
7819
7820 assert(page->vmp_busy);
7821 assert(object == VM_PAGE_OBJECT(page));
7822 vm_object_lock_assert_exclusive(object);
7823
7824 if (!busy_page) {
7825 PAGE_WAKEUP_DONE(page);
7826 }
7827 if (need_unmap) {
7828 /* unmap the map from the kernel address space */
7829 vm_paging_unmap_object(object, koffset, koffset + ksize);
7830 koffset = 0;
7831 ksize = 0;
7832 kaddr = 0;
7833 }
7834 vm_object_paging_end(object);
7835 }
7836
7837 void
7838 vm_page_validate_cs(
7839 vm_page_t page,
7840 vm_map_size_t fault_page_size,
7841 vm_map_offset_t fault_phys_offset)
7842 {
7843 vm_object_t object;
7844
7845 object = VM_PAGE_OBJECT(page);
7846 vm_object_lock_assert_held(object);
7847
7848 if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7849 return;
7850 }
7851 vm_page_map_and_validate_cs(object, page);
7852 }
7853
7854 void
7855 vm_page_validate_cs_mapped_chunk(
7856 vm_page_t page,
7857 const void *kaddr,
7858 vm_offset_t chunk_offset,
7859 vm_size_t chunk_size,
7860 boolean_t *validated_p,
7861 unsigned *tainted_p)
7862 {
7863 vm_object_t object;
7864 vm_object_offset_t offset, offset_in_page;
7865 memory_object_t pager;
7866 struct vnode *vnode;
7867 boolean_t validated;
7868 unsigned tainted;
7869
7870 *validated_p = FALSE;
7871 *tainted_p = 0;
7872
7873 assert(page->vmp_busy);
7874 object = VM_PAGE_OBJECT(page);
7875 vm_object_lock_assert_exclusive(object);
7876
7877 assert(object->code_signed);
7878 offset = page->vmp_offset;
7879
7880 if (!object->alive || object->terminating || object->pager == NULL) {
7881 /*
7882 * The object is terminating and we don't have its pager
7883 * so we can't validate the data...
7884 */
7885 return;
7886 }
7887 /*
7888 * Since we get here to validate a page that was brought in by
7889 * the pager, we know that this pager is all setup and ready
7890 * by now.
7891 */
7892 assert(!object->internal);
7893 assert(object->pager != NULL);
7894 assert(object->pager_ready);
7895
7896 pager = object->pager;
7897 assert(object->paging_in_progress);
7898 vnode = vnode_pager_lookup_vnode(pager);
7899
7900 /* verify the signature for this chunk */
7901 offset_in_page = chunk_offset;
7902 assert(offset_in_page < PAGE_SIZE);
7903
7904 tainted = 0;
7905 validated = cs_validate_range(vnode,
7906 pager,
7907 (object->paging_offset +
7908 offset +
7909 offset_in_page),
7910 (const void *)((const char *)kaddr
7911 + offset_in_page),
7912 chunk_size,
7913 &tainted);
7914 if (validated) {
7915 *validated_p = TRUE;
7916 }
7917 if (tainted) {
7918 *tainted_p = tainted;
7919 }
7920 }
7921
7922 static void
7923 vm_rtfrecord_lock(void)
7924 {
7925 lck_spin_lock(&vm_rtfr_slock);
7926 }
7927
7928 static void
7929 vm_rtfrecord_unlock(void)
7930 {
7931 lck_spin_unlock(&vm_rtfr_slock);
7932 }
7933
7934 unsigned int
7935 vmrtfaultinfo_bufsz(void)
7936 {
7937 return vmrtf_num_records * sizeof(vm_rtfault_record_t);
7938 }
7939
7940 #include <kern/backtrace.h>
7941
7942 __attribute__((noinline))
7943 static void
7944 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
7945 {
7946 uint64_t fend = mach_continuous_time();
7947
7948 uint64_t cfpc = 0;
7949 uint64_t ctid = cthread->thread_id;
7950 uint64_t cupid = get_current_unique_pid();
7951
7952 uintptr_t bpc = 0;
7953 errno_t btr = 0;
7954
7955 /*
7956 * Capture a single-frame backtrace. This extracts just the program
7957 * counter at the point of the fault, and should not use copyin to get
7958 * Rosetta save state.
7959 */
7960 struct backtrace_control ctl = {
7961 .btc_user_thread = cthread,
7962 .btc_user_copy = backtrace_user_copy_error,
7963 };
7964 unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
7965 if ((btr == 0) && (bfrs > 0)) {
7966 cfpc = bpc;
7967 }
7968
7969 assert((fstart != 0) && fend >= fstart);
7970 vm_rtfrecord_lock();
7971 assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
7972
7973 vmrtfrs.vmrtf_total++;
7974 vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
7975
7976 cvmr->rtfabstime = fstart;
7977 cvmr->rtfduration = fend - fstart;
7978 cvmr->rtfaddr = fault_vaddr;
7979 cvmr->rtfpc = cfpc;
7980 cvmr->rtftype = type_of_fault;
7981 cvmr->rtfupid = cupid;
7982 cvmr->rtftid = ctid;
7983
7984 if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
7985 vmrtfrs.vmrtfr_curi = 0;
7986 }
7987
7988 vm_rtfrecord_unlock();
7989 }
7990
7991 int
7992 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
7993 {
7994 vm_rtfault_record_t *cvmrd = vrecords;
7995 size_t residue = vrecordsz;
7996 size_t numextracted = 0;
7997 boolean_t early_exit = FALSE;
7998
7999 vm_rtfrecord_lock();
8000
8001 for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
8002 if (residue < sizeof(vm_rtfault_record_t)) {
8003 early_exit = TRUE;
8004 break;
8005 }
8006
8007 if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
8008 #if DEVELOPMENT || DEBUG
8009 if (isroot == FALSE) {
8010 continue;
8011 }
8012 #else
8013 continue;
8014 #endif /* DEVELOPMENT || DEBUG */
8015 }
8016
8017 *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
8018 cvmrd++;
8019 residue -= sizeof(vm_rtfault_record_t);
8020 numextracted++;
8021 }
8022
8023 vm_rtfrecord_unlock();
8024
8025 *vmrtfrv = numextracted;
8026 return early_exit;
8027 }
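
/*
 * Consumer sketch (hypothetical, not from this file): a sysctl handler would
 * size its buffer with vmrtfaultinfo_bufsz() and then pull the calling
 * process's records:
 *
 *	unsigned long nrec = 0;
 *	size_t bufsz = vmrtfaultinfo_bufsz();
 *	vm_rtfault_record_t *buf = kalloc_data(bufsz, Z_WAITOK | Z_ZERO);
 *	(void)vmrtf_extract(get_current_unique_pid(), FALSE, bufsz, buf, &nrec);
 *	... copy out "nrec" records, then kfree_data(buf, bufsz) ...
 */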
8028
8029 /*
8030 * Only allow one diagnosis to be in flight at a time, to avoid
8031 * creating too much additional memory usage.
8032 */
8033 static volatile uint_t vmtc_diagnosing;
8034 unsigned int vmtc_total = 0;
8035
8036 /*
8037 * Type used to update telemetry for the diagnosis counts.
8038 */
8039 CA_EVENT(vmtc_telemetry,
8040 CA_INT, vmtc_num_byte, /* number of corrupt bytes found */
8041 CA_BOOL, vmtc_undiagnosed, /* undiagnosed because more than 1 at a time */
8042 CA_BOOL, vmtc_not_eligible, /* the page didn't qualify */
8043 CA_BOOL, vmtc_copyin_fail, /* unable to copy in the page */
8044 CA_BOOL, vmtc_not_found, /* no corruption found even though CS failed */
8045 CA_BOOL, vmtc_one_bit_flip, /* single bit flip */
8046 CA_BOOL, vmtc_testing); /* caused on purpose by testing */
8047
8048 #if DEVELOPMENT || DEBUG
8049 /*
8050 * Buffers used to compare before/after page contents.
8051 * Stashed to aid when debugging crashes.
8052 */
8053 static size_t vmtc_last_buffer_size = 0;
8054 static uint64_t *vmtc_last_before_buffer = NULL;
8055 static uint64_t *vmtc_last_after_buffer = NULL;
8056
8057 /*
8058 * Needed to record corruptions due to testing.
8059 */
8060 static uintptr_t corruption_test_va = 0;
8061 #endif /* DEVELOPMENT || DEBUG */
8062
8063 /*
8064 * Stash a copy of data from a possibly corrupt page.
8065 */
8066 static uint64_t *
8067 vmtc_get_page_data(
8068 vm_map_offset_t code_addr,
8069 vm_page_t page)
8070 {
8071 uint64_t *buffer = NULL;
8072 addr64_t buffer_paddr;
8073 addr64_t page_paddr;
8074 extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
8075 uint_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8076
8077 /*
8078 * Need an aligned buffer to do a physical copy.
8079 */
8080 if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
8081 size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
8082 return NULL;
8083 }
8084 buffer_paddr = kvtophys((vm_offset_t)buffer);
8085 page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
8086
8087 /* adjust the page start address if we need only 4K of a 16K page */
8088 if (size < PAGE_SIZE) {
8089 uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
8090 page_paddr += subpage_start;
8091 }
8092
8093 bcopy_phys(page_paddr, buffer_paddr, size);
8094 return buffer;
8095 }
8096
8097 /*
8098 * Set things up so we can diagnose a potential text page corruption.
8099 */
8100 static uint64_t *
8101 vmtc_text_page_diagnose_setup(
8102 vm_map_offset_t code_addr,
8103 vm_page_t page,
8104 CA_EVENT_TYPE(vmtc_telemetry) *event)
8105 {
8106 uint64_t *buffer = NULL;
8107
8108 /*
8109 * If another is being diagnosed, skip this one.
8110 */
8111 if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
8112 event->vmtc_undiagnosed = true;
8113 return NULL;
8114 }
8115
8116 /*
8117 * Get the contents of the corrupt page.
8118 */
8119 buffer = vmtc_get_page_data(code_addr, page);
8120 if (buffer == NULL) {
8121 event->vmtc_copyin_fail = true;
8122 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8123 panic("Bad compare and swap in setup!");
8124 }
8125 return NULL;
8126 }
8127 return buffer;
8128 }
8129
8130 /*
8131 * Diagnose the text page by comparing its contents with
8132 * the one we've previously saved.
8133 */
8134 static void
8135 vmtc_text_page_diagnose(
8136 vm_map_offset_t code_addr,
8137 uint64_t *old_code_buffer,
8138 CA_EVENT_TYPE(vmtc_telemetry) *event)
8139 {
8140 uint64_t *new_code_buffer;
8141 size_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8142 uint_t count = (uint_t)size / sizeof(uint64_t);
8143 uint_t diff_count = 0;
8144 bool bit_flip = false;
8145 uint_t b;
8146 uint64_t *new;
8147 uint64_t *old;
8148
8149 new_code_buffer = kalloc_data(size, Z_WAITOK);
8150 assert(new_code_buffer != NULL);
8151 if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
8152 /* copyin error, so undo things */
8153 event->vmtc_copyin_fail = true;
8154 goto done;
8155 }
8156
8157 new = new_code_buffer;
8158 old = old_code_buffer;
8159 for (; count-- > 0; ++new, ++old) {
8160 if (*new == *old) {
8161 continue;
8162 }
8163
8164 /*
8165 * On first diff, check for a single bit flip
8166 */
8167 if (diff_count == 0) {
8168 uint64_t x = (*new ^ *old);
8169 assert(x != 0);
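/* exactly one bit differs iff x is a power of two: x & (x - 1) clears the lowest set bit */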
8170 if ((x & (x - 1)) == 0) {
8171 bit_flip = true;
8172 ++diff_count;
8173 continue;
8174 }
8175 }
8176
8177 /*
8178 * count up the number of different bytes.
8179 */
8180 for (b = 0; b < sizeof(uint64_t); ++b) {
8181 char *n = (char *)new;
8182 char *o = (char *)old;
8183 if (n[b] != o[b]) {
8184 ++diff_count;
8185 }
8186 }
8187 }
8188
8189 if (diff_count > 1) {
8190 bit_flip = false;
8191 }
8192
8193 if (diff_count == 0) {
8194 event->vmtc_not_found = true;
8195 } else {
8196 event->vmtc_num_byte = diff_count;
8197 }
8198 if (bit_flip) {
8199 event->vmtc_one_bit_flip = true;
8200 }
8201
8202 done:
8203 /*
8204 * Free up the code copy buffers, but save the last
8205 * set on development / debug kernels in case they
8206 * can provide evidence for debugging memory stomps.
8207 */
8208 #if DEVELOPMENT || DEBUG
8209 if (vmtc_last_before_buffer != NULL) {
8210 kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
8211 }
8212 if (vmtc_last_after_buffer != NULL) {
8213 kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
8214 }
8215 vmtc_last_before_buffer = old_code_buffer;
8216 vmtc_last_after_buffer = new_code_buffer;
8217 vmtc_last_buffer_size = size;
8218 #else /* DEVELOPMENT || DEBUG */
8219 kfree_data(new_code_buffer, size);
8220 kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
8221 #endif /* DEVELOPMENT || DEBUG */
8222
8223 /*
8224 * We're finished, so clear the diagnosing flag.
8225 */
8226 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8227 panic("Bad compare and swap in diagnose!");
8228 }
8229 }
8230
8231 /*
8232 * For the given map, virt address, find the object, offset, and page.
8233 * This has to lookup the map entry, verify protections, walk any shadow chains.
8234 * If found, returns with the object locked.
8235 */
8236 static kern_return_t
8237 vmtc_revalidate_lookup(
8238 vm_map_t map,
8239 vm_map_offset_t vaddr,
8240 vm_object_t *ret_object,
8241 vm_object_offset_t *ret_offset,
8242 vm_page_t *ret_page,
8243 vm_prot_t *ret_prot)
8244 {
8245 vm_object_t object;
8246 vm_object_offset_t offset;
8247 vm_page_t page;
8248 kern_return_t kr = KERN_SUCCESS;
8249 uint8_t object_lock_type = OBJECT_LOCK_EXCLUSIVE;
8250 vm_map_version_t version;
8251 boolean_t wired;
8252 struct vm_object_fault_info fault_info = {};
8253 vm_map_t real_map = NULL;
8254 vm_prot_t prot;
8255 vm_object_t shadow;
8256
8257 /*
8258 * Find the object/offset for the given location/map.
8259 * Note this returns with the object locked.
8260 */
8261 restart:
8262 vm_map_lock_read(map);
8263 object = VM_OBJECT_NULL; /* in case we come around the restart path */
8264 kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
8265 object_lock_type, &version, &object, &offset, &prot, &wired,
8266 &fault_info, &real_map, NULL);
8267 vm_map_unlock_read(map);
8268 if (real_map != NULL && real_map != map) {
8269 vm_map_unlock(real_map);
8270 }
8271
8272 /*
8273 * If there's no page here, fail.
8274 */
8275 if (kr != KERN_SUCCESS || object == NULL) {
8276 kr = KERN_FAILURE;
8277 goto done;
8278 }
8279
8280 /*
8281 * Chase down any shadow chains to find the actual page.
8282 */
8283 for (;;) {
8284 /*
8285 * See if the page is on the current object.
8286 */
8287 page = vm_page_lookup(object, vm_object_trunc_page(offset));
8288 if (page != NULL) {
8289 /* restart the lookup */
8290 if (page->vmp_restart) {
8291 vm_object_unlock(object);
8292 goto restart;
8293 }
8294
8295 /*
8296 * If this page is busy, we need to wait for it.
8297 */
8298 if (page->vmp_busy) {
8299 PAGE_SLEEP(object, page, TRUE);
8300 vm_object_unlock(object);
8301 goto restart;
8302 }
8303 break;
8304 }
8305
8306 /*
8307 * If the object doesn't have the page and
8308 * has no shadow, then we can quit.
8309 */
8310 shadow = object->shadow;
8311 if (shadow == NULL) {
8312 kr = KERN_FAILURE;
8313 goto done;
8314 }
8315
8316 /*
8317 * Move to the next object
8318 */
8319 offset += object->vo_shadow_offset;
8320 vm_object_lock(shadow);
8321 vm_object_unlock(object);
8322 object = shadow;
8323 shadow = VM_OBJECT_NULL;
8324 }
8325 *ret_object = object;
8326 *ret_offset = vm_object_trunc_page(offset);
8327 *ret_page = page;
8328 *ret_prot = prot;
8329
8330 done:
8331 if (kr != KERN_SUCCESS && object != NULL) {
8332 vm_object_unlock(object);
8333 }
8334 return kr;
8335 }
8336
8337 /*
8338 * Check if a page is wired, needs extra locking.
8339 */
8340 static bool
8341 is_page_wired(vm_page_t page)
8342 {
8343 bool result;
8344 vm_page_lock_queues();
8345 result = VM_PAGE_WIRED(page);
8346 vm_page_unlock_queues();
8347 return result;
8348 }
8349
8350 /*
8351 * A fatal process error has occurred in the given task.
8352 * Recheck the code signing of the text page at the given
8353 * address to check for a text page corruption.
8354 *
8355 * Returns KERN_FAILURE if a page was found to be corrupt
8356 * by failing to match its code signature. KERN_SUCCESS
8357 * means the page is either valid or we don't have the
8358 * information to say it's corrupt.
8359 */
8360 kern_return_t
8361 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8362 {
8363 kern_return_t kr;
8364 vm_map_t map;
8365 vm_object_t object = NULL;
8366 vm_object_offset_t offset;
8367 vm_page_t page = NULL;
8368 struct vnode *vnode;
8369 uint64_t *diagnose_buffer = NULL;
8370 CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8371 ca_event_t ca_event = NULL;
8372 vm_prot_t prot;
8373
8374 map = task->map;
8375 if (task->map == NULL) {
8376 return KERN_SUCCESS;
8377 }
8378
8379 kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page, &prot);
8380 if (kr != KERN_SUCCESS) {
8381 goto done;
8382 }
8383
8384 /*
8385 * The page must be executable.
8386 */
8387 if (!(prot & VM_PROT_EXECUTE)) {
8388 goto done;
8389 }
8390
8391 /*
8392 * The object needs to have a pager.
8393 */
8394 if (object->pager == NULL) {
8395 goto done;
8396 }
8397
8398 /*
8399 * Needs to be a vnode backed page to have a signature.
8400 */
8401 vnode = vnode_pager_lookup_vnode(object->pager);
8402 if (vnode == NULL) {
8403 goto done;
8404 }
8405
8406 /*
8407 * Object checks to see if we should proceed.
8408 */
8409 if (!object->code_signed || /* no code signature to check */
8410 object->internal || /* internal objects aren't signed */
8411 object->terminating || /* the object and its pages are already going away */
8412 !object->pager_ready) { /* this shouldn't happen, but the check doesn't hurt */
8413 goto done;
8414 }
8415
8416
8417 /*
8418 * Check the code signature of the page in question.
8419 */
8420 vm_page_map_and_validate_cs(object, page);
8421
8422 /*
8423 * At this point:
8424 * vmp_cs_validated |= validated (set if a code signature exists)
8425 * vmp_cs_tainted |= tainted (set if code signature violation)
8426 * vmp_cs_nx |= nx; ??
8427 *
8428 * if vmp_pmapped then have to pmap_disconnect..
8429 * other flags to check on object or page?
8430 */
8431 if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8432 #if DEBUG || DEVELOPMENT
8433 /*
8434 * On development builds, a boot-arg can be used to cause
8435 * a panic, instead of a quiet repair.
8436 */
8437 if (vmtc_panic_instead) {
8438 panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8439 }
8440 #endif /* DEBUG || DEVELOPMENT */
8441
8442 /*
8443 * We're going to invalidate this page. Grab a copy of it for comparison.
8444 */
8445 ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8446 event = ca_event->data;
8447 diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8448
8449 /*
8450 * Invalidate, i.e. toss, the corrupted page.
8451 */
8452 if (!page->vmp_cleaning &&
8453 !page->vmp_laundry &&
8454 !page->vmp_fictitious &&
8455 !page->vmp_precious &&
8456 !page->vmp_absent &&
8457 !VMP_ERROR_GET(page) &&
8458 !page->vmp_dirty &&
8459 !is_page_wired(page)) {
8460 if (page->vmp_pmapped) {
8461 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8462 if (refmod & VM_MEM_MODIFIED) {
8463 SET_PAGE_DIRTY(page, FALSE);
8464 }
8465 if (refmod & VM_MEM_REFERENCED) {
8466 page->vmp_reference = TRUE;
8467 }
8468 }
8469 /* If the page seems intentionally modified, don't trash it. */
8470 if (!page->vmp_dirty) {
8471 VM_PAGE_FREE(page);
8472 } else {
8473 event->vmtc_not_eligible = true;
8474 }
8475 } else {
8476 event->vmtc_not_eligible = true;
8477 }
8478 vm_object_unlock(object);
8479 object = VM_OBJECT_NULL;
8480
8481 /*
8482 * Now try to diagnose the type of failure by faulting
8483 * in a new copy and diff'ing it with what we saved.
8484 */
8485 if (diagnose_buffer != NULL) {
8486 vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8487 }
8488 #if DEBUG || DEVELOPMENT
8489 if (corruption_test_va != 0) {
8490 corruption_test_va = 0;
8491 event->vmtc_testing = true;
8492 }
8493 #endif /* DEBUG || DEVELOPMENT */
8494 ktriage_record(thread_tid(current_thread()),
8495 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8496 0 /* arg */);
8497 CA_EVENT_SEND(ca_event);
8498 printf("Text page corruption detected for pid %d\n", proc_selfpid());
8499 ++vmtc_total;
8500 return KERN_FAILURE; /* failure means we definitely found a corrupt page */
8501 }
8502 done:
8503 if (object != NULL) {
8504 vm_object_unlock(object);
8505 }
8506 return KERN_SUCCESS;
8507 }
8508
8509 #if DEBUG || DEVELOPMENT
8510 /*
8511 * For implementing unit tests - ask the pmap to corrupt a text page.
8512 * We have to find the page, to get the physical address, then invoke
8513 * the pmap.
8514 */
8515 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8516
8517 kern_return_t
8518 vm_corrupt_text_addr(uintptr_t va)
8519 {
8520 task_t task = current_task();
8521 vm_map_t map;
8522 kern_return_t kr = KERN_SUCCESS;
8523 vm_object_t object = VM_OBJECT_NULL;
8524 vm_object_offset_t offset;
8525 vm_page_t page = NULL;
8526 pmap_paddr_t pa;
8527 vm_prot_t prot;
8528
8529 map = task->map;
8530 if (task->map == NULL) {
8531 printf("corrupt_text_addr: no map\n");
8532 return KERN_FAILURE;
8533 }
8534
8535 kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page, &prot);
8536 if (kr != KERN_SUCCESS) {
8537 printf("corrupt_text_addr: page lookup failed\n");
8538 return kr;
8539 }
8540 if (!(prot & VM_PROT_EXECUTE)) {
8541 printf("corrupt_text_addr: page not executable\n");
8542 return KERN_FAILURE;
8543 }
8544
8545 /* get the physical address to use */
8546 pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
8547
8548 /*
8549 * Check we have something we can work with.
8550 * Due to racing with pageout as we enter the sysctl,
8551 * it's theoretically possible to have the page disappear, just
8552 * before the lookup.
8553 *
8554 * That's unlikely to happen often. I've filed radar 72857482
8555 * to bubble up the error here to the sysctl result and have the
8556 * test not FAIL in that case.
8557 */
8558 if (page->vmp_busy) {
8559 printf("corrupt_text_addr: vmp_busy\n");
8560 kr = KERN_FAILURE;
8561 }
8562 if (page->vmp_cleaning) {
8563 printf("corrupt_text_addr: vmp_cleaning\n");
8564 kr = KERN_FAILURE;
8565 }
8566 if (page->vmp_laundry) {
8567 printf("corrupt_text_addr: vmp_cleaning\n");
8568 kr = KERN_FAILURE;
8569 }
8570 if (page->vmp_fictitious) {
8571 printf("corrupt_text_addr: vmp_fictitious\n");
8572 kr = KERN_FAILURE;
8573 }
8574 if (page->vmp_precious) {
8575 printf("corrupt_text_addr: vmp_precious\n");
8576 kr = KERN_FAILURE;
8577 }
8578 if (page->vmp_absent) {
8579 printf("corrupt_text_addr: vmp_absent\n");
8580 kr = KERN_FAILURE;
8581 }
8582 if (VMP_ERROR_GET(page)) {
8583 printf("corrupt_text_addr: vmp_error\n");
8584 kr = KERN_FAILURE;
8585 }
8586 if (page->vmp_dirty) {
8587 printf("corrupt_text_addr: vmp_dirty\n");
8588 kr = KERN_FAILURE;
8589 }
8590 if (is_page_wired(page)) {
8591 printf("corrupt_text_addr: wired\n");
8592 kr = KERN_FAILURE;
8593 }
8594 if (!page->vmp_pmapped) {
8595 printf("corrupt_text_addr: !vmp_pmapped\n");
8596 kr = KERN_FAILURE;
8597 }
8598
8599 if (kr == KERN_SUCCESS) {
8600 printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8601 kr = pmap_test_text_corruption(pa);
8602 if (kr != KERN_SUCCESS) {
8603 printf("corrupt_text_addr: pmap error %d\n", kr);
8604 } else {
8605 corruption_test_va = va;
8606 }
8607 } else {
8608 printf("corrupt_text_addr: object %p\n", object);
8609 printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8610 printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8611 printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8612 printf("corrupt_text_addr: vm_page_t %p\n", page);
8613 printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8614 printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8615 }
8616
8617 if (object != VM_OBJECT_NULL) {
8618 vm_object_unlock(object);
8619 }
8620 return kr;
8621 }
8622
8623 #endif /* DEBUG || DEVELOPMENT */
8624