1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm_fault.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Page fault handling module.
63 */
64
65 #include <mach_cluster_stats.h>
66 #include <mach_pagemap.h>
67 #include <libkern/OSAtomic.h>
68
69 #include <mach/mach_types.h>
70 #include <mach/kern_return.h>
71 #include <mach/message.h> /* for error codes */
72 #include <mach/vm_param.h>
73 #include <mach/vm_behavior.h>
74 #include <mach/memory_object.h>
75 /* For memory_object_data_{request,unlock} */
76 #include <mach/sdt.h>
77
78 #include <pexpert/pexpert.h>
79 #include <pexpert/device_tree.h>
80
81 #include <kern/kern_types.h>
82 #include <kern/host_statistics.h>
83 #include <kern/counter.h>
84 #include <kern/task.h>
85 #include <kern/thread.h>
86 #include <kern/sched_prim.h>
87 #include <kern/host.h>
88 #include <kern/mach_param.h>
89 #include <kern/macro_help.h>
90 #include <kern/zalloc.h>
91 #include <kern/misc_protos.h>
92 #include <kern/policy_internal.h>
93
94 #include <vm/vm_compressor.h>
95 #include <vm/vm_compressor_pager.h>
96 #include <vm/vm_fault.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_object.h>
99 #include <vm/vm_page.h>
100 #include <vm/vm_kern.h>
101 #include <vm/pmap.h>
102 #include <vm/vm_pageout.h>
103 #include <vm/vm_protos.h>
104 #include <vm/vm_external.h>
105 #include <vm/memory_object.h>
106 #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
107 #include <vm/vm_shared_region.h>
108
109 #include <sys/codesign.h>
110 #include <sys/reason.h>
111 #include <sys/signalvar.h>
112
113 #include <sys/kdebug_triage.h>
114
115 #include <san/kasan.h>
116 #include <libkern/coreanalytics/coreanalytics.h>
117
118 #define VM_FAULT_CLASSIFY 0
119
120 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
121
122 int vm_protect_privileged_from_untrusted = 1;
123
124 unsigned int vm_object_pagein_throttle = 16;
125
126 /*
127 * We apply a hard throttle to the demand zero rate of tasks that we believe are running out of control; the throttle
128 * kicks in when swap space runs out. 64-bit programs have massive address spaces and can leak enormous amounts
129 * of memory if they're buggy and can run the system completely out of swap space. If this happens, we
130 * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps
131 * keep the UI active so that the user has a chance to kill the offending task before the system
132 * completely hangs.
133 *
134 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
135 * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold
136 * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a
137 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
138 */
139
140 extern void throttle_lowpri_io(int);
141
142 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
143
144 uint64_t vm_hard_throttle_threshold;
145
146 #if DEBUG || DEVELOPMENT
147 static bool vmtc_panic_instead = false;
148 #endif /* DEBUG || DEVELOPMENT */
149
150 OS_ALWAYS_INLINE
151 boolean_t
152 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
153 {
154 return vm_wants_task_throttled(current_task()) ||
155 ((vm_page_free_count < vm_page_throttle_limit ||
156 HARD_THROTTLE_LIMIT_REACHED()) &&
157 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
158 }
159
160 #define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
161 #define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */
162
163 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6
164 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000
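/*
 * Illustrative note (not part of the original source): with the defaults above,
 * and when free memory is under pressure, vm_page_throttled() only throttles a
 * thread once it has created more than VM_PAGE_CREATION_THROTTLE_PERIOD_SECS *
 * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC = 6 * 20000 = 120000 pages and its
 * creation rate over the elapsed window is still at or above 20000 pages/sec.
 */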
165
166
167 #define VM_STAT_DECOMPRESSIONS() \
168 MACRO_BEGIN \
169 counter_inc(&vm_statistics_decompressions); \
170 current_thread()->decompressions++; \
171 MACRO_END
172
173 boolean_t current_thread_aborted(void);
174
175 /* Forward declarations of internal routines. */
176 static kern_return_t vm_fault_wire_fast(
177 vm_map_t map,
178 vm_map_offset_t va,
179 vm_prot_t prot,
180 vm_tag_t wire_tag,
181 vm_map_entry_t entry,
182 pmap_t pmap,
183 vm_map_offset_t pmap_addr,
184 ppnum_t *physpage_p);
185
186 static kern_return_t vm_fault_internal(
187 vm_map_t map,
188 vm_map_offset_t vaddr,
189 vm_prot_t caller_prot,
190 boolean_t change_wiring,
191 vm_tag_t wire_tag,
192 int interruptible,
193 pmap_t pmap,
194 vm_map_offset_t pmap_addr,
195 ppnum_t *physpage_p);
196
197 static void vm_fault_copy_cleanup(
198 vm_page_t page,
199 vm_page_t top_page);
200
201 static void vm_fault_copy_dst_cleanup(
202 vm_page_t page);
203
204 #if VM_FAULT_CLASSIFY
205 extern void vm_fault_classify(vm_object_t object,
206 vm_object_offset_t offset,
207 vm_prot_t fault_type);
208
209 extern void vm_fault_classify_init(void);
210 #endif
211
212 unsigned long vm_pmap_enter_blocked = 0;
213 unsigned long vm_pmap_enter_retried = 0;
214
215 unsigned long vm_cs_validates = 0;
216 unsigned long vm_cs_revalidates = 0;
217 unsigned long vm_cs_query_modified = 0;
218 unsigned long vm_cs_validated_dirtied = 0;
219 unsigned long vm_cs_bitmap_validated = 0;
220
221 void vm_pre_fault(vm_map_offset_t, vm_prot_t);
222
223 extern char *kdp_compressor_decompressed_page;
224 extern addr64_t kdp_compressor_decompressed_page_paddr;
225 extern ppnum_t kdp_compressor_decompressed_page_ppnum;
226
227 struct vmrtfr {
228 int vmrtfr_maxi;
229 int vmrtfr_curi;
230 int64_t vmrtf_total;
231 vm_rtfault_record_t *vm_rtf_records;
232 } vmrtfrs;
233 #define VMRTF_DEFAULT_BUFSIZE (4096)
234 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
235 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
236
237 static void vm_rtfrecord_lock(void);
238 static void vm_rtfrecord_unlock(void);
239 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
240
241 extern lck_grp_t vm_page_lck_grp_bucket;
242 extern lck_attr_t vm_page_lck_attr;
243 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
244
245 #if DEVELOPMENT || DEBUG
246 extern int madvise_free_debug;
247 #endif /* DEVELOPMENT || DEBUG */
248
249 #if CONFIG_FREEZE
250 __startup_func
251 static bool
252 osenvironment_is_diagnostics(void)
253 {
254 DTEntry chosen;
255 const char *osenvironment;
256 unsigned int size;
257 if (kSuccess == SecureDTLookupEntry(0, "/chosen", &chosen)) {
258 if (kSuccess == SecureDTGetProperty(chosen, "osenvironment", (void const **) &osenvironment, &size)) {
259 return strcmp(osenvironment, "diagnostics") == 0;
260 }
261 }
262 return false;
263 }
264 #endif /* CONFIG_FREEZE */
265
266 /*
267 * Routine: vm_fault_init
268 * Purpose:
269 * Initialize our private data structures.
270 */
271 __startup_func
272 void
273 vm_fault_init(void)
274 {
275 int i, vm_compressor_temp;
276 boolean_t need_default_val = TRUE;
277 /*
278 * Choose a value for the hard throttle threshold based on the amount of ram. The threshold is
279 * computed as a percentage of available memory, and the percentage used is scaled inversely with
280 * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems
281 * and reduce the value down to 10% for very large memory configurations. This helps give us a
282 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
283 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
284 */
285
286 vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
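/*
 * Worked example (illustrative, not part of the original source): on an 8 GB
 * machine the percentage is 35 - MIN(8, 25) = 27, giving a threshold of about
 * 2.16 GB; on a 64 GB machine it bottoms out at 35 - 25 = 10 percent, i.e.
 * roughly 6.4 GB. Tasks above the threshold become hard-throttle candidates
 * once swap space is exhausted.
 */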
287
288 /*
289 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
290 */
291
292 if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
293 for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
294 if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
295 need_default_val = FALSE;
296 vm_compressor_mode = vm_compressor_temp;
297 break;
298 }
299 }
300 if (need_default_val) {
301 printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
302 }
303 }
304 #if CONFIG_FREEZE
305 if (need_default_val) {
306 if (osenvironment_is_diagnostics()) {
307 printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
308 vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
309 need_default_val = false;
310 }
311 }
312 #endif /* CONFIG_FREEZE */
313 if (need_default_val) {
314 /* If no boot arg or incorrect boot arg, try device tree. */
315 PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
316 }
317 printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
318 vm_config_init();
319
320 PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
321 &vm_protect_privileged_from_untrusted,
322 sizeof(vm_protect_privileged_from_untrusted));
323
324 #if DEBUG || DEVELOPMENT
325 (void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
326
327 if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
328 madvise_free_debug = 0;
329 }
330
331 #endif /* DEBUG || DEVELOPMENT */
332 }
333
334 __startup_func
335 static void
336 vm_rtfault_record_init(void)
337 {
338 size_t size;
339
340 vmrtf_num_records = MAX(vmrtf_num_records, 1);
341 size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
342 vmrtfrs.vm_rtf_records = zalloc_permanent(size,
343 ZALIGN(vm_rtfault_record_t));
344 vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
345 }
346 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
347
348 /*
349 * Routine: vm_fault_cleanup
350 * Purpose:
351 * Clean up the result of vm_fault_page.
352 * Results:
353 * The paging reference for "object" is released.
354 * "object" is unlocked.
355 * If "top_page" is not null, "top_page" is
356 * freed and the paging reference for the object
357 * containing it is released.
358 *
359 * In/out conditions:
360 * "object" must be locked.
361 */
362 void
363 vm_fault_cleanup(
364 vm_object_t object,
365 vm_page_t top_page)
366 {
367 vm_object_paging_end(object);
368 vm_object_unlock(object);
369
370 if (top_page != VM_PAGE_NULL) {
371 object = VM_PAGE_OBJECT(top_page);
372
373 vm_object_lock(object);
374 VM_PAGE_FREE(top_page);
375 vm_object_paging_end(object);
376 vm_object_unlock(object);
377 }
378 }
379
380 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
381
382
383 boolean_t vm_page_deactivate_behind = TRUE;
384 /*
385 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
386 */
387 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128
388 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */
389 /* we use it to size an array on the stack */
390
391 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
392
393 #define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
394
395 /*
396 * vm_fault_is_sequential
397 *
398 * Determine if sequential access is in progress
399 * in accordance with the behavior specified.
400 * Update state to indicate current access pattern.
401 *
402 * object must have at least the shared lock held
403 */
404 static
405 void
406 vm_fault_is_sequential(
407 vm_object_t object,
408 vm_object_offset_t offset,
409 vm_behavior_t behavior)
410 {
411 vm_object_offset_t last_alloc;
412 int sequential;
413 int orig_sequential;
414
415 last_alloc = object->last_alloc;
416 sequential = object->sequential;
417 orig_sequential = sequential;
418
419 offset = vm_object_trunc_page(offset);
420 if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
421 /* re-faulting in the same page: no change in behavior */
422 return;
423 }
424
425 switch (behavior) {
426 case VM_BEHAVIOR_RANDOM:
427 /*
428 * reset indicator of sequential behavior
429 */
430 sequential = 0;
431 break;
432
433 case VM_BEHAVIOR_SEQUENTIAL:
434 if (offset && last_alloc == offset - PAGE_SIZE_64) {
435 /*
436 * advance indicator of sequential behavior
437 */
438 if (sequential < MAX_SEQUENTIAL_RUN) {
439 sequential += PAGE_SIZE;
440 }
441 } else {
442 /*
443 * reset indicator of sequential behavior
444 */
445 sequential = 0;
446 }
447 break;
448
449 case VM_BEHAVIOR_RSEQNTL:
450 if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
451 /*
452 * advance indicator of sequential behavior
453 */
454 if (sequential > -MAX_SEQUENTIAL_RUN) {
455 sequential -= PAGE_SIZE;
456 }
457 } else {
458 /*
459 * reset indicator of sequential behavior
460 */
461 sequential = 0;
462 }
463 break;
464
465 case VM_BEHAVIOR_DEFAULT:
466 default:
467 if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
468 /*
469 * advance indicator of sequential behavior
470 */
471 if (sequential < 0) {
472 sequential = 0;
473 }
474 if (sequential < MAX_SEQUENTIAL_RUN) {
475 sequential += PAGE_SIZE;
476 }
477 } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
478 /*
479 * advance indicator of sequential behavior
480 */
481 if (sequential > 0) {
482 sequential = 0;
483 }
484 if (sequential > -MAX_SEQUENTIAL_RUN) {
485 sequential -= PAGE_SIZE;
486 }
487 } else {
488 /*
489 * reset indicator of sequential behavior
490 */
491 sequential = 0;
492 }
493 break;
494 }
495 if (sequential != orig_sequential) {
496 if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
497 /*
498 * if someone else has already updated object->sequential
499 * don't bother trying to update it or object->last_alloc
500 */
501 return;
502 }
503 }
504 /*
505 * I'd like to do this with an OSCompareAndSwap64, but that
506 * doesn't exist for PPC... however, it shouldn't matter
507 * that much... last_alloc is maintained so that we can determine
508 * if a sequential access pattern is taking place... if only
509 * one thread is banging on this object, no problem with the unprotected
510 * update... if 2 or more threads are banging away, we run the risk of
511 * someone seeing a mangled update... however, in the face of multiple
512 * accesses, no sequential access pattern can develop anyway, so we
513 * haven't lost any real info.
514 */
515 object->last_alloc = offset;
516 }
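/*
 * Illustrative example (not part of the original source): with VM_BEHAVIOR_DEFAULT,
 * faults at ascending adjacent offsets (0, PAGE_SIZE, 2*PAGE_SIZE, ...) each advance
 * object->sequential by PAGE_SIZE up to MAX_SEQUENTIAL_RUN, while adjacent descending
 * faults drive it negative; any non-adjacent fault resets it to 0. The sign records
 * the direction of the detected run and the magnitude its length in bytes.
 */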
517
518
519 int vm_page_deactivate_behind_count = 0;
520
521 /*
522 * vm_fault_deactivate_behind
523 *
524 * Determine if sequential access is in progress
525 * in accordance with the behavior specified. If
526 * so, compute a potential page to deactivate and
527 * deactivate it.
528 *
529 * object must be locked.
530 *
531 * return TRUE if we actually deactivate a page
532 */
533 static
534 boolean_t
535 vm_fault_deactivate_behind(
536 vm_object_t object,
537 vm_object_offset_t offset,
538 vm_behavior_t behavior)
539 {
540 int n;
541 int pages_in_run = 0;
542 int max_pages_in_run = 0;
543 int sequential_run;
544 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
545 vm_object_offset_t run_offset = 0;
546 vm_object_offset_t pg_offset = 0;
547 vm_page_t m;
548 vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
549
550 pages_in_run = 0;
551 #if TRACEFAULTPAGE
552 dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
553 #endif
554 if (object == kernel_object || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
555 /*
556 * Do not deactivate pages from the kernel object: they
557 * are not intended to become pageable.
558 * or we've disabled the deactivate behind mechanism
559 * or we are dealing with an offset that is not aligned to
560 * the system's PAGE_SIZE because in that case we will
561 * handle the deactivation on the aligned offset and, thus,
562 * the full PAGE_SIZE page once. This helps us avoid the redundant
563 * deactivates and the extra faults.
564 */
565 return FALSE;
566 }
567 if ((sequential_run = object->sequential)) {
568 if (sequential_run < 0) {
569 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
570 sequential_run = 0 - sequential_run;
571 } else {
572 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
573 }
574 }
575 switch (behavior) {
576 case VM_BEHAVIOR_RANDOM:
577 break;
578 case VM_BEHAVIOR_SEQUENTIAL:
579 if (sequential_run >= (int)PAGE_SIZE) {
580 run_offset = 0 - PAGE_SIZE_64;
581 max_pages_in_run = 1;
582 }
583 break;
584 case VM_BEHAVIOR_RSEQNTL:
585 if (sequential_run >= (int)PAGE_SIZE) {
586 run_offset = PAGE_SIZE_64;
587 max_pages_in_run = 1;
588 }
589 break;
590 case VM_BEHAVIOR_DEFAULT:
591 default:
592 { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
593
594 /*
595 * determine if the run of sequential access has been
596 * long enough on an object with default access behavior
597 * to consider it for deactivation
598 */
599 if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
600 /*
601 * the comparisons between offset and behind are done
602 * in this kind of odd fashion in order to prevent wrap around
603 * at the end points
604 */
605 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
606 if (offset >= behind) {
607 run_offset = 0 - behind;
608 pg_offset = PAGE_SIZE_64;
609 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
610 }
611 } else {
612 if (offset < -behind) {
613 run_offset = behind;
614 pg_offset = 0 - PAGE_SIZE_64;
615 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
616 }
617 }
618 }
619 break;}
620 }
621 for (n = 0; n < max_pages_in_run; n++) {
622 m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
623
624 if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) {
625 page_run[pages_in_run++] = m;
626
627 /*
628 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
629 *
630 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
631 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
632 * new reference happens. If no further references happen on the page after that remote TLB flushes
633 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
634 * by pageout_scan, which is just fine since the last reference would have happened quite far
635 * in the past (TLB caches don't hang around for very long), and of course could just as easily
636 * have happened before we did the deactivate_behind.
637 */
638 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
639 }
640 }
641 if (pages_in_run) {
642 vm_page_lockspin_queues();
643
644 for (n = 0; n < pages_in_run; n++) {
645 m = page_run[n];
646
647 vm_page_deactivate_internal(m, FALSE);
648
649 vm_page_deactivate_behind_count++;
650 #if TRACEFAULTPAGE
651 dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
652 #endif
653 }
654 vm_page_unlock_queues();
655
656 return TRUE;
657 }
658 return FALSE;
659 }
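/*
 * Illustrative example (not part of the original source): with VM_BEHAVIOR_DEFAULT and
 * vm_default_behind = 128, once a forward sequential run reaches 128 pages, every time
 * the run length crosses a multiple of VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER (16) pages
 * we deactivate the cluster of 16 pages sitting 128 pages behind the current fault, so
 * pages the run has clearly finished with are aged out ahead of the pageout scan.
 */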
660
661
662 #if (DEVELOPMENT || DEBUG)
663 uint32_t vm_page_creation_throttled_hard = 0;
664 uint32_t vm_page_creation_throttled_soft = 0;
665 uint64_t vm_page_creation_throttle_avoided = 0;
666 #endif /* DEVELOPMENT || DEBUG */
667
668 static int
669 vm_page_throttled(boolean_t page_kept)
670 {
671 clock_sec_t elapsed_sec;
672 clock_sec_t tv_sec;
673 clock_usec_t tv_usec;
674
675 thread_t thread = current_thread();
676
677 if (thread->options & TH_OPT_VMPRIV) {
678 return 0;
679 }
680
681 if (thread->t_page_creation_throttled) {
682 thread->t_page_creation_throttled = 0;
683
684 if (page_kept == FALSE) {
685 goto no_throttle;
686 }
687 }
688 if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
689 #if (DEVELOPMENT || DEBUG)
690 thread->t_page_creation_throttled_hard++;
691 OSAddAtomic(1, &vm_page_creation_throttled_hard);
692 #endif /* DEVELOPMENT || DEBUG */
693 return HARD_THROTTLE_DELAY;
694 }
695
696 if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
697 thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
698 if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
699 #if (DEVELOPMENT || DEBUG)
700 OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
701 #endif
702 goto no_throttle;
703 }
704 clock_get_system_microtime(&tv_sec, &tv_usec);
705
706 elapsed_sec = tv_sec - thread->t_page_creation_time;
707
708 if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
709 (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
710 if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
711 /*
712 * we'll reset our stats to give a well behaved app
713 * that was unlucky enough to accumulate a bunch of pages
714 * over a long period of time a chance to get out of
715 * the throttled state... we reset the counter and timestamp
716 * so that if it stays under the rate limit for the next second
717 * it will be back in our good graces... if it exceeds it, it
718 * will remain in the throttled state
719 */
720 thread->t_page_creation_time = tv_sec;
721 thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
722 }
723 VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
724
725 thread->t_page_creation_throttled = 1;
726
727 if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
728 #if (DEVELOPMENT || DEBUG)
729 thread->t_page_creation_throttled_hard++;
730 OSAddAtomic(1, &vm_page_creation_throttled_hard);
731 #endif /* DEVELOPMENT || DEBUG */
732 return HARD_THROTTLE_DELAY;
733 } else {
734 #if (DEVELOPMENT || DEBUG)
735 thread->t_page_creation_throttled_soft++;
736 OSAddAtomic(1, &vm_page_creation_throttled_soft);
737 #endif /* DEVELOPMENT || DEBUG */
738 return SOFT_THROTTLE_DELAY;
739 }
740 }
741 thread->t_page_creation_time = tv_sec;
742 thread->t_page_creation_count = 0;
743 }
744 no_throttle:
745 thread->t_page_creation_count++;
746
747 return 0;
748 }
749
750
751 /*
752 * check for various conditions that would
753 * prevent us from creating a ZF page...
754 * cleanup is based on being called from vm_fault_page
755 *
756 * object must be locked
757 * object == m->vmp_object
758 */
759 static vm_fault_return_t
760 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
761 {
762 int throttle_delay;
763
764 if (object->shadow_severed ||
765 VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
766 /*
767 * Either:
768 * 1. the shadow chain was severed,
769 * 2. the purgeable object is volatile or empty and is marked
770 * to fault on access while volatile.
771 * Just have to return an error at this point
772 */
773 if (m != VM_PAGE_NULL) {
774 VM_PAGE_FREE(m);
775 }
776 vm_fault_cleanup(object, first_m);
777
778 thread_interrupt_level(interruptible_state);
779
780 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
781 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
782 }
783
784 if (object->shadow_severed) {
785 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
786 }
787 return VM_FAULT_MEMORY_ERROR;
788 }
789 if (page_throttle == TRUE) {
790 if ((throttle_delay = vm_page_throttled(FALSE))) {
791 /*
792 * we're throttling zero-fills...
793 * treat this as if we couldn't grab a page
794 */
795 if (m != VM_PAGE_NULL) {
796 VM_PAGE_FREE(m);
797 }
798 vm_fault_cleanup(object, first_m);
799
800 VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
801
802 delay(throttle_delay);
803
804 if (current_thread_aborted()) {
805 thread_interrupt_level(interruptible_state);
806 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
807 return VM_FAULT_INTERRUPTED;
808 }
809 thread_interrupt_level(interruptible_state);
810
811 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
812 return VM_FAULT_MEMORY_SHORTAGE;
813 }
814 }
815 return VM_FAULT_SUCCESS;
816 }
817
818 /*
819 * Clear the code signing bits on the given page_t
820 */
821 static void
822 vm_fault_cs_clear(vm_page_t m)
823 {
824 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
825 m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
826 m->vmp_cs_nx = VMP_CS_ALL_FALSE;
827 }
828
829 /*
830 * Enqueues the given page on the throttled queue.
831 * The caller must hold the vm_page_queue_lock and it will be held on return.
832 */
833 static void
834 vm_fault_enqueue_throttled_locked(vm_page_t m)
835 {
836 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
837 assert(!VM_PAGE_WIRED(m));
838
839 /*
840 * can't be on the pageout queue since we don't
841 * have a pager to try and clean to
842 */
843 vm_page_queues_remove(m, TRUE);
844 vm_page_check_pageable_safe(m);
845 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
846 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
847 vm_page_throttled_count++;
848 }
849
850 /*
851 * do the work to zero fill a page and
852 * inject it into the correct paging queue
853 *
854 * m->vmp_object must be locked
855 * page queue lock must NOT be held
856 */
857 static int
858 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
859 {
860 int my_fault = DBG_ZERO_FILL_FAULT;
861 vm_object_t object;
862
863 object = VM_PAGE_OBJECT(m);
864
865 /*
866 * This is a zero-fill page fault...
867 *
868 * Checking the page lock is a waste of
869 * time; this page was absent, so
870 * it can't be page locked by a pager.
871 *
872 * we also consider it undefined
873 * with respect to instruction
874 * execution. i.e. it is the responsibility
875 * of higher layers to call for an instruction
876 * sync after changing the contents and before
877 * sending a program into this area. We
878 * choose this approach for performance
879 */
880 vm_fault_cs_clear(m);
881 m->vmp_pmapped = TRUE;
882
883 if (no_zero_fill == TRUE) {
884 my_fault = DBG_NZF_PAGE_FAULT;
885
886 if (m->vmp_absent && m->vmp_busy) {
887 return my_fault;
888 }
889 } else {
890 vm_page_zero_fill(m);
891
892 counter_inc(&vm_statistics_zero_fill_count);
893 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
894 }
895 assert(!m->vmp_laundry);
896 assert(object != kernel_object);
897 //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
898 if (!VM_DYNAMIC_PAGING_ENABLED() &&
899 (object->purgable == VM_PURGABLE_DENY ||
900 object->purgable == VM_PURGABLE_NONVOLATILE ||
901 object->purgable == VM_PURGABLE_VOLATILE)) {
902 vm_page_lockspin_queues();
903 if (!VM_DYNAMIC_PAGING_ENABLED()) {
904 vm_fault_enqueue_throttled_locked(m);
905 }
906 vm_page_unlock_queues();
907 }
908 return my_fault;
909 }
910
911
912 /*
913 * Routine: vm_fault_page
914 * Purpose:
915 * Find the resident page for the virtual memory
916 * specified by the given virtual memory object
917 * and offset.
918 * Additional arguments:
919 * The required permissions for the page are given
920 * in "fault_type". Desired permissions are included
921 * in "protection".
922 * fault_info is passed along to determine pagein cluster
923 * limits... it contains the expected reference pattern,
924 * cluster size if available, etc...
925 *
926 * If the desired page is known to be resident (for
927 * example, because it was previously wired down), asserting
928 * the "unwiring" parameter will speed the search.
929 *
930 * If the operation can be interrupted (by thread_abort
931 * or thread_terminate), then the "interruptible"
932 * parameter should be asserted.
933 *
934 * Results:
935 * The page containing the proper data is returned
936 * in "result_page".
937 *
938 * In/out conditions:
939 * The source object must be locked and referenced,
940 * and must donate one paging reference. The reference
941 * is not affected. The paging reference and lock are
942 * consumed.
943 *
944 * If the call succeeds, the object in which "result_page"
945 * resides is left locked and holding a paging reference.
946 * If this is not the original object, a busy page in the
947 * original object is returned in "top_page", to prevent other
948 * callers from pursuing this same data, along with a paging
949 * reference for the original object. The "top_page" should
950 * be destroyed when this guarantee is no longer required.
951 * The "result_page" is also left busy. It is not removed
952 * from the pageout queues.
953 * Special Case:
954 * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
955 * fault succeeded but there's no VM page (i.e. the VM object
956 * does not actually hold VM pages, but device memory or
957 * large pages). The object is still locked and we still hold a
958 * paging_in_progress reference.
959 */
960 unsigned int vm_fault_page_blocked_access = 0;
961 unsigned int vm_fault_page_forced_retry = 0;
962
963 vm_fault_return_t
964 vm_fault_page(
965 /* Arguments: */
966 vm_object_t first_object, /* Object to begin search */
967 vm_object_offset_t first_offset, /* Offset into object */
968 vm_prot_t fault_type, /* What access is requested */
969 boolean_t must_be_resident,/* Must page be resident? */
970 boolean_t caller_lookup, /* caller looked up page */
971 /* Modifies in place: */
972 vm_prot_t *protection, /* Protection for mapping */
973 vm_page_t *result_page, /* Page found, if successful */
974 /* Returns: */
975 vm_page_t *top_page, /* Page in top object, if
976 * not result_page. */
977 int *type_of_fault, /* if non-null, fill in with type of fault
978 * COW, zero-fill, etc... returned in trace point */
979 /* More arguments: */
980 kern_return_t *error_code, /* code if page is in error */
981 boolean_t no_zero_fill, /* don't zero fill absent pages */
982 boolean_t data_supply, /* treat as data_supply if
983 * it is a write fault and a full
984 * page is provided */
985 vm_object_fault_info_t fault_info)
986 {
987 vm_page_t m;
988 vm_object_t object;
989 vm_object_offset_t offset;
990 vm_page_t first_m;
991 vm_object_t next_object;
992 vm_object_t copy_object;
993 boolean_t look_for_page;
994 boolean_t force_fault_retry = FALSE;
995 vm_prot_t access_required = fault_type;
996 vm_prot_t wants_copy_flag;
997 kern_return_t wait_result;
998 wait_interrupt_t interruptible_state;
999 boolean_t data_already_requested = FALSE;
1000 vm_behavior_t orig_behavior;
1001 vm_size_t orig_cluster_size;
1002 vm_fault_return_t error;
1003 int my_fault;
1004 uint32_t try_failed_count;
1005 int interruptible; /* how may fault be interrupted? */
1006 int external_state = VM_EXTERNAL_STATE_UNKNOWN;
1007 memory_object_t pager;
1008 vm_fault_return_t retval;
1009 int grab_options;
1010 bool clear_absent_on_error = false;
1011
1012 /*
1013 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1014 * marked as paged out in the compressor pager or the pager doesn't exist.
1015 * Note also that if the pager for an internal object
1016 * has not been created, the pager is not invoked regardless of the value
1017 * of MUST_ASK_PAGER().
1018 *
1019 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1020 * is marked as paged out in the compressor pager.
1021 * PAGED_OUT() is used to determine if a page has already been pushed
1022 * into a copy object in order to avoid a redundant page out operation.
1023 */
1024 #define MUST_ASK_PAGER(o, f, s) \
1025 ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1026
1027 #define PAGED_OUT(o, f) \
1028 (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
1029
1030 /*
1031 * Recovery actions
1032 */
1033 #define RELEASE_PAGE(m) \
1034 MACRO_BEGIN \
1035 PAGE_WAKEUP_DONE(m); \
1036 if ( !VM_PAGE_PAGEABLE(m)) { \
1037 vm_page_lockspin_queues(); \
1038 if (clear_absent_on_error && m->vmp_absent) {\
1039 vm_page_zero_fill(m); \
1040 counter_inc(&vm_statistics_zero_fill_count);\
1041 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);\
1042 m->vmp_absent = false; \
1043 } \
1044 if ( !VM_PAGE_PAGEABLE(m)) { \
1045 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \
1046 vm_page_deactivate(m); \
1047 else \
1048 vm_page_activate(m); \
1049 } \
1050 vm_page_unlock_queues(); \
1051 } \
1052 clear_absent_on_error = false; \
1053 MACRO_END
1054
1055 #if TRACEFAULTPAGE
1056 dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1057 #endif
1058
1059 interruptible = fault_info->interruptible;
1060 interruptible_state = thread_interrupt_level(interruptible);
1061
1062 /*
1063 * INVARIANTS (through entire routine):
1064 *
1065 * 1) At all times, we must either have the object
1066 * lock or a busy page in some object to prevent
1067 * some other thread from trying to bring in
1068 * the same page.
1069 *
1070 * Note that we cannot hold any locks during the
1071 * pager access or when waiting for memory, so
1072 * we use a busy page then.
1073 *
1074 * 2) To prevent another thread from racing us down the
1075 * shadow chain and entering a new page in the top
1076 * object before we do, we must keep a busy page in
1077 * the top object while following the shadow chain.
1078 *
1079 * 3) We must increment paging_in_progress on any object
1080 * for which we have a busy page before dropping
1081 * the object lock
1082 *
1083 * 4) We leave busy pages on the pageout queues.
1084 * If the pageout daemon comes across a busy page,
1085 * it will remove the page from the pageout queues.
1086 */
1087
1088 object = first_object;
1089 offset = first_offset;
1090 first_m = VM_PAGE_NULL;
1091 access_required = fault_type;
1092
1093 /*
1094 * default type of fault
1095 */
1096 my_fault = DBG_CACHE_HIT_FAULT;
1097 thread_pri_floor_t token;
1098 bool drop_floor = false;
1099
1100 while (TRUE) {
1101 #if TRACEFAULTPAGE
1102 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1103 #endif
1104
1105 grab_options = 0;
1106 #if CONFIG_SECLUDED_MEMORY
1107 if (object->can_grab_secluded) {
1108 grab_options |= VM_PAGE_GRAB_SECLUDED;
1109 }
1110 #endif /* CONFIG_SECLUDED_MEMORY */
1111
1112 if (!object->alive) {
1113 /*
1114 * object is no longer valid
1115 * clean up and return error
1116 */
1117 vm_fault_cleanup(object, first_m);
1118 thread_interrupt_level(interruptible_state);
1119
1120 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1121 return VM_FAULT_MEMORY_ERROR;
1122 }
1123
1124 if (!object->pager_created && object->phys_contiguous) {
1125 /*
1126 * A physically-contiguous object without a pager:
1127 * must be a "large page" object. We do not deal
1128 * with VM pages for this object.
1129 */
1130 caller_lookup = FALSE;
1131 m = VM_PAGE_NULL;
1132 goto phys_contig_object;
1133 }
1134
1135 if (object->blocked_access) {
1136 /*
1137 * Access to this VM object has been blocked.
1138 * Replace our "paging_in_progress" reference with
1139 * a "activity_in_progress" reference and wait for
1140 * access to be unblocked.
1141 */
1142 caller_lookup = FALSE; /* no longer valid after sleep */
1143 vm_object_activity_begin(object);
1144 vm_object_paging_end(object);
1145 while (object->blocked_access) {
1146 vm_object_sleep(object,
1147 VM_OBJECT_EVENT_UNBLOCKED,
1148 THREAD_UNINT);
1149 }
1150 vm_fault_page_blocked_access++;
1151 vm_object_paging_begin(object);
1152 vm_object_activity_end(object);
1153 }
1154
1155 /*
1156 * See whether the page at 'offset' is resident
1157 */
1158 if (caller_lookup == TRUE) {
1159 /*
1160 * The caller has already looked up the page
1161 * and gave us the result in "result_page".
1162 * We can use this for the first lookup but
1163 * it loses its validity as soon as we unlock
1164 * the object.
1165 */
1166 m = *result_page;
1167 caller_lookup = FALSE; /* no longer valid after that */
1168 } else {
1169 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1170 }
1171 #if TRACEFAULTPAGE
1172 dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1173 #endif
1174 if (m != VM_PAGE_NULL) {
1175 if (m->vmp_busy) {
1176 /*
1177 * The page is being brought in,
1178 * wait for it and then retry.
1179 */
1180 #if TRACEFAULTPAGE
1181 dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1182 #endif
1183 wait_result = PAGE_SLEEP(object, m, interruptible);
1184
1185 if (wait_result != THREAD_AWAKENED) {
1186 vm_fault_cleanup(object, first_m);
1187 thread_interrupt_level(interruptible_state);
1188
1189 if (wait_result == THREAD_RESTART) {
1190 return VM_FAULT_RETRY;
1191 } else {
1192 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1193 return VM_FAULT_INTERRUPTED;
1194 }
1195 }
1196 continue;
1197 }
1198 if (m->vmp_laundry) {
1199 m->vmp_free_when_done = FALSE;
1200
1201 if (!m->vmp_cleaning) {
1202 vm_pageout_steal_laundry(m, FALSE);
1203 }
1204 }
1205 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
1206 /*
1207 * Guard page: off limits !
1208 */
1209 if (fault_type == VM_PROT_NONE) {
1210 /*
1211 * The fault is not requesting any
1212 * access to the guard page, so it must
1213 * be just to wire or unwire it.
1214 * Let's pretend it succeeded...
1215 */
1216 m->vmp_busy = TRUE;
1217 *result_page = m;
1218 assert(first_m == VM_PAGE_NULL);
1219 *top_page = first_m;
1220 if (type_of_fault) {
1221 *type_of_fault = DBG_GUARD_FAULT;
1222 }
1223 thread_interrupt_level(interruptible_state);
1224 return VM_FAULT_SUCCESS;
1225 } else {
1226 /*
1227 * The fault requests access to the
1228 * guard page: let's deny that !
1229 */
1230 vm_fault_cleanup(object, first_m);
1231 thread_interrupt_level(interruptible_state);
1232 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1233 return VM_FAULT_MEMORY_ERROR;
1234 }
1235 }
1236
1237 if (m->vmp_error) {
1238 /*
1239 * The page is in error, give up now.
1240 */
1241 #if TRACEFAULTPAGE
1242 dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */
1243 #endif
1244 if (error_code) {
1245 *error_code = KERN_MEMORY_ERROR;
1246 }
1247 VM_PAGE_FREE(m);
1248
1249 vm_fault_cleanup(object, first_m);
1250 thread_interrupt_level(interruptible_state);
1251
1252 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1253 return VM_FAULT_MEMORY_ERROR;
1254 }
1255 if (m->vmp_restart) {
1256 /*
1257 * The pager wants us to restart
1258 * at the top of the chain,
1259 * typically because it has moved the
1260 * page to another pager, then do so.
1261 */
1262 #if TRACEFAULTPAGE
1263 dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1264 #endif
1265 VM_PAGE_FREE(m);
1266
1267 vm_fault_cleanup(object, first_m);
1268 thread_interrupt_level(interruptible_state);
1269
1270 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1271 return VM_FAULT_RETRY;
1272 }
1273 if (m->vmp_absent) {
1274 /*
1275 * The page isn't busy, but is absent,
1276 * therefore it's deemed "unavailable".
1277 *
1278 * Remove the non-existent page (unless it's
1279 * in the top object) and move on down to the
1280 * next object (if there is one).
1281 */
1282 #if TRACEFAULTPAGE
1283 dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */
1284 #endif
1285 next_object = object->shadow;
1286
1287 if (next_object == VM_OBJECT_NULL) {
1288 /*
1289 * Absent page at bottom of shadow
1290 * chain; zero fill the page we left
1291 * busy in the first object, and free
1292 * the absent page.
1293 */
1294 assert(!must_be_resident);
1295
1296 /*
1297 * check for any conditions that prevent
1298 * us from creating a new zero-fill page
1299 * vm_fault_check will do all of the
1300 * fault cleanup in the case of an error condition
1301 * including resetting the thread_interrupt_level
1302 */
1303 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1304
1305 if (error != VM_FAULT_SUCCESS) {
1306 return error;
1307 }
1308
1309 if (object != first_object) {
1310 /*
1311 * free the absent page we just found
1312 */
1313 VM_PAGE_FREE(m);
1314
1315 /*
1316 * drop reference and lock on current object
1317 */
1318 vm_object_paging_end(object);
1319 vm_object_unlock(object);
1320
1321 /*
1322 * grab the original page we
1323 * 'soldered' in place and
1324 * retake lock on 'first_object'
1325 */
1326 m = first_m;
1327 first_m = VM_PAGE_NULL;
1328
1329 object = first_object;
1330 offset = first_offset;
1331
1332 vm_object_lock(object);
1333 } else {
1334 /*
1335 * we're going to use the absent page we just found
1336 * so convert it to a 'busy' page
1337 */
1338 m->vmp_absent = FALSE;
1339 m->vmp_busy = TRUE;
1340 }
1341 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1342 m->vmp_absent = TRUE;
1343 clear_absent_on_error = true;
1344 }
1345 /*
1346 * zero-fill the page and put it on
1347 * the correct paging queue
1348 */
1349 my_fault = vm_fault_zero_page(m, no_zero_fill);
1350
1351 break;
1352 } else {
1353 if (must_be_resident) {
1354 vm_object_paging_end(object);
1355 } else if (object != first_object) {
1356 vm_object_paging_end(object);
1357 VM_PAGE_FREE(m);
1358 } else {
1359 first_m = m;
1360 m->vmp_absent = FALSE;
1361 m->vmp_busy = TRUE;
1362
1363 vm_page_lockspin_queues();
1364 vm_page_queues_remove(m, FALSE);
1365 vm_page_unlock_queues();
1366 }
1367
1368 offset += object->vo_shadow_offset;
1369 fault_info->lo_offset += object->vo_shadow_offset;
1370 fault_info->hi_offset += object->vo_shadow_offset;
1371 access_required = VM_PROT_READ;
1372
1373 vm_object_lock(next_object);
1374 vm_object_unlock(object);
1375 object = next_object;
1376 vm_object_paging_begin(object);
1377
1378 /*
1379 * reset to default type of fault
1380 */
1381 my_fault = DBG_CACHE_HIT_FAULT;
1382
1383 continue;
1384 }
1385 }
1386 if ((m->vmp_cleaning)
1387 && ((object != first_object) || (object->copy != VM_OBJECT_NULL))
1388 && (fault_type & VM_PROT_WRITE)) {
1389 /*
1390 * This is a copy-on-write fault that will
1391 * cause us to revoke access to this page, but
1392 * this page is in the process of being cleaned
1393 * in a clustered pageout. We must wait until
1394 * the cleaning operation completes before
1395 * revoking access to the original page,
1396 * otherwise we might attempt to remove a
1397 * wired mapping.
1398 */
1399 #if TRACEFAULTPAGE
1400 dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */
1401 #endif
1402 /*
1403 * take an extra ref so that object won't die
1404 */
1405 vm_object_reference_locked(object);
1406
1407 vm_fault_cleanup(object, first_m);
1408
1409 vm_object_lock(object);
1410 assert(object->ref_count > 0);
1411
1412 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1413
1414 if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1415 PAGE_ASSERT_WAIT(m, interruptible);
1416
1417 vm_object_unlock(object);
1418 wait_result = thread_block(THREAD_CONTINUE_NULL);
1419 vm_object_deallocate(object);
1420
1421 goto backoff;
1422 } else {
1423 vm_object_unlock(object);
1424
1425 vm_object_deallocate(object);
1426 thread_interrupt_level(interruptible_state);
1427
1428 return VM_FAULT_RETRY;
1429 }
1430 }
1431 if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1432 !(fault_info != NULL && fault_info->stealth)) {
1433 /*
1434 * If we were passed a non-NULL pointer for
1435 * "type_of_fault", than we came from
1436 * vm_fault... we'll let it deal with
1437 * this condition, since it
1438 * needs to see m->vmp_speculative to correctly
1439 * account the pageins, otherwise...
1440 * take it off the speculative queue, we'll
1441 * let the caller of vm_fault_page deal
1442 * with getting it onto the correct queue
1443 *
1444 * If the caller specified in fault_info that
1445 * it wants a "stealth" fault, we also leave
1446 * the page in the speculative queue.
1447 */
1448 vm_page_lockspin_queues();
1449 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1450 vm_page_queues_remove(m, FALSE);
1451 }
1452 vm_page_unlock_queues();
1453 }
1454 assert(object == VM_PAGE_OBJECT(m));
1455
1456 if (object->code_signed) {
1457 /*
1458 * CODE SIGNING:
1459 * We just paged in a page from a signed
1460 * memory object but we don't need to
1461 * validate it now. We'll validate it if
1462 * when it gets mapped into a user address
1463 * space for the first time or when the page
1464 * gets copied to another object as a result
1465 * of a copy-on-write.
1466 */
1467 }
1468
1469 /*
1470 * We mark the page busy and leave it on
1471 * the pageout queues. If the pageout
1472 * daemon comes across it, then it will
1473 * remove the page from the queue, but not the object
1474 */
1475 #if TRACEFAULTPAGE
1476 dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1477 #endif
1478 assert(!m->vmp_busy);
1479 assert(!m->vmp_absent);
1480
1481 m->vmp_busy = TRUE;
1482 break;
1483 }
1484
1485 #if __arm__ && !__arm64__
1486 if (__improbable(object->internal &&
1487 offset >= object->vo_size &&
1488 offset < ((object->vo_size + SIXTEENK_PAGE_MASK) & ~SIXTEENK_PAGE_MASK) &&
1489 PAGE_SIZE == FOURK_PAGE_SIZE)) {
1490 /*
1491 * On devices with a 4k kernel page size
1492 * and a 16k user page size (i.e. 32-bit watches),
1493 * IOKit could have created a VM object with a
1494 * 4k-aligned size.
1495 * IOKit could have then mapped that VM object
1496 * in a user address space, and VM would have extended
1497 * the mapping to the next 16k boundary.
1498 * So we could now be, somewhat illegally, trying to
1499 * access one of the up to 3 non-existent 4k pages
1500 * beyond the end of the VM object.
1501 * We would not be allowed to insert a page beyond the
1502 * end of the object, so let's fail the fault.
1503 */
1504 DTRACE_VM2(vm_fault_page_beyond_end_of_internal,
1505 vm_object_offset_t, offset,
1506 vm_object_size_t, object->vo_size);
1507 vm_fault_cleanup(object, first_m);
1508 thread_interrupt_level(interruptible_state);
1509 return VM_FAULT_MEMORY_ERROR;
1510 }
1511 #endif /* __arm__ && !__arm64__ */
1512
1513 /*
1514 * we get here when there is no page present in the object at
1515 * the offset we're interested in... we'll allocate a page
1516 * at this point if the pager associated with
1517 * this object can provide the data or we're the top object...
1518 * object is locked; m == NULL
1519 */
1520
1521 if (must_be_resident) {
1522 if (fault_type == VM_PROT_NONE &&
1523 object == kernel_object) {
1524 /*
1525 * We've been called from vm_fault_unwire()
1526 * while removing a map entry that was allocated
1527 * with KMA_KOBJECT and KMA_VAONLY. This page
1528 * is not present and there's nothing more to
1529 * do here (nothing to unwire).
1530 */
1531 vm_fault_cleanup(object, first_m);
1532 thread_interrupt_level(interruptible_state);
1533
1534 return VM_FAULT_MEMORY_ERROR;
1535 }
1536
1537 goto dont_look_for_page;
1538 }
1539
1540 /* Don't expect to fault pages into the kernel object. */
1541 assert(object != kernel_object);
1542
1543 data_supply = FALSE;
1544
1545 look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE) && !data_supply);
1546
1547 #if TRACEFAULTPAGE
1548 dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
1549 #endif
1550 if (!look_for_page && object == first_object && !object->phys_contiguous) {
1551 /*
1552 * Allocate a new page for this object/offset pair as a placeholder
1553 */
1554 m = vm_page_grab_options(grab_options);
1555 #if TRACEFAULTPAGE
1556 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1557 #endif
1558 if (m == VM_PAGE_NULL) {
1559 vm_fault_cleanup(object, first_m);
1560 thread_interrupt_level(interruptible_state);
1561
1562 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
1563 return VM_FAULT_MEMORY_SHORTAGE;
1564 }
1565
1566 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1567 vm_page_insert_internal(m, object,
1568 vm_object_trunc_page(offset),
1569 VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1570 } else {
1571 vm_page_insert(m, object, vm_object_trunc_page(offset));
1572 }
1573 }
1574 if (look_for_page) {
1575 kern_return_t rc;
1576 int my_fault_type;
1577
1578 /*
1579 * If the memory manager is not ready, we
1580 * cannot make requests.
1581 */
1582 if (!object->pager_ready) {
1583 #if TRACEFAULTPAGE
1584 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1585 #endif
1586 if (m != VM_PAGE_NULL) {
1587 VM_PAGE_FREE(m);
1588 }
1589
1590 /*
1591 * take an extra ref so object won't die
1592 */
1593 vm_object_reference_locked(object);
1594 vm_fault_cleanup(object, first_m);
1595
1596 vm_object_lock(object);
1597 assert(object->ref_count > 0);
1598
1599 if (!object->pager_ready) {
1600 wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible);
1601
1602 vm_object_unlock(object);
1603 if (wait_result == THREAD_WAITING) {
1604 wait_result = thread_block(THREAD_CONTINUE_NULL);
1605 }
1606 vm_object_deallocate(object);
1607
1608 goto backoff;
1609 } else {
1610 vm_object_unlock(object);
1611 vm_object_deallocate(object);
1612 thread_interrupt_level(interruptible_state);
1613
1614 return VM_FAULT_RETRY;
1615 }
1616 }
1617 if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1618 /*
1619 * If there are too many outstanding page
1620 * requests pending on this external object, we
1621 * wait for them to be resolved now.
1622 */
1623 #if TRACEFAULTPAGE
1624 dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1625 #endif
1626 if (m != VM_PAGE_NULL) {
1627 VM_PAGE_FREE(m);
1628 }
1629 /*
1630 * take an extra ref so object won't die
1631 */
1632 vm_object_reference_locked(object);
1633
1634 vm_fault_cleanup(object, first_m);
1635
1636 vm_object_lock(object);
1637 assert(object->ref_count > 0);
1638
1639 if (object->paging_in_progress >= vm_object_pagein_throttle) {
1640 vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
1641
1642 vm_object_unlock(object);
1643 wait_result = thread_block(THREAD_CONTINUE_NULL);
1644 vm_object_deallocate(object);
1645
1646 goto backoff;
1647 } else {
1648 vm_object_unlock(object);
1649 vm_object_deallocate(object);
1650 thread_interrupt_level(interruptible_state);
1651
1652 return VM_FAULT_RETRY;
1653 }
1654 }
1655 if (object->internal) {
1656 int compressed_count_delta;
1657
1658 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1659
1660 if (m == VM_PAGE_NULL) {
1661 /*
1662 * Allocate a new page for this object/offset pair as a placeholder
1663 */
1664 m = vm_page_grab_options(grab_options);
1665 #if TRACEFAULTPAGE
1666 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1667 #endif
1668 if (m == VM_PAGE_NULL) {
1669 vm_fault_cleanup(object, first_m);
1670 thread_interrupt_level(interruptible_state);
1671
1672 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
1673 return VM_FAULT_MEMORY_SHORTAGE;
1674 }
1675
1676 m->vmp_absent = TRUE;
1677 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1678 vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1679 } else {
1680 vm_page_insert(m, object, vm_object_trunc_page(offset));
1681 }
1682 }
1683 assert(m->vmp_busy);
1684
1685 m->vmp_absent = TRUE;
1686 pager = object->pager;
1687
1688 assert(object->paging_in_progress > 0);
1689 vm_object_unlock(object);
1690
1691 rc = vm_compressor_pager_get(
1692 pager,
1693 offset + object->paging_offset,
1694 VM_PAGE_GET_PHYS_PAGE(m),
1695 &my_fault_type,
1696 0,
1697 &compressed_count_delta);
1698
1699 if (type_of_fault == NULL) {
1700 int throttle_delay;
1701
1702 /*
1703 * we weren't called from vm_fault, so we
1704 * need to apply page creation throttling
1705 * do it before we re-acquire any locks
1706 */
1707 if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1708 if ((throttle_delay = vm_page_throttled(TRUE))) {
1709 VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1710 delay(throttle_delay);
1711 }
1712 }
1713 }
1714 vm_object_lock(object);
1715 assert(object->paging_in_progress > 0);
1716
1717 vm_compressor_pager_count(
1718 pager,
1719 compressed_count_delta,
1720 FALSE, /* shared_lock */
1721 object);
1722
1723 switch (rc) {
1724 case KERN_SUCCESS:
1725 m->vmp_absent = FALSE;
1726 m->vmp_dirty = TRUE;
1727 if ((object->wimg_bits &
1728 VM_WIMG_MASK) !=
1729 VM_WIMG_USE_DEFAULT) {
1730 /*
1731 * If the page is not cacheable,
1732 * we can't let its contents
1733 * linger in the data cache
1734 * after the decompression.
1735 */
1736 pmap_sync_page_attributes_phys(
1737 VM_PAGE_GET_PHYS_PAGE(m));
1738 } else {
1739 m->vmp_written_by_kernel = TRUE;
1740 }
1741
1742 /*
1743 * If the object is purgeable, its
1744 * owner's purgeable ledgers have been
1745 * updated in vm_page_insert() but the
1746 * page was also accounted for in a
1747 * "compressed purgeable" ledger, so
1748 * update that now.
1749 */
1750 if (((object->purgable !=
1751 VM_PURGABLE_DENY) ||
1752 object->vo_ledger_tag) &&
1753 (object->vo_owner !=
1754 NULL)) {
1755 /*
1756 * One less compressed
1757 * purgeable/tagged page.
1758 */
1759 vm_object_owner_compressed_update(
1760 object,
1761 -1);
1762 }
1763
1764 break;
1765 case KERN_MEMORY_FAILURE:
1766 m->vmp_unusual = TRUE;
1767 m->vmp_error = TRUE;
1768 m->vmp_absent = FALSE;
1769 break;
1770 case KERN_MEMORY_ERROR:
1771 assert(m->vmp_absent);
1772 break;
1773 default:
1774 panic("vm_fault_page(): unexpected "
1775 "error %d from "
1776 "vm_compressor_pager_get()\n",
1777 rc);
1778 }
1779 PAGE_WAKEUP_DONE(m);
1780
1781 rc = KERN_SUCCESS;
1782 goto data_requested;
1783 }
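	/*
	 * Not a compressor-backed object: the data has to come from the
	 * external pager, so treat this as a page-in fault and throw away
	 * any placeholder page before issuing the request.
	 */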
1784 my_fault_type = DBG_PAGEIN_FAULT;
1785
1786 if (m != VM_PAGE_NULL) {
1787 VM_PAGE_FREE(m);
1788 m = VM_PAGE_NULL;
1789 }
1790
1791 #if TRACEFAULTPAGE
1792 dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */
1793 #endif
1794
1795 /*
1796 * It's possible someone called vm_object_destroy while we weren't
1797 * holding the object lock. If that has happened, then bail out
1798 * here.
1799 */
1800
1801 pager = object->pager;
1802
1803 if (pager == MEMORY_OBJECT_NULL) {
1804 vm_fault_cleanup(object, first_m);
1805 thread_interrupt_level(interruptible_state);
1806 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NO_PAGER), 0 /* arg */);
1807 return VM_FAULT_MEMORY_ERROR;
1808 }
1809
1810 /*
1811 * We have an absent page in place for the faulting offset,
1812 * so we can release the object lock.
1813 */
1814
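	/*
	 * For shared cache objects, raise this thread to a priority floor
	 * for the duration of the page-in; the floor is dropped again once
	 * the data request has completed and the object lock is retaken.
	 */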
1815 if (object->object_is_shared_cache) {
1816 token = thread_priority_floor_start();
1817 /*
1818 * A non-native shared cache object might
1819 * be getting set up in parallel with this
1820 * fault and so we can't assume that this
1821 * check will be valid after we drop the
1822 * object lock below.
1823 */
1824 drop_floor = true;
1825 }
1826
1827 vm_object_unlock(object);
1828
1829 /*
1830 * If this object uses a copy_call strategy,
1831 * and we are interested in a copy of this object
1832 * (having gotten here only by following a
1833 * shadow chain), then tell the memory manager
1834 * via a flag added to the desired_access
1835 * parameter, so that it can detect a race
1836 * between our walking down the shadow chain
1837 * and its pushing pages up into a copy of
1838 * the object that it manages.
1839 */
1840 if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1841 wants_copy_flag = VM_PROT_WANTS_COPY;
1842 } else {
1843 wants_copy_flag = VM_PROT_NONE;
1844 }
1845
1846 if (object->copy == first_object) {
1847 /*
1848 * if we issue the memory_object_data_request in
1849 * this state, we are subject to a deadlock with
1850 * the underlying filesystem if it is trying to
1851 * shrink the file resulting in a push of pages
1852 * into the copy object... that push will stall
1853 * on the placeholder page, and if the pushing thread
1854 * is holding a lock that is required on the pagein
1855 * path (such as a truncate lock), we'll deadlock...
1856 * to avoid this potential deadlock, we throw away
1857 * our placeholder page before calling memory_object_data_request
1858 * and force this thread to retry the vm_fault_page after
1859 * we have issued the I/O. the second time through this path
1860 * we will find the page already in the cache (presumably still
1861 * busy waiting for the I/O to complete) and then complete
1862 * the fault w/o having to go through memory_object_data_request again
1863 */
1864 assert(first_m != VM_PAGE_NULL);
1865 assert(VM_PAGE_OBJECT(first_m) == first_object);
1866
1867 vm_object_lock(first_object);
1868 VM_PAGE_FREE(first_m);
1869 vm_object_paging_end(first_object);
1870 vm_object_unlock(first_object);
1871
1872 first_m = VM_PAGE_NULL;
1873 force_fault_retry = TRUE;
1874
1875 vm_fault_page_forced_retry++;
1876 }
1877
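	/*
	 * If an earlier pass through this loop already issued a data
	 * request for this fault, temporarily switch to random, single-page
	 * behavior so we don't re-issue a large clustered read.
	 */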
1878 if (data_already_requested == TRUE) {
1879 orig_behavior = fault_info->behavior;
1880 orig_cluster_size = fault_info->cluster_size;
1881
1882 fault_info->behavior = VM_BEHAVIOR_RANDOM;
1883 fault_info->cluster_size = PAGE_SIZE;
1884 }
1885 /*
1886 * Call the memory manager to retrieve the data.
1887 */
1888 rc = memory_object_data_request(
1889 pager,
1890 vm_object_trunc_page(offset) + object->paging_offset,
1891 PAGE_SIZE,
1892 access_required | wants_copy_flag,
1893 (memory_object_fault_info_t)fault_info);
1894
1895 if (data_already_requested == TRUE) {
1896 fault_info->behavior = orig_behavior;
1897 fault_info->cluster_size = orig_cluster_size;
1898 } else {
1899 data_already_requested = TRUE;
1900 }
1901
1902 DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1903 #if TRACEFAULTPAGE
1904 dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1905 #endif
1906 vm_object_lock(object);
1907
1908 if (drop_floor && object->object_is_shared_cache) {
1909 thread_priority_floor_end(&token);
1910 drop_floor = false;
1911 }
1912
1913 data_requested:
1914 if (rc != KERN_SUCCESS) {
1915 vm_fault_cleanup(object, first_m);
1916 thread_interrupt_level(interruptible_state);
1917
1918 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1919
1920 return (rc == MACH_SEND_INTERRUPTED) ?
1921 VM_FAULT_INTERRUPTED :
1922 VM_FAULT_MEMORY_ERROR;
1923 } else {
1924 clock_sec_t tv_sec;
1925 clock_usec_t tv_usec;
1926
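		/*
		 * The data request was issued successfully; a real page-in
		 * resets this thread's page-creation throttle accounting.
		 */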
1927 if (my_fault_type == DBG_PAGEIN_FAULT) {
1928 clock_get_system_microtime(&tv_sec, &tv_usec);
1929 current_thread()->t_page_creation_time = tv_sec;
1930 current_thread()->t_page_creation_count = 0;
1931 }
1932 }
1933 if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
1934 vm_fault_cleanup(object, first_m);
1935 thread_interrupt_level(interruptible_state);
1936
1937 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
1938 return VM_FAULT_INTERRUPTED;
1939 }
1940 if (force_fault_retry == TRUE) {
1941 vm_fault_cleanup(object, first_m);
1942 thread_interrupt_level(interruptible_state);
1943
1944 return VM_FAULT_RETRY;
1945 }
1946 if (m == VM_PAGE_NULL && object->phys_contiguous) {
1947 /*
1948 * No page here means that the object we
1949 * initially looked up was "physically
1950 * contiguous" (i.e. device memory). However,
1951 * with Virtual VRAM, the object might not
1952 * be backed by that device memory anymore,
1953 * so we're done here only if the object is
1954 * still "phys_contiguous".
1955 * Otherwise, if the object is no longer
1956 * "phys_contiguous", we need to retry the
1957 * page fault against the object's new backing
1958 * store (different memory object).
1959 */
1960 phys_contig_object:
1961 goto done;
1962 }
1963 /*
1964 * potentially a pagein fault
1965 * if we make it through the state checks
1966 * above, then we'll count it as such
1967 */
1968 my_fault = my_fault_type;
1969
1970 /*
1971 * Retry with same object/offset, since new data may
1972 * be in a different page (i.e., m is meaningless at
1973 * this point).
1974 */
1975 continue;
1976 }
1977 dont_look_for_page:
1978 /*
1979 * We get here if the object has no pager, or an existence map
1980 * exists and indicates the page isn't present on the pager
1981 * or we're unwiring a page. If a pager exists, but there
1982 * is no existence map, then the m->vmp_absent case above handles
1983 * the ZF case when the pager can't provide the page
1984 */
1985 #if TRACEFAULTPAGE
1986 dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
1987 #endif
1988 if (object == first_object) {
1989 first_m = m;
1990 } else {
1991 assert(m == VM_PAGE_NULL);
1992 }
1993
1994 next_object = object->shadow;
1995
1996 if (next_object == VM_OBJECT_NULL) {
1997 /*
1998 * we've hit the bottom of the shadow chain,
1999 * fill the page in the top object with zeros.
2000 */
2001 assert(!must_be_resident);
2002
2003 if (object != first_object) {
2004 vm_object_paging_end(object);
2005 vm_object_unlock(object);
2006
2007 object = first_object;
2008 offset = first_offset;
2009 vm_object_lock(object);
2010 }
2011 m = first_m;
2012 assert(VM_PAGE_OBJECT(m) == object);
2013 first_m = VM_PAGE_NULL;
2014
2015 /*
2016 * check for any conditions that prevent
2017 * us from creating a new zero-fill page.
2018 * vm_fault_check will do all of the
2019 * fault cleanup in the case of an error condition,
2020 * including resetting the thread_interrupt_level
2021 */
2022 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2023
2024 if (error != VM_FAULT_SUCCESS) {
2025 return error;
2026 }
2027
2028 if (m == VM_PAGE_NULL) {
2029 m = vm_page_grab_options(grab_options);
2030
2031 if (m == VM_PAGE_NULL) {
2032 vm_fault_cleanup(object, VM_PAGE_NULL);
2033 thread_interrupt_level(interruptible_state);
2034
2035 return VM_FAULT_MEMORY_SHORTAGE;
2036 }
2037 vm_page_insert(m, object, vm_object_trunc_page(offset));
2038 }
2039 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2040 m->vmp_absent = TRUE;
2041 clear_absent_on_error = true;
2042 }
2043
2044 my_fault = vm_fault_zero_page(m, no_zero_fill);
2045
2046 break;
2047 } else {
2048 /*
2049 * Move on to the next object. Lock the next
2050 * object before unlocking the current one.
2051 */
2052 if ((object != first_object) || must_be_resident) {
2053 vm_object_paging_end(object);
2054 }
2055
2056 offset += object->vo_shadow_offset;
2057 fault_info->lo_offset += object->vo_shadow_offset;
2058 fault_info->hi_offset += object->vo_shadow_offset;
2059 access_required = VM_PROT_READ;
2060
2061 vm_object_lock(next_object);
2062 vm_object_unlock(object);
2063
2064 object = next_object;
2065 vm_object_paging_begin(object);
2066 }
2067 }
2068
2069 /*
2070 * PAGE HAS BEEN FOUND.
2071 *
2072 * This page (m) is:
2073 * busy, so that we can play with it;
2074 * not absent, so that nobody else will fill it;
2075 * possibly eligible for pageout;
2076 *
2077 * The top-level page (first_m) is:
2078 * VM_PAGE_NULL if the page was found in the
2079 * top-level object;
2080 * busy, not absent, and ineligible for pageout.
2081 *
2082 * The current object (object) is locked. A paging
2083 * reference is held for the current and top-level
2084 * objects.
2085 */
2086
2087 #if TRACEFAULTPAGE
2088 dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2089 #endif
2090 #if EXTRA_ASSERTIONS
2091 assert(m->vmp_busy && !m->vmp_absent);
2092 assert((first_m == VM_PAGE_NULL) ||
2093 (first_m->vmp_busy && !first_m->vmp_absent &&
2094 !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2095 #endif /* EXTRA_ASSERTIONS */
2096
2097 /*
2098 * If the page is being written, but isn't
2099 * already owned by the top-level object,
2100 * we have to copy it into a new page owned
2101 * by the top-level object.
2102 */
2103 if (object != first_object) {
2104 #if TRACEFAULTPAGE
2105 dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2106 #endif
2107 if (fault_type & VM_PROT_WRITE) {
2108 vm_page_t copy_m;
2109
2110 /*
2111 * We only really need to copy if we
2112 * want to write it.
2113 */
2114 assert(!must_be_resident);
2115
2116 /*
2117 * If we try to collapse first_object at this
2118 * point, we may deadlock when we try to get
2119 * the lock on an intermediate object (since we
2120 * have the bottom object locked). We can't
2121 * unlock the bottom object, because the page
2122 * we found may move (by collapse) if we do.
2123 *
2124 * Instead, we first copy the page. Then, when
2125 * we have no more use for the bottom object,
2126 * we unlock it and try to collapse.
2127 *
2128 * Note that we copy the page even if we didn't
2129 * need to... that's the breaks.
2130 */
2131
2132 /*
2133 * Allocate a page for the copy
2134 */
2135 copy_m = vm_page_grab_options(grab_options);
2136
2137 if (copy_m == VM_PAGE_NULL) {
2138 RELEASE_PAGE(m);
2139
2140 vm_fault_cleanup(object, first_m);
2141 thread_interrupt_level(interruptible_state);
2142
2143 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
2144 return VM_FAULT_MEMORY_SHORTAGE;
2145 }
2146
2147 vm_page_copy(m, copy_m);
2148
2149 /*
2150 * If another map is truly sharing this
2151 * page with us, we have to flush all
2152 * uses of the original page, since we
2153 * can't distinguish those which want the
2154 * original from those which need the
2155 * new copy.
2156 *
2157 * XXXO If we know that only one map has
2158 * access to this page, then we could
2159 * avoid the pmap_disconnect() call.
2160 */
2161 if (m->vmp_pmapped) {
2162 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2163 }
2164
2165 if (m->vmp_clustered) {
2166 VM_PAGE_COUNT_AS_PAGEIN(m);
2167 VM_PAGE_CONSUME_CLUSTERED(m);
2168 }
2169 assert(!m->vmp_cleaning);
2170
2171 /*
2172 * We no longer need the old page or object.
2173 */
2174 RELEASE_PAGE(m);
2175
2176 /*
2177 * This check helps with marking the object as having a sequential pattern.
2178 * Normally we'll miss doing this below because this fault is about COW to
2179 * the first_object, i.e. bring the page in from disk, push it to the object above, but
2180 * don't update the file object's sequential pattern.
2181 */
2182 if (object->internal == FALSE) {
2183 vm_fault_is_sequential(object, offset, fault_info->behavior);
2184 }
2185
2186 vm_object_paging_end(object);
2187 vm_object_unlock(object);
2188
2189 my_fault = DBG_COW_FAULT;
2190 counter_inc(&vm_statistics_cow_faults);
2191 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2192 counter_inc(&current_task()->cow_faults);
2193
2194 object = first_object;
2195 offset = first_offset;
2196
2197 vm_object_lock(object);
2198 /*
2199 * get rid of the place holder
2200 * page that we soldered in earlier
2201 */
2202 VM_PAGE_FREE(first_m);
2203 first_m = VM_PAGE_NULL;
2204
2205 /*
2206 * and replace it with the
2207 * page we just copied into
2208 */
2209 assert(copy_m->vmp_busy);
2210 vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2211 SET_PAGE_DIRTY(copy_m, TRUE);
2212
2213 m = copy_m;
2214 /*
2215 * Now that we've gotten the copy out of the
2216 * way, let's try to collapse the top object.
2217 * But we have to play ugly games with
2218 * paging_in_progress to do that...
2219 */
2220 vm_object_paging_end(object);
2221 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2222 vm_object_paging_begin(object);
2223 } else {
2224 *protection &= (~VM_PROT_WRITE);
2225 }
2226 }
2227 /*
2228 * Now check whether the page needs to be pushed into the
2229 * copy object. The use of asymmetric copy on write for
2230 * shared temporary objects means that we may do two copies to
2231 * satisfy the fault; one above to get the page from a
2232 * shadowed object, and one here to push it into the copy.
2233 */
2234 try_failed_count = 0;
2235
2236 while ((copy_object = first_object->copy) != VM_OBJECT_NULL) {
2237 vm_object_offset_t copy_offset;
2238 vm_page_t copy_m;
2239
2240 #if TRACEFAULTPAGE
2241 dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2242 #endif
2243 /*
2244 * If the page is being written, but hasn't been
2245 * copied to the copy-object, we have to copy it there.
2246 */
2247 if ((fault_type & VM_PROT_WRITE) == 0) {
2248 *protection &= ~VM_PROT_WRITE;
2249 break;
2250 }
2251
2252 /*
2253 * If the page was guaranteed to be resident,
2254 * we must have already performed the copy.
2255 */
2256 if (must_be_resident) {
2257 break;
2258 }
2259
2260 /*
2261 * Try to get the lock on the copy_object.
2262 */
2263 if (!vm_object_lock_try(copy_object)) {
2264 vm_object_unlock(object);
2265 try_failed_count++;
2266
2267 mutex_pause(try_failed_count); /* wait a bit */
2268 vm_object_lock(object);
2269
2270 continue;
2271 }
2272 try_failed_count = 0;
2273
2274 /*
2275 * Make another reference to the copy-object,
2276 * to keep it from disappearing during the
2277 * copy.
2278 */
2279 vm_object_reference_locked(copy_object);
2280
2281 /*
2282 * Does the page exist in the copy?
2283 */
2284 copy_offset = first_offset - copy_object->vo_shadow_offset;
2285 copy_offset = vm_object_trunc_page(copy_offset);
2286
2287 if (copy_object->vo_size <= copy_offset) {
2288 /*
2289 * Copy object doesn't cover this page -- do nothing.
2290 */
2291 ;
2292 } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2293 /*
2294 * Page currently exists in the copy object
2295 */
2296 if (copy_m->vmp_busy) {
2297 /*
2298 * If the page is being brought
2299 * in, wait for it and then retry.
2300 */
2301 RELEASE_PAGE(m);
2302
2303 /*
2304 * take an extra ref so object won't die
2305 */
2306 vm_object_reference_locked(copy_object);
2307 vm_object_unlock(copy_object);
2308 vm_fault_cleanup(object, first_m);
2309
2310 vm_object_lock(copy_object);
2311 assert(copy_object->ref_count > 0);
2312 vm_object_lock_assert_exclusive(copy_object);
2313 copy_object->ref_count--;
2314 assert(copy_object->ref_count > 0);
2315 copy_m = vm_page_lookup(copy_object, copy_offset);
2316
2317 if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2318 PAGE_ASSERT_WAIT(copy_m, interruptible);
2319
2320 vm_object_unlock(copy_object);
2321 wait_result = thread_block(THREAD_CONTINUE_NULL);
2322 vm_object_deallocate(copy_object);
2323
2324 goto backoff;
2325 } else {
2326 vm_object_unlock(copy_object);
2327 vm_object_deallocate(copy_object);
2328 thread_interrupt_level(interruptible_state);
2329
2330 return VM_FAULT_RETRY;
2331 }
2332 }
2333 } else if (!PAGED_OUT(copy_object, copy_offset)) {
2334 /*
2335 * If PAGED_OUT is TRUE, then the page used to exist
2336 * in the copy-object, and has already been paged out.
2337 * We don't need to repeat this. If PAGED_OUT is
2338 * FALSE, then either we don't know (!pager_created,
2339 * for example) or it hasn't been paged out.
2340 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2341 * We must copy the page to the copy object.
2342 *
2343 * Allocate a page for the copy
2344 */
2345 copy_m = vm_page_alloc(copy_object, copy_offset);
2346
2347 if (copy_m == VM_PAGE_NULL) {
2348 RELEASE_PAGE(m);
2349
2350 vm_object_lock_assert_exclusive(copy_object);
2351 copy_object->ref_count--;
2352 assert(copy_object->ref_count > 0);
2353
2354 vm_object_unlock(copy_object);
2355 vm_fault_cleanup(object, first_m);
2356 thread_interrupt_level(interruptible_state);
2357
2358 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
2359 return VM_FAULT_MEMORY_SHORTAGE;
2360 }
2361 /*
2362 * Must copy page into copy-object.
2363 */
2364 vm_page_copy(m, copy_m);
2365
2366 /*
2367 * If the old page was in use by any users
2368 * of the copy-object, it must be removed
2369 * from all pmaps. (We can't know which
2370 * pmaps use it.)
2371 */
2372 if (m->vmp_pmapped) {
2373 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2374 }
2375
2376 if (m->vmp_clustered) {
2377 VM_PAGE_COUNT_AS_PAGEIN(m);
2378 VM_PAGE_CONSUME_CLUSTERED(m);
2379 }
2380 /*
2381 * If there's a pager, then immediately
2382 * page out this page, using the "initialize"
2383 * option. Else, we use the copy.
2384 */
2385 if ((!copy_object->pager_ready)
2386 || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2387 ) {
2388 vm_page_lockspin_queues();
2389 assert(!m->vmp_cleaning);
2390 vm_page_activate(copy_m);
2391 vm_page_unlock_queues();
2392
2393 SET_PAGE_DIRTY(copy_m, TRUE);
2394 PAGE_WAKEUP_DONE(copy_m);
2395 } else {
2396 assert(copy_m->vmp_busy == TRUE);
2397 assert(!m->vmp_cleaning);
2398
2399 /*
2400 * dirty is protected by the object lock
2401 */
2402 SET_PAGE_DIRTY(copy_m, TRUE);
2403
2404 /*
2405 * The page is already ready for pageout:
2406 * not on pageout queues and busy.
2407 * Unlock everything except the
2408 * copy_object itself.
2409 */
2410 vm_object_unlock(object);
2411
2412 /*
2413 * Write the page to the copy-object,
2414 * flushing it from the kernel.
2415 */
2416 vm_pageout_initialize_page(copy_m);
2417
2418 /*
2419 * Since the pageout may have
2420 * temporarily dropped the
2421 * copy_object's lock, we
2422 * check whether we'll have
2423 * to deallocate the hard way.
2424 */
2425 if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
2426 vm_object_unlock(copy_object);
2427 vm_object_deallocate(copy_object);
2428 vm_object_lock(object);
2429
2430 continue;
2431 }
2432 /*
2433 * Pick back up the old object's
2434 * lock. [It is safe to do so,
2435 * since it must be deeper in the
2436 * object tree.]
2437 */
2438 vm_object_lock(object);
2439 }
2440
2441 /*
2442 * Because we're pushing a page upward
2443 * in the object tree, we must restart
2444 * any faults that are waiting here.
2445 * [Note that this is an expansion of
2446 * PAGE_WAKEUP that uses the THREAD_RESTART
2447 * wait result]. Can't turn off the page's
2448 * busy bit because we're not done with it.
2449 */
2450 if (m->vmp_wanted) {
2451 m->vmp_wanted = FALSE;
2452 thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2453 }
2454 }
2455 /*
2456 * The reference count on copy_object must be
2457 * at least 2: one for our extra reference,
2458 * and at least one from the outside world
2459 * (we checked that when we last locked
2460 * copy_object).
2461 */
2462 vm_object_lock_assert_exclusive(copy_object);
2463 copy_object->ref_count--;
2464 assert(copy_object->ref_count > 0);
2465
2466 vm_object_unlock(copy_object);
2467
2468 break;
2469 }
2470
2471 done:
2472 *result_page = m;
2473 *top_page = first_m;
2474
2475 if (m != VM_PAGE_NULL) {
2476 assert(VM_PAGE_OBJECT(m) == object);
2477
2478 retval = VM_FAULT_SUCCESS;
2479
2480 if (my_fault == DBG_PAGEIN_FAULT) {
2481 VM_PAGE_COUNT_AS_PAGEIN(m);
2482
2483 if (object->internal) {
2484 my_fault = DBG_PAGEIND_FAULT;
2485 } else {
2486 my_fault = DBG_PAGEINV_FAULT;
2487 }
2488
2489 /*
2490 * evaluate access pattern and update state;
2491 * vm_fault_deactivate_behind depends on the
2492 * state being up to date
2493 */
2494 vm_fault_is_sequential(object, offset, fault_info->behavior);
2495 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2496 } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2497 /*
2498 * we weren't called from vm_fault, so handle the
2499 * accounting here for hits in the cache
2500 */
2501 if (m->vmp_clustered) {
2502 VM_PAGE_COUNT_AS_PAGEIN(m);
2503 VM_PAGE_CONSUME_CLUSTERED(m);
2504 }
2505 vm_fault_is_sequential(object, offset, fault_info->behavior);
2506 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2507 } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2508 VM_STAT_DECOMPRESSIONS();
2509 }
2510 if (type_of_fault) {
2511 *type_of_fault = my_fault;
2512 }
2513 } else {
2514 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2515 retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2516 assert(first_m == VM_PAGE_NULL);
2517 assert(object == first_object);
2518 }
2519
2520 thread_interrupt_level(interruptible_state);
2521
2522 #if TRACEFAULTPAGE
2523 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
2524 #endif
2525 return retval;
2526
2527 backoff:
2528 thread_interrupt_level(interruptible_state);
2529
2530 if (wait_result == THREAD_INTERRUPTED) {
2531 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2532 return VM_FAULT_INTERRUPTED;
2533 }
2534 return VM_FAULT_RETRY;
2535
2536 #undef RELEASE_PAGE
2537 }
2538
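/*
 * panic_on_cs_killed: when set, rejecting an invalid code-signed page that
 * comes from the shared cache panics the system (see
 * vm_fault_cs_handle_violation below); it defaults to true only on
 * MACH_ASSERT kernels for watchOS and x86_64 builds.
 */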
2539 #if MACH_ASSERT && (PLATFORM_WatchOS || __x86_64__)
2540 #define PANIC_ON_CS_KILLED_DEFAULT true
2541 #else
2542 #define PANIC_ON_CS_KILLED_DEFAULT false
2543 #endif
2544 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2545 PANIC_ON_CS_KILLED_DEFAULT);
2546
2547 extern int proc_selfpid(void);
2548 extern char *proc_name_address(void *p);
2549 unsigned long cs_enter_tainted_rejected = 0;
2550 unsigned long cs_enter_tainted_accepted = 0;
2551
2552 /*
2553 * CODE SIGNING:
2554 * When soft faulting a page, we have to validate the page if:
2555 * 1. the page is being mapped in user space
2556 * 2. the page hasn't already been found to be "tainted"
2557 * 3. the page belongs to a code-signed object
2558 * 4. the page has not been validated yet or has been mapped for write.
2559 */
2560 static bool
2561 vm_fault_cs_need_validation(
2562 pmap_t pmap,
2563 vm_page_t page,
2564 vm_object_t page_obj,
2565 vm_map_size_t fault_page_size,
2566 vm_map_offset_t fault_phys_offset)
2567 {
2568 if (pmap == kernel_pmap) {
2569 /* 1 - not user space */
2570 return false;
2571 }
2572 if (!page_obj->code_signed) {
2573 /* 3 - page does not belong to a code-signed object */
2574 return false;
2575 }
2576 if (fault_page_size == PAGE_SIZE) {
2577 /* looking at the whole page */
2578 assertf(fault_phys_offset == 0,
2579 "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2580 (uint64_t)fault_page_size,
2581 (uint64_t)fault_phys_offset);
2582 if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2583 /* 2 - page is all tainted */
2584 return false;
2585 }
2586 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2587 !page->vmp_wpmapped) {
2588 /* 4 - already fully validated and never mapped writable */
2589 return false;
2590 }
2591 } else {
2592 /* looking at a specific sub-page */
2593 if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2594 /* 2 - sub-page was already marked as tainted */
2595 return false;
2596 }
2597 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2598 !page->vmp_wpmapped) {
2599 /* 4 - already validated and never mapped writable */
2600 return false;
2601 }
2602 }
2603 /* page needs to be validated */
2604 return true;
2605 }
2606
2607
2608 static bool
2609 vm_fault_cs_page_immutable(
2610 vm_page_t m,
2611 vm_map_size_t fault_page_size,
2612 vm_map_offset_t fault_phys_offset,
2613 vm_prot_t prot __unused)
2614 {
2615 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2616 /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2617 return true;
2618 }
2619 return false;
2620 }
2621
2622 static bool
2623 vm_fault_cs_page_nx(
2624 vm_page_t m,
2625 vm_map_size_t fault_page_size,
2626 vm_map_offset_t fault_phys_offset)
2627 {
2628 return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2629 }
2630
2631 /*
2632 * Check if the page being entered into the pmap violates code signing.
2633 */
2634 static kern_return_t
2635 vm_fault_cs_check_violation(
2636 bool cs_bypass,
2637 vm_object_t object,
2638 vm_page_t m,
2639 pmap_t pmap,
2640 vm_prot_t prot,
2641 vm_prot_t caller_prot,
2642 vm_map_size_t fault_page_size,
2643 vm_map_offset_t fault_phys_offset,
2644 vm_object_fault_info_t fault_info,
2645 bool map_is_switched,
2646 bool map_is_switch_protected,
2647 bool *cs_violation)
2648 {
2649 #if !PMAP_CS
2650 #pragma unused(caller_prot)
2651 #pragma unused(fault_info)
2652 #endif /* !PMAP_CS */
2653 int cs_enforcement_enabled;
2654 if (!cs_bypass &&
2655 vm_fault_cs_need_validation(pmap, m, object,
2656 fault_page_size, fault_phys_offset)) {
2657 vm_object_lock_assert_exclusive(object);
2658
2659 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2660 vm_cs_revalidates++;
2661 }
2662
2663 /* VM map is locked, so 1 ref will remain on VM object -
2664 * so no harm if vm_page_validate_cs drops the object lock */
2665
2666 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2667 }
2668
2669 /* If the map is switched, and is switch-protected, we must protect
2670 * some pages from being write-faulted: immutable pages because by
2671 * definition they may not be written, and executable pages because that
2672 * would provide a way to inject unsigned code.
2673 * If the page is immutable, we can simply return. However, we can't
2674 * immediately determine whether a page is executable anywhere. But,
2675 * we can disconnect it everywhere and remove the executable protection
2676 * from the current map. We do that below right before we do the
2677 * PMAP_ENTER.
2678 */
2679 if (pmap == kernel_pmap) {
2680 /* kernel fault: cs_enforcement does not apply */
2681 cs_enforcement_enabled = 0;
2682 } else {
2683 cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2684 }
2685
2686 if (cs_enforcement_enabled && map_is_switched &&
2687 map_is_switch_protected &&
2688 vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2689 (prot & VM_PROT_WRITE)) {
2690 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2691 return KERN_CODESIGN_ERROR;
2692 }
2693
2694 if (cs_enforcement_enabled &&
2695 vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2696 (prot & VM_PROT_EXECUTE)) {
2697 if (cs_debug) {
2698 printf("page marked to be NX, not letting it be mapped EXEC\n");
2699 }
2700 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2701 return KERN_CODESIGN_ERROR;
2702 }
2703
2704 /* A page could be tainted, or pose a risk of being tainted later.
2705 * Check whether the receiving process wants it, and make it feel
2706 * the consequences (that happens in cs_invalid_page()).
2707 * For CS Enforcement, two other conditions will
2708 * cause that page to be tainted as well:
2709 * - pmapping an unsigned page executable - this means unsigned code;
2710 * - writeable mapping of a validated page - the content of that page
2711 * can be changed without the kernel noticing, therefore unsigned
2712 * code can be created
2713 */
2714 if (cs_bypass) {
2715 /* code-signing is bypassed */
2716 *cs_violation = FALSE;
2717 } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2718 /* tainted page */
2719 *cs_violation = TRUE;
2720 } else if (!cs_enforcement_enabled) {
2721 /* no further code-signing enforcement */
2722 *cs_violation = FALSE;
2723 } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2724 ((prot & VM_PROT_WRITE) ||
2725 m->vmp_wpmapped)) {
2726 /*
2727 * The page should be immutable, but is in danger of being
2728 * modified.
2729 * This is the case where we want policy from the code
2730 * directory - is the page immutable or not? For now we have
2731 * to assume that code pages will be immutable, data pages not.
2732 * We'll assume a page is a code page if it has a code directory
2733 * and we fault for execution.
2734 * That is good enough since if we faulted the code page for
2735 * writing in another map before, it is wpmapped; if we fault
2736 * it for writing in this map later it will also be faulted for
2737 * executing at the same time; and if we fault for writing in
2738 * another map later, we will disconnect it from this pmap so
2739 * we'll notice the change.
2740 */
2741 *cs_violation = TRUE;
2742 } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2743 (prot & VM_PROT_EXECUTE)
2744 ) {
2745 *cs_violation = TRUE;
2746 } else {
2747 *cs_violation = FALSE;
2748 }
2749 return KERN_SUCCESS;
2750 }
2751
2752 /*
2753 * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2754 * @param must_disconnect This value will be set to true if the caller must disconnect
2755 * this page.
2756 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2757 */
2758 static kern_return_t
2759 vm_fault_cs_handle_violation(
2760 vm_object_t object,
2761 vm_page_t m,
2762 pmap_t pmap,
2763 vm_prot_t prot,
2764 vm_map_offset_t vaddr,
2765 vm_map_size_t fault_page_size,
2766 vm_map_offset_t fault_phys_offset,
2767 bool map_is_switched,
2768 bool map_is_switch_protected,
2769 bool *must_disconnect)
2770 {
2771 #if !MACH_ASSERT
2772 #pragma unused(pmap)
2773 #pragma unused(map_is_switch_protected)
2774 #endif /* !MACH_ASSERT */
2775 /*
2776 * We will have a tainted page. Have to handle the special case
2777 * of a switched map now. If the map is not switched, standard
2778 * procedure applies - call cs_invalid_page().
2779 * If the map is switched, the real owner is invalid already.
2780 * There is no point in invalidating the switching process since
2781 * it will not be executing from the map. So we don't call
2782 * cs_invalid_page() in that case.
2783 */
2784 boolean_t reject_page, cs_killed;
2785 kern_return_t kr;
2786 if (map_is_switched) {
2787 assert(pmap == vm_map_pmap(current_thread()->map));
2788 assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2789 reject_page = FALSE;
2790 } else {
2791 if (cs_debug > 5) {
2792 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2793 object->code_signed ? "yes" : "no",
2794 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2795 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2796 m->vmp_wpmapped ? "yes" : "no",
2797 (int)prot);
2798 }
2799 reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2800 }
2801
2802 if (reject_page) {
2803 /* reject the invalid page: abort the page fault */
2804 int pid;
2805 const char *procname;
2806 task_t task;
2807 vm_object_t file_object, shadow;
2808 vm_object_offset_t file_offset;
2809 char *pathname, *filename;
2810 vm_size_t pathname_len, filename_len;
2811 boolean_t truncated_path;
2812 #define __PATH_MAX 1024
2813 struct timespec mtime, cs_mtime;
2814 int shadow_depth;
2815 os_reason_t codesigning_exit_reason = OS_REASON_NULL;
2816
2817 kr = KERN_CODESIGN_ERROR;
2818 cs_enter_tainted_rejected++;
2819
2820 /* get process name and pid */
2821 procname = "?";
2822 task = current_task();
2823 pid = proc_selfpid();
2824 if (task->bsd_info != NULL) {
2825 procname = proc_name_address(task->bsd_info);
2826 }
2827
2828 /* get file's VM object */
2829 file_object = object;
2830 file_offset = m->vmp_offset;
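		/*
		 * Walk down the shadow chain to find the backing file object
		 * and this page's offset within it, locking each shadow
		 * (shared) before dropping the previous object's lock.
		 */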
2831 for (shadow = file_object->shadow,
2832 shadow_depth = 0;
2833 shadow != VM_OBJECT_NULL;
2834 shadow = file_object->shadow,
2835 shadow_depth++) {
2836 vm_object_lock_shared(shadow);
2837 if (file_object != object) {
2838 vm_object_unlock(file_object);
2839 }
2840 file_offset += file_object->vo_shadow_offset;
2841 file_object = shadow;
2842 }
2843
2844 mtime.tv_sec = 0;
2845 mtime.tv_nsec = 0;
2846 cs_mtime.tv_sec = 0;
2847 cs_mtime.tv_nsec = 0;
2848
2849 /* get file's pathname and/or filename */
2850 pathname = NULL;
2851 filename = NULL;
2852 pathname_len = 0;
2853 filename_len = 0;
2854 truncated_path = FALSE;
2855 /* no pager -> no file -> no pathname, use "<nil>" in that case */
2856 if (file_object->pager != NULL) {
2857 pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2858 if (pathname) {
2859 pathname[0] = '\0';
2860 pathname_len = __PATH_MAX;
2861 filename = pathname + pathname_len;
2862 filename_len = __PATH_MAX;
2863
2864 if (vnode_pager_get_object_name(file_object->pager,
2865 pathname,
2866 pathname_len,
2867 filename,
2868 filename_len,
2869 &truncated_path) == KERN_SUCCESS) {
2870 /* safety first... */
2871 pathname[__PATH_MAX - 1] = '\0';
2872 filename[__PATH_MAX - 1] = '\0';
2873
2874 vnode_pager_get_object_mtime(file_object->pager,
2875 &mtime,
2876 &cs_mtime);
2877 } else {
2878 kfree_data(pathname, __PATH_MAX * 2);
2879 pathname = NULL;
2880 filename = NULL;
2881 pathname_len = 0;
2882 filename_len = 0;
2883 truncated_path = FALSE;
2884 }
2885 }
2886 }
2887 printf("CODE SIGNING: process %d[%s]: "
2888 "rejecting invalid page at address 0x%llx "
2889 "from offset 0x%llx in file \"%s%s%s\" "
2890 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2891 "(signed:%d validated:%d tainted:%d nx:%d "
2892 "wpmapped:%d dirty:%d depth:%d)\n",
2893 pid, procname, (addr64_t) vaddr,
2894 file_offset,
2895 (pathname ? pathname : "<nil>"),
2896 (truncated_path ? "/.../" : ""),
2897 (truncated_path ? filename : ""),
2898 cs_mtime.tv_sec, cs_mtime.tv_nsec,
2899 ((cs_mtime.tv_sec == mtime.tv_sec &&
2900 cs_mtime.tv_nsec == mtime.tv_nsec)
2901 ? "=="
2902 : "!="),
2903 mtime.tv_sec, mtime.tv_nsec,
2904 object->code_signed,
2905 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
2906 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
2907 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
2908 m->vmp_wpmapped,
2909 m->vmp_dirty,
2910 shadow_depth);
2911
2912 /*
2913 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
2914 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
2915 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
2916 * will deal with the segmentation fault.
2917 */
2918 if (cs_killed) {
2919 KERNEL_DEBUG_CONSTANT(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2920 pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE, 0, 0);
2921
2922 codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
2923 if (codesigning_exit_reason == NULL) {
2924 printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
2925 } else {
2926 mach_vm_address_t data_addr = 0;
2927 struct codesigning_exit_reason_info *ceri = NULL;
2928 uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
2929
2930 if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
2931 printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
2932 } else {
2933 if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
2934 EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
2935 ceri = (struct codesigning_exit_reason_info *)data_addr;
2936 static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
2937
2938 ceri->ceri_virt_addr = vaddr;
2939 ceri->ceri_file_offset = file_offset;
2940 if (pathname) {
2941 strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
2942 } else {
2943 ceri->ceri_pathname[0] = '\0';
2944 }
2945 if (filename) {
2946 strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
2947 } else {
2948 ceri->ceri_filename[0] = '\0';
2949 }
2950 ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
2951 ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
2952 ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
2953 ceri->ceri_page_modtime_secs = mtime.tv_sec;
2954 ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
2955 ceri->ceri_object_codesigned = (object->code_signed);
2956 ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
2957 ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
2958 ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2959 ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
2960 ceri->ceri_page_slid = 0;
2961 ceri->ceri_page_dirty = (m->vmp_dirty);
2962 ceri->ceri_page_shadow_depth = shadow_depth;
2963 } else {
2964 #if DEBUG || DEVELOPMENT
2965 panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
2966 #else
2967 printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
2968 #endif /* DEBUG || DEVELOPMENT */
2969 /* Free the buffer */
2970 os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
2971 }
2972 }
2973 }
2974
2975 set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
2976 }
2977 if (panic_on_cs_killed &&
2978 object->object_is_shared_cache) {
2979 char *tainted_contents;
2980 vm_map_offset_t src_vaddr;
2981 src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
2982 tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
2983 bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
2984 printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
2985 panic("CODE SIGNING: process %d[%s]: "
2986 "rejecting invalid page (phys#0x%x) at address 0x%llx "
2987 "from offset 0x%llx in file \"%s%s%s\" "
2988 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2989 "(signed:%d validated:%d tainted:%d nx:%d"
2990 "wpmapped:%d dirty:%d depth:%d)\n",
2991 pid, procname,
2992 VM_PAGE_GET_PHYS_PAGE(m),
2993 (addr64_t) vaddr,
2994 file_offset,
2995 (pathname ? pathname : "<nil>"),
2996 (truncated_path ? "/.../" : ""),
2997 (truncated_path ? filename : ""),
2998 cs_mtime.tv_sec, cs_mtime.tv_nsec,
2999 ((cs_mtime.tv_sec == mtime.tv_sec &&
3000 cs_mtime.tv_nsec == mtime.tv_nsec)
3001 ? "=="
3002 : "!="),
3003 mtime.tv_sec, mtime.tv_nsec,
3004 object->code_signed,
3005 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3006 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3007 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3008 m->vmp_wpmapped,
3009 m->vmp_dirty,
3010 shadow_depth);
3011 }
3012
3013 if (file_object != object) {
3014 vm_object_unlock(file_object);
3015 }
3016 if (pathname_len != 0) {
3017 kfree_data(pathname, __PATH_MAX * 2);
3018 pathname = NULL;
3019 filename = NULL;
3020 }
3021 } else {
3022 /* proceed with the invalid page */
3023 kr = KERN_SUCCESS;
3024 if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3025 !object->code_signed) {
3026 /*
3027 * This page has not been (fully) validated but
3028 * does not belong to a code-signed object
3029 * so it should not be forcefully considered
3030 * as tainted.
3031 * We're just concerned about it here because
3032 * we've been asked to "execute" it but that
3033 * does not mean that it should cause other
3034 * accesses to fail.
3035 * This happens when a debugger sets a
3036 * breakpoint and we then execute code in
3037 * that page. Marking the page as "tainted"
3038 * would cause any inspection tool ("leaks",
3039 * "vmmap", "CrashReporter", ...) to get killed
3040 * due to code-signing violation on that page,
3041 * even though they're just reading it and not
3042 * executing from it.
3043 */
3044 } else {
3045 /*
3046 * Page might have been tainted before or not;
3047 * now it definitively is. If the page wasn't
3048 * tainted, we must disconnect it from all
3049 * pmaps later, to force existing mappings
3050 * through that code path for re-consideration
3051 * of the validity of that page.
3052 */
3053 if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3054 *must_disconnect = TRUE;
3055 VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3056 }
3057 }
3058 cs_enter_tainted_accepted++;
3059 }
3060 if (kr != KERN_SUCCESS) {
3061 if (cs_debug) {
3062 printf("CODESIGNING: vm_fault_enter(0x%llx): "
3063 "*** INVALID PAGE ***\n",
3064 (long long)vaddr);
3065 }
3066 #if !SECURE_KERNEL
3067 if (cs_enforcement_panic) {
3068 panic("CODESIGNING: panicking on invalid page");
3069 }
3070 #endif
3071 }
3072 return kr;
3073 }
3074
3075 /*
3076 * Check that the code signature is valid for the given page being inserted into
3077 * the pmap.
3078 *
3079 * @param must_disconnect This value will be set to true if the caller must disconnect
3080 * this page.
3081 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3082 */
3083 static kern_return_t
3084 vm_fault_validate_cs(
3085 bool cs_bypass,
3086 vm_object_t object,
3087 vm_page_t m,
3088 pmap_t pmap,
3089 vm_map_offset_t vaddr,
3090 vm_prot_t prot,
3091 vm_prot_t caller_prot,
3092 vm_map_size_t fault_page_size,
3093 vm_map_offset_t fault_phys_offset,
3094 vm_object_fault_info_t fault_info,
3095 bool *must_disconnect)
3096 {
3097 bool map_is_switched, map_is_switch_protected, cs_violation;
3098 kern_return_t kr;
3099 /* Validate code signature if necessary. */
3100 map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3101 (pmap == vm_map_pmap(current_thread()->map)));
3102 map_is_switch_protected = current_thread()->map->switch_protect;
3103 kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3104 prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3105 map_is_switched, map_is_switch_protected, &cs_violation);
3106 if (kr != KERN_SUCCESS) {
3107 return kr;
3108 }
3109 if (cs_violation) {
3110 kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3111 fault_page_size, fault_phys_offset,
3112 map_is_switched, map_is_switch_protected, must_disconnect);
3113 }
3114 return kr;
3115 }
3116
3117 /*
3118 * Enqueue the page on the appropriate paging queue.
3119 */
3120 static void
3121 vm_fault_enqueue_page(
3122 vm_object_t object,
3123 vm_page_t m,
3124 bool wired,
3125 bool change_wiring,
3126 vm_tag_t wire_tag,
3127 bool no_cache,
3128 int *type_of_fault,
3129 kern_return_t kr)
3130 {
3131 assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3132 boolean_t page_queues_locked = FALSE;
3133 boolean_t previously_pmapped = m->vmp_pmapped;
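	/*
	 * Helper macros to take/drop the page queues lock lazily, so the
	 * lock is only acquired on the paths below that actually need it.
	 */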
3134 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
3135 MACRO_BEGIN \
3136 if (! page_queues_locked) { \
3137 page_queues_locked = TRUE; \
3138 vm_page_lockspin_queues(); \
3139 } \
3140 MACRO_END
3141 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
3142 MACRO_BEGIN \
3143 if (page_queues_locked) { \
3144 page_queues_locked = FALSE; \
3145 vm_page_unlock_queues(); \
3146 } \
3147 MACRO_END
3148
3149 #if CONFIG_BACKGROUND_QUEUE
3150 vm_page_update_background_state(m);
3151 #endif
3152 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3153 /*
3154 * Compressor pages are neither wired
3155 * nor pageable and should never change.
3156 */
3157 assert(object == compressor_object);
3158 } else if (change_wiring) {
3159 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3160
3161 if (wired) {
3162 if (kr == KERN_SUCCESS) {
3163 vm_page_wire(m, wire_tag, TRUE);
3164 }
3165 } else {
3166 vm_page_unwire(m, TRUE);
3167 }
3168 /* we keep the page queues lock, if we need it later */
3169 } else {
3170 if (object->internal == TRUE) {
3171 /*
3172 * don't allow anonymous pages on
3173 * the speculative queues
3174 */
3175 no_cache = FALSE;
3176 }
3177 if (kr != KERN_SUCCESS) {
3178 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3179 vm_page_deactivate(m);
3180 /* we keep the page queues lock, if we need it later */
3181 } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3182 (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3183 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3184 ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3185 !VM_PAGE_WIRED(m)) {
3186 if (vm_page_local_q &&
3187 (*type_of_fault == DBG_COW_FAULT ||
3188 *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3189 struct vpl *lq;
3190 uint32_t lid;
3191
3192 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3193
3194 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3195 vm_object_lock_assert_exclusive(object);
3196
3197 /*
3198 * we got a local queue to stuff this
3199 * new page on...
3200 * it's safe to manipulate local and
3201 * local_id at this point since we're
3202 * behind an exclusive object lock and
3203 * the page is not on any global queue.
3204 *
3205 * we'll use the current cpu number to
3206 * select the queue... note that we don't
3207 * need to disable preemption... we're
3208 * going to be behind the local queue's
3209 * lock to do the real work
3210 */
3211 lid = cpu_number();
3212
3213 lq = zpercpu_get_cpu(vm_page_local_q, lid);
3214
3215 VPL_LOCK(&lq->vpl_lock);
3216
3217 vm_page_check_pageable_safe(m);
3218 vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3219 m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3220 m->vmp_local_id = lid;
3221 lq->vpl_count++;
3222
3223 if (object->internal) {
3224 lq->vpl_internal_count++;
3225 } else {
3226 lq->vpl_external_count++;
3227 }
3228
3229 VPL_UNLOCK(&lq->vpl_lock);
3230
3231 if (lq->vpl_count > vm_page_local_q_soft_limit) {
3232 /*
3233 * we're beyond the soft limit
3234 * for the local queue...
3235 * vm_page_reactivate_local will
3236 * 'try' to take the global page
3237 * queue lock... if it can't
3238 * that's ok... we'll let the
3239 * queue continue to grow up
3240 * to the hard limit... at that
3241 * point we'll wait for the
3242 * lock... once we've got the
3243 * lock, we'll transfer all of
3244 * the pages from the local
3245 * queue to the global active
3246 * queue
3247 */
3248 vm_page_reactivate_local(lid, FALSE, FALSE);
3249 }
3250 } else {
3251 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3252
3253 /*
3254 * test again now that we hold the
3255 * page queue lock
3256 */
3257 if (!VM_PAGE_WIRED(m)) {
3258 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3259 vm_page_queues_remove(m, FALSE);
3260
3261 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3262 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3263 }
3264
3265 if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3266 no_cache) {
3267 /*
3268 * If this is a no_cache mapping
3269 * and the page has never been
3270 * mapped before or was
3271 * previously a no_cache page,
3272 * then we want to leave pages
3273 * in the speculative state so
3274 * that they can be readily
3275 * recycled if free memory runs
3276 * low. Otherwise the page is
3277 * activated as normal.
3278 */
3279
3280 if (no_cache &&
3281 (!previously_pmapped ||
3282 m->vmp_no_cache)) {
3283 m->vmp_no_cache = TRUE;
3284
3285 if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3286 vm_page_speculate(m, FALSE);
3287 }
3288 } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3289 vm_page_activate(m);
3290 }
3291 }
3292 }
3293 /* we keep the page queues lock, if we need it later */
3294 }
3295 }
3296 }
3297 /* we're done with the page queues lock, if we ever took it */
3298 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3299 }
3300
3301 /*
3302 * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3303 * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys
3304 * before being inserted into the pmap.
3305 */
3306 static bool
3307 vm_fault_enter_set_mapped(
3308 vm_object_t object,
3309 vm_page_t m,
3310 vm_prot_t prot,
3311 vm_prot_t fault_type)
3312 {
3313 bool page_needs_sync = false;
3314 /*
3315 * NOTE: we may only hold the vm_object lock SHARED
3316 * at this point, so we need the phys_page lock to
3317 * properly serialize updating the pmapped and
3318 * xpmapped bits
3319 */
3320 if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3321 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3322
3323 pmap_lock_phys_page(phys_page);
3324 m->vmp_pmapped = TRUE;
3325
3326 if (!m->vmp_xpmapped) {
3327 m->vmp_xpmapped = TRUE;
3328
3329 pmap_unlock_phys_page(phys_page);
3330
3331 if (!object->internal) {
3332 OSAddAtomic(1, &vm_page_xpmapped_external_count);
3333 }
3334
3335 #if defined(__arm__) || defined(__arm64__)
3336 page_needs_sync = true;
3337 #else
3338 if (object->internal &&
3339 object->pager != NULL) {
3340 /*
3341 * This page could have been
3342 * uncompressed by the
3343 * compressor pager and its
3344 * contents might be only in
3345 * the data cache.
3346 * Since it's being mapped for
3347 * "execute" for the fist time,
3348 * make sure the icache is in
3349 * sync.
3350 */
3351 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3352 page_needs_sync = true;
3353 }
3354 #endif
3355 } else {
3356 pmap_unlock_phys_page(phys_page);
3357 }
3358 } else {
3359 if (m->vmp_pmapped == FALSE) {
3360 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3361
3362 pmap_lock_phys_page(phys_page);
3363 m->vmp_pmapped = TRUE;
3364 pmap_unlock_phys_page(phys_page);
3365 }
3366 }
3367
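	/*
	 * On the first writable mapping of this page, charge a logical
	 * write to the task (for file-backed objects with a pager) and
	 * remember that the page has been mapped writable.
	 */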
3368 if (fault_type & VM_PROT_WRITE) {
3369 if (m->vmp_wpmapped == FALSE) {
3370 vm_object_lock_assert_exclusive(object);
3371 if (!object->internal && object->pager) {
3372 task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3373 }
3374 m->vmp_wpmapped = TRUE;
3375 }
3376 }
3377 return page_needs_sync;
3378 }
3379
3380 /*
3381 * Try to enter the given page into the pmap.
3382 * Will retry without execute permission iff PMAP_CS is enabled and we encounter
3383 * a codesigning failure on a non-execute fault.
3384 */
3385 static kern_return_t
3386 vm_fault_attempt_pmap_enter(
3387 pmap_t pmap,
3388 vm_map_offset_t vaddr,
3389 vm_map_size_t fault_page_size,
3390 vm_map_offset_t fault_phys_offset,
3391 vm_page_t m,
3392 vm_prot_t *prot,
3393 vm_prot_t caller_prot,
3394 vm_prot_t fault_type,
3395 bool wired,
3396 int pmap_options)
3397 {
3398 #if !PMAP_CS
3399 #pragma unused(caller_prot)
3400 #endif /* !PMAP_CS */
3401 kern_return_t kr;
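	/*
	 * When faulting at a sub-page granularity (e.g. a 4K fault on a
	 * 16K-page kernel), fault_phys_offset selects which 4K chunk of the
	 * physical page to map and must be 4K-aligned and within the page;
	 * for a full-page fault it must be zero.
	 */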
3402 if (fault_page_size != PAGE_SIZE) {
3403 DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3404 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3405 fault_phys_offset < PAGE_SIZE),
3406 "0x%llx\n", (uint64_t)fault_phys_offset);
3407 } else {
3408 assertf(fault_phys_offset == 0,
3409 "0x%llx\n", (uint64_t)fault_phys_offset);
3410 }
3411
3412 PMAP_ENTER_OPTIONS(pmap, vaddr,
3413 fault_phys_offset,
3414 m, *prot, fault_type, 0,
3415 wired,
3416 pmap_options,
3417 kr);
3418 return kr;
3419 }
3420
3421 /*
3422 * Enter the given page into the pmap.
3423 * The map must be locked shared.
3424 * The vm object must NOT be locked.
3425 *
3426 * @param need_retry if not null, avoid making a (potentially) blocking call into
3427 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3428 */
3429 static kern_return_t
3430 vm_fault_pmap_enter(
3431 pmap_t pmap,
3432 vm_map_offset_t vaddr,
3433 vm_map_size_t fault_page_size,
3434 vm_map_offset_t fault_phys_offset,
3435 vm_page_t m,
3436 vm_prot_t *prot,
3437 vm_prot_t caller_prot,
3438 vm_prot_t fault_type,
3439 bool wired,
3440 int pmap_options,
3441 boolean_t *need_retry)
3442 {
3443 kern_return_t kr;
3444 if (need_retry != NULL) {
3445 /*
3446 * Although we don't hold a lock on this object, we hold a lock
3447 * on the top object in the chain. To prevent a deadlock, we
3448 * can't allow the pmap layer to block.
3449 */
3450 pmap_options |= PMAP_OPTIONS_NOWAIT;
3451 }
3452 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3453 fault_page_size, fault_phys_offset,
3454 m, prot, caller_prot, fault_type, wired, pmap_options);
3455 if (kr == KERN_RESOURCE_SHORTAGE) {
3456 if (need_retry) {
3457 /*
3458 * There's nothing we can do here since we hold the
3459 * lock on the top object in the chain. The caller
3460 * will need to deal with this by dropping that lock and retrying.
3461 */
3462 *need_retry = TRUE;
3463 vm_pmap_enter_retried++;
3464 }
3465 }
3466 return kr;
3467 }
3468
3469 /*
3470 * Enter the given page into the pmap.
3471 * The vm map must be locked shared.
3472 * The vm object must be locked exclusive, unless this is a soft fault.
3473 * For a soft fault, the object must be locked shared or exclusive.
3474 *
3475 * @param need_retry if not null, avoid making a (potentially) blocking call into
3476 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3477 */
3478 static kern_return_t
3479 vm_fault_pmap_enter_with_object_lock(
3480 vm_object_t object,
3481 pmap_t pmap,
3482 vm_map_offset_t vaddr,
3483 vm_map_size_t fault_page_size,
3484 vm_map_offset_t fault_phys_offset,
3485 vm_page_t m,
3486 vm_prot_t *prot,
3487 vm_prot_t caller_prot,
3488 vm_prot_t fault_type,
3489 bool wired,
3490 int pmap_options,
3491 boolean_t *need_retry)
3492 {
3493 kern_return_t kr;
3494 /*
3495 * Prevent a deadlock by not
3496 * holding the object lock if we need to wait for a page in
3497 * pmap_enter() - <rdar://problem/7138958>
3498 */
3499 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3500 fault_page_size, fault_phys_offset,
3501 m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3502 #if __x86_64__
3503 if (kr == KERN_INVALID_ARGUMENT &&
3504 pmap == PMAP_NULL &&
3505 wired) {
3506 /*
3507 * Wiring a page in a pmap-less VM map:
3508 * VMware's "vmmon" kernel extension does this
3509 * to grab pages.
3510 * Let it proceed even though the PMAP_ENTER() failed.
3511 */
3512 kr = KERN_SUCCESS;
3513 }
3514 #endif /* __x86_64__ */
3515
3516 if (kr == KERN_RESOURCE_SHORTAGE) {
3517 if (need_retry) {
3518 /*
3519 * this will be non-null in the case where we hold the lock
3520 * on the top-object in this chain... we can't just drop
3521 * the lock on the object we're inserting the page into
3522 * and recall the PMAP_ENTER since we can still cause
3523 * a deadlock if one of the critical paths tries to
3524 * acquire the lock on the top-object and we're blocked
3525 * in PMAP_ENTER waiting for memory... our only recourse
3526 * is to deal with it at a higher level where we can
3527 * drop both locks.
3528 */
3529 *need_retry = TRUE;
3530 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PMAP_ENTER_RESOURCE_SHORTAGE), 0 /* arg */);
3531 vm_pmap_enter_retried++;
3532 goto done;
3533 }
3534 /*
3535 * The nonblocking version of pmap_enter did not succeed
3536 * and we don't need to drop other locks and retry
3537 * at the level above us, so
3538 * use the blocking version instead. This requires marking
3539 * the page busy and unlocking the object.
3540 */
3541 boolean_t was_busy = m->vmp_busy;
3542
3543 vm_object_lock_assert_exclusive(object);
3544
3545 m->vmp_busy = TRUE;
3546 vm_object_unlock(object);
3547
3548 PMAP_ENTER_OPTIONS(pmap, vaddr,
3549 fault_phys_offset,
3550 m, *prot, fault_type,
3551 0, wired,
3552 pmap_options, kr);
3553
3554 assert(VM_PAGE_OBJECT(m) == object);
3555
3556 /* Take the object lock again. */
3557 vm_object_lock(object);
3558
3559 /* If the page was busy, someone else will wake it up.
3560 * Otherwise, we have to do it now. */
3561 assert(m->vmp_busy);
3562 if (!was_busy) {
3563 PAGE_WAKEUP_DONE(m);
3564 }
3565 vm_pmap_enter_blocked++;
3566 }
3567
3568 done:
3569 return kr;
3570 }
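/*
 * To summarize the function above: the PMAP_ENTER is always attempted
 * with PMAP_OPTIONS_NOWAIT first. If the pmap layer reports a resource
 * shortage and "need_retry" was supplied, the shortage is handed back
 * to the caller, who must drop its locks and retry the fault. If no
 * retry is possible at that level, the page is marked busy, the object
 * lock is dropped, and the blocking PMAP_ENTER is issued before
 * re-taking the lock. Any other result (including the x86_64 pmap-less
 * wiring special case) is returned as-is.
 */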
3571
3572 /*
3573 * Prepare to enter a page into the pmap by checking CS, protection bits,
3574 * and setting mapped bits on the page_t.
3575 * Does not modify the page's paging queue.
3576 *
3577 * page queue lock must NOT be held
3578 * m->vmp_object must be locked
3579 *
3580 * NOTE: m->vmp_object could be locked "shared" only if we are called
3581 * from vm_fault() as part of a soft fault.
3582 */
3583 static kern_return_t
3584 vm_fault_enter_prepare(
3585 vm_page_t m,
3586 pmap_t pmap,
3587 vm_map_offset_t vaddr,
3588 vm_prot_t *prot,
3589 vm_prot_t caller_prot,
3590 vm_map_size_t fault_page_size,
3591 vm_map_offset_t fault_phys_offset,
3592 boolean_t change_wiring,
3593 vm_prot_t fault_type,
3594 vm_object_fault_info_t fault_info,
3595 int *type_of_fault,
3596 bool *page_needs_data_sync)
3597 {
3598 kern_return_t kr;
3599 bool is_tainted = false;
3600 vm_object_t object;
3601 boolean_t cs_bypass = fault_info->cs_bypass;
3602
3603 object = VM_PAGE_OBJECT(m);
3604
3605 vm_object_lock_assert_held(object);
3606
3607 #if KASAN
3608 if (pmap == kernel_pmap) {
3609 kasan_notify_address(vaddr, PAGE_SIZE);
3610 }
3611 #endif
3612
3613 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3614
3615 if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3616 vm_object_lock_assert_exclusive(object);
3617 } else if ((fault_type & VM_PROT_WRITE) == 0 &&
3618 !change_wiring &&
3619 (!m->vmp_wpmapped
3620 #if VM_OBJECT_ACCESS_TRACKING
3621 || object->access_tracking
3622 #endif /* VM_OBJECT_ACCESS_TRACKING */
3623 )) {
3624 /*
3625 * This is not a "write" fault, so we
3626 * might not have taken the object lock
3627 * exclusively and we might not be able
3628 * to update the "wpmapped" bit in
3629 * vm_fault_enter().
3630 * Let's just grant read access to
3631 * the page for now and we'll
3632 * soft-fault again if we need write
3633 * access later...
3634 */
3635
3636 /* This had better not be a JIT page. */
3637 if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3638 *prot &= ~VM_PROT_WRITE;
3639 } else {
3640 assert(cs_bypass);
3641 }
3642 }
3643 if (m->vmp_pmapped == FALSE) {
3644 if (m->vmp_clustered) {
3645 if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3646 /*
3647 * found it in the cache, but this
3648 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3649 * so it must have come in as part of
3650 * a cluster... account 1 pagein against it
3651 */
3652 if (object->internal) {
3653 *type_of_fault = DBG_PAGEIND_FAULT;
3654 } else {
3655 *type_of_fault = DBG_PAGEINV_FAULT;
3656 }
3657
3658 VM_PAGE_COUNT_AS_PAGEIN(m);
3659 }
3660 VM_PAGE_CONSUME_CLUSTERED(m);
3661 }
3662 }
3663
3664 if (*type_of_fault != DBG_COW_FAULT) {
3665 DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3666
3667 if (pmap == kernel_pmap) {
3668 DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3669 }
3670 }
3671
3672 kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3673 *prot, caller_prot, fault_page_size, fault_phys_offset,
3674 fault_info, &is_tainted);
3675 if (kr == KERN_SUCCESS) {
3676 /*
3677 * We either have a good page, or a tainted page that has been accepted by the process.
3678 * In both cases the page will be entered into the pmap.
3679 */
3680 *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3681 if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3682 /*
3683 * This page is tainted but we're inserting it anyway.
3684 * Since it's writeable, we need to disconnect it from other pmaps
3685 * now so those processes can take note.
3686 */
3687
3688 /*
3689 * We can only get here
3690 * because of the CSE logic
3691 */
3692 assert(pmap_get_vm_map_cs_enforced(pmap));
3693 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3694 /*
3695 * If we are faulting for a write, we can clear
3696 * the execute bit - that will ensure the page is
3697 * checked again before being executable, which
3698 * protects against a map switch.
3699 * This only happens the first time the page
3700 * gets tainted, so we won't get stuck here
3701 * to make an already writeable page executable.
3702 */
3703 if (!cs_bypass) {
3704 assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot));
3705 *prot &= ~VM_PROT_EXECUTE;
3706 }
3707 }
3708 assert(VM_PAGE_OBJECT(m) == object);
3709
3710 #if VM_OBJECT_ACCESS_TRACKING
3711 if (object->access_tracking) {
3712 DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3713 if (fault_type & VM_PROT_WRITE) {
3714 object->access_tracking_writes++;
3715 vm_object_access_tracking_writes++;
3716 } else {
3717 object->access_tracking_reads++;
3718 vm_object_access_tracking_reads++;
3719 }
3720 }
3721 #endif /* VM_OBJECT_ACCESS_TRACKING */
3722 }
3723
3724 return kr;
3725 }
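/*
 * In short, vm_fault_enter_prepare() may grant only read access when it
 * cannot safely update the "wpmapped" bit under a shared object lock,
 * accounts for clustered page-ins, validates the page against its code
 * signature, sets the pmapped/wpmapped bits, and, if a tainted page is
 * being mapped writable, disconnects it from other pmaps and strips
 * execute permission.
 */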
3726
3727 /*
3728 * page queue lock must NOT be held
3729 * m->vmp_object must be locked
3730 *
3731 * NOTE: m->vmp_object could be locked "shared" only if we are called
3732 * from vm_fault() as part of a soft fault. If so, we must be
3733 * careful not to modify the VM object in any way that is not
3734 * legal under a shared lock...
3735 */
3736 kern_return_t
3737 vm_fault_enter(
3738 vm_page_t m,
3739 pmap_t pmap,
3740 vm_map_offset_t vaddr,
3741 vm_map_size_t fault_page_size,
3742 vm_map_offset_t fault_phys_offset,
3743 vm_prot_t prot,
3744 vm_prot_t caller_prot,
3745 boolean_t wired,
3746 boolean_t change_wiring,
3747 vm_tag_t wire_tag,
3748 vm_object_fault_info_t fault_info,
3749 boolean_t *need_retry,
3750 int *type_of_fault)
3751 {
3752 kern_return_t kr;
3753 vm_object_t object;
3754 bool page_needs_data_sync;
3755 vm_prot_t fault_type;
3756 int pmap_options = fault_info->pmap_options;
3757
3758 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
3759 assert(m->vmp_fictitious);
3760 return KERN_SUCCESS;
3761 }
3762
3763 fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
3764
3765 kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
3766 fault_page_size, fault_phys_offset, change_wiring, fault_type,
3767 fault_info, type_of_fault, &page_needs_data_sync);
3768 object = VM_PAGE_OBJECT(m);
3769
3770 vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
3771
3772 if (kr == KERN_SUCCESS) {
3773 if (page_needs_data_sync) {
3774 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
3775 }
3776
3777 kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
3778 fault_page_size, fault_phys_offset, m,
3779 &prot, caller_prot, fault_type, wired, pmap_options, need_retry);
3780 }
3781
3782 return kr;
3783 }
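/*
 * vm_fault_enter() is thus a short sequence: prepare the page
 * (code-signing validation and mapped bits), queue it through
 * vm_fault_enqueue_page(), sync its data if the prepare step asked for
 * it, and finally establish the pmap translation, propagating any
 * NOWAIT retry request back to the caller.
 */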
3784
3785 void
3786 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
3787 {
3788 if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
3789 vm_fault(current_map(), /* map */
3790 vaddr, /* vaddr */
3791 prot, /* fault_type */
3792 FALSE, /* change_wiring */
3793 VM_KERN_MEMORY_NONE, /* tag - not wiring */
3794 THREAD_UNINT, /* interruptible */
3795 NULL, /* caller_pmap */
3796 0 /* caller_pmap_addr */);
3797 }
3798 }
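/*
 * vm_pre_fault() only drives a fault when no valid translation exists
 * for "vaddr" in the current map, so it is cheap to call
 * opportunistically. A hypothetical caller about to copy data out to
 * user space could use it like this (the copyout() pairing is purely
 * illustrative, not a requirement of this interface):
 *
 *	vm_pre_fault(user_vaddr, VM_PROT_WRITE);
 *	(void) copyout(kernel_buf, user_vaddr, size);
 */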
3799
3800
3801 /*
3802 * Routine: vm_fault
3803 * Purpose:
3804 * Handle page faults, including pseudo-faults
3805 * used to change the wiring status of pages.
3806 * Returns:
3807 * Explicit continuations have been removed.
3808 * Implementation:
3809 * vm_fault and vm_fault_page save mucho state
3810 * in the moral equivalent of a closure. The state
3811 * structure is allocated when first entering vm_fault
3812 * and deallocated when leaving vm_fault.
3813 */
3814
3815 extern uint64_t get_current_unique_pid(void);
3816
3817 unsigned long vm_fault_collapse_total = 0;
3818 unsigned long vm_fault_collapse_skipped = 0;
3819
3820
3821 kern_return_t
3822 vm_fault_external(
3823 vm_map_t map,
3824 vm_map_offset_t vaddr,
3825 vm_prot_t fault_type,
3826 boolean_t change_wiring,
3827 int interruptible,
3828 pmap_t caller_pmap,
3829 vm_map_offset_t caller_pmap_addr)
3830 {
3831 return vm_fault_internal(map, vaddr, fault_type, change_wiring,
3832 change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
3833 interruptible, caller_pmap, caller_pmap_addr,
3834 NULL);
3835 }
3836
3837 kern_return_t
3838 vm_fault(
3839 vm_map_t map,
3840 vm_map_offset_t vaddr,
3841 vm_prot_t fault_type,
3842 boolean_t change_wiring,
3843 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
3844 int interruptible,
3845 pmap_t caller_pmap,
3846 vm_map_offset_t caller_pmap_addr)
3847 {
3848 return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag,
3849 interruptible, caller_pmap, caller_pmap_addr,
3850 NULL);
3851 }
3852
3853 static boolean_t
3854 current_proc_is_privileged(void)
3855 {
3856 return csproc_get_platform_binary(current_proc());
3857 }
3858
3859 uint64_t vm_copied_on_read = 0;
3860
3861 /*
3862 * Cleanup after a vm_fault_enter.
3863 * At this point, the fault should either have failed (kr != KERN_SUCCESS)
3864 * or the page should be in the pmap and on the correct paging queue.
3865 *
3866 * Precondition:
3867 * map must be locked shared.
3868 * m_object must be locked.
3869 * If top_object != VM_OBJECT_NULL, it must be locked.
3870 * real_map must be locked.
3871 *
3872 * Postcondition:
3873 * map will be unlocked
3874 * m_object will be unlocked
3875 * top_object will be unlocked
3876 * If real_map != map, it will be unlocked
3877 */
3878 static void
3879 vm_fault_complete(
3880 vm_map_t map,
3881 vm_map_t real_map,
3882 vm_object_t object,
3883 vm_object_t m_object,
3884 vm_page_t m,
3885 vm_map_offset_t offset,
3886 vm_map_offset_t trace_real_vaddr,
3887 vm_object_fault_info_t fault_info,
3888 vm_prot_t caller_prot,
3889 #if CONFIG_DTRACE
3890 vm_map_offset_t real_vaddr,
3891 #else
3892 __unused vm_map_offset_t real_vaddr,
3893 #endif /* CONFIG_DTRACE */
3894 int type_of_fault,
3895 boolean_t need_retry,
3896 kern_return_t kr,
3897 ppnum_t *physpage_p,
3898 vm_prot_t prot,
3899 vm_object_t top_object,
3900 boolean_t need_collapse,
3901 vm_map_offset_t cur_offset,
3902 vm_prot_t fault_type,
3903 vm_object_t *written_on_object,
3904 memory_object_t *written_on_pager,
3905 vm_object_offset_t *written_on_offset)
3906 {
3907 int event_code = 0;
3908 vm_map_lock_assert_shared(map);
3909 vm_object_lock_assert_held(m_object);
3910 if (top_object != VM_OBJECT_NULL) {
3911 vm_object_lock_assert_held(top_object);
3912 }
3913 vm_map_lock_assert_held(real_map);
3914
3915 if (m_object->internal) {
3916 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
3917 } else if (m_object->object_is_shared_cache) {
3918 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
3919 } else {
3920 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
3921 }
3922
3923 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid(), 0);
3924 if (need_retry == FALSE) {
3925 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid(), 0, 0, 0, 0);
3926 }
3927 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
3928 if (kr == KERN_SUCCESS &&
3929 physpage_p != NULL) {
3930 /* for vm_map_wire_and_extract() */
3931 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
3932 if (prot & VM_PROT_WRITE) {
3933 vm_object_lock_assert_exclusive(m_object);
3934 m->vmp_dirty = TRUE;
3935 }
3936 }
3937
3938 if (top_object != VM_OBJECT_NULL) {
3939 /*
3940 * It's safe to drop the top object
3941 * now that we've done our
3942 * vm_fault_enter(). Any other fault
3943 * in progress for that virtual
3944 * address will either find our page
3945 * and translation or put in a new page
3946 * and translation.
3947 */
3948 vm_object_unlock(top_object);
3949 top_object = VM_OBJECT_NULL;
3950 }
3951
3952 if (need_collapse == TRUE) {
3953 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
3954 }
3955
3956 if (need_retry == FALSE &&
3957 (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
3958 /*
3959 * evaluate access pattern and update state
3960 * vm_fault_deactivate_behind depends on the
3961 * state being up to date
3962 */
3963 vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
3964
3965 vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
3966 }
3967 /*
3968 * That's it, clean up and return.
3969 */
3970 if (m->vmp_busy) {
3971 vm_object_lock_assert_exclusive(m_object);
3972 PAGE_WAKEUP_DONE(m);
3973 }
3974
3975 if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
3976 vm_object_paging_begin(m_object);
3977
3978 assert(*written_on_object == VM_OBJECT_NULL);
3979 *written_on_object = m_object;
3980 *written_on_pager = m_object->pager;
3981 *written_on_offset = m_object->paging_offset + m->vmp_offset;
3982 }
3983 vm_object_unlock(object);
3984
3985 vm_map_unlock_read(map);
3986 if (real_map != map) {
3987 vm_map_unlock(real_map);
3988 }
3989 }
3990
3991 static inline int
3992 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
3993 {
3994 if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
3995 return DBG_COR_FAULT;
3996 }
3997 return type_of_fault;
3998 }
3999
4000 uint64_t vm_fault_resilient_media_initiate = 0;
4001 uint64_t vm_fault_resilient_media_retry = 0;
4002 uint64_t vm_fault_resilient_media_proceed = 0;
4003 uint64_t vm_fault_resilient_media_release = 0;
4004 uint64_t vm_fault_resilient_media_abort1 = 0;
4005 uint64_t vm_fault_resilient_media_abort2 = 0;
4006
4007 #if MACH_ASSERT
4008 int vm_fault_resilient_media_inject_error1_rate = 0;
4009 int vm_fault_resilient_media_inject_error1 = 0;
4010 int vm_fault_resilient_media_inject_error2_rate = 0;
4011 int vm_fault_resilient_media_inject_error2 = 0;
4012 int vm_fault_resilient_media_inject_error3_rate = 0;
4013 int vm_fault_resilient_media_inject_error3 = 0;
4014 #endif /* MACH_ASSERT */
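/*
 * The "inject_error" counters above let the resilient-media recovery
 * path be exercised under test: when one of the rates is set non-zero
 * (presumably through a debug tunable), every Nth pass through the
 * corresponding MACH_ASSERT check below behaves as if the recovery
 * attempt had failed.
 */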
4015
4016 kern_return_t
4017 vm_fault_internal(
4018 vm_map_t map,
4019 vm_map_offset_t vaddr,
4020 vm_prot_t caller_prot,
4021 boolean_t change_wiring,
4022 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4023 int interruptible,
4024 pmap_t caller_pmap,
4025 vm_map_offset_t caller_pmap_addr,
4026 ppnum_t *physpage_p)
4027 {
4028 vm_map_version_t version; /* Map version for verification */
4029 boolean_t wired; /* Should mapping be wired down? */
4030 vm_object_t object; /* Top-level object */
4031 vm_object_offset_t offset; /* Top-level offset */
4032 vm_prot_t prot; /* Protection for mapping */
4033 vm_object_t old_copy_object; /* Saved copy object */
4034 vm_page_t result_page; /* Result of vm_fault_page */
4035 vm_page_t top_page; /* Placeholder page */
4036 kern_return_t kr;
4037
4038 vm_page_t m; /* Fast access to result_page */
4039 kern_return_t error_code;
4040 vm_object_t cur_object;
4041 vm_object_t m_object = NULL;
4042 vm_object_offset_t cur_offset;
4043 vm_page_t cur_m;
4044 vm_object_t new_object;
4045 int type_of_fault;
4046 pmap_t pmap;
4047 wait_interrupt_t interruptible_state;
4048 vm_map_t real_map = map;
4049 vm_map_t original_map = map;
4050 bool object_locks_dropped = FALSE;
4051 vm_prot_t fault_type;
4052 vm_prot_t original_fault_type;
4053 struct vm_object_fault_info fault_info = {};
4054 bool need_collapse = FALSE;
4055 boolean_t need_retry = FALSE;
4056 boolean_t *need_retry_ptr = NULL;
4057 uint8_t object_lock_type = 0;
4058 uint8_t cur_object_lock_type;
4059 vm_object_t top_object = VM_OBJECT_NULL;
4060 vm_object_t written_on_object = VM_OBJECT_NULL;
4061 memory_object_t written_on_pager = NULL;
4062 vm_object_offset_t written_on_offset = 0;
4063 int throttle_delay;
4064 int compressed_count_delta;
4065 uint8_t grab_options;
4066 bool need_copy;
4067 bool need_copy_on_read;
4068 vm_map_offset_t trace_vaddr;
4069 vm_map_offset_t trace_real_vaddr;
4070 vm_map_size_t fault_page_size;
4071 vm_map_size_t fault_page_mask;
4072 int fault_page_shift;
4073 vm_map_offset_t fault_phys_offset;
4074 vm_map_offset_t real_vaddr;
4075 bool resilient_media_retry = false;
4076 bool resilient_media_ref_transfer = false;
4077 vm_object_t resilient_media_object = VM_OBJECT_NULL;
4078 vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1;
4079 bool page_needs_data_sync = false;
4080 /*
4081 * Was the VM object contended when vm_map_lookup_locked locked it?
4082 * If so, the zero fill path will drop the lock
4083 * NB: Ideally we would always drop the lock rather than rely on
4084 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4085 */
4086 bool object_is_contended = false;
4087
4088 real_vaddr = vaddr;
4089 trace_real_vaddr = vaddr;
4090
4091 if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4092 fault_phys_offset = (vm_map_offset_t)-1;
4093 fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4094 fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4095 fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4096 if (fault_page_size < PAGE_SIZE) {
4097 DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4098 vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4099 }
4100 } else {
4101 fault_phys_offset = 0;
4102 fault_page_size = PAGE_SIZE;
4103 fault_page_mask = PAGE_MASK;
4104 fault_page_shift = PAGE_SHIFT;
4105 vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4106 }
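/*
 * A fault page size smaller than PAGE_SIZE means the faulting map uses
 * sub-kernel-page mappings (the configurations traced by DEBUG4K_FAULT,
 * e.g. a 4K VM map on a system whose kernel page size is larger). In
 * that case the fault address is truncated to the map's own page
 * boundary here, and fault_phys_offset is computed further down to
 * identify which sub-page chunk of the kernel-sized physical page is
 * being mapped.
 */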
4107
4108 if (map == kernel_map) {
4109 trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4110 trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4111 } else {
4112 trace_vaddr = vaddr;
4113 }
4114
4115 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4116 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
4117 ((uint64_t)trace_vaddr >> 32),
4118 trace_vaddr,
4119 (map == kernel_map),
4120 0,
4121 0);
4122
4123 if (get_preemption_level() != 0) {
4124 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
4125 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4126 ((uint64_t)trace_vaddr >> 32),
4127 trace_vaddr,
4128 KERN_FAILURE,
4129 0,
4130 0);
4131
4132 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4133 return KERN_FAILURE;
4134 }
4135
4136 thread_t cthread = current_thread();
4137 bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4138 uint64_t fstart = 0;
4139
4140 if (rtfault) {
4141 fstart = mach_continuous_time();
4142 }
4143
4144 interruptible_state = thread_interrupt_level(interruptible);
4145
4146 fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
4147
4148 counter_inc(&vm_statistics_faults);
4149 counter_inc(&current_task()->faults);
4150 original_fault_type = fault_type;
4151
4152 need_copy = FALSE;
4153 if (fault_type & VM_PROT_WRITE) {
4154 need_copy = TRUE;
4155 }
4156
4157 if (need_copy || change_wiring) {
4158 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4159 } else {
4160 object_lock_type = OBJECT_LOCK_SHARED;
4161 }
4162
4163 cur_object_lock_type = OBJECT_LOCK_SHARED;
4164
4165 if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4166 if (compressor_map) {
4167 if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4168 panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4169 }
4170 }
4171 }
4172 RetryFault:
4173 assert(written_on_object == VM_OBJECT_NULL);
4174
4175 /*
4176 * assume we will hit a page in the cache
4177 * otherwise, explicitly override with
4178 * the real fault type once we determine it
4179 */
4180 type_of_fault = DBG_CACHE_HIT_FAULT;
4181
4182 /*
4183 * Find the backing store object and offset into
4184 * it to begin the search.
4185 */
4186 fault_type = original_fault_type;
4187 map = original_map;
4188 vm_map_lock_read(map);
4189
4190 if (resilient_media_retry) {
4191 /*
4192 * If we have to insert a fake zero-filled page to hide
4193 * a media failure to provide the real page, we need to
4194 * resolve any pending copy-on-write on this mapping.
4195 * VM_PROT_COPY tells vm_map_lookup_locked() to deal
4196 * with that even if this is not a "write" fault.
4197 */
4198 need_copy = TRUE;
4199 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4200 vm_fault_resilient_media_retry++;
4201 }
4202
4203 kr = vm_map_lookup_locked(&map, vaddr,
4204 (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4205 object_lock_type, &version,
4206 &object, &offset, &prot, &wired,
4207 &fault_info,
4208 &real_map,
4209 &object_is_contended);
4210
4211 if (kr != KERN_SUCCESS) {
4212 vm_map_unlock_read(map);
4213 /*
4214 * This can be seen in a crash report if indeed the
4215 * thread is crashing due to an invalid access in a non-existent
4216 * range.
4217 * Turning this OFF for now because it is noisy and not always fatal
4218 * e.g. prefaulting.
4219 *
4220 * if (kr == KERN_INVALID_ADDRESS) {
4221 * kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4222 * }
4223 */
4224 goto done;
4225 }
4226
4227
4228 pmap = real_map->pmap;
4229 fault_info.interruptible = interruptible;
4230 fault_info.stealth = FALSE;
4231 fault_info.io_sync = FALSE;
4232 fault_info.mark_zf_absent = FALSE;
4233 fault_info.batch_pmap_op = FALSE;
4234
4235 if (resilient_media_retry) {
4236 /*
4237 * We're retrying this fault after having detected a media
4238 * failure from a "resilient_media" mapping.
4239 * Check that the mapping is still pointing at the object
4240 * that just failed to provide a page.
4241 */
4242 assert(resilient_media_object != VM_OBJECT_NULL);
4243 assert(resilient_media_offset != (vm_object_offset_t)-1);
4244 if ((object != VM_OBJECT_NULL &&
4245 object == resilient_media_object &&
4246 offset == resilient_media_offset &&
4247 fault_info.resilient_media)
4248 #if MACH_ASSERT
4249 && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4250 (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4251 #endif /* MACH_ASSERT */
4252 ) {
4253 /*
4254 * This mapping still points at the same object
4255 * and is still "resilient_media": proceed in
4256 * "recovery-from-media-failure" mode, where we'll
4257 * insert a zero-filled page in the top object.
4258 */
4259 // printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4260 vm_fault_resilient_media_proceed++;
4261 } else {
4262 /* not recovering: reset state and retry fault */
4263 // printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4264 vm_object_unlock(object);
4265 if (real_map != map) {
4266 vm_map_unlock(real_map);
4267 }
4268 vm_map_unlock_read(map);
4269 /* release our extra reference on failed object */
4270 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4271 vm_object_lock_assert_notheld(resilient_media_object);
4272 vm_object_deallocate(resilient_media_object);
4273 resilient_media_object = VM_OBJECT_NULL;
4274 resilient_media_offset = (vm_object_offset_t)-1;
4275 resilient_media_retry = false;
4276 vm_fault_resilient_media_abort1++;
4277 goto RetryFault;
4278 }
4279 } else {
4280 assert(resilient_media_object == VM_OBJECT_NULL);
4281 resilient_media_offset = (vm_object_offset_t)-1;
4282 }
4283
4284 /*
4285 * If the page is wired, we must fault for the current protection
4286 * value, to avoid further faults.
4287 */
4288 if (wired) {
4289 fault_type = prot | VM_PROT_WRITE;
4290 }
4291 if (wired || need_copy) {
4292 /*
4293 * since we're treating this fault as a 'write'
4294 * we must hold the top object lock exclusively
4295 */
4296 if (object_lock_type == OBJECT_LOCK_SHARED) {
4297 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4298
4299 if (vm_object_lock_upgrade(object) == FALSE) {
4300 /*
4301 * couldn't upgrade, so explicitly
4302 * take the lock exclusively
4303 */
4304 vm_object_lock(object);
4305 }
4306 }
4307 }
4308
4309 #if VM_FAULT_CLASSIFY
4310 /*
4311 * Temporary data gathering code
4312 */
4313 vm_fault_classify(object, offset, fault_type);
4314 #endif
4315 /*
4316 * Fast fault code. The basic idea is to do as much as
4317 * possible while holding the map lock and object locks.
4318 * Busy pages are not used until the object lock has to
4319 * be dropped to do something (copy, zero fill, pmap enter).
4320 * Similarly, paging references aren't acquired until that
4321 * point, and object references aren't used.
4322 *
4323 * If we can figure out what to do
4324 * (zero fill, copy on write, pmap enter) while holding
4325 * the locks, then it gets done. Otherwise, we give up,
4326 * and use the original fault path (which doesn't hold
4327 * the map lock, and relies on busy pages).
4328 * The give up cases include:
4329 * - Have to talk to pager.
4330 * - Page is busy, absent or in error.
4331 * - Pager has locked out desired access.
4332 * - Fault needs to be restarted.
4333 * - Have to push page into copy object.
4334 *
4335 * The code is an infinite loop that moves one level down
4336 * the shadow chain each time. cur_object and cur_offset
4337 * refer to the current object being examined. object and offset
4338 * are the original object from the map. The loop is at the
4339 * top level if and only if object and cur_object are the same.
4340 *
4341 * Invariants: Map lock is held throughout. Lock is held on
4342 * original object and cur_object (if different) when
4343 * continuing or exiting loop.
4344 *
4345 */
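/*
 * As a rough sketch (a restatement of the comment above, not additional
 * logic), the loop below behaves like:
 *
 *	cur_object = object; cur_offset = offset;
 *	for (;;) {
 *		m = vm_page_lookup(cur_object, cur_offset);
 *		if (m != VM_PAGE_NULL)
 *			-> FastPmapEnter, or a copy-on-write copy-up;
 *		else if (the compressor holds the page)
 *			-> decompress it in place, then FastPmapEnter;
 *		else if (cur_object has no shadow)
 *			-> zero fill in the top object;
 *		else
 *			-> descend: cur_object = shadow, adjust cur_offset;
 *		on any complication (busy page, pager I/O, copy push, ...)
 *			-> break out to the slow path (vm_fault_page).
 *	}
 */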
4346
4347 #if defined(__arm64__)
4348 /*
4349 * Fail if reading an execute-only page in a
4350 * pmap that enforces execute-only protection.
4351 */
4352 if (fault_type == VM_PROT_READ &&
4353 (prot & VM_PROT_EXECUTE) &&
4354 !(prot & VM_PROT_READ) &&
4355 pmap_enforces_execute_only(pmap)) {
4356 vm_object_unlock(object);
4357 vm_map_unlock_read(map);
4358 if (real_map != map) {
4359 vm_map_unlock(real_map);
4360 }
4361 kr = KERN_PROTECTION_FAILURE;
4362 goto done;
4363 }
4364 #endif
4365
4366 fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
4367
4368 /*
4369 * If this page is to be inserted in a copy delay object
4370 * for writing, and if the object has a copy, then the
4371 * copy delay strategy is implemented in the slow fault path.
4372 */
4373 if (object->copy_strategy == MEMORY_OBJECT_COPY_DELAY &&
4374 object->copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4375 goto handle_copy_delay;
4376 }
4377
4378 cur_object = object;
4379 cur_offset = offset;
4380
4381 grab_options = 0;
4382 #if CONFIG_SECLUDED_MEMORY
4383 if (object->can_grab_secluded) {
4384 grab_options |= VM_PAGE_GRAB_SECLUDED;
4385 }
4386 #endif /* CONFIG_SECLUDED_MEMORY */
4387
4388 while (TRUE) {
4389 if (!cur_object->pager_created &&
4390 cur_object->phys_contiguous) { /* superpage */
4391 break;
4392 }
4393
4394 if (cur_object->blocked_access) {
4395 /*
4396 * Access to this VM object has been blocked.
4397 * Let the slow path handle it.
4398 */
4399 break;
4400 }
4401
4402 #if __arm__ && !__arm64__
4403 if (__improbable(cur_object->internal &&
4404 cur_offset >= cur_object->vo_size &&
4405 cur_offset < VM_MAP_ROUND_PAGE(cur_object->vo_size, VM_MAP_PAGE_MASK(map)) &&
4406 VM_MAP_PAGE_SHIFT(map) > PAGE_SHIFT)) {
4407 /*
4408 * On devices with a 4k kernel page size
4409 * and a 16k user page size (i.e. 32-bit watches),
4410 * IOKit could have created a VM object with a
4411 * 4k-aligned size.
4412 * IOKit could have then mapped that VM object
4413 * in a user address space, and VM would have extended
4414 * the mapping to the next 16k boundary.
4415 * So we could now be, somewhat illegally, trying to
4416 * access one of the up to 3 non-existent 4k pages
4417 * beyond the end of the VM object.
4418 * We would not be allowed to insert a page beyond
4419 * the end of the object, so let's fail the fault.
4420 */
4421 DTRACE_VM3(vm_fault_beyond_end_of_internal,
4422 vm_object_offset_t, offset,
4423 vm_object_size_t, object->vo_size,
4424 vm_map_address_t, vaddr);
4425 vm_object_unlock(object);
4426 vm_map_unlock_read(map);
4427 if (real_map != map) {
4428 vm_map_unlock(real_map);
4429 }
4430 kr = KERN_MEMORY_ERROR;
4431 goto done;
4432 }
4433 #endif /* __arm__ && !__arm64__ */
4434
4435 m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4436 m_object = NULL;
4437
4438 if (m != VM_PAGE_NULL) {
4439 m_object = cur_object;
4440
4441 if (m->vmp_busy) {
4442 wait_result_t result;
4443
4444 /*
4445 * in order to do the PAGE_ASSERT_WAIT, we must
4446 * have object that 'm' belongs to locked exclusively
4447 */
4448 if (object != cur_object) {
4449 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4450 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4451
4452 if (vm_object_lock_upgrade(cur_object) == FALSE) {
4453 /*
4454 * couldn't upgrade so go do a full retry
4455 * immediately since we can no longer be
4456 * certain about cur_object (since we
4457 * don't hold a reference on it)...
4458 * first drop the top object lock
4459 */
4460 vm_object_unlock(object);
4461
4462 vm_map_unlock_read(map);
4463 if (real_map != map) {
4464 vm_map_unlock(real_map);
4465 }
4466
4467 goto RetryFault;
4468 }
4469 }
4470 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4471 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4472
4473 if (vm_object_lock_upgrade(object) == FALSE) {
4474 /*
4475 * couldn't upgrade, so explicitly take the lock
4476 * exclusively and go relookup the page since we
4477 * will have dropped the object lock and
4478 * a different thread could have inserted
4479 * a page at this offset
4480 * no need for a full retry since we're
4481 * at the top level of the object chain
4482 */
4483 vm_object_lock(object);
4484
4485 continue;
4486 }
4487 }
4488 if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4489 /*
4490 * m->vmp_busy == TRUE and the object is locked exclusively
4491 * if m->pageout_queue == TRUE after we acquire the
4492 * queues lock, we are guaranteed that it is stable on
4493 * the pageout queue and therefore reclaimable
4494 *
4495 * NOTE: this is only true for the internal pageout queue
4496 * in the compressor world
4497 */
4498 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4499
4500 vm_page_lock_queues();
4501
4502 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4503 vm_pageout_throttle_up(m);
4504 vm_page_unlock_queues();
4505
4506 PAGE_WAKEUP_DONE(m);
4507 goto reclaimed_from_pageout;
4508 }
4509 vm_page_unlock_queues();
4510 }
4511 if (object != cur_object) {
4512 vm_object_unlock(object);
4513 }
4514
4515 vm_map_unlock_read(map);
4516 if (real_map != map) {
4517 vm_map_unlock(real_map);
4518 }
4519
4520 result = PAGE_ASSERT_WAIT(m, interruptible);
4521
4522 vm_object_unlock(cur_object);
4523
4524 if (result == THREAD_WAITING) {
4525 result = thread_block(THREAD_CONTINUE_NULL);
4526 }
4527 if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4528 goto RetryFault;
4529 }
4530
4531 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4532 kr = KERN_ABORTED;
4533 goto done;
4534 }
4535 reclaimed_from_pageout:
4536 if (m->vmp_laundry) {
4537 if (object != cur_object) {
4538 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4539 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4540
4541 vm_object_unlock(object);
4542 vm_object_unlock(cur_object);
4543
4544 vm_map_unlock_read(map);
4545 if (real_map != map) {
4546 vm_map_unlock(real_map);
4547 }
4548
4549 goto RetryFault;
4550 }
4551 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4552 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4553
4554 if (vm_object_lock_upgrade(object) == FALSE) {
4555 /*
4556 * couldn't upgrade, so explicitly take the lock
4557 * exclusively and go relookup the page since we
4558 * will have dropped the object lock and
4559 * a different thread could have inserted
4560 * a page at this offset
4561 * no need for a full retry since we're
4562 * at the top level of the object chain
4563 */
4564 vm_object_lock(object);
4565
4566 continue;
4567 }
4568 }
4569 vm_pageout_steal_laundry(m, FALSE);
4570 }
4571
4572 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
4573 /*
4574 * Guard page: let the slow path deal with it
4575 */
4576 break;
4577 }
4578 if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) {
4579 /*
4580 * Unusual case... let the slow path deal with it
4581 */
4582 break;
4583 }
4584 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4585 if (object != cur_object) {
4586 vm_object_unlock(object);
4587 }
4588 vm_map_unlock_read(map);
4589 if (real_map != map) {
4590 vm_map_unlock(real_map);
4591 }
4592 vm_object_unlock(cur_object);
4593 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4594 kr = KERN_MEMORY_ERROR;
4595 goto done;
4596 }
4597 assert(m_object == VM_PAGE_OBJECT(m));
4598
4599 if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4600 PAGE_SIZE, 0) ||
4601 (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4602 upgrade_lock_and_retry:
4603 /*
4604 * We might need to validate this page
4605 * against its code signature, so we
4606 * want to hold the VM object exclusively.
4607 */
4608 if (object != cur_object) {
4609 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4610 vm_object_unlock(object);
4611 vm_object_unlock(cur_object);
4612
4613 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4614
4615 vm_map_unlock_read(map);
4616 if (real_map != map) {
4617 vm_map_unlock(real_map);
4618 }
4619
4620 goto RetryFault;
4621 }
4622 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4623 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4624
4625 if (vm_object_lock_upgrade(object) == FALSE) {
4626 /*
4627 * couldn't upgrade, so explicitly take the lock
4628 * exclusively and go relookup the page since we
4629 * will have dropped the object lock and
4630 * a different thread could have inserted
4631 * a page at this offset
4632 * no need for a full retry since we're
4633 * at the top level of the object chain
4634 */
4635 vm_object_lock(object);
4636
4637 continue;
4638 }
4639 }
4640 }
4641 /*
4642 * Two cases of map-in faults:
4643 * - At top level w/o copy object.
4644 * - Read fault anywhere.
4645 * --> must disallow write.
4646 */
4647
4648 if (object == cur_object && object->copy == VM_OBJECT_NULL) {
4649 goto FastPmapEnter;
4650 }
4651
4652 if (!need_copy &&
4653 !fault_info.no_copy_on_read &&
4654 cur_object != object &&
4655 !cur_object->internal &&
4656 !cur_object->pager_trusted &&
4657 vm_protect_privileged_from_untrusted &&
4658 !cur_object->code_signed &&
4659 current_proc_is_privileged()) {
4660 /*
4661 * We're faulting on a page in "object" and
4662 * went down the shadow chain to "cur_object"
4663 * to find out that "cur_object"'s pager
4664 * is not "trusted", i.e. we can not trust it
4665 * to always return the same contents.
4666 * Since the target is a "privileged" process,
4667 * let's treat this as a copy-on-read fault, as
4668 * if it was a copy-on-write fault.
4669 * Once "object" gets a copy of this page, it
4670 * won't have to rely on "cur_object" to
4671 * provide the contents again.
4672 *
4673 * This is done by setting "need_copy" and
4674 * retrying the fault from the top with the
4675 * appropriate locking.
4676 *
4677 * Special case: if the mapping is executable
4678 * and the untrusted object is code-signed and
4679 * the process is "cs_enforced", we do not
4680 * copy-on-read because that would break
4681 * code-signing enforcement expectations (an
4682 * executable page must belong to a code-signed
4683 * object) and we can rely on code-signing
4684 * to re-validate the page if it gets evicted
4685 * and paged back in.
4686 */
4687 // printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4688 vm_copied_on_read++;
4689 need_copy = TRUE;
4690
4691 vm_object_unlock(object);
4692 vm_object_unlock(cur_object);
4693 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4694 vm_map_unlock_read(map);
4695 if (real_map != map) {
4696 vm_map_unlock(real_map);
4697 }
4698 goto RetryFault;
4699 }
4700
4701 if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4702 if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4703 prot &= ~VM_PROT_WRITE;
4704 } else {
4705 /*
4706 * For a protection that the pmap cares
4707 * about, we must hand over the full
4708 * set of protections (so that the pmap
4709 * layer can apply any desired policy).
4710 * This means that cs_bypass must be
4711 * set, as this can force us to pass
4712 * RWX.
4713 */
4714 assert(fault_info.cs_bypass);
4715 }
4716
4717 if (object != cur_object) {
4718 /*
4719 * We still need to hold the top object
4720 * lock here to prevent a race between
4721 * a read fault (taking only "shared"
4722 * locks) and a write fault (taking
4723 * an "exclusive" lock on the top
4724 * object.
4725 * Otherwise, as soon as we release the
4726 * top lock, the write fault could
4727 * proceed and actually complete before
4728 * the read fault, and the copied page's
4729 * translation could then be overwritten
4730 * by the read fault's translation for
4731 * the original page.
4732 *
4733 * Let's just record what the top object
4734 * is and we'll release it later.
4735 */
4736 top_object = object;
4737
4738 /*
4739 * switch to the object that has the new page
4740 */
4741 object = cur_object;
4742 object_lock_type = cur_object_lock_type;
4743 }
4744 FastPmapEnter:
4745 assert(m_object == VM_PAGE_OBJECT(m));
4746
4747 /*
4748 * prepare for the pmap_enter...
4749 * object and map are both locked
4750 * m contains valid data
4751 * object == m->vmp_object
4752 * cur_object == NULL or it's been unlocked
4753 * no paging references on either object or cur_object
4754 */
4755 if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4756 need_retry_ptr = &need_retry;
4757 } else {
4758 need_retry_ptr = NULL;
4759 }
4760
4761 if (fault_page_size < PAGE_SIZE) {
4762 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
4763 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
4764 fault_phys_offset < PAGE_SIZE),
4765 "0x%llx\n", (uint64_t)fault_phys_offset);
4766 } else {
4767 assertf(fault_phys_offset == 0,
4768 "0x%llx\n", (uint64_t)fault_phys_offset);
4769 }
4770
4771 if (caller_pmap) {
4772 kr = vm_fault_enter(m,
4773 caller_pmap,
4774 caller_pmap_addr,
4775 fault_page_size,
4776 fault_phys_offset,
4777 prot,
4778 caller_prot,
4779 wired,
4780 change_wiring,
4781 wire_tag,
4782 &fault_info,
4783 need_retry_ptr,
4784 &type_of_fault);
4785 } else {
4786 kr = vm_fault_enter(m,
4787 pmap,
4788 vaddr,
4789 fault_page_size,
4790 fault_phys_offset,
4791 prot,
4792 caller_prot,
4793 wired,
4794 change_wiring,
4795 wire_tag,
4796 &fault_info,
4797 need_retry_ptr,
4798 &type_of_fault);
4799 }
4800
4801 vm_fault_complete(
4802 map,
4803 real_map,
4804 object,
4805 m_object,
4806 m,
4807 offset,
4808 trace_real_vaddr,
4809 &fault_info,
4810 caller_prot,
4811 real_vaddr,
4812 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
4813 need_retry,
4814 kr,
4815 physpage_p,
4816 prot,
4817 top_object,
4818 need_collapse,
4819 cur_offset,
4820 fault_type,
4821 &written_on_object,
4822 &written_on_pager,
4823 &written_on_offset);
4824 top_object = VM_OBJECT_NULL;
4825 if (need_retry == TRUE) {
4826 /*
4827 * vm_fault_enter couldn't complete the PMAP_ENTER...
4828 * at this point we don't hold any locks so it's safe
4829 * to ask the pmap layer to expand the page table to
4830 * accommodate this mapping... once expanded, we'll
4831 * re-drive the fault which should result in vm_fault_enter
4832 * being able to successfully enter the mapping this time around
4833 */
4834 (void)pmap_enter_options(
4835 pmap, vaddr, 0, 0, 0, 0, 0,
4836 PMAP_OPTIONS_NOENTER, NULL);
4837
4838 need_retry = FALSE;
4839 goto RetryFault;
4840 }
4841 goto done;
4842 }
4843 /*
4844 * COPY ON WRITE FAULT
4845 */
4846 assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
4847
4848 /*
4849 * If objects match, then
4850 * object->copy must not be NULL (else control
4851 * would be in previous code block), and we
4852 * have a potential push into the copy object
4853 * with which we can't cope with here.
4854 */
4855 if (cur_object == object) {
4856 /*
4857 * must take the slow path to
4858 * deal with the copy push
4859 */
4860 break;
4861 }
4862
4863 /*
4864 * This is now a shadow based copy on write
4865 * fault -- it requires a copy up the shadow
4866 * chain.
4867 */
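/*
 * A typical user-level trigger for this path (illustrative only) is a
 * private file mapping that is written to after being read:
 *
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *	               MAP_PRIVATE, fd, 0);
 *	c = p[0];	// read soft-fault: the file object's page is
 *			// mapped without write permission
 *	p[0] = 1;	// write fault: the page is copied up from the
 *			// file object into this mapping's shadow object
 */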
4868 assert(m_object == VM_PAGE_OBJECT(m));
4869
4870 if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
4871 vm_fault_cs_need_validation(NULL, m, m_object,
4872 PAGE_SIZE, 0)) {
4873 goto upgrade_lock_and_retry;
4874 }
4875
4876 #if MACH_ASSERT
4877 if (resilient_media_retry &&
4878 vm_fault_resilient_media_inject_error2_rate != 0 &&
4879 (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
4880 /* inject an error */
4881 cur_m = m;
4882 m = VM_PAGE_NULL;
4883 m_object = VM_OBJECT_NULL;
4884 break;
4885 }
4886 #endif /* MACH_ASSERT */
4887 /*
4888 * Allocate a page in the original top level
4889 * object. Give up if allocate fails. Also
4890 * need to remember current page, as it's the
4891 * source of the copy.
4892 *
4893 * at this point we hold locks on both
4894 * object and cur_object... no need to take
4895 * paging refs or mark pages BUSY since
4896 * we don't drop either object lock until
4897 * the page has been copied and inserted
4898 */
4899 cur_m = m;
4900 m = vm_page_grab_options(grab_options);
4901 m_object = NULL;
4902
4903 if (m == VM_PAGE_NULL) {
4904 /*
4905 * no free page currently available...
4906 * must take the slow path
4907 */
4908 break;
4909 }
4910 /*
4911 * Now do the copy. Mark the source page busy...
4912 *
4913 * NOTE: This code holds the map lock across
4914 * the page copy.
4915 */
4916 vm_page_copy(cur_m, m);
4917 vm_page_insert(m, object, vm_object_trunc_page(offset));
4918 if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
4919 DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4920 }
4921 m_object = object;
4922 SET_PAGE_DIRTY(m, FALSE);
4923
4924 /*
4925 * Now cope with the source page and object
4926 */
4927 if (object->ref_count > 1 && cur_m->vmp_pmapped) {
4928 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
4929 } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
4930 /*
4931 * We've copied the full 16K page but we're
4932 * about to call vm_fault_enter() only for
4933 * the 4K chunk we're faulting on. The other
4934 * three 4K chunks in that page could still
4935 * be pmapped in this pmap.
4936 * Since the VM object layer thinks that the
4937 * entire page has been dealt with and the
4938 * original page might no longer be needed,
4939 * it might collapse/bypass the original VM
4940 * object and free its pages, which would be
4941 * bad (and would trigger pmap_verify_free()
4942 * assertions) if the other 4K chunks are still
4943 * pmapped.
4944 */
4945 /*
4946 * XXX FBDP TODO4K: to be revisited
4947 * Technically, we need to pmap_disconnect()
4948 * only the target pmap's mappings for the 4K
4949 * chunks of this 16K VM page. If other pmaps
4950 * have PTEs on these chunks, that means that
4951 * the associated VM map must have a reference
4952 * on the VM object, so no need to worry about
4953 * those.
4954 * pmap_protect() for each 4K chunk would be
4955 * better but we'd have to check which chunks
4956 * are actually mapped before and after this
4957 * one.
4958 * A full-blown pmap_disconnect() is easier
4959 * for now but not efficient.
4960 */
4961 DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
4962 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
4963 }
4964
4965 if (cur_m->vmp_clustered) {
4966 VM_PAGE_COUNT_AS_PAGEIN(cur_m);
4967 VM_PAGE_CONSUME_CLUSTERED(cur_m);
4968 vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
4969 }
4970 need_collapse = TRUE;
4971
4972 if (!cur_object->internal &&
4973 cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
4974 /*
4975 * The object from which we've just
4976 * copied a page is most probably backed
4977 * by a vnode. We don't want to waste too
4978 * much time trying to collapse the VM objects
4979 * and create a bottleneck when several tasks
4980 * map the same file.
4981 */
4982 if (cur_object->copy == object) {
4983 /*
4984 * Shared mapping or no COW yet.
4985 * We can never collapse a copy
4986 * object into its backing object.
4987 */
4988 need_collapse = FALSE;
4989 } else if (cur_object->copy == object->shadow &&
4990 object->shadow->resident_page_count == 0) {
4991 /*
4992 * Shared mapping after a COW occurred.
4993 */
4994 need_collapse = FALSE;
4995 }
4996 }
4997 vm_object_unlock(cur_object);
4998
4999 if (need_collapse == FALSE) {
5000 vm_fault_collapse_skipped++;
5001 }
5002 vm_fault_collapse_total++;
5003
5004 type_of_fault = DBG_COW_FAULT;
5005 counter_inc(&vm_statistics_cow_faults);
5006 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5007 counter_inc(&current_task()->cow_faults);
5008
5009 goto FastPmapEnter;
5010 } else {
5011 /*
5012 * No page at cur_object, cur_offset... m == NULL
5013 */
5014 if (cur_object->pager_created) {
5015 vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5016
5017 if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5018 int my_fault_type;
5019 uint8_t c_flags = C_DONT_BLOCK;
5020 bool insert_cur_object = FALSE;
5021
5022 /*
5023 * May have to talk to a pager...
5024 * if so, take the slow path by
5025 * doing a 'break' from the while (TRUE) loop
5026 *
5027 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5028 * if the compressor is active and the page exists there
5029 */
5030 if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5031 break;
5032 }
5033
5034 if (map == kernel_map || real_map == kernel_map) {
5035 /*
5036 * can't call into the compressor with the kernel_map
5037 * lock held, since the compressor may try to operate
5038 * on the kernel map in order to return an empty c_segment
5039 */
5040 break;
5041 }
5042 if (object != cur_object) {
5043 if (fault_type & VM_PROT_WRITE) {
5044 c_flags |= C_KEEP;
5045 } else {
5046 insert_cur_object = TRUE;
5047 }
5048 }
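/*
 * When the compressed page sits below the top object, a write fault
 * keeps the compressed copy in "cur_object" (C_KEEP) and decompresses
 * straight into the top object, while a read fault decompresses the
 * page back into "cur_object" itself (insert_cur_object), since no
 * copy-on-write push is needed yet.
 */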
5049 if (insert_cur_object == TRUE) {
5050 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5051 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5052
5053 if (vm_object_lock_upgrade(cur_object) == FALSE) {
5054 /*
5055 * couldn't upgrade so go do a full retry
5056 * immediately since we can no longer be
5057 * certain about cur_object (since we
5058 * don't hold a reference on it)...
5059 * first drop the top object lock
5060 */
5061 vm_object_unlock(object);
5062
5063 vm_map_unlock_read(map);
5064 if (real_map != map) {
5065 vm_map_unlock(real_map);
5066 }
5067
5068 goto RetryFault;
5069 }
5070 }
5071 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
5072 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5073
5074 if (object != cur_object) {
5075 /*
5076 * we can't go for the upgrade on the top
5077 * lock since the upgrade may block waiting
5078 * for readers to drain... since we hold
5079 * cur_object locked at this point, waiting
5080 * for the readers to drain would represent
5081 * a lock order inversion since the lock order
5082 * for objects is the reference order in the
5083 * shadow chain
5084 */
5085 vm_object_unlock(object);
5086 vm_object_unlock(cur_object);
5087
5088 vm_map_unlock_read(map);
5089 if (real_map != map) {
5090 vm_map_unlock(real_map);
5091 }
5092
5093 goto RetryFault;
5094 }
5095 if (vm_object_lock_upgrade(object) == FALSE) {
5096 /*
5097 * couldn't upgrade, so explicitly take the lock
5098 * exclusively and go relookup the page since we
5099 * will have dropped the object lock and
5100 * a different thread could have inserted
5101 * a page at this offset
5102 * no need for a full retry since we're
5103 * at the top level of the object chain
5104 */
5105 vm_object_lock(object);
5106
5107 continue;
5108 }
5109 }
5110 m = vm_page_grab_options(grab_options);
5111 m_object = NULL;
5112
5113 if (m == VM_PAGE_NULL) {
5114 /*
5115 * no free page currently available...
5116 * must take the slow path
5117 */
5118 break;
5119 }
5120
5121 /*
5122 * The object is and remains locked
5123 * so no need to take a
5124 * "paging_in_progress" reference.
5125 */
5126 bool shared_lock;
5127 if ((object == cur_object &&
5128 object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5129 (object != cur_object &&
5130 cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5131 shared_lock = FALSE;
5132 } else {
5133 shared_lock = TRUE;
5134 }
5135
5136 kr = vm_compressor_pager_get(
5137 cur_object->pager,
5138 (vm_object_trunc_page(cur_offset)
5139 + cur_object->paging_offset),
5140 VM_PAGE_GET_PHYS_PAGE(m),
5141 &my_fault_type,
5142 c_flags,
5143 &compressed_count_delta);
5144
5145 vm_compressor_pager_count(
5146 cur_object->pager,
5147 compressed_count_delta,
5148 shared_lock,
5149 cur_object);
5150
5151 if (kr != KERN_SUCCESS) {
5152 vm_page_release(m, FALSE);
5153 m = VM_PAGE_NULL;
5154 }
5155 /*
5156 * If vm_compressor_pager_get() returns
5157 * KERN_MEMORY_FAILURE, then the
5158 * compressed data is permanently lost,
5159 * so return this error immediately.
5160 */
5161 if (kr == KERN_MEMORY_FAILURE) {
5162 if (object != cur_object) {
5163 vm_object_unlock(cur_object);
5164 }
5165 vm_object_unlock(object);
5166 vm_map_unlock_read(map);
5167 if (real_map != map) {
5168 vm_map_unlock(real_map);
5169 }
5170
5171 goto done;
5172 } else if (kr != KERN_SUCCESS) {
5173 break;
5174 }
5175 m->vmp_dirty = TRUE;
5176
5177 /*
5178 * If the object is purgeable, its
5179 * owner's purgeable ledgers will be
5180 * updated in vm_page_insert() but the
5181 * page was also accounted for in a
5182 * "compressed purgeable" ledger, so
5183 * update that now.
5184 */
5185 if (object != cur_object &&
5186 !insert_cur_object) {
5187 /*
5188 * We're not going to insert
5189 * the decompressed page into
5190 * the object it came from.
5191 *
5192 * We're dealing with a
5193 * copy-on-write fault on
5194 * "object".
5195 * We're going to decompress
5196 * the page directly into the
5197 * target "object" while
5198 * keeping the compressed
5199 * page for "cur_object", so
5200 * no ledger update in that
5201 * case.
5202 */
5203 } else if (((cur_object->purgable ==
5204 VM_PURGABLE_DENY) &&
5205 (!cur_object->vo_ledger_tag)) ||
5206 (cur_object->vo_owner ==
5207 NULL)) {
5208 /*
5209 * "cur_object" is not purgeable
5210 * and is not ledger-tagged, or
5211 * there's no owner for it,
5212 * so no owner's ledgers to
5213 * update.
5214 */
5215 } else {
5216 /*
5217 * One less compressed
5218 * purgeable/tagged page for
5219 * cur_object's owner.
5220 */
5221 vm_object_owner_compressed_update(
5222 cur_object,
5223 -1);
5224 }
5225
5226 if (insert_cur_object) {
5227 vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5228 m_object = cur_object;
5229 } else {
5230 vm_page_insert(m, object, vm_object_trunc_page(offset));
5231 m_object = object;
5232 }
5233
5234 if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
5235 /*
5236 * If the page is not cacheable,
5237 * we can't let its contents
5238 * linger in the data cache
5239 * after the decompression.
5240 */
5241 pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5242 }
5243
5244 type_of_fault = my_fault_type;
5245
5246 VM_STAT_DECOMPRESSIONS();
5247
5248 if (cur_object != object) {
5249 if (insert_cur_object) {
5250 top_object = object;
5251 /*
5252 * switch to the object that has the new page
5253 */
5254 object = cur_object;
5255 object_lock_type = cur_object_lock_type;
5256 } else {
5257 vm_object_unlock(cur_object);
5258 cur_object = object;
5259 }
5260 }
5261 goto FastPmapEnter;
5262 }
5263 /*
5264 * existence map present and indicates
5265 * that the pager doesn't have this page
5266 */
5267 }
5268 if (cur_object->shadow == VM_OBJECT_NULL ||
5269 resilient_media_retry) {
5270 /*
5271 * Zero fill fault. Page gets
5272 * inserted into the original object.
5273 */
5274 if (cur_object->shadow_severed ||
5275 VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5276 cur_object == compressor_object ||
5277 cur_object == kernel_object ||
5278 cur_object == vm_submap_object) {
5279 if (object != cur_object) {
5280 vm_object_unlock(cur_object);
5281 }
5282 vm_object_unlock(object);
5283
5284 vm_map_unlock_read(map);
5285 if (real_map != map) {
5286 vm_map_unlock(real_map);
5287 }
5288 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5289 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5290 }
5291
5292 if (cur_object->shadow_severed) {
5293 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5294 }
5295
5296 kr = KERN_MEMORY_ERROR;
5297 goto done;
5298 }
5299 if (cur_object != object) {
5300 vm_object_unlock(cur_object);
5301
5302 cur_object = object;
5303 }
5304 if (object_lock_type == OBJECT_LOCK_SHARED) {
5305 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5306
5307 if (vm_object_lock_upgrade(object) == FALSE) {
5308 /*
5309 * couldn't upgrade so do a full retry on the fault
5310 * since we dropped the object lock which
5311 * could allow another thread to insert
5312 * a page at this offset
5313 */
5314 vm_map_unlock_read(map);
5315 if (real_map != map) {
5316 vm_map_unlock(real_map);
5317 }
5318
5319 goto RetryFault;
5320 }
5321 }
5322 if (!object->internal) {
5323 panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5324 }
5325 #if MACH_ASSERT
5326 if (resilient_media_retry &&
5327 vm_fault_resilient_media_inject_error3_rate != 0 &&
5328 (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5329 /* inject an error */
5330 m_object = NULL;
5331 break;
5332 }
5333 #endif /* MACH_ASSERT */
5334 m = vm_page_alloc(object, vm_object_trunc_page(offset));
5335 m_object = NULL;
5336
5337 if (m == VM_PAGE_NULL) {
5338 /*
5339 * no free page currently available...
5340 * must take the slow path
5341 */
5342 break;
5343 }
5344 m_object = object;
5345
5346 /*
5347 * Zeroing the page and entering it into the pmap
5348 * represents a significant amount of the zero fill fault handler's work.
5349 *
5350 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5351 * now that we've inserted the page into the vm object.
5352 * Before dropping the lock, we need to check protection bits and set the
5353 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5354 * zero it, and do the pmap enter. We'll need to reacquire the lock
5355 * to clear the busy bit and wake up any waiters.
5356 */
5357 vm_fault_cs_clear(m);
5358 m->vmp_pmapped = TRUE;
5359 if (map->no_zero_fill) {
5360 type_of_fault = DBG_NZF_PAGE_FAULT;
5361 } else {
5362 type_of_fault = DBG_ZERO_FILL_FAULT;
5363 }
5364 {
5365 pmap_t destination_pmap;
5366 vm_map_offset_t destination_pmap_vaddr;
5367 vm_prot_t enter_fault_type;
5368 if (caller_pmap) {
5369 destination_pmap = caller_pmap;
5370 destination_pmap_vaddr = caller_pmap_addr;
5371 } else {
5372 destination_pmap = pmap;
5373 destination_pmap_vaddr = vaddr;
5374 }
5375 if (change_wiring) {
5376 enter_fault_type = VM_PROT_NONE;
5377 } else {
5378 enter_fault_type = caller_prot;
5379 }
5380 kr = vm_fault_enter_prepare(m,
5381 destination_pmap,
5382 destination_pmap_vaddr,
5383 &prot,
5384 caller_prot,
5385 fault_page_size,
5386 fault_phys_offset,
5387 change_wiring,
5388 enter_fault_type,
5389 &fault_info,
5390 &type_of_fault,
5391 &page_needs_data_sync);
5392 if (kr != KERN_SUCCESS) {
5393 goto zero_fill_cleanup;
5394 }
5395
5396 if (object_is_contended) {
5397 /*
5398 * At this point the page is in the vm object, but not on a paging queue.
5399 * Since it's accessible to another thread but its contents are invalid
5400 * (it hasn't been zeroed) mark it busy before dropping the object lock.
5401 */
5402 m->vmp_busy = TRUE;
5403 vm_object_unlock(object);
5404 }
5405 if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5406 /*
5407 * Now zero fill page...
5408 * the page is probably going to
5409 * be written soon, so don't bother
5410 * to clear the modified bit
5411 *
5412 * NOTE: This code holds the map
5413 * lock across the zero fill.
5414 */
5415 vm_page_zero_fill(m);
5416 counter_inc(&vm_statistics_zero_fill_count);
5417 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5418 }
5419 if (page_needs_data_sync) {
5420 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5421 }
5422
5423 if (top_object != VM_OBJECT_NULL) {
5424 need_retry_ptr = &need_retry;
5425 } else {
5426 need_retry_ptr = NULL;
5427 }
5428 if (object_is_contended) {
5429 kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5430 fault_page_size, fault_phys_offset,
5431 m, &prot, caller_prot, enter_fault_type, wired,
5432 fault_info.pmap_options, need_retry_ptr);
5433 vm_object_lock(object);
5434 } else {
5435 kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5436 fault_page_size, fault_phys_offset,
5437 m, &prot, caller_prot, enter_fault_type, wired,
5438 fault_info.pmap_options, need_retry_ptr);
5439 }
5440 }
5441 zero_fill_cleanup:
5442 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5443 (object->purgable == VM_PURGABLE_DENY ||
5444 object->purgable == VM_PURGABLE_NONVOLATILE ||
5445 object->purgable == VM_PURGABLE_VOLATILE)) {
5446 vm_page_lockspin_queues();
5447 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5448 vm_fault_enqueue_throttled_locked(m);
5449 }
5450 vm_page_unlock_queues();
5451 }
5452 vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr);
5453
5454 vm_fault_complete(
5455 map,
5456 real_map,
5457 object,
5458 m_object,
5459 m,
5460 offset,
5461 trace_real_vaddr,
5462 &fault_info,
5463 caller_prot,
5464 real_vaddr,
5465 type_of_fault,
5466 need_retry,
5467 kr,
5468 physpage_p,
5469 prot,
5470 top_object,
5471 need_collapse,
5472 cur_offset,
5473 fault_type,
5474 &written_on_object,
5475 &written_on_pager,
5476 &written_on_offset);
5477 top_object = VM_OBJECT_NULL;
5478 if (need_retry == TRUE) {
5479 /*
5480 * vm_fault_enter couldn't complete the PMAP_ENTER...
5481 * at this point we don't hold any locks so it's safe
5482 * to ask the pmap layer to expand the page table to
5483 * accommodate this mapping... once expanded, we'll
5484 * re-drive the fault which should result in vm_fault_enter
5485 * being able to successfully enter the mapping this time around
5486 */
5487 (void)pmap_enter_options(
5488 pmap, vaddr, 0, 0, 0, 0, 0,
5489 PMAP_OPTIONS_NOENTER, NULL);
5490
5491 need_retry = FALSE;
5492 goto RetryFault;
5493 }
5494 goto done;
5495 }
5496 /*
5497 * On to the next level in the shadow chain
5498 */
5499 cur_offset += cur_object->vo_shadow_offset;
5500 new_object = cur_object->shadow;
5501 fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
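/*
 * fault_phys_offset is the fault's offset within the VM page; it is
 * non-zero only when the map's page size is smaller than the kernel
 * page size (sub-page mappings).
 */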
5502
5503 /*
5504 * take the new_object's lock with the indicated state
5505 */
5506 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5507 vm_object_lock_shared(new_object);
5508 } else {
5509 vm_object_lock(new_object);
5510 }
5511
5512 if (cur_object != object) {
5513 vm_object_unlock(cur_object);
5514 }
5515
5516 cur_object = new_object;
5517
5518 continue;
5519 }
5520 }
5521 /*
5522 * Cleanup from fast fault failure. Drop any object
5523 * lock other than original and drop map lock.
5524 */
5525 if (object != cur_object) {
5526 vm_object_unlock(cur_object);
5527 }
5528
5529 /*
5530 * must own the object lock exclusively at this point
5531 */
5532 if (object_lock_type == OBJECT_LOCK_SHARED) {
5533 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5534
5535 if (vm_object_lock_upgrade(object) == FALSE) {
5536 /*
5537 * couldn't upgrade, so explicitly
5538 * take the lock exclusively
5539 * no need to retry the fault at this
5540 * point since "vm_fault_page" will
5541 * completely re-evaluate the state
5542 */
5543 vm_object_lock(object);
5544 }
5545 }
5546
5547 handle_copy_delay:
5548 vm_map_unlock_read(map);
5549 if (real_map != map) {
5550 vm_map_unlock(real_map);
5551 }
5552
5553 if (__improbable(object == compressor_object ||
5554 object == kernel_object ||
5555 object == vm_submap_object)) {
5556 /*
5557 * These objects are explicitly managed and populated by the
5558 * kernel. The virtual ranges backed by these objects should
5559 * either have wired pages or "holes" that are not supposed to
5560 * be accessed at all until they get explicitly populated.
5561 * We should never have to resolve a fault on a mapping backed
5562 * by one of these VM objects and providing a zero-filled page
5563 * would be wrong here, so let's fail the fault and let the
5564 * caller crash or recover.
5565 */
5566 vm_object_unlock(object);
5567 kr = KERN_MEMORY_ERROR;
5568 goto done;
5569 }
5570
5571 assert(object != compressor_object);
5572 assert(object != kernel_object);
5573 assert(object != vm_submap_object);
5574
5575 resilient_media_ref_transfer = false;
5576 if (resilient_media_retry) {
5577 /*
5578 * We could get here if we failed to get a free page
5579 * to zero-fill and had to take the slow path again.
5580 * Reset our "recovery-from-failed-media" state.
5581 */
5582 assert(resilient_media_object != VM_OBJECT_NULL);
5583 assert(resilient_media_offset != (vm_object_offset_t)-1);
5584 /* release our extra reference on failed object */
5585 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
5586 if (object == resilient_media_object) {
5587 /*
5588 * We're holding "object"'s lock, so we can't release
5589 * our extra reference at this point.
5590 * We need an extra reference on "object" anyway
5591 * (see below), so let's just transfer this reference.
5592 */
5593 resilient_media_ref_transfer = true;
5594 } else {
5595 vm_object_lock_assert_notheld(resilient_media_object);
5596 vm_object_deallocate(resilient_media_object);
5597 }
5598 resilient_media_object = VM_OBJECT_NULL;
5599 resilient_media_offset = (vm_object_offset_t)-1;
5600 resilient_media_retry = false;
5601 vm_fault_resilient_media_abort2++;
5602 }
5603
5604 /*
5605 * Make a reference to this object to
5606 * prevent its disposal while we are messing with
5607 * it. Once we have the reference, the map is free
5608 * to be diddled. Since objects reference their
5609 * shadows (and copies), they will stay around as well.
5610 */
5611 if (resilient_media_ref_transfer) {
5612 /* we already have an extra reference on this object */
5613 resilient_media_ref_transfer = false;
5614 } else {
5615 vm_object_reference_locked(object);
5616 }
5617 vm_object_paging_begin(object);
5618
5619 set_thread_pagein_error(cthread, 0);
5620 error_code = 0;
5621
5622 result_page = VM_PAGE_NULL;
5623 kr = vm_fault_page(object, offset, fault_type,
5624 (change_wiring && !wired),
5625 FALSE, /* page not looked up */
5626 &prot, &result_page, &top_page,
5627 &type_of_fault,
5628 &error_code, map->no_zero_fill,
5629 FALSE, &fault_info);
5630
5631 /*
5632 * if kr != VM_FAULT_SUCCESS, then the paging reference
5633 * has been dropped and the object unlocked... the ref_count
5634 * is still held
5635 *
5636 * if kr == VM_FAULT_SUCCESS, then the paging reference
5637 * is still held along with the ref_count on the original object
5638 *
5639 * the object is returned locked with a paging reference
5640 *
5641 * if top_page != NULL, then it's BUSY and the
5642 * object it belongs to has a paging reference
5643 * but is returned unlocked
5644 */
5645 if (kr != VM_FAULT_SUCCESS &&
5646 kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
5647 if (kr == VM_FAULT_MEMORY_ERROR &&
5648 fault_info.resilient_media) {
5649 assertf(object->internal, "object %p", object);
5650 /*
5651 * This fault failed but the mapping was
5652 * "media resilient", so we'll retry the fault in
5653 * recovery mode to get a zero-filled page in the
5654 * top object.
5655 * Keep the reference on the failing object so
5656 * that we can check that the mapping is still
5657 * pointing to it when we retry the fault.
5658 */
5659 // printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
5660 assert(!resilient_media_retry); /* no double retry */
5661 assert(resilient_media_object == VM_OBJECT_NULL);
5662 assert(resilient_media_offset == (vm_object_offset_t)-1);
5663 resilient_media_retry = true;
5664 resilient_media_object = object;
5665 resilient_media_offset = offset;
5666 // printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
5667 vm_fault_resilient_media_initiate++;
5668 goto RetryFault;
5669 } else {
5670 /*
5671 * we didn't succeed, lose the object reference
5672 * immediately.
5673 */
5674 vm_object_deallocate(object);
5675 object = VM_OBJECT_NULL; /* no longer valid */
5676 }
5677
5678 /*
5679 * See why we failed, and take corrective action.
5680 */
5681 switch (kr) {
5682 case VM_FAULT_MEMORY_SHORTAGE:
5683 if (vm_page_wait((change_wiring) ?
5684 THREAD_UNINT :
5685 THREAD_ABORTSAFE)) {
5686 goto RetryFault;
5687 }
5688 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_MEMORY_SHORTAGE), 0 /* arg */);
5689 OS_FALLTHROUGH;
5690 case VM_FAULT_INTERRUPTED:
5691 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
5692 kr = KERN_ABORTED;
5693 goto done;
5694 case VM_FAULT_RETRY:
5695 goto RetryFault;
5696 case VM_FAULT_MEMORY_ERROR:
5697 if (error_code) {
5698 kr = error_code;
5699 } else {
5700 kr = KERN_MEMORY_ERROR;
5701 }
5702 goto done;
5703 default:
5704 panic("vm_fault: unexpected error 0x%x from "
5705 "vm_fault_page()\n", kr);
5706 }
5707 }
5708 m = result_page;
5709 m_object = NULL;
5710
5711 if (m != VM_PAGE_NULL) {
5712 m_object = VM_PAGE_OBJECT(m);
5713 assert((change_wiring && !wired) ?
5714 (top_page == VM_PAGE_NULL) :
5715 ((top_page == VM_PAGE_NULL) == (m_object == object)));
5716 }
5717
5718 /*
5719 * What to do with the resulting page from vm_fault_page
5720 * if it doesn't get entered into the physical map:
5721 */
5722 #define RELEASE_PAGE(m) \
5723 MACRO_BEGIN \
5724 PAGE_WAKEUP_DONE(m); \
5725 if ( !VM_PAGE_PAGEABLE(m)) { \
5726 vm_page_lockspin_queues(); \
5727 if ( !VM_PAGE_PAGEABLE(m)) \
5728 vm_page_activate(m); \
5729 vm_page_unlock_queues(); \
5730 } \
5731 MACRO_END
5732
5733
5734 object_locks_dropped = FALSE;
5735 /*
5736 * We must verify that the maps have not changed
5737 * since our last lookup. vm_map_verify() needs the
5738 * map lock (shared) but we are holding object locks.
5739 * So we do a try_lock() first and, if that fails, we
5740 * drop the object locks and go in for the map lock again.
5741 */
5742 if (!vm_map_try_lock_read(original_map)) {
5743 if (m != VM_PAGE_NULL) {
5744 old_copy_object = m_object->copy;
5745 vm_object_unlock(m_object);
5746 } else {
5747 old_copy_object = VM_OBJECT_NULL;
5748 vm_object_unlock(object);
5749 }
5750
5751 object_locks_dropped = TRUE;
5752
5753 vm_map_lock_read(original_map);
5754 }
5755
5756 if ((map != original_map) || !vm_map_verify(map, &version)) {
5757 if (object_locks_dropped == FALSE) {
5758 if (m != VM_PAGE_NULL) {
5759 old_copy_object = m_object->copy;
5760 vm_object_unlock(m_object);
5761 } else {
5762 old_copy_object = VM_OBJECT_NULL;
5763 vm_object_unlock(object);
5764 }
5765
5766 object_locks_dropped = TRUE;
5767 }
5768
5769 /*
5770 * no object locks are held at this point
5771 */
5772 vm_object_t retry_object;
5773 vm_object_offset_t retry_offset;
5774 vm_prot_t retry_prot;
5775
5776 /*
5777 * To avoid trying to write_lock the map while another
5778 * thread has it read_locked (in vm_map_pageable), we
5779 * do not try for write permission. If the page is
5780 * still writable, we will get write permission. If it
5781 * is not, or has been marked needs_copy, we enter the
5782 * mapping without write permission, and will merely
5783 * take another fault.
5784 */
5785 map = original_map;
5786
5787 kr = vm_map_lookup_locked(&map, vaddr,
5788 fault_type & ~VM_PROT_WRITE,
5789 OBJECT_LOCK_EXCLUSIVE, &version,
5790 &retry_object, &retry_offset, &retry_prot,
5791 &wired,
5792 &fault_info,
5793 &real_map,
5794 NULL);
5795 pmap = real_map->pmap;
5796
5797 if (kr != KERN_SUCCESS) {
5798 vm_map_unlock_read(map);
5799
5800 if (m != VM_PAGE_NULL) {
5801 assert(VM_PAGE_OBJECT(m) == m_object);
5802
5803 /*
5804 * retake the lock so that
5805 * we can drop the paging reference
5806 * in vm_fault_cleanup and do the
5807 * PAGE_WAKEUP_DONE in RELEASE_PAGE
5808 */
5809 vm_object_lock(m_object);
5810
5811 RELEASE_PAGE(m);
5812
5813 vm_fault_cleanup(m_object, top_page);
5814 } else {
5815 /*
5816 * retake the lock so that
5817 * we can drop the paging reference
5818 * in vm_fault_cleanup
5819 */
5820 vm_object_lock(object);
5821
5822 vm_fault_cleanup(object, top_page);
5823 }
5824 vm_object_deallocate(object);
5825
5826 if (kr == KERN_INVALID_ADDRESS) {
5827 kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
5828 }
5829 goto done;
5830 }
5831 vm_object_unlock(retry_object);
5832
5833 if ((retry_object != object) || (retry_offset != offset)) {
5834 vm_map_unlock_read(map);
5835 if (real_map != map) {
5836 vm_map_unlock(real_map);
5837 }
5838
5839 if (m != VM_PAGE_NULL) {
5840 assert(VM_PAGE_OBJECT(m) == m_object);
5841
5842 /*
5843 * retake the lock so that
5844 * we can drop the paging reference
5845 * in vm_fault_cleanup and do the
5846 * PAGE_WAKEUP_DONE in RELEASE_PAGE
5847 */
5848 vm_object_lock(m_object);
5849
5850 RELEASE_PAGE(m);
5851
5852 vm_fault_cleanup(m_object, top_page);
5853 } else {
5854 /*
5855 * retake the lock so that
5856 * we can drop the paging reference
5857 * in vm_fault_cleanup
5858 */
5859 vm_object_lock(object);
5860
5861 vm_fault_cleanup(object, top_page);
5862 }
5863 vm_object_deallocate(object);
5864
5865 goto RetryFault;
5866 }
5867 /*
5868 * Check whether the protection has changed or the object
5869 * has been copied while we left the map unlocked.
5870 */
5871 if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
5872 /* If the pmap layer cares, pass the full set. */
5873 prot = retry_prot;
5874 } else {
5875 prot &= retry_prot;
5876 }
5877 }
5878
5879 if (object_locks_dropped == TRUE) {
5880 if (m != VM_PAGE_NULL) {
5881 vm_object_lock(m_object);
5882
5883 if (m_object->copy != old_copy_object) {
5884 /*
5885 * The copy object changed while the top-level object
5886 * was unlocked, so take away write permission.
5887 */
5888 assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot));
5889 prot &= ~VM_PROT_WRITE;
5890 }
5891 } else {
5892 vm_object_lock(object);
5893 }
5894
5895 object_locks_dropped = FALSE;
5896 }
5897
5898 if (!need_copy &&
5899 !fault_info.no_copy_on_read &&
5900 m != VM_PAGE_NULL &&
5901 VM_PAGE_OBJECT(m) != object &&
5902 !VM_PAGE_OBJECT(m)->pager_trusted &&
5903 vm_protect_privileged_from_untrusted &&
5904 !VM_PAGE_OBJECT(m)->code_signed &&
5905 current_proc_is_privileged()) {
5906 /*
5907 * We found the page we want in an "untrusted" VM object
5908 * down the shadow chain. Since the target is "privileged"
5909 * we want to perform a copy-on-read of that page, so that the
5910 * mapped object gets a stable copy and does not have to
5911 * rely on the "untrusted" object to provide the same
5912 * contents if the page gets reclaimed and has to be paged
5913 * in again later on.
5914 *
5915 * Special case: if the mapping is executable and the untrusted
5916 * object is code-signed and the process is "cs_enforced", we
5917 * do not copy-on-read because that would break code-signing
5918 * enforcement expectations (an executable page must belong
5919 * to a code-signed object) and we can rely on code-signing
5920 * to re-validate the page if it gets evicted and paged back in.
5921 */
5922 // printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5923 vm_copied_on_read++;
5924 need_copy_on_read = TRUE;
5925 need_copy = TRUE;
5926 } else {
5927 need_copy_on_read = FALSE;
5928 }
5929
5930 /*
5931 * If we want to wire down this page, but no longer have
5932 * adequate permissions, we must start all over.
5933 * If we decided to copy-on-read, we must also start all over.
5934 */
5935 if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
5936 need_copy_on_read) {
5937 vm_map_unlock_read(map);
5938 if (real_map != map) {
5939 vm_map_unlock(real_map);
5940 }
5941
5942 if (m != VM_PAGE_NULL) {
5943 assert(VM_PAGE_OBJECT(m) == m_object);
5944
5945 RELEASE_PAGE(m);
5946
5947 vm_fault_cleanup(m_object, top_page);
5948 } else {
5949 vm_fault_cleanup(object, top_page);
5950 }
5951
5952 vm_object_deallocate(object);
5953
5954 goto RetryFault;
5955 }
5956 if (m != VM_PAGE_NULL) {
5957 /*
5958 * Put this page into the physical map.
5959 * We had to do the unlock above because pmap_enter
5960 * may cause other faults. The page may be on
5961 * the pageout queues. If the pageout daemon comes
5962 * across the page, it will remove it from the queues.
5963 */
5964 if (fault_page_size < PAGE_SIZE) {
5965 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
5966 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
5967 fault_phys_offset < PAGE_SIZE),
5968 "0x%llx\n", (uint64_t)fault_phys_offset);
5969 } else {
5970 assertf(fault_phys_offset == 0,
5971 "0x%llx\n", (uint64_t)fault_phys_offset);
5972 }
5973 if (caller_pmap) {
5974 kr = vm_fault_enter(m,
5975 caller_pmap,
5976 caller_pmap_addr,
5977 fault_page_size,
5978 fault_phys_offset,
5979 prot,
5980 caller_prot,
5981 wired,
5982 change_wiring,
5983 wire_tag,
5984 &fault_info,
5985 NULL,
5986 &type_of_fault);
5987 } else {
5988 kr = vm_fault_enter(m,
5989 pmap,
5990 vaddr,
5991 fault_page_size,
5992 fault_phys_offset,
5993 prot,
5994 caller_prot,
5995 wired,
5996 change_wiring,
5997 wire_tag,
5998 &fault_info,
5999 NULL,
6000 &type_of_fault);
6001 }
6002 assert(VM_PAGE_OBJECT(m) == m_object);
6003
6004 {
6005 int event_code = 0;
6006
6007 if (m_object->internal) {
6008 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6009 } else if (m_object->object_is_shared_cache) {
6010 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6011 } else {
6012 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6013 }
6014
6015 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, event_code, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid(), 0);
6016 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid(), 0, 0, 0, 0);
6017
6018 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
6019 }
6020 if (kr != KERN_SUCCESS) {
6021 /* abort this page fault */
6022 vm_map_unlock_read(map);
6023 if (real_map != map) {
6024 vm_map_unlock(real_map);
6025 }
6026 PAGE_WAKEUP_DONE(m);
6027 vm_fault_cleanup(m_object, top_page);
6028 vm_object_deallocate(object);
6029 goto done;
6030 }
6031 if (physpage_p != NULL) {
6032 /* for vm_map_wire_and_extract() */
6033 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6034 if (prot & VM_PROT_WRITE) {
6035 vm_object_lock_assert_exclusive(m_object);
6036 m->vmp_dirty = TRUE;
6037 }
6038 }
6039 } else {
6040 vm_map_entry_t entry;
6041 vm_map_offset_t laddr;
6042 vm_map_offset_t ldelta, hdelta;
6043
6044 /*
6045 * do a pmap block mapping from the physical address
6046 * in the object
6047 */
6048
6049 if (real_map != map) {
6050 vm_map_unlock(real_map);
6051 }
6052
6053 if (original_map != map) {
6054 vm_map_unlock_read(map);
6055 vm_map_lock_read(original_map);
6056 map = original_map;
6057 }
6058 real_map = map;
6059
6060 laddr = vaddr;
6061 hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
6062
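/*
 * ldelta/hdelta start out as huge page-aligned values and are
 * narrowed in the loop below to the distance from the faulting
 * address to the nearest map entry boundaries; they bound the size
 * of the block mapping we can set up.
 */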
6063 while (vm_map_lookup_entry(map, laddr, &entry)) {
6064 if (ldelta > (laddr - entry->vme_start)) {
6065 ldelta = laddr - entry->vme_start;
6066 }
6067 if (hdelta > (entry->vme_end - laddr)) {
6068 hdelta = entry->vme_end - laddr;
6069 }
6070 if (entry->is_sub_map) {
6071 laddr = ((laddr - entry->vme_start)
6072 + VME_OFFSET(entry));
6073 vm_map_lock_read(VME_SUBMAP(entry));
6074
6075 if (map != real_map) {
6076 vm_map_unlock_read(map);
6077 }
6078 if (entry->use_pmap) {
6079 vm_map_unlock_read(real_map);
6080 real_map = VME_SUBMAP(entry);
6081 }
6082 map = VME_SUBMAP(entry);
6083 } else {
6084 break;
6085 }
6086 }
6087
6088 if (vm_map_lookup_entry(map, laddr, &entry) &&
6089 (VME_OBJECT(entry) != NULL) &&
6090 (VME_OBJECT(entry) == object)) {
6091 uint16_t superpage;
6092
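/*
 * A pagerless, physically contiguous object that is mapped in its
 * entirety at offset 0, with a start address aligned to its size,
 * can be entered as a superpage mapping; otherwise fall back to a
 * regular block mapping.
 */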
6093 if (!object->pager_created &&
6094 object->phys_contiguous &&
6095 VME_OFFSET(entry) == 0 &&
6096 (entry->vme_end - entry->vme_start == object->vo_size) &&
6097 VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6098 superpage = VM_MEM_SUPERPAGE;
6099 } else {
6100 superpage = 0;
6101 }
6102
6103 if (superpage && physpage_p) {
6104 /* for vm_map_wire_and_extract() */
6105 *physpage_p = (ppnum_t)
6106 ((((vm_map_offset_t)
6107 object->vo_shadow_offset)
6108 + VME_OFFSET(entry)
6109 + (laddr - entry->vme_start))
6110 >> PAGE_SHIFT);
6111 }
6112
6113 if (caller_pmap) {
6114 /*
6115 * Set up a block mapped area
6116 */
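/* the mapping size, in pages, must fit the 32-bit size argument below */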
6117 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6118 kr = pmap_map_block_addr(caller_pmap,
6119 (addr64_t)(caller_pmap_addr - ldelta),
6120 (pmap_paddr_t)(((vm_map_offset_t) (VME_OBJECT(entry)->vo_shadow_offset)) +
6121 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6122 (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6123 (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6124
6125 if (kr != KERN_SUCCESS) {
6126 goto cleanup;
6127 }
6128 } else {
6129 /*
6130 * Set up a block mapped area
6131 */
6132 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6133 kr = pmap_map_block_addr(real_map->pmap,
6134 (addr64_t)(vaddr - ldelta),
6135 (pmap_paddr_t)(((vm_map_offset_t)(VME_OBJECT(entry)->vo_shadow_offset)) +
6136 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6137 (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6138 (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6139
6140 if (kr != KERN_SUCCESS) {
6141 goto cleanup;
6142 }
6143 }
6144 }
6145 }
6146
6147 /*
6148 * Success
6149 */
6150 kr = KERN_SUCCESS;
6151
6152 /*
6153 * TODO: could most of the done cases just use cleanup?
6154 */
6155 cleanup:
6156 /*
6157 * Unlock everything, and return
6158 */
6159 vm_map_unlock_read(map);
6160 if (real_map != map) {
6161 vm_map_unlock(real_map);
6162 }
6163
6164 if (m != VM_PAGE_NULL) {
6165 assert(VM_PAGE_OBJECT(m) == m_object);
6166
6167 if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6168 vm_object_paging_begin(m_object);
6169
6170 assert(written_on_object == VM_OBJECT_NULL);
6171 written_on_object = m_object;
6172 written_on_pager = m_object->pager;
6173 written_on_offset = m_object->paging_offset + m->vmp_offset;
6174 }
6175 PAGE_WAKEUP_DONE(m);
6176
6177 vm_fault_cleanup(m_object, top_page);
6178 } else {
6179 vm_fault_cleanup(object, top_page);
6180 }
6181
6182 vm_object_deallocate(object);
6183
6184 #undef RELEASE_PAGE
6185
6186 done:
6187 thread_interrupt_level(interruptible_state);
6188
6189 if (resilient_media_object != VM_OBJECT_NULL) {
6190 assert(resilient_media_retry);
6191 assert(resilient_media_offset != (vm_object_offset_t)-1);
6192 /* release extra reference on failed object */
6193 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6194 vm_object_lock_assert_notheld(resilient_media_object);
6195 vm_object_deallocate(resilient_media_object);
6196 resilient_media_object = VM_OBJECT_NULL;
6197 resilient_media_offset = (vm_object_offset_t)-1;
6198 resilient_media_retry = false;
6199 vm_fault_resilient_media_release++;
6200 }
6201 assert(!resilient_media_retry);
6202
6203 /*
6204 * Only I/O throttle on faults which cause a pagein/swapin.
6205 */
6206 if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6207 throttle_lowpri_io(1);
6208 } else {
6209 if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6210 if ((throttle_delay = vm_page_throttled(TRUE))) {
6211 if (vm_debug_events) {
6212 if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6213 VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6214 } else if (type_of_fault == DBG_COW_FAULT) {
6215 VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6216 } else {
6217 VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6218 }
6219 }
6220 delay(throttle_delay);
6221 }
6222 }
6223 }
6224
6225 if (written_on_object) {
6226 vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6227
6228 vm_object_lock(written_on_object);
6229 vm_object_paging_end(written_on_object);
6230 vm_object_unlock(written_on_object);
6231
6232 written_on_object = VM_OBJECT_NULL;
6233 }
6234
6235 if (rtfault) {
6236 vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6237 }
6238
6239 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
6240 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
6241 ((uint64_t)trace_vaddr >> 32),
6242 trace_vaddr,
6243 kr,
6244 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
6245 0);
6246
6247 if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6248 DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6249 }
6250
6251 return kr;
6252 }
6253
6254 /*
6255 * vm_fault_wire:
6256 *
6257 * Wire down a range of virtual addresses in a map.
6258 */
6259 kern_return_t
6260 vm_fault_wire(
6261 vm_map_t map,
6262 vm_map_entry_t entry,
6263 vm_prot_t prot,
6264 vm_tag_t wire_tag,
6265 pmap_t pmap,
6266 vm_map_offset_t pmap_addr,
6267 ppnum_t *physpage_p)
6268 {
6269 vm_map_offset_t va;
6270 vm_map_offset_t end_addr = entry->vme_end;
6271 kern_return_t rc;
6272 vm_map_size_t effective_page_size;
6273
6274 assert(entry->in_transition);
6275
6276 if ((VME_OBJECT(entry) != NULL) &&
6277 !entry->is_sub_map &&
6278 VME_OBJECT(entry)->phys_contiguous) {
6279 return KERN_SUCCESS;
6280 }
6281
6282 /*
6283 * Inform the physical mapping system that the
6284 * range of addresses may not fault, so that
6285 * page tables and such can be locked down as well.
6286 */
6287
6288 pmap_pageable(pmap, pmap_addr,
6289 pmap_addr + (end_addr - entry->vme_start), FALSE);
6290
6291 /*
6292 * We simulate a fault to get the page and enter it
6293 * in the physical map.
6294 */
6295
6296 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
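/*
 * Step through the range using the smaller of the map's page size
 * and the kernel page size, so sub-page mappings (e.g. 4K mappings
 * on a 16K kernel) are each wired individually.
 */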
6297 for (va = entry->vme_start;
6298 va < end_addr;
6299 va += effective_page_size) {
6300 rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6301 pmap_addr + (va - entry->vme_start),
6302 physpage_p);
6303 if (rc != KERN_SUCCESS) {
6304 rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
6305 ((pmap == kernel_pmap)
6306 ? THREAD_UNINT
6307 : THREAD_ABORTSAFE),
6308 pmap,
6309 (pmap_addr +
6310 (va - entry->vme_start)),
6311 physpage_p);
6312 DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6313 }
6314
6315 if (rc != KERN_SUCCESS) {
6316 struct vm_map_entry tmp_entry = *entry;
6317
6318 /* unwire wired pages */
6319 tmp_entry.vme_end = va;
6320 vm_fault_unwire(map,
6321 &tmp_entry, FALSE, pmap, pmap_addr);
6322
6323 return rc;
6324 }
6325 }
6326 return KERN_SUCCESS;
6327 }
6328
6329 /*
6330 * vm_fault_unwire:
6331 *
6332 * Unwire a range of virtual addresses in a map.
6333 */
6334 void
6335 vm_fault_unwire(
6336 vm_map_t map,
6337 vm_map_entry_t entry,
6338 boolean_t deallocate,
6339 pmap_t pmap,
6340 vm_map_offset_t pmap_addr)
6341 {
6342 vm_map_offset_t va;
6343 vm_map_offset_t end_addr = entry->vme_end;
6344 vm_object_t object;
6345 struct vm_object_fault_info fault_info = {};
6346 unsigned int unwired_pages;
6347 vm_map_size_t effective_page_size;
6348
6349 object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6350
6351 /*
6352 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6353 * do anything since such memory is wired by default. So we don't have
6354 * anything to undo here.
6355 */
6356
6357 if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6358 return;
6359 }
6360
6361 fault_info.interruptible = THREAD_UNINT;
6362 fault_info.behavior = entry->behavior;
6363 fault_info.user_tag = VME_ALIAS(entry);
6364 if (entry->iokit_acct ||
6365 (!entry->is_sub_map && !entry->use_pmap)) {
6366 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6367 }
6368 fault_info.lo_offset = VME_OFFSET(entry);
6369 fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6370 fault_info.no_cache = entry->no_cache;
6371 fault_info.stealth = TRUE;
6372
6373 unwired_pages = 0;
6374
6375 /*
6376 * Since the pages are wired down, we must be able to
6377 * get their mappings from the physical map system.
6378 */
6379
6380 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6381 for (va = entry->vme_start;
6382 va < end_addr;
6383 va += effective_page_size) {
6384 if (object == VM_OBJECT_NULL) {
6385 if (pmap) {
6386 pmap_change_wiring(pmap,
6387 pmap_addr + (va - entry->vme_start), FALSE);
6388 }
6389 (void) vm_fault(map, va, VM_PROT_NONE,
6390 TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6391 } else {
6392 vm_prot_t prot;
6393 vm_page_t result_page;
6394 vm_page_t top_page;
6395 vm_object_t result_object;
6396 vm_fault_return_t result;
6397
6398 /* cap cluster size at maximum UPL size */
6399 upl_size_t cluster_size;
6400 if (os_sub_overflow(end_addr, va, &cluster_size)) {
6401 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6402 }
6403 fault_info.cluster_size = cluster_size;
6404
6405 do {
6406 prot = VM_PROT_NONE;
6407
6408 vm_object_lock(object);
6409 vm_object_paging_begin(object);
6410 result_page = VM_PAGE_NULL;
6411 result = vm_fault_page(
6412 object,
6413 (VME_OFFSET(entry) +
6414 (va - entry->vme_start)),
6415 VM_PROT_NONE, TRUE,
6416 FALSE, /* page not looked up */
6417 &prot, &result_page, &top_page,
6418 (int *)0,
6419 NULL, map->no_zero_fill,
6420 FALSE, &fault_info);
6421 } while (result == VM_FAULT_RETRY);
6422
6423 /*
6424 * If this was a mapping to a file on a device that has been forcibly
6425 * unmounted, then we won't get a page back from vm_fault_page(). Just
6426 * move on to the next one in case the remaining pages are mapped from
6427 * different objects. During a forced unmount, the object is terminated
6428 * so the alive flag will be false if this happens. A forced unmount
6429 * will occur when an external disk is unplugged before the user does an
6430 * eject, so we don't want to panic in that situation.
6431 */
6432
6433 if (result == VM_FAULT_MEMORY_ERROR && !object->alive) {
6434 continue;
6435 }
6436
6437 if (result == VM_FAULT_MEMORY_ERROR &&
6438 object == kernel_object) {
6439 /*
6440 * This must have been allocated with
6441 * KMA_KOBJECT and KMA_VAONLY and there's
6442 * no physical page at this offset.
6443 * We're done (no page to free).
6444 */
6445 assert(deallocate);
6446 continue;
6447 }
6448
6449 if (result != VM_FAULT_SUCCESS) {
6450 panic("vm_fault_unwire: failure");
6451 }
6452
6453 result_object = VM_PAGE_OBJECT(result_page);
6454
6455 if (deallocate) {
6456 assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6457 vm_page_fictitious_addr);
6458 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6459 if (VM_PAGE_WIRED(result_page)) {
6460 unwired_pages++;
6461 }
6462 VM_PAGE_FREE(result_page);
6463 } else {
6464 if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) {
6465 pmap_change_wiring(pmap,
6466 pmap_addr + (va - entry->vme_start), FALSE);
6467 }
6468
6469
6470 if (VM_PAGE_WIRED(result_page)) {
6471 vm_page_lockspin_queues();
6472 vm_page_unwire(result_page, TRUE);
6473 vm_page_unlock_queues();
6474 unwired_pages++;
6475 }
6476 if (entry->zero_wired_pages) {
6477 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
6478 entry->zero_wired_pages = FALSE;
6479 }
6480
6481 PAGE_WAKEUP_DONE(result_page);
6482 }
6483 vm_fault_cleanup(result_object, top_page);
6484 }
6485 }
6486
6487 /*
6488 * Inform the physical mapping system that the range
6489 * of addresses may fault, so that page tables and
6490 * such may be unwired themselves.
6491 */
6492
6493 pmap_pageable(pmap, pmap_addr,
6494 pmap_addr + (end_addr - entry->vme_start), TRUE);
6495
6496 if (kernel_object == object) {
6497 /*
6498 * Would like to make user_tag in vm_object_fault_info
6499 * vm_tag_t (unsigned short), but user_tag derives its value from
6500 * VME_ALIAS(entry) in a few places, and VME_ALIAS, in turn, casts
6501 * to an _unsigned int_ which is used by non-fault_info paths
6502 * throughout the code.
6503 *
6504 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
6505 */
6506 assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
6507 "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
6508 vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages));
6509 }
6510 }
6511
6512 /*
6513 * vm_fault_wire_fast:
6514 *
6515 * Handle common case of a wire down page fault at the given address.
6516 * If successful, the page is inserted into the associated physical map.
6517 * The map entry is passed in to avoid the overhead of a map lookup.
6518 *
6519 * NOTE: the given address should be truncated to the
6520 * proper page address.
6521 *
6522 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
6523 * a standard error specifying why the fault is fatal is returned.
6524 *
6525 * The map in question must be referenced, and remains so.
6526 * Caller has a read lock on the map.
6527 *
6528 * This is a stripped version of vm_fault() for wiring pages. Anything
6529 * other than the common case will return KERN_FAILURE, and the caller
6530 * is expected to call vm_fault().
6531 */
6532 static kern_return_t
6533 vm_fault_wire_fast(
6534 __unused vm_map_t map,
6535 vm_map_offset_t va,
6536 __unused vm_prot_t caller_prot,
6537 vm_tag_t wire_tag,
6538 vm_map_entry_t entry,
6539 pmap_t pmap,
6540 vm_map_offset_t pmap_addr,
6541 ppnum_t *physpage_p)
6542 {
6543 vm_object_t object;
6544 vm_object_offset_t offset;
6545 vm_page_t m;
6546 vm_prot_t prot;
6547 thread_t thread = current_thread();
6548 int type_of_fault;
6549 kern_return_t kr;
6550 vm_map_size_t fault_page_size;
6551 vm_map_offset_t fault_phys_offset;
6552 struct vm_object_fault_info fault_info = {};
6553
6554 counter_inc(&vm_statistics_faults);
6555
6556 if (thread != THREAD_NULL) {
6557 counter_inc(&get_threadtask(thread)->faults);
6558 }
6559
6560 /*
6561 * Recovery actions
6562 */
6563
6564 #undef RELEASE_PAGE
6565 #define RELEASE_PAGE(m) { \
6566 PAGE_WAKEUP_DONE(m); \
6567 vm_page_lockspin_queues(); \
6568 vm_page_unwire(m, TRUE); \
6569 vm_page_unlock_queues(); \
6570 }
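/*
 * RELEASE_PAGE undoes the wire taken below and wakes up any waiters
 * when we have to bail out after wiring the page.
 */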
6571
6572
6573 #undef UNLOCK_THINGS
6574 #define UNLOCK_THINGS { \
6575 vm_object_paging_end(object); \
6576 vm_object_unlock(object); \
6577 }
6578
6579 #undef UNLOCK_AND_DEALLOCATE
6580 #define UNLOCK_AND_DEALLOCATE { \
6581 UNLOCK_THINGS; \
6582 vm_object_deallocate(object); \
6583 }
6584 /*
6585 * Give up and have caller do things the hard way.
6586 */
6587
6588 #define GIVE_UP { \
6589 UNLOCK_AND_DEALLOCATE; \
6590 return(KERN_FAILURE); \
6591 }
6592
6593
6594 /*
6595 * If this entry is not directly to a vm_object, bail out.
6596 */
6597 if (entry->is_sub_map) {
6598 assert(physpage_p == NULL);
6599 return KERN_FAILURE;
6600 }
6601
6602 /*
6603 * Find the backing store object and offset into it.
6604 */
6605
6606 object = VME_OBJECT(entry);
6607 offset = (va - entry->vme_start) + VME_OFFSET(entry);
6608 prot = entry->protection;
6609
6610 /*
6611 * Make a reference to this object to prevent its
6612 * disposal while we are messing with it.
6613 */
6614
6615 vm_object_lock(object);
6616 vm_object_reference_locked(object);
6617 vm_object_paging_begin(object);
6618
6619 /*
6620 * INVARIANTS (through entire routine):
6621 *
6622 * 1) At all times, we must either have the object
6623 * lock or a busy page in some object to prevent
6624 * some other thread from trying to bring in
6625 * the same page.
6626 *
6627 * 2) Once we have a busy page, we must remove it from
6628 * the pageout queues, so that the pageout daemon
6629 * will not grab it away.
6630 *
6631 */
6632
6633 /*
6634 * Look for page in top-level object. If it's not there or
6635 * there's something going on, give up.
6636 */
6637 m = vm_page_lookup(object, vm_object_trunc_page(offset));
6638 if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
6639 (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
6640 GIVE_UP;
6641 }
6642 if (m->vmp_fictitious &&
6643 VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
6644 /*
6645 * Guard pages are fictitious pages and are never
6646 * entered into a pmap, so let's say it's been wired...
6647 */
6648 kr = KERN_SUCCESS;
6649 goto done;
6650 }
6651
6652 /*
6653 * Wire the page down now. All bail outs beyond this
6654 * point must unwire the page.
6655 */
6656
6657 vm_page_lockspin_queues();
6658 vm_page_wire(m, wire_tag, TRUE);
6659 vm_page_unlock_queues();
6660
6661 /*
6662 * Mark page busy for other threads.
6663 */
6664 assert(!m->vmp_busy);
6665 m->vmp_busy = TRUE;
6666 assert(!m->vmp_absent);
6667
6668 /*
6669 * Give up if the page is being written and there's a copy object
6670 */
6671 if ((object->copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
6672 RELEASE_PAGE(m);
6673 GIVE_UP;
6674 }
6675
6676 fault_info.user_tag = VME_ALIAS(entry);
6677 fault_info.pmap_options = 0;
6678 if (entry->iokit_acct ||
6679 (!entry->is_sub_map && !entry->use_pmap)) {
6680 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6681 }
6682
6683 fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6684 fault_phys_offset = offset - vm_object_trunc_page(offset);
6685
6686 /*
6687 * Put this page into the physical map.
6688 */
6689 type_of_fault = DBG_CACHE_HIT_FAULT;
6690 kr = vm_fault_enter(m,
6691 pmap,
6692 pmap_addr,
6693 fault_page_size,
6694 fault_phys_offset,
6695 prot,
6696 prot,
6697 TRUE, /* wired */
6698 FALSE, /* change_wiring */
6699 wire_tag,
6700 &fault_info,
6701 NULL,
6702 &type_of_fault);
6703 if (kr != KERN_SUCCESS) {
6704 RELEASE_PAGE(m);
6705 GIVE_UP;
6706 }
6707
6708 done:
6709 /*
6710 * Unlock everything, and return
6711 */
6712
6713 if (physpage_p) {
6714 /* for vm_map_wire_and_extract() */
6715 if (kr == KERN_SUCCESS) {
6716 assert(object == VM_PAGE_OBJECT(m));
6717 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6718 if (prot & VM_PROT_WRITE) {
6719 vm_object_lock_assert_exclusive(object);
6720 m->vmp_dirty = TRUE;
6721 }
6722 } else {
6723 *physpage_p = 0;
6724 }
6725 }
6726
6727 PAGE_WAKEUP_DONE(m);
6728 UNLOCK_AND_DEALLOCATE;
6729
6730 return kr;
6731 }
6732
6733 /*
6734 * Routine: vm_fault_copy_cleanup
6735 * Purpose:
6736 * Release a page used by vm_fault_copy.
6737 */
6738
6739 static void
6740 vm_fault_copy_cleanup(
6741 vm_page_t page,
6742 vm_page_t top_page)
6743 {
6744 vm_object_t object = VM_PAGE_OBJECT(page);
6745
6746 vm_object_lock(object);
6747 PAGE_WAKEUP_DONE(page);
6748 if (!VM_PAGE_PAGEABLE(page)) {
6749 vm_page_lockspin_queues();
6750 if (!VM_PAGE_PAGEABLE(page)) {
6751 vm_page_activate(page);
6752 }
6753 vm_page_unlock_queues();
6754 }
6755 vm_fault_cleanup(object, top_page);
6756 }
6757
6758 static void
6759 vm_fault_copy_dst_cleanup(
6760 vm_page_t page)
6761 {
6762 vm_object_t object;
6763
6764 if (page != VM_PAGE_NULL) {
6765 object = VM_PAGE_OBJECT(page);
6766 vm_object_lock(object);
6767 vm_page_lockspin_queues();
6768 vm_page_unwire(page, TRUE);
6769 vm_page_unlock_queues();
6770 vm_object_paging_end(object);
6771 vm_object_unlock(object);
6772 }
6773 }
6774
6775 /*
6776 * Routine: vm_fault_copy
6777 *
6778 * Purpose:
6779 * Copy pages from one virtual memory object to another --
6780 * neither the source nor destination pages need be resident.
6781 *
6782 * Before actually copying a page, the version associated with
6783 * the destination address map will be verified.
6784 *
6785 * In/out conditions:
6786 * The caller must hold a reference, but not a lock, to
6787 * each of the source and destination objects and to the
6788 * destination map.
6789 *
6790 * Results:
6791 * Returns KERN_SUCCESS if no errors were encountered in
6792 * reading or writing the data. Returns KERN_INTERRUPTED if
6793 * the operation was interrupted (only possible if the
6794 * "interruptible" argument is asserted). Other return values
6795 * indicate a permanent error in copying the data.
6796 *
6797 * The actual amount of data copied will be returned in the
6798 * "copy_size" argument. In the event that the destination map
6799 * verification failed, this amount may be less than the amount
6800 * requested.
6801 */
6802 kern_return_t
6803 vm_fault_copy(
6804 vm_object_t src_object,
6805 vm_object_offset_t src_offset,
6806 vm_map_size_t *copy_size, /* INOUT */
6807 vm_object_t dst_object,
6808 vm_object_offset_t dst_offset,
6809 vm_map_t dst_map,
6810 vm_map_version_t *dst_version,
6811 int interruptible)
6812 {
6813 vm_page_t result_page;
6814
6815 vm_page_t src_page;
6816 vm_page_t src_top_page;
6817 vm_prot_t src_prot;
6818
6819 vm_page_t dst_page;
6820 vm_page_t dst_top_page;
6821 vm_prot_t dst_prot;
6822
6823 vm_map_size_t amount_left;
6824 vm_object_t old_copy_object;
6825 vm_object_t result_page_object = NULL;
6826 kern_return_t error = 0;
6827 vm_fault_return_t result;
6828
6829 vm_map_size_t part_size;
6830 struct vm_object_fault_info fault_info_src = {};
6831 struct vm_object_fault_info fault_info_dst = {};
6832
6833 /*
6834 * In order not to confuse the clustered pageins, align
6835 * the different offsets on a page boundary.
6836 */
6837
6838 #define RETURN(x) \
6839 MACRO_BEGIN \
6840 *copy_size -= amount_left; \
6841 MACRO_RETURN(x); \
6842 MACRO_END
6843
6844 amount_left = *copy_size;
6845
6846 fault_info_src.interruptible = interruptible;
6847 fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
6848 fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
6849 fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
6850 fault_info_src.stealth = TRUE;
6851
6852 fault_info_dst.interruptible = interruptible;
6853 fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
6854 fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
6855 fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
6856 fault_info_dst.stealth = TRUE;
6857
6858 do { /* while (amount_left > 0) */
6859 /*
6860 * There may be a deadlock if both source and destination
6861 * pages are the same. To avoid this deadlock, the copy must
6862 * start by getting the destination page in order to apply
6863 * COW semantics if any.
6864 */
6865
6866 RetryDestinationFault:;
6867
6868 dst_prot = VM_PROT_WRITE | VM_PROT_READ;
6869
6870 vm_object_lock(dst_object);
6871 vm_object_paging_begin(dst_object);
6872
6873 /* cap cluster size at maximum UPL size */
6874 upl_size_t cluster_size;
6875 if (os_convert_overflow(amount_left, &cluster_size)) {
6876 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6877 }
6878 fault_info_dst.cluster_size = cluster_size;
6879
6880 dst_page = VM_PAGE_NULL;
6881 result = vm_fault_page(dst_object,
6882 vm_object_trunc_page(dst_offset),
6883 VM_PROT_WRITE | VM_PROT_READ,
6884 FALSE,
6885 FALSE, /* page not looked up */
6886 &dst_prot, &dst_page, &dst_top_page,
6887 (int *)0,
6888 &error,
6889 dst_map->no_zero_fill,
6890 FALSE, &fault_info_dst);
6891 switch (result) {
6892 case VM_FAULT_SUCCESS:
6893 break;
6894 case VM_FAULT_RETRY:
6895 goto RetryDestinationFault;
6896 case VM_FAULT_MEMORY_SHORTAGE:
6897 if (vm_page_wait(interruptible)) {
6898 goto RetryDestinationFault;
6899 }
6900 OS_FALLTHROUGH;
6901 case VM_FAULT_INTERRUPTED:
6902 RETURN(MACH_SEND_INTERRUPTED);
6903 case VM_FAULT_SUCCESS_NO_VM_PAGE:
6904 /* success but no VM page: fail the copy */
6905 vm_object_paging_end(dst_object);
6906 vm_object_unlock(dst_object);
6907 OS_FALLTHROUGH;
6908 case VM_FAULT_MEMORY_ERROR:
6909 if (error) {
6910 return error;
6911 } else {
6912 return KERN_MEMORY_ERROR;
6913 }
6914 default:
6915 panic("vm_fault_copy: unexpected error 0x%x from "
6916 "vm_fault_page()\n", result);
6917 }
6918 assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
6919
6920 assert(dst_object == VM_PAGE_OBJECT(dst_page));
6921 old_copy_object = dst_object->copy;
6922
6923 /*
6924 * There exists the possibility that the source and
6925 * destination page are the same. But we can't
6926 * easily determine that now. If they are the
6927 * same, the call to vm_fault_page() for the
6928 * destination page will deadlock. To prevent this we
6929 * wire the page so we can drop busy without having
6930 * the page daemon steal the page. We clean up the
6931 * top page but keep the paging reference on the object
6932 * holding the dest page so it doesn't go away.
6933 */
6934
6935 vm_page_lockspin_queues();
6936 vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
6937 vm_page_unlock_queues();
6938 PAGE_WAKEUP_DONE(dst_page);
6939 vm_object_unlock(dst_object);
6940
6941 if (dst_top_page != VM_PAGE_NULL) {
6942 vm_object_lock(dst_object);
6943 VM_PAGE_FREE(dst_top_page);
6944 vm_object_paging_end(dst_object);
6945 vm_object_unlock(dst_object);
6946 }
6947
6948 RetrySourceFault:;
6949
6950 if (src_object == VM_OBJECT_NULL) {
6951 /*
6952 * No source object. We will just
6953 * zero-fill the page in dst_object.
6954 */
6955 src_page = VM_PAGE_NULL;
6956 result_page = VM_PAGE_NULL;
6957 } else {
6958 vm_object_lock(src_object);
6959 src_page = vm_page_lookup(src_object,
6960 vm_object_trunc_page(src_offset));
6961 if (src_page == dst_page) {
6962 src_prot = dst_prot;
6963 result_page = VM_PAGE_NULL;
6964 } else {
6965 src_prot = VM_PROT_READ;
6966 vm_object_paging_begin(src_object);
6967
6968 /* cap cluster size at maximum UPL size */
6969 if (os_convert_overflow(amount_left, &cluster_size)) {
6970 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6971 }
6972 fault_info_src.cluster_size = cluster_size;
6973
6974 result_page = VM_PAGE_NULL;
6975 result = vm_fault_page(
6976 src_object,
6977 vm_object_trunc_page(src_offset),
6978 VM_PROT_READ, FALSE,
6979 FALSE, /* page not looked up */
6980 &src_prot,
6981 &result_page, &src_top_page,
6982 (int *)0, &error, FALSE,
6983 FALSE, &fault_info_src);
6984
6985 switch (result) {
6986 case VM_FAULT_SUCCESS:
6987 break;
6988 case VM_FAULT_RETRY:
6989 goto RetrySourceFault;
6990 case VM_FAULT_MEMORY_SHORTAGE:
6991 if (vm_page_wait(interruptible)) {
6992 goto RetrySourceFault;
6993 }
6994 OS_FALLTHROUGH;
6995 case VM_FAULT_INTERRUPTED:
6996 vm_fault_copy_dst_cleanup(dst_page);
6997 RETURN(MACH_SEND_INTERRUPTED);
6998 case VM_FAULT_SUCCESS_NO_VM_PAGE:
6999 /* success but no VM page: fail */
7000 vm_object_paging_end(src_object);
7001 vm_object_unlock(src_object);
7002 OS_FALLTHROUGH;
7003 case VM_FAULT_MEMORY_ERROR:
7004 vm_fault_copy_dst_cleanup(dst_page);
7005 if (error) {
7006 return error;
7007 } else {
7008 return KERN_MEMORY_ERROR;
7009 }
7010 default:
7011 panic("vm_fault_copy(2): unexpected "
7012 "error 0x%x from "
7013 "vm_fault_page()\n", result);
7014 }
7015
7016 result_page_object = VM_PAGE_OBJECT(result_page);
7017 assert((src_top_page == VM_PAGE_NULL) ==
7018 (result_page_object == src_object));
7019 }
7020 assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7021 vm_object_unlock(result_page_object);
7022 }
7023
7024 vm_map_lock_read(dst_map);
7025
7026 if (!vm_map_verify(dst_map, dst_version)) {
7027 vm_map_unlock_read(dst_map);
7028 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7029 vm_fault_copy_cleanup(result_page, src_top_page);
7030 }
7031 vm_fault_copy_dst_cleanup(dst_page);
7032 break;
7033 }
7034 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7035
7036 vm_object_lock(dst_object);
7037
7038 if (dst_object->copy != old_copy_object) {
7039 vm_object_unlock(dst_object);
7040 vm_map_unlock_read(dst_map);
7041 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7042 vm_fault_copy_cleanup(result_page, src_top_page);
7043 }
7044 vm_fault_copy_dst_cleanup(dst_page);
7045 break;
7046 }
7047 vm_object_unlock(dst_object);
7048
7049 /*
7050 * Copy the page, and note that it is dirty
7051 * immediately.
7052 */
7053
7054 if (!page_aligned(src_offset) ||
7055 !page_aligned(dst_offset) ||
7056 !page_aligned(amount_left)) {
7057 vm_object_offset_t src_po,
7058 dst_po;
7059
7060 src_po = src_offset - vm_object_trunc_page(src_offset);
7061 dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7062
7063 if (dst_po > src_po) {
7064 part_size = PAGE_SIZE - dst_po;
7065 } else {
7066 part_size = PAGE_SIZE - src_po;
7067 }
7068 if (part_size > (amount_left)) {
7069 part_size = amount_left;
7070 }
7071
7072 if (result_page == VM_PAGE_NULL) {
7073 assert((vm_offset_t) dst_po == dst_po);
7074 assert((vm_size_t) part_size == part_size);
7075 vm_page_part_zero_fill(dst_page,
7076 (vm_offset_t) dst_po,
7077 (vm_size_t) part_size);
7078 } else {
7079 assert((vm_offset_t) src_po == src_po);
7080 assert((vm_offset_t) dst_po == dst_po);
7081 assert((vm_size_t) part_size == part_size);
7082 vm_page_part_copy(result_page,
7083 (vm_offset_t) src_po,
7084 dst_page,
7085 (vm_offset_t) dst_po,
7086 (vm_size_t)part_size);
7087 if (!dst_page->vmp_dirty) {
7088 vm_object_lock(dst_object);
7089 SET_PAGE_DIRTY(dst_page, TRUE);
7090 vm_object_unlock(dst_object);
7091 }
7092 }
7093 } else {
7094 part_size = PAGE_SIZE;
7095
7096 if (result_page == VM_PAGE_NULL) {
7097 vm_page_zero_fill(dst_page);
7098 } else {
7099 vm_object_lock(result_page_object);
7100 vm_page_copy(result_page, dst_page);
7101 vm_object_unlock(result_page_object);
7102
7103 if (!dst_page->vmp_dirty) {
7104 vm_object_lock(dst_object);
7105 SET_PAGE_DIRTY(dst_page, TRUE);
7106 vm_object_unlock(dst_object);
7107 }
7108 }
7109 }
7110
7111 /*
7112 * Unlock everything, and return
7113 */
7114
7115 vm_map_unlock_read(dst_map);
7116
7117 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7118 vm_fault_copy_cleanup(result_page, src_top_page);
7119 }
7120 vm_fault_copy_dst_cleanup(dst_page);
7121
7122 amount_left -= part_size;
7123 src_offset += part_size;
7124 dst_offset += part_size;
7125 } while (amount_left > 0);
7126
7127 RETURN(KERN_SUCCESS);
7128 #undef RETURN
7129
7130 /*NOTREACHED*/
7131 }
7132
7133 #if VM_FAULT_CLASSIFY
7134 /*
7135 * Temporary statistics gathering support.
7136 */
7137
7138 /*
7139 * Statistics arrays:
7140 */
7141 #define VM_FAULT_TYPES_MAX 5
7142 #define VM_FAULT_LEVEL_MAX 8
7143
7144 int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7145
7146 #define VM_FAULT_TYPE_ZERO_FILL 0
7147 #define VM_FAULT_TYPE_MAP_IN 1
7148 #define VM_FAULT_TYPE_PAGER 2
7149 #define VM_FAULT_TYPE_COPY 3
7150 #define VM_FAULT_TYPE_OTHER 4
7151
7152
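/*
 * vm_fault_classify:
 * Classify a fault on (object, offset) for the statistics above by
 * walking the shadow chain: a resident page counts as a map-in or a
 * copy fault (or "other" if it is in a transient state), a missing
 * page backed by a pager counts as a pager fault, and a missing page
 * with no shadow counts as a zero-fill. "level" records how deep in
 * the shadow chain the decision was made.
 */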
7153 void
7154 vm_fault_classify(vm_object_t object,
7155 vm_object_offset_t offset,
7156 vm_prot_t fault_type)
7157 {
7158 int type, level = 0;
7159 vm_page_t m;
7160
7161 while (TRUE) {
7162 m = vm_page_lookup(object, offset);
7163 if (m != VM_PAGE_NULL) {
7164 if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7165 type = VM_FAULT_TYPE_OTHER;
7166 break;
7167 }
7168 if (((fault_type & VM_PROT_WRITE) == 0) ||
7169 ((level == 0) && object->copy == VM_OBJECT_NULL)) {
7170 type = VM_FAULT_TYPE_MAP_IN;
7171 break;
7172 }
7173 type = VM_FAULT_TYPE_COPY;
7174 break;
7175 } else {
7176 if (object->pager_created) {
7177 type = VM_FAULT_TYPE_PAGER;
7178 break;
7179 }
7180 if (object->shadow == VM_OBJECT_NULL) {
7181 type = VM_FAULT_TYPE_ZERO_FILL;
7182 break;
7183 }
7184
7185 offset += object->vo_shadow_offset;
7186 object = object->shadow;
7187 level++;
7188 continue;
7189 }
7190 }
7191
7192 if (level > VM_FAULT_LEVEL_MAX) {
7193 level = VM_FAULT_LEVEL_MAX;
7194 }
7195
7196 vm_fault_stats[type][level] += 1;
7197
7198 return;
7199 }
7200
7201 /* cleanup routine to call from debugger */
7202
7203 void
7204 vm_fault_classify_init(void)
7205 {
7206 int type, level;
7207
7208 for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7209 for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7210 vm_fault_stats[type][level] = 0;
7211 }
7212 }
7213
7214 return;
7215 }
7216 #endif /* VM_FAULT_CLASSIFY */
7217
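/*
 * kdp_lightweight_fault:
 * Debugger-context translation of a page-aligned user address to a
 * physical address without taking locks or faulting: bail out if any
 * required lock is already held exclusively or the page is in a
 * transient state, walk the shadow chain for a resident page, and
 * fall back to decompressing a compressed page into the KDP
 * decompression scratch page. Returns 0 if the address cannot be
 * resolved safely.
 */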
7218 vm_offset_t
7219 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
7220 {
7221 vm_map_entry_t entry;
7222 vm_object_t object;
7223 vm_offset_t object_offset;
7224 vm_page_t m;
7225 int compressor_external_state, compressed_count_delta;
7226 int compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7227 int my_fault_type = VM_PROT_READ;
7228 kern_return_t kr;
7229 int effective_page_mask, effective_page_size;
7230
7231 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7232 effective_page_mask = VM_MAP_PAGE_MASK(map);
7233 effective_page_size = VM_MAP_PAGE_SIZE(map);
7234 } else {
7235 effective_page_mask = PAGE_MASK;
7236 effective_page_size = PAGE_SIZE;
7237 }
7238
7239 if (not_in_kdp) {
7240 panic("kdp_lightweight_fault called from outside of debugger context");
7241 }
7242
7243 assert(map != VM_MAP_NULL);
7244
7245 assert((cur_target_addr & effective_page_mask) == 0);
7246 if ((cur_target_addr & effective_page_mask) != 0) {
7247 return 0;
7248 }
7249
7250 if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7251 return 0;
7252 }
7253
7254 if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7255 return 0;
7256 }
7257
7258 if (entry->is_sub_map) {
7259 return 0;
7260 }
7261
7262 object = VME_OBJECT(entry);
7263 if (object == VM_OBJECT_NULL) {
7264 return 0;
7265 }
7266
7267 object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7268
7269 while (TRUE) {
7270 if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7271 return 0;
7272 }
7273
7274 if (object->pager_created && (object->paging_in_progress ||
7275 object->activity_in_progress)) {
7276 return 0;
7277 }
7278
7279 m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7280
7281 if (m != VM_PAGE_NULL) {
7282 if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
7283 return 0;
7284 }
7285
7286 if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || m->vmp_error || m->vmp_cleaning ||
7287 m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7288 return 0;
7289 }
7290
7291 assert(!m->vmp_private);
7292 if (m->vmp_private) {
7293 return 0;
7294 }
7295
7296 assert(!m->vmp_fictitious);
7297 if (m->vmp_fictitious) {
7298 return 0;
7299 }
7300
7301 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7302 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7303 return 0;
7304 }
7305
7306 return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7307 }
7308
7309 compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7310
7311 if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7312 if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7313 kr = vm_compressor_pager_get(object->pager,
7314 vm_object_trunc_page(object_offset + object->paging_offset),
7315 kdp_compressor_decompressed_page_ppnum, &my_fault_type,
7316 compressor_flags, &compressed_count_delta);
7317 if (kr == KERN_SUCCESS) {
7318 return kdp_compressor_decompressed_page_paddr;
7319 } else {
7320 return 0;
7321 }
7322 }
7323 }
7324
7325 if (object->shadow == VM_OBJECT_NULL) {
7326 return 0;
7327 }
7328
7329 object_offset += object->vo_shadow_offset;
7330 object = object->shadow;
7331 }
7332 }
7333
7334 /*
7335 * vm_page_validate_cs_fast():
7336 * Performs a few quick checks to determine if the page's code signature
7337 * really needs to be fully validated. It could:
7338 * 1. have been modified (i.e. automatically tainted),
7339 * 2. have already been validated,
7340 * 3. have already been found to be tainted,
7341 * 4. no longer have a backing store.
7342 * Returns FALSE if the page needs to be fully validated.
7343 */
7344 static boolean_t
7345 vm_page_validate_cs_fast(
7346 vm_page_t page,
7347 vm_map_size_t fault_page_size,
7348 vm_map_offset_t fault_phys_offset)
7349 {
7350 vm_object_t object;
7351
7352 object = VM_PAGE_OBJECT(page);
7353 vm_object_lock_assert_held(object);
7354
7355 if (page->vmp_wpmapped &&
7356 !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7357 /*
7358 * This page was mapped for "write" access sometime in the
7359 * past and could still be modifiable in the future.
7360 * Consider it tainted.
7361 * [ If the page was already found to be "tainted", no
7362 * need to re-validate. ]
7363 */
7364 vm_object_lock_assert_exclusive(object);
7365 VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7366 VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7367 if (cs_debug) {
7368 printf("CODESIGNING: %s: "
7369 "page %p obj %p off 0x%llx "
7370 "was modified\n",
7371 __FUNCTION__,
7372 page, object, page->vmp_offset);
7373 }
7374 vm_cs_validated_dirtied++;
7375 }
7376
7377 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7378 VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7379 return TRUE;
7380 }
7381 vm_object_lock_assert_exclusive(object);
7382
7383 #if CHECK_CS_VALIDATION_BITMAP
7384 kern_return_t kr;
7385
7386 kr = vnode_pager_cs_check_validation_bitmap(
7387 object->pager,
7388 page->vmp_offset + object->paging_offset,
7389 CS_BITMAP_CHECK);
7390 if (kr == KERN_SUCCESS) {
7391 page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7392 page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7393 vm_cs_bitmap_validated++;
7394 return TRUE;
7395 }
7396 #endif /* CHECK_CS_VALIDATION_BITMAP */
7397
7398 if (!object->alive || object->terminating || object->pager == NULL) {
7399 /*
7400 * The object is terminating and we don't have its pager
7401 * so we can't validate the data...
7402 */
7403 return TRUE;
7404 }
7405
7406 /* we need to really validate this page */
7407 vm_object_lock_assert_exclusive(object);
7408 return FALSE;
7409 }
7410
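/*
 * vm_page_validate_cs_mapped_slow:
 * Validate the code signature of a page that is already mapped at
 * "kaddr": ask the vnode pager to check the page's hash and record
 * the validated/tainted/nx results in the page's code-signing bits.
 */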
7411 void
7412 vm_page_validate_cs_mapped_slow(
7413 vm_page_t page,
7414 const void *kaddr)
7415 {
7416 vm_object_t object;
7417 memory_object_offset_t mo_offset;
7418 memory_object_t pager;
7419 struct vnode *vnode;
7420 int validated, tainted, nx;
7421
7422 assert(page->vmp_busy);
7423 object = VM_PAGE_OBJECT(page);
7424 vm_object_lock_assert_exclusive(object);
7425
7426 vm_cs_validates++;
7427
7428 /*
7429 * Since we get here to validate a page that was brought in by
7430 * the pager, we know that this pager is all set up and ready
7431 * by now.
7432 */
7433 assert(object->code_signed);
7434 assert(!object->internal);
7435 assert(object->pager != NULL);
7436 assert(object->pager_ready);
7437
7438 pager = object->pager;
7439 assert(object->paging_in_progress);
7440 vnode = vnode_pager_lookup_vnode(pager);
7441 mo_offset = page->vmp_offset + object->paging_offset;
7442
7443 /* verify the code-signing hash for this page */
7444 validated = 0;
7445 tainted = 0;
7446 nx = 0;
7447 cs_validate_page(vnode,
7448 pager,
7449 mo_offset,
7450 (const void *)((const char *)kaddr),
7451 &validated,
7452 &tainted,
7453 &nx);
7454
7455 page->vmp_cs_validated |= validated;
7456 page->vmp_cs_tainted |= tainted;
7457 page->vmp_cs_nx |= nx;
7458
7459 #if CHECK_CS_VALIDATION_BITMAP
7460 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
7461 page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
7462 vnode_pager_cs_check_validation_bitmap(object->pager,
7463 mo_offset,
7464 CS_BITMAP_SET);
7465 }
7466 #endif /* CHECK_CS_VALIDATION_BITMAP */
7467 }
7468
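/*
 * vm_page_validate_cs_mapped:
 * Validate an already-mapped page's code signature, but only if the
 * quick checks in vm_page_validate_cs_fast() say a full validation
 * is still needed.
 */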
7469 void
7470 vm_page_validate_cs_mapped(
7471 vm_page_t page,
7472 vm_map_size_t fault_page_size,
7473 vm_map_offset_t fault_phys_offset,
7474 const void *kaddr)
7475 {
7476 if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7477 vm_page_validate_cs_mapped_slow(page, kaddr);
7478 }
7479 }
7480
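/*
 * vm_page_map_and_validate_cs:
 * Validate a page that is not currently mapped in the kernel: keep
 * the page busy and the object's paging in progress, map the page
 * into the kernel address space, run the slow validation, then undo
 * the temporary mapping.
 */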
7481 static void
7482 vm_page_map_and_validate_cs(
7483 vm_object_t object,
7484 vm_page_t page)
7485 {
7486 vm_object_offset_t offset;
7487 vm_map_offset_t koffset;
7488 vm_map_size_t ksize;
7489 vm_offset_t kaddr;
7490 kern_return_t kr;
7491 boolean_t busy_page;
7492 boolean_t need_unmap;
7493
7494 vm_object_lock_assert_exclusive(object);
7495
7496 assert(object->code_signed);
7497 offset = page->vmp_offset;
7498
7499 busy_page = page->vmp_busy;
7500 if (!busy_page) {
7501 /* keep page busy while we map (and unlock) the VM object */
7502 page->vmp_busy = TRUE;
7503 }
7504
7505 /*
7506 * Take a paging reference on the VM object
7507 * to protect it from collapse or bypass,
7508 * and keep it from disappearing too.
7509 */
7510 vm_object_paging_begin(object);
7511
7512 /* map the page in the kernel address space */
7513 ksize = PAGE_SIZE_64;
7514 koffset = 0;
7515 need_unmap = FALSE;
7516 kr = vm_paging_map_object(page,
7517 object,
7518 offset,
7519 VM_PROT_READ,
7520 FALSE, /* can't unlock object ! */
7521 &ksize,
7522 &koffset,
7523 &need_unmap);
7524 if (kr != KERN_SUCCESS) {
7525 panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
7526 }
7527 kaddr = CAST_DOWN(vm_offset_t, koffset);
7528
7529 /* validate the mapped page */
7530 vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
7531
7532 assert(page->vmp_busy);
7533 assert(object == VM_PAGE_OBJECT(page));
7534 vm_object_lock_assert_exclusive(object);
7535
7536 if (!busy_page) {
7537 PAGE_WAKEUP_DONE(page);
7538 }
7539 if (need_unmap) {
7540 /* unmap the map from the kernel address space */
7541 vm_paging_unmap_object(object, koffset, koffset + ksize);
7542 koffset = 0;
7543 ksize = 0;
7544 kaddr = 0;
7545 }
7546 vm_object_paging_end(object);
7547 }
7548
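/*
 * vm_page_validate_cs:
 * Main entry point for validating a page's code signature: try the
 * fast path first and only map and validate the page when a full
 * check is still required.
 */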
7549 void
7550 vm_page_validate_cs(
7551 vm_page_t page,
7552 vm_map_size_t fault_page_size,
7553 vm_map_offset_t fault_phys_offset)
7554 {
7555 vm_object_t object;
7556
7557 object = VM_PAGE_OBJECT(page);
7558 vm_object_lock_assert_held(object);
7559
7560 if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7561 return;
7562 }
7563 vm_page_map_and_validate_cs(object, page);
7564 }
7565
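/*
 * vm_page_validate_cs_mapped_chunk:
 * Validate only a sub-range (chunk) of an already-mapped page against
 * its code signature and report, via the out parameters, whether that
 * chunk validated and whether it was found tainted.
 */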
7566 void
7567 vm_page_validate_cs_mapped_chunk(
7568 vm_page_t page,
7569 const void *kaddr,
7570 vm_offset_t chunk_offset,
7571 vm_size_t chunk_size,
7572 boolean_t *validated_p,
7573 unsigned *tainted_p)
7574 {
7575 vm_object_t object;
7576 vm_object_offset_t offset, offset_in_page;
7577 memory_object_t pager;
7578 struct vnode *vnode;
7579 boolean_t validated;
7580 unsigned tainted;
7581
7582 *validated_p = FALSE;
7583 *tainted_p = 0;
7584
7585 assert(page->vmp_busy);
7586 object = VM_PAGE_OBJECT(page);
7587 vm_object_lock_assert_exclusive(object);
7588
7589 assert(object->code_signed);
7590 offset = page->vmp_offset;
7591
7592 if (!object->alive || object->terminating || object->pager == NULL) {
7593 /*
7594 * The object is terminating and we don't have its pager
7595 * so we can't validate the data...
7596 */
7597 return;
7598 }
7599 /*
7600 * Since we get here to validate a page that was brought in by
7601 * the pager, we know that this pager is all set up and ready
7602 * by now.
7603 */
7604 assert(!object->internal);
7605 assert(object->pager != NULL);
7606 assert(object->pager_ready);
7607
7608 pager = object->pager;
7609 assert(object->paging_in_progress);
7610 vnode = vnode_pager_lookup_vnode(pager);
7611
7612 /* verify the signature for this chunk */
7613 offset_in_page = chunk_offset;
7614 assert(offset_in_page < PAGE_SIZE);
7615
7616 tainted = 0;
7617 validated = cs_validate_range(vnode,
7618 pager,
7619 (object->paging_offset +
7620 offset +
7621 offset_in_page),
7622 (const void *)((const char *)kaddr
7623 + offset_in_page),
7624 chunk_size,
7625 &tainted);
7626 if (validated) {
7627 *validated_p = TRUE;
7628 }
7629 if (tainted) {
7630 *tainted_p = tainted;
7631 }
7632 }
7633
7634 static void
7635 vm_rtfrecord_lock(void)
7636 {
7637 lck_spin_lock(&vm_rtfr_slock);
7638 }
7639
7640 static void
7641 vm_rtfrecord_unlock(void)
7642 {
7643 lck_spin_unlock(&vm_rtfr_slock);
7644 }
7645
7646 unsigned int
7647 vmrtfaultinfo_bufsz(void)
7648 {
7649 return vmrtf_num_records * sizeof(vm_rtfault_record_t);
7650 }
7651
7652 #include <kern/backtrace.h>
7653
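/*
 * vm_record_rtfault:
 * Record one real-time fault in the circular vm_rtf_records buffer:
 * capture the faulting thread's user PC via a single-frame backtrace,
 * plus the fault start time, duration, faulting address, fault type,
 * thread id and unique pid.
 */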
7654 __attribute__((noinline))
7655 static void
7656 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
7657 {
7658 uint64_t fend = mach_continuous_time();
7659
7660 uint64_t cfpc = 0;
7661 uint64_t ctid = cthread->thread_id;
7662 uint64_t cupid = get_current_unique_pid();
7663
7664 uintptr_t bpc = 0;
7665 errno_t btr = 0;
7666
7667 /*
7668 * Capture a single-frame backtrace. This extracts just the program
7669 * counter at the point of the fault, and should not use copyin to get
7670 * Rosetta save state.
7671 */
7672 struct backtrace_control ctl = {
7673 .btc_user_thread = cthread,
7674 .btc_user_copy = backtrace_user_copy_error,
7675 };
7676 unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
7677 if ((btr == 0) && (bfrs > 0)) {
7678 cfpc = bpc;
7679 }
7680
7681 assert((fstart != 0) && fend >= fstart);
7682 vm_rtfrecord_lock();
7683 assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
7684
7685 vmrtfrs.vmrtf_total++;
7686 vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
7687
7688 cvmr->rtfabstime = fstart;
7689 cvmr->rtfduration = fend - fstart;
7690 cvmr->rtfaddr = fault_vaddr;
7691 cvmr->rtfpc = cfpc;
7692 cvmr->rtftype = type_of_fault;
7693 cvmr->rtfupid = cupid;
7694 cvmr->rtftid = ctid;
7695
7696 if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
7697 vmrtfrs.vmrtfr_curi = 0;
7698 }
7699
7700 vm_rtfrecord_unlock();
7701 }
7702
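/*
 * vmrtf_extract:
 * Copy the recorded real-time fault records belonging to the given
 * unique pid (or all records, for root on DEVELOPMENT/DEBUG kernels)
 * into the caller's buffer, reporting how many were extracted and
 * whether we stopped early because the buffer filled up.
 */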
7703 int
7704 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
7705 {
7706 vm_rtfault_record_t *cvmrd = vrecords;
7707 size_t residue = vrecordsz;
7708 size_t numextracted = 0;
7709 boolean_t early_exit = FALSE;
7710
7711 vm_rtfrecord_lock();
7712
7713 for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
7714 if (residue < sizeof(vm_rtfault_record_t)) {
7715 early_exit = TRUE;
7716 break;
7717 }
7718
7719 if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
7720 #if DEVELOPMENT || DEBUG
7721 if (isroot == FALSE) {
7722 continue;
7723 }
7724 #else
7725 continue;
7726 #endif /* DEVELOPMENT || DEBUG */
7727 }
7728
7729 *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
7730 cvmrd++;
7731 residue -= sizeof(vm_rtfault_record_t);
7732 numextracted++;
7733 }
7734
7735 vm_rtfrecord_unlock();
7736
7737 *vmrtfrv = numextracted;
7738 return early_exit;
7739 }
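/*
 * Illustrative use (a sketch, not an existing caller): a consumer with
 * kernel context could size a buffer with vmrtfaultinfo_bufsz() and
 * drain its own records roughly like this:
 *
 *	unsigned long sz = vmrtfaultinfo_bufsz();
 *	void *buf = kalloc_data(sz, Z_WAITOK);
 *	unsigned long nrec = 0;
 *	(void)vmrtf_extract(get_current_unique_pid(), FALSE, sz, buf, &nrec);
 *	// ... consume nrec records ...
 *	kfree_data(buf, sz);
 */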
7740
7741 /*
7742 * Only allow one diagnosis to be in flight at a time, to avoid
7743 * creating too much additional memory usage.
7744 */
7745 static volatile uint_t vmtc_diagnosing;
7746 unsigned int vmtc_total = 0;
7747
7748 /*
7749 * Type used to update telemetry for the diagnosis counts.
7750 */
7751 CA_EVENT(vmtc_telemetry,
7752 CA_INT, vmtc_num_byte, /* number of corrupt bytes found */
7753 CA_BOOL, vmtc_undiagnosed, /* undiagnosed because more than 1 at a time */
7754 CA_BOOL, vmtc_not_eligible, /* the page didn't qualify */
7755 CA_BOOL, vmtc_copyin_fail, /* unable to copy in the page */
7756 CA_BOOL, vmtc_not_found, /* no corruption found even though CS failed */
7757 CA_BOOL, vmtc_one_bit_flip, /* single bit flip */
7758 CA_BOOL, vmtc_testing); /* caused on purpose by testing */
7759
7760 #if DEVELOPMENT || DEBUG
7761 /*
7762 * Buffers used to compare before/after page contents.
7763 * Stashed to aid when debugging crashes.
7764 */
7765 static size_t vmtc_last_buffer_size = 0;
7766 static uint64_t *vmtc_last_before_buffer = NULL;
7767 static uint64_t *vmtc_last_after_buffer = NULL;
7768
7769 /*
7770 * Needed to record corruptions due to testing.
7771 */
7772 static uintptr_t corruption_test_va = 0;
7773 #endif /* DEVELOPMENT || DEBUG */
7774
7775 /*
7776 * Stash a copy of data from a possibly corrupt page.
7777 */
7778 static uint64_t *
7779 vmtc_get_page_data(
7780 vm_map_offset_t code_addr,
7781 vm_page_t page)
7782 {
7783 uint64_t *buffer = NULL;
7784 addr64_t buffer_paddr;
7785 addr64_t page_paddr;
7786 extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
7787 uint_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
7788
7789 /*
7790 * Need an aligned buffer to do a physical copy.
7791 */
7792 if (kmem_alloc_aligned(kernel_map, (vm_offset_t *)&buffer, size, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
7793 return NULL;
7794 }
7795 buffer_paddr = kvtophys((vm_offset_t)buffer);
7796 page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
7797
7798 /* adjust the page start address if we need only 4K of a 16K page */
7799 if (size < PAGE_SIZE) {
7800 uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
7801 page_paddr += subpage_start;
7802 }
7803
7804 bcopy_phys(page_paddr, buffer_paddr, size);
7805 return buffer;
7806 }
7807
7808 /*
7809 * Set things up so we can diagnose a potential text page corruption.
7810 */
7811 static uint64_t *
7812 vmtc_text_page_diagnose_setup(
7813 vm_map_offset_t code_addr,
7814 vm_page_t page,
7815 CA_EVENT_TYPE(vmtc_telemetry) *event)
7816 {
7817 uint64_t *buffer = NULL;
7818
7819 /*
7820 * If another is being diagnosed, skip this one.
7821 */
7822 if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
7823 event->vmtc_undiagnosed = true;
7824 return NULL;
7825 }
7826
7827 /*
7828 * Get the contents of the corrupt page.
7829 */
7830 buffer = vmtc_get_page_data(code_addr, page);
7831 if (buffer == NULL) {
7832 event->vmtc_copyin_fail = true;
7833 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
7834 panic("Bad compare and swap in setup!");
7835 }
7836 return NULL;
7837 }
7838 return buffer;
7839 }
7840
7841 /*
7842 * Diagnose the text page by comparing its contents with
7843 * the one we've previously saved.
7844 */
7845 static void
7846 vmtc_text_page_diagnose(
7847 vm_map_offset_t code_addr,
7848 uint64_t *old_code_buffer,
7849 CA_EVENT_TYPE(vmtc_telemetry) *event)
7850 {
7851 uint64_t *new_code_buffer;
7852 size_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
7853 uint_t count = (uint_t)size / sizeof(uint64_t);
7854 uint_t diff_count = 0;
7855 bool bit_flip = false;
7856 uint_t b;
7857 uint64_t *new;
7858 uint64_t *old;
7859
7860 new_code_buffer = kalloc_data(size, Z_WAITOK);
7861 assert(new_code_buffer != NULL);
7862 if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
7863 /* copyin error, so undo things */
7864 event->vmtc_copyin_fail = true;
7865 goto done;
7866 }
7867
7868 new = new_code_buffer;
7869 old = old_code_buffer;
7870 for (; count-- > 0; ++new, ++old) {
7871 if (*new == *old) {
7872 continue;
7873 }
7874
7875 /*
7876 * On first diff, check for a single bit flip
7877 */
7878 if (diff_count == 0) {
7879 uint64_t x = (*new ^ *old);
7880 assert(x != 0);
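/*
 * x has exactly one bit set (i.e. is a power of two) iff
 * clearing its lowest set bit, x & (x - 1), yields zero,
 * e.g. x = 0x0000400000000000 -> x & (x - 1) == 0.
 */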
7881 if ((x & (x - 1)) == 0) {
7882 bit_flip = true;
7883 ++diff_count;
7884 continue;
7885 }
7886 }
7887
7888 /*
7889 * Count up the number of differing bytes.
7890 */
7891 for (b = 0; b < sizeof(uint64_t); ++b) {
7892 char *n = (char *)new;
7893 char *o = (char *)old;
7894 if (n[b] != o[b]) {
7895 ++diff_count;
7896 }
7897 }
7898 }
7899
7900 if (diff_count > 1) {
7901 bit_flip = false;
7902 }
7903
7904 if (diff_count == 0) {
7905 event->vmtc_not_found = true;
7906 } else {
7907 event->vmtc_num_byte = diff_count;
7908 }
7909 if (bit_flip) {
7910 event->vmtc_one_bit_flip = true;
7911 }
7912
7913 done:
7914 /*
7915 * Free up the code copy buffers, but save the last
7916 * set on development / debug kernels in case they
7917 * can provide evidence for debugging memory stomps.
7918 */
7919 #if DEVELOPMENT || DEBUG
7920 if (vmtc_last_before_buffer != NULL) {
7921 kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
7922 }
7923 if (vmtc_last_after_buffer != NULL) {
7924 kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
7925 }
7926 vmtc_last_before_buffer = old_code_buffer;
7927 vmtc_last_after_buffer = new_code_buffer;
7928 vmtc_last_buffer_size = size;
7929 #else /* DEVELOPMENT || DEBUG */
7930 kfree_data(new_code_buffer, size);
7931 kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
7932 #endif /* DEVELOPMENT || DEBUG */
7933
7934 /*
7935 * We're finished, so clear the diagnosing flag.
7936 */
7937 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
7938 panic("Bad compare and swap in diagnose!");
7939 }
7940 }
7941
7942 /*
7943 * For the given map, virt address, find the object, offset, and page.
7944 * This has to look up the map entry, verify protections, and walk any shadow chains.
7945 * If found, returns with the object locked.
7946 */
7947 static kern_return_t
7948 vmtc_revalidate_lookup(
7949 vm_map_t map,
7950 vm_map_offset_t vaddr,
7951 vm_object_t *ret_object,
7952 vm_object_offset_t *ret_offset,
7953 vm_page_t *ret_page)
7954 {
7955 vm_object_t object;
7956 vm_object_offset_t offset;
7957 vm_page_t page;
7958 kern_return_t kr = KERN_SUCCESS;
7959 uint8_t object_lock_type = OBJECT_LOCK_EXCLUSIVE;
7960 vm_map_version_t version;
7961 boolean_t wired;
7962 struct vm_object_fault_info fault_info = {};
7963 vm_map_t real_map = NULL;
7964 vm_prot_t prot;
7965 vm_object_t shadow;
7966
7967 /*
7968 * Find the object/offset for the given location/map.
7969 * Note this returns with the object locked.
7970 */
7971 restart:
7972 vm_map_lock_read(map);
7973 object = VM_OBJECT_NULL; /* in case we come around the restart path */
7974 kr = vm_map_lookup_locked(&map, vaddr, VM_PROT_READ,
7975 object_lock_type, &version, &object, &offset, &prot, &wired,
7976 &fault_info, &real_map, NULL);
7977 vm_map_unlock_read(map);
7978 if (real_map != NULL && real_map != map) {
7979 vm_map_unlock(real_map);
7980 }
7981
7982 /*
7983 * If there's no mapping here, or if we fail because the page
7984 * wasn't mapped executable, we can ignore this.
7985 */
7986 if (kr != KERN_SUCCESS ||
7987 object == NULL ||
7988 !(prot & VM_PROT_EXECUTE)) {
7989 kr = KERN_FAILURE;
7990 goto done;
7991 }
7992
7993 /*
7994 * Chase down any shadow chains to find the actual page.
7995 */
7996 for (;;) {
7997 /*
7998 * See if the page is on the current object.
7999 */
8000 page = vm_page_lookup(object, vm_object_trunc_page(offset));
8001 if (page != NULL) {
8002 /* restart the lookup */
8003 if (page->vmp_restart) {
8004 vm_object_unlock(object);
8005 goto restart;
8006 }
8007
8008 /*
8009 * If this page is busy, we need to wait for it.
8010 */
8011 if (page->vmp_busy) {
8012 PAGE_SLEEP(object, page, TRUE);
8013 vm_object_unlock(object);
8014 goto restart;
8015 }
8016 break;
8017 }
8018
8019 /*
8020 * If the object doesn't have the page and
8021 * has no shadow, then we can quit.
8022 */
8023 shadow = object->shadow;
8024 if (shadow == NULL) {
8025 kr = KERN_FAILURE;
8026 goto done;
8027 }
8028
8029 /*
8030 * Move to the next object
8031 */
8032 offset += object->vo_shadow_offset;
8033 vm_object_lock(shadow);
8034 vm_object_unlock(object);
8035 object = shadow;
8036 shadow = VM_OBJECT_NULL;
8037 }
8038 *ret_object = object;
8039 *ret_offset = vm_object_trunc_page(offset);
8040 *ret_page = page;
8041
8042 done:
8043 if (kr != KERN_SUCCESS && object != NULL) {
8044 vm_object_unlock(object);
8045 }
8046 return kr;
8047 }
8048
8049 /*
8050 * Check if a page is wired, needs extra locking.
8051 */
8052 static bool
8053 is_page_wired(vm_page_t page)
8054 {
8055 bool result;
8056 vm_page_lock_queues();
8057 result = VM_PAGE_WIRED(page);
8058 vm_page_unlock_queues();
8059 return result;
8060 }
8061
8062 /*
8063 * A fatal process error has occurred in the given task.
8064 * Recheck the code signature of the text page at the given
8065 * address to check for text page corruption.
8066 *
8067 * Returns KERN_FAILURE if a page was found to be corrupt
8068 * by failing to match its code signature. KERN_SUCCESS
8069 * means the page is either valid or we don't have the
8070 * information to say it's corrupt.
8071 */
8072 kern_return_t
8073 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8074 {
8075 kern_return_t kr;
8076 vm_map_t map;
8077 vm_object_t object = NULL;
8078 vm_object_offset_t offset;
8079 vm_page_t page = NULL;
8080 struct vnode *vnode;
8081 uint64_t *diagnose_buffer = NULL;
8082 CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8083 ca_event_t ca_event = NULL;
8084
8085 map = task->map;
8086 if (task->map == NULL) {
8087 return KERN_SUCCESS;
8088 }
8089
8090 kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page);
8091 if (kr != KERN_SUCCESS) {
8092 goto done;
8093 }
8094
8095 /*
8096 * The object needs to have a pager.
8097 */
8098 if (object->pager == NULL) {
8099 goto done;
8100 }
8101
8102 /*
8103 * Needs to be a vnode backed page to have a signature.
8104 */
8105 vnode = vnode_pager_lookup_vnode(object->pager);
8106 if (vnode == NULL) {
8107 goto done;
8108 }
8109
8110 /*
8111 * Check the object to see if we should proceed.
8112 */
8113 if (!object->code_signed || /* no code signature to check */
8114 object->internal || /* internal objects aren't signed */
8115 object->terminating || /* the object and its pages are already going away */
8116 !object->pager_ready) { /* pager should be ready by now, but checking doesn't hurt */
8117 goto done;
8118 }
8119
8120 /*
8121 * Check the code signature of the page in question.
8122 */
8123 vm_page_map_and_validate_cs(object, page);
8124
8125 /*
8126 * At this point:
8127 * vmp_cs_validated |= validated (set if a code signature exists)
8128 * vmp_cs_tainted |= tainted (set if the code signature check failed)
8129 * vmp_cs_nx |= nx
8130 *
8131 * If the page is pmapped, it has to be pmap_disconnect()ed
8132 * before it can be freed below.
8133 */
8134 if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8135 #if DEBUG || DEVELOPMENT
8136 /*
8137 * On development builds, a boot-arg can be used to cause
8138 * a panic, instead of a quiet repair.
8139 */
8140 if (vmtc_panic_instead) {
8141 panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8142 }
8143 #endif /* DEBUG || DEVELOPMENT */
8144
8145 /*
8146 * We're going to invalidate this page. Grab a copy of it for comparison.
8147 */
8148 ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8149 event = ca_event->data;
8150 diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8151
8152 /*
8153 * Invalidate, i.e. toss, the corrupted page.
8154 */
8155 if (!page->vmp_cleaning &&
8156 !page->vmp_laundry &&
8157 !page->vmp_fictitious &&
8158 !page->vmp_precious &&
8159 !page->vmp_absent &&
8160 !page->vmp_error &&
8161 !page->vmp_dirty &&
8162 !is_page_wired(page)) {
8163 if (page->vmp_pmapped) {
8164 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8165 if (refmod & VM_MEM_MODIFIED) {
8166 SET_PAGE_DIRTY(page, FALSE);
8167 }
8168 if (refmod & VM_MEM_REFERENCED) {
8169 page->vmp_reference = TRUE;
8170 }
8171 }
8172 /* If the page seems intentionally modified, don't trash it. */
8173 if (!page->vmp_dirty) {
8174 VM_PAGE_FREE(page);
8175 } else {
8176 event->vmtc_not_eligible = true;
8177 }
8178 } else {
8179 event->vmtc_not_eligible = true;
8180 }
8181 vm_object_unlock(object);
8182 object = VM_OBJECT_NULL;
8183
8184 /*
8185 * Now try to diagnose the type of failure by faulting
8186 * in a new copy and diff'ing it with what we saved.
8187 */
8188 if (diagnose_buffer != NULL) {
8189 vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8190 }
8191 #if DEBUG || DEVELOPMENT
8192 if (corruption_test_va != 0) {
8193 corruption_test_va = 0;
8194 event->vmtc_testing = true;
8195 }
8196 #endif /* DEBUG || DEVELOPMENT */
8197 kernel_triage_record(thread_tid(current_thread()),
8198 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8199 0 /* arg */);
8200 CA_EVENT_SEND(ca_event);
8201 printf("Text page corruption detected for pid %d\n", proc_selfpid());
8202 ++vmtc_total;
8203 return KERN_FAILURE;
8204 }
8205 done:
8206 if (object != NULL) {
8207 vm_object_unlock(object);
8208 }
8209 return KERN_SUCCESS;
8210 }
8211
8212 #if DEBUG || DEVELOPMENT
8213 /*
8214 * For implementing unit tests: ask the pmap to corrupt a text page.
8215 * We have to find the page to get its physical address, then invoke
8216 * the pmap.
8217 */
8218 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8219
8220 kern_return_t
8221 vm_corrupt_text_addr(uintptr_t va)
8222 {
8223 task_t task = current_task();
8224 vm_map_t map;
8225 kern_return_t kr = KERN_SUCCESS;
8226 vm_object_t object = VM_OBJECT_NULL;
8227 vm_object_offset_t offset;
8228 vm_page_t page = NULL;
8229 pmap_paddr_t pa;
8230
8231 map = task->map;
8232 if (task->map == NULL) {
8233 printf("corrupt_text_addr: no map\n");
8234 return KERN_FAILURE;
8235 }
8236
8237 kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page);
8238 if (kr != KERN_SUCCESS) {
8239 printf("corrupt_text_addr: page lookup failed\n");
8240 return kr;
8241 }
8242 /* get the physical address to use */
8243 pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
8244
8245 /*
8246 * Check we have something we can work with.
8247 * Due to racing with pageout as we enter the sysctl,
8248 * it's theoretically possible to have the page disappear, just
8249 * before the lookup.
8250 *
8251 * That's unlikely, but possible. Radar 72857482 tracks bubbling up
8252 * the error here to the sysctl result so the test doesn't FAIL
8253 * in that case.
8254 */
8255 if (page->vmp_busy) {
8256 printf("corrupt_text_addr: vmp_busy\n");
8257 kr = KERN_FAILURE;
8258 }
8259 if (page->vmp_cleaning) {
8260 printf("corrupt_text_addr: vmp_cleaning\n");
8261 kr = KERN_FAILURE;
8262 }
8263 if (page->vmp_laundry) {
8264 printf("corrupt_text_addr: vmp_cleaning\n");
8265 kr = KERN_FAILURE;
8266 }
8267 if (page->vmp_fictitious) {
8268 printf("corrupt_text_addr: vmp_fictitious\n");
8269 kr = KERN_FAILURE;
8270 }
8271 if (page->vmp_precious) {
8272 printf("corrupt_text_addr: vmp_precious\n");
8273 kr = KERN_FAILURE;
8274 }
8275 if (page->vmp_absent) {
8276 printf("corrupt_text_addr: vmp_absent\n");
8277 kr = KERN_FAILURE;
8278 }
8279 if (page->vmp_error) {
8280 printf("corrupt_text_addr: vmp_error\n");
8281 kr = KERN_FAILURE;
8282 }
8283 if (page->vmp_dirty) {
8284 printf("corrupt_text_addr: vmp_dirty\n");
8285 kr = KERN_FAILURE;
8286 }
8287 if (is_page_wired(page)) {
8288 printf("corrupt_text_addr: wired\n");
8289 kr = KERN_FAILURE;
8290 }
8291 if (!page->vmp_pmapped) {
8292 printf("corrupt_text_addr: !vmp_pmapped\n");
8293 kr = KERN_FAILURE;
8294 }
8295
8296 if (kr == KERN_SUCCESS) {
8297 printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8298 kr = pmap_test_text_corruption(pa);
8299 if (kr != KERN_SUCCESS) {
8300 printf("corrupt_text_addr: pmap error %d\n", kr);
8301 } else {
8302 corruption_test_va = va;
8303 }
8304 } else {
8305 printf("corrupt_text_addr: object %p\n", object);
8306 printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8307 printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8308 printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8309 printf("corrupt_text_addr: vm_page_t %p\n", page);
8310 printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8311 printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8312 }
8313
8314 if (object != VM_OBJECT_NULL) {
8315 vm_object_unlock(object);
8316 }
8317 return kr;
8318 }
8319 #endif /* DEBUG || DEVELOPMENT */
8320