1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm_fault.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Page fault handling module.
63 */
64
65 #include <libkern/OSAtomic.h>
66
67 #include <mach/mach_types.h>
68 #include <mach/kern_return.h>
69 #include <mach/message.h> /* for error codes */
70 #include <mach/vm_param.h>
71 #include <mach/vm_behavior.h>
72 #include <mach/memory_object.h>
73 /* For memory_object_data_{request,unlock} */
74 #include <mach/sdt.h>
75
76 #include <kern/kern_types.h>
77 #include <kern/host_statistics.h>
78 #include <kern/counter.h>
79 #include <kern/task.h>
80 #include <kern/thread.h>
81 #include <kern/sched_prim.h>
82 #include <kern/host.h>
83 #include <kern/mach_param.h>
84 #include <kern/macro_help.h>
85 #include <kern/zalloc_internal.h>
86 #include <kern/misc_protos.h>
87 #include <kern/policy_internal.h>
88
89 #include <vm/vm_compressor.h>
90 #include <vm/vm_compressor_pager.h>
91 #include <vm/vm_fault.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_kern.h>
96 #include <vm/pmap.h>
97 #include <vm/vm_pageout.h>
98 #include <vm/vm_protos.h>
99 #include <vm/vm_external.h>
100 #include <vm/memory_object.h>
101 #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
102 #include <vm/vm_shared_region.h>
103
104 #include <sys/codesign.h>
105 #include <sys/code_signing.h>
106 #include <sys/reason.h>
107 #include <sys/signalvar.h>
108
109 #include <sys/kdebug_triage.h>
110
111 #include <san/kasan.h>
112 #include <libkern/coreanalytics/coreanalytics.h>
113
114 #define VM_FAULT_CLASSIFY 0
115
116 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
117
118 int vm_protect_privileged_from_untrusted = 1;
119
120 unsigned int vm_object_pagein_throttle = 16;
121
122 /*
123 * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control;
124 * it kicks in when swap space runs out. 64-bit programs have massive address spaces and, if they're buggy,
125 * can leak enormous amounts of memory and run the system completely out of swap space. If this happens, we
126 * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps
127 * keep the UI active so that the user has a chance to kill the offending task before the system
128 * completely hangs.
129 *
130 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
131 * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold
132 * will be throttled. The throttling is done by giving the thread that's trying to demand-zero a page a
133 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
134 */
135
136 extern void throttle_lowpri_io(int);
137
138 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
139
140 uint64_t vm_hard_throttle_threshold;
141
142 #if DEBUG || DEVELOPMENT
143 static bool vmtc_panic_instead = false;
144 int panic_object_not_alive = 1;
145 #endif /* DEBUG || DEVELOPMENT */
146
147 OS_ALWAYS_INLINE
148 boolean_t
149 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
150 {
151 return vm_wants_task_throttled(current_task()) ||
152 ((vm_page_free_count < vm_page_throttle_limit ||
153 HARD_THROTTLE_LIMIT_REACHED()) &&
154 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
155 }
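/*
 * Reading of the predicate above: a task is hard-throttled either because
 * the VM explicitly wants it throttled (vm_wants_task_throttled()), or
 * because free pages have dropped below vm_page_throttle_limit (or the
 * hard-throttle limit has been reached) while the current thread's
 * effective I/O policy is already THROTTLE_LEVEL_THROTTLED or worse.
 */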
156
157 #define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
158 #define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */
159
160 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6
161 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000
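/*
 * Worked example from the two settings above: a thread becomes a throttle
 * candidate once it has created more than 6 * 20000 = 120,000 pages within
 * the tracking window, and it stays throttled while its creation rate
 * remains at or above 20,000 pages/sec (see vm_page_throttled() below).
 */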
162
163
164 #define VM_STAT_DECOMPRESSIONS() \
165 MACRO_BEGIN \
166 counter_inc(&vm_statistics_decompressions); \
167 current_thread()->decompressions++; \
168 MACRO_END
169
170 boolean_t current_thread_aborted(void);
171
172 /* Forward declarations of internal routines. */
173 static kern_return_t vm_fault_wire_fast(
174 vm_map_t map,
175 vm_map_offset_t va,
176 vm_prot_t prot,
177 vm_tag_t wire_tag,
178 vm_map_entry_t entry,
179 pmap_t pmap,
180 vm_map_offset_t pmap_addr,
181 ppnum_t *physpage_p);
182
183 static kern_return_t vm_fault_internal(
184 vm_map_t map,
185 vm_map_offset_t vaddr,
186 vm_prot_t caller_prot,
187 boolean_t change_wiring,
188 vm_tag_t wire_tag,
189 int interruptible,
190 pmap_t pmap,
191 vm_map_offset_t pmap_addr,
192 ppnum_t *physpage_p);
193
194 static void vm_fault_copy_cleanup(
195 vm_page_t page,
196 vm_page_t top_page);
197
198 static void vm_fault_copy_dst_cleanup(
199 vm_page_t page);
200
201 #if VM_FAULT_CLASSIFY
202 extern void vm_fault_classify(vm_object_t object,
203 vm_object_offset_t offset,
204 vm_prot_t fault_type);
205
206 extern void vm_fault_classify_init(void);
207 #endif
208
209 unsigned long vm_pmap_enter_blocked = 0;
210 unsigned long vm_pmap_enter_retried = 0;
211
212 unsigned long vm_cs_validates = 0;
213 unsigned long vm_cs_revalidates = 0;
214 unsigned long vm_cs_query_modified = 0;
215 unsigned long vm_cs_validated_dirtied = 0;
216 unsigned long vm_cs_bitmap_validated = 0;
217
218 #if CODE_SIGNING_MONITOR
219 uint64_t vm_cs_defer_to_csm = 0;
220 uint64_t vm_cs_defer_to_csm_not = 0;
221 #endif /* CODE_SIGNING_MONITOR */
222
223 void vm_pre_fault(vm_map_offset_t, vm_prot_t);
224
225 extern char *kdp_compressor_decompressed_page;
226 extern addr64_t kdp_compressor_decompressed_page_paddr;
227 extern ppnum_t kdp_compressor_decompressed_page_ppnum;
228
229 struct vmrtfr {
230 int vmrtfr_maxi;
231 int vmrtfr_curi;
232 int64_t vmrtf_total;
233 vm_rtfault_record_t *vm_rtf_records;
234 } vmrtfrs;
235 #define VMRTF_DEFAULT_BUFSIZE (4096)
236 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
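/*
 * Sizing sketch: the default record count is one 4 KB buffer's worth of
 * vm_rtfault_record_t entries, i.e. 4096 / sizeof(vm_rtfault_record_t);
 * for an illustrative (not actual) 64-byte record that would be 64
 * records. The "vm_rtfault_records" boot-arg below overrides the count.
 */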
237 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
238
239 static void vm_rtfrecord_lock(void);
240 static void vm_rtfrecord_unlock(void);
241 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
242
243 extern lck_grp_t vm_page_lck_grp_bucket;
244 extern lck_attr_t vm_page_lck_attr;
245 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
246
247 #if DEVELOPMENT || DEBUG
248 extern int madvise_free_debug;
249 extern int madvise_free_debug_sometimes;
250 #endif /* DEVELOPMENT || DEBUG */
251
252 extern int vm_pageout_protect_realtime;
253
254 #if CONFIG_FREEZE
255 #endif /* CONFIG_FREEZE */
256
257 /*
258 * Routine: vm_fault_init
259 * Purpose:
260 * Initialize our private data structures.
261 */
262 __startup_func
263 void
264 vm_fault_init(void)
265 {
266 int i, vm_compressor_temp;
267 boolean_t need_default_val = TRUE;
268 /*
269 * Choose a value for the hard throttle threshold based on the amount of RAM. The threshold is
270 * computed as a percentage of available memory, and the percentage used is scaled inversely with
271 * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems
272 * and reduce the value down to 10% for very large memory configurations. This helps give us a
273 * definition of a memory hog that makes more sense relative to the amount of RAM in the machine.
274 * The formula here simply uses the number of gigabytes of RAM to adjust the percentage.
275 */
276
277 vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
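/*
 * Worked examples of the formula above: an 8 GB system uses
 * 35 - MIN(8, 25) = 27%, i.e. a threshold of about 2.16 GB; a 32 GB
 * system bottoms out at 35 - 25 = 10%, i.e. about 3.2 GB.
 */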
278
279 /*
280 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
281 */
282
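/*
 * Note on the mode check below (follows from the bit test it performs):
 * only a value with exactly one mode bit set is accepted, e.g.
 * vm_compressor=4; a value such as 6 (two bits set) fails the
 * (temp & (1 << i)) == temp test for every i and is ignored.
 */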
283 if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
284 for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
285 if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
286 need_default_val = FALSE;
287 vm_compressor_mode = vm_compressor_temp;
288 break;
289 }
290 }
291 if (need_default_val) {
292 printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
293 }
294 }
295 #if CONFIG_FREEZE
296 if (need_default_val) {
297 if (osenvironment_is_diagnostics()) {
298 printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
299 vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
300 need_default_val = false;
301 }
302 }
303 #endif /* CONFIG_FREEZE */
304 if (need_default_val) {
305 /* If no boot arg or incorrect boot arg, try device tree. */
306 PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
307 }
308 printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
309 vm_config_init();
310
311 PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
312 &vm_protect_privileged_from_untrusted,
313 sizeof(vm_protect_privileged_from_untrusted));
314
315 #if DEBUG || DEVELOPMENT
316 (void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
317
318 if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
319 madvise_free_debug = 0;
320 madvise_free_debug_sometimes = 0;
321 }
322
323 PE_parse_boot_argn("panic_object_not_alive", &panic_object_not_alive, sizeof(panic_object_not_alive));
324 #endif /* DEBUG || DEVELOPMENT */
325 }
326
327 __startup_func
328 static void
329 vm_rtfault_record_init(void)
330 {
331 size_t size;
332
333 vmrtf_num_records = MAX(vmrtf_num_records, 1);
334 size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
335 vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
336 ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
337 vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
338 }
339 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
340
341 /*
342 * Routine: vm_fault_cleanup
343 * Purpose:
344 * Clean up the result of vm_fault_page.
345 * Results:
346 * The paging reference for "object" is released.
347 * "object" is unlocked.
348 * If "top_page" is not null, "top_page" is
349 * freed and the paging reference for the object
350 * containing it is released.
351 *
352 * In/out conditions:
353 * "object" must be locked.
354 */
355 void
356 vm_fault_cleanup(
357 vm_object_t object,
358 vm_page_t top_page)
359 {
360 vm_object_paging_end(object);
361 vm_object_unlock(object);
362
363 if (top_page != VM_PAGE_NULL) {
364 object = VM_PAGE_OBJECT(top_page);
365
366 vm_object_lock(object);
367 VM_PAGE_FREE(top_page);
368 vm_object_paging_end(object);
369 vm_object_unlock(object);
370 }
371 }
372
373 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
374
375
376 boolean_t vm_page_deactivate_behind = TRUE;
377 /*
378 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
379 */
380 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128
381 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */
382 /* we use it to size an array on the stack */
383
384 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
385
386 #define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
387
388 /*
389 * vm_fault_is_sequential
390 *
391 * Determine if sequential access is in progress
392 * in accordance with the behavior specified.
393 * Update state to indicate current access pattern.
394 *
395 * object must have at least the shared lock held
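*
* Example (tracing the DEFAULT case below): successive faults at offsets
* 0x1000, 0x2000, 0x3000 (assuming 4 KB pages) each find last_alloc equal
* to offset - PAGE_SIZE, so "sequential" grows by PAGE_SIZE per fault up
* to the MAX_SEQUENTIAL_RUN cap; a non-adjacent fault resets it to 0.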
396 */
397 static
398 void
399 vm_fault_is_sequential(
400 vm_object_t object,
401 vm_object_offset_t offset,
402 vm_behavior_t behavior)
403 {
404 vm_object_offset_t last_alloc;
405 int sequential;
406 int orig_sequential;
407
408 last_alloc = object->last_alloc;
409 sequential = object->sequential;
410 orig_sequential = sequential;
411
412 offset = vm_object_trunc_page(offset);
413 if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
414 /* re-faulting in the same page: no change in behavior */
415 return;
416 }
417
418 switch (behavior) {
419 case VM_BEHAVIOR_RANDOM:
420 /*
421 * reset indicator of sequential behavior
422 */
423 sequential = 0;
424 break;
425
426 case VM_BEHAVIOR_SEQUENTIAL:
427 if (offset && last_alloc == offset - PAGE_SIZE_64) {
428 /*
429 * advance indicator of sequential behavior
430 */
431 if (sequential < MAX_SEQUENTIAL_RUN) {
432 sequential += PAGE_SIZE;
433 }
434 } else {
435 /*
436 * reset indicator of sequential behavior
437 */
438 sequential = 0;
439 }
440 break;
441
442 case VM_BEHAVIOR_RSEQNTL:
443 if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
444 /*
445 * advance indicator of sequential behavior
446 */
447 if (sequential > -MAX_SEQUENTIAL_RUN) {
448 sequential -= PAGE_SIZE;
449 }
450 } else {
451 /*
452 * reset indicator of sequential behavior
453 */
454 sequential = 0;
455 }
456 break;
457
458 case VM_BEHAVIOR_DEFAULT:
459 default:
460 if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
461 /*
462 * advance indicator of sequential behavior
463 */
464 if (sequential < 0) {
465 sequential = 0;
466 }
467 if (sequential < MAX_SEQUENTIAL_RUN) {
468 sequential += PAGE_SIZE;
469 }
470 } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
471 /*
472 * advance indicator of sequential behavior
473 */
474 if (sequential > 0) {
475 sequential = 0;
476 }
477 if (sequential > -MAX_SEQUENTIAL_RUN) {
478 sequential -= PAGE_SIZE;
479 }
480 } else {
481 /*
482 * reset indicator of sequential behavior
483 */
484 sequential = 0;
485 }
486 break;
487 }
488 if (sequential != orig_sequential) {
489 if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
490 /*
491 * if someone else has already updated object->sequential
492 * don't bother trying to update it or object->last_alloc
493 */
494 return;
495 }
496 }
497 /*
498 * I'd like to do this with an OSCompareAndSwap64, but that
499 * doesn't exist for PPC... however, it shouldn't matter
500 * that much... last_alloc is maintained so that we can determine
501 * if a sequential access pattern is taking place... if only
502 * one thread is banging on this object, no problem with the unprotected
503 * update... if 2 or more threads are banging away, we run the risk of
504 * someone seeing a mangled update... however, in the face of multiple
505 * accesses, no sequential access pattern can develop anyway, so we
506 * haven't lost any real info.
507 */
508 object->last_alloc = offset;
509 }
510
511 #if DEVELOPMENT || DEBUG
512 uint64_t vm_page_deactivate_behind_count = 0;
513 #endif /* DEVELOPMENT || DEBUG */
514
515 /*
516 * vm_fault_deactivate_behind
517 *
518 * Determine if sequential access is in progress
519 * in accordance with the behavior specified. If
520 * so, compute a potential page to deactivate and
521 * deactivate it.
522 *
523 * object must be locked.
524 *
525 * return TRUE if we actually deactivate a page
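*
* Example (following the DEFAULT case below, with the default window of
* vm_default_behind = 128 pages): once a forward sequential run reaches
* 128 pages, each time the run length crosses a 16-page
* (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER) boundary, the 16 pages starting
* 128 pages behind the current offset are looked up and deactivated.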
526 */
527 static
528 boolean_t
529 vm_fault_deactivate_behind(
530 vm_object_t object,
531 vm_object_offset_t offset,
532 vm_behavior_t behavior)
533 {
534 int n;
535 int pages_in_run = 0;
536 int max_pages_in_run = 0;
537 int sequential_run;
538 int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
539 vm_object_offset_t run_offset = 0;
540 vm_object_offset_t pg_offset = 0;
541 vm_page_t m;
542 vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
543
544 pages_in_run = 0;
545 #if TRACEFAULTPAGE
546 dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
547 #endif
548 if (is_kernel_object(object) || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
549 /*
550 * Do not deactivate pages from the kernel object: they
551 * are not intended to become pageable.
552 * Also bail if we've disabled the deactivate-behind mechanism,
553 * or if we are dealing with an offset that is not aligned to
554 * the system's PAGE_SIZE, because in that case we will
555 * handle the deactivation on the aligned offset and, thus,
556 * the full PAGE_SIZE page once. This helps us avoid redundant
557 * deactivates and extra faults.
558 */
559 return FALSE;
560 }
561 if ((sequential_run = object->sequential)) {
562 if (sequential_run < 0) {
563 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
564 sequential_run = 0 - sequential_run;
565 } else {
566 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
567 }
568 }
569 switch (behavior) {
570 case VM_BEHAVIOR_RANDOM:
571 break;
572 case VM_BEHAVIOR_SEQUENTIAL:
573 if (sequential_run >= (int)PAGE_SIZE) {
574 run_offset = 0 - PAGE_SIZE_64;
575 max_pages_in_run = 1;
576 }
577 break;
578 case VM_BEHAVIOR_RSEQNTL:
579 if (sequential_run >= (int)PAGE_SIZE) {
580 run_offset = PAGE_SIZE_64;
581 max_pages_in_run = 1;
582 }
583 break;
584 case VM_BEHAVIOR_DEFAULT:
585 default:
586 { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
587
588 /*
589 * determine if the run of sequential access has been
590 * long enough on an object with default access behavior
591 * to consider it for deactivation
592 */
593 if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
594 /*
595 * the comparisons between offset and behind are done
596 * in this kind of odd fashion in order to prevent wrap around
597 * at the end points
598 */
599 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
600 if (offset >= behind) {
601 run_offset = 0 - behind;
602 pg_offset = PAGE_SIZE_64;
603 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
604 }
605 } else {
606 if (offset < -behind) {
607 run_offset = behind;
608 pg_offset = 0 - PAGE_SIZE_64;
609 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
610 }
611 }
612 }
613 break;}
614 }
615 for (n = 0; n < max_pages_in_run; n++) {
616 m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
617
618 if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache && (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && !m->vmp_fictitious && !m->vmp_absent) {
619 page_run[pages_in_run++] = m;
620
621 /*
622 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
623 *
624 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
625 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
626 * new reference happens. If no further references happen on the page after that remote TLB flushes
627 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
628 * by pageout_scan, which is just fine since the last reference would have happened quite far
629 * in the past (TLB caches don't hang around for very long), and of course could just as easily
630 * have happened before we did the deactivate_behind.
631 */
632 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
633 }
634 }
635 if (pages_in_run) {
636 vm_page_lockspin_queues();
637
638 for (n = 0; n < pages_in_run; n++) {
639 m = page_run[n];
640
641 vm_page_deactivate_internal(m, FALSE);
642
643 #if DEVELOPMENT || DEBUG
644 vm_page_deactivate_behind_count++;
645 #endif /* DEVELOPMENT || DEBUG */
646
647 #if TRACEFAULTPAGE
648 dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
649 #endif
650 }
651 vm_page_unlock_queues();
652
653 return TRUE;
654 }
655 return FALSE;
656 }
657
658
659 #if (DEVELOPMENT || DEBUG)
660 uint32_t vm_page_creation_throttled_hard = 0;
661 uint32_t vm_page_creation_throttled_soft = 0;
662 uint64_t vm_page_creation_throttle_avoided = 0;
663 #endif /* DEVELOPMENT || DEBUG */
664
665 static int
666 vm_page_throttled(boolean_t page_kept)
667 {
668 clock_sec_t elapsed_sec;
669 clock_sec_t tv_sec;
670 clock_usec_t tv_usec;
671 task_t curtask = current_task_early();
672
673 thread_t thread = current_thread();
674
675 if (thread->options & TH_OPT_VMPRIV) {
676 return 0;
677 }
678
679 if (curtask && !curtask->active) {
680 return 0;
681 }
682
683 if (thread->t_page_creation_throttled) {
684 thread->t_page_creation_throttled = 0;
685
686 if (page_kept == FALSE) {
687 goto no_throttle;
688 }
689 }
690 if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
691 #if (DEVELOPMENT || DEBUG)
692 thread->t_page_creation_throttled_hard++;
693 OSAddAtomic(1, &vm_page_creation_throttled_hard);
694 #endif /* DEVELOPMENT || DEBUG */
695 return HARD_THROTTLE_DELAY;
696 }
697
698 if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
699 thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
700 if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
701 #if (DEVELOPMENT || DEBUG)
702 OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
703 #endif
704 goto no_throttle;
705 }
706 clock_get_system_microtime(&tv_sec, &tv_usec);
707
708 elapsed_sec = tv_sec - thread->t_page_creation_time;
709
710 if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
711 (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
712 if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
713 /*
714 * we'll reset our stats to give a well-behaved app
715 * that was unlucky enough to accumulate a bunch of pages
716 * over a long period of time a chance to get out of
717 * the throttled state... we reset the counter and timestamp
718 * so that if it stays under the rate limit for the next second
719 * it will be back in our good graces... if it exceeds it, it
720 * will remain in the throttled state
721 */
722 thread->t_page_creation_time = tv_sec;
723 thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
724 }
725 VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
726
727 thread->t_page_creation_throttled = 1;
728
729 if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
730 #if (DEVELOPMENT || DEBUG)
731 thread->t_page_creation_throttled_hard++;
732 OSAddAtomic(1, &vm_page_creation_throttled_hard);
733 #endif /* DEVELOPMENT || DEBUG */
734 return HARD_THROTTLE_DELAY;
735 } else {
736 #if (DEVELOPMENT || DEBUG)
737 thread->t_page_creation_throttled_soft++;
738 OSAddAtomic(1, &vm_page_creation_throttled_soft);
739 #endif /* DEVELOPMENT || DEBUG */
740 return SOFT_THROTTLE_DELAY;
741 }
742 }
743 thread->t_page_creation_time = tv_sec;
744 thread->t_page_creation_count = 0;
745 }
746 no_throttle:
747 thread->t_page_creation_count++;
748
749 return 0;
750 }
751
752 extern boolean_t vm_pageout_running;
753 static __attribute__((noinline, not_tail_called)) void
754 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(
755 int throttle_delay)
756 {
757 /* make sure vm_pageout_scan() gets to work while we're throttled */
758 if (!vm_pageout_running) {
759 thread_wakeup((event_t)&vm_page_free_wanted);
760 }
761 delay(throttle_delay);
762 }
763
764
765 /*
766 * Check for various conditions that would
767 * prevent us from creating a ZF (zero-fill) page.
768 * The cleanup here assumes we were called from vm_fault_page.
769 *
770 * object must be locked
771 * object == m->vmp_object
772 */
773 static vm_fault_return_t
774 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
775 {
776 int throttle_delay;
777
778 if (object->shadow_severed ||
779 VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
780 /*
781 * Either:
782 * 1. the shadow chain was severed,
783 * 2. the purgeable object is volatile or empty and is marked
784 * to fault on access while volatile.
785 * Just have to return an error at this point
786 */
787 if (m != VM_PAGE_NULL) {
788 VM_PAGE_FREE(m);
789 }
790 vm_fault_cleanup(object, first_m);
791
792 thread_interrupt_level(interruptible_state);
793
794 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
795 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
796 }
797
798 if (object->shadow_severed) {
799 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
800 }
801 return VM_FAULT_MEMORY_ERROR;
802 }
803 if (page_throttle == TRUE) {
804 if ((throttle_delay = vm_page_throttled(FALSE))) {
805 /*
806 * we're throttling zero-fills...
807 * treat this as if we couldn't grab a page
808 */
809 if (m != VM_PAGE_NULL) {
810 VM_PAGE_FREE(m);
811 }
812 vm_fault_cleanup(object, first_m);
813
814 VM_DEBUG_EVENT(vmf_check_zfdelay, VMF_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
815
816 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
817
818 if (current_thread_aborted()) {
819 thread_interrupt_level(interruptible_state);
820 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
821 return VM_FAULT_INTERRUPTED;
822 }
823 thread_interrupt_level(interruptible_state);
824
825 return VM_FAULT_MEMORY_SHORTAGE;
826 }
827 }
828 return VM_FAULT_SUCCESS;
829 }
830
831 /*
832 * Clear the code signing bits on the given page_t
833 */
834 static void
835 vm_fault_cs_clear(vm_page_t m)
836 {
837 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
838 m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
839 m->vmp_cs_nx = VMP_CS_ALL_FALSE;
840 }
841
842 /*
843 * Enqueues the given page on the throttled queue.
844 * The caller must hold the vm_page_queue_lock and it will be held on return.
845 */
846 static void
847 vm_fault_enqueue_throttled_locked(vm_page_t m)
848 {
849 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
850 assert(!VM_PAGE_WIRED(m));
851
852 /*
853 * can't be on the pageout queue since we don't
854 * have a pager to try and clean to
855 */
856 vm_page_queues_remove(m, TRUE);
857 vm_page_check_pageable_safe(m);
858 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
859 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
860 vm_page_throttled_count++;
861 }
862
863 /*
864 * do the work to zero fill a page and
865 * inject it into the correct paging queue
866 *
867 * m->vmp_object must be locked
868 * page queue lock must NOT be held
869 */
870 static int
871 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
872 {
873 int my_fault = DBG_ZERO_FILL_FAULT;
874 vm_object_t object;
875
876 object = VM_PAGE_OBJECT(m);
877
878 /*
879 * This is a zero-fill page fault...
880 *
881 * Checking the page lock is a waste of
882 * time; this page was absent, so
883 * it can't be page locked by a pager.
884 *
885 * we also consider it undefined
886 * with respect to instruction
887 * execution. i.e. it is the responsibility
888 * of higher layers to call for an instruction
889 * sync after changing the contents and before
890 * sending a program into this area. We
891 * choose this approach for performance
892 */
893 vm_fault_cs_clear(m);
894 m->vmp_pmapped = TRUE;
895
896 if (no_zero_fill == TRUE) {
897 my_fault = DBG_NZF_PAGE_FAULT;
898
899 if (m->vmp_absent && m->vmp_busy) {
900 return my_fault;
901 }
902 } else {
903 vm_page_zero_fill(m);
904
905 counter_inc(&vm_statistics_zero_fill_count);
906 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
907 }
908 assert(!m->vmp_laundry);
909 assert(!is_kernel_object(object));
910 //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
911 if (!VM_DYNAMIC_PAGING_ENABLED() &&
912 (object->purgable == VM_PURGABLE_DENY ||
913 object->purgable == VM_PURGABLE_NONVOLATILE ||
914 object->purgable == VM_PURGABLE_VOLATILE)) {
915 vm_page_lockspin_queues();
916 if (!VM_DYNAMIC_PAGING_ENABLED()) {
917 vm_fault_enqueue_throttled_locked(m);
918 }
919 vm_page_unlock_queues();
920 }
921 return my_fault;
922 }
923
924
925 /*
926 * Routine: vm_fault_page
927 * Purpose:
928 * Find the resident page for the virtual memory
929 * specified by the given virtual memory object
930 * and offset.
931 * Additional arguments:
932 * The required permissions for the page are given
933 * in "fault_type". Desired permissions are included
934 * in "protection".
935 * fault_info is passed along to determine pagein cluster
936 * limits... it contains the expected reference pattern,
937 * cluster size if available, etc...
938 *
939 * If the desired page is known to be resident (for
940 * example, because it was previously wired down), asserting
941 * the "unwiring" parameter will speed the search.
942 *
943 * If the operation can be interrupted (by thread_abort
944 * or thread_terminate), then the "interruptible"
945 * parameter should be asserted.
946 *
947 * Results:
948 * The page containing the proper data is returned
949 * in "result_page".
950 *
951 * In/out conditions:
952 * The source object must be locked and referenced,
953 * and must donate one paging reference. The reference
954 * is not affected. The paging reference and lock are
955 * consumed.
956 *
957 * If the call succeeds, the object in which "result_page"
958 * resides is left locked and holding a paging reference.
959 * If this is not the original object, a busy page in the
960 * original object is returned in "top_page", to prevent other
961 * callers from pursuing this same data, along with a paging
962 * reference for the original object. The "top_page" should
963 * be destroyed when this guarantee is no longer required.
964 * The "result_page" is also left busy. It is not removed
965 * from the pageout queues.
966 * Special Case:
967 * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
968 * fault succeeded but there's no VM page (i.e. the VM object
969 * does not actually hold VM pages, but device memory or
970 * large pages). The object is still locked and we still hold a
971 * paging_in_progress reference.
972 */
973 unsigned int vm_fault_page_blocked_access = 0;
974 unsigned int vm_fault_page_forced_retry = 0;
975
976 vm_fault_return_t
977 vm_fault_page(
978 /* Arguments: */
979 vm_object_t first_object, /* Object to begin search */
980 vm_object_offset_t first_offset, /* Offset into object */
981 vm_prot_t fault_type, /* What access is requested */
982 boolean_t must_be_resident,/* Must page be resident? */
983 boolean_t caller_lookup, /* caller looked up page */
984 /* Modifies in place: */
985 vm_prot_t *protection, /* Protection for mapping */
986 vm_page_t *result_page, /* Page found, if successful */
987 /* Returns: */
988 vm_page_t *top_page, /* Page in top object, if
989 * not result_page. */
990 int *type_of_fault, /* if non-null, fill in with type of fault
991 * COW, zero-fill, etc... returned in trace point */
992 /* More arguments: */
993 kern_return_t *error_code, /* code if page is in error */
994 boolean_t no_zero_fill, /* don't zero fill absent pages */
995 vm_object_fault_info_t fault_info)
996 {
997 vm_page_t m;
998 vm_object_t object;
999 vm_object_offset_t offset;
1000 vm_page_t first_m;
1001 vm_object_t next_object;
1002 vm_object_t copy_object;
1003 boolean_t look_for_page;
1004 boolean_t force_fault_retry = FALSE;
1005 vm_prot_t access_required = fault_type;
1006 vm_prot_t wants_copy_flag;
1007 kern_return_t wait_result;
1008 wait_interrupt_t interruptible_state;
1009 boolean_t data_already_requested = FALSE;
1010 vm_behavior_t orig_behavior;
1011 vm_size_t orig_cluster_size;
1012 vm_fault_return_t error;
1013 int my_fault;
1014 uint32_t try_failed_count;
1015 int interruptible; /* how may fault be interrupted? */
1016 int external_state = VM_EXTERNAL_STATE_UNKNOWN;
1017 memory_object_t pager;
1018 vm_fault_return_t retval;
1019 int grab_options;
1020 bool clear_absent_on_error = false;
1021
1022 /*
1023 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1024 * marked as paged out in the compressor pager or the pager doesn't exist.
1025 * Note also that if the pager for an internal object
1026 * has not been created, the pager is not invoked regardless of the value
1027 * of MUST_ASK_PAGER().
1028 *
1029 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1030 * is marked as paged out in the compressor pager.
1031 * PAGED_OUT() is used to determine if a page has already been pushed
1032 * into a copy object in order to avoid a redundant page out operation.
1033 */
1034 #define MUST_ASK_PAGER(o, f, s) \
1035 ((s = VM_COMPRESSOR_PAGER_STATE_GET((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1036
1037 #define PAGED_OUT(o, f) \
1038 (VM_COMPRESSOR_PAGER_STATE_GET((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
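/*
 * Truth table for the two predicates above (follows directly from the
 * macro definitions): state EXISTS -> MUST_ASK_PAGER and PAGED_OUT both
 * TRUE; state UNKNOWN -> MUST_ASK_PAGER TRUE, PAGED_OUT FALSE; state
 * ABSENT -> both FALSE.
 */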
1039
1040 /*
1041 * Recovery actions
1042 */
1043 #define RELEASE_PAGE(m) \
1044 MACRO_BEGIN \
1045 PAGE_WAKEUP_DONE(m); \
1046 if ( !VM_PAGE_PAGEABLE(m)) { \
1047 vm_page_lockspin_queues(); \
1048 if (clear_absent_on_error && m->vmp_absent) {\
1049 vm_page_zero_fill(m); \
1050 counter_inc(&vm_statistics_zero_fill_count);\
1051 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);\
1052 m->vmp_absent = false; \
1053 } \
1054 if ( !VM_PAGE_PAGEABLE(m)) { \
1055 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) \
1056 vm_page_deactivate(m); \
1057 else \
1058 vm_page_activate(m); \
1059 } \
1060 vm_page_unlock_queues(); \
1061 } \
1062 clear_absent_on_error = false; \
1063 MACRO_END
1064
1065 #if TRACEFAULTPAGE
1066 dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1067 #endif
1068
1069 interruptible = fault_info->interruptible;
1070 interruptible_state = thread_interrupt_level(interruptible);
1071
1072 /*
1073 * INVARIANTS (through entire routine):
1074 *
1075 * 1) At all times, we must either have the object
1076 * lock or a busy page in some object to prevent
1077 * some other thread from trying to bring in
1078 * the same page.
1079 *
1080 * Note that we cannot hold any locks during the
1081 * pager access or when waiting for memory, so
1082 * we use a busy page then.
1083 *
1084 * 2) To prevent another thread from racing us down the
1085 * shadow chain and entering a new page in the top
1086 * object before we do, we must keep a busy page in
1087 * the top object while following the shadow chain.
1088 *
1089 * 3) We must increment paging_in_progress on any object
1090 * for which we have a busy page before dropping
1091 * the object lock
1092 *
1093 * 4) We leave busy pages on the pageout queues.
1094 * If the pageout daemon comes across a busy page,
1095 * it will remove the page from the pageout queues.
1096 */
1097
1098 object = first_object;
1099 offset = first_offset;
1100 first_m = VM_PAGE_NULL;
1101 access_required = fault_type;
1102
1103 /*
1104 * default type of fault
1105 */
1106 my_fault = DBG_CACHE_HIT_FAULT;
1107 thread_pri_floor_t token;
1108 bool drop_floor = false;
1109
1110 while (TRUE) {
1111 #if TRACEFAULTPAGE
1112 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1113 #endif
1114
1115 grab_options = 0;
1116 #if CONFIG_SECLUDED_MEMORY
1117 if (object->can_grab_secluded) {
1118 grab_options |= VM_PAGE_GRAB_SECLUDED;
1119 }
1120 #endif /* CONFIG_SECLUDED_MEMORY */
1121
1122 if (!object->alive) {
1123 /*
1124 * object is no longer valid
1125 * clean up and return error
1126 */
1127 #if DEVELOPMENT || DEBUG
1128 printf("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, object->ref_count, object->shadow_severed);
1129 if (panic_object_not_alive) {
1130 panic("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, object->ref_count, object->shadow_severed);
1131 }
1132 #endif /* DEVELOPMENT || DEBUG */
1133 vm_fault_cleanup(object, first_m);
1134 thread_interrupt_level(interruptible_state);
1135
1136 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1137 return VM_FAULT_MEMORY_ERROR;
1138 }
1139
1140 if (!object->pager_created && object->phys_contiguous) {
1141 /*
1142 * A physically-contiguous object without a pager:
1143 * must be a "large page" object. We do not deal
1144 * with VM pages for this object.
1145 */
1146 caller_lookup = FALSE;
1147 m = VM_PAGE_NULL;
1148 goto phys_contig_object;
1149 }
1150
1151 if (object->blocked_access) {
1152 /*
1153 * Access to this VM object has been blocked.
1154 * Replace our "paging_in_progress" reference with
1155 * an "activity_in_progress" reference and wait for
1156 * access to be unblocked.
1157 */
1158 caller_lookup = FALSE; /* no longer valid after sleep */
1159 vm_object_activity_begin(object);
1160 vm_object_paging_end(object);
1161 while (object->blocked_access) {
1162 vm_object_sleep(object,
1163 VM_OBJECT_EVENT_UNBLOCKED,
1164 THREAD_UNINT);
1165 }
1166 vm_fault_page_blocked_access++;
1167 vm_object_paging_begin(object);
1168 vm_object_activity_end(object);
1169 }
1170
1171 /*
1172 * See whether the page at 'offset' is resident
1173 */
1174 if (caller_lookup == TRUE) {
1175 /*
1176 * The caller has already looked up the page
1177 * and gave us the result in "result_page".
1178 * We can use this for the first lookup but
1179 * it loses its validity as soon as we unlock
1180 * the object.
1181 */
1182 m = *result_page;
1183 caller_lookup = FALSE; /* no longer valid after that */
1184 } else {
1185 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1186 }
1187 #if TRACEFAULTPAGE
1188 dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1189 #endif
1190 if (m != VM_PAGE_NULL) {
1191 if (m->vmp_busy) {
1192 /*
1193 * The page is being brought in,
1194 * wait for it and then retry.
1195 */
1196 #if TRACEFAULTPAGE
1197 dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1198 #endif
1199 wait_result = PAGE_SLEEP(object, m, interruptible);
1200
1201 if (wait_result != THREAD_AWAKENED) {
1202 vm_fault_cleanup(object, first_m);
1203 thread_interrupt_level(interruptible_state);
1204
1205 if (wait_result == THREAD_RESTART) {
1206 return VM_FAULT_RETRY;
1207 } else {
1208 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1209 return VM_FAULT_INTERRUPTED;
1210 }
1211 }
1212 continue;
1213 }
1214 if (m->vmp_laundry) {
1215 m->vmp_free_when_done = FALSE;
1216
1217 if (!m->vmp_cleaning) {
1218 vm_pageout_steal_laundry(m, FALSE);
1219 }
1220 }
1221 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
1222 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
1223 /*
1224 * Guard page: off limits !
1225 */
1226 if (fault_type == VM_PROT_NONE) {
1227 /*
1228 * The fault is not requesting any
1229 * access to the guard page, so it must
1230 * be just to wire or unwire it.
1231 * Let's pretend it succeeded...
1232 */
1233 m->vmp_busy = TRUE;
1234 *result_page = m;
1235 assert(first_m == VM_PAGE_NULL);
1236 *top_page = first_m;
1237 if (type_of_fault) {
1238 *type_of_fault = DBG_GUARD_FAULT;
1239 }
1240 thread_interrupt_level(interruptible_state);
1241 return VM_FAULT_SUCCESS;
1242 } else {
1243 /*
1244 * The fault requests access to the
1245 * guard page: let's deny that !
1246 */
1247 vm_fault_cleanup(object, first_m);
1248 thread_interrupt_level(interruptible_state);
1249 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1250 return VM_FAULT_MEMORY_ERROR;
1251 }
1252 }
1253
1254
1255 if (m->vmp_error) {
1256 /*
1257 * The page is in error, give up now.
1258 */
1259 #if TRACEFAULTPAGE
1260 dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */
1261 #endif
1262 if (error_code) {
1263 *error_code = KERN_MEMORY_ERROR;
1264 }
1265 VM_PAGE_FREE(m);
1266
1267 vm_fault_cleanup(object, first_m);
1268 thread_interrupt_level(interruptible_state);
1269
1270 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1271 return VM_FAULT_MEMORY_ERROR;
1272 }
1273 if (m->vmp_restart) {
1274 /*
1275 * The pager wants us to restart
1276 * at the top of the chain,
1277 * typically because it has moved the
1278 * page to another pager, then do so.
1279 */
1280 #if TRACEFAULTPAGE
1281 dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1282 #endif
1283 VM_PAGE_FREE(m);
1284
1285 vm_fault_cleanup(object, first_m);
1286 thread_interrupt_level(interruptible_state);
1287
1288 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1289 return VM_FAULT_RETRY;
1290 }
1291 if (m->vmp_absent) {
1292 /*
1293 * The page isn't busy, but is absent,
1294 * therefore it's deemed "unavailable".
1295 *
1296 * Remove the non-existent page (unless it's
1297 * in the top object) and move on down to the
1298 * next object (if there is one).
1299 */
1300 #if TRACEFAULTPAGE
1301 dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */
1302 #endif
1303 next_object = object->shadow;
1304
1305 if (next_object == VM_OBJECT_NULL) {
1306 /*
1307 * Absent page at bottom of shadow
1308 * chain; zero fill the page we left
1309 * busy in the first object, and free
1310 * the absent page.
1311 */
1312 assert(!must_be_resident);
1313
1314 /*
1315 * check for any conditions that prevent
1316 * us from creating a new zero-fill page:
1317 * vm_fault_check will do all of the
1318 * fault cleanup in the case of an error condition,
1319 * including resetting the thread_interrupt_level
1320 */
1321 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1322
1323 if (error != VM_FAULT_SUCCESS) {
1324 return error;
1325 }
1326
1327 if (object != first_object) {
1328 /*
1329 * free the absent page we just found
1330 */
1331 VM_PAGE_FREE(m);
1332
1333 /*
1334 * drop reference and lock on current object
1335 */
1336 vm_object_paging_end(object);
1337 vm_object_unlock(object);
1338
1339 /*
1340 * grab the original page we
1341 * 'soldered' in place and
1342 * retake lock on 'first_object'
1343 */
1344 m = first_m;
1345 first_m = VM_PAGE_NULL;
1346
1347 object = first_object;
1348 offset = first_offset;
1349
1350 vm_object_lock(object);
1351 } else {
1352 /*
1353 * we're going to use the absent page we just found
1354 * so convert it to a 'busy' page
1355 */
1356 m->vmp_absent = FALSE;
1357 m->vmp_busy = TRUE;
1358 }
1359 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1360 m->vmp_absent = TRUE;
1361 clear_absent_on_error = true;
1362 }
1363 /*
1364 * zero-fill the page and put it on
1365 * the correct paging queue
1366 */
1367 my_fault = vm_fault_zero_page(m, no_zero_fill);
1368
1369 break;
1370 } else {
1371 if (must_be_resident) {
1372 vm_object_paging_end(object);
1373 } else if (object != first_object) {
1374 vm_object_paging_end(object);
1375 VM_PAGE_FREE(m);
1376 } else {
1377 first_m = m;
1378 m->vmp_absent = FALSE;
1379 m->vmp_busy = TRUE;
1380
1381 vm_page_lockspin_queues();
1382 vm_page_queues_remove(m, FALSE);
1383 vm_page_unlock_queues();
1384 }
1385
1386 offset += object->vo_shadow_offset;
1387 fault_info->lo_offset += object->vo_shadow_offset;
1388 fault_info->hi_offset += object->vo_shadow_offset;
1389 access_required = VM_PROT_READ;
1390
1391 vm_object_lock(next_object);
1392 vm_object_unlock(object);
1393 object = next_object;
1394 vm_object_paging_begin(object);
1395
1396 /*
1397 * reset to default type of fault
1398 */
1399 my_fault = DBG_CACHE_HIT_FAULT;
1400
1401 continue;
1402 }
1403 }
1404 if ((m->vmp_cleaning)
1405 && ((object != first_object) || (object->vo_copy != VM_OBJECT_NULL))
1406 && (fault_type & VM_PROT_WRITE)) {
1407 /*
1408 * This is a copy-on-write fault that will
1409 * cause us to revoke access to this page, but
1410 * this page is in the process of being cleaned
1411 * in a clustered pageout. We must wait until
1412 * the cleaning operation completes before
1413 * revoking access to the original page,
1414 * otherwise we might attempt to remove a
1415 * wired mapping.
1416 */
1417 #if TRACEFAULTPAGE
1418 dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */
1419 #endif
1420 /*
1421 * take an extra ref so that object won't die
1422 */
1423 vm_object_reference_locked(object);
1424
1425 vm_fault_cleanup(object, first_m);
1426
1427 vm_object_lock(object);
1428 assert(object->ref_count > 0);
1429
1430 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1431
1432 if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1433 PAGE_ASSERT_WAIT(m, interruptible);
1434
1435 vm_object_unlock(object);
1436 wait_result = thread_block(THREAD_CONTINUE_NULL);
1437 vm_object_deallocate(object);
1438
1439 goto backoff;
1440 } else {
1441 vm_object_unlock(object);
1442
1443 vm_object_deallocate(object);
1444 thread_interrupt_level(interruptible_state);
1445
1446 return VM_FAULT_RETRY;
1447 }
1448 }
1449 if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1450 !(fault_info != NULL && fault_info->stealth)) {
1451 /*
1452 * If we were passed a non-NULL pointer for
1453 * "type_of_fault", then we came from
1454 * vm_fault... we'll let it deal with
1455 * this condition, since it
1456 * needs to see m->vmp_speculative to correctly
1457 * account the pageins, otherwise...
1458 * take it off the speculative queue, we'll
1459 * let the caller of vm_fault_page deal
1460 * with getting it onto the correct queue
1461 *
1462 * If the caller specified in fault_info that
1463 * it wants a "stealth" fault, we also leave
1464 * the page in the speculative queue.
1465 */
1466 vm_page_lockspin_queues();
1467 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1468 vm_page_queues_remove(m, FALSE);
1469 }
1470 vm_page_unlock_queues();
1471 }
1472 assert(object == VM_PAGE_OBJECT(m));
1473
1474 if (object->code_signed) {
1475 /*
1476 * CODE SIGNING:
1477 * We just paged in a page from a signed
1478 * memory object but we don't need to
1479 * validate it now. We'll validate it
1480 * when it gets mapped into a user address
1481 * space for the first time or when the page
1482 * gets copied to another object as a result
1483 * of a copy-on-write.
1484 */
1485 }
1486
1487 /*
1488 * We mark the page busy and leave it on
1489 * the pageout queues. If the pageout
1490 * daemon comes across it, then it will
1491 * remove the page from the queue, but not the object
1492 */
1493 #if TRACEFAULTPAGE
1494 dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1495 #endif
1496 assert(!m->vmp_busy);
1497 assert(!m->vmp_absent);
1498
1499 m->vmp_busy = TRUE;
1500 break;
1501 }
1502
1503 /*
1504 * we get here when there is no page present in the object at
1505 * the offset we're interested in... we'll allocate a page
1506 * at this point if the pager associated with
1507 * this object can provide the data or we're the top object...
1508 * object is locked; m == NULL
1509 */
1510
1511 if (must_be_resident) {
1512 if (fault_type == VM_PROT_NONE &&
1513 is_kernel_object(object)) {
1514 /*
1515 * We've been called from vm_fault_unwire()
1516 * while removing a map entry that was allocated
1517 * with KMA_KOBJECT and KMA_VAONLY. This page
1518 * is not present and there's nothing more to
1519 * do here (nothing to unwire).
1520 */
1521 vm_fault_cleanup(object, first_m);
1522 thread_interrupt_level(interruptible_state);
1523
1524 return VM_FAULT_MEMORY_ERROR;
1525 }
1526
1527 goto dont_look_for_page;
1528 }
1529
1530 /* Don't expect to fault pages into the kernel object. */
1531 assert(!is_kernel_object(object));
1532
1533 look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE));
1534
1535 #if TRACEFAULTPAGE
1536 dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
1537 #endif
1538 if (!look_for_page && object == first_object && !object->phys_contiguous) {
1539 /*
1540 * Allocate a new page for this object/offset pair as a placeholder
1541 */
1542 m = vm_page_grab_options(grab_options);
1543 #if TRACEFAULTPAGE
1544 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1545 #endif
1546 if (m == VM_PAGE_NULL) {
1547 vm_fault_cleanup(object, first_m);
1548 thread_interrupt_level(interruptible_state);
1549
1550 return VM_FAULT_MEMORY_SHORTAGE;
1551 }
1552
1553 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1554 vm_page_insert_internal(m, object,
1555 vm_object_trunc_page(offset),
1556 VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1557 } else {
1558 vm_page_insert(m, object, vm_object_trunc_page(offset));
1559 }
1560 }
1561 if (look_for_page) {
1562 kern_return_t rc;
1563 int my_fault_type;
1564
1565 /*
1566 * If the memory manager is not ready, we
1567 * cannot make requests.
1568 */
1569 if (!object->pager_ready) {
1570 #if TRACEFAULTPAGE
1571 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1572 #endif
1573 if (m != VM_PAGE_NULL) {
1574 VM_PAGE_FREE(m);
1575 }
1576
1577 /*
1578 * take an extra ref so object won't die
1579 */
1580 vm_object_reference_locked(object);
1581 vm_fault_cleanup(object, first_m);
1582
1583 vm_object_lock(object);
1584 assert(object->ref_count > 0);
1585
1586 if (!object->pager_ready) {
1587 wait_result = vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGER_READY, interruptible);
1588
1589 vm_object_unlock(object);
1590 if (wait_result == THREAD_WAITING) {
1591 wait_result = thread_block(THREAD_CONTINUE_NULL);
1592 }
1593 vm_object_deallocate(object);
1594
1595 goto backoff;
1596 } else {
1597 vm_object_unlock(object);
1598 vm_object_deallocate(object);
1599 thread_interrupt_level(interruptible_state);
1600
1601 return VM_FAULT_RETRY;
1602 }
1603 }
1604 if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1605 /*
1606 * If there are too many outstanding page
1607 * requests pending on this external object, we
1608 * wait for them to be resolved now.
1609 */
1610 #if TRACEFAULTPAGE
1611 dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1612 #endif
1613 if (m != VM_PAGE_NULL) {
1614 VM_PAGE_FREE(m);
1615 }
1616 /*
1617 * take an extra ref so object won't die
1618 */
1619 vm_object_reference_locked(object);
1620
1621 vm_fault_cleanup(object, first_m);
1622
1623 vm_object_lock(object);
1624 assert(object->ref_count > 0);
1625
1626 if (object->paging_in_progress >= vm_object_pagein_throttle) {
1627 vm_object_assert_wait(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, interruptible);
1628
1629 vm_object_unlock(object);
1630 wait_result = thread_block(THREAD_CONTINUE_NULL);
1631 vm_object_deallocate(object);
1632
1633 goto backoff;
1634 } else {
1635 vm_object_unlock(object);
1636 vm_object_deallocate(object);
1637 thread_interrupt_level(interruptible_state);
1638
1639 return VM_FAULT_RETRY;
1640 }
1641 }
1642 if (object->internal) {
1643 int compressed_count_delta;
1644
1645 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1646
1647 if (m == VM_PAGE_NULL) {
1648 /*
1649 * Allocate a new page for this object/offset pair as a placeholder
1650 */
1651 m = vm_page_grab_options(grab_options);
1652 #if TRACEFAULTPAGE
1653 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1654 #endif
1655 if (m == VM_PAGE_NULL) {
1656 vm_fault_cleanup(object, first_m);
1657 thread_interrupt_level(interruptible_state);
1658
1659 return VM_FAULT_MEMORY_SHORTAGE;
1660 }
1661
1662 m->vmp_absent = TRUE;
1663 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1664 vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1665 } else {
1666 vm_page_insert(m, object, vm_object_trunc_page(offset));
1667 }
1668 }
1669 assert(m->vmp_busy);
1670
1671 m->vmp_absent = TRUE;
1672 pager = object->pager;
1673
1674 assert(object->paging_in_progress > 0);
1675 vm_object_unlock(object);
1676
1677 rc = vm_compressor_pager_get(
1678 pager,
1679 offset + object->paging_offset,
1680 VM_PAGE_GET_PHYS_PAGE(m),
1681 &my_fault_type,
1682 0,
1683 &compressed_count_delta);
1684
1685 if (type_of_fault == NULL) {
1686 int throttle_delay;
1687
1688 /*
1689 * we weren't called from vm_fault, so we
1690 * need to apply page creation throttling;
1691 * do it before we re-acquire any locks
1692 */
1693 if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1694 if ((throttle_delay = vm_page_throttled(TRUE))) {
1695 VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1696 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
1697 }
1698 }
1699 }
1700 vm_object_lock(object);
1701 assert(object->paging_in_progress > 0);
1702
1703 vm_compressor_pager_count(
1704 pager,
1705 compressed_count_delta,
1706 FALSE, /* shared_lock */
1707 object);
1708
1709 switch (rc) {
1710 case KERN_SUCCESS:
1711 m->vmp_absent = FALSE;
1712 m->vmp_dirty = TRUE;
1713 if ((object->wimg_bits &
1714 VM_WIMG_MASK) !=
1715 VM_WIMG_USE_DEFAULT) {
1716 /*
1717 * If the page is not cacheable,
1718 * we can't let its contents
1719 * linger in the data cache
1720 * after the decompression.
1721 */
1722 pmap_sync_page_attributes_phys(
1723 VM_PAGE_GET_PHYS_PAGE(m));
1724 } else {
1725 m->vmp_written_by_kernel = TRUE;
1726 }
1727 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
1728 if ((fault_type & VM_PROT_WRITE) == 0) {
1729 vm_object_lock_assert_exclusive(object);
1730 vm_page_lockspin_queues();
1731 m->vmp_unmodified_ro = true;
1732 vm_page_unlock_queues();
1733 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
1734 *protection &= ~VM_PROT_WRITE;
1735 }
1736 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
1737
1738 /*
1739 * If the object is purgeable, its
1740 * owner's purgeable ledgers have been
1741 * updated in vm_page_insert() but the
1742 * page was also accounted for in a
1743 * "compressed purgeable" ledger, so
1744 * update that now.
1745 */
1746 if (((object->purgable !=
1747 VM_PURGABLE_DENY) ||
1748 object->vo_ledger_tag) &&
1749 (object->vo_owner !=
1750 NULL)) {
1751 /*
1752 * One less compressed
1753 * purgeable/tagged page.
1754 */
1755 if (compressed_count_delta) {
1756 vm_object_owner_compressed_update(
1757 object,
1758 -1);
1759 }
1760 }
1761
1762 break;
1763 case KERN_MEMORY_FAILURE:
1764 m->vmp_unusual = TRUE;
1765 m->vmp_error = TRUE;
1766 m->vmp_absent = FALSE;
1767 break;
1768 case KERN_MEMORY_ERROR:
1769 assert(m->vmp_absent);
1770 break;
1771 default:
1772 panic("vm_fault_page(): unexpected "
1773 "error %d from "
1774 "vm_compressor_pager_get()\n",
1775 rc);
1776 }
1777 PAGE_WAKEUP_DONE(m);
1778
1779 rc = KERN_SUCCESS;
1780 goto data_requested;
1781 }
1782 my_fault_type = DBG_PAGEIN_FAULT;
1783
1784 if (m != VM_PAGE_NULL) {
1785 VM_PAGE_FREE(m);
1786 m = VM_PAGE_NULL;
1787 }
1788
1789 #if TRACEFAULTPAGE
1790 dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */
1791 #endif
1792
1793 /*
1794 * It's possible someone called vm_object_destroy while we weren't
1795 * holding the object lock. If that has happened, then bail out
1796 * here.
1797 */
1798
1799 pager = object->pager;
1800
1801 if (pager == MEMORY_OBJECT_NULL) {
1802 vm_fault_cleanup(object, first_m);
1803 thread_interrupt_level(interruptible_state);
1804 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NO_PAGER), 0 /* arg */);
1805 return VM_FAULT_MEMORY_ERROR;
1806 }
1807
1808 /*
1809 * We have an absent page in place for the faulting offset,
1810 * so we can release the object lock.
1811 */
1812
1813 if (object->object_is_shared_cache) {
1814 token = thread_priority_floor_start();
1815 /*
1816 * A non-native shared cache object might
1817 * be getting set up in parallel with this
1818 * fault and so we can't assume that this
1819 * check will be valid after we drop the
1820 * object lock below.
1821 */
1822 drop_floor = true;
1823 }
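/*
 * The priority floor taken above is presumably there so that a
 * low-priority thread faulting on the shared cache is not starved
 * while the pagein I/O below is in flight; it is dropped as soon
 * as the object lock is retaken after memory_object_data_request().
 */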
1824
1825 vm_object_unlock(object);
1826
1827 /*
1828 * If this object uses a copy_call strategy,
1829 * and we are interested in a copy of this object
1830 * (having gotten here only by following a
1831 * shadow chain), then tell the memory manager
1832 * via a flag added to the desired_access
1833 * parameter, so that it can detect a race
1834 * between our walking down the shadow chain
1835 * and its pushing pages up into a copy of
1836 * the object that it manages.
1837 */
1838 if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1839 wants_copy_flag = VM_PROT_WANTS_COPY;
1840 } else {
1841 wants_copy_flag = VM_PROT_NONE;
1842 }
1843
1844 if (object->vo_copy == first_object) {
1845 /*
1846 * if we issue the memory_object_data_request in
1847 * this state, we are subject to a deadlock with
1848 * the underlying filesystem if it is trying to
1849 * shrink the file resulting in a push of pages
1850 * into the copy object... that push will stall
1851 * on the placeholder page, and if the pushing thread
1852 * is holding a lock that is required on the pagein
1853 * path (such as a truncate lock), we'll deadlock...
1854 * to avoid this potential deadlock, we throw away
1855 * our placeholder page before calling memory_object_data_request
1856 * and force this thread to retry the vm_fault_page after
1857 * we have issued the I/O. the second time through this path
1858 * we will find the page already in the cache (presumably still
1859 * busy waiting for the I/O to complete) and then complete
1860 * the fault w/o having to go through memory_object_data_request again
1861 */
1862 assert(first_m != VM_PAGE_NULL);
1863 assert(VM_PAGE_OBJECT(first_m) == first_object);
1864
1865 vm_object_lock(first_object);
1866 VM_PAGE_FREE(first_m);
1867 vm_object_paging_end(first_object);
1868 vm_object_unlock(first_object);
1869
1870 first_m = VM_PAGE_NULL;
1871 force_fault_retry = TRUE;
1872
1873 vm_fault_page_forced_retry++;
1874 }
1875
1876 if (data_already_requested == TRUE) {
1877 orig_behavior = fault_info->behavior;
1878 orig_cluster_size = fault_info->cluster_size;
1879
1880 fault_info->behavior = VM_BEHAVIOR_RANDOM;
1881 fault_info->cluster_size = PAGE_SIZE;
1882 }
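/*
 * Second trip through this path for the same request: the cluster
 * I/O was already issued the first time around, so the retry is
 * narrowed to a single page with no read-ahead, presumably to
 * avoid re-issuing the same cluster.
 */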
1883 /*
1884 * Call the memory manager to retrieve the data.
1885 */
1886 rc = memory_object_data_request(
1887 pager,
1888 vm_object_trunc_page(offset) + object->paging_offset,
1889 PAGE_SIZE,
1890 access_required | wants_copy_flag,
1891 (memory_object_fault_info_t)fault_info);
1892
1893 if (data_already_requested == TRUE) {
1894 fault_info->behavior = orig_behavior;
1895 fault_info->cluster_size = orig_cluster_size;
1896 } else {
1897 data_already_requested = TRUE;
1898 }
1899
1900 DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1901 #if TRACEFAULTPAGE
1902 dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1903 #endif
1904 vm_object_lock(object);
1905
1906 if (drop_floor && object->object_is_shared_cache) {
1907 thread_priority_floor_end(&token);
1908 drop_floor = false;
1909 }
1910
1911 data_requested:
1912 if (rc != KERN_SUCCESS) {
1913 vm_fault_cleanup(object, first_m);
1914 thread_interrupt_level(interruptible_state);
1915
1916 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1917
1918 return (rc == MACH_SEND_INTERRUPTED) ?
1919 VM_FAULT_INTERRUPTED :
1920 VM_FAULT_MEMORY_ERROR;
1921 } else {
1922 clock_sec_t tv_sec;
1923 clock_usec_t tv_usec;
1924
1925 if (my_fault_type == DBG_PAGEIN_FAULT) {
1926 clock_get_system_microtime(&tv_sec, &tv_usec);
1927 current_thread()->t_page_creation_time = tv_sec;
1928 current_thread()->t_page_creation_count = 0;
1929 }
1930 }
1931 if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
1932 vm_fault_cleanup(object, first_m);
1933 thread_interrupt_level(interruptible_state);
1934
1935 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
1936 return VM_FAULT_INTERRUPTED;
1937 }
1938 if (force_fault_retry == TRUE) {
1939 vm_fault_cleanup(object, first_m);
1940 thread_interrupt_level(interruptible_state);
1941
1942 return VM_FAULT_RETRY;
1943 }
1944 if (m == VM_PAGE_NULL && object->phys_contiguous) {
1945 /*
1946 * No page here means that the object we
1947 * initially looked up was "physically
1948 * contiguous" (i.e. device memory). However,
1949 * with Virtual VRAM, the object might not
1950 * be backed by that device memory anymore,
1951 * so we're done here only if the object is
1952 * still "phys_contiguous".
1953 * Otherwise, if the object is no longer
1954 * "phys_contiguous", we need to retry the
1955 * page fault against the object's new backing
1956 * store (different memory object).
1957 */
1958 phys_contig_object:
1959 goto done;
1960 }
1961 /*
1962 * potentially a pagein fault
1963 * if we make it through the state checks
1964 * above, then we'll count it as such
1965 */
1966 my_fault = my_fault_type;
1967
1968 /*
1969 * Retry with same object/offset, since new data may
1970 * be in a different page (i.e., m is meaningless at
1971 * this point).
1972 */
1973 continue;
1974 }
1975 dont_look_for_page:
1976 /*
1977 * We get here if the object has no pager, or an existence map
1978 * exists and indicates the page isn't present on the pager
1979 * or we're unwiring a page. If a pager exists, but there
1980 * is no existence map, then the m->vmp_absent case above handles
1981 * the ZF case when the pager can't provide the page.
1982 */
1983 #if TRACEFAULTPAGE
1984 dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
1985 #endif
1986 if (object == first_object) {
1987 first_m = m;
1988 } else {
1989 assert(m == VM_PAGE_NULL);
1990 }
1991
1992 next_object = object->shadow;
1993
1994 if (next_object == VM_OBJECT_NULL) {
1995 /*
1996 * we've hit the bottom of the shadow chain,
1997 * fill the page in the top object with zeros.
1998 */
1999 assert(!must_be_resident);
2000
2001 if (object != first_object) {
2002 vm_object_paging_end(object);
2003 vm_object_unlock(object);
2004
2005 object = first_object;
2006 offset = first_offset;
2007 vm_object_lock(object);
2008 }
2009 m = first_m;
2010 assert(VM_PAGE_OBJECT(m) == object);
2011 first_m = VM_PAGE_NULL;
2012
2013 /*
2014 * check for any conditions that prevent
2015 * us from creating a new zero-fill page.
2016 * vm_fault_check will do all of the
2017 * fault cleanup in the case of an error condition,
2018 * including resetting the thread_interrupt_level
2019 */
2020 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2021
2022 if (error != VM_FAULT_SUCCESS) {
2023 return error;
2024 }
2025
2026 if (m == VM_PAGE_NULL) {
2027 m = vm_page_grab_options(grab_options);
2028
2029 if (m == VM_PAGE_NULL) {
2030 vm_fault_cleanup(object, VM_PAGE_NULL);
2031 thread_interrupt_level(interruptible_state);
2032
2033 return VM_FAULT_MEMORY_SHORTAGE;
2034 }
2035 vm_page_insert(m, object, vm_object_trunc_page(offset));
2036 }
2037 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2038 m->vmp_absent = TRUE;
2039 clear_absent_on_error = true;
2040 }
2041
2042 my_fault = vm_fault_zero_page(m, no_zero_fill);
2043
2044 break;
2045 } else {
2046 /*
2047 * Move on to the next object. Lock the next
2048 * object before unlocking the current one.
2049 */
2050 if ((object != first_object) || must_be_resident) {
2051 vm_object_paging_end(object);
2052 }
2053
2054 offset += object->vo_shadow_offset;
2055 fault_info->lo_offset += object->vo_shadow_offset;
2056 fault_info->hi_offset += object->vo_shadow_offset;
2057 access_required = VM_PROT_READ;
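/*
 * Only read access is required from backing objects: a write
 * fault is satisfied by copying the page up into first_object
 * (the COW path further down), never by writing a shadowed
 * object's page in place.
 */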
2058
2059 vm_object_lock(next_object);
2060 vm_object_unlock(object);
2061
2062 object = next_object;
2063 vm_object_paging_begin(object);
2064 }
2065 }
2066
2067 /*
2068 * PAGE HAS BEEN FOUND.
2069 *
2070 * This page (m) is:
2071 * busy, so that we can play with it;
2072 * not absent, so that nobody else will fill it;
2073 * possibly eligible for pageout;
2074 *
2075 * The top-level page (first_m) is:
2076 * VM_PAGE_NULL if the page was found in the
2077 * top-level object;
2078 * busy, not absent, and ineligible for pageout.
2079 *
2080 * The current object (object) is locked. A paging
2081 * reference is held for the current and top-level
2082 * objects.
2083 */
2084
2085 #if TRACEFAULTPAGE
2086 dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2087 #endif
2088 #if EXTRA_ASSERTIONS
2089 assert(m->vmp_busy && !m->vmp_absent);
2090 assert((first_m == VM_PAGE_NULL) ||
2091 (first_m->vmp_busy && !first_m->vmp_absent &&
2092 !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2093 #endif /* EXTRA_ASSERTIONS */
2094
2095 /*
2096 * If the page is being written, but isn't
2097 * already owned by the top-level object,
2098 * we have to copy it into a new page owned
2099 * by the top-level object.
2100 */
2101 if (object != first_object) {
2102 #if TRACEFAULTPAGE
2103 dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2104 #endif
2105 if (fault_type & VM_PROT_WRITE) {
2106 vm_page_t copy_m;
2107
2108 /*
2109 * We only really need to copy if we
2110 * want to write it.
2111 */
2112 assert(!must_be_resident);
2113
2114 /*
2115 * If we try to collapse first_object at this
2116 * point, we may deadlock when we try to get
2117 * the lock on an intermediate object (since we
2118 * have the bottom object locked). We can't
2119 * unlock the bottom object, because the page
2120 * we found may move (by collapse) if we do.
2121 *
2122 * Instead, we first copy the page. Then, when
2123 * we have no more use for the bottom object,
2124 * we unlock it and try to collapse.
2125 *
2126 * Note that we copy the page even if we didn't
2127 * need to... that's the breaks.
2128 */
2129
2130 /*
2131 * Allocate a page for the copy
2132 */
2133 copy_m = vm_page_grab_options(grab_options);
2134
2135 if (copy_m == VM_PAGE_NULL) {
2136 RELEASE_PAGE(m);
2137
2138 vm_fault_cleanup(object, first_m);
2139 thread_interrupt_level(interruptible_state);
2140
2141 return VM_FAULT_MEMORY_SHORTAGE;
2142 }
2143
2144 vm_page_copy(m, copy_m);
2145
2146 /*
2147 * If another map is truly sharing this
2148 * page with us, we have to flush all
2149 * uses of the original page, since we
2150 * can't distinguish those which want the
2151 * original from those which need the
2152 * new copy.
2153 *
2154 * XXXO If we know that only one map has
2155 * access to this page, then we could
2156 * avoid the pmap_disconnect() call.
2157 */
2158 if (m->vmp_pmapped) {
2159 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2160 }
2161
2162 if (m->vmp_clustered) {
2163 VM_PAGE_COUNT_AS_PAGEIN(m);
2164 VM_PAGE_CONSUME_CLUSTERED(m);
2165 }
2166 assert(!m->vmp_cleaning);
2167
2168 /*
2169 * We no longer need the old page or object.
2170 */
2171 RELEASE_PAGE(m);
2172
2173 /*
2174 * This check helps with marking the object as having a sequential pattern.
2175 * Normally we'll miss doing this below because this fault is about COW to
2176 * the first_object, i.e. bring the page in from disk, push it to the object
2177 * above, but don't update the file object's sequential pattern.
2178 */
2179 if (object->internal == FALSE) {
2180 vm_fault_is_sequential(object, offset, fault_info->behavior);
2181 }
2182
2183 vm_object_paging_end(object);
2184 vm_object_unlock(object);
2185
2186 my_fault = DBG_COW_FAULT;
2187 counter_inc(&vm_statistics_cow_faults);
2188 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2189 counter_inc(&current_task()->cow_faults);
2190
2191 object = first_object;
2192 offset = first_offset;
2193
2194 vm_object_lock(object);
2195 /*
2196 * get rid of the place holder
2197 * page that we soldered in earlier
2198 */
2199 VM_PAGE_FREE(first_m);
2200 first_m = VM_PAGE_NULL;
2201
2202 /*
2203 * and replace it with the
2204 * page we just copied into
2205 */
2206 assert(copy_m->vmp_busy);
2207 vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2208 SET_PAGE_DIRTY(copy_m, TRUE);
2209
2210 m = copy_m;
2211 /*
2212 * Now that we've gotten the copy out of the
2213 * way, let's try to collapse the top object.
2214 * But we have to play ugly games with
2215 * paging_in_progress to do that...
2216 */
2217 vm_object_paging_end(object);
2218 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2219 vm_object_paging_begin(object);
2220 } else {
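/*
 * Read fault on a page still owned by a backing object:
 * hand it out read-only so a later write refaults and
 * takes the copy path above.
 */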
2221 *protection &= (~VM_PROT_WRITE);
2222 }
2223 }
2224 /*
2225 * Now check whether the page needs to be pushed into the
2226 * copy object. The use of asymmetric copy on write for
2227 * shared temporary objects means that we may do two copies to
2228 * satisfy the fault; one above to get the page from a
2229 * shadowed object, and one here to push it into the copy.
2230 */
2231 try_failed_count = 0;
2232
2233 while ((copy_object = first_object->vo_copy) != VM_OBJECT_NULL) {
2234 vm_object_offset_t copy_offset;
2235 vm_page_t copy_m;
2236
2237 #if TRACEFAULTPAGE
2238 dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2239 #endif
2240 /*
2241 * If the page is being written, but hasn't been
2242 * copied to the copy-object, we have to copy it there.
2243 */
2244 if ((fault_type & VM_PROT_WRITE) == 0) {
2245 *protection &= ~VM_PROT_WRITE;
2246 break;
2247 }
2248
2249 /*
2250 * If the page was guaranteed to be resident,
2251 * we must have already performed the copy.
2252 */
2253 if (must_be_resident) {
2254 break;
2255 }
2256
2257 /*
2258 * Try to get the lock on the copy_object.
2259 */
2260 if (!vm_object_lock_try(copy_object)) {
2261 vm_object_unlock(object);
2262 try_failed_count++;
2263
2264 mutex_pause(try_failed_count); /* wait a bit */
2265 vm_object_lock(object);
2266
2267 continue;
2268 }
2269 try_failed_count = 0;
2270
2271 /*
2272 * Make another reference to the copy-object,
2273 * to keep it from disappearing during the
2274 * copy.
2275 */
2276 vm_object_reference_locked(copy_object);
2277
2278 /*
2279 * Does the page exist in the copy?
2280 */
2281 copy_offset = first_offset - copy_object->vo_shadow_offset;
2282 copy_offset = vm_object_trunc_page(copy_offset);
2283
2284 if (copy_object->vo_size <= copy_offset) {
2285 /*
2286 * Copy object doesn't cover this page -- do nothing.
2287 */
2288 ;
2289 } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2290 /*
2291 * Page currently exists in the copy object
2292 */
2293 if (copy_m->vmp_busy) {
2294 /*
2295 * If the page is being brought
2296 * in, wait for it and then retry.
2297 */
2298 RELEASE_PAGE(m);
2299
2300 /*
2301 * take an extra ref so object won't die
2302 */
2303 vm_object_reference_locked(copy_object);
2304 vm_object_unlock(copy_object);
2305 vm_fault_cleanup(object, first_m);
2306
2307 vm_object_lock(copy_object);
2308 assert(copy_object->ref_count > 0);
2309 vm_object_lock_assert_exclusive(copy_object);
2310 copy_object->ref_count--;
2311 assert(copy_object->ref_count > 0);
2312 copy_m = vm_page_lookup(copy_object, copy_offset);
2313
2314 if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2315 PAGE_ASSERT_WAIT(copy_m, interruptible);
2316
2317 vm_object_unlock(copy_object);
2318 wait_result = thread_block(THREAD_CONTINUE_NULL);
2319 vm_object_deallocate(copy_object);
2320
2321 goto backoff;
2322 } else {
2323 vm_object_unlock(copy_object);
2324 vm_object_deallocate(copy_object);
2325 thread_interrupt_level(interruptible_state);
2326
2327 return VM_FAULT_RETRY;
2328 }
2329 }
2330 } else if (!PAGED_OUT(copy_object, copy_offset)) {
2331 /*
2332 * If PAGED_OUT is TRUE, then the page used to exist
2333 * in the copy-object, and has already been paged out.
2334 * We don't need to repeat this. If PAGED_OUT is
2335 * FALSE, then either we don't know (!pager_created,
2336 * for example) or it hasn't been paged out.
2337 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2338 * We must copy the page to the copy object.
2339 *
2340 * Allocate a page for the copy
2341 */
2342 copy_m = vm_page_alloc(copy_object, copy_offset);
2343
2344 if (copy_m == VM_PAGE_NULL) {
2345 RELEASE_PAGE(m);
2346
2347 vm_object_lock_assert_exclusive(copy_object);
2348 copy_object->ref_count--;
2349 assert(copy_object->ref_count > 0);
2350
2351 vm_object_unlock(copy_object);
2352 vm_fault_cleanup(object, first_m);
2353 thread_interrupt_level(interruptible_state);
2354
2355 return VM_FAULT_MEMORY_SHORTAGE;
2356 }
2357 /*
2358 * Must copy page into copy-object.
2359 */
2360 vm_page_copy(m, copy_m);
2361
2362 /*
2363 * If the old page was in use by any users
2364 * of the copy-object, it must be removed
2365 * from all pmaps. (We can't know which
2366 * pmaps use it.)
2367 */
2368 if (m->vmp_pmapped) {
2369 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2370 }
2371
2372 if (m->vmp_clustered) {
2373 VM_PAGE_COUNT_AS_PAGEIN(m);
2374 VM_PAGE_CONSUME_CLUSTERED(m);
2375 }
2376 /*
2377 * If there's a pager, then immediately
2378 * page out this page, using the "initialize"
2379 * option. Else, we use the copy.
2380 */
2381 if ((!copy_object->pager_ready)
2382 || VM_COMPRESSOR_PAGER_STATE_GET(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2383 ) {
2384 vm_page_lockspin_queues();
2385 assert(!m->vmp_cleaning);
2386 vm_page_activate(copy_m);
2387 vm_page_unlock_queues();
2388
2389 SET_PAGE_DIRTY(copy_m, TRUE);
2390 PAGE_WAKEUP_DONE(copy_m);
2391 } else {
2392 assert(copy_m->vmp_busy == TRUE);
2393 assert(!m->vmp_cleaning);
2394
2395 /*
2396 * dirty is protected by the object lock
2397 */
2398 SET_PAGE_DIRTY(copy_m, TRUE);
2399
2400 /*
2401 * The page is already ready for pageout:
2402 * not on pageout queues and busy.
2403 * Unlock everything except the
2404 * copy_object itself.
2405 */
2406 vm_object_unlock(object);
2407
2408 /*
2409 * Write the page to the copy-object,
2410 * flushing it from the kernel.
2411 */
2412 vm_pageout_initialize_page(copy_m);
2413
2414 /*
2415 * Since the pageout may have
2416 * temporarily dropped the
2417 * copy_object's lock, we
2418 * check whether we'll have
2419 * to deallocate the hard way.
2420 */
2421 if ((copy_object->shadow != object) || (copy_object->ref_count == 1)) {
2422 vm_object_unlock(copy_object);
2423 vm_object_deallocate(copy_object);
2424 vm_object_lock(object);
2425
2426 continue;
2427 }
2428 /*
2429 * Pick back up the old object's
2430 * lock. [It is safe to do so,
2431 * since it must be deeper in the
2432 * object tree.]
2433 */
2434 vm_object_lock(object);
2435 }
2436
2437 /*
2438 * Because we're pushing a page upward
2439 * in the object tree, we must restart
2440 * any faults that are waiting here.
2441 * [Note that this is an expansion of
2442 * PAGE_WAKEUP that uses the THREAD_RESTART
2443 * wait result]. Can't turn off the page's
2444 * busy bit because we're not done with it.
2445 */
2446 if (m->vmp_wanted) {
2447 m->vmp_wanted = FALSE;
2448 thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2449 }
2450 }
2451 /*
2452 * The reference count on copy_object must be
2453 * at least 2: one for our extra reference,
2454 * and at least one from the outside world
2455 * (we checked that when we last locked
2456 * copy_object).
2457 */
2458 vm_object_lock_assert_exclusive(copy_object);
2459 copy_object->ref_count--;
2460 assert(copy_object->ref_count > 0);
2461
2462 vm_object_unlock(copy_object);
2463
2464 break;
2465 }
2466
2467 done:
2468 *result_page = m;
2469 *top_page = first_m;
2470
2471 if (m != VM_PAGE_NULL) {
2472 assert(VM_PAGE_OBJECT(m) == object);
2473
2474 retval = VM_FAULT_SUCCESS;
2475
2476 if (my_fault == DBG_PAGEIN_FAULT) {
2477 VM_PAGE_COUNT_AS_PAGEIN(m);
2478
2479 if (object->internal) {
2480 my_fault = DBG_PAGEIND_FAULT;
2481 } else {
2482 my_fault = DBG_PAGEINV_FAULT;
2483 }
2484
2485 /*
2486 * evaluate access pattern and update state.
2487 * vm_fault_deactivate_behind depends on the
2488 * state being up to date
2489 */
2490 vm_fault_is_sequential(object, offset, fault_info->behavior);
2491 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2492 } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2493 /*
2494 * we weren't called from vm_fault, so handle the
2495 * accounting here for hits in the cache
2496 */
2497 if (m->vmp_clustered) {
2498 VM_PAGE_COUNT_AS_PAGEIN(m);
2499 VM_PAGE_CONSUME_CLUSTERED(m);
2500 }
2501 vm_fault_is_sequential(object, offset, fault_info->behavior);
2502 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2503 } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2504 VM_STAT_DECOMPRESSIONS();
2505 }
2506 if (type_of_fault) {
2507 *type_of_fault = my_fault;
2508 }
2509 } else {
2510 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2511 retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2512 assert(first_m == VM_PAGE_NULL);
2513 assert(object == first_object);
2514 }
2515
2516 thread_interrupt_level(interruptible_state);
2517
2518 #if TRACEFAULTPAGE
2519 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
2520 #endif
2521 return retval;
2522
2523 backoff:
2524 thread_interrupt_level(interruptible_state);
2525
2526 if (wait_result == THREAD_INTERRUPTED) {
2527 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2528 return VM_FAULT_INTERRUPTED;
2529 }
2530 return VM_FAULT_RETRY;
2531
2532 #undef RELEASE_PAGE
2533 }
2534
2535 #if MACH_ASSERT && (XNU_PLATFORM_WatchOS || __x86_64__)
2536 #define PANIC_ON_CS_KILLED_DEFAULT true
2537 #else
2538 #define PANIC_ON_CS_KILLED_DEFAULT false
2539 #endif
2540 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2541 PANIC_ON_CS_KILLED_DEFAULT);
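/*
 * TUNABLE binds the variable to a boot-arg of the same name, so
 * the platform default above can be overridden at boot time
 * (e.g. with panic_on_cs_killed=1).
 */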
2542
2543 extern int proc_selfpid(void);
2544 extern char *proc_name_address(struct proc *p);
2545 extern char *proc_best_name(struct proc *);
2546 unsigned long cs_enter_tainted_rejected = 0;
2547 unsigned long cs_enter_tainted_accepted = 0;
2548
2549 /*
2550 * CODE SIGNING:
2551 * When soft faulting a page, we have to validate the page if:
2552 * 1. the page is being mapped in user space
2553 * 2. the page hasn't already been found to be "tainted"
2554 * 3. the page belongs to a code-signed object
2555 * 4. the page has not been validated yet or has been mapped for write.
2556 */
2557 static bool
2558 vm_fault_cs_need_validation(
2559 pmap_t pmap,
2560 vm_page_t page,
2561 vm_object_t page_obj,
2562 vm_map_size_t fault_page_size,
2563 vm_map_offset_t fault_phys_offset)
2564 {
2565 if (pmap == kernel_pmap) {
2566 /* 1 - not user space */
2567 return false;
2568 }
2569 if (!page_obj->code_signed) {
2570 /* 3 - page does not belong to a code-signed object */
2571 return false;
2572 }
2573 if (fault_page_size == PAGE_SIZE) {
2574 /* looking at the whole page */
2575 assertf(fault_phys_offset == 0,
2576 "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2577 (uint64_t)fault_page_size,
2578 (uint64_t)fault_phys_offset);
2579 if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2580 /* 2 - page is all tainted */
2581 return false;
2582 }
2583 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2584 !page->vmp_wpmapped) {
2585 /* 4 - already fully validated and never mapped writable */
2586 return false;
2587 }
2588 } else {
2589 /* looking at a specific sub-page */
2590 if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2591 /* 2 - sub-page was already marked as tainted */
2592 return false;
2593 }
2594 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2595 !page->vmp_wpmapped) {
2596 /* 4 - already validated and never mapped writable */
2597 return false;
2598 }
2599 }
2600 /* page needs to be validated */
2601 return true;
2602 }
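/*
 * In sketch form, the whole-page case above reduces to:
 *
 *	need_validation = pmap != kernel_pmap			// 1
 *	    && page_obj->code_signed				// 3
 *	    && page->vmp_cs_tainted != VMP_CS_ALL_TRUE		// 2
 *	    && !(page->vmp_cs_validated == VMP_CS_ALL_TRUE
 *	         && !page->vmp_wpmapped);			// 4
 */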
2603
2604
2605 static bool
2606 vm_fault_cs_page_immutable(
2607 vm_page_t m,
2608 vm_map_size_t fault_page_size,
2609 vm_map_offset_t fault_phys_offset,
2610 vm_prot_t prot __unused)
2611 {
2612 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2613 /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2614 return true;
2615 }
2616 return false;
2617 }
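/*
 * Note that with the VM_PROT_EXECUTE test commented out above,
 * any validated page is treated as immutable here regardless of
 * whether it is mapped executable.
 */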
2618
2619 static bool
2620 vm_fault_cs_page_nx(
2621 vm_page_t m,
2622 vm_map_size_t fault_page_size,
2623 vm_map_offset_t fault_phys_offset)
2624 {
2625 return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2626 }
2627
2628 /*
2629 * Check if the page being entered into the pmap violates code signing.
2630 */
2631 static kern_return_t
2632 vm_fault_cs_check_violation(
2633 bool cs_bypass,
2634 vm_object_t object,
2635 vm_page_t m,
2636 pmap_t pmap,
2637 vm_prot_t prot,
2638 vm_prot_t caller_prot,
2639 vm_map_size_t fault_page_size,
2640 vm_map_offset_t fault_phys_offset,
2641 vm_object_fault_info_t fault_info,
2642 bool map_is_switched,
2643 bool map_is_switch_protected,
2644 bool *cs_violation)
2645 {
2646 #if !CODE_SIGNING_MONITOR
2647 #pragma unused(caller_prot)
2648 #pragma unused(fault_info)
2649 #endif /* !CODE_SIGNING_MONITOR */
2650
2651 int cs_enforcement_enabled;
2652 if (!cs_bypass &&
2653 vm_fault_cs_need_validation(pmap, m, object,
2654 fault_page_size, fault_phys_offset)) {
2655 vm_object_lock_assert_exclusive(object);
2656
2657 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2658 vm_cs_revalidates++;
2659 }
2660
2661 /* VM map is locked, so 1 ref will remain on the VM object -
2662 * no harm if vm_page_validate_cs drops the object lock */
2663
2664 #if CODE_SIGNING_MONITOR
2665 if (fault_info->csm_associated &&
2666 csm_enabled() &&
2667 !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2668 !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
2669 !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
2670 (prot & VM_PROT_EXECUTE) &&
2671 (caller_prot & VM_PROT_EXECUTE)) {
2672 /*
2673 * When we have a code signing monitor, the monitor will evaluate the code signature
2674 * for any executable page mapping. No need for the VM to also validate the page.
2675 * In the code signing monitor we trust :)
2676 */
2677 vm_cs_defer_to_csm++;
2678 } else {
2679 vm_cs_defer_to_csm_not++;
2680 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2681 }
2682 #else /* CODE_SIGNING_MONITOR */
2683 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2684 #endif /* CODE_SIGNING_MONITOR */
2685 }
2686
2687 /* If the map is switched, and is switch-protected, we must protect
2688 * some pages from being write-faulted: immutable pages because by
2689 * definition they may not be written, and executable pages because that
2690 * would provide a way to inject unsigned code.
2691 * If the page is immutable, we can simply return. However, we can't
2692 * immediately determine whether a page is executable anywhere. But,
2693 * we can disconnect it everywhere and remove the executable protection
2694 * from the current map. We do that below right before we do the
2695 * PMAP_ENTER.
2696 */
2697 if (pmap == kernel_pmap) {
2698 /* kernel fault: cs_enforcement does not apply */
2699 cs_enforcement_enabled = 0;
2700 } else {
2701 cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2702 }
2703
2704 if (cs_enforcement_enabled && map_is_switched &&
2705 map_is_switch_protected &&
2706 vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2707 (prot & VM_PROT_WRITE)) {
2708 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2709 return KERN_CODESIGN_ERROR;
2710 }
2711
2712 if (cs_enforcement_enabled &&
2713 vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2714 (prot & VM_PROT_EXECUTE)) {
2715 if (cs_debug) {
2716 printf("page marked to be NX, not letting it be mapped EXEC\n");
2717 }
2718 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2719 return KERN_CODESIGN_ERROR;
2720 }
2721
2722 /* A page could be tainted, or pose a risk of being tainted later.
2723 * Check whether the receiving process wants it, and make it feel
2724 * the consequences (that happens in cs_invalid_page()).
2725 * For CS Enforcement, two other conditions will
2726 * cause that page to be tainted as well:
2727 * - pmapping an unsigned page executable - this means unsigned code;
2728 * - writeable mapping of a validated page - the content of that page
2729 * can be changed without the kernel noticing, therefore unsigned
2730 * code can be created
2731 */
2732 if (cs_bypass) {
2733 /* code-signing is bypassed */
2734 *cs_violation = FALSE;
2735 } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2736 /* tainted page */
2737 *cs_violation = TRUE;
2738 } else if (!cs_enforcement_enabled) {
2739 /* no further code-signing enforcement */
2740 *cs_violation = FALSE;
2741 } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2742 ((prot & VM_PROT_WRITE) ||
2743 m->vmp_wpmapped)) {
2744 /*
2745 * The page should be immutable, but is in danger of being
2746 * modified.
2747 * This is the case where we want policy from the code
2748 * directory - is the page immutable or not? For now we have
2749 * to assume that code pages will be immutable, data pages not.
2750 * We'll assume a page is a code page if it has a code directory
2751 * and we fault for execution.
2752 * That is good enough since if we faulted the code page for
2753 * writing in another map before, it is wpmapped; if we fault
2754 * it for writing in this map later it will also be faulted for
2755 * executing at the same time; and if we fault for writing in
2756 * another map later, we will disconnect it from this pmap so
2757 * we'll notice the change.
2758 */
2759 *cs_violation = TRUE;
2760 } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2761 (prot & VM_PROT_EXECUTE)
2762 #if CODE_SIGNING_MONITOR
2763 /*
2764 * Executable pages will be validated by the code signing monitor. If the
2765 * code signing monitor is turned off, then this is a code-signing violation.
2766 */
2767 && !csm_enabled()
2768 #endif /* CODE_SIGNING_MONITOR */
2769 ) {
2770 *cs_violation = TRUE;
2771 } else {
2772 *cs_violation = FALSE;
2773 }
2774 return KERN_SUCCESS;
2775 }
2776
2777 /*
2778 * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2779 * @param must_disconnect This value will be set to true if the caller must disconnect
2780 * this page.
2781 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2782 */
2783 static kern_return_t
2784 vm_fault_cs_handle_violation(
2785 vm_object_t object,
2786 vm_page_t m,
2787 pmap_t pmap,
2788 vm_prot_t prot,
2789 vm_map_offset_t vaddr,
2790 vm_map_size_t fault_page_size,
2791 vm_map_offset_t fault_phys_offset,
2792 bool map_is_switched,
2793 bool map_is_switch_protected,
2794 bool *must_disconnect)
2795 {
2796 #if !MACH_ASSERT
2797 #pragma unused(pmap)
2798 #pragma unused(map_is_switch_protected)
2799 #endif /* !MACH_ASSERT */
2800 /*
2801 * We will have a tainted page. Have to handle the special case
2802 * of a switched map now. If the map is not switched, standard
2803 * procedure applies - call cs_invalid_page().
2804 * If the map is switched, the real owner is invalid already.
2805 * There is no point in invalidating the switching process since
2806 * it will not be executing from the map. So we don't call
2807 * cs_invalid_page() in that case.
2808 */
2809 boolean_t reject_page, cs_killed;
2810 kern_return_t kr;
2811 if (map_is_switched) {
2812 assert(pmap == vm_map_pmap(current_thread()->map));
2813 assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2814 reject_page = FALSE;
2815 } else {
2816 if (cs_debug > 5) {
2817 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2818 object->code_signed ? "yes" : "no",
2819 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2820 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2821 m->vmp_wpmapped ? "yes" : "no",
2822 (int)prot);
2823 }
2824 reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2825 }
2826
2827 if (reject_page) {
2828 /* reject the invalid page: abort the page fault */
2829 int pid;
2830 const char *procname;
2831 task_t task;
2832 vm_object_t file_object, shadow;
2833 vm_object_offset_t file_offset;
2834 char *pathname, *filename;
2835 vm_size_t pathname_len, filename_len;
2836 boolean_t truncated_path;
2837 #define __PATH_MAX 1024
2838 struct timespec mtime, cs_mtime;
2839 int shadow_depth;
2840 os_reason_t codesigning_exit_reason = OS_REASON_NULL;
2841
2842 kr = KERN_CODESIGN_ERROR;
2843 cs_enter_tainted_rejected++;
2844
2845 /* get process name and pid */
2846 procname = "?";
2847 task = current_task();
2848 pid = proc_selfpid();
2849 if (get_bsdtask_info(task) != NULL) {
2850 procname = proc_name_address(get_bsdtask_info(task));
2851 }
2852
2853 /* get file's VM object */
2854 file_object = object;
2855 file_offset = m->vmp_offset;
2856 for (shadow = file_object->shadow,
2857 shadow_depth = 0;
2858 shadow != VM_OBJECT_NULL;
2859 shadow = file_object->shadow,
2860 shadow_depth++) {
2861 vm_object_lock_shared(shadow);
2862 if (file_object != object) {
2863 vm_object_unlock(file_object);
2864 }
2865 file_offset += file_object->vo_shadow_offset;
2866 file_object = shadow;
2867 }
2868
2869 mtime.tv_sec = 0;
2870 mtime.tv_nsec = 0;
2871 cs_mtime.tv_sec = 0;
2872 cs_mtime.tv_nsec = 0;
2873
2874 /* get file's pathname and/or filename */
2875 pathname = NULL;
2876 filename = NULL;
2877 pathname_len = 0;
2878 filename_len = 0;
2879 truncated_path = FALSE;
2880 /* no pager -> no file -> no pathname, use "<nil>" in that case */
2881 if (file_object->pager != NULL) {
2882 pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2883 if (pathname) {
2884 pathname[0] = '\0';
2885 pathname_len = __PATH_MAX;
2886 filename = pathname + pathname_len;
2887 filename_len = __PATH_MAX;
2888
2889 if (vnode_pager_get_object_name(file_object->pager,
2890 pathname,
2891 pathname_len,
2892 filename,
2893 filename_len,
2894 &truncated_path) == KERN_SUCCESS) {
2895 /* safety first... */
2896 pathname[__PATH_MAX - 1] = '\0';
2897 filename[__PATH_MAX - 1] = '\0';
2898
2899 vnode_pager_get_object_mtime(file_object->pager,
2900 &mtime,
2901 &cs_mtime);
2902 } else {
2903 kfree_data(pathname, __PATH_MAX * 2);
2904 pathname = NULL;
2905 filename = NULL;
2906 pathname_len = 0;
2907 filename_len = 0;
2908 truncated_path = FALSE;
2909 }
2910 }
2911 }
2912 printf("CODE SIGNING: process %d[%s]: "
2913 "rejecting invalid page at address 0x%llx "
2914 "from offset 0x%llx in file \"%s%s%s\" "
2915 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2916 "(signed:%d validated:%d tainted:%d nx:%d "
2917 "wpmapped:%d dirty:%d depth:%d)\n",
2918 pid, procname, (addr64_t) vaddr,
2919 file_offset,
2920 (pathname ? pathname : "<nil>"),
2921 (truncated_path ? "/.../" : ""),
2922 (truncated_path ? filename : ""),
2923 cs_mtime.tv_sec, cs_mtime.tv_nsec,
2924 ((cs_mtime.tv_sec == mtime.tv_sec &&
2925 cs_mtime.tv_nsec == mtime.tv_nsec)
2926 ? "=="
2927 : "!="),
2928 mtime.tv_sec, mtime.tv_nsec,
2929 object->code_signed,
2930 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
2931 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
2932 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
2933 m->vmp_wpmapped,
2934 m->vmp_dirty,
2935 shadow_depth);
2936
2937 /*
2938 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
2939 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
2940 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
2941 * will deal with the segmentation fault.
2942 */
2943 if (cs_killed) {
2944 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
2945 pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
2946
2947 codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
2948 if (codesigning_exit_reason == NULL) {
2949 printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
2950 } else {
2951 mach_vm_address_t data_addr = 0;
2952 struct codesigning_exit_reason_info *ceri = NULL;
2953 uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
2954
2955 if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
2956 printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
2957 } else {
2958 if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
2959 EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
2960 ceri = (struct codesigning_exit_reason_info *)data_addr;
2961 static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
2962
2963 ceri->ceri_virt_addr = vaddr;
2964 ceri->ceri_file_offset = file_offset;
2965 if (pathname) {
2966 strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
2967 } else {
2968 ceri->ceri_pathname[0] = '\0';
2969 }
2970 if (filename) {
2971 strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
2972 } else {
2973 ceri->ceri_filename[0] = '\0';
2974 }
2975 ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
2976 ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
2977 ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
2978 ceri->ceri_page_modtime_secs = mtime.tv_sec;
2979 ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
2980 ceri->ceri_object_codesigned = (object->code_signed);
2981 ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
2982 ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
2983 ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2984 ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
2985 ceri->ceri_page_slid = 0;
2986 ceri->ceri_page_dirty = (m->vmp_dirty);
2987 ceri->ceri_page_shadow_depth = shadow_depth;
2988 } else {
2989 #if DEBUG || DEVELOPMENT
2990 panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
2991 #else
2992 printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
2993 #endif /* DEBUG || DEVELOPMENT */
2994 /* Free the buffer */
2995 os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
2996 }
2997 }
2998 }
2999
3000 set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
3001 }
3002 if (panic_on_cs_killed &&
3003 object->object_is_shared_cache) {
3004 char *tainted_contents;
3005 vm_map_offset_t src_vaddr;
3006 src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
3007 tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
3008 bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
3009 printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
3010 panic("CODE SIGNING: process %d[%s]: "
3011 "rejecting invalid page (phys#0x%x) at address 0x%llx "
3012 "from offset 0x%llx in file \"%s%s%s\" "
3013 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3014 "(signed:%d validated:%d tainted:%d nx:%d"
3015 "wpmapped:%d dirty:%d depth:%d)\n",
3016 pid, procname,
3017 VM_PAGE_GET_PHYS_PAGE(m),
3018 (addr64_t) vaddr,
3019 file_offset,
3020 (pathname ? pathname : "<nil>"),
3021 (truncated_path ? "/.../" : ""),
3022 (truncated_path ? filename : ""),
3023 cs_mtime.tv_sec, cs_mtime.tv_nsec,
3024 ((cs_mtime.tv_sec == mtime.tv_sec &&
3025 cs_mtime.tv_nsec == mtime.tv_nsec)
3026 ? "=="
3027 : "!="),
3028 mtime.tv_sec, mtime.tv_nsec,
3029 object->code_signed,
3030 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3031 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3032 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3033 m->vmp_wpmapped,
3034 m->vmp_dirty,
3035 shadow_depth);
3036 }
3037
3038 if (file_object != object) {
3039 vm_object_unlock(file_object);
3040 }
3041 if (pathname_len != 0) {
3042 kfree_data(pathname, __PATH_MAX * 2);
3043 pathname = NULL;
3044 filename = NULL;
3045 }
3046 } else {
3047 /* proceed with the invalid page */
3048 kr = KERN_SUCCESS;
3049 if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3050 !object->code_signed) {
3051 /*
3052 * This page has not been (fully) validated but
3053 * does not belong to a code-signed object
3054 * so it should not be forcefully considered
3055 * as tainted.
3056 * We're just concerned about it here because
3057 * we've been asked to "execute" it but that
3058 * does not mean that it should cause other
3059 * accesses to fail.
3060 * This happens when a debugger sets a
3061 * breakpoint and we then execute code in
3062 * that page. Marking the page as "tainted"
3063 * would cause any inspection tool ("leaks",
3064 * "vmmap", "CrashReporter", ...) to get killed
3065 * due to code-signing violation on that page,
3066 * even though they're just reading it and not
3067 * executing from it.
3068 */
3069 } else {
3070 /*
3071 * Page might have been tainted before or not;
3072 * now it definitively is. If the page wasn't
3073 * tainted, we must disconnect it from all
3074 * pmaps later, to force existing mappings
3075 * through that code path for re-consideration
3076 * of the validity of that page.
3077 */
3078 if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3079 *must_disconnect = TRUE;
3080 VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3081 }
3082 }
3083 cs_enter_tainted_accepted++;
3084 }
3085 if (kr != KERN_SUCCESS) {
3086 if (cs_debug) {
3087 printf("CODESIGNING: vm_fault_enter(0x%llx): "
3088 "*** INVALID PAGE ***\n",
3089 (long long)vaddr);
3090 }
3091 #if !SECURE_KERNEL
3092 if (cs_enforcement_panic) {
3093 panic("CODESIGNING: panicking on invalid page");
3094 }
3095 #endif
3096 }
3097 return kr;
3098 }
3099
3100 /*
3101 * Check that the code signature is valid for the given page being inserted into
3102 * the pmap.
3103 *
3104 * @param must_disconnect This value will be set to true if the caller must disconnect
3105 * this page.
3106 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3107 */
3108 static kern_return_t
3109 vm_fault_validate_cs(
3110 bool cs_bypass,
3111 vm_object_t object,
3112 vm_page_t m,
3113 pmap_t pmap,
3114 vm_map_offset_t vaddr,
3115 vm_prot_t prot,
3116 vm_prot_t caller_prot,
3117 vm_map_size_t fault_page_size,
3118 vm_map_offset_t fault_phys_offset,
3119 vm_object_fault_info_t fault_info,
3120 bool *must_disconnect)
3121 {
3122 bool map_is_switched, map_is_switch_protected, cs_violation;
3123 kern_return_t kr;
3124 /* Validate code signature if necessary. */
3125 map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3126 (pmap == vm_map_pmap(current_thread()->map)));
3127 map_is_switch_protected = current_thread()->map->switch_protect;
3128 kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3129 prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3130 map_is_switched, map_is_switch_protected, &cs_violation);
3131 if (kr != KERN_SUCCESS) {
3132 return kr;
3133 }
3134 if (cs_violation) {
3135 kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3136 fault_page_size, fault_phys_offset,
3137 map_is_switched, map_is_switch_protected, must_disconnect);
3138 }
3139 return kr;
3140 }
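/*
 * Taken together, the code-signing flow is roughly:
 *
 *	vm_fault_validate_cs()
 *	    -> vm_fault_cs_check_violation()	does this mapping
 *						violate code signing?
 *	    -> vm_fault_cs_handle_violation()	reject the page, or
 *						accept it as tainted and
 *						ask the caller to
 *						disconnect it everywhere
 */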
3141
3142 /*
3143 * Enqueue the page on the appropriate paging queue.
3144 */
3145 static void
3146 vm_fault_enqueue_page(
3147 vm_object_t object,
3148 vm_page_t m,
3149 bool wired,
3150 bool change_wiring,
3151 vm_tag_t wire_tag,
3152 bool no_cache,
3153 int *type_of_fault,
3154 kern_return_t kr)
3155 {
3156 assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3157 boolean_t page_queues_locked = FALSE;
3158 boolean_t previously_pmapped = m->vmp_pmapped;
3159 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
3160 MACRO_BEGIN \
3161 if (! page_queues_locked) { \
3162 page_queues_locked = TRUE; \
3163 vm_page_lockspin_queues(); \
3164 } \
3165 MACRO_END
3166 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
3167 MACRO_BEGIN \
3168 if (page_queues_locked) { \
3169 page_queues_locked = FALSE; \
3170 vm_page_unlock_queues(); \
3171 } \
3172 MACRO_END
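/*
 * These helpers let the branches below take the page-queues
 * spinlock lazily and at most once, whichever paths end up
 * running, with a single unlock at the end of the function.
 */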
3173
3174 vm_page_update_special_state(m);
3175 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3176 /*
3177 * Compressor pages are neither wired
3178 * nor pageable and should never change.
3179 */
3180 assert(object == compressor_object);
3181 } else if (change_wiring) {
3182 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3183
3184 if (wired) {
3185 if (kr == KERN_SUCCESS) {
3186 vm_page_wire(m, wire_tag, TRUE);
3187 }
3188 } else {
3189 vm_page_unwire(m, TRUE);
3190 }
3191 /* we keep the page queues lock, if we need it later */
3192 } else {
3193 if (object->internal == TRUE) {
3194 /*
3195 * don't allow anonymous pages on
3196 * the speculative queues
3197 */
3198 no_cache = FALSE;
3199 }
3200 if (kr != KERN_SUCCESS) {
3201 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3202 vm_page_deactivate(m);
3203 /* we keep the page queues lock, if we need it later */
3204 } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3205 (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3206 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3207 ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3208 !VM_PAGE_WIRED(m)) {
3209 if (vm_page_local_q &&
3210 (*type_of_fault == DBG_COW_FAULT ||
3211 *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3212 struct vpl *lq;
3213 uint32_t lid;
3214
3215 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3216
3217 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3218 vm_object_lock_assert_exclusive(object);
3219
3220 /*
3221 * we got a local queue to stuff this
3222 * new page on...
3223 * it's safe to manipulate local and
3224 * local_id at this point since we're
3225 * behind an exclusive object lock and
3226 * the page is not on any global queue.
3227 *
3228 * we'll use the current cpu number to
3229 * select the queue. note that we don't
3230 * need to disable preemption... we're
3231 * going to be behind the local queue's
3232 * lock to do the real work
3233 */
3234 lid = cpu_number();
3235
3236 lq = zpercpu_get_cpu(vm_page_local_q, lid);
3237
3238 VPL_LOCK(&lq->vpl_lock);
3239
3240 vm_page_check_pageable_safe(m);
3241 vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3242 m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3243 m->vmp_local_id = lid;
3244 lq->vpl_count++;
3245
3246 if (object->internal) {
3247 lq->vpl_internal_count++;
3248 } else {
3249 lq->vpl_external_count++;
3250 }
3251
3252 VPL_UNLOCK(&lq->vpl_lock);
3253
3254 if (lq->vpl_count > vm_page_local_q_soft_limit) {
3255 /*
3256 * we're beyond the soft limit
3257 * for the local queue
3258 * vm_page_reactivate_local will
3259 * 'try' to take the global page
3260 * queue lock... if it can't
3261 * that's ok... we'll let the
3262 * queue continue to grow up
3263 * to the hard limit... at that
3264 * point we'll wait for the
3265 * lock... once we've got the
3266 * lock, we'll transfer all of
3267 * the pages from the local
3268 * queue to the global active
3269 * queue
3270 */
3271 vm_page_reactivate_local(lid, FALSE, FALSE);
3272 }
3273 } else {
3274 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3275
3276 /*
3277 * test again now that we hold the
3278 * page queue lock
3279 */
3280 if (!VM_PAGE_WIRED(m)) {
3281 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3282 vm_page_queues_remove(m, FALSE);
3283
3284 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3285 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3286 }
3287
3288 if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3289 no_cache) {
3290 /*
3291 * If this is a no_cache mapping
3292 * and the page has never been
3293 * mapped before or was
3294 * previously a no_cache page,
3295 * then we want to leave pages
3296 * in the speculative state so
3297 * that they can be readily
3298 * recycled if free memory runs
3299 * low. Otherwise the page is
3300 * activated as normal.
3301 */
3302
3303 if (no_cache &&
3304 (!previously_pmapped ||
3305 m->vmp_no_cache)) {
3306 m->vmp_no_cache = TRUE;
3307
3308 if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3309 vm_page_speculate(m, FALSE);
3310 }
3311 } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3312 vm_page_activate(m);
3313 }
3314 }
3315 }
3316 /* we keep the page queues lock, if we need it later */
3317 }
3318 }
3319 }
3320 /* we're done with the page queues lock, if we ever took it */
3321 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3322 }
3323
3324 /*
3325 * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3326 * @return true if the page needs to be synced via pmap_sync_page_data_phys
3327 * before being inserted into the pmap.
3328 */
3329 static bool
3330 vm_fault_enter_set_mapped(
3331 vm_object_t object,
3332 vm_page_t m,
3333 vm_prot_t prot,
3334 vm_prot_t fault_type)
3335 {
3336 bool page_needs_sync = false;
3337 /*
3338 * NOTE: we may only hold the vm_object lock SHARED
3339 * at this point, so we need the phys_page lock to
3340 * properly serialize updating the pmapped and
3341 * xpmapped bits
3342 */
3343 if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3344 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3345
3346 pmap_lock_phys_page(phys_page);
3347 m->vmp_pmapped = TRUE;
3348
3349 if (!m->vmp_xpmapped) {
3350 m->vmp_xpmapped = TRUE;
3351
3352 pmap_unlock_phys_page(phys_page);
3353
3354 if (!object->internal) {
3355 OSAddAtomic(1, &vm_page_xpmapped_external_count);
3356 }
3357
3358 #if defined(__arm64__)
3359 page_needs_sync = true;
3360 #else
3361 if (object->internal &&
3362 object->pager != NULL) {
3363 /*
3364 * This page could have been
3365 * uncompressed by the
3366 * compressor pager and its
3367 * contents might be only in
3368 * the data cache.
3369 * Since it's being mapped for
3370 * "execute" for the fist time,
3371 * make sure the icache is in
3372 * sync.
3373 */
3374 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3375 page_needs_sync = true;
3376 }
3377 #endif
3378 } else {
3379 pmap_unlock_phys_page(phys_page);
3380 }
3381 } else {
3382 if (m->vmp_pmapped == FALSE) {
3383 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3384
3385 pmap_lock_phys_page(phys_page);
3386 m->vmp_pmapped = TRUE;
3387 pmap_unlock_phys_page(phys_page);
3388 }
3389 }
3390
3391 if (fault_type & VM_PROT_WRITE) {
3392 if (m->vmp_wpmapped == FALSE) {
3393 vm_object_lock_assert_exclusive(object);
3394 if (!object->internal && object->pager) {
3395 task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3396 }
3397 m->vmp_wpmapped = TRUE;
3398 }
3399 }
3400 return page_needs_sync;
3401 }
3402
3403 #if CODE_SIGNING_MONITOR && !XNU_PLATFORM_MacOSX
3404 #define KILL_FOR_CSM_VIOLATION 1
3405 #else /* CODE_SIGNING_MONITOR && !XNU_PLATFORM_MacOSX */
3406 #define KILL_FOR_CSM_VIOLATION 0
3407 #endif /* CODE_SIGNING_MONITOR && !XNU_PLATFORM_MacOSX */
3408
3409 #if KILL_FOR_CSM_VIOLATION
3410 static void
3411 vm_fault_kill_for_csm_violation(
3412 pmap_t pmap,
3413 vm_map_offset_t vaddr,
3414 vm_prot_t prot,
3415 vm_prot_t fault_type)
3416 {
3417 void *p;
3418 char *pname;
3419
3420 if (pmap_is_nested(pmap)) {
3421 panic("code-signing violation for nested pmap %p vaddr 0x%llx prot 0x%x fault 0x%x", pmap, (uint64_t)vaddr, prot, fault_type);
3422 }
3423 p = get_bsdtask_info(current_task());
3424 pname = p ? proc_best_name(p) : "?";
3425 printf("CODESIGNING: killing %d[%s] for CSM violation at vaddr 0x%llx prot 0x%x fault 0x%x\n",
3426 proc_selfpid(), pname,
3427 (uint64_t)vaddr, prot, fault_type);
3428 task_bsdtask_kill(current_task());
3429 }
3430 #endif /* KILL_FOR_CSM_VIOLATION */
3431
3432 /*
3433 * wrapper for pmap_enter_options()
3434 */
3435 static kern_return_t
3436 pmap_enter_options_check(
3437 pmap_t pmap,
3438 vm_map_address_t virtual_address,
3439 vm_map_offset_t fault_phys_offset,
3440 vm_page_t page,
3441 vm_prot_t protection,
3442 vm_prot_t fault_type,
3443 unsigned int flags,
3444 boolean_t wired,
3445 unsigned int options)
3446 {
3447 int extra_options = 0;
3448 vm_object_t obj;
3449
3450 if (page->vmp_error) {
3451 return KERN_MEMORY_FAILURE;
3452 }
3453 obj = VM_PAGE_OBJECT(page);
3454 if (obj->internal) {
3455 extra_options |= PMAP_OPTIONS_INTERNAL;
3456 }
3457 if (page->vmp_reusable || obj->all_reusable) {
3458 extra_options |= PMAP_OPTIONS_REUSABLE;
3459 }
3460 return pmap_enter_options_addr(pmap,
3461 virtual_address,
3462 (pmap_paddr_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + fault_phys_offset,
3463 protection,
3464 fault_type,
3465 flags,
3466 wired,
3467 options | extra_options,
3468 NULL);
3469 }
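/*
 * Design note: centralizing these checks means callers can't forget the
 * vmp_error check or the per-page PMAP_OPTIONS_INTERNAL/REUSABLE
 * derivation. A minimal (hypothetical) call would look like:
 *
 *	kr = pmap_enter_options_check(pmap, vaddr, 0, m,
 *	    VM_PROT_READ, VM_PROT_READ, 0, FALSE, 0);
 */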
3470
3471 /*
3472 * Try to enter the given page into the pmap.
3473 * Will retry without execute permission if the code signing monitor is enabled and
3474 * we encounter a codesigning failure on a non-execute fault.
3475 */
3476 static kern_return_t
3477 vm_fault_attempt_pmap_enter(
3478 pmap_t pmap,
3479 vm_map_offset_t vaddr,
3480 vm_map_size_t fault_page_size,
3481 vm_map_offset_t fault_phys_offset,
3482 vm_page_t m,
3483 vm_prot_t *prot,
3484 vm_prot_t caller_prot,
3485 vm_prot_t fault_type,
3486 bool wired,
3487 int pmap_options)
3488 {
3489 #if !CODE_SIGNING_MONITOR
3490 #pragma unused(caller_prot)
3491 #endif /* !CODE_SIGNING_MONITOR */
3492
3493 kern_return_t kr;
3494 if (fault_page_size != PAGE_SIZE) {
3495 DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3496 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3497 fault_phys_offset < PAGE_SIZE),
3498 "0x%llx\n", (uint64_t)fault_phys_offset);
3499 } else {
3500 assertf(fault_phys_offset == 0,
3501 "0x%llx\n", (uint64_t)fault_phys_offset);
3502 }
3503
3504 kr = pmap_enter_options_check(pmap, vaddr,
3505 fault_phys_offset,
3506 m, *prot, fault_type, 0,
3507 wired,
3508 pmap_options);
3509
3510 #if CODE_SIGNING_MONITOR
3511 /*
3512 * Retry without execute permission if we encountered a codesigning
3513 * failure on a non-execute fault. This allows applications which
3514 * don't actually need to execute code to still map it for read access.
3515 */
3516 if (kr == KERN_CODESIGN_ERROR &&
3517 csm_enabled() &&
3518 (*prot & VM_PROT_EXECUTE) &&
3519 !(caller_prot & VM_PROT_EXECUTE)) {
3520 *prot &= ~VM_PROT_EXECUTE;
3521 kr = pmap_enter_options_check(pmap, vaddr,
3522 fault_phys_offset,
3523 m, *prot, fault_type, 0,
3524 wired,
3525 pmap_options);
3526 }
3527 #if KILL_FOR_CSM_VIOLATION
3528 if (kr == KERN_CODESIGN_ERROR && pmap != kernel_pmap) {
3529 vm_fault_kill_for_csm_violation(pmap, vaddr, *prot, fault_type);
3530 }
3531 #endif /* KILL_FOR_CSM_VIOLATION */
3532 #endif /* CODE_SIGNING_MONITOR */
3533
3534 return kr;
3535 }
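/*
 * Example scenario (illustrative, assuming a code-signing monitor is
 * enabled): a task read-faults (caller_prot == VM_PROT_READ) on a page
 * mapped r-x whose signature does not validate. The first
 * pmap_enter_options_check() with *prot == (READ | EXECUTE) fails with
 * KERN_CODESIGN_ERROR; the retry strips EXECUTE from *prot and enters
 * the page read-only, so the read succeeds without granting any execute
 * rights.
 */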
3536
3537 /*
3538 * Enter the given page into the pmap.
3539 * The map must be locked shared.
3540 * The vm object must NOT be locked.
3541 *
3542 * @param need_retry if not null, avoid making a (potentially) blocking call into
3543 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3544 */
3545 static kern_return_t
3546 vm_fault_pmap_enter(
3547 pmap_t pmap,
3548 vm_map_offset_t vaddr,
3549 vm_map_size_t fault_page_size,
3550 vm_map_offset_t fault_phys_offset,
3551 vm_page_t m,
3552 vm_prot_t *prot,
3553 vm_prot_t caller_prot,
3554 vm_prot_t fault_type,
3555 bool wired,
3556 int pmap_options,
3557 boolean_t *need_retry)
3558 {
3559 kern_return_t kr;
3560 if (need_retry != NULL) {
3561 /*
3562 * Although we don't hold a lock on this object, we hold a lock
3563 * on the top object in the chain. To prevent a deadlock, we
3564 * can't allow the pmap layer to block.
3565 */
3566 pmap_options |= PMAP_OPTIONS_NOWAIT;
3567 }
3568 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3569 fault_page_size, fault_phys_offset,
3570 m, prot, caller_prot, fault_type, wired, pmap_options);
3571 if (kr == KERN_RESOURCE_SHORTAGE) {
3572 if (need_retry) {
3573 /*
3574 * There's nothing we can do here since we hold the
3575 * lock on the top object in the chain. The caller
3576 * will need to deal with this by dropping that lock and retrying.
3577 */
3578 *need_retry = TRUE;
3579 vm_pmap_enter_retried++;
3580 }
3581 }
3582 return kr;
3583 }
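/*
 * Illustrative sketch of the caller-side "need_retry" protocol
 * (hypothetical caller, not part of this file):
 *
 *	boolean_t need_retry = FALSE;
 *
 *	kr = vm_fault_pmap_enter(pmap, vaddr, fault_page_size,
 *	    fault_phys_offset, m, &prot, caller_prot, fault_type,
 *	    wired, pmap_options, &need_retry);
 *	if (need_retry) {
 *		... drop the object and map locks, then re-drive the fault ...
 *	}
 *
 * vm_fault_internal() implements this by pre-expanding the page table
 * with PMAP_OPTIONS_NOENTER before retrying, so the second attempt can
 * succeed without blocking.
 */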
3584
3585 /*
3586 * Enter the given page into the pmap.
3587 * The vm map must be locked shared.
3588 * The vm object must be locked exclusive, unless this is a soft fault.
3589 * For a soft fault, the object must be locked shared or exclusive.
3590 *
3591 * @param need_retry if not null, avoid making a (potentially) blocking call into
3592 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3593 */
3594 static kern_return_t
3595 vm_fault_pmap_enter_with_object_lock(
3596 vm_object_t object,
3597 pmap_t pmap,
3598 vm_map_offset_t vaddr,
3599 vm_map_size_t fault_page_size,
3600 vm_map_offset_t fault_phys_offset,
3601 vm_page_t m,
3602 vm_prot_t *prot,
3603 vm_prot_t caller_prot,
3604 vm_prot_t fault_type,
3605 bool wired,
3606 int pmap_options,
3607 boolean_t *need_retry,
3608 uint8_t *object_lock_type)
3609 {
3610 kern_return_t kr;
3611 /*
3612 * Prevent a deadlock by not
3613 * holding the object lock if we need to wait for a page in
3614 * pmap_enter() - <rdar://problem/7138958>
3615 */
3616 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3617 fault_page_size, fault_phys_offset,
3618 m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3619 #if __x86_64__
3620 if (kr == KERN_INVALID_ARGUMENT &&
3621 pmap == PMAP_NULL &&
3622 wired) {
3623 /*
3624 * Wiring a page in a pmap-less VM map:
3625 * VMware's "vmmon" kernel extension does this
3626 * to grab pages.
3627 * Let it proceed even though the PMAP_ENTER() failed.
3628 */
3629 kr = KERN_SUCCESS;
3630 }
3631 #endif /* __x86_64__ */
3632
3633 if (kr == KERN_RESOURCE_SHORTAGE) {
3634 if (need_retry) {
3635 /*
3636 * this will be non-null in the case where we hold the lock
3637 * on the top-object in this chain... we can't just drop
3638 * the lock on the object we're inserting the page into
3639 * and recall the PMAP_ENTER since we can still cause
3640 * a deadlock if one of the critical paths tries to
3641 * acquire the lock on the top-object and we're blocked
3642 * in PMAP_ENTER waiting for memory... our only recourse
3643 * is to deal with it at a higher level where we can
3644 * drop both locks.
3645 */
3646 *need_retry = TRUE;
3647 vm_pmap_enter_retried++;
3648 goto done;
3649 }
3650 /*
3651 * The nonblocking version of pmap_enter did not succeed,
3652 * and we don't need to drop other locks and retry
3653 * at the level above us, so
3654 * use the blocking version instead. This requires marking
3655 * the page busy and unlocking the object.
3656 */
3657 boolean_t was_busy = m->vmp_busy;
3658
3659 vm_object_lock_assert_exclusive(object);
3660
3661 m->vmp_busy = TRUE;
3662 vm_object_unlock(object);
3663
3664 kr = pmap_enter_options_check(pmap, vaddr,
3665 fault_phys_offset,
3666 m, *prot, fault_type,
3667 0, wired,
3668 pmap_options);
3669
3670 assert(VM_PAGE_OBJECT(m) == object);
3671
3672 #if KILL_FOR_CSM_VIOLATION
3673 if (kr == KERN_CODESIGN_ERROR && pmap != kernel_pmap) {
3674 vm_fault_kill_for_csm_violation(pmap, vaddr, *prot, fault_type);
3675 }
3676 #endif /* KILL_FOR_CSM_VIOLATION */
3677
3678 /* Take the object lock again. */
3679 vm_object_lock(object);
3680
3681 /* If the page was busy, someone else will wake it up.
3682 * Otherwise, we have to do it now. */
3683 assert(m->vmp_busy);
3684 if (!was_busy) {
3685 PAGE_WAKEUP_DONE(m);
3686 }
3687 vm_pmap_enter_blocked++;
3688 }
3689
3690 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
3691 if ((*prot & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
3692 if (*object_lock_type == OBJECT_LOCK_SHARED) {
3693 boolean_t was_busy = m->vmp_busy;
3694 m->vmp_busy = TRUE;
3695
3696 *object_lock_type = OBJECT_LOCK_EXCLUSIVE;
3697
3698 if (vm_object_lock_upgrade(object) == FALSE) {
3699 vm_object_lock(object);
3700 }
3701
3702 if (!was_busy) {
3703 PAGE_WAKEUP_DONE(m);
3704 }
3705 }
3706 vm_object_lock_assert_exclusive(object);
3707 vm_page_lockspin_queues();
3708 m->vmp_unmodified_ro = false;
3709 vm_page_unlock_queues();
3710 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
3711
3712 VM_COMPRESSOR_PAGER_STATE_CLR(VM_PAGE_OBJECT(m), m->vmp_offset);
3713 }
3714 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3715 #pragma unused(object_lock_type)
3716 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3717
3718 done:
3719 return kr;
3720 }
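/*
 * Design note: the first attempt always passes PMAP_OPTIONS_NOWAIT so
 * that a page-table allocation can never block while the object lock is
 * held. Only when the caller is not holding a lock on the top object
 * (need_retry == NULL) do we pay for the busy/unlock/relock dance above
 * and call the blocking variant.
 */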
3721
3722 /*
3723 * Prepare to enter a page into the pmap by checking CS, protection bits,
3724 * and setting mapped bits on the page_t.
3725 * Does not modify the page's paging queue.
3726 *
3727 * page queue lock must NOT be held
3728 * m->vmp_object must be locked
3729 *
3730 * NOTE: m->vmp_object could be locked "shared" only if we are called
3731 * from vm_fault() as part of a soft fault.
3732 */
3733 static kern_return_t
3734 vm_fault_enter_prepare(
3735 vm_page_t m,
3736 pmap_t pmap,
3737 vm_map_offset_t vaddr,
3738 vm_prot_t *prot,
3739 vm_prot_t caller_prot,
3740 vm_map_size_t fault_page_size,
3741 vm_map_offset_t fault_phys_offset,
3742 boolean_t change_wiring,
3743 vm_prot_t fault_type,
3744 vm_object_fault_info_t fault_info,
3745 int *type_of_fault,
3746 bool *page_needs_data_sync)
3747 {
3748 kern_return_t kr;
3749 bool is_tainted = false;
3750 vm_object_t object;
3751 boolean_t cs_bypass = fault_info->cs_bypass;
3752
3753 object = VM_PAGE_OBJECT(m);
3754
3755 vm_object_lock_assert_held(object);
3756
3757 #if KASAN
3758 if (pmap == kernel_pmap) {
3759 kasan_notify_address(vaddr, PAGE_SIZE);
3760 }
3761 #endif
3762
3763 #if CODE_SIGNING_MONITOR
3764 if (csm_address_space_exempt(pmap) == KERN_SUCCESS) {
3765 cs_bypass = TRUE;
3766 }
3767 #endif
3768
3769 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3770
3771 if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3772 vm_object_lock_assert_exclusive(object);
3773 } else if ((fault_type & VM_PROT_WRITE) == 0 &&
3774 !change_wiring &&
3775 (!m->vmp_wpmapped
3776 #if VM_OBJECT_ACCESS_TRACKING
3777 || object->access_tracking
3778 #endif /* VM_OBJECT_ACCESS_TRACKING */
3779 )) {
3780 /*
3781 * This is not a "write" fault, so we
3782 * might not have taken the object lock
3783 * exclusively and we might not be able
3784 * to update the "wpmapped" bit in
3785 * vm_fault_enter().
3786 * Let's just grant read access to
3787 * the page for now and we'll
3788 * soft-fault again if we need write
3789 * access later...
3790 */
3791
3792 /* This had better not be a JIT page. */
3793 if (!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3794 *prot &= ~VM_PROT_WRITE;
3795 } else {
3796 assert(cs_bypass);
3797 }
3798 }
3799 if (m->vmp_pmapped == FALSE) {
3800 if (m->vmp_clustered) {
3801 if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3802 /*
3803 * found it in the cache, but this
3804 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3805 * so it must have come in as part of
3806 * a cluster... account 1 pagein against it
3807 */
3808 if (object->internal) {
3809 *type_of_fault = DBG_PAGEIND_FAULT;
3810 } else {
3811 *type_of_fault = DBG_PAGEINV_FAULT;
3812 }
3813
3814 VM_PAGE_COUNT_AS_PAGEIN(m);
3815 }
3816 VM_PAGE_CONSUME_CLUSTERED(m);
3817 }
3818 }
3819
3820 if (*type_of_fault != DBG_COW_FAULT) {
3821 DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3822
3823 if (pmap == kernel_pmap) {
3824 DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3825 }
3826 }
3827
3828 kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3829 *prot, caller_prot, fault_page_size, fault_phys_offset,
3830 fault_info, &is_tainted);
3831 if (kr == KERN_SUCCESS) {
3832 /*
3833 * We either have a good page, or a tainted page that has been accepted by the process.
3834 * In both cases the page will be entered into the pmap.
3835 */
3836 *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3837 if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3838 /*
3839 * This page is tainted but we're inserting it anyway.
3840 * Since it's writeable, we need to disconnect it from other pmaps
3841 * now so those processes can take note.
3842 */
3843
3844 /*
3845 * We can only get here
3846 * because of the CSE logic
3847 */
3848 assert(pmap_get_vm_map_cs_enforced(pmap));
3849 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3850 /*
3851 * If we are faulting for a write, we can clear
3852 * the execute bit - that will ensure the page is
3853 * checked again before being executable, which
3854 * protects against a map switch.
3855 * This only happens the first time the page
3856 * gets tainted, so we won't get stuck here
3857 * to make an already writeable page executable.
3858 */
3859 if (!cs_bypass) {
3860 assert(!pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot));
3861 *prot &= ~VM_PROT_EXECUTE;
3862 }
3863 }
3864 assert(VM_PAGE_OBJECT(m) == object);
3865
3866 #if VM_OBJECT_ACCESS_TRACKING
3867 if (object->access_tracking) {
3868 DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3869 if (fault_type & VM_PROT_WRITE) {
3870 object->access_tracking_writes++;
3871 vm_object_access_tracking_writes++;
3872 } else {
3873 object->access_tracking_reads++;
3874 vm_object_access_tracking_reads++;
3875 }
3876 }
3877 #endif /* VM_OBJECT_ACCESS_TRACKING */
3878 }
3879
3880 return kr;
3881 }
3882
3883 /*
3884 * page queue lock must NOT be held
3885 * m->vmp_object must be locked
3886 *
3887 * NOTE: m->vmp_object could be locked "shared" only if we are called
3888 * from vm_fault() as part of a soft fault. If so, we must be
3889 * careful not to modify the VM object in any way that is not
3890 * legal under a shared lock...
3891 */
3892 kern_return_t
3893 vm_fault_enter(
3894 vm_page_t m,
3895 pmap_t pmap,
3896 vm_map_offset_t vaddr,
3897 vm_map_size_t fault_page_size,
3898 vm_map_offset_t fault_phys_offset,
3899 vm_prot_t prot,
3900 vm_prot_t caller_prot,
3901 boolean_t wired,
3902 boolean_t change_wiring,
3903 vm_tag_t wire_tag,
3904 vm_object_fault_info_t fault_info,
3905 boolean_t *need_retry,
3906 int *type_of_fault,
3907 uint8_t *object_lock_type)
3908 {
3909 kern_return_t kr;
3910 vm_object_t object;
3911 bool page_needs_data_sync;
3912 vm_prot_t fault_type;
3913 int pmap_options = fault_info->pmap_options;
3914
3915 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
3916 assert(m->vmp_fictitious);
3917 return KERN_SUCCESS;
3918 }
3919
3920 fault_type = change_wiring ? VM_PROT_NONE : caller_prot;
3921
3922 assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
3923 kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
3924 fault_page_size, fault_phys_offset, change_wiring, fault_type,
3925 fault_info, type_of_fault, &page_needs_data_sync);
3926 object = VM_PAGE_OBJECT(m);
3927
3928 vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
3929
3930 if (kr == KERN_SUCCESS) {
3931 if (page_needs_data_sync) {
3932 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
3933 }
3934
3935 if (fault_info->fi_xnu_user_debug && !object->code_signed) {
3936 pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
3937 }
3938
3939
3940 kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
3941 fault_page_size, fault_phys_offset, m,
3942 &prot, caller_prot, fault_type, wired, pmap_options, need_retry, object_lock_type);
3943 }
3944
3945 return kr;
3946 }
3947
3948 void
3949 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
3950 {
3951 if (pmap_find_phys(current_map()->pmap, vaddr) == 0) {
3952 vm_fault(current_map(), /* map */
3953 vaddr, /* vaddr */
3954 prot, /* fault_type */
3955 FALSE, /* change_wiring */
3956 VM_KERN_MEMORY_NONE, /* tag - not wiring */
3957 THREAD_UNINT, /* interruptible */
3958 NULL, /* caller_pmap */
3959 0 /* caller_pmap_addr */);
3960 }
3961 }
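/*
 * Example usage (illustrative; "user_buf" is hypothetical): warming up a
 * translation before work that should not stall on a fault, such as
 * prefaulting a user buffer before a copy loop:
 *
 *	vm_pre_fault(user_buf, VM_PROT_READ | VM_PROT_WRITE);
 *
 * The pmap_find_phys() check keeps the call cheap when a translation
 * already exists.
 */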
3962
3963
3964 /*
3965 * Routine: vm_fault
3966 * Purpose:
3967 * Handle page faults, including pseudo-faults
3968 * used to change the wiring status of pages.
3969 * Returns:
3970 * Explicit continuations have been removed.
3971 * Implementation:
3972 * vm_fault and vm_fault_page save mucho state
3973 * in the moral equivalent of a closure. The state
3974 * structure is allocated when first entering vm_fault
3975 * and deallocated when leaving vm_fault.
3976 */
3977
3978 extern uint64_t get_current_unique_pid(void);
3979
3980 unsigned long vm_fault_collapse_total = 0;
3981 unsigned long vm_fault_collapse_skipped = 0;
3982
3983
3984 kern_return_t
3985 vm_fault_external(
3986 vm_map_t map,
3987 vm_map_offset_t vaddr,
3988 vm_prot_t fault_type,
3989 boolean_t change_wiring,
3990 int interruptible,
3991 pmap_t caller_pmap,
3992 vm_map_offset_t caller_pmap_addr)
3993 {
3994 return vm_fault_internal(map, vaddr, fault_type, change_wiring,
3995 change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
3996 interruptible, caller_pmap, caller_pmap_addr,
3997 NULL);
3998 }
3999
4000 kern_return_t
4001 vm_fault(
4002 vm_map_t map,
4003 vm_map_offset_t vaddr,
4004 vm_prot_t fault_type,
4005 boolean_t change_wiring,
4006 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4007 int interruptible,
4008 pmap_t caller_pmap,
4009 vm_map_offset_t caller_pmap_addr)
4010 {
4011 return vm_fault_internal(map, vaddr, fault_type, change_wiring, wire_tag,
4012 interruptible, caller_pmap, caller_pmap_addr,
4013 NULL);
4014 }
4015
4016 static boolean_t
4017 current_proc_is_privileged(void)
4018 {
4019 return csproc_get_platform_binary(current_proc());
4020 }
4021
4022 uint64_t vm_copied_on_read = 0;
4023
4024 /*
4025 * Cleanup after a vm_fault_enter.
4026 * At this point, the fault should either have failed (kr != KERN_SUCCESS)
4027 * or the page should be in the pmap and on the correct paging queue.
4028 *
4029 * Precondition:
4030 * map must be locked shared.
4031 * m_object must be locked.
4032 * If top_object != VM_OBJECT_NULL, it must be locked.
4033 * real_map must be locked.
4034 *
4035 * Postcondition:
4036 * map will be unlocked
4037 * m_object will be unlocked
4038 * top_object will be unlocked
4039 * If real_map != map, it will be unlocked
4040 */
4041 static void
4042 vm_fault_complete(
4043 vm_map_t map,
4044 vm_map_t real_map,
4045 vm_object_t object,
4046 vm_object_t m_object,
4047 vm_page_t m,
4048 vm_map_offset_t offset,
4049 vm_map_offset_t trace_real_vaddr,
4050 vm_object_fault_info_t fault_info,
4051 vm_prot_t caller_prot,
4052 #if CONFIG_DTRACE
4053 vm_map_offset_t real_vaddr,
4054 #else
4055 __unused vm_map_offset_t real_vaddr,
4056 #endif /* CONFIG_DTRACE */
4057 int type_of_fault,
4058 boolean_t need_retry,
4059 kern_return_t kr,
4060 ppnum_t *physpage_p,
4061 vm_prot_t prot,
4062 vm_object_t top_object,
4063 boolean_t need_collapse,
4064 vm_map_offset_t cur_offset,
4065 vm_prot_t fault_type,
4066 vm_object_t *written_on_object,
4067 memory_object_t *written_on_pager,
4068 vm_object_offset_t *written_on_offset)
4069 {
4070 int event_code = 0;
4071 vm_map_lock_assert_shared(map);
4072 vm_object_lock_assert_held(m_object);
4073 if (top_object != VM_OBJECT_NULL) {
4074 vm_object_lock_assert_held(top_object);
4075 }
4076 vm_map_lock_assert_held(real_map);
4077
4078 if (m_object->internal) {
4079 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
4080 } else if (m_object->object_is_shared_cache) {
4081 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
4082 } else {
4083 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
4084 }
4085 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid());
4086 if (need_retry == FALSE) {
4087 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid());
4088 }
4089 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
4090 if (kr == KERN_SUCCESS &&
4091 physpage_p != NULL) {
4092 /* for vm_map_wire_and_extract() */
4093 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
4094 if (prot & VM_PROT_WRITE) {
4095 vm_object_lock_assert_exclusive(m_object);
4096 m->vmp_dirty = TRUE;
4097 }
4098 }
4099
4100 if (top_object != VM_OBJECT_NULL) {
4101 /*
4102 * It's safe to drop the top object
4103 * now that we've done our
4104 * vm_fault_enter(). Any other fault
4105 * in progress for that virtual
4106 * address will either find our page
4107 * and translation or put in a new page
4108 * and translation.
4109 */
4110 vm_object_unlock(top_object);
4111 top_object = VM_OBJECT_NULL;
4112 }
4113
4114 if (need_collapse == TRUE) {
4115 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
4116 }
4117
4118 if (need_retry == FALSE &&
4119 (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
4120 /*
4121 * evaluate access pattern and update state
4122 * vm_fault_deactivate_behind depends on the
4123 * state being up to date
4124 */
4125 vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
4126
4127 vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
4128 }
4129 /*
4130 * That's it, clean up and return.
4131 */
4132 if (m->vmp_busy) {
4133 vm_object_lock_assert_exclusive(m_object);
4134 PAGE_WAKEUP_DONE(m);
4135 }
4136
4137 if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
4138 vm_object_paging_begin(m_object);
4139
4140 assert(*written_on_object == VM_OBJECT_NULL);
4141 *written_on_object = m_object;
4142 *written_on_pager = m_object->pager;
4143 *written_on_offset = m_object->paging_offset + m->vmp_offset;
4144 }
4145 vm_object_unlock(object);
4146
4147 vm_map_unlock_read(map);
4148 if (real_map != map) {
4149 vm_map_unlock(real_map);
4150 }
4151 }
4152
4153 static inline int
4154 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
4155 {
4156 if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4157 return DBG_COR_FAULT;
4158 }
4159 return type_of_fault;
4160 }
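/*
 * e.g. a fault that was satisfied by a copy-on-read shows up in the
 * tracing stream as DBG_COR_FAULT instead of DBG_COW_FAULT, even though
 * the page-copy mechanics are the same.
 */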
4161
4162 uint64_t vm_fault_resilient_media_initiate = 0;
4163 uint64_t vm_fault_resilient_media_retry = 0;
4164 uint64_t vm_fault_resilient_media_proceed = 0;
4165 uint64_t vm_fault_resilient_media_release = 0;
4166 uint64_t vm_fault_resilient_media_abort1 = 0;
4167 uint64_t vm_fault_resilient_media_abort2 = 0;
4168
4169 #if MACH_ASSERT
4170 int vm_fault_resilient_media_inject_error1_rate = 0;
4171 int vm_fault_resilient_media_inject_error1 = 0;
4172 int vm_fault_resilient_media_inject_error2_rate = 0;
4173 int vm_fault_resilient_media_inject_error2 = 0;
4174 int vm_fault_resilient_media_inject_error3_rate = 0;
4175 int vm_fault_resilient_media_inject_error3 = 0;
4176 #endif /* MACH_ASSERT */
4177
4178 kern_return_t
4179 vm_fault_internal(
4180 vm_map_t map,
4181 vm_map_offset_t vaddr,
4182 vm_prot_t caller_prot,
4183 boolean_t change_wiring,
4184 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4185 int interruptible,
4186 pmap_t caller_pmap,
4187 vm_map_offset_t caller_pmap_addr,
4188 ppnum_t *physpage_p)
4189 {
4190 vm_map_version_t version; /* Map version for verification */
4191 boolean_t wired; /* Should mapping be wired down? */
4192 vm_object_t object; /* Top-level object */
4193 vm_object_offset_t offset; /* Top-level offset */
4194 vm_prot_t prot; /* Protection for mapping */
4195 vm_object_t old_copy_object; /* Saved copy object */
4196 vm_page_t result_page; /* Result of vm_fault_page */
4197 vm_page_t top_page; /* Placeholder page */
4198 kern_return_t kr;
4199
4200 vm_page_t m; /* Fast access to result_page */
4201 kern_return_t error_code;
4202 vm_object_t cur_object;
4203 vm_object_t m_object = NULL;
4204 vm_object_offset_t cur_offset;
4205 vm_page_t cur_m;
4206 vm_object_t new_object;
4207 int type_of_fault;
4208 pmap_t pmap;
4209 wait_interrupt_t interruptible_state;
4210 vm_map_t real_map = map;
4211 vm_map_t original_map = map;
4212 bool object_locks_dropped = FALSE;
4213 vm_prot_t fault_type;
4214 vm_prot_t original_fault_type;
4215 struct vm_object_fault_info fault_info = {};
4216 bool need_collapse = FALSE;
4217 boolean_t need_retry = FALSE;
4218 boolean_t *need_retry_ptr = NULL;
4219 uint8_t object_lock_type = 0;
4220 uint8_t cur_object_lock_type;
4221 vm_object_t top_object = VM_OBJECT_NULL;
4222 vm_object_t written_on_object = VM_OBJECT_NULL;
4223 memory_object_t written_on_pager = NULL;
4224 vm_object_offset_t written_on_offset = 0;
4225 int throttle_delay;
4226 int compressed_count_delta;
4227 uint8_t grab_options;
4228 bool need_copy;
4229 bool need_copy_on_read;
4230 vm_map_offset_t trace_vaddr;
4231 vm_map_offset_t trace_real_vaddr;
4232 vm_map_size_t fault_page_size;
4233 vm_map_size_t fault_page_mask;
4234 int fault_page_shift;
4235 vm_map_offset_t fault_phys_offset;
4236 vm_map_offset_t real_vaddr;
4237 bool resilient_media_retry = false;
4238 bool resilient_media_ref_transfer = false;
4239 vm_object_t resilient_media_object = VM_OBJECT_NULL;
4240 vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1;
4241 bool page_needs_data_sync = false;
4242 /*
4243 * Was the VM object contended when vm_map_lookup_and_lock_object locked it?
4244 * If so, the zero fill path will drop the lock.
4245 * NB: Ideally we would always drop the lock rather than rely on
4246 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4247 */
4248 bool object_is_contended = false;
4249
4250 real_vaddr = vaddr;
4251 trace_real_vaddr = vaddr;
4252
4253 /*
4254 * Some (kernel) submaps are marked with "should never fault".
4255 *
4256 * We do this for two reasons:
4257 * - PGZ which is inside the zone map range can't go down the normal
4258 * lookup path (vm_map_lookup_entry() would panic).
4259 *
4260 * - we want guard pages to not require fictitious pages at all,
4261 * and to prevent ZFOD pages from being created for them.
4262 *
4263 * We also want to capture the fault address easily so that the zone
4264 * allocator might present an enhanced panic log.
4265 */
4266 if (map->never_faults || (pgz_owned(vaddr) && map->pmap == kernel_pmap)) {
4267 assert(map->pmap == kernel_pmap);
4268 return KERN_INVALID_ADDRESS;
4269 }
4270
4271 if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4272 fault_phys_offset = (vm_map_offset_t)-1;
4273 fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4274 fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4275 fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4276 if (fault_page_size < PAGE_SIZE) {
4277 DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4278 vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4279 }
4280 } else {
4281 fault_phys_offset = 0;
4282 fault_page_size = PAGE_SIZE;
4283 fault_page_mask = PAGE_MASK;
4284 fault_page_shift = PAGE_SHIFT;
4285 vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4286 }
4287
4288 if (map == kernel_map) {
4289 trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4290 trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4291 } else {
4292 trace_vaddr = vaddr;
4293 }
4294
4295 KDBG_RELEASE(
4296 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_START,
4297 ((uint64_t)trace_vaddr >> 32),
4298 trace_vaddr,
4299 (map == kernel_map));
4300
4301 if (get_preemption_level() != 0) {
4302 KDBG_RELEASE(
4303 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4304 ((uint64_t)trace_vaddr >> 32),
4305 trace_vaddr,
4306 KERN_FAILURE);
4307
4308 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4309 return KERN_FAILURE;
4310 }
4311
4312 thread_t cthread = current_thread();
4313 bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4314 uint64_t fstart = 0;
4315
4316 if (rtfault) {
4317 fstart = mach_continuous_time();
4318 }
4319
4320 interruptible_state = thread_interrupt_level(interruptible);
4321
4322 fault_type = (change_wiring ? VM_PROT_NONE : caller_prot);
4323
4324 counter_inc(&vm_statistics_faults);
4325 counter_inc(&current_task()->faults);
4326 original_fault_type = fault_type;
4327
4328 need_copy = FALSE;
4329 if (fault_type & VM_PROT_WRITE) {
4330 need_copy = TRUE;
4331 }
4332
4333 if (need_copy || change_wiring) {
4334 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4335 } else {
4336 object_lock_type = OBJECT_LOCK_SHARED;
4337 }
4338
4339 cur_object_lock_type = OBJECT_LOCK_SHARED;
4340
4341 if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4342 if (compressor_map) {
4343 if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4344 panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4345 }
4346 }
4347 }
4348 RetryFault:
4349 assert(written_on_object == VM_OBJECT_NULL);
4350
4351 /*
4352 * assume we will hit a page in the cache;
4353 * otherwise, explicitly override with
4354 * the real fault type once we determine it
4355 */
4356 type_of_fault = DBG_CACHE_HIT_FAULT;
4357
4358 /*
4359 * Find the backing store object and offset into
4360 * it to begin the search.
4361 */
4362 fault_type = original_fault_type;
4363 map = original_map;
4364 vm_map_lock_read(map);
4365
4366 if (resilient_media_retry) {
4367 /*
4368 * If we have to insert a fake zero-filled page to hide
4369 * a media failure to provide the real page, we need to
4370 * resolve any pending copy-on-write on this mapping.
4371 * VM_PROT_COPY tells vm_map_lookup_and_lock_object() to deal
4372 * with that even if this is not a "write" fault.
4373 */
4374 need_copy = TRUE;
4375 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4376 vm_fault_resilient_media_retry++;
4377 }
4378
4379 kr = vm_map_lookup_and_lock_object(&map, vaddr,
4380 (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4381 object_lock_type, &version,
4382 &object, &offset, &prot, &wired,
4383 &fault_info,
4384 &real_map,
4385 &object_is_contended);
4386
4387 if (kr != KERN_SUCCESS) {
4388 vm_map_unlock_read(map);
4389 /*
4390 * This can be seen in a crash report if indeed the
4391 * thread is crashing due to an invalid access in a non-existent
4392 * range.
4393 * Turning this OFF for now because it is noisy and not always fatal,
4394 * e.g. prefaulting.
4395 *
4396 * if (kr == KERN_INVALID_ADDRESS) {
4397 * ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4398 * }
4399 */
4400 goto done;
4401 }
4402
4403
4404 pmap = real_map->pmap;
4405 fault_info.interruptible = interruptible;
4406 fault_info.stealth = FALSE;
4407 fault_info.io_sync = FALSE;
4408 fault_info.mark_zf_absent = FALSE;
4409 fault_info.batch_pmap_op = FALSE;
4410
4411 if (resilient_media_retry) {
4412 /*
4413 * We're retrying this fault after having detected a media
4414 * failure from a "resilient_media" mapping.
4415 * Check that the mapping is still pointing at the object
4416 * that just failed to provide a page.
4417 */
4418 assert(resilient_media_object != VM_OBJECT_NULL);
4419 assert(resilient_media_offset != (vm_object_offset_t)-1);
4420 if ((object != VM_OBJECT_NULL &&
4421 object == resilient_media_object &&
4422 offset == resilient_media_offset &&
4423 fault_info.resilient_media)
4424 #if MACH_ASSERT
4425 && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4426 (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4427 #endif /* MACH_ASSERT */
4428 ) {
4429 /*
4430 * This mapping still points at the same object
4431 * and is still "resilient_media": proceed in
4432 * "recovery-from-media-failure" mode, where we'll
4433 * insert a zero-filled page in the top object.
4434 */
4435 // printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4436 vm_fault_resilient_media_proceed++;
4437 } else {
4438 /* not recovering: reset state and retry fault */
4439 // printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info.resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4440 vm_object_unlock(object);
4441 if (real_map != map) {
4442 vm_map_unlock(real_map);
4443 }
4444 vm_map_unlock_read(map);
4445 /* release our extra reference on failed object */
4446 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4447 vm_object_lock_assert_notheld(resilient_media_object);
4448 vm_object_deallocate(resilient_media_object);
4449 resilient_media_object = VM_OBJECT_NULL;
4450 resilient_media_offset = (vm_object_offset_t)-1;
4451 resilient_media_retry = false;
4452 vm_fault_resilient_media_abort1++;
4453 goto RetryFault;
4454 }
4455 } else {
4456 assert(resilient_media_object == VM_OBJECT_NULL);
4457 resilient_media_offset = (vm_object_offset_t)-1;
4458 }
4459
4460 /*
4461 * If the page is wired, we must fault for the current protection
4462 * value, to avoid further faults.
4463 */
4464 if (wired) {
4465 fault_type = prot | VM_PROT_WRITE;
4466 }
4467 if (wired || need_copy) {
4468 /*
4469 * since we're treating this fault as a 'write'
4470 * we must hold the top object lock exclusively
4471 */
4472 if (object_lock_type == OBJECT_LOCK_SHARED) {
4473 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4474
4475 if (vm_object_lock_upgrade(object) == FALSE) {
4476 /*
4477 * couldn't upgrade, so explicitly
4478 * take the lock exclusively
4479 */
4480 vm_object_lock(object);
4481 }
4482 }
4483 }
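/*
 * Note: vm_object_lock_upgrade() returning FALSE means the shared hold
 * was dropped (another thread held or was waiting for the lock), not
 * merely that we still hold it shared; hence the explicit exclusive
 * vm_object_lock() above. Since nothing has been looked up in the
 * object yet at this point, no relookup is needed.
 */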
4484
4485 #if VM_FAULT_CLASSIFY
4486 /*
4487 * Temporary data gathering code
4488 */
4489 vm_fault_classify(object, offset, fault_type);
4490 #endif
4491 /*
4492 * Fast fault code. The basic idea is to do as much as
4493 * possible while holding the map lock and object locks.
4494 * Busy pages are not used until the object lock has to
4495 * be dropped to do something (copy, zero fill, pmap enter).
4496 * Similarly, paging references aren't acquired until that
4497 * point, and object references aren't used.
4498 *
4499 * If we can figure out what to do
4500 * (zero fill, copy on write, pmap enter) while holding
4501 * the locks, then it gets done. Otherwise, we give up,
4502 * and use the original fault path (which doesn't hold
4503 * the map lock, and relies on busy pages).
4504 * The give up cases include:
4505 * - Have to talk to pager.
4506 * - Page is busy, absent or in error.
4507 * - Pager has locked out desired access.
4508 * - Fault needs to be restarted.
4509 * - Have to push page into copy object.
4510 *
4511 * The code is an infinite loop that moves one level down
4512 * the shadow chain each time. cur_object and cur_offset
4513 * refer to the current object being examined. object and offset
4514 * are the original object from the map. The loop is at the
4515 * top level if and only if object and cur_object are the same.
4516 *
4517 * Invariants: Map lock is held throughout. Lock is held on
4518 * original object and cur_object (if different) when
4519 * continuing or exiting loop.
4520 *
4521 */
4522
4523 #if defined(__arm64__)
4524 /*
4525 * Fail if reading an execute-only page in a
4526 * pmap that enforces execute-only protection.
4527 */
4528 if (fault_type == VM_PROT_READ &&
4529 (prot & VM_PROT_EXECUTE) &&
4530 !(prot & VM_PROT_READ) &&
4531 pmap_enforces_execute_only(pmap)) {
4532 vm_object_unlock(object);
4533 vm_map_unlock_read(map);
4534 if (real_map != map) {
4535 vm_map_unlock(real_map);
4536 }
4537 kr = KERN_PROTECTION_FAILURE;
4538 goto done;
4539 }
4540 #endif
4541
4542 fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
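/*
 * Worked example (illustrative): for a 4K map on a 16K-page kernel, an
 * object offset of 0x5000 gives
 * fault_phys_offset = 0x5000 - trunc_16K(0x5000) = 0x1000,
 * i.e. the second 4K chunk of the backing 16K page.
 */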
4543
4544 /*
4545 * If this page is to be inserted in a copy delay object
4546 * for writing, and if the object has a copy, then the
4547 * copy-delay strategy is handled by the slow fault path.
4548 */
4549 if ((object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
4550 object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) &&
4551 object->vo_copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4552 goto handle_copy_delay;
4553 }
4554
4555 cur_object = object;
4556 cur_offset = offset;
4557
4558 grab_options = 0;
4559 #if CONFIG_SECLUDED_MEMORY
4560 if (object->can_grab_secluded) {
4561 grab_options |= VM_PAGE_GRAB_SECLUDED;
4562 }
4563 #endif /* CONFIG_SECLUDED_MEMORY */
4564
4565 while (TRUE) {
4566 if (!cur_object->pager_created &&
4567 cur_object->phys_contiguous) { /* superpage */
4568 break;
4569 }
4570
4571 if (cur_object->blocked_access) {
4572 /*
4573 * Access to this VM object has been blocked.
4574 * Let the slow path handle it.
4575 */
4576 break;
4577 }
4578
4579 m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4580 m_object = NULL;
4581
4582 if (m != VM_PAGE_NULL) {
4583 m_object = cur_object;
4584
4585 if (m->vmp_busy) {
4586 wait_result_t result;
4587
4588 /*
4589 * in order to do the PAGE_ASSERT_WAIT, we must
4590 * have the object that 'm' belongs to locked exclusively
4591 */
4592 if (object != cur_object) {
4593 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4594 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4595
4596 if (vm_object_lock_upgrade(cur_object) == FALSE) {
4597 /*
4598 * couldn't upgrade so go do a full retry
4599 * immediately since we can no longer be
4600 * certain about cur_object (since we
4601 * don't hold a reference on it)...
4602 * first drop the top object lock
4603 */
4604 vm_object_unlock(object);
4605
4606 vm_map_unlock_read(map);
4607 if (real_map != map) {
4608 vm_map_unlock(real_map);
4609 }
4610
4611 goto RetryFault;
4612 }
4613 }
4614 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4615 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4616
4617 if (vm_object_lock_upgrade(object) == FALSE) {
4618 /*
4619 * couldn't upgrade, so explicitly take the lock
4620 * exclusively and go relookup the page since we
4621 * will have dropped the object lock and
4622 * a different thread could have inserted
4623 * a page at this offset
4624 * no need for a full retry since we're
4625 * at the top level of the object chain
4626 */
4627 vm_object_lock(object);
4628
4629 continue;
4630 }
4631 }
4632 if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4633 /*
4634 * m->vmp_busy == TRUE and the object is locked exclusively.
4635 * If m->vmp_q_state is still VM_PAGE_ON_PAGEOUT_Q after we
4636 * acquire the queues lock, we are guaranteed that the page is
4637 * stable on the pageout queue and therefore reclaimable.
4638 *
4639 * NOTE: this is only true for the internal pageout queue
4640 * in the compressor world
4641 */
4642 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4643
4644 vm_page_lock_queues();
4645
4646 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4647 vm_pageout_throttle_up(m);
4648 vm_page_unlock_queues();
4649
4650 PAGE_WAKEUP_DONE(m);
4651 goto reclaimed_from_pageout;
4652 }
4653 vm_page_unlock_queues();
4654 }
4655 if (object != cur_object) {
4656 vm_object_unlock(object);
4657 }
4658
4659 vm_map_unlock_read(map);
4660 if (real_map != map) {
4661 vm_map_unlock(real_map);
4662 }
4663
4664 result = PAGE_ASSERT_WAIT(m, interruptible);
4665
4666 vm_object_unlock(cur_object);
4667
4668 if (result == THREAD_WAITING) {
4669 result = thread_block(THREAD_CONTINUE_NULL);
4670 }
4671 if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4672 goto RetryFault;
4673 }
4674
4675 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4676 kr = KERN_ABORTED;
4677 goto done;
4678 }
4679 reclaimed_from_pageout:
4680 if (m->vmp_laundry) {
4681 if (object != cur_object) {
4682 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4683 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4684
4685 vm_object_unlock(object);
4686 vm_object_unlock(cur_object);
4687
4688 vm_map_unlock_read(map);
4689 if (real_map != map) {
4690 vm_map_unlock(real_map);
4691 }
4692
4693 goto RetryFault;
4694 }
4695 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4696 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4697
4698 if (vm_object_lock_upgrade(object) == FALSE) {
4699 /*
4700 * couldn't upgrade, so explicitly take the lock
4701 * exclusively and go relookup the page since we
4702 * will have dropped the object lock and
4703 * a different thread could have inserted
4704 * a page at this offset
4705 * no need for a full retry since we're
4706 * at the top level of the object chain
4707 */
4708 vm_object_lock(object);
4709
4710 continue;
4711 }
4712 }
4713 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4714 vm_pageout_steal_laundry(m, FALSE);
4715 }
4716
4717
4718 if (VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
4719 /*
4720 * Guard page: let the slow path deal with it
4721 */
4722 break;
4723 }
4724 if (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_private || m->vmp_absent)) {
4725 /*
4726 * Unusual case... let the slow path deal with it
4727 */
4728 break;
4729 }
4730 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4731 if (object != cur_object) {
4732 vm_object_unlock(object);
4733 }
4734 vm_map_unlock_read(map);
4735 if (real_map != map) {
4736 vm_map_unlock(real_map);
4737 }
4738 vm_object_unlock(cur_object);
4739 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4740 kr = KERN_MEMORY_ERROR;
4741 goto done;
4742 }
4743 assert(m_object == VM_PAGE_OBJECT(m));
4744
4745 if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4746 PAGE_SIZE, 0) ||
4747 (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4748 upgrade_lock_and_retry:
4749 /*
4750 * We might need to validate this page
4751 * against its code signature, so we
4752 * want to hold the VM object exclusively.
4753 */
4754 if (object != cur_object) {
4755 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4756 vm_object_unlock(object);
4757 vm_object_unlock(cur_object);
4758
4759 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4760
4761 vm_map_unlock_read(map);
4762 if (real_map != map) {
4763 vm_map_unlock(real_map);
4764 }
4765
4766 goto RetryFault;
4767 }
4768 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4769 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4770
4771 if (vm_object_lock_upgrade(object) == FALSE) {
4772 /*
4773 * couldn't upgrade, so explicitly take the lock
4774 * exclusively and go relookup the page since we
4775 * will have dropped the object lock and
4776 * a different thread could have inserted
4777 * a page at this offset
4778 * no need for a full retry since we're
4779 * at the top level of the object chain
4780 */
4781 vm_object_lock(object);
4782
4783 continue;
4784 }
4785 }
4786 }
4787 /*
4788 * Two cases of map in faults:
4789 * - At top level w/o copy object.
4790 * - Read fault anywhere.
4791 * --> must disallow write.
4792 */
4793
4794 if (object == cur_object && object->vo_copy == VM_OBJECT_NULL) {
4795 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4796 if ((fault_type & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
4797 assert(cur_object == VM_PAGE_OBJECT(m));
4798 assert(cur_object->internal);
4799 vm_object_lock_assert_exclusive(cur_object);
4800 vm_page_lockspin_queues();
4801 m->vmp_unmodified_ro = false;
4802 vm_page_unlock_queues();
4803 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4804 VM_COMPRESSOR_PAGER_STATE_CLR(cur_object, m->vmp_offset);
4805 }
4806 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4807 goto FastPmapEnter;
4808 }
4809
4810 if (!need_copy &&
4811 !fault_info.no_copy_on_read &&
4812 cur_object != object &&
4813 !cur_object->internal &&
4814 !cur_object->pager_trusted &&
4815 vm_protect_privileged_from_untrusted &&
4816 !cur_object->code_signed &&
4817 current_proc_is_privileged()) {
4818 /*
4819 * We're faulting on a page in "object" and
4820 * went down the shadow chain to "cur_object"
4821 * to find out that "cur_object"'s pager
4822 * is not "trusted", i.e. we can not trust it
4823 * to always return the same contents.
4824 * Since the target is a "privileged" process,
4825 * let's treat this as a copy-on-read fault, as
4826 * if it was a copy-on-write fault.
4827 * Once "object" gets a copy of this page, it
4828 * won't have to rely on "cur_object" to
4829 * provide the contents again.
4830 *
4831 * This is done by setting "need_copy" and
4832 * retrying the fault from the top with the
4833 * appropriate locking.
4834 *
4835 * Special case: if the mapping is executable
4836 * and the untrusted object is code-signed and
4837 * the process is "cs_enforced", we do not
4838 * copy-on-read because that would break
4839 * code-signing enforcement expectations (an
4840 * executable page must belong to a code-signed
4841 * object) and we can rely on code-signing
4842 * to re-validate the page if it gets evicted
4843 * and paged back in.
4844 */
4845 // printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4846 vm_copied_on_read++;
4847 need_copy = TRUE;
4848
4849 vm_object_unlock(object);
4850 vm_object_unlock(cur_object);
4851 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4852 vm_map_unlock_read(map);
4853 if (real_map != map) {
4854 vm_map_unlock(real_map);
4855 }
4856 goto RetryFault;
4857 }
4858
4859 if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4860 if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4861 prot &= ~VM_PROT_WRITE;
4862 } else {
4863 /*
4864 * For a protection that the pmap cares
4865 * about, we must hand over the full
4866 * set of protections (so that the pmap
4867 * layer can apply any desired policy).
4868 * This means that cs_bypass must be
4869 * set, as this can force us to pass
4870 * RWX.
4871 */
4872 assert(fault_info.cs_bypass);
4873 }
4874
4875 if (object != cur_object) {
4876 /*
4877 * We still need to hold the top object
4878 * lock here to prevent a race between
4879 * a read fault (taking only "shared"
4880 * locks) and a write fault (taking
4881 * an "exclusive" lock on the top
4882 * object).
4883 * Otherwise, as soon as we release the
4884 * top lock, the write fault could
4885 * proceed and actually complete before
4886 * the read fault, and the copied page's
4887 * translation could then be overwritten
4888 * by the read fault's translation for
4889 * the original page.
4890 *
4891 * Let's just record what the top object
4892 * is and we'll release it later.
4893 */
4894 top_object = object;
4895
4896 /*
4897 * switch to the object that has the new page
4898 */
4899 object = cur_object;
4900 object_lock_type = cur_object_lock_type;
4901 }
4902 FastPmapEnter:
4903 assert(m_object == VM_PAGE_OBJECT(m));
4904
4905 /*
4906 * prepare for the pmap_enter...
4907 * object and map are both locked
4908 * m contains valid data
4909 * object == m->vmp_object
4910 * cur_object == NULL or it's been unlocked
4911 * no paging references on either object or cur_object
4912 */
4913 if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4914 need_retry_ptr = &need_retry;
4915 } else {
4916 need_retry_ptr = NULL;
4917 }
4918
4919 if (fault_page_size < PAGE_SIZE) {
4920 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
4921 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
4922 fault_phys_offset < PAGE_SIZE),
4923 "0x%llx\n", (uint64_t)fault_phys_offset);
4924 } else {
4925 assertf(fault_phys_offset == 0,
4926 "0x%llx\n", (uint64_t)fault_phys_offset);
4927 }
4928
4929 if (__improbable(rtfault &&
4930 !m->vmp_realtime &&
4931 vm_pageout_protect_realtime)) {
4932 vm_page_lock_queues();
4933 if (!m->vmp_realtime) {
4934 m->vmp_realtime = true;
4935 vm_page_realtime_count++;
4936 }
4937 vm_page_unlock_queues();
4938 }
4939 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
4940 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
4941 if (caller_pmap) {
4942 kr = vm_fault_enter(m,
4943 caller_pmap,
4944 caller_pmap_addr,
4945 fault_page_size,
4946 fault_phys_offset,
4947 prot,
4948 caller_prot,
4949 wired,
4950 change_wiring,
4951 wire_tag,
4952 &fault_info,
4953 need_retry_ptr,
4954 &type_of_fault,
4955 &object_lock_type);
4956 } else {
4957 kr = vm_fault_enter(m,
4958 pmap,
4959 vaddr,
4960 fault_page_size,
4961 fault_phys_offset,
4962 prot,
4963 caller_prot,
4964 wired,
4965 change_wiring,
4966 wire_tag,
4967 &fault_info,
4968 need_retry_ptr,
4969 &type_of_fault,
4970 &object_lock_type);
4971 }
4972
4973 vm_fault_complete(
4974 map,
4975 real_map,
4976 object,
4977 m_object,
4978 m,
4979 offset,
4980 trace_real_vaddr,
4981 &fault_info,
4982 caller_prot,
4983 real_vaddr,
4984 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
4985 need_retry,
4986 kr,
4987 physpage_p,
4988 prot,
4989 top_object,
4990 need_collapse,
4991 cur_offset,
4992 fault_type,
4993 &written_on_object,
4994 &written_on_pager,
4995 &written_on_offset);
4996 top_object = VM_OBJECT_NULL;
4997 if (need_retry == TRUE) {
4998 /*
4999 * vm_fault_enter couldn't complete the PMAP_ENTER...
5000 * at this point we don't hold any locks so it's safe
5001 * to ask the pmap layer to expand the page table to
5002 * accommodate this mapping... once expanded, we'll
5003 * re-drive the fault which should result in vm_fault_enter
5004 * being able to successfully enter the mapping this time around
5005 */
5006 (void)pmap_enter_options(
5007 pmap, vaddr, 0, 0, 0, 0, 0,
5008 PMAP_OPTIONS_NOENTER, NULL);
5009
5010 need_retry = FALSE;
5011 goto RetryFault;
5012 }
5013 goto done;
5014 }
5015 /*
5016 * COPY ON WRITE FAULT
5017 */
5018 assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
5019
5020 /*
5021 * If objects match, then
5022 * object->vo_copy must not be NULL (else control
5023 * would be in previous code block), and we
5024 * have a potential push into the copy object
5025 * which we can't cope with here.
5026 */
5027 if (cur_object == object) {
5028 /*
5029 * must take the slow path to
5030 * deal with the copy push
5031 */
5032 break;
5033 }
5034
5035 /*
5036 * This is now a shadow based copy on write
5037 * fault -- it requires a copy up the shadow
5038 * chain.
5039 */
5040 assert(m_object == VM_PAGE_OBJECT(m));
5041
5042 if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
5043 vm_fault_cs_need_validation(NULL, m, m_object,
5044 PAGE_SIZE, 0)) {
5045 goto upgrade_lock_and_retry;
5046 }
5047
5048 #if MACH_ASSERT
5049 if (resilient_media_retry &&
5050 vm_fault_resilient_media_inject_error2_rate != 0 &&
5051 (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
5052 /* inject an error */
5053 cur_m = m;
5054 m = VM_PAGE_NULL;
5055 m_object = VM_OBJECT_NULL;
5056 break;
5057 }
5058 #endif /* MACH_ASSERT */
5059 /*
5060 * Allocate a page in the original top level
5061 * object. Give up if the allocation fails. Also
5062 * need to remember current page, as it's the
5063 * source of the copy.
5064 *
5065 * at this point we hold locks on both
5066 * object and cur_object... no need to take
5067 * paging refs or mark pages BUSY since
5068 * we don't drop either object lock until
5069 * the page has been copied and inserted
5070 */
5071 cur_m = m;
5072 m = vm_page_grab_options(grab_options);
5073 m_object = NULL;
5074
5075 if (m == VM_PAGE_NULL) {
5076 /*
5077 * no free page currently available...
5078 * must take the slow path
5079 */
5080 break;
5081 }
5082
5083 /*
5084 * Now do the copy. Mark the source page busy...
5085 *
5086 * NOTE: This code holds the map lock across
5087 * the page copy.
5088 */
5089 vm_page_copy(cur_m, m);
5090 vm_page_insert(m, object, vm_object_trunc_page(offset));
5091 if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
5092 DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5093 }
5094 m_object = object;
5095 SET_PAGE_DIRTY(m, FALSE);
5096
5097 /*
5098 * Now cope with the source page and object
5099 */
5100 if (object->ref_count > 1 && cur_m->vmp_pmapped) {
5101 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5102 } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
5103 /*
5104 * We've copied the full 16K page but we're
5105 * about to call vm_fault_enter() only for
5106 * the 4K chunk we're faulting on. The other
5107 * three 4K chunks in that page could still
5108 * be pmapped in this pmap.
5109 * Since the VM object layer thinks that the
5110 * entire page has been dealt with and the
5111 * original page might no longer be needed,
5112 * it might collapse/bypass the original VM
5113 * object and free its pages, which would be
5114 * bad (and would trigger pmap_verify_free()
5115 * assertions) if the other 4K chunks are still
5116 * pmapped.
5117 */
5118 /*
5119 * XXX FBDP TODO4K: to be revisited
5120 * Technically, we need to pmap_disconnect()
5121 * only the target pmap's mappings for the 4K
5122 * chunks of this 16K VM page. If other pmaps
5123 * have PTEs on these chunks, that means that
5124 * the associated VM map must have a reference
5125 * on the VM object, so no need to worry about
5126 * those.
5127 * pmap_protect() for each 4K chunk would be
5128 * better but we'd have to check which chunks
5129 * are actually mapped before and after this
5130 * one.
5131 * A full-blown pmap_disconnect() is easier
5132 * for now but not efficient.
5133 */
5134 DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
5135 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5136 }
5137
5138 if (cur_m->vmp_clustered) {
5139 VM_PAGE_COUNT_AS_PAGEIN(cur_m);
5140 VM_PAGE_CONSUME_CLUSTERED(cur_m);
5141 vm_fault_is_sequential(cur_object, cur_offset, fault_info.behavior);
5142 }
5143 need_collapse = TRUE;
5144
5145 if (!cur_object->internal &&
5146 cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5147 /*
5148 * The object from which we've just
5149 * copied a page is most probably backed
5150 * by a vnode. We don't want to waste too
5151 * much time trying to collapse the VM objects
5152 * and create a bottleneck when several tasks
5153 * map the same file.
5154 */
5155 if (cur_object->vo_copy == object) {
5156 /*
5157 * Shared mapping or no COW yet.
5158 * We can never collapse a copy
5159 * object into its backing object.
5160 */
5161 need_collapse = FALSE;
5162 } else if (cur_object->vo_copy == object->shadow &&
5163 object->shadow->resident_page_count == 0) {
5164 /*
5165 * Shared mapping after a COW occurred.
5166 */
5167 need_collapse = FALSE;
5168 }
5169 }
5170 vm_object_unlock(cur_object);
5171
5172 if (need_collapse == FALSE) {
5173 vm_fault_collapse_skipped++;
5174 }
5175 vm_fault_collapse_total++;
5176
5177 type_of_fault = DBG_COW_FAULT;
5178 counter_inc(&vm_statistics_cow_faults);
5179 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5180 counter_inc(&current_task()->cow_faults);
5181
5182 goto FastPmapEnter;
5183 } else {
5184 /*
5185 * No page at cur_object, cur_offset... m == NULL
5186 */
5187 if (cur_object->pager_created) {
5188 vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5189
5190 if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5191 int my_fault_type;
5192 vm_compressor_options_t c_flags = C_DONT_BLOCK;
5193 bool insert_cur_object = FALSE;
5194
5195 /*
5196 * May have to talk to a pager...
5197 * if so, take the slow path by
5198 * doing a 'break' from the while (TRUE) loop
5199 *
5200 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5201 * if the compressor is active and the page exists there
5202 */
5203 if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5204 break;
5205 }
5206
5207 if (map == kernel_map || real_map == kernel_map) {
5208 /*
5209 * can't call into the compressor with the kernel_map
5210 * lock held, since the compressor may try to operate
5211 * on the kernel map in order to return an empty c_segment
5212 */
5213 break;
5214 }
5215 if (object != cur_object) {
5216 if (fault_type & VM_PROT_WRITE) {
5217 c_flags |= C_KEEP;
5218 } else {
5219 insert_cur_object = TRUE;
5220 }
5221 }
5222 if (insert_cur_object == TRUE) {
5223 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5224 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5225
5226 if (vm_object_lock_upgrade(cur_object) == FALSE) {
5227 /*
5228 * couldn't upgrade so go do a full retry
5229 * immediately since we can no longer be
5230 * certain about cur_object (since we
5231 * don't hold a reference on it)...
5232 * first drop the top object lock
5233 */
5234 vm_object_unlock(object);
5235
5236 vm_map_unlock_read(map);
5237 if (real_map != map) {
5238 vm_map_unlock(real_map);
5239 }
5240
5241 goto RetryFault;
5242 }
5243 }
5244 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
5245 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5246
5247 if (object != cur_object) {
5248 /*
5249 * we can't go for the upgrade on the top
5250 * lock since the upgrade may block waiting
5251 * for readers to drain... since we hold
5252 * cur_object locked at this point, waiting
5253 * for the readers to drain would represent
5254 * a lock order inversion since the lock order
5255 * for objects is the reference order in the
5256 * shadow chain
5257 */
5258 vm_object_unlock(object);
5259 vm_object_unlock(cur_object);
5260
5261 vm_map_unlock_read(map);
5262 if (real_map != map) {
5263 vm_map_unlock(real_map);
5264 }
5265
5266 goto RetryFault;
5267 }
5268 if (vm_object_lock_upgrade(object) == FALSE) {
5269 /*
5270 * couldn't upgrade, so explicitly take the lock
5271 * exclusively and go relookup the page since we
5272 * will have dropped the object lock and
5273 * a different thread could have inserted
5274 * a page at this offset
5275 * no need for a full retry since we're
5276 * at the top level of the object chain
5277 */
5278 vm_object_lock(object);
5279
5280 continue;
5281 }
5282 }
5283 m = vm_page_grab_options(grab_options);
5284 m_object = NULL;
5285
5286 if (m == VM_PAGE_NULL) {
5287 /*
5288 * no free page currently available...
5289 * must take the slow path
5290 */
5291 break;
5292 }
5293
5294 /*
5295 * The object is and remains locked
5296 * so no need to take a
5297 * "paging_in_progress" reference.
5298 */
5299 bool shared_lock;
5300 if ((object == cur_object &&
5301 object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5302 (object != cur_object &&
5303 cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5304 shared_lock = FALSE;
5305 } else {
5306 shared_lock = TRUE;
5307 }
5308
5309 kr = vm_compressor_pager_get(
5310 cur_object->pager,
5311 (vm_object_trunc_page(cur_offset)
5312 + cur_object->paging_offset),
5313 VM_PAGE_GET_PHYS_PAGE(m),
5314 &my_fault_type,
5315 c_flags,
5316 &compressed_count_delta);
5317
5318 vm_compressor_pager_count(
5319 cur_object->pager,
5320 compressed_count_delta,
5321 shared_lock,
5322 cur_object);
5323
5324 if (kr != KERN_SUCCESS) {
5325 vm_page_release(m, FALSE);
5326 m = VM_PAGE_NULL;
5327 }
5328 /*
5329 * If vm_compressor_pager_get() returns
5330 * KERN_MEMORY_FAILURE, then the
5331 * compressed data is permanently lost,
5332 * so return this error immediately.
5333 */
5334 if (kr == KERN_MEMORY_FAILURE) {
5335 if (object != cur_object) {
5336 vm_object_unlock(cur_object);
5337 }
5338 vm_object_unlock(object);
5339 vm_map_unlock_read(map);
5340 if (real_map != map) {
5341 vm_map_unlock(real_map);
5342 }
5343
5344 goto done;
5345 } else if (kr != KERN_SUCCESS) {
5346 break;
5347 }
5348 m->vmp_dirty = TRUE;
5349 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5350 if ((fault_type & VM_PROT_WRITE) == 0) {
5351 prot &= ~VM_PROT_WRITE;
5352 /*
5353 * The page, m, has yet to be inserted
5354 * into an object. So we are fine with
5355 * the object/cur_object lock being held
5356 * shared.
5357 */
5358 vm_page_lockspin_queues();
5359 m->vmp_unmodified_ro = true;
5360 vm_page_unlock_queues();
5361 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
5362 }
5363 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5364
5365 /*
5366 * If the object is purgeable, its
5367 * owner's purgeable ledgers will be
5368 * updated in vm_page_insert() but the
5369 * page was also accounted for in a
5370 * "compressed purgeable" ledger, so
5371 * update that now.
5372 */
5373 if (object != cur_object &&
5374 !insert_cur_object) {
5375 /*
5376 * We're not going to insert
5377 * the decompressed page into
5378 * the object it came from.
5379 *
5380 * We're dealing with a
5381 * copy-on-write fault on
5382 * "object".
5383 * We're going to decompress
5384 * the page directly into the
5385 * target "object" while
5386 * keeping the compressed
5387 * page for "cur_object", so
5388 * no ledger update in that
5389 * case.
5390 */
5391 } else if (((cur_object->purgable ==
5392 VM_PURGABLE_DENY) &&
5393 (!cur_object->vo_ledger_tag)) ||
5394 (cur_object->vo_owner ==
5395 NULL)) {
5396 /*
5397 * "cur_object" is not purgeable
5398 * and is not ledger-tagged, or
5399 * there's no owner for it,
5400 * so no owner's ledgers to
5401 * update.
5402 */
5403 } else {
5404 /*
5405 * One less compressed
5406 * purgeable/tagged page for
5407 * cur_object's owner.
5408 */
5409 if (compressed_count_delta) {
5410 vm_object_owner_compressed_update(
5411 cur_object,
5412 -1);
5413 }
5414 }
5415
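/*
 * Editor's note -- illustrative sketch, not part of the fault path.
 * The purgeable objects whose compressed-page ledgers are adjusted
 * above can be created from user space with VM_FLAGS_PURGABLE and
 * driven volatile/nonvolatile via the purgable-control call; a
 * sketch, assuming the standard Mach VM user APIs:
 *
 *     #include <mach/mach.h>
 *     #include <mach/mach_vm.h>
 *     #include <mach/vm_purgable.h>
 *
 *     int
 *     main(void)
 *     {
 *         mach_vm_address_t addr = 0;
 *         int state = VM_PURGABLE_VOLATILE;
 *
 *         if (mach_vm_allocate(mach_task_self(), &addr, 16384,
 *             VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE) != KERN_SUCCESS) {
 *             return 1;
 *         }
 *         // these pages are now billed to the owner's purgeable ledgers
 *         mach_vm_purgable_control(mach_task_self(), addr,
 *             VM_PURGABLE_SET_STATE, &state);
 *         return 0;
 *     }
 */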
5416 if (insert_cur_object) {
5417 vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5418 m_object = cur_object;
5419 } else {
5420 vm_page_insert(m, object, vm_object_trunc_page(offset));
5421 m_object = object;
5422 }
5423
5424 if ((m_object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_USE_DEFAULT) {
5425 /*
5426 * If the page is not cacheable,
5427 * we can't let its contents
5428 * linger in the data cache
5429 * after the decompression.
5430 */
5431 pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5432 }
5433
5434 type_of_fault = my_fault_type;
5435
5436 VM_STAT_DECOMPRESSIONS();
5437
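/*
 * Editor's note -- illustrative sketch, not part of the fault path.
 * The decompressions counted just above feed the same system-wide
 * counters that vm_stat(1) prints; from user space they can be read
 * with host_statistics64(HOST_VM_INFO64):
 *
 *     #include <mach/mach.h>
 *     #include <stdio.h>
 *
 *     int
 *     main(void)
 *     {
 *         vm_statistics64_data_t vs;
 *         mach_msg_type_number_t count = HOST_VM_INFO64_COUNT;
 *
 *         if (host_statistics64(mach_host_self(), HOST_VM_INFO64,
 *             (host_info64_t)&vs, &count) != KERN_SUCCESS) {
 *             return 1;
 *         }
 *         printf("compressions:   %llu\n", vs.compressions);
 *         printf("decompressions: %llu\n", vs.decompressions);
 *         return 0;
 *     }
 */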
5438 if (cur_object != object) {
5439 if (insert_cur_object) {
5440 top_object = object;
5441 /*
5442 * switch to the object that has the new page
5443 */
5444 object = cur_object;
5445 object_lock_type = cur_object_lock_type;
5446 } else {
5447 vm_object_unlock(cur_object);
5448 cur_object = object;
5449 }
5450 }
5451 goto FastPmapEnter;
5452 }
5453 /*
5454 * existence map is present and indicates
5455 * that the pager doesn't have this page
5456 */
5457 }
5458 if (cur_object->shadow == VM_OBJECT_NULL ||
5459 resilient_media_retry) {
5460 /*
5461 * Zero fill fault. Page gets
5462 * inserted into the original object.
5463 */
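/*
 * Editor's note -- illustrative sketch, not part of the fault path.
 * Zero fill is the common first touch of anonymous memory; each such
 * fault is counted per task and can be observed from user space with
 * task_info(TASK_EVENTS_INFO):
 *
 *     #include <mach/mach.h>
 *     #include <sys/mman.h>
 *     #include <stdio.h>
 *
 *     static integer_t
 *     fault_count(void)
 *     {
 *         task_events_info_data_t tei;
 *         mach_msg_type_number_t count = TASK_EVENTS_INFO_COUNT;
 *
 *         task_info(mach_task_self(), TASK_EVENTS_INFO,
 *             (task_info_t)&tei, &count);
 *         return tei.faults;
 *     }
 *
 *     int
 *     main(void)
 *     {
 *         integer_t before = fault_count();
 *         volatile char *p = mmap(NULL, 16 * 4096,
 *             PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
 *
 *         for (int i = 0; i < 16; i++) {
 *             p[i * 4096] = 1;        // one zero-fill fault per page
 *         }
 *         printf("faults: +%d\n", fault_count() - before);
 *         return 0;
 *     }
 */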
5464 if (cur_object->shadow_severed ||
5465 VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5466 cur_object == compressor_object ||
5467 is_kernel_object(cur_object)) {
5468 if (object != cur_object) {
5469 vm_object_unlock(cur_object);
5470 }
5471 vm_object_unlock(object);
5472
5473 vm_map_unlock_read(map);
5474 if (real_map != map) {
5475 vm_map_unlock(real_map);
5476 }
5477 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5478 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5479 }
5480
5481 if (cur_object->shadow_severed) {
5482 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5483 }
5484
5485 kr = KERN_MEMORY_ERROR;
5486 goto done;
5487 }
5488 if (cur_object != object) {
5489 vm_object_unlock(cur_object);
5490
5491 cur_object = object;
5492 }
5493 if (object_lock_type == OBJECT_LOCK_SHARED) {
5494 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5495
5496 if (vm_object_lock_upgrade(object) == FALSE) {
5497 /*
5498 * couldn't upgrade so do a full retry on the fault
5499 * since we dropped the object lock which
5500 * could allow another thread to insert
5501 * a page at this offset
5502 */
5503 vm_map_unlock_read(map);
5504 if (real_map != map) {
5505 vm_map_unlock(real_map);
5506 }
5507
5508 goto RetryFault;
5509 }
5510 }
5511 if (!object->internal) {
5512 panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5513 }
5514 #if MACH_ASSERT
5515 if (resilient_media_retry &&
5516 vm_fault_resilient_media_inject_error3_rate != 0 &&
5517 (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5518 /* inject an error */
5519 m_object = NULL;
5520 break;
5521 }
5522 #endif /* MACH_ASSERT */
5523 m = vm_page_alloc(object, vm_object_trunc_page(offset));
5524 m_object = NULL;
5525
5526 if (m == VM_PAGE_NULL) {
5527 /*
5528 * no free page currently available...
5529 * must take the slow path
5530 */
5531 break;
5532 }
5533 m_object = object;
5534
5535 if ((prot & VM_PROT_WRITE) &&
5536 !(fault_type & VM_PROT_WRITE) &&
5537 object->vo_copy != VM_OBJECT_NULL) {
5538 /*
5539 * This is not a write fault and
5540 * we might have a copy-on-write
5541 * obligation to honor (copy object or
5542 * "needs_copy" map entry), so do not
5543 * give write access yet.
5544 * We'll need to catch the first write
5545 * to resolve the copy-on-write by
5546 * pushing this page to a copy object
5547 * or making a shadow object.
5548 */
5549 if (!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5550 prot &= ~VM_PROT_WRITE;
5551 } else {
5552 assert(fault_info.cs_bypass);
5553 }
5554 }
5555
5556 /*
5557 * Zeroing the page and entering it into the pmap
5558 * represents a significant amount of the zero fill fault handler's work.
5559 *
5560 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5561 * now that we've inserted the page into the vm object.
5562 * Before dropping the lock, we need to check protection bits and set the
5563 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5564 * zero it, and do the pmap enter. We'll need to reacquire the lock
5565 * to clear the busy bit and wake up any waiters.
5566 */
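/*
 * Editor's note -- a condensed sketch of the protocol the comment
 * above describes; it mirrors the code below rather than adding to it:
 *
 *     m->vmp_busy = TRUE;          // contents invalid: keep others out
 *     vm_object_unlock(object);    // let concurrent faulters proceed
 *     vm_page_zero_fill(m);        // the expensive work, lock dropped
 *     ... pmap enter ...
 *     vm_object_lock(object);      // reacquire to finish bookkeeping
 *     ... clear the busy bit and wake any threads that slept on it ...
 */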
5567 vm_fault_cs_clear(m);
5568 m->vmp_pmapped = TRUE;
5569 if (map->no_zero_fill) {
5570 type_of_fault = DBG_NZF_PAGE_FAULT;
5571 } else {
5572 type_of_fault = DBG_ZERO_FILL_FAULT;
5573 }
5574 {
5575 pmap_t destination_pmap;
5576 vm_map_offset_t destination_pmap_vaddr;
5577 vm_prot_t enter_fault_type;
5578 if (caller_pmap) {
5579 destination_pmap = caller_pmap;
5580 destination_pmap_vaddr = caller_pmap_addr;
5581 } else {
5582 destination_pmap = pmap;
5583 destination_pmap_vaddr = vaddr;
5584 }
5585 if (change_wiring) {
5586 enter_fault_type = VM_PROT_NONE;
5587 } else {
5588 enter_fault_type = caller_prot;
5589 }
5590 assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5591 kr = vm_fault_enter_prepare(m,
5592 destination_pmap,
5593 destination_pmap_vaddr,
5594 &prot,
5595 caller_prot,
5596 fault_page_size,
5597 fault_phys_offset,
5598 change_wiring,
5599 enter_fault_type,
5600 &fault_info,
5601 &type_of_fault,
5602 &page_needs_data_sync);
5603 if (kr != KERN_SUCCESS) {
5604 goto zero_fill_cleanup;
5605 }
5606
5607 if (object_is_contended) {
5608 /*
5609 * At this point the page is in the vm object, but not on a paging queue.
5610 * Since it's accessible to another thread but its contents are invalid
5611 * (it hasn't been zeroed), mark it busy before dropping the object lock.
5612 */
5613 m->vmp_busy = TRUE;
5614 vm_object_unlock(object);
5615 }
5616 if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5617 /*
5618 * Now zero fill page...
5619 * the page is probably going to
5620 * be written soon, so don't bother
5621 * to clear the modified bit
5622 *
5623 * NOTE: This code holds the map
5624 * lock across the zero fill.
5625 */
5626 vm_page_zero_fill(m);
5627 counter_inc(&vm_statistics_zero_fill_count);
5628 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5629 }
5630 if (page_needs_data_sync) {
5631 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5632 }
5633
5634 if (top_object != VM_OBJECT_NULL) {
5635 need_retry_ptr = &need_retry;
5636 } else {
5637 need_retry_ptr = NULL;
5638 }
5639 if (fault_info.fi_xnu_user_debug &&
5640 !object->code_signed) {
5641 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
5642 }
5643 if (object_is_contended) {
5644 kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5645 fault_page_size, fault_phys_offset,
5646 m, &prot, caller_prot, enter_fault_type, wired,
5647 fault_info.pmap_options, need_retry_ptr);
5648 vm_object_lock(object);
5649 } else {
5650 kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5651 fault_page_size, fault_phys_offset,
5652 m, &prot, caller_prot, enter_fault_type, wired,
5653 fault_info.pmap_options, need_retry_ptr, &object_lock_type);
5654 }
5655 }
5656 zero_fill_cleanup:
5657 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5658 (object->purgable == VM_PURGABLE_DENY ||
5659 object->purgable == VM_PURGABLE_NONVOLATILE ||
5660 object->purgable == VM_PURGABLE_VOLATILE)) {
5661 vm_page_lockspin_queues();
5662 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5663 vm_fault_enqueue_throttled_locked(m);
5664 }
5665 vm_page_unlock_queues();
5666 }
5667 vm_fault_enqueue_page(object, m, wired, change_wiring, wire_tag, fault_info.no_cache, &type_of_fault, kr);
5668
5669 if (__improbable(rtfault &&
5670 !m->vmp_realtime &&
5671 vm_pageout_protect_realtime)) {
5672 vm_page_lock_queues();
5673 if (!m->vmp_realtime) {
5674 m->vmp_realtime = true;
5675 vm_page_realtime_count++;
5676 }
5677 vm_page_unlock_queues();
5678 }
5679 vm_fault_complete(
5680 map,
5681 real_map,
5682 object,
5683 m_object,
5684 m,
5685 offset,
5686 trace_real_vaddr,
5687 &fault_info,
5688 caller_prot,
5689 real_vaddr,
5690 type_of_fault,
5691 need_retry,
5692 kr,
5693 physpage_p,
5694 prot,
5695 top_object,
5696 need_collapse,
5697 cur_offset,
5698 fault_type,
5699 &written_on_object,
5700 &written_on_pager,
5701 &written_on_offset);
5702 top_object = VM_OBJECT_NULL;
5703 if (need_retry == TRUE) {
5704 /*
5705 * vm_fault_enter couldn't complete the PMAP_ENTER...
5706 * at this point we don't hold any locks so it's safe
5707 * to ask the pmap layer to expand the page table to
5708 * accommodate this mapping... once expanded, we'll
5709 * re-drive the fault which should result in vm_fault_enter
5710 * being able to successfully enter the mapping this time around
5711 */
5712 (void)pmap_enter_options(
5713 pmap, vaddr, 0, 0, 0, 0, 0,
5714 PMAP_OPTIONS_NOENTER, NULL);
5715
5716 need_retry = FALSE;
5717 goto RetryFault;
5718 }
5719 goto done;
5720 }
5721 /*
5722 * On to the next level in the shadow chain
5723 */
5724 cur_offset += cur_object->vo_shadow_offset;
5725 new_object = cur_object->shadow;
5726 fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
5727
5728 /*
5729 * take the new_object's lock with the indicated state
5730 */
5731 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5732 vm_object_lock_shared(new_object);
5733 } else {
5734 vm_object_lock(new_object);
5735 }
5736
5737 if (cur_object != object) {
5738 vm_object_unlock(cur_object);
5739 }
5740
5741 cur_object = new_object;
5742
5743 continue;
5744 }
5745 }
5746 /*
5747 * Cleanup from fast fault failure. Drop any object
5748 * lock other than original and drop map lock.
5749 */
5750 if (object != cur_object) {
5751 vm_object_unlock(cur_object);
5752 }
5753
5754 /*
5755 * must own the object lock exclusively at this point
5756 */
5757 if (object_lock_type == OBJECT_LOCK_SHARED) {
5758 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5759
5760 if (vm_object_lock_upgrade(object) == FALSE) {
5761 /*
5762 * couldn't upgrade, so explicitly
5763 * take the lock exclusively
5764 * no need to retry the fault at this
5765 * point since "vm_fault_page" will
5766 * completely re-evaluate the state
5767 */
5768 vm_object_lock(object);
5769 }
5770 }
5771
5772 handle_copy_delay:
5773 vm_map_unlock_read(map);
5774 if (real_map != map) {
5775 vm_map_unlock(real_map);
5776 }
5777
5778 if (__improbable(object == compressor_object ||
5779 is_kernel_object(object))) {
5780 /*
5781 * These objects are explicitly managed and populated by the
5782 * kernel. The virtual ranges backed by these objects should
5783 * either have wired pages or "holes" that are not supposed to
5784 * be accessed at all until they get explicitly populated.
5785 * We should never have to resolve a fault on a mapping backed
5786 * by one of these VM objects and providing a zero-filled page
5787 * would be wrong here, so let's fail the fault and let the
5788 * caller crash or recover.
5789 */
5790 vm_object_unlock(object);
5791 kr = KERN_MEMORY_ERROR;
5792 goto done;
5793 }
5794
5795 resilient_media_ref_transfer = false;
5796 if (resilient_media_retry) {
5797 /*
5798 * We could get here if we failed to get a free page
5799 * to zero-fill and had to take the slow path again.
5800 * Reset our "recovery-from-failed-media" state.
5801 */
5802 assert(resilient_media_object != VM_OBJECT_NULL);
5803 assert(resilient_media_offset != (vm_object_offset_t)-1);
5804 /* release our extra reference on failed object */
5805 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
5806 if (object == resilient_media_object) {
5807 /*
5808 * We're holding "object"'s lock, so we can't release
5809 * our extra reference at this point.
5810 * We need an extra reference on "object" anyway
5811 * (see below), so let's just transfer this reference.
5812 */
5813 resilient_media_ref_transfer = true;
5814 } else {
5815 vm_object_lock_assert_notheld(resilient_media_object);
5816 vm_object_deallocate(resilient_media_object);
5817 }
5818 resilient_media_object = VM_OBJECT_NULL;
5819 resilient_media_offset = (vm_object_offset_t)-1;
5820 resilient_media_retry = false;
5821 vm_fault_resilient_media_abort2++;
5822 }
5823
5824 /*
5825 * Make a reference to this object to
5826 * prevent its disposal while we are messing with
5827 * it. Once we have the reference, the map is free
5828 * to be diddled. Since objects reference their
5829 * shadows (and copies), they will stay around as well.
5830 */
5831 if (resilient_media_ref_transfer) {
5832 /* we already have an extra reference on this object */
5833 resilient_media_ref_transfer = false;
5834 } else {
5835 vm_object_reference_locked(object);
5836 }
5837 vm_object_paging_begin(object);
5838
5839 set_thread_pagein_error(cthread, 0);
5840 error_code = 0;
5841
5842 result_page = VM_PAGE_NULL;
5843 kr = vm_fault_page(object, offset, fault_type,
5844 (change_wiring && !wired),
5845 FALSE, /* page not looked up */
5846 &prot, &result_page, &top_page,
5847 &type_of_fault,
5848 &error_code, map->no_zero_fill,
5849 &fault_info);
5850
5851 /*
5852 * if kr != VM_FAULT_SUCCESS, then the paging reference
5853 * has been dropped and the object unlocked... the ref_count
5854 * is still held
5855 *
5856 * if kr == VM_FAULT_SUCCESS, then the paging reference
5857 * is still held along with the ref_count on the original object
5858 *
5859 * the object is returned locked with a paging reference
5860 *
5861 * if top_page != NULL, then it's BUSY and the
5862 * object it belongs to has a paging reference
5863 * but is returned unlocked
5864 */
5865 if (kr != VM_FAULT_SUCCESS &&
5866 kr != VM_FAULT_SUCCESS_NO_VM_PAGE) {
5867 if (kr == VM_FAULT_MEMORY_ERROR &&
5868 fault_info.resilient_media) {
5869 assertf(object->internal, "object %p", object);
5870 /*
5871 * This fault failed but the mapping was
5872 * "media resilient", so we'll retry the fault in
5873 * recovery mode to get a zero-filled page in the
5874 * top object.
5875 * Keep the reference on the failing object so
5876 * that we can check that the mapping is still
5877 * pointing to it when we retry the fault.
5878 */
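/*
 * Editor's note -- illustrative sketch, not part of the fault path.
 * A "media resilient" mapping is requested from user space with the
 * MAP_RESILIENT_MEDIA mmap() flag (declared in <sys/mman.h> on
 * Darwin); "/Volumes/ext/file" is a placeholder path:
 *
 *     #include <fcntl.h>
 *     #include <sys/mman.h>
 *
 *     int
 *     main(void)
 *     {
 *         int fd = open("/Volumes/ext/file", O_RDONLY);
 *         char *p = mmap(NULL, 4096, PROT_READ,
 *             MAP_PRIVATE | MAP_RESILIENT_MEDIA, fd, 0);
 *
 *         // after a media error: zero-filled page, no SIGBUS
 *         return (p == MAP_FAILED) ? 1 : p[0];
 *     }
 */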
5879 // printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
5880 assert(!resilient_media_retry); /* no double retry */
5881 assert(resilient_media_object == VM_OBJECT_NULL);
5882 assert(resilient_media_offset == (vm_object_offset_t)-1);
5883 resilient_media_retry = true;
5884 resilient_media_object = object;
5885 resilient_media_offset = offset;
5886 // printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_mmedia_offset);
5887 vm_fault_resilient_media_initiate++;
5888 goto RetryFault;
5889 } else {
5890 /*
5891 * we didn't succeed, lose the object reference
5892 * immediately.
5893 */
5894 vm_object_deallocate(object);
5895 object = VM_OBJECT_NULL; /* no longer valid */
5896 }
5897
5898 /*
5899 * See why we failed, and take corrective action.
5900 */
5901 switch (kr) {
5902 case VM_FAULT_MEMORY_SHORTAGE:
5903 if (vm_page_wait((change_wiring) ?
5904 THREAD_UNINT :
5905 THREAD_ABORTSAFE)) {
5906 goto RetryFault;
5907 }
5908 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_MEMORY_SHORTAGE), 0 /* arg */);
5909 OS_FALLTHROUGH;
5910 case VM_FAULT_INTERRUPTED:
5911 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
5912 kr = KERN_ABORTED;
5913 goto done;
5914 case VM_FAULT_RETRY:
5915 goto RetryFault;
5916 case VM_FAULT_MEMORY_ERROR:
5917 if (error_code) {
5918 kr = error_code;
5919 } else {
5920 kr = KERN_MEMORY_ERROR;
5921 }
5922 goto done;
5923 default:
5924 panic("vm_fault: unexpected error 0x%x from "
5925 "vm_fault_page()\n", kr);
5926 }
5927 }
5928 m = result_page;
5929 m_object = NULL;
5930
5931 if (m != VM_PAGE_NULL) {
5932 m_object = VM_PAGE_OBJECT(m);
5933 assert((change_wiring && !wired) ?
5934 (top_page == VM_PAGE_NULL) :
5935 ((top_page == VM_PAGE_NULL) == (m_object == object)));
5936 }
5937
5938 /*
5939 * What to do with the resulting page from vm_fault_page
5940 * if it doesn't get entered into the physical map:
5941 */
5942 #define RELEASE_PAGE(m) \
5943 MACRO_BEGIN \
5944 PAGE_WAKEUP_DONE(m); \
5945 if ( !VM_PAGE_PAGEABLE(m)) { \
5946 vm_page_lockspin_queues(); \
5947 if ( !VM_PAGE_PAGEABLE(m)) \
5948 vm_page_activate(m); \
5949 vm_page_unlock_queues(); \
5950 } \
5951 MACRO_END
5952
5953
5954 object_locks_dropped = FALSE;
5955 /*
5956 * We must verify that the maps have not changed
5957 * since our last lookup. vm_map_verify() needs the
5958 * map lock (shared) but we are holding object locks.
5959 * So we do a try_lock() first and, if that fails, we
5960 * drop the object locks and go in for the map lock again.
5961 */
5962 if (!vm_map_try_lock_read(original_map)) {
5963 if (m != VM_PAGE_NULL) {
5964 old_copy_object = m_object->vo_copy;
5965 vm_object_unlock(m_object);
5966 } else {
5967 old_copy_object = VM_OBJECT_NULL;
5968 vm_object_unlock(object);
5969 }
5970
5971 object_locks_dropped = TRUE;
5972
5973 vm_map_lock_read(original_map);
5974 }
5975
5976 if ((map != original_map) || !vm_map_verify(map, &version)) {
5977 if (object_locks_dropped == FALSE) {
5978 if (m != VM_PAGE_NULL) {
5979 old_copy_object = m_object->vo_copy;
5980 vm_object_unlock(m_object);
5981 } else {
5982 old_copy_object = VM_OBJECT_NULL;
5983 vm_object_unlock(object);
5984 }
5985
5986 object_locks_dropped = TRUE;
5987 }
5988
5989 /*
5990 * no object locks are held at this point
5991 */
5992 vm_object_t retry_object;
5993 vm_object_offset_t retry_offset;
5994 vm_prot_t retry_prot;
5995
5996 /*
5997 * To avoid trying to write_lock the map while another
5998 * thread has it read_locked (in vm_map_pageable), we
5999 * do not try for write permission. If the page is
6000 * still writable, we will get write permission. If it
6001 * is not, or has been marked needs_copy, we enter the
6002 * mapping without write permission, and will merely
6003 * take another fault.
6004 */
6005 map = original_map;
6006
6007 kr = vm_map_lookup_and_lock_object(&map, vaddr,
6008 fault_type & ~VM_PROT_WRITE,
6009 OBJECT_LOCK_EXCLUSIVE, &version,
6010 &retry_object, &retry_offset, &retry_prot,
6011 &wired,
6012 &fault_info,
6013 &real_map,
6014 NULL);
6015 pmap = real_map->pmap;
6016
6017 if (kr != KERN_SUCCESS) {
6018 vm_map_unlock_read(map);
6019
6020 if (m != VM_PAGE_NULL) {
6021 assert(VM_PAGE_OBJECT(m) == m_object);
6022
6023 /*
6024 * retake the lock so that
6025 * we can drop the paging reference
6026 * in vm_fault_cleanup and do the
6027 * PAGE_WAKEUP_DONE in RELEASE_PAGE
6028 */
6029 vm_object_lock(m_object);
6030
6031 RELEASE_PAGE(m);
6032
6033 vm_fault_cleanup(m_object, top_page);
6034 } else {
6035 /*
6036 * retake the lock so that
6037 * we can drop the paging reference
6038 * in vm_fault_cleanup
6039 */
6040 vm_object_lock(object);
6041
6042 vm_fault_cleanup(object, top_page);
6043 }
6044 vm_object_deallocate(object);
6045
6046 if (kr == KERN_INVALID_ADDRESS) {
6047 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
6048 }
6049 goto done;
6050 }
6051 vm_object_unlock(retry_object);
6052
6053 if ((retry_object != object) || (retry_offset != offset)) {
6054 vm_map_unlock_read(map);
6055 if (real_map != map) {
6056 vm_map_unlock(real_map);
6057 }
6058
6059 if (m != VM_PAGE_NULL) {
6060 assert(VM_PAGE_OBJECT(m) == m_object);
6061
6062 /*
6063 * retake the lock so that
6064 * we can drop the paging reference
6065 * in vm_fault_cleanup and do the
6066 * PAGE_WAKEUP_DONE in RELEASE_PAGE
6067 */
6068 vm_object_lock(m_object);
6069
6070 RELEASE_PAGE(m);
6071
6072 vm_fault_cleanup(m_object, top_page);
6073 } else {
6074 /*
6075 * retake the lock so that
6076 * we can drop the paging reference
6077 * in vm_fault_cleanup
6078 */
6079 vm_object_lock(object);
6080
6081 vm_fault_cleanup(object, top_page);
6082 }
6083 vm_object_deallocate(object);
6084
6085 goto RetryFault;
6086 }
6087 /*
6088 * Check whether the protection has changed or the object
6089 * has been copied while we left the map unlocked.
6090 */
6091 if (pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
6092 /* If the pmap layer cares, pass the full set. */
6093 prot = retry_prot;
6094 } else {
6095 prot &= retry_prot;
6096 }
6097 }
6098
6099 if (object_locks_dropped == TRUE) {
6100 if (m != VM_PAGE_NULL) {
6101 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6102 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6103 vm_object_lock(m_object);
6104
6105 if (m_object->vo_copy != old_copy_object) {
6106 /*
6107 * The copy object changed while the top-level object
6108 * was unlocked, so take away write permission.
6109 */
6110 assert(!pmap_has_prot_policy(pmap, fault_info.pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot));
6111 prot &= ~VM_PROT_WRITE;
6112 }
6113 } else {
6114 vm_object_lock(object);
6115 }
6116
6117 object_locks_dropped = FALSE;
6118 }
6119
6120 if (!need_copy &&
6121 !fault_info.no_copy_on_read &&
6122 m != VM_PAGE_NULL &&
6123 VM_PAGE_OBJECT(m) != object &&
6124 !VM_PAGE_OBJECT(m)->pager_trusted &&
6125 vm_protect_privileged_from_untrusted &&
6126 !VM_PAGE_OBJECT(m)->code_signed &&
6127 current_proc_is_privileged()) {
6128 /*
6129 * We found the page we want in an "untrusted" VM object
6130 * down the shadow chain. Since the target is "privileged"
6131 * we want to perform a copy-on-read of that page, so that the
6132 * mapped object gets a stable copy and does not have to
6133 * rely on the "untrusted" object to provide the same
6134 * contents if the page gets reclaimed and has to be paged
6135 * in again later on.
6136 *
6137 * Special case: if the mapping is executable and the untrusted
6138 * object is code-signed and the process is "cs_enforced", we
6139 * do not copy-on-read because that would break code-signing
6140 * enforcement expectations (an executable page must belong
6141 * to a code-signed object) and we can rely on code-signing
6142 * to re-validate the page if it gets evicted and paged back in.
6143 */
6144 // printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
6145 vm_copied_on_read++;
6146 need_copy_on_read = TRUE;
6147 need_copy = TRUE;
6148 } else {
6149 need_copy_on_read = FALSE;
6150 }
6151
6152 /*
6153 * If we want to wire down this page, but no longer have
6154 * adequate permissions, we must start all over.
6155 * If we decided to copy-on-read, we must also start all over.
6156 */
6157 if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
6158 need_copy_on_read) {
6159 vm_map_unlock_read(map);
6160 if (real_map != map) {
6161 vm_map_unlock(real_map);
6162 }
6163
6164 if (m != VM_PAGE_NULL) {
6165 assert(VM_PAGE_OBJECT(m) == m_object);
6166
6167 RELEASE_PAGE(m);
6168
6169 vm_fault_cleanup(m_object, top_page);
6170 } else {
6171 vm_fault_cleanup(object, top_page);
6172 }
6173
6174 vm_object_deallocate(object);
6175
6176 goto RetryFault;
6177 }
6178 if (m != VM_PAGE_NULL) {
6179 /*
6180 * Put this page into the physical map.
6181 * We had to do the unlock above because pmap_enter
6182 * may cause other faults. The page may be on
6183 * the pageout queues. If the pageout daemon comes
6184 * across the page, it will remove it from the queues.
6185 */
6186 if (fault_page_size < PAGE_SIZE) {
6187 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
6188 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
6189 fault_phys_offset < PAGE_SIZE),
6190 "0x%llx\n", (uint64_t)fault_phys_offset);
6191 } else {
6192 assertf(fault_phys_offset == 0,
6193 "0x%llx\n", (uint64_t)fault_phys_offset);
6194 }
6195 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6196 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6197 if (caller_pmap) {
6198 kr = vm_fault_enter(m,
6199 caller_pmap,
6200 caller_pmap_addr,
6201 fault_page_size,
6202 fault_phys_offset,
6203 prot,
6204 caller_prot,
6205 wired,
6206 change_wiring,
6207 wire_tag,
6208 &fault_info,
6209 NULL,
6210 &type_of_fault,
6211 &object_lock_type);
6212 } else {
6213 kr = vm_fault_enter(m,
6214 pmap,
6215 vaddr,
6216 fault_page_size,
6217 fault_phys_offset,
6218 prot,
6219 caller_prot,
6220 wired,
6221 change_wiring,
6222 wire_tag,
6223 &fault_info,
6224 NULL,
6225 &type_of_fault,
6226 &object_lock_type);
6227 }
6228 assert(VM_PAGE_OBJECT(m) == m_object);
6229
6230 {
6231 int event_code = 0;
6232
6233 if (m_object->internal) {
6234 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6235 } else if (m_object->object_is_shared_cache) {
6236 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6237 } else {
6238 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6239 }
6240
6241 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info.user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid());
6242 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid());
6243
6244 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info.user_tag);
6245 }
6246 if (kr != KERN_SUCCESS) {
6247 /* abort this page fault */
6248 vm_map_unlock_read(map);
6249 if (real_map != map) {
6250 vm_map_unlock(real_map);
6251 }
6252 PAGE_WAKEUP_DONE(m);
6253 vm_fault_cleanup(m_object, top_page);
6254 vm_object_deallocate(object);
6255 goto done;
6256 }
6257 if (physpage_p != NULL) {
6258 /* for vm_map_wire_and_extract() */
6259 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6260 if (prot & VM_PROT_WRITE) {
6261 vm_object_lock_assert_exclusive(m_object);
6262 m->vmp_dirty = TRUE;
6263 }
6264 }
6265 } else {
6266 vm_map_entry_t entry;
6267 vm_map_offset_t laddr;
6268 vm_map_offset_t ldelta, hdelta;
6269
6270 /*
6271 * do a pmap block mapping from the physical address
6272 * in the object
6273 */
6274
6275 if (real_map != map) {
6276 vm_map_unlock(real_map);
6277 }
6278
6279 if (original_map != map) {
6280 vm_map_unlock_read(map);
6281 vm_map_lock_read(original_map);
6282 map = original_map;
6283 }
6284 real_map = map;
6285
6286 laddr = vaddr;
6287 hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
6288
6289 while (vm_map_lookup_entry(map, laddr, &entry)) {
6290 if (ldelta > (laddr - entry->vme_start)) {
6291 ldelta = laddr - entry->vme_start;
6292 }
6293 if (hdelta > (entry->vme_end - laddr)) {
6294 hdelta = entry->vme_end - laddr;
6295 }
6296 if (entry->is_sub_map) {
6297 laddr = ((laddr - entry->vme_start)
6298 + VME_OFFSET(entry));
6299 vm_map_lock_read(VME_SUBMAP(entry));
6300
6301 if (map != real_map) {
6302 vm_map_unlock_read(map);
6303 }
6304 if (entry->use_pmap) {
6305 vm_map_unlock_read(real_map);
6306 real_map = VME_SUBMAP(entry);
6307 }
6308 map = VME_SUBMAP(entry);
6309 } else {
6310 break;
6311 }
6312 }
6313
6314 if (vm_map_lookup_entry(map, laddr, &entry) &&
6315 (!entry->is_sub_map) &&
6316 (object != VM_OBJECT_NULL) &&
6317 (VME_OBJECT(entry) == object)) {
6318 uint16_t superpage;
6319
6320 if (!object->pager_created &&
6321 object->phys_contiguous &&
6322 VME_OFFSET(entry) == 0 &&
6323 (entry->vme_end - entry->vme_start == object->vo_size) &&
6324 VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6325 superpage = VM_MEM_SUPERPAGE;
6326 } else {
6327 superpage = 0;
6328 }
6329
6330 if (superpage && physpage_p) {
6331 /* for vm_map_wire_and_extract() */
6332 *physpage_p = (ppnum_t)
6333 ((((vm_map_offset_t)
6334 object->vo_shadow_offset)
6335 + VME_OFFSET(entry)
6336 + (laddr - entry->vme_start))
6337 >> PAGE_SHIFT);
6338 }
6339
6340 if (caller_pmap) {
6341 /*
6342 * Set up a block mapped area
6343 */
6344 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6345 kr = pmap_map_block_addr(caller_pmap,
6346 (addr64_t)(caller_pmap_addr - ldelta),
6347 (pmap_paddr_t)(((vm_map_offset_t) (object->vo_shadow_offset)) +
6348 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6349 (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6350 (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6351
6352 if (kr != KERN_SUCCESS) {
6353 goto cleanup;
6354 }
6355 } else {
6356 /*
6357 * Set up a block mapped area
6358 */
6359 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6360 kr = pmap_map_block_addr(real_map->pmap,
6361 (addr64_t)(vaddr - ldelta),
6362 (pmap_paddr_t)(((vm_map_offset_t)(object->vo_shadow_offset)) +
6363 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta),
6364 (uint32_t)((ldelta + hdelta) >> fault_page_shift), prot,
6365 (VM_WIMG_MASK & (int)object->wimg_bits) | superpage, 0);
6366
6367 if (kr != KERN_SUCCESS) {
6368 goto cleanup;
6369 }
6370 }
6371 }
6372 }
6373
6374 /*
6375 * Success
6376 */
6377 kr = KERN_SUCCESS;
6378
6379 /*
6380 * TODO: could most of the done cases just use cleanup?
6381 */
6382 cleanup:
6383 /*
6384 * Unlock everything, and return
6385 */
6386 vm_map_unlock_read(map);
6387 if (real_map != map) {
6388 vm_map_unlock(real_map);
6389 }
6390
6391 if (m != VM_PAGE_NULL) {
6392 if (__improbable(rtfault &&
6393 !m->vmp_realtime &&
6394 vm_pageout_protect_realtime)) {
6395 vm_page_lock_queues();
6396 if (!m->vmp_realtime) {
6397 m->vmp_realtime = true;
6398 vm_page_realtime_count++;
6399 }
6400 vm_page_unlock_queues();
6401 }
6402 assert(VM_PAGE_OBJECT(m) == m_object);
6403
6404 if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6405 vm_object_paging_begin(m_object);
6406
6407 assert(written_on_object == VM_OBJECT_NULL);
6408 written_on_object = m_object;
6409 written_on_pager = m_object->pager;
6410 written_on_offset = m_object->paging_offset + m->vmp_offset;
6411 }
6412 PAGE_WAKEUP_DONE(m);
6413
6414 vm_fault_cleanup(m_object, top_page);
6415 } else {
6416 vm_fault_cleanup(object, top_page);
6417 }
6418
6419 vm_object_deallocate(object);
6420
6421 #undef RELEASE_PAGE
6422
6423 done:
6424 thread_interrupt_level(interruptible_state);
6425
6426 if (resilient_media_object != VM_OBJECT_NULL) {
6427 assert(resilient_media_retry);
6428 assert(resilient_media_offset != (vm_object_offset_t)-1);
6429 /* release extra reference on failed object */
6430 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6431 vm_object_lock_assert_notheld(resilient_media_object);
6432 vm_object_deallocate(resilient_media_object);
6433 resilient_media_object = VM_OBJECT_NULL;
6434 resilient_media_offset = (vm_object_offset_t)-1;
6435 resilient_media_retry = false;
6436 vm_fault_resilient_media_release++;
6437 }
6438 assert(!resilient_media_retry);
6439
6440 /*
6441 * Only I/O throttle on faults which cause a pagein/swapin.
6442 */
6443 if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6444 throttle_lowpri_io(1);
6445 } else {
6446 if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6447 if ((throttle_delay = vm_page_throttled(TRUE))) {
6448 if (vm_debug_events) {
6449 if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6450 VM_DEBUG_EVENT(vmf_compressordelay, VMF_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6451 } else if (type_of_fault == DBG_COW_FAULT) {
6452 VM_DEBUG_EVENT(vmf_cowdelay, VMF_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6453 } else {
6454 VM_DEBUG_EVENT(vmf_zfdelay, VMF_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6455 }
6456 }
6457 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
6458 }
6459 }
6460 }
6461
6462 if (written_on_object) {
6463 vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6464
6465 vm_object_lock(written_on_object);
6466 vm_object_paging_end(written_on_object);
6467 vm_object_unlock(written_on_object);
6468
6469 written_on_object = VM_OBJECT_NULL;
6470 }
6471
6472 if (rtfault) {
6473 vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6474 }
6475
6476 KDBG_RELEASE(
6477 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
6478 ((uint64_t)trace_vaddr >> 32),
6479 trace_vaddr,
6480 kr,
6481 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault));
6482
6483 if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6484 DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6485 }
6486
6487 return kr;
6488 }
6489
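/*
 * Editor's note -- illustrative sketch. The wiring path below
 * (vm_fault_wire) is what user space reaches through mlock(2), and
 * vm_fault_unwire further below through munlock(2):
 *
 *     #include <stdlib.h>
 *     #include <sys/mman.h>
 *
 *     int
 *     main(void)
 *     {
 *         size_t len = 4 * 4096;
 *         char *buf = malloc(len);
 *
 *         if (buf == NULL) {
 *             return 1;
 *         }
 *         if (mlock(buf, len) != 0) {   // wire: faults each page in now
 *             return 1;
 *         }
 *         buf[0] = 1;                   // guaranteed resident: no fault
 *         munlock(buf, len);            // unwire
 *         free(buf);
 *         return 0;
 *     }
 */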
6490 /*
6491 * vm_fault_wire:
6492 *
6493 * Wire down a range of virtual addresses in a map.
6494 */
6495 kern_return_t
6496 vm_fault_wire(
6497 vm_map_t map,
6498 vm_map_entry_t entry,
6499 vm_prot_t prot,
6500 vm_tag_t wire_tag,
6501 pmap_t pmap,
6502 vm_map_offset_t pmap_addr,
6503 ppnum_t *physpage_p)
6504 {
6505 vm_map_offset_t va;
6506 vm_map_offset_t end_addr = entry->vme_end;
6507 kern_return_t rc;
6508 vm_map_size_t effective_page_size;
6509
6510 assert(entry->in_transition);
6511
6512 if (!entry->is_sub_map &&
6513 VME_OBJECT(entry) != VM_OBJECT_NULL &&
6514 VME_OBJECT(entry)->phys_contiguous) {
6515 return KERN_SUCCESS;
6516 }
6517
6518 /*
6519 * Inform the physical mapping system that the
6520 * range of addresses may not fault, so that
6521 * page tables and such can be locked down as well.
6522 */
6523
6524 pmap_pageable(pmap, pmap_addr,
6525 pmap_addr + (end_addr - entry->vme_start), FALSE);
6526
6527 /*
6528 * We simulate a fault to get the page and enter it
6529 * in the physical map.
6530 */
6531
6532 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6533 for (va = entry->vme_start;
6534 va < end_addr;
6535 va += effective_page_size) {
6536 rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6537 pmap_addr + (va - entry->vme_start),
6538 physpage_p);
6539 if (rc != KERN_SUCCESS) {
6540 rc = vm_fault_internal(map, va, prot, TRUE, wire_tag,
6541 ((pmap == kernel_pmap)
6542 ? THREAD_UNINT
6543 : THREAD_ABORTSAFE),
6544 pmap,
6545 (pmap_addr +
6546 (va - entry->vme_start)),
6547 physpage_p);
6548 DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6549 }
6550
6551 if (rc != KERN_SUCCESS) {
6552 struct vm_map_entry tmp_entry = *entry;
6553
6554 /* unwire wired pages */
6555 tmp_entry.vme_end = va;
6556 vm_fault_unwire(map, &tmp_entry, FALSE,
6557 pmap, pmap_addr, tmp_entry.vme_end);
6558
6559 return rc;
6560 }
6561 }
6562 return KERN_SUCCESS;
6563 }
6564
6565 /*
6566 * vm_fault_unwire:
6567 *
6568 * Unwire a range of virtual addresses in a map.
6569 */
6570 void
6571 vm_fault_unwire(
6572 vm_map_t map,
6573 vm_map_entry_t entry,
6574 boolean_t deallocate,
6575 pmap_t pmap,
6576 vm_map_offset_t pmap_addr,
6577 vm_map_offset_t end_addr)
6578 {
6579 vm_map_offset_t va;
6580 vm_object_t object;
6581 struct vm_object_fault_info fault_info = {};
6582 unsigned int unwired_pages;
6583 vm_map_size_t effective_page_size;
6584
6585 object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6586
6587 /*
6588 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6589 * do anything since such memory is wired by default. So we don't have
6590 * anything to undo here.
6591 */
6592
6593 if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6594 return;
6595 }
6596
6597 fault_info.interruptible = THREAD_UNINT;
6598 fault_info.behavior = entry->behavior;
6599 fault_info.user_tag = VME_ALIAS(entry);
6600 if (entry->iokit_acct ||
6601 (!entry->is_sub_map && !entry->use_pmap)) {
6602 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6603 }
6604 fault_info.lo_offset = VME_OFFSET(entry);
6605 fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6606 fault_info.no_cache = entry->no_cache;
6607 fault_info.stealth = TRUE;
6608 if (entry->vme_xnu_user_debug) {
6609 /*
6610 * Modified code-signed executable region: wired pages must
6611 * have been copied, so they should be XNU_USER_DEBUG rather
6612 * than XNU_USER_EXEC.
6613 */
6614 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6615 }
6616
6617 unwired_pages = 0;
6618
6619 /*
6620 * Since the pages are wired down, we must be able to
6621 * get their mappings from the physical map system.
6622 */
6623
6624 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6625 for (va = entry->vme_start;
6626 va < end_addr;
6627 va += effective_page_size) {
6628 if (object == VM_OBJECT_NULL) {
6629 if (pmap) {
6630 pmap_change_wiring(pmap,
6631 pmap_addr + (va - entry->vme_start), FALSE);
6632 }
6633 (void) vm_fault(map, va, VM_PROT_NONE,
6634 TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6635 } else {
6636 vm_prot_t prot;
6637 vm_page_t result_page;
6638 vm_page_t top_page;
6639 vm_object_t result_object;
6640 vm_fault_return_t result;
6641
6642 /* cap cluster size at maximum UPL size */
6643 upl_size_t cluster_size;
6644 if (os_sub_overflow(end_addr, va, &cluster_size)) {
6645 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
6646 }
6647 fault_info.cluster_size = cluster_size;
6648
6649 do {
6650 prot = VM_PROT_NONE;
6651
6652 vm_object_lock(object);
6653 vm_object_paging_begin(object);
6654 result_page = VM_PAGE_NULL;
6655 result = vm_fault_page(
6656 object,
6657 (VME_OFFSET(entry) +
6658 (va - entry->vme_start)),
6659 VM_PROT_NONE, TRUE,
6660 FALSE, /* page not looked up */
6661 &prot, &result_page, &top_page,
6662 (int *)0,
6663 NULL, map->no_zero_fill,
6664 &fault_info);
6665 } while (result == VM_FAULT_RETRY);
6666
6667 /*
6668 * If this was a mapping to a file on a device that has been forcibly
6669 * unmounted, then we won't get a page back from vm_fault_page(). Just
6670 * move on to the next one in case the remaining pages are mapped from
6671 * different objects. During a forced unmount, the object is terminated
6672 * so the alive flag will be false if this happens. A forced unmount
6673 * will occur when an external disk is unplugged before the user does an
6674 * eject, so we don't want to panic in that situation.
6675 */
6676
6677 if (result == VM_FAULT_MEMORY_ERROR) {
6678 if (!object->alive) {
6679 continue;
6680 }
6681 if (!object->internal && object->pager == NULL) {
6682 continue;
6683 }
6684 }
6685
6686 if (result == VM_FAULT_MEMORY_ERROR &&
6687 is_kernel_object(object)) {
6688 /*
6689 * This must have been allocated with
6690 * KMA_KOBJECT and KMA_VAONLY and there's
6691 * no physical page at this offset.
6692 * We're done (no page to free).
6693 */
6694 assert(deallocate);
6695 continue;
6696 }
6697
6698 if (result != VM_FAULT_SUCCESS) {
6699 panic("vm_fault_unwire: failure");
6700 }
6701
6702 result_object = VM_PAGE_OBJECT(result_page);
6703
6704 if (deallocate) {
6705 assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6706 vm_page_fictitious_addr);
6707 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6708 if (VM_PAGE_WIRED(result_page)) {
6709 unwired_pages++;
6710 }
6711 VM_PAGE_FREE(result_page);
6712 } else {
6713 if ((pmap) && (VM_PAGE_GET_PHYS_PAGE(result_page) != vm_page_guard_addr)) {
6714 pmap_change_wiring(pmap,
6715 pmap_addr + (va - entry->vme_start), FALSE);
6716 }
6717
6718
6719 if (VM_PAGE_WIRED(result_page)) {
6720 vm_page_lockspin_queues();
6721 vm_page_unwire(result_page, TRUE);
6722 vm_page_unlock_queues();
6723 unwired_pages++;
6724 }
6725 if (entry->zero_wired_pages) {
6726 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
6727 entry->zero_wired_pages = FALSE;
6728 }
6729
6730 PAGE_WAKEUP_DONE(result_page);
6731 }
6732 vm_fault_cleanup(result_object, top_page);
6733 }
6734 }
6735
6736 /*
6737 * Inform the physical mapping system that the range
6738 * of addresses may fault, so that page tables and
6739 * such may be unwired themselves.
6740 */
6741
6742 pmap_pageable(pmap, pmap_addr,
6743 pmap_addr + (end_addr - entry->vme_start), TRUE);
6744
6745 if (is_kernel_object(object)) {
6746 /*
6747 * Would like to make user_tag in vm_object_fault_info
6748 * vm_tag_t (unsigned short) but user_tag derives its value from
6749 * VME_ALIAS(entry) at a few places and VME_ALIAS, in turn, casts
6750 * to an _unsigned int_ which is used by non-fault_info paths throughout the
6751 * code in many places.
6752 *
6753 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
6754 */
6755 assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
6756 "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
6757 vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages), NULL);
6758 }
6759 }
6760
6761 /*
6762 * vm_fault_wire_fast:
6763 *
6764 * Handle common case of a wire down page fault at the given address.
6765 * If successful, the page is inserted into the associated physical map.
6766 * The map entry is passed in to avoid the overhead of a map lookup.
6767 *
6768 * NOTE: the given address should be truncated to the
6769 * proper page address.
6770 *
6771 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
6772 * a standard error specifying why the fault is fatal is returned.
6773 *
6774 * The map in question must be referenced, and remains so.
6775 * Caller has a read lock on the map.
6776 *
6777 * This is a stripped version of vm_fault() for wiring pages. Anything
6778 * other than the common case will return KERN_FAILURE, and the caller
6779 * is expected to call vm_fault().
6780 */
6781 static kern_return_t
6782 vm_fault_wire_fast(
6783 __unused vm_map_t map,
6784 vm_map_offset_t va,
6785 __unused vm_prot_t caller_prot,
6786 vm_tag_t wire_tag,
6787 vm_map_entry_t entry,
6788 pmap_t pmap,
6789 vm_map_offset_t pmap_addr,
6790 ppnum_t *physpage_p)
6791 {
6792 vm_object_t object;
6793 vm_object_offset_t offset;
6794 vm_page_t m;
6795 vm_prot_t prot;
6796 thread_t thread = current_thread();
6797 int type_of_fault;
6798 kern_return_t kr;
6799 vm_map_size_t fault_page_size;
6800 vm_map_offset_t fault_phys_offset;
6801 struct vm_object_fault_info fault_info = {};
6802 uint8_t object_lock_type = 0;
6803
6804 counter_inc(&vm_statistics_faults);
6805
6806 if (thread != THREAD_NULL) {
6807 counter_inc(&get_threadtask(thread)->faults);
6808 }
6809
6810 /*
6811 * Recovery actions
6812 */
6813
6814 #undef RELEASE_PAGE
6815 #define RELEASE_PAGE(m) { \
6816 PAGE_WAKEUP_DONE(m); \
6817 vm_page_lockspin_queues(); \
6818 vm_page_unwire(m, TRUE); \
6819 vm_page_unlock_queues(); \
6820 }
6821
6822
6823 #undef UNLOCK_THINGS
6824 #define UNLOCK_THINGS { \
6825 vm_object_paging_end(object); \
6826 vm_object_unlock(object); \
6827 }
6828
6829 #undef UNLOCK_AND_DEALLOCATE
6830 #define UNLOCK_AND_DEALLOCATE { \
6831 UNLOCK_THINGS; \
6832 vm_object_deallocate(object); \
6833 }
6834 /*
6835 * Give up and have caller do things the hard way.
6836 */
6837
6838 #define GIVE_UP { \
6839 UNLOCK_AND_DEALLOCATE; \
6840 return(KERN_FAILURE); \
6841 }
6842
6843
6844 /*
6845 * If this entry is not directly to a vm_object, bail out.
6846 */
6847 if (entry->is_sub_map) {
6848 assert(physpage_p == NULL);
6849 return KERN_FAILURE;
6850 }
6851
6852 /*
6853 * Find the backing store object and offset into it.
6854 */
6855
6856 object = VME_OBJECT(entry);
6857 offset = (va - entry->vme_start) + VME_OFFSET(entry);
6858 prot = entry->protection;
6859
6860 /*
6861 * Make a reference to this object to prevent its
6862 * disposal while we are messing with it.
6863 */
6864
6865 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
6866 vm_object_lock(object);
6867 vm_object_reference_locked(object);
6868 vm_object_paging_begin(object);
6869
6870 /*
6871 * INVARIANTS (through entire routine):
6872 *
6873 * 1) At all times, we must either have the object
6874 * lock or a busy page in some object to prevent
6875 * some other thread from trying to bring in
6876 * the same page.
6877 *
6878 * 2) Once we have a busy page, we must remove it from
6879 * the pageout queues, so that the pageout daemon
6880 * will not grab it away.
6881 *
6882 */
6883
6884 /*
6885 * Look for page in top-level object. If it's not there or
6886 * there's something going on, give up.
6887 */
6888 m = vm_page_lookup(object, vm_object_trunc_page(offset));
6889 if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
6890 (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
6891 GIVE_UP;
6892 }
6893 if (m->vmp_fictitious &&
6894 VM_PAGE_GET_PHYS_PAGE(m) == vm_page_guard_addr) {
6895 /*
6896 * Guard pages are fictitious pages and are never
6897 * entered into a pmap, so let's say it's been wired...
6898 */
6899 kr = KERN_SUCCESS;
6900 goto done;
6901 }
6902
6903 /*
6904 * Wire the page down now. All bail outs beyond this
6905 * point must unwire the page.
6906 */
6907
6908 vm_page_lockspin_queues();
6909 vm_page_wire(m, wire_tag, TRUE);
6910 vm_page_unlock_queues();
6911
6912 /*
6913 * Mark page busy for other threads.
6914 */
6915 assert(!m->vmp_busy);
6916 m->vmp_busy = TRUE;
6917 assert(!m->vmp_absent);
6918
6919 /*
6920 * Give up if the page is being written and there's a copy object
6921 */
6922 if ((object->vo_copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
6923 RELEASE_PAGE(m);
6924 GIVE_UP;
6925 }
6926
6927 fault_info.user_tag = VME_ALIAS(entry);
6928 fault_info.pmap_options = 0;
6929 if (entry->iokit_acct ||
6930 (!entry->is_sub_map && !entry->use_pmap)) {
6931 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6932 }
6933 if (entry->vme_xnu_user_debug) {
6934 /*
6935 * Modified code-signed executable region: wiring will
6936 * copy the pages, so they should be XNU_USER_DEBUG rather
6937 * than XNU_USER_EXEC.
6938 */
6939 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6940 }
6941
6942 fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6943 fault_phys_offset = offset - vm_object_trunc_page(offset);
6944
6945 /*
6946 * Put this page into the physical map.
6947 */
6948 type_of_fault = DBG_CACHE_HIT_FAULT;
6949 assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
6950 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6951 kr = vm_fault_enter(m,
6952 pmap,
6953 pmap_addr,
6954 fault_page_size,
6955 fault_phys_offset,
6956 prot,
6957 prot,
6958 TRUE, /* wired */
6959 FALSE, /* change_wiring */
6960 wire_tag,
6961 &fault_info,
6962 NULL,
6963 &type_of_fault,
6964 &object_lock_type); /* Exclusive lock mode. Will remain unchanged.*/
6965 if (kr != KERN_SUCCESS) {
6966 RELEASE_PAGE(m);
6967 GIVE_UP;
6968 }
6969
6970 done:
6971 /*
6972 * Unlock everything, and return
6973 */
6974
6975 if (physpage_p) {
6976 /* for vm_map_wire_and_extract() */
6977 if (kr == KERN_SUCCESS) {
6978 assert(object == VM_PAGE_OBJECT(m));
6979 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6980 if (prot & VM_PROT_WRITE) {
6981 vm_object_lock_assert_exclusive(object);
6982 m->vmp_dirty = TRUE;
6983 }
6984 } else {
6985 *physpage_p = 0;
6986 }
6987 }
6988
6989 PAGE_WAKEUP_DONE(m);
6990 UNLOCK_AND_DEALLOCATE;
6991
6992 return kr;
6993 }
6994
6995 /*
6996 * Routine: vm_fault_copy_cleanup
6997 * Purpose:
6998 * Release a page used by vm_fault_copy.
6999 */
7000
7001 static void
7002 vm_fault_copy_cleanup(
7003 vm_page_t page,
7004 vm_page_t top_page)
7005 {
7006 vm_object_t object = VM_PAGE_OBJECT(page);
7007
7008 vm_object_lock(object);
7009 PAGE_WAKEUP_DONE(page);
7010 if (!VM_PAGE_PAGEABLE(page)) {
7011 vm_page_lockspin_queues();
7012 if (!VM_PAGE_PAGEABLE(page)) {
7013 vm_page_activate(page);
7014 }
7015 vm_page_unlock_queues();
7016 }
7017 vm_fault_cleanup(object, top_page);
7018 }
7019
7020 static void
7021 vm_fault_copy_dst_cleanup(
7022 vm_page_t page)
7023 {
7024 vm_object_t object;
7025
7026 if (page != VM_PAGE_NULL) {
7027 object = VM_PAGE_OBJECT(page);
7028 vm_object_lock(object);
7029 vm_page_lockspin_queues();
7030 vm_page_unwire(page, TRUE);
7031 vm_page_unlock_queues();
7032 vm_object_paging_end(object);
7033 vm_object_unlock(object);
7034 }
7035 }
7036
7037 /*
7038 * Routine: vm_fault_copy
7039 *
7040 * Purpose:
7041 * Copy pages from one virtual memory object to another --
7042 * neither the source nor destination pages need be resident.
7043 *
7044 * Before actually copying a page, the version associated with
7045 * the destination address map will be verified.
7046 *
7047 * In/out conditions:
7048 * The caller must hold a reference, but not a lock, to
7049 * each of the source and destination objects and to the
7050 * destination map.
7051 *
7052 * Results:
7053 * Returns KERN_SUCCESS if no errors were encountered in
7054 * reading or writing the data. Returns KERN_INTERRUPTED if
7055 * the operation was interrupted (only possible if the
7056 * "interruptible" argument is asserted). Other return values
7057 * indicate a permanent error in copying the data.
7058 *
7059 * The actual amount of data copied will be returned in the
7060 * "copy_size" argument. In the event that the destination map
7061 * verification failed, this amount may be less than the amount
7062 * requested.
7063 */
7064 kern_return_t
7065 vm_fault_copy(
7066 vm_object_t src_object,
7067 vm_object_offset_t src_offset,
7068 vm_map_size_t *copy_size, /* INOUT */
7069 vm_object_t dst_object,
7070 vm_object_offset_t dst_offset,
7071 vm_map_t dst_map,
7072 vm_map_version_t *dst_version,
7073 int interruptible)
7074 {
7075 vm_page_t result_page;
7076
7077 vm_page_t src_page;
7078 vm_page_t src_top_page;
7079 vm_prot_t src_prot;
7080
7081 vm_page_t dst_page;
7082 vm_page_t dst_top_page;
7083 vm_prot_t dst_prot;
7084
7085 vm_map_size_t amount_left;
7086 vm_object_t old_copy_object;
7087 vm_object_t result_page_object = NULL;
7088 kern_return_t error = 0;
7089 vm_fault_return_t result;
7090
7091 vm_map_size_t part_size;
7092 struct vm_object_fault_info fault_info_src = {};
7093 struct vm_object_fault_info fault_info_dst = {};
7094
7095 /*
7096 * In order not to confuse the clustered pageins, align
7097 * the different offsets on a page boundary.
7098 */
7099
7100 #define RETURN(x) \
7101 MACRO_BEGIN \
7102 *copy_size -= amount_left; \
7103 MACRO_RETURN(x); \
7104 MACRO_END
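/*
 * Note: MACRO_RETURN(x) effectively expands to "return x", so
 * RETURN(KERN_SUCCESS) is equivalent to:
 *
 *	*copy_size -= amount_left;
 *	return KERN_SUCCESS;
 *
 * ensuring callers always see how much was actually copied.
 */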
7105
7106 amount_left = *copy_size;
7107
7108 fault_info_src.interruptible = interruptible;
7109 fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
7110 fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
7111 fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
7112 fault_info_src.stealth = TRUE;
7113
7114 fault_info_dst.interruptible = interruptible;
7115 fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
7116 fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
7117 fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
7118 fault_info_dst.stealth = TRUE;
7119
7120 do { /* while (amount_left > 0) */
7121 /*
7122 * There may be a deadlock if both source and destination
7123 * pages are the same. To avoid this deadlock, the copy must
7124 * start by getting the destination page in order to apply
7125 * COW semantics if any.
7126 */
7127
7128 RetryDestinationFault:;
7129
7130 dst_prot = VM_PROT_WRITE | VM_PROT_READ;
7131
7132 vm_object_lock(dst_object);
7133 vm_object_paging_begin(dst_object);
7134
7135 /* cap cluster size at maximum UPL size */
7136 upl_size_t cluster_size;
7137 if (os_convert_overflow(amount_left, &cluster_size)) {
7138 cluster_size = 0 - (upl_size_t)PAGE_SIZE; /* wraps to the largest page-aligned upl_size_t value */
7139 }
7140 fault_info_dst.cluster_size = cluster_size;
7141
7142 dst_page = VM_PAGE_NULL;
7143 result = vm_fault_page(dst_object,
7144 vm_object_trunc_page(dst_offset),
7145 VM_PROT_WRITE | VM_PROT_READ,
7146 FALSE,
7147 FALSE, /* page not looked up */
7148 &dst_prot, &dst_page, &dst_top_page,
7149 (int *)0,
7150 &error,
7151 dst_map->no_zero_fill,
7152 &fault_info_dst);
7153 switch (result) {
7154 case VM_FAULT_SUCCESS:
7155 break;
7156 case VM_FAULT_RETRY:
7157 goto RetryDestinationFault;
7158 case VM_FAULT_MEMORY_SHORTAGE:
7159 if (vm_page_wait(interruptible)) {
7160 goto RetryDestinationFault;
7161 }
7162 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_COPY_MEMORY_SHORTAGE), 0 /* arg */);
7163 OS_FALLTHROUGH;
7164 case VM_FAULT_INTERRUPTED:
7165 RETURN(MACH_SEND_INTERRUPTED);
7166 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7167 /* success but no VM page: fail the copy */
7168 vm_object_paging_end(dst_object);
7169 vm_object_unlock(dst_object);
7170 OS_FALLTHROUGH;
7171 case VM_FAULT_MEMORY_ERROR:
7172 if (error) {
7173 return error;
7174 } else {
7175 return KERN_MEMORY_ERROR;
7176 }
7177 default:
7178 panic("vm_fault_copy: unexpected error 0x%x from "
7179 "vm_fault_page()\n", result);
7180 }
7181 assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
7182
7183 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7184 old_copy_object = dst_object->vo_copy;
7185
7186 /*
7187 * There exists the possibility that the source and
7188 * destination page are the same. But we can't
7189 * easily determine that now. If they are the
7190 * same, the upcoming call to vm_fault_page() for the
7191 * source page would deadlock on the busy destination
7192 * page. To prevent this we wire the page so we can
7193 * drop busy without having the page daemon steal the
7194 * page. We clean up the top page but keep the paging
7195 * reference on the object holding the dest page so it doesn't go away.
7196 */
7197
7198 vm_page_lockspin_queues();
7199 vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
7200 vm_page_unlock_queues();
7201 PAGE_WAKEUP_DONE(dst_page);
7202 vm_object_unlock(dst_object);
7203
7204 if (dst_top_page != VM_PAGE_NULL) {
7205 vm_object_lock(dst_object);
7206 VM_PAGE_FREE(dst_top_page);
7207 vm_object_paging_end(dst_object);
7208 vm_object_unlock(dst_object);
7209 }
7210
7211 RetrySourceFault:;
7212
7213 if (src_object == VM_OBJECT_NULL) {
7214 /*
7215 * No source object. We will just
7216 * zero-fill the page in dst_object.
7217 */
7218 src_page = VM_PAGE_NULL;
7219 result_page = VM_PAGE_NULL;
7220 } else {
7221 vm_object_lock(src_object);
7222 src_page = vm_page_lookup(src_object,
7223 vm_object_trunc_page(src_offset));
7224 if (src_page == dst_page) {
7225 src_prot = dst_prot;
7226 result_page = VM_PAGE_NULL;
7227 } else {
7228 src_prot = VM_PROT_READ;
7229 vm_object_paging_begin(src_object);
7230
7231 /* cap cluster size at maximum UPL size */
7232 if (os_convert_overflow(amount_left, &cluster_size)) {
7233 cluster_size = 0 - (upl_size_t)PAGE_SIZE; /* wraps to the largest page-aligned upl_size_t value */
7234 }
7235 fault_info_src.cluster_size = cluster_size;
7236
7237 result_page = VM_PAGE_NULL;
7238 result = vm_fault_page(
7239 src_object,
7240 vm_object_trunc_page(src_offset),
7241 VM_PROT_READ, FALSE,
7242 FALSE, /* page not looked up */
7243 &src_prot,
7244 &result_page, &src_top_page,
7245 (int *)0, &error, FALSE,
7246 &fault_info_src);
7247
7248 switch (result) {
7249 case VM_FAULT_SUCCESS:
7250 break;
7251 case VM_FAULT_RETRY:
7252 goto RetrySourceFault;
7253 case VM_FAULT_MEMORY_SHORTAGE:
7254 if (vm_page_wait(interruptible)) {
7255 goto RetrySourceFault;
7256 }
7257 OS_FALLTHROUGH;
7258 case VM_FAULT_INTERRUPTED:
7259 vm_fault_copy_dst_cleanup(dst_page);
7260 RETURN(MACH_SEND_INTERRUPTED);
7261 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7262 /* success but no VM page: fail */
7263 vm_object_paging_end(src_object);
7264 vm_object_unlock(src_object);
7265 OS_FALLTHROUGH;
7266 case VM_FAULT_MEMORY_ERROR:
7267 vm_fault_copy_dst_cleanup(dst_page);
7268 if (error) {
7269 return error;
7270 } else {
7271 return KERN_MEMORY_ERROR;
7272 }
7273 default:
7274 panic("vm_fault_copy(2): unexpected "
7275 "error 0x%x from "
7276 "vm_fault_page()\n", result);
7277 }
7278
7279 result_page_object = VM_PAGE_OBJECT(result_page);
7280 assert((src_top_page == VM_PAGE_NULL) ==
7281 (result_page_object == src_object));
7282 }
7283 assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7284 vm_object_unlock(result_page_object);
7285 }
7286
7287 vm_map_lock_read(dst_map);
7288
7289 if (!vm_map_verify(dst_map, dst_version)) {
7290 vm_map_unlock_read(dst_map);
7291 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7292 vm_fault_copy_cleanup(result_page, src_top_page);
7293 }
7294 vm_fault_copy_dst_cleanup(dst_page);
7295 break;
7296 }
7297 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7298
7299 vm_object_lock(dst_object);
7300
7301 if (dst_object->vo_copy != old_copy_object) {
7302 vm_object_unlock(dst_object);
7303 vm_map_unlock_read(dst_map);
7304 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7305 vm_fault_copy_cleanup(result_page, src_top_page);
7306 }
7307 vm_fault_copy_dst_cleanup(dst_page);
7308 break;
7309 }
7310 vm_object_unlock(dst_object);
7311
7312 /*
7313 * Copy the page, and note that it is dirty
7314 * immediately.
7315 */
7316
7317 if (!page_aligned(src_offset) ||
7318 !page_aligned(dst_offset) ||
7319 !page_aligned(amount_left)) {
7320 vm_object_offset_t src_po,
7321 dst_po;
7322
7323 src_po = src_offset - vm_object_trunc_page(src_offset);
7324 dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7325
7326 if (dst_po > src_po) {
7327 part_size = PAGE_SIZE - dst_po;
7328 } else {
7329 part_size = PAGE_SIZE - src_po;
7330 }
7331 if (part_size > (amount_left)) {
7332 part_size = amount_left;
7333 }
7334
7335 if (result_page == VM_PAGE_NULL) {
7336 assert((vm_offset_t) dst_po == dst_po);
7337 assert((vm_size_t) part_size == part_size);
7338 vm_page_part_zero_fill(dst_page,
7339 (vm_offset_t) dst_po,
7340 (vm_size_t) part_size);
7341 } else {
7342 assert((vm_offset_t) src_po == src_po);
7343 assert((vm_offset_t) dst_po == dst_po);
7344 assert((vm_size_t) part_size == part_size);
7345 vm_page_part_copy(result_page,
7346 (vm_offset_t) src_po,
7347 dst_page,
7348 (vm_offset_t) dst_po,
7349 (vm_size_t)part_size);
7350 if (!dst_page->vmp_dirty) {
7351 vm_object_lock(dst_object);
7352 SET_PAGE_DIRTY(dst_page, TRUE);
7353 vm_object_unlock(dst_object);
7354 }
7355 }
7356 } else {
7357 part_size = PAGE_SIZE;
7358
7359 if (result_page == VM_PAGE_NULL) {
7360 vm_page_zero_fill(dst_page);
7361 } else {
7362 vm_object_lock(result_page_object);
7363 vm_page_copy(result_page, dst_page);
7364 vm_object_unlock(result_page_object);
7365
7366 if (!dst_page->vmp_dirty) {
7367 vm_object_lock(dst_object);
7368 SET_PAGE_DIRTY(dst_page, TRUE);
7369 vm_object_unlock(dst_object);
7370 }
7371 }
7372 }
7373
7374 /*
7375 * Unlock everything, and return
7376 */
7377
7378 vm_map_unlock_read(dst_map);
7379
7380 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7381 vm_fault_copy_cleanup(result_page, src_top_page);
7382 }
7383 vm_fault_copy_dst_cleanup(dst_page);
7384
7385 amount_left -= part_size;
7386 src_offset += part_size;
7387 dst_offset += part_size;
7388 } while (amount_left > 0);
7389
7390 RETURN(KERN_SUCCESS);
7391 #undef RETURN
7392
7393 /*NOTREACHED*/
7394 }
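/*
 * Illustrative caller sketch for vm_fault_copy() (not part of the
 * build; "len" and the retry policy are hypothetical). Because
 * "copy_size" is INOUT and destination map verification can trim the
 * copy, a caller is expected to re-drive the remainder after
 * refreshing its map version:
 *
 *	vm_map_size_t done = len;
 *	kern_return_t kr;
 *
 *	kr = vm_fault_copy(src_object, src_offset, &done,
 *	    dst_object, dst_offset, dst_map, &dst_version,
 *	    THREAD_UNINT);
 *	if (kr == KERN_SUCCESS && done < len) {
 *		// re-lookup dst_map, refresh dst_version,
 *		// then retry the remaining len - done bytes
 *	}
 */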
7395
7396 #if VM_FAULT_CLASSIFY
7397 /*
7398 * Temporary statistics gathering support.
7399 */
7400
7401 /*
7402 * Statistics arrays:
7403 */
7404 #define VM_FAULT_TYPES_MAX 5
7405 #define VM_FAULT_LEVEL_MAX 8
7406
7407 int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7408
7409 #define VM_FAULT_TYPE_ZERO_FILL 0
7410 #define VM_FAULT_TYPE_MAP_IN 1
7411 #define VM_FAULT_TYPE_PAGER 2
7412 #define VM_FAULT_TYPE_COPY 3
7413 #define VM_FAULT_TYPE_OTHER 4
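/*
 * Each cell of vm_fault_stats counts faults of one type resolved at
 * one shadow-chain depth. For example (illustrative only):
 *
 *	vm_fault_stats[VM_FAULT_TYPE_PAGER][2]
 *
 * counts faults satisfied by a pager two levels down the shadow chain.
 */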
7414
7415
7416 void
7417 vm_fault_classify(vm_object_t object,
7418 vm_object_offset_t offset,
7419 vm_prot_t fault_type)
7420 {
7421 int type, level = 0;
7422 vm_page_t m;
7423
7424 while (TRUE) {
7425 m = vm_page_lookup(object, offset);
7426 if (m != VM_PAGE_NULL) {
7427 if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7428 type = VM_FAULT_TYPE_OTHER;
7429 break;
7430 }
7431 if (((fault_type & VM_PROT_WRITE) == 0) ||
7432 ((level == 0) && object->vo_copy == VM_OBJECT_NULL)) {
7433 type = VM_FAULT_TYPE_MAP_IN;
7434 break;
7435 }
7436 type = VM_FAULT_TYPE_COPY;
7437 break;
7438 } else {
7439 if (object->pager_created) {
7440 type = VM_FAULT_TYPE_PAGER;
7441 break;
7442 }
7443 if (object->shadow == VM_OBJECT_NULL) {
7444 type = VM_FAULT_TYPE_ZERO_FILL;
7445 break;
7446 }
7447
7448 offset += object->vo_shadow_offset;
7449 object = object->shadow;
7450 level++;
7451 continue;
7452 }
7453 }
7454
7455 if (level > VM_FAULT_LEVEL_MAX) {
7456 level = VM_FAULT_LEVEL_MAX;
7457 }
7458
7459 vm_fault_stats[type][level] += 1;
7460
7461 return;
7462 }
7463
7464 /* cleanup routine to call from debugger */
7465
7466 void
7467 vm_fault_classify_init(void)
7468 {
7469 int type, level;
7470
7471 for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7472 for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7473 vm_fault_stats[type][level] = 0;
7474 }
7475 }
7476
7477 return;
7478 }
7479 #endif /* VM_FAULT_CLASSIFY */
7480
7481 vm_offset_t
7482 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr)
7483 {
7484 vm_map_entry_t entry;
7485 vm_object_t object;
7486 vm_offset_t object_offset;
7487 vm_page_t m;
7488 int compressor_external_state, compressed_count_delta;
7489 vm_compressor_options_t compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7490 int my_fault_type = VM_PROT_READ;
7491 kern_return_t kr;
7492 int effective_page_mask, effective_page_size;
7493
7494 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7495 effective_page_mask = VM_MAP_PAGE_MASK(map);
7496 effective_page_size = VM_MAP_PAGE_SIZE(map);
7497 } else {
7498 effective_page_mask = PAGE_MASK;
7499 effective_page_size = PAGE_SIZE;
7500 }
7501
7502 if (not_in_kdp) {
7503 panic("kdp_lightweight_fault called from outside of debugger context");
7504 }
7505
7506 assert(map != VM_MAP_NULL);
7507
7508 assert((cur_target_addr & effective_page_mask) == 0);
7509 if ((cur_target_addr & effective_page_mask) != 0) {
7510 return 0;
7511 }
7512
7513 if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7514 return 0;
7515 }
7516
7517 if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7518 return 0;
7519 }
7520
7521 if (entry->is_sub_map) {
7522 return 0;
7523 }
7524
7525 object = VME_OBJECT(entry);
7526 if (object == VM_OBJECT_NULL) {
7527 return 0;
7528 }
7529
7530 object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7531
7532 while (TRUE) {
7533 if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7534 return 0;
7535 }
7536
7537 if (object->pager_created && (object->paging_in_progress ||
7538 object->activity_in_progress)) {
7539 return 0;
7540 }
7541
7542 m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7543
7544 if (m != VM_PAGE_NULL) {
7545 if ((object->wimg_bits & VM_WIMG_MASK) != VM_WIMG_DEFAULT) {
7546 return 0;
7547 }
7548
7549 if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done || m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_cleaning ||
7550 m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7551 return 0;
7552 }
7553
7554 assert(!m->vmp_private);
7555 if (m->vmp_private) {
7556 return 0;
7557 }
7558
7559 assert(!m->vmp_fictitious);
7560 if (m->vmp_fictitious) {
7561 return 0;
7562 }
7563
7564 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7565 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7566 return 0;
7567 }
7568
7569 return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7570 }
7571
7572 compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7573
7574 if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7575 if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7576 kr = vm_compressor_pager_get(object->pager,
7577 vm_object_trunc_page(object_offset + object->paging_offset),
7578 kdp_compressor_decompressed_page_ppnum, &my_fault_type,
7579 compressor_flags, &compressed_count_delta);
7580 if (kr == KERN_SUCCESS) {
7581 return kdp_compressor_decompressed_page_paddr;
7582 } else {
7583 return 0;
7584 }
7585 }
7586 }
7587
7588 if (object->shadow == VM_OBJECT_NULL) {
7589 return 0;
7590 }
7591
7592 object_offset += object->vo_shadow_offset;
7593 object = object->shadow;
7594 }
7595 }
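/*
 * Illustrative debugger-context usage of kdp_lightweight_fault()
 * (a sketch; "addr" is hypothetical). The caller must already be in
 * KDP context and must pass an address aligned to the map's effective
 * page size; the routine returns the physical address of the page's
 * data, or 0 on any failure:
 *
 *	vm_offset_t pa;
 *
 *	addr &= ~((vm_offset_t)VM_MAP_PAGE_MASK(map));
 *	pa = kdp_lightweight_fault(map, addr);
 *	if (pa != 0) {
 *		// page contents can be read via physical address "pa"
 *	}
 */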
7596
7597 /*
7598 * vm_page_validate_cs_fast():
7599 * Performs a few quick checks to determine if the page's code signature
7600 * really needs to be fully validated. It could:
7601 * 1. have been modified (i.e. automatically tainted),
7602 * 2. have already been validated,
7603 * 3. have already been found to be tainted,
7604 * 4. no longer have a backing store.
7605 * Returns FALSE if the page needs to be fully validated.
7606 */
7607 static boolean_t
7608 vm_page_validate_cs_fast(
7609 vm_page_t page,
7610 vm_map_size_t fault_page_size,
7611 vm_map_offset_t fault_phys_offset)
7612 {
7613 vm_object_t object;
7614
7615 object = VM_PAGE_OBJECT(page);
7616 vm_object_lock_assert_held(object);
7617
7618 if (page->vmp_wpmapped &&
7619 !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7620 /*
7621 * This page was mapped for "write" access sometime in the
7622 * past and could still be modifiable in the future.
7623 * Consider it tainted.
7624 * [ If the page was already found to be "tainted", no
7625 * need to re-validate. ]
7626 */
7627 vm_object_lock_assert_exclusive(object);
7628 VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7629 VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7630 if (cs_debug) {
7631 printf("CODESIGNING: %s: "
7632 "page %p obj %p off 0x%llx "
7633 "was modified\n",
7634 __FUNCTION__,
7635 page, object, page->vmp_offset);
7636 }
7637 vm_cs_validated_dirtied++;
7638 }
7639
7640 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7641 VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7642 return TRUE;
7643 }
7644 vm_object_lock_assert_exclusive(object);
7645
7646 #if CHECK_CS_VALIDATION_BITMAP
7647 kern_return_t kr;
7648
7649 kr = vnode_pager_cs_check_validation_bitmap(
7650 object->pager,
7651 page->vmp_offset + object->paging_offset,
7652 CS_BITMAP_CHECK);
7653 if (kr == KERN_SUCCESS) {
7654 page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7655 page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7656 vm_cs_bitmap_validated++;
7657 return TRUE;
7658 }
7659 #endif /* CHECK_CS_VALIDATION_BITMAP */
7660
7661 if (!object->alive || object->terminating || object->pager == NULL) {
7662 /*
7663 * The object is terminating and we don't have its pager
7664 * so we can't validate the data...
7665 */
7666 return TRUE;
7667 }
7668
7669 /* we need to really validate this page */
7670 vm_object_lock_assert_exclusive(object);
7671 return FALSE;
7672 }
7673
7674 void
7675 vm_page_validate_cs_mapped_slow(
7676 vm_page_t page,
7677 const void *kaddr)
7678 {
7679 vm_object_t object;
7680 memory_object_offset_t mo_offset;
7681 memory_object_t pager;
7682 struct vnode *vnode;
7683 int validated, tainted, nx;
7684
7685 assert(page->vmp_busy);
7686 object = VM_PAGE_OBJECT(page);
7687 vm_object_lock_assert_exclusive(object);
7688
7689 vm_cs_validates++;
7690
7691 /*
7692 * Since we get here to validate a page that was brought in by
7693 * the pager, we know that this pager is all set up and ready
7694 * by now.
7695 */
7696 assert(object->code_signed);
7697 assert(!object->internal);
7698 assert(object->pager != NULL);
7699 assert(object->pager_ready);
7700
7701 pager = object->pager;
7702 assert(object->paging_in_progress);
7703 vnode = vnode_pager_lookup_vnode(pager);
7704 mo_offset = page->vmp_offset + object->paging_offset;
7705
7706 /* verify the code-signing hash for this page */
7707 validated = 0;
7708 tainted = 0;
7709 nx = 0;
7710 cs_validate_page(vnode,
7711 pager,
7712 mo_offset,
7713 (const void *)((const char *)kaddr),
7714 &validated,
7715 &tainted,
7716 &nx);
7717
7718 page->vmp_cs_validated |= validated;
7719 page->vmp_cs_tainted |= tainted;
7720 page->vmp_cs_nx |= nx;
7721
7722 #if CHECK_CS_VALIDATION_BITMAP
7723 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
7724 page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
7725 vnode_pager_cs_check_validation_bitmap(object->pager,
7726 mo_offset,
7727 CS_BITMAP_SET);
7728 }
7729 #endif /* CHECK_CS_VALIDATION_BITMAP */
7730 }
7731
7732 void
7733 vm_page_validate_cs_mapped(
7734 vm_page_t page,
7735 vm_map_size_t fault_page_size,
7736 vm_map_offset_t fault_phys_offset,
7737 const void *kaddr)
7738 {
7739 if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7740 vm_page_validate_cs_mapped_slow(page, kaddr);
7741 }
7742 }
7743
7744 static void
7745 vm_page_map_and_validate_cs(
7746 vm_object_t object,
7747 vm_page_t page)
7748 {
7749 vm_object_offset_t offset;
7750 vm_map_offset_t koffset;
7751 vm_map_size_t ksize;
7752 vm_offset_t kaddr;
7753 kern_return_t kr;
7754 boolean_t busy_page;
7755 boolean_t need_unmap;
7756
7757 vm_object_lock_assert_exclusive(object);
7758
7759 assert(object->code_signed);
7760 offset = page->vmp_offset;
7761
7762 busy_page = page->vmp_busy;
7763 if (!busy_page) {
7764 /* keep page busy while we map (and unlock) the VM object */
7765 page->vmp_busy = TRUE;
7766 }
7767
7768 /*
7769 * Take a paging reference on the VM object
7770 * to protect it from collapse or bypass,
7771 * and keep it from disappearing too.
7772 */
7773 vm_object_paging_begin(object);
7774
7775 /* map the page in the kernel address space */
7776 ksize = PAGE_SIZE_64;
7777 koffset = 0;
7778 need_unmap = FALSE;
7779 kr = vm_paging_map_object(page,
7780 object,
7781 offset,
7782 VM_PROT_READ,
7783 FALSE, /* can't unlock object ! */
7784 &ksize,
7785 &koffset,
7786 &need_unmap);
7787 if (kr != KERN_SUCCESS) {
7788 panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
7789 }
7790 kaddr = CAST_DOWN(vm_offset_t, koffset);
7791
7792 /* validate the mapped page */
7793 vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
7794
7795 assert(page->vmp_busy);
7796 assert(object == VM_PAGE_OBJECT(page));
7797 vm_object_lock_assert_exclusive(object);
7798
7799 if (!busy_page) {
7800 PAGE_WAKEUP_DONE(page);
7801 }
7802 if (need_unmap) {
7803 /* unmap the map from the kernel address space */
7804 vm_paging_unmap_object(object, koffset, koffset + ksize);
7805 koffset = 0;
7806 ksize = 0;
7807 kaddr = 0;
7808 }
7809 vm_object_paging_end(object);
7810 }
7811
7812 void
7813 vm_page_validate_cs(
7814 vm_page_t page,
7815 vm_map_size_t fault_page_size,
7816 vm_map_offset_t fault_phys_offset)
7817 {
7818 vm_object_t object;
7819
7820 object = VM_PAGE_OBJECT(page);
7821 vm_object_lock_assert_held(object);
7822
7823 if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
7824 return;
7825 }
7826 vm_page_map_and_validate_cs(object, page);
7827 }
7828
7829 void
7830 vm_page_validate_cs_mapped_chunk(
7831 vm_page_t page,
7832 const void *kaddr,
7833 vm_offset_t chunk_offset,
7834 vm_size_t chunk_size,
7835 boolean_t *validated_p,
7836 unsigned *tainted_p)
7837 {
7838 vm_object_t object;
7839 vm_object_offset_t offset, offset_in_page;
7840 memory_object_t pager;
7841 struct vnode *vnode;
7842 boolean_t validated;
7843 unsigned tainted;
7844
7845 *validated_p = FALSE;
7846 *tainted_p = 0;
7847
7848 assert(page->vmp_busy);
7849 object = VM_PAGE_OBJECT(page);
7850 vm_object_lock_assert_exclusive(object);
7851
7852 assert(object->code_signed);
7853 offset = page->vmp_offset;
7854
7855 if (!object->alive || object->terminating || object->pager == NULL) {
7856 /*
7857 * The object is terminating and we don't have its pager
7858 * so we can't validate the data...
7859 */
7860 return;
7861 }
7862 /*
7863 * Since we get here to validate a page that was brought in by
7864 * the pager, we know that this pager is all set up and ready
7865 * by now.
7866 */
7867 assert(!object->internal);
7868 assert(object->pager != NULL);
7869 assert(object->pager_ready);
7870
7871 pager = object->pager;
7872 assert(object->paging_in_progress);
7873 vnode = vnode_pager_lookup_vnode(pager);
7874
7875 /* verify the signature for this chunk */
7876 offset_in_page = chunk_offset;
7877 assert(offset_in_page < PAGE_SIZE);
7878
7879 tainted = 0;
7880 validated = cs_validate_range(vnode,
7881 pager,
7882 (object->paging_offset +
7883 offset +
7884 offset_in_page),
7885 (const void *)((const char *)kaddr
7886 + offset_in_page),
7887 chunk_size,
7888 &tainted);
7889 if (validated) {
7890 *validated_p = TRUE;
7891 }
7892 if (tainted) {
7893 *tainted_p = tainted;
7894 }
7895 }
7896
7897 static void
7898 vm_rtfrecord_lock(void)
7899 {
7900 lck_spin_lock(&vm_rtfr_slock);
7901 }
7902
7903 static void
7904 vm_rtfrecord_unlock(void)
7905 {
7906 lck_spin_unlock(&vm_rtfr_slock);
7907 }
7908
7909 unsigned int
7910 vmrtfaultinfo_bufsz(void)
7911 {
7912 return vmrtf_num_records * sizeof(vm_rtfault_record_t);
7913 }
7914
7915 #include <kern/backtrace.h>
7916
7917 __attribute__((noinline))
7918 static void
7919 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
7920 {
7921 uint64_t fend = mach_continuous_time();
7922
7923 uint64_t cfpc = 0;
7924 uint64_t ctid = cthread->thread_id;
7925 uint64_t cupid = get_current_unique_pid();
7926
7927 uintptr_t bpc = 0;
7928 errno_t btr = 0;
7929
7930 /*
7931 * Capture a single-frame backtrace. This extracts just the program
7932 * counter at the point of the fault, and should not use copyin to get
7933 * Rosetta save state.
7934 */
7935 struct backtrace_control ctl = {
7936 .btc_user_thread = cthread,
7937 .btc_user_copy = backtrace_user_copy_error,
7938 };
7939 unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
7940 if ((btr == 0) && (bfrs > 0)) {
7941 cfpc = bpc;
7942 }
7943
7944 assert((fstart != 0) && fend >= fstart);
7945 vm_rtfrecord_lock();
7946 assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
7947
7948 vmrtfrs.vmrtf_total++;
7949 vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
7950
7951 cvmr->rtfabstime = fstart;
7952 cvmr->rtfduration = fend - fstart;
7953 cvmr->rtfaddr = fault_vaddr;
7954 cvmr->rtfpc = cfpc;
7955 cvmr->rtftype = type_of_fault;
7956 cvmr->rtfupid = cupid;
7957 cvmr->rtftid = ctid;
7958
7959 if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
7960 vmrtfrs.vmrtfr_curi = 0;
7961 }
7962
7963 vm_rtfrecord_unlock();
7964 }
7965
7966 int
7967 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
7968 {
7969 vm_rtfault_record_t *cvmrd = vrecords;
7970 size_t residue = vrecordsz;
7971 size_t numextracted = 0;
7972 boolean_t early_exit = FALSE;
7973
7974 vm_rtfrecord_lock();
7975
7976 for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
7977 if (residue < sizeof(vm_rtfault_record_t)) {
7978 early_exit = TRUE;
7979 break;
7980 }
7981
7982 if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
7983 #if DEVELOPMENT || DEBUG
7984 if (isroot == FALSE) {
7985 continue;
7986 }
7987 #else
7988 continue;
7989 #endif /* DEVELOPMENT || DEBUG */
7990 }
7991
7992 *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
7993 cvmrd++;
7994 residue -= sizeof(vm_rtfault_record_t);
7995 numextracted++;
7996 }
7997
7998 vm_rtfrecord_unlock();
7999
8000 *vmrtfrv = numextracted;
8001 return early_exit;
8002 }
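/*
 * Illustrative consumer of the rtfault ring (a sketch; names other
 * than the functions above are hypothetical). Size the buffer with
 * vmrtfaultinfo_bufsz() so one full pass over the ring fits:
 *
 *	unsigned long nrecords = 0;
 *	unsigned int bufsz = vmrtfaultinfo_bufsz();
 *	vm_rtfault_record_t *buf = kalloc_data(bufsz, Z_WAITOK);
 *
 *	if (buf != NULL) {
 *		(void)vmrtf_extract(target_upid, FALSE, bufsz, buf, &nrecords);
 *		// the first "nrecords" entries of buf are now valid
 *		kfree_data(buf, bufsz);
 *	}
 */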
8003
8004 /*
8005 * Only allow one diagnosis to be in flight at a time, to avoid
8006 * creating too much additional memory usage.
8007 */
8008 static volatile uint_t vmtc_diagnosing;
8009 unsigned int vmtc_total = 0;
8010
8011 /*
8012 * Type used to update telemetry for the diagnosis counts.
8013 */
8014 CA_EVENT(vmtc_telemetry,
8015 CA_INT, vmtc_num_byte, /* number of corrupt bytes found */
8016 CA_BOOL, vmtc_undiagnosed, /* undiagnosed because more than 1 at a time */
8017 CA_BOOL, vmtc_not_eligible, /* the page didn't qualify */
8018 CA_BOOL, vmtc_copyin_fail, /* unable to copy in the page */
8019 CA_BOOL, vmtc_not_found, /* no corruption found even though CS failed */
8020 CA_BOOL, vmtc_one_bit_flip, /* single bit flip */
8021 CA_BOOL, vmtc_testing); /* caused on purpose by testing */
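/*
 * The telemetry lifecycle, as used by revalidate_text_page() below
 * (shown here for orientation; fields are filled in as the diagnosis
 * proceeds):
 *
 *	ca_event_t ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
 *	CA_EVENT_TYPE(vmtc_telemetry) *event = ca_event->data;
 *	...
 *	event->vmtc_num_byte = diff_count;
 *	CA_EVENT_SEND(ca_event);
 */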
8022
8023 #if DEVELOPMENT || DEBUG
8024 /*
8025 * Buffers used to compare before/after page contents.
8026 * Stashed to aid when debugging crashes.
8027 */
8028 static size_t vmtc_last_buffer_size = 0;
8029 static uint64_t *vmtc_last_before_buffer = NULL;
8030 static uint64_t *vmtc_last_after_buffer = NULL;
8031
8032 /*
8033 * Needed to record corruptions due to testing.
8034 */
8035 static uintptr_t corruption_test_va = 0;
8036 #endif /* DEVELOPMENT || DEBUG */
8037
8038 /*
8039 * Stash a copy of data from a possibly corrupt page.
8040 */
8041 static uint64_t *
8042 vmtc_get_page_data(
8043 vm_map_offset_t code_addr,
8044 vm_page_t page)
8045 {
8046 uint64_t *buffer = NULL;
8047 addr64_t buffer_paddr;
8048 addr64_t page_paddr;
8049 extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
8050 uint_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8051
8052 /*
8053 * Need an aligned buffer to do a physical copy.
8054 */
8055 if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
8056 size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
8057 return NULL;
8058 }
8059 buffer_paddr = kvtophys((vm_offset_t)buffer);
8060 page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
8061
8062 /* adjust the page start address if we need only 4K of a 16K page */
8063 if (size < PAGE_SIZE) {
8064 uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
8065 page_paddr += subpage_start;
8066 }
8067
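/*
 * Copy by physical address: the user's text page has no kernel
 * mapping, and a physical copy avoids faulting on a page whose
 * contents are suspect.
 */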
8068 bcopy_phys(page_paddr, buffer_paddr, size);
8069 return buffer;
8070 }
8071
8072 /*
8073 * Set things up so we can diagnose a potential text page corruption.
8074 */
8075 static uint64_t *
8076 vmtc_text_page_diagnose_setup(
8077 vm_map_offset_t code_addr,
8078 vm_page_t page,
8079 CA_EVENT_TYPE(vmtc_telemetry) *event)
8080 {
8081 uint64_t *buffer = NULL;
8082
8083 /*
8084 * If another is being diagnosed, skip this one.
8085 */
8086 if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
8087 event->vmtc_undiagnosed = true;
8088 return NULL;
8089 }
8090
8091 /*
8092 * Get the contents of the corrupt page.
8093 */
8094 buffer = vmtc_get_page_data(code_addr, page);
8095 if (buffer == NULL) {
8096 event->vmtc_copyin_fail = true;
8097 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8098 panic("Bad compare and swap in setup!");
8099 }
8100 return NULL;
8101 }
8102 return buffer;
8103 }
8104
8105 /*
8106 * Diagnose the text page by comparing its contents with
8107 * the one we've previously saved.
8108 */
8109 static void
8110 vmtc_text_page_diagnose(
8111 vm_map_offset_t code_addr,
8112 uint64_t *old_code_buffer,
8113 CA_EVENT_TYPE(vmtc_telemetry) *event)
8114 {
8115 uint64_t *new_code_buffer;
8116 size_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8117 uint_t count = (uint_t)size / sizeof(uint64_t);
8118 uint_t diff_count = 0;
8119 bool bit_flip = false;
8120 uint_t b;
8121 uint64_t *new;
8122 uint64_t *old;
8123
8124 new_code_buffer = kalloc_data(size, Z_WAITOK);
8125 assert(new_code_buffer != NULL);
8126 if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
8127 /* copyin error, so undo things */
8128 event->vmtc_copyin_fail = true;
8129 goto done;
8130 }
8131
8132 new = new_code_buffer;
8133 old = old_code_buffer;
8134 for (; count-- > 0; ++new, ++old) {
8135 if (*new == *old) {
8136 continue;
8137 }
8138
8139 /*
8140 * On first diff, check for a single bit flip
8141 */
8142 if (diff_count == 0) {
8143 uint64_t x = (*new ^ *old);
8144 assert(x != 0);
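/* a nonzero x is a power of 2 iff exactly one bit flipped */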
8145 if ((x & (x - 1)) == 0) {
8146 bit_flip = true;
8147 ++diff_count;
8148 continue;
8149 }
8150 }
8151
8152 /*
8153 * count up the number of different bytes.
8154 */
8155 for (b = 0; b < sizeof(uint64_t); ++b) {
8156 char *n = (char *)new;
8157 char *o = (char *)old;
8158 if (n[b] != o[b]) {
8159 ++diff_count;
8160 }
8161 }
8162 }
8163
8164 if (diff_count > 1) {
8165 bit_flip = false;
8166 }
8167
8168 if (diff_count == 0) {
8169 event->vmtc_not_found = true;
8170 } else {
8171 event->vmtc_num_byte = diff_count;
8172 }
8173 if (bit_flip) {
8174 event->vmtc_one_bit_flip = true;
8175 }
8176
8177 done:
8178 /*
8179 * Free up the code copy buffers, but save the last
8180 * set on development / debug kernels in case they
8181 * can provide evidence for debugging memory stomps.
8182 */
8183 #if DEVELOPMENT || DEBUG
8184 if (vmtc_last_before_buffer != NULL) {
8185 kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
8186 }
8187 if (vmtc_last_after_buffer != NULL) {
8188 kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
8189 }
8190 vmtc_last_before_buffer = old_code_buffer;
8191 vmtc_last_after_buffer = new_code_buffer;
8192 vmtc_last_buffer_size = size;
8193 #else /* DEVELOPMENT || DEBUG */
8194 kfree_data(new_code_buffer, size);
8195 kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
8196 #endif /* DEVELOPMENT || DEBUG */
8197
8198 /*
8199 * We're finished, so clear the diagnosing flag.
8200 */
8201 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8202 panic("Bad compare and swap in diagnose!");
8203 }
8204 }
8205
8206 /*
8207 * For the given map and virtual address, find the object, offset, and page.
8208 * This has to look up the map entry, verify protections, and walk any shadow chains.
8209 * If found, returns with the object locked.
8210 */
8211 static kern_return_t
8212 vmtc_revalidate_lookup(
8213 vm_map_t map,
8214 vm_map_offset_t vaddr,
8215 vm_object_t *ret_object,
8216 vm_object_offset_t *ret_offset,
8217 vm_page_t *ret_page,
8218 vm_prot_t *ret_prot)
8219 {
8220 vm_object_t object;
8221 vm_object_offset_t offset;
8222 vm_page_t page;
8223 kern_return_t kr = KERN_SUCCESS;
8224 uint8_t object_lock_type = OBJECT_LOCK_EXCLUSIVE;
8225 vm_map_version_t version;
8226 boolean_t wired;
8227 struct vm_object_fault_info fault_info = {};
8228 vm_map_t real_map = NULL;
8229 vm_prot_t prot;
8230 vm_object_t shadow;
8231
8232 /*
8233 * Find the object/offset for the given location/map.
8234 * Note this returns with the object locked.
8235 */
8236 restart:
8237 vm_map_lock_read(map);
8238 object = VM_OBJECT_NULL; /* in case we come around the restart path */
8239 kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
8240 object_lock_type, &version, &object, &offset, &prot, &wired,
8241 &fault_info, &real_map, NULL);
8242 vm_map_unlock_read(map);
8243 if (real_map != NULL && real_map != map) {
8244 vm_map_unlock(real_map);
8245 }
8246
8247 /*
8248 * If there's no page here, fail.
8249 */
8250 if (kr != KERN_SUCCESS || object == NULL) {
8251 kr = KERN_FAILURE;
8252 goto done;
8253 }
8254
8255 /*
8256 * Chase down any shadow chains to find the actual page.
8257 */
8258 for (;;) {
8259 /*
8260 * See if the page is on the current object.
8261 */
8262 page = vm_page_lookup(object, vm_object_trunc_page(offset));
8263 if (page != NULL) {
8264 /* restart the lookup */
8265 if (page->vmp_restart) {
8266 vm_object_unlock(object);
8267 goto restart;
8268 }
8269
8270 /*
8271 * If this page is busy, we need to wait for it.
8272 */
8273 if (page->vmp_busy) {
8274 PAGE_SLEEP(object, page, TRUE);
8275 vm_object_unlock(object);
8276 goto restart;
8277 }
8278 break;
8279 }
8280
8281 /*
8282 * If the object doesn't have the page and
8283 * has no shadow, then we can quit.
8284 */
8285 shadow = object->shadow;
8286 if (shadow == NULL) {
8287 kr = KERN_FAILURE;
8288 goto done;
8289 }
8290
8291 /*
8292 * Move to the next object
8293 */
8294 offset += object->vo_shadow_offset;
8295 vm_object_lock(shadow);
8296 vm_object_unlock(object);
8297 object = shadow;
8298 shadow = VM_OBJECT_NULL;
8299 }
8300 *ret_object = object;
8301 *ret_offset = vm_object_trunc_page(offset);
8302 *ret_page = page;
8303 *ret_prot = prot;
8304
8305 done:
8306 if (kr != KERN_SUCCESS && object != NULL) {
8307 vm_object_unlock(object);
8308 }
8309 return kr;
8310 }
8311
8312 /*
8313 * Check whether a page is wired; this requires taking the page queues lock.
8314 */
8315 static bool
8316 is_page_wired(vm_page_t page)
8317 {
8318 bool result;
8319 vm_page_lock_queues();
8320 result = VM_PAGE_WIRED(page);
8321 vm_page_unlock_queues();
8322 return result;
8323 }
8324
8325 /*
8326 * A fatal process error has occurred in the given task.
8327 * Recheck the code signing of the text page at the given
8328 * address to check for a text page corruption.
8329 *
8330 * Returns KERN_FAILURE if a page was found to be corrupt
8331 * by failing to match its code signature. KERN_SUCCESS
8332 * means the page is either valid or we don't have the
8333 * information to say it's corrupt.
8334 */
8335 kern_return_t
8336 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8337 {
8338 kern_return_t kr;
8339 vm_map_t map;
8340 vm_object_t object = NULL;
8341 vm_object_offset_t offset;
8342 vm_page_t page = NULL;
8343 struct vnode *vnode;
8344 uint64_t *diagnose_buffer = NULL;
8345 CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8346 ca_event_t ca_event = NULL;
8347 vm_prot_t prot;
8348
8349 map = task->map;
8350 if (task->map == NULL) {
8351 return KERN_SUCCESS;
8352 }
8353
8354 kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page, &prot);
8355 if (kr != KERN_SUCCESS) {
8356 goto done;
8357 }
8358
8359 /*
8360 * The page must be executable.
8361 */
8362 if (!(prot & VM_PROT_EXECUTE)) {
8363 goto done;
8364 }
8365
8366 /*
8367 * The object needs to have a pager.
8368 */
8369 if (object->pager == NULL) {
8370 goto done;
8371 }
8372
8373 /*
8374 * Needs to be a vnode backed page to have a signature.
8375 */
8376 vnode = vnode_pager_lookup_vnode(object->pager);
8377 if (vnode == NULL) {
8378 goto done;
8379 }
8380
8381 /*
8382 * Object checks to see if we should proceed.
8383 */
8384 if (!object->code_signed || /* no code signature to check */
8385 object->internal || /* internal objects aren't signed */
8386 object->terminating || /* the object and its pages are already going away */
8387 !object->pager_ready) { /* this shouldn't happen, but the check doesn't hurt */
8388 goto done;
8389 }
8390
8391
8392 /*
8393 * Check the code signature of the page in question.
8394 */
8395 vm_page_map_and_validate_cs(object, page);
8396
8397 /*
8398 * At this point:
8399 * vmp_cs_validated |= validated (set if a code signature exists)
8400 * vmp_cs_tainted |= tainted (set if code signature violation)
8401 * vmp_cs_nx |= nx; ??
8402 *
8403 * if vmp_pmapped then have to pmap_disconnect..
8404 * other flags to check on object or page?
8405 */
8406 if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8407 #if DEBUG || DEVELOPMENT
8408 /*
8409 * On development builds, a boot-arg can be used to cause
8410 * a panic, instead of a quiet repair.
8411 */
8412 if (vmtc_panic_instead) {
8413 panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8414 }
8415 #endif /* DEBUG || DEVELOPMENT */
8416
8417 /*
8418 * We're going to invalidate this page. Grab a copy of it for comparison.
8419 */
8420 ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8421 event = ca_event->data;
8422 diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8423
8424 /*
8425 * Invalidate, i.e. toss, the corrupted page.
8426 */
8427 if (!page->vmp_cleaning &&
8428 !page->vmp_laundry &&
8429 !page->vmp_fictitious &&
8430 !page->vmp_precious &&
8431 !page->vmp_absent &&
8432 !VMP_ERROR_GET(page) &&
8433 !page->vmp_dirty &&
8434 !is_page_wired(page)) {
8435 if (page->vmp_pmapped) {
8436 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8437 if (refmod & VM_MEM_MODIFIED) {
8438 SET_PAGE_DIRTY(page, FALSE);
8439 }
8440 if (refmod & VM_MEM_REFERENCED) {
8441 page->vmp_reference = TRUE;
8442 }
8443 }
8444 /* If the page seems intentionally modified, don't trash it. */
8445 if (!page->vmp_dirty) {
8446 VM_PAGE_FREE(page);
8447 } else {
8448 event->vmtc_not_eligible = true;
8449 }
8450 } else {
8451 event->vmtc_not_eligible = true;
8452 }
8453 vm_object_unlock(object);
8454 object = VM_OBJECT_NULL;
8455
8456 /*
8457 * Now try to diagnose the type of failure by faulting
8458 * in a new copy and diff'ing it with what we saved.
8459 */
8460 if (diagnose_buffer != NULL) {
8461 vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8462 }
8463 #if DEBUG || DEVELOPMENT
8464 if (corruption_test_va != 0) {
8465 corruption_test_va = 0;
8466 event->vmtc_testing = true;
8467 }
8468 #endif /* DEBUG || DEVELOPMENT */
8469 ktriage_record(thread_tid(current_thread()),
8470 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8471 0 /* arg */);
8472 CA_EVENT_SEND(ca_event);
8473 printf("Text page corruption detected for pid %d\n", proc_selfpid());
8474 ++vmtc_total;
8475 return KERN_FAILURE; /* failure means we definitely found a corrupt page */
8476 }
8477 done:
8478 if (object != NULL) {
8479 vm_object_unlock(object);
8480 }
8481 return KERN_SUCCESS;
8482 }
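/*
 * Illustrative call site for revalidate_text_page() (a sketch; actual
 * callers sit in the fatal-error handling path, and "fault_pc" is
 * hypothetical). KERN_FAILURE means a corrupt text page was found and
 * tossed, so a fresh copy will be paged in on retry:
 *
 *	if (revalidate_text_page(task, fault_pc) == KERN_FAILURE) {
 *		// the crash is attributable to text corruption
 *	}
 */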
8483
8484 #if DEBUG || DEVELOPMENT
8485 /*
8486 * For implementing unit tests - ask the pmap to corrupt a text page.
8487 * We have to find the page, to get the physical address, then invoke
8488 * the pmap.
8489 */
8490 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8491
8492 kern_return_t
8493 vm_corrupt_text_addr(uintptr_t va)
8494 {
8495 task_t task = current_task();
8496 vm_map_t map;
8497 kern_return_t kr = KERN_SUCCESS;
8498 vm_object_t object = VM_OBJECT_NULL;
8499 vm_object_offset_t offset;
8500 vm_page_t page = NULL;
8501 pmap_paddr_t pa;
8502 vm_prot_t prot;
8503
8504 map = task->map;
8505 if (task->map == NULL) {
8506 printf("corrupt_text_addr: no map\n");
8507 return KERN_FAILURE;
8508 }
8509
8510 kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page, &prot);
8511 if (kr != KERN_SUCCESS) {
8512 printf("corrupt_text_addr: page lookup failed\n");
8513 return kr;
8514 }
8515 if (!(prot & VM_PROT_EXECUTE)) {
8516 printf("corrupt_text_addr: page not executable\n");
8517 return KERN_FAILURE;
8518 }
8519
8520 /* get the physical address to use */
8521 pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
8522
8523 /*
8524 * Check we have something we can work with.
8525 * Due to racing with pageout as we enter the sysctl,
8526 * it's theoretically possible for the page to disappear just
8527 * before the lookup.
8528 *
8529 * That's highly unlikely to happen often. I've filed a radar 72857482
8530 * to bubble up the error here to the sysctl result and have the
8531 * test not FAIL in that case.
8532 */
8533 if (page->vmp_busy) {
8534 printf("corrupt_text_addr: vmp_busy\n");
8535 kr = KERN_FAILURE;
8536 }
8537 if (page->vmp_cleaning) {
8538 printf("corrupt_text_addr: vmp_cleaning\n");
8539 kr = KERN_FAILURE;
8540 }
8541 if (page->vmp_laundry) {
8542 printf("corrupt_text_addr: vmp_cleaning\n");
8543 kr = KERN_FAILURE;
8544 }
8545 if (page->vmp_fictitious) {
8546 printf("corrupt_text_addr: vmp_fictitious\n");
8547 kr = KERN_FAILURE;
8548 }
8549 if (page->vmp_precious) {
8550 printf("corrupt_text_addr: vmp_precious\n");
8551 kr = KERN_FAILURE;
8552 }
8553 if (page->vmp_absent) {
8554 printf("corrupt_text_addr: vmp_absent\n");
8555 kr = KERN_FAILURE;
8556 }
8557 if (VMP_ERROR_GET(page)) {
8558 printf("corrupt_text_addr: vmp_error\n");
8559 kr = KERN_FAILURE;
8560 }
8561 if (page->vmp_dirty) {
8562 printf("corrupt_text_addr: vmp_dirty\n");
8563 kr = KERN_FAILURE;
8564 }
8565 if (is_page_wired(page)) {
8566 printf("corrupt_text_addr: wired\n");
8567 kr = KERN_FAILURE;
8568 }
8569 if (!page->vmp_pmapped) {
8570 printf("corrupt_text_addr: !vmp_pmapped\n");
8571 kr = KERN_FAILURE;
8572 }
8573
8574 if (kr == KERN_SUCCESS) {
8575 printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8576 kr = pmap_test_text_corruption(pa);
8577 if (kr != KERN_SUCCESS) {
8578 printf("corrupt_text_addr: pmap error %d\n", kr);
8579 } else {
8580 corruption_test_va = va;
8581 }
8582 } else {
8583 printf("corrupt_text_addr: object %p\n", object);
8584 printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8585 printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8586 printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8587 printf("corrupt_text_addr: vm_page_t %p\n", page);
8588 printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8589 printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8590 }
8591
8592 if (object != VM_OBJECT_NULL) {
8593 vm_object_unlock(object);
8594 }
8595 return kr;
8596 }
8597
8598 #endif /* DEBUG || DEVELOPMENT */
8599