/*
 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_fault.c
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *
 *	Page fault handling module.
 */

#include <libkern/OSAtomic.h>

#include <mach/mach_types.h>
#include <mach/kern_return.h>
#include <mach/message.h>       /* for error codes */
#include <mach/vm_param.h>
#include <mach/vm_behavior.h>
#include <mach/memory_object.h>
/* For memory_object_data_{request,unlock} */
#include <mach/sdt.h>

#include <kern/kern_types.h>
#include <kern/host_statistics.h>
#include <kern/counter.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/host.h>
#include <kern/mach_param.h>
#include <kern/macro_help.h>
#include <kern/zalloc_internal.h>
#include <kern/misc_protos.h>
#include <kern/policy_internal.h>
#include <kern/exc_guard.h>

#include <vm/vm_compressor_internal.h>
#include <vm/vm_compressor_pager_internal.h>
#include <vm/vm_fault_internal.h>
#include <vm/vm_map_internal.h>
#include <vm/vm_object_internal.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_kern_internal.h>
#include <vm/pmap.h>
#include <vm/vm_pageout_internal.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_external.h>
#include <vm/memory_object.h>
#include <vm/vm_purgeable_internal.h>   /* Needed by some vm_page.h macros */
#include <vm/vm_shared_region.h>

#include <sys/codesign.h>
#include <sys/code_signing.h>
#include <sys/kdebug.h>
#include <sys/kdebug_triage.h>
#include <sys/reason.h>
#include <sys/signalvar.h>

#include <san/kasan.h>
#include <libkern/coreanalytics/coreanalytics.h>

#define VM_FAULT_CLASSIFY 0

#define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */

int vm_protect_privileged_from_untrusted = 1;

/*
 * Enforce a maximum number of concurrent PageIns per vm-object to prevent
 * high-I/O-volume tasks from saturating storage and starving the rest of the
 * system.
 *
 * TODO: This throttling mechanism may be more naturally done by the pager,
 * filesystem, or storage layers, which will have better information about how
 * much concurrency the backing store can reasonably support.
 */
TUNABLE(uint16_t, vm_object_pagein_throttle, "vm_object_pagein_throttle", 16);
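/*
 * Illustrative sketch (not part of the mechanism): TUNABLE() binds the
 * variable to the boot-arg named in its third argument, so on a test
 * system the limit above could be raised with something like:
 *
 *	nvram boot-args="vm_object_pagein_throttle=32"
 *
 * The limit itself is enforced in vm_fault_page(), which waits in
 * vm_object_paging_throttle_wait() once object->paging_in_progress
 * exceeds vm_object_pagein_throttle.
 */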

/*
 * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control;
 * it kicks in when swap space runs out. 64-bit programs have massive address spaces and, if buggy, can
 * leak enormous amounts of memory and run the system completely out of swap space. If this happens, we
 * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps
 * keep the UI active so that the user has a chance to kill the offending task before the system
 * completely hangs.
 *
 * The hard throttle is applied only when the system is nearly out of swap space, and only to tasks
 * that appear to be bloated. Once swap runs out, any task using more than vm_hard_throttle_threshold
 * is throttled. The throttling is done by giving the thread that is trying to demand-zero a page a
 * delay of HARD_THROTTLE_DELAY microseconds before it is allowed to retry the page fault.
 */

extern void throttle_lowpri_io(int);

extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);

uint64_t vm_hard_throttle_threshold;

#if DEBUG || DEVELOPMENT
static bool vmtc_panic_instead = false;
int panic_object_not_alive = 1;
#endif /* DEBUG || DEVELOPMENT */

OS_ALWAYS_INLINE
boolean_t
NEED_TO_HARD_THROTTLE_THIS_TASK(void)
{
	return vm_wants_task_throttled(current_task()) ||
	       ((vm_page_free_count < vm_page_throttle_limit ||
	       HARD_THROTTLE_LIMIT_REACHED()) &&
	       proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
}


/*
 * XXX: For now, vm faults cannot be recursively disabled. If the need for
 * nested code that disables faults arises, the implementation can be modified
 * to track a disabled-count.
 */

OS_ALWAYS_INLINE
void
vm_fault_disable(void)
{
	thread_t t = current_thread();
	assert(!t->th_vm_faults_disabled);
	t->th_vm_faults_disabled = true;
	act_set_debug_assert();
}

OS_ALWAYS_INLINE
void
vm_fault_enable(void)
{
	thread_t t = current_thread();
	assert(t->th_vm_faults_disabled);
	t->th_vm_faults_disabled = false;
}

OS_ALWAYS_INLINE
bool
vm_fault_get_disabled(void)
{
	thread_t t = current_thread();
	return t->th_vm_faults_disabled;
}

#define HARD_THROTTLE_DELAY     10000   /* 10000 us == 10 ms */
#define SOFT_THROTTLE_DELAY     200     /* 200 us == .2 ms */

#define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS   6
#define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC  20000
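
/*
 * Worked example (illustrative): with the defaults above, a thread only
 * becomes a throttling candidate in vm_page_throttled() after it has
 * created more than 6 * 20000 == 120,000 pages, and it remains throttled
 * only while its observed creation rate stays at or above 20,000 pages
 * per second.
 */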


#define VM_STAT_DECOMPRESSIONS()        \
MACRO_BEGIN                             \
	counter_inc(&vm_statistics_decompressions); \
	current_thread()->decompressions++;     \
MACRO_END

boolean_t current_thread_aborted(void);

/* Forward declarations of internal routines. */
static kern_return_t vm_fault_wire_fast(
	vm_map_t map,
	vm_map_offset_t va,
	vm_prot_t prot,
	vm_tag_t wire_tag,
	vm_map_entry_t entry,
	pmap_t pmap,
	vm_map_offset_t pmap_addr,
	ppnum_t *physpage_p);

static kern_return_t vm_fault_internal(
	vm_map_t map,
	vm_map_offset_t vaddr,
	vm_prot_t caller_prot,
	vm_tag_t wire_tag,
	pmap_t pmap,
	vm_map_offset_t pmap_addr,
	ppnum_t *physpage_p,
	vm_object_fault_info_t fault_info);

static void vm_fault_copy_cleanup(
	vm_page_t page,
	vm_page_t top_page);

static void vm_fault_copy_dst_cleanup(
	vm_page_t page);

#if VM_FAULT_CLASSIFY
extern void vm_fault_classify(vm_object_t object,
    vm_object_offset_t offset,
    vm_prot_t fault_type);

extern void vm_fault_classify_init(void);
#endif

unsigned long vm_pmap_enter_blocked = 0;
unsigned long vm_pmap_enter_retried = 0;

unsigned long vm_cs_validates = 0;
unsigned long vm_cs_revalidates = 0;
unsigned long vm_cs_query_modified = 0;
unsigned long vm_cs_validated_dirtied = 0;
unsigned long vm_cs_bitmap_validated = 0;

#if CODE_SIGNING_MONITOR
uint64_t vm_cs_defer_to_csm = 0;
uint64_t vm_cs_defer_to_csm_not = 0;
#endif /* CODE_SIGNING_MONITOR */

extern char *kdp_compressor_decompressed_page;
extern addr64_t kdp_compressor_decompressed_page_paddr;
extern ppnum_t kdp_compressor_decompressed_page_ppnum;

struct vmrtfr {
	int vmrtfr_maxi;
	int vmrtfr_curi;
	int64_t vmrtf_total;
	vm_rtfault_record_t *vm_rtf_records;
} vmrtfrs;
#define VMRTF_DEFAULT_BUFSIZE (4096)
#define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);

static void vm_rtfrecord_lock(void);
static void vm_rtfrecord_unlock(void);
static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);

extern lck_grp_t vm_page_lck_grp_bucket;
extern lck_attr_t vm_page_lck_attr;
LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);

#if DEVELOPMENT || DEBUG
extern int madvise_free_debug;
extern int madvise_free_debug_sometimes;
#endif /* DEVELOPMENT || DEBUG */

extern int vm_pageout_protect_realtime;

#if CONFIG_FREEZE
#endif /* CONFIG_FREEZE */

/*
 * Routine: vm_fault_init
 * Purpose:
 *	Initialize our private data structures.
 */
__startup_func
void
vm_fault_init(void)
{
	int i, vm_compressor_temp;
	boolean_t need_default_val = TRUE;
	/*
	 * Choose a value for the hard throttle threshold based on the amount of ram. The threshold is
	 * computed as a percentage of available memory, and the percentage used is scaled inversely with
	 * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems
	 * and reduce the value down to 10% for very large memory configurations. This helps give us a
	 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
	 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
	 */

	vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
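
	/*
	 * Worked example (illustrative): on a 16 GB machine,
	 * sane_size / 1 GB == 16, so the percentage is 35 - MIN(16, 25) == 19
	 * and the threshold is 16 GB * 19 / 100, roughly 3 GB. A 1 GB machine
	 * gets 34% (~348 MB); anything with 25 GB or more bottoms out at 10%.
	 */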

	/*
	 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
	 */

	if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
		for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
			if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
				need_default_val = FALSE;
				vm_compressor_mode = vm_compressor_temp;
				break;
			}
		}
		if (need_default_val) {
			printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
		}
	}
#if CONFIG_FREEZE
	if (need_default_val) {
		if (osenvironment_is_diagnostics()) {
			printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
			vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
			need_default_val = false;
		}
	}
#endif /* CONFIG_FREEZE */
	if (need_default_val) {
		/* If no boot arg or incorrect boot arg, try device tree. */
		PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
	}
	printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
	vm_config_init();

	PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
	    &vm_protect_privileged_from_untrusted,
	    sizeof(vm_protect_privileged_from_untrusted));

#if DEBUG || DEVELOPMENT
	(void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));

	if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
		madvise_free_debug = 0;
		madvise_free_debug_sometimes = 0;
	}

	PE_parse_boot_argn("panic_object_not_alive", &panic_object_not_alive, sizeof(panic_object_not_alive));
#endif /* DEBUG || DEVELOPMENT */
}

__startup_func
static void
vm_rtfault_record_init(void)
{
	size_t size;

	vmrtf_num_records = MAX(vmrtf_num_records, 1);
	size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
	vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
	    ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
	vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
}
STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);

/*
 * Routine: vm_fault_cleanup
 * Purpose:
 *	Clean up the result of vm_fault_page.
 * Results:
 *	The paging reference for "object" is released.
 *	"object" is unlocked.
 *	If "top_page" is not null, "top_page" is
 *	freed and the paging reference for the object
 *	containing it is released.
 *
 * In/out conditions:
 *	"object" must be locked.
 */
void
vm_fault_cleanup(
	vm_object_t object,
	vm_page_t top_page)
{
	vm_object_paging_end(object);
	vm_object_unlock(object);

	if (top_page != VM_PAGE_NULL) {
		object = VM_PAGE_OBJECT(top_page);

		vm_object_lock(object);
		VM_PAGE_FREE(top_page);
		vm_object_paging_end(object);
		vm_object_unlock(object);
	}
}

#define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)


boolean_t vm_page_deactivate_behind = TRUE;
/*
 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
 */
#define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW     128
#define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER    16      /* don't make this too big... */
                                                        /* we use it to size an array on the stack */

int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;

#define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
431 * vm_page_is_sequential
432 *
433 * Determine if sequential access is in progress
434 * in accordance with the behavior specified.
435 * Update state to indicate current access pattern.
436 *
437 * object must have at least the shared lock held
438 */
439 static
440 void
vm_fault_is_sequential(vm_object_t object,vm_object_offset_t offset,vm_behavior_t behavior)441 vm_fault_is_sequential(
442 vm_object_t object,
443 vm_object_offset_t offset,
444 vm_behavior_t behavior)
445 {
446 vm_object_offset_t last_alloc;
447 int sequential;
448 int orig_sequential;
449
450 last_alloc = object->last_alloc;
451 sequential = object->sequential;
452 orig_sequential = sequential;
453
454 offset = vm_object_trunc_page(offset);
455 if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
456 /* re-faulting in the same page: no change in behavior */
457 return;
458 }
459
460 switch (behavior) {
461 case VM_BEHAVIOR_RANDOM:
462 /*
463 * reset indicator of sequential behavior
464 */
465 sequential = 0;
466 break;
467
468 case VM_BEHAVIOR_SEQUENTIAL:
469 if (offset && last_alloc == offset - PAGE_SIZE_64) {
470 /*
471 * advance indicator of sequential behavior
472 */
473 if (sequential < MAX_SEQUENTIAL_RUN) {
474 sequential += PAGE_SIZE;
475 }
476 } else {
477 /*
478 * reset indicator of sequential behavior
479 */
480 sequential = 0;
481 }
482 break;
483
484 case VM_BEHAVIOR_RSEQNTL:
485 if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
486 /*
487 * advance indicator of sequential behavior
488 */
489 if (sequential > -MAX_SEQUENTIAL_RUN) {
490 sequential -= PAGE_SIZE;
491 }
492 } else {
493 /*
494 * reset indicator of sequential behavior
495 */
496 sequential = 0;
497 }
498 break;
499
500 case VM_BEHAVIOR_DEFAULT:
501 default:
502 if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
503 /*
504 * advance indicator of sequential behavior
505 */
506 if (sequential < 0) {
507 sequential = 0;
508 }
509 if (sequential < MAX_SEQUENTIAL_RUN) {
510 sequential += PAGE_SIZE;
511 }
512 } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
513 /*
514 * advance indicator of sequential behavior
515 */
516 if (sequential > 0) {
517 sequential = 0;
518 }
519 if (sequential > -MAX_SEQUENTIAL_RUN) {
520 sequential -= PAGE_SIZE;
521 }
522 } else {
523 /*
524 * reset indicator of sequential behavior
525 */
526 sequential = 0;
527 }
528 break;
529 }
530 if (sequential != orig_sequential) {
531 if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
532 /*
533 * if someone else has already updated object->sequential
534 * don't bother trying to update it or object->last_alloc
535 */
536 return;
537 }
538 }
539 /*
540 * I'd like to do this with a OSCompareAndSwap64, but that
541 * doesn't exist for PPC... however, it shouldn't matter
542 * that much... last_alloc is maintained so that we can determine
543 * if a sequential access pattern is taking place... if only
544 * one thread is banging on this object, no problem with the unprotected
545 * update... if 2 or more threads are banging away, we run the risk of
546 * someone seeing a mangled update... however, in the face of multiple
547 * accesses, no sequential access pattern can develop anyway, so we
548 * haven't lost any real info.
549 */
550 object->last_alloc = offset;
551 }
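
/*
 * Worked example (illustrative): under VM_BEHAVIOR_DEFAULT, a thread
 * faulting offsets 0x0000, 0x1000, 0x2000, ... (4K pages) sees last_alloc
 * trail the faulting offset by exactly PAGE_SIZE_64 each time, so
 * "sequential" climbs by PAGE_SIZE per fault until it saturates at
 * MAX_SEQUENTIAL_RUN. A fault at an unrelated offset resets it to 0, and
 * a descending pattern drives it negative instead, which is how
 * vm_fault_deactivate_behind() below distinguishes forward runs from
 * reverse ones.
 */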

#if DEVELOPMENT || DEBUG
uint64_t vm_page_deactivate_behind_count = 0;
#endif /* DEVELOPMENT || DEBUG */

/*
 * vm_fault_deactivate_behind
 *
 * Determine if sequential access is in progress
 * in accordance with the behavior specified. If
 * so, compute a potential page to deactivate and
 * deactivate it.
 *
 * object must be locked.
 *
 * return TRUE if we actually deactivate a page
 */
static
boolean_t
vm_fault_deactivate_behind(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_behavior_t behavior)
{
	int n;
	int pages_in_run = 0;
	int max_pages_in_run = 0;
	int sequential_run;
	int sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
	vm_object_offset_t run_offset = 0;
	vm_object_offset_t pg_offset = 0;
	vm_page_t m;
	vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];

	pages_in_run = 0;
#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
#endif
	if (is_kernel_object(object) || vm_page_deactivate_behind == FALSE || (vm_object_trunc_page(offset) != offset)) {
		/*
		 * Do not deactivate pages from the kernel object: they
		 * are not intended to become pageable.
		 * Also bail if we've disabled the deactivate-behind
		 * mechanism, or if the offset is not aligned to the
		 * system's PAGE_SIZE: in that case we will handle the
		 * deactivation on the aligned offset and, thus, the full
		 * PAGE_SIZE page once. This helps us avoid redundant
		 * deactivates and extra faults.
		 */
		return FALSE;
	}
	if ((sequential_run = object->sequential)) {
		if (sequential_run < 0) {
			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
			sequential_run = 0 - sequential_run;
		} else {
			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
		}
	}
	switch (behavior) {
	case VM_BEHAVIOR_RANDOM:
		break;
	case VM_BEHAVIOR_SEQUENTIAL:
		if (sequential_run >= (int)PAGE_SIZE) {
			run_offset = 0 - PAGE_SIZE_64;
			max_pages_in_run = 1;
		}
		break;
	case VM_BEHAVIOR_RSEQNTL:
		if (sequential_run >= (int)PAGE_SIZE) {
			run_offset = PAGE_SIZE_64;
			max_pages_in_run = 1;
		}
		break;
	case VM_BEHAVIOR_DEFAULT:
	default:
	{       vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;

		/*
		 * determine if the run of sequential access has been
		 * long enough on an object with default access behavior
		 * to consider it for deactivation
		 */
		if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
			/*
			 * the comparisons between offset and behind are done
			 * in this kind of odd fashion in order to prevent wrap around
			 * at the end points
			 */
			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
				if (offset >= behind) {
					run_offset = 0 - behind;
					pg_offset = PAGE_SIZE_64;
					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
				}
			} else {
				if (offset < -behind) {
					run_offset = behind;
					pg_offset = 0 - PAGE_SIZE_64;
					max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
				}
			}
		}
		break;}
	}
	for (n = 0; n < max_pages_in_run; n++) {
		m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));

		if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache &&
		    (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
		    !vm_page_is_fictitious(m) && !m->vmp_absent) {
			page_run[pages_in_run++] = m;

			/*
			 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
			 *
			 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
			 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
			 * new reference happens. If no further references happen on the page after that remote TLB flushes
			 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
			 * by pageout_scan, which is just fine since the last reference would have happened quite far
			 * in the past (TLB caches don't hang around for very long), and of course could just as easily
			 * have happened before we did the deactivate_behind.
			 */
			pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
		}
	}
	if (pages_in_run) {
		vm_page_lockspin_queues();

		for (n = 0; n < pages_in_run; n++) {
			m = page_run[n];

			vm_page_deactivate_internal(m, FALSE);

#if DEVELOPMENT || DEBUG
			vm_page_deactivate_behind_count++;
#endif /* DEVELOPMENT || DEBUG */

#if TRACEFAULTPAGE
			dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
#endif
		}
		vm_page_unlock_queues();

		return TRUE;
	}
	return FALSE;
}
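
/*
 * Worked example (illustrative): with the defaults above (window of 128
 * pages, cluster of 16), once a forward sequential run reaches 128 pages,
 * every 16th subsequent fault deactivates the 16-page cluster sitting 128
 * pages behind the faulting offset, steadily recycling memory the run has
 * already swept past.
 */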


#if (DEVELOPMENT || DEBUG)
uint32_t vm_page_creation_throttled_hard = 0;
uint32_t vm_page_creation_throttled_soft = 0;
uint64_t vm_page_creation_throttle_avoided = 0;
#endif /* DEVELOPMENT || DEBUG */

static int
vm_page_throttled(boolean_t page_kept)
{
	clock_sec_t elapsed_sec;
	clock_sec_t tv_sec;
	clock_usec_t tv_usec;
	task_t curtask = current_task_early();

	thread_t thread = current_thread();

	if (thread->options & TH_OPT_VMPRIV) {
		return 0;
	}

	if (curtask && !curtask->active) {
		return 0;
	}

	if (thread->t_page_creation_throttled) {
		thread->t_page_creation_throttled = 0;

		if (page_kept == FALSE) {
			goto no_throttle;
		}
	}
	if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
#if (DEVELOPMENT || DEBUG)
		thread->t_page_creation_throttled_hard++;
		OSAddAtomic(1, &vm_page_creation_throttled_hard);
#endif /* DEVELOPMENT || DEBUG */
		return HARD_THROTTLE_DELAY;
	}

	if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
	    thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
		if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
#if (DEVELOPMENT || DEBUG)
			OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
#endif
			goto no_throttle;
		}
		clock_get_system_microtime(&tv_sec, &tv_usec);

		elapsed_sec = tv_sec - thread->t_page_creation_time;

		if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
		    (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
			if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
				/*
				 * we'll reset our stats to give a well behaved app
				 * that was unlucky enough to accumulate a bunch of pages
				 * over a long period of time a chance to get out of
				 * the throttled state... we reset the counter and timestamp
				 * so that if it stays under the rate limit for the next second
				 * it will be back in our good graces... if it exceeds it, it
				 * will remain in the throttled state
				 */
				thread->t_page_creation_time = tv_sec;
				thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
			}
			VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);

			thread->t_page_creation_throttled = 1;

			if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
#if (DEVELOPMENT || DEBUG)
				thread->t_page_creation_throttled_hard++;
				OSAddAtomic(1, &vm_page_creation_throttled_hard);
#endif /* DEVELOPMENT || DEBUG */
				return HARD_THROTTLE_DELAY;
			} else {
#if (DEVELOPMENT || DEBUG)
				thread->t_page_creation_throttled_soft++;
				OSAddAtomic(1, &vm_page_creation_throttled_soft);
#endif /* DEVELOPMENT || DEBUG */
				return SOFT_THROTTLE_DELAY;
			}
		}
		thread->t_page_creation_time = tv_sec;
		thread->t_page_creation_count = 0;
	}
no_throttle:
	thread->t_page_creation_count++;

	return 0;
}

extern boolean_t vm_pageout_running;
static __attribute__((noinline, not_tail_called)) void
__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(
	int throttle_delay)
{
	/* make sure vm_pageout_scan() gets to work while we're throttled */
	if (!vm_pageout_running) {
		thread_wakeup((event_t)&vm_page_free_wanted);
	}
	delay(throttle_delay);
}


/*
 * check for various conditions that would
 * prevent us from creating a ZF page...
 * cleanup is based on being called from vm_fault_page
 *
 * object must be locked
 * object == m->vmp_object
 */
static vm_fault_return_t
vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
{
	int throttle_delay;

	if (object->shadow_severed ||
	    VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
		/*
		 * Either:
		 * 1. the shadow chain was severed,
		 * 2. the purgeable object is volatile or empty and is marked
		 *    to fault on access while volatile.
		 * Just have to return an error at this point
		 */
		if (m != VM_PAGE_NULL) {
			VM_PAGE_FREE(m);
		}
		vm_fault_cleanup(object, first_m);

		thread_interrupt_level(interruptible_state);

		if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
		}

		if (object->shadow_severed) {
			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
		}
		return VM_FAULT_MEMORY_ERROR;
	}
	if (page_throttle == TRUE) {
		if ((throttle_delay = vm_page_throttled(FALSE))) {
			/*
			 * we're throttling zero-fills...
			 * treat this as if we couldn't grab a page
			 */
			if (m != VM_PAGE_NULL) {
				VM_PAGE_FREE(m);
			}
			vm_fault_cleanup(object, first_m);

			VM_DEBUG_EVENT(vmf_check_zfdelay, DBG_VM_FAULT_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);

			__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);

			if (current_thread_aborted()) {
				thread_interrupt_level(interruptible_state);
				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
				return VM_FAULT_INTERRUPTED;
			}
			thread_interrupt_level(interruptible_state);

			return VM_FAULT_MEMORY_SHORTAGE;
		}
	}
	return VM_FAULT_SUCCESS;
}

/*
 * Clear the code signing bits on the given page_t
 */
static void
vm_fault_cs_clear(vm_page_t m)
{
	m->vmp_cs_validated = VMP_CS_ALL_FALSE;
	m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
	m->vmp_cs_nx = VMP_CS_ALL_FALSE;
}

/*
 * Enqueues the given page on the throttled queue.
 * The caller must hold the vm_page_queue_lock and it will be held on return.
 */
static void
vm_fault_enqueue_throttled_locked(vm_page_t m)
{
	LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
	assert(!VM_PAGE_WIRED(m));

	/*
	 * can't be on the pageout queue since we don't
	 * have a pager to try and clean to
	 */
	vm_page_queues_remove(m, TRUE);
	vm_page_check_pageable_safe(m);
	vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
	m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
	vm_page_throttled_count++;
}

/*
 * do the work to zero fill a page and
 * inject it into the correct paging queue
 *
 * m->vmp_object must be locked
 * page queue lock must NOT be held
 */
static int
vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
{
	int my_fault = DBG_ZERO_FILL_FAULT;
	vm_object_t object;

	object = VM_PAGE_OBJECT(m);

	/*
	 * This is a zero-fill page fault...
	 *
	 * Checking the page lock is a waste of
	 * time; this page was absent, so
	 * it can't be page locked by a pager.
	 *
	 * we also consider it undefined
	 * with respect to instruction
	 * execution. i.e. it is the responsibility
	 * of higher layers to call for an instruction
	 * sync after changing the contents and before
	 * sending a program into this area. We
	 * choose this approach for performance
	 */
	vm_fault_cs_clear(m);
	m->vmp_pmapped = TRUE;

	if (no_zero_fill == TRUE) {
		my_fault = DBG_NZF_PAGE_FAULT;

		if (m->vmp_absent && m->vmp_busy) {
			return my_fault;
		}
	} else {
		vm_page_zero_fill(m);

		counter_inc(&vm_statistics_zero_fill_count);
		DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
	}
	assert(!m->vmp_laundry);
	assert(!is_kernel_object(object));
	//assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
	if (!VM_DYNAMIC_PAGING_ENABLED() &&
	    (object->purgable == VM_PURGABLE_DENY ||
	    object->purgable == VM_PURGABLE_NONVOLATILE ||
	    object->purgable == VM_PURGABLE_VOLATILE)) {
		vm_page_lockspin_queues();
		if (!VM_DYNAMIC_PAGING_ENABLED()) {
			vm_fault_enqueue_throttled_locked(m);
		}
		vm_page_unlock_queues();
	}
	return my_fault;
}

/*
 * Recovery actions for vm_fault_page
 */
__attribute__((always_inline))
static void
vm_fault_page_release_page(
	vm_page_t m,                    /* Page to release */
	bool *clear_absent_on_error     /* IN/OUT */)
{
	vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);
	if (!VM_PAGE_PAGEABLE(m)) {
		vm_page_lockspin_queues();
		if (*clear_absent_on_error && m->vmp_absent) {
			vm_page_zero_fill(m);
			counter_inc(&vm_statistics_zero_fill_count);
			DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
			m->vmp_absent = false;
		}
		if (!VM_PAGE_PAGEABLE(m)) {
			if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
				vm_page_deactivate(m);
			} else {
				vm_page_activate(m);
			}
		}
		vm_page_unlock_queues();
	}
	*clear_absent_on_error = false;
}
/*
 * Routine: vm_fault_page
 * Purpose:
 *	Find the resident page for the virtual memory
 *	specified by the given virtual memory object
 *	and offset.
 * Additional arguments:
 *	The required permissions for the page is given
 *	in "fault_type". Desired permissions are included
 *	in "protection".
 *	fault_info is passed along to determine pagein cluster
 *	limits... it contains the expected reference pattern,
 *	cluster size if available, etc...
 *
 *	If the desired page is known to be resident (for
 *	example, because it was previously wired down), asserting
 *	the "unwiring" parameter will speed the search.
 *
 *	If the operation can be interrupted (by thread_abort
 *	or thread_terminate), then the "interruptible"
 *	parameter should be asserted.
 *
 * Results:
 *	The page containing the proper data is returned
 *	in "result_page".
 *
 * In/out conditions:
 *	The source object must be locked and referenced,
 *	and must donate one paging reference. The reference
 *	is not affected. The paging reference and lock are
 *	consumed.
 *
 *	If the call succeeds, the object in which "result_page"
 *	resides is left locked and holding a paging reference.
 *	If this is not the original object, a busy page in the
 *	original object is returned in "top_page", to prevent other
 *	callers from pursuing this same data, along with a paging
 *	reference for the original object. The "top_page" should
 *	be destroyed when this guarantee is no longer required.
 *	The "result_page" is also left busy. It is not removed
 *	from the pageout queues.
 * Special Case:
 *	A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
 *	fault succeeded but there's no VM page (i.e. the VM object
 *	does not actually hold VM pages, but device memory or
 *	large pages). The object is still locked and we still hold a
 *	paging_in_progress reference.
 */
unsigned int vm_fault_page_blocked_access = 0;
unsigned int vm_fault_page_forced_retry = 0;

vm_fault_return_t
vm_fault_page(
	/* Arguments: */
	vm_object_t first_object,       /* Object to begin search */
	vm_object_offset_t first_offset, /* Offset into object */
	vm_prot_t fault_type,           /* What access is requested */
	boolean_t must_be_resident,     /* Must page be resident? */
	boolean_t caller_lookup,        /* caller looked up page */
	/* Modifies in place: */
	vm_prot_t *protection,          /* Protection for mapping */
	vm_page_t *result_page,         /* Page found, if successful */
	/* Returns: */
	vm_page_t *top_page,            /* Page in top object, if
	                                 * not result_page. */
	int *type_of_fault,             /* if non-null, fill in with type of fault
	                                 * COW, zero-fill, etc... returned in trace point */
	/* More arguments: */
	kern_return_t *error_code,      /* code if page is in error */
	boolean_t no_zero_fill,         /* don't zero fill absent pages */
	vm_object_fault_info_t fault_info)
{
	vm_page_t m;
	vm_object_t object;
	vm_object_offset_t offset;
	vm_page_t first_m;
	vm_object_t next_object;
	vm_object_t copy_object;
	boolean_t look_for_page;
	boolean_t force_fault_retry = FALSE;
	vm_prot_t access_required = fault_type;
	vm_prot_t wants_copy_flag;
	kern_return_t wait_result;
	wait_interrupt_t interruptible_state;
	boolean_t data_already_requested = FALSE;
	vm_behavior_t orig_behavior;
	vm_size_t orig_cluster_size;
	vm_fault_return_t error;
	int my_fault;
	uint32_t try_failed_count;
	wait_interrupt_t interruptible; /* how may fault be interrupted? */
	int external_state = VM_EXTERNAL_STATE_UNKNOWN;
	memory_object_t pager;
	vm_fault_return_t retval;
	int grab_options;
	bool clear_absent_on_error = false;

	/*
	 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
	 * marked as paged out in the compressor pager or the pager doesn't exist.
	 * Note also that if the pager for an internal object
	 * has not been created, the pager is not invoked regardless of the value
	 * of MUST_ASK_PAGER().
	 *
	 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
	 * is marked as paged out in the compressor pager.
	 * PAGED_OUT() is used to determine if a page has already been pushed
	 * into a copy object in order to avoid a redundant page out operation.
	 */
#define MUST_ASK_PAGER(o, f, s)                                 \
	((s = vm_object_compressor_pager_state_get((o), (f))) != VM_EXTERNAL_STATE_ABSENT)

#define PAGED_OUT(o, f)                                         \
	(vm_object_compressor_pager_state_get((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
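
	/*
	 * Illustrative usage (a sketch of the decision made further below,
	 * not additional logic):
	 *
	 *	look_for_page = (object->pager_created &&
	 *	    MUST_ASK_PAGER(object, offset, external_state));
	 *	if (!look_for_page && object == first_object)
	 *		... nothing paged out: zero-fill a fresh page ...
	 *
	 * Note that MUST_ASK_PAGER() also returns the compressor state through
	 * its third argument ("external_state" here), so the caller can reuse
	 * that result instead of querying the compressor pager a second time.
	 */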

#if TRACEFAULTPAGE
	dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
#endif

	interruptible = fault_info->interruptible;
	interruptible_state = thread_interrupt_level(interruptible);

	/*
	 * INVARIANTS (through entire routine):
	 *
	 * 1)	At all times, we must either have the object
	 *	lock or a busy page in some object to prevent
	 *	some other thread from trying to bring in
	 *	the same page.
	 *
	 *	Note that we cannot hold any locks during the
	 *	pager access or when waiting for memory, so
	 *	we use a busy page then.
	 *
	 * 2)	To prevent another thread from racing us down the
	 *	shadow chain and entering a new page in the top
	 *	object before we do, we must keep a busy page in
	 *	the top object while following the shadow chain.
	 *
	 * 3)	We must increment paging_in_progress on any object
	 *	for which we have a busy page before dropping
	 *	the object lock
	 *
	 * 4)	We leave busy pages on the pageout queues.
	 *	If the pageout daemon comes across a busy page,
	 *	it will remove the page from the pageout queues.
	 */

	object = first_object;
	offset = first_offset;
	first_m = VM_PAGE_NULL;
	access_required = fault_type;

	/*
	 * default type of fault
	 */
	my_fault = DBG_CACHE_HIT_FAULT;
	thread_pri_floor_t token;
	bool drop_floor = false;

	while (TRUE) {
#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
#endif

		grab_options = 0;
#if CONFIG_SECLUDED_MEMORY
		if (object->can_grab_secluded) {
			grab_options |= VM_PAGE_GRAB_SECLUDED;
		}
#endif /* CONFIG_SECLUDED_MEMORY */

		if (!object->alive) {
			/*
			 * object is no longer valid
			 * clean up and return error
			 */
#if DEVELOPMENT || DEBUG
			printf("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
			if (panic_object_not_alive) {
				panic("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
			}
#endif /* DEVELOPMENT || DEBUG */
			vm_fault_cleanup(object, first_m);
			thread_interrupt_level(interruptible_state);

			ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
			return VM_FAULT_MEMORY_ERROR;
		}

		if (!object->pager_created && object->phys_contiguous) {
			/*
			 * A physically-contiguous object without a pager:
			 * must be a "large page" object. We do not deal
			 * with VM pages for this object.
			 */
			caller_lookup = FALSE;
			m = VM_PAGE_NULL;
			goto phys_contig_object;
		}

		if (object->blocked_access) {
			/*
			 * Access to this VM object has been blocked.
			 * Replace our "paging_in_progress" reference with
			 * a "activity_in_progress" reference and wait for
			 * access to be unblocked.
			 */
			caller_lookup = FALSE; /* no longer valid after sleep */
			vm_object_activity_begin(object);
			vm_object_paging_end(object);
			while (object->blocked_access) {
				vm_object_sleep(object,
				    VM_OBJECT_EVENT_UNBLOCKED,
				    THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
			}
			vm_fault_page_blocked_access++;
			vm_object_paging_begin(object);
			vm_object_activity_end(object);
		}

		/*
		 * See whether the page at 'offset' is resident
		 */
		if (caller_lookup == TRUE) {
			/*
			 * The caller has already looked up the page
			 * and gave us the result in "result_page".
			 * We can use this for the first lookup but
			 * it loses its validity as soon as we unlock
			 * the object.
			 */
			m = *result_page;
			caller_lookup = FALSE; /* no longer valid after that */
		} else {
			m = vm_page_lookup(object, vm_object_trunc_page(offset));
		}
#if TRACEFAULTPAGE
		dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
#endif
		if (m != VM_PAGE_NULL) {
			if (m->vmp_busy) {
				/*
				 * The page is being brought in,
				 * wait for it and then retry.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
#endif
				if (fault_info->fi_no_sleep) {
					/* Caller has requested not to sleep on busy pages */
					vm_fault_cleanup(object, first_m);
					thread_interrupt_level(interruptible_state);
					return VM_FAULT_BUSY;
				}

				wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_DEFAULT);

				if (wait_result != THREAD_AWAKENED) {
					vm_fault_cleanup(object, first_m);
					thread_interrupt_level(interruptible_state);

					if (wait_result == THREAD_RESTART) {
						return VM_FAULT_RETRY;
					} else {
						ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
						return VM_FAULT_INTERRUPTED;
					}
				}
				continue;
			}
			if (m->vmp_laundry) {
				m->vmp_free_when_done = FALSE;

				if (!m->vmp_cleaning) {
					vm_pageout_steal_laundry(m, FALSE);
				}
			}
			vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
			if (vm_page_is_guard(m)) {
				/*
				 * Guard page: off limits !
				 */
				if (fault_type == VM_PROT_NONE) {
					/*
					 * The fault is not requesting any
					 * access to the guard page, so it must
					 * be just to wire or unwire it.
					 * Let's pretend it succeeded...
					 */
					m->vmp_busy = TRUE;
					*result_page = m;
					assert(first_m == VM_PAGE_NULL);
					*top_page = first_m;
					if (type_of_fault) {
						*type_of_fault = DBG_GUARD_FAULT;
					}
					thread_interrupt_level(interruptible_state);
					return VM_FAULT_SUCCESS;
				} else {
					/*
					 * The fault requests access to the
					 * guard page: let's deny that !
					 */
					vm_fault_cleanup(object, first_m);
					thread_interrupt_level(interruptible_state);
					ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
					return VM_FAULT_MEMORY_ERROR;
				}
			}


			if (m->vmp_error) {
				/*
				 * The page is in error, give up now.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */
#endif
				if (error_code) {
					*error_code = KERN_MEMORY_ERROR;
				}
				VM_PAGE_FREE(m);

				vm_fault_cleanup(object, first_m);
				thread_interrupt_level(interruptible_state);

				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
				return VM_FAULT_MEMORY_ERROR;
			}
			if (m->vmp_restart) {
				/*
				 * The pager wants us to restart
				 * at the top of the chain,
				 * typically because it has moved the
				 * page to another pager, then do so.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
#endif
				VM_PAGE_FREE(m);

				vm_fault_cleanup(object, first_m);
				thread_interrupt_level(interruptible_state);

				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
				return VM_FAULT_RETRY;
			}
			if (m->vmp_absent) {
				/*
				 * The page isn't busy, but is absent,
				 * therefore it's deemed "unavailable".
				 *
				 * Remove the non-existent page (unless it's
				 * in the top object) and move on down to the
				 * next object (if there is one).
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */
#endif
				next_object = object->shadow;

				if (next_object == VM_OBJECT_NULL) {
					/*
					 * Absent page at bottom of shadow
					 * chain; zero fill the page we left
					 * busy in the first object, and free
					 * the absent page.
					 */
					assert(!must_be_resident);

					/*
					 * check for any conditions that prevent
					 * us from creating a new zero-fill page
					 * vm_fault_check will do all of the
					 * fault cleanup in the case of an error condition
					 * including resetting the thread_interrupt_level
					 */
					error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);

					if (error != VM_FAULT_SUCCESS) {
						return error;
					}

					if (object != first_object) {
						/*
						 * free the absent page we just found
						 */
						VM_PAGE_FREE(m);

						/*
						 * drop reference and lock on current object
						 */
						vm_object_paging_end(object);
						vm_object_unlock(object);

						/*
						 * grab the original page we
						 * 'soldered' in place and
						 * retake lock on 'first_object'
						 */
						m = first_m;
						first_m = VM_PAGE_NULL;

						object = first_object;
						offset = first_offset;

						vm_object_lock(object);
					} else {
						/*
						 * we're going to use the absent page we just found
						 * so convert it to a 'busy' page
						 */
						m->vmp_absent = FALSE;
						m->vmp_busy = TRUE;
					}
					if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
						m->vmp_absent = TRUE;
						clear_absent_on_error = true;
					}
					/*
					 * zero-fill the page and put it on
					 * the correct paging queue
					 */
					my_fault = vm_fault_zero_page(m, no_zero_fill);

					break;
				} else {
					if (must_be_resident) {
						vm_object_paging_end(object);
					} else if (object != first_object) {
						vm_object_paging_end(object);
						VM_PAGE_FREE(m);
					} else {
						first_m = m;
						m->vmp_absent = FALSE;
						m->vmp_busy = TRUE;

						vm_page_lockspin_queues();
						vm_page_queues_remove(m, FALSE);
						vm_page_unlock_queues();
					}

					offset += object->vo_shadow_offset;
					fault_info->lo_offset += object->vo_shadow_offset;
					fault_info->hi_offset += object->vo_shadow_offset;
					access_required = VM_PROT_READ;

					vm_object_lock(next_object);
					vm_object_unlock(object);
					object = next_object;
					vm_object_paging_begin(object);

					/*
					 * reset to default type of fault
					 */
					my_fault = DBG_CACHE_HIT_FAULT;

					continue;
				}
			}
			if ((m->vmp_cleaning)
			    && ((object != first_object) || (object->vo_copy != VM_OBJECT_NULL))
			    && (fault_type & VM_PROT_WRITE)) {
				/*
				 * This is a copy-on-write fault that will
				 * cause us to revoke access to this page, but
				 * this page is in the process of being cleaned
				 * in a clustered pageout. We must wait until
				 * the cleaning operation completes before
				 * revoking access to the original page,
				 * otherwise we might attempt to remove a
				 * wired mapping.
				 */
#if TRACEFAULTPAGE
				dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */
#endif
				/*
				 * take an extra ref so that object won't die
				 */
				vm_object_reference_locked(object);

				vm_fault_cleanup(object, first_m);

				vm_object_lock(object);
				assert(os_ref_get_count_raw(&object->ref_count) > 0);

				m = vm_page_lookup(object, vm_object_trunc_page(offset));

				if (m != VM_PAGE_NULL && m->vmp_cleaning) {
					wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_UNLOCK);
					vm_object_deallocate(object);
					goto backoff;
				} else {
					vm_object_unlock(object);

					vm_object_deallocate(object);
					thread_interrupt_level(interruptible_state);

					return VM_FAULT_RETRY;
				}
			}
			if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
			    !(fault_info != NULL && fault_info->stealth)) {
				/*
				 * If we were passed a non-NULL pointer for
				 * "type_of_fault", then we came from
				 * vm_fault... we'll let it deal with
				 * this condition, since it
				 * needs to see m->vmp_speculative to correctly
				 * account the pageins, otherwise...
				 * take it off the speculative queue, we'll
				 * let the caller of vm_fault_page deal
				 * with getting it onto the correct queue
				 *
				 * If the caller specified in fault_info that
				 * it wants a "stealth" fault, we also leave
				 * the page in the speculative queue.
				 */
				vm_page_lockspin_queues();
				if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
					vm_page_queues_remove(m, FALSE);
				}
				vm_page_unlock_queues();
			}
			assert(object == VM_PAGE_OBJECT(m));

			if (object->code_signed) {
				/*
				 * CODE SIGNING:
				 * We just paged in a page from a signed
				 * memory object but we don't need to
				 * validate it now. We'll validate it when
				 * it gets mapped into a user address
				 * space for the first time or when the page
				 * gets copied to another object as a result
				 * of a copy-on-write.
				 */
			}

			/*
			 * We mark the page busy and leave it on
			 * the pageout queues. If the pageout
			 * daemon comes across it, then it will
			 * remove the page from the queue, but not the object
			 */
#if TRACEFAULTPAGE
			dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
#endif
			assert(!m->vmp_busy);
			assert(!m->vmp_absent);

			m->vmp_busy = TRUE;
			break;
		}
1556
1557 /*
1558 * we get here when there is no page present in the object at
1559 * the offset we're interested in... we'll allocate a page
1560 * at this point if the pager associated with
1561 * this object can provide the data or we're the top object...
1562 * object is locked; m == NULL
1563 */
1564
1565 if (must_be_resident) {
1566 if (fault_type == VM_PROT_NONE &&
1567 is_kernel_object(object)) {
1568 /*
1569 * We've been called from vm_fault_unwire()
1570 * while removing a map entry that was allocated
1571 * with KMA_KOBJECT and KMA_VAONLY. This page
1572 * is not present and there's nothing more to
1573 * do here (nothing to unwire).
1574 */
1575 vm_fault_cleanup(object, first_m);
1576 thread_interrupt_level(interruptible_state);
1577
1578 return VM_FAULT_MEMORY_ERROR;
1579 }
1580
1581 goto dont_look_for_page;
1582 }
1583
1584 /* Don't expect to fault pages into the kernel object. */
1585 assert(!is_kernel_object(object));
1586
1587 look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE));
1588
1589 #if TRACEFAULTPAGE
1590 dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
1591 #endif
1592 if (!look_for_page && object == first_object && !object->phys_contiguous) {
1593 /*
1594 * Allocate a new page for this object/offset pair as a placeholder
1595 */
1596 m = vm_page_grab_options(grab_options);
1597 #if TRACEFAULTPAGE
1598 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1599 #endif
1600 if (m == VM_PAGE_NULL) {
1601 vm_fault_cleanup(object, first_m);
1602 thread_interrupt_level(interruptible_state);
1603
1604 return VM_FAULT_MEMORY_SHORTAGE;
1605 }
1606
1607 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1608 vm_page_insert_internal(m, object,
1609 vm_object_trunc_page(offset),
1610 VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1611 } else {
1612 vm_page_insert(m, object, vm_object_trunc_page(offset));
1613 }
1614 }
1615 if (look_for_page) {
1616 kern_return_t rc;
1617 int my_fault_type;
1618
1619 /*
1620 * If the memory manager is not ready, we
1621 * cannot make requests.
1622 */
1623 if (!object->pager_ready) {
1624 #if TRACEFAULTPAGE
1625 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1626 #endif
1627 if (m != VM_PAGE_NULL) {
1628 VM_PAGE_FREE(m);
1629 }
1630
1631 /*
1632 * take an extra ref so object won't die
1633 */
1634 vm_object_reference_locked(object);
1635 vm_fault_cleanup(object, first_m);
1636
1637 vm_object_lock(object);
1638 assert(os_ref_get_count_raw(&object->ref_count) > 0);
1639
1640 if (!object->pager_ready) {
1641 wait_result = vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY, interruptible, LCK_SLEEP_UNLOCK);
1642 vm_object_deallocate(object);
1643
1644 goto backoff;
1645 } else {
1646 vm_object_unlock(object);
1647 vm_object_deallocate(object);
1648 thread_interrupt_level(interruptible_state);
1649
1650 return VM_FAULT_RETRY;
1651 }
1652 }
1653 if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1654 /*
1655 * If there are too many outstanding page
1656 * requests pending on this external object, we
1657 * wait for them to be resolved now.
1658 */
1659 #if TRACEFAULTPAGE
1660 dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1661 #endif
1662 if (m != VM_PAGE_NULL) {
1663 VM_PAGE_FREE(m);
1664 }
1665 /*
1666 * take an extra ref so object won't die
1667 */
1668 vm_object_reference_locked(object);
1669
1670 vm_fault_cleanup(object, first_m);
1671
1672 vm_object_lock(object);
1673 assert(os_ref_get_count_raw(&object->ref_count) > 0);
1674
1675 if (object->paging_in_progress >= vm_object_pagein_throttle) {
1676 wait_result = vm_object_paging_throttle_wait(object, interruptible);
1677 vm_object_unlock(object);
1678 vm_object_deallocate(object);
1679 goto backoff;
1680 } else {
1681 vm_object_unlock(object);
1682 vm_object_deallocate(object);
1683 thread_interrupt_level(interruptible_state);
1684
1685 return VM_FAULT_RETRY;
1686 }
1687 }
1688 if (object->internal) {
1689 int compressed_count_delta;
1690 vm_compressor_options_t c_flags = 0;
1691
1692 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1693
1694 if (m == VM_PAGE_NULL) {
1695 /*
1696 * Allocate a new page for this object/offset pair as a placeholder
1697 */
1698 m = vm_page_grab_options(grab_options);
1699 #if TRACEFAULTPAGE
1700 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1701 #endif
1702 if (m == VM_PAGE_NULL) {
1703 vm_fault_cleanup(object, first_m);
1704 thread_interrupt_level(interruptible_state);
1705
1706 return VM_FAULT_MEMORY_SHORTAGE;
1707 }
1708
1709 m->vmp_absent = TRUE;
1710 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1711 vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1712 } else {
1713 vm_page_insert(m, object, vm_object_trunc_page(offset));
1714 }
1715 }
1716 assert(m->vmp_busy);
1717
1718 m->vmp_absent = TRUE;
1719 pager = object->pager;
1720
1721 assert(object->paging_in_progress > 0);
1722
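/*
 * Register this thread as the page's worker so that, when
 * PAGE_SLEEP_WITH_INHERITOR is configured, threads blocking on the
 * busy page can push their priority onto us while the decompression
 * below runs; the token is surrendered again in
 * vm_page_wakeup_done_with_inheritor() once the page is no longer busy.
 */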
1723 page_worker_token_t pw_token;
1724 #if PAGE_SLEEP_WITH_INHERITOR
1725 page_worker_register_worker((event_t)m, &pw_token);
1726 #endif /* PAGE_SLEEP_WITH_INHERITOR */
1727
1728 vm_object_unlock(object);
1729 rc = vm_compressor_pager_get(
1730 pager,
1731 offset + object->paging_offset,
1732 VM_PAGE_GET_PHYS_PAGE(m),
1733 &my_fault_type,
1734 c_flags,
1735 &compressed_count_delta);
1736
1737 if (type_of_fault == NULL) {
1738 int throttle_delay;
1739
1740 /*
1741 * we weren't called from vm_fault, so we
1742 * need to apply page creation throttling;
1743 * do it before we re-acquire any locks
1744 */
1745 if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1746 if ((throttle_delay = vm_page_throttled(TRUE))) {
1747 VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1748 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
1749 }
1750 }
1751 }
1752 vm_object_lock(object);
1753 assert(object->paging_in_progress > 0);
1754
1755 vm_compressor_pager_count(
1756 pager,
1757 compressed_count_delta,
1758 FALSE, /* shared_lock */
1759 object);
1760
1761 switch (rc) {
1762 case KERN_SUCCESS:
1763 m->vmp_absent = FALSE;
1764 m->vmp_dirty = TRUE;
1765 if (!HAS_DEFAULT_CACHEABILITY(object->wimg_bits &
1766 VM_WIMG_MASK)) {
1767 /*
1768 * If the page is not cacheable,
1769 * we can't let its contents
1770 * linger in the data cache
1771 * after the decompression.
1772 */
1773 pmap_sync_page_attributes_phys(
1774 VM_PAGE_GET_PHYS_PAGE(m));
1775 } else {
1776 m->vmp_written_by_kernel = TRUE;
1777 }
1778 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
1779 if ((fault_type & VM_PROT_WRITE) == 0) {
1780 vm_object_lock_assert_exclusive(object);
1781 vm_page_lockspin_queues();
1782 m->vmp_unmodified_ro = true;
1783 vm_page_unlock_queues();
1784 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
1785 *protection &= ~VM_PROT_WRITE;
1786 }
1787 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
1788
1789 /*
1790 * If the object is purgeable, its
1791 * owner's purgeable ledgers have been
1792 * updated in vm_page_insert() but the
1793 * page was also accounted for in a
1794 * "compressed purgeable" ledger, so
1795 * update that now.
1796 */
1797 if (((object->purgable !=
1798 VM_PURGABLE_DENY) ||
1799 object->vo_ledger_tag) &&
1800 (object->vo_owner !=
1801 NULL)) {
1802 /*
1803 * One less compressed
1804 * purgeable/tagged page.
1805 */
1806 if (compressed_count_delta) {
1807 vm_object_owner_compressed_update(
1808 object,
1809 -1);
1810 }
1811 }
1812
1813 break;
1814 case KERN_MEMORY_FAILURE:
1815 m->vmp_unusual = TRUE;
1816 m->vmp_error = TRUE;
1817 m->vmp_absent = FALSE;
1818 break;
1819 case KERN_MEMORY_ERROR:
1820 assert(m->vmp_absent);
1821 break;
1822 default:
1823 panic("vm_fault_page(): unexpected "
1824 "error %d from "
1825 "vm_compressor_pager_get()\n",
1826 rc);
1827 }
1828 vm_page_wakeup_done_with_inheritor(object, m, &pw_token);
1829
1830 rc = KERN_SUCCESS;
1831 goto data_requested;
1832 }
1833 my_fault_type = DBG_PAGEIN_FAULT;
1834
1835 if (m != VM_PAGE_NULL) {
1836 VM_PAGE_FREE(m);
1837 m = VM_PAGE_NULL;
1838 }
1839
1840 #if TRACEFAULTPAGE
1841 dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */
1842 #endif
1843
1844 /*
1845 * It's possible someone called vm_object_destroy while we weren't
1846 * holding the object lock. If that has happened, then bail out
1847 * here.
1848 */
1849
1850 pager = object->pager;
1851
1852 if (pager == MEMORY_OBJECT_NULL) {
1853 vm_fault_cleanup(object, first_m);
1854 thread_interrupt_level(interruptible_state);
1855
1856 static const enum vm_subsys_error_codes object_destroy_errors[VM_OBJECT_DESTROY_MAX + 1] = {
1857 [VM_OBJECT_DESTROY_UNKNOWN_REASON] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER,
1858 [VM_OBJECT_DESTROY_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNMOUNT,
1859 [VM_OBJECT_DESTROY_FORCED_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_FORCED_UNMOUNT,
1860 [VM_OBJECT_DESTROY_UNGRAFT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNGRAFT,
1861 [VM_OBJECT_DESTROY_PAGER] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_DEALLOC_PAGER,
1862 [VM_OBJECT_DESTROY_RECLAIM] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_RECLAIM,
1863 };
1864 enum vm_subsys_error_codes kdbg_code = object_destroy_errors[(vm_object_destroy_reason_t)object->no_pager_reason];
1865 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, kdbg_code), 0 /* arg */);
1866 return VM_FAULT_MEMORY_ERROR;
1867 }
1868
1869 /*
1870 * We have an absent page in place for the faulting offset,
1871 * so we can release the object lock.
1872 */
1873
1874 if (object->object_is_shared_cache) {
1875 token = thread_priority_floor_start();
1876 /*
1877 * A non-native shared cache object might
1878 * be getting set up in parallel with this
1879 * fault and so we can't assume that this
1880 * check will be valid after we drop the
1881 * object lock below.
1882 */
1883 drop_floor = true;
1884 }
1885
1886 vm_object_unlock(object);
1887
1888 /*
1889 * If this object uses a copy_call strategy,
1890 * and we are interested in a copy of this object
1891 * (having gotten here only by following a
1892 * shadow chain), then tell the memory manager
1893 * via a flag added to the desired_access
1894 * parameter, so that it can detect a race
1895 * between our walking down the shadow chain
1896 * and its pushing pages up into a copy of
1897 * the object that it manages.
1898 */
1899 if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1900 wants_copy_flag = VM_PROT_WANTS_COPY;
1901 } else {
1902 wants_copy_flag = VM_PROT_NONE;
1903 }
1904
1905 if (object->vo_copy == first_object) {
1906 /*
1907 * if we issue the memory_object_data_request in
1908 * this state, we are subject to a deadlock with
1909 * the underlying filesystem if it is trying to
1910 * shrink the file resulting in a push of pages
1911 * into the copy object... that push will stall
1912 * on the placeholder page, and if the pushing thread
1913 * is holding a lock that is required on the pagein
1914 * path (such as a truncate lock), we'll deadlock...
1915 * to avoid this potential deadlock, we throw away
1916 * our placeholder page before calling memory_object_data_request
1917 * and force this thread to retry the vm_fault_page after
1918 * we have issued the I/O. the second time through this path
1919 * we will find the page already in the cache (presumably still
1920 * busy waiting for the I/O to complete) and then complete
1921 * the fault w/o having to go through memory_object_data_request again
1922 */
1923 assert(first_m != VM_PAGE_NULL);
1924 assert(VM_PAGE_OBJECT(first_m) == first_object);
1925
1926 vm_object_lock(first_object);
1927 VM_PAGE_FREE(first_m);
1928 vm_object_paging_end(first_object);
1929 vm_object_unlock(first_object);
1930
1931 first_m = VM_PAGE_NULL;
1932 force_fault_retry = TRUE;
1933
1934 vm_fault_page_forced_retry++;
1935 }
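/*
 * The deadlock avoided above, as a timeline (illustrative only;
 * "truncate lock" stands in for any filesystem lock held across a
 * page push):
 *
 *	thread A (this fault)           thread B (file shrink)
 *	---------------------           ----------------------
 *	insert busy placeholder
 *	                                take truncate lock
 *	memory_object_data_request()
 *	                                push pages into the copy object,
 *	                                stall on A's busy placeholder
 *	pagein path blocks on B's
 *	truncate lock           ==> deadlock
 *
 * With the placeholder freed first, B never stalls on it, and A simply
 * retries the fault after the I/O has been issued.
 */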
1936
1937 if (data_already_requested == TRUE) {
1938 orig_behavior = fault_info->behavior;
1939 orig_cluster_size = fault_info->cluster_size;
1940
1941 fault_info->behavior = VM_BEHAVIOR_RANDOM;
1942 fault_info->cluster_size = PAGE_SIZE;
1943 }
1944 /*
1945 * Call the memory manager to retrieve the data.
1946 */
1947 rc = memory_object_data_request(
1948 pager,
1949 vm_object_trunc_page(offset) + object->paging_offset,
1950 PAGE_SIZE,
1951 access_required | wants_copy_flag,
1952 (memory_object_fault_info_t)fault_info);
1953
1954 if (data_already_requested == TRUE) {
1955 fault_info->behavior = orig_behavior;
1956 fault_info->cluster_size = orig_cluster_size;
1957 } else {
1958 data_already_requested = TRUE;
1959 }
1960
1961 DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1962 #if TRACEFAULTPAGE
1963 dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1964 #endif
1965 vm_object_lock(object);
1966
1967 if (drop_floor && object->object_is_shared_cache) {
1968 thread_priority_floor_end(&token);
1969 drop_floor = false;
1970 }
1971
1972 data_requested:
1973 if (rc != KERN_SUCCESS) {
1974 vm_fault_cleanup(object, first_m);
1975 thread_interrupt_level(interruptible_state);
1976
1977 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
1978
1979 if (rc == MACH_SEND_INTERRUPTED) {
1980 return VM_FAULT_INTERRUPTED;
1981 } else if (rc == KERN_ALREADY_WAITING) {
1982 return VM_FAULT_BUSY;
1983 } else {
1984 return VM_FAULT_MEMORY_ERROR;
1985 }
1986 } else {
1987 clock_sec_t tv_sec;
1988 clock_usec_t tv_usec;
1989
1990 if (my_fault_type == DBG_PAGEIN_FAULT) {
1991 clock_get_system_microtime(&tv_sec, &tv_usec);
1992 current_thread()->t_page_creation_time = tv_sec;
1993 current_thread()->t_page_creation_count = 0;
1994 }
1995 }
1996 if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
1997 vm_fault_cleanup(object, first_m);
1998 thread_interrupt_level(interruptible_state);
1999
2000 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2001 return VM_FAULT_INTERRUPTED;
2002 }
2003 if (force_fault_retry == TRUE) {
2004 vm_fault_cleanup(object, first_m);
2005 thread_interrupt_level(interruptible_state);
2006
2007 return VM_FAULT_RETRY;
2008 }
2009 if (m == VM_PAGE_NULL && object->phys_contiguous) {
2010 /*
2011 * No page here means that the object we
2012 * initially looked up was "physically
2013 * contiguous" (i.e. device memory). However,
2014 * with Virtual VRAM, the object might not
2015 * be backed by that device memory anymore,
2016 * so we're done here only if the object is
2017 * still "phys_contiguous".
2018 * Otherwise, if the object is no longer
2019 * "phys_contiguous", we need to retry the
2020 * page fault against the object's new backing
2021 * store (different memory object).
2022 */
2023 phys_contig_object:
2024 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
2025 assert(object == first_object);
2026 goto done;
2027 }
2028 /*
2029 * potentially a pagein fault
2030 * if we make it through the state checks
2031 * above, then we'll count it as such
2032 */
2033 my_fault = my_fault_type;
2034
2035 /*
2036 * Retry with same object/offset, since new data may
2037 * be in a different page (i.e., m is meaningless at
2038 * this point).
2039 */
2040 continue;
2041 }
2042 dont_look_for_page:
2043 /*
2044 * We get here if the object has no pager, or an existence map
2045 * exists and indicates the page isn't present on the pager
2046 * or we're unwiring a page. If a pager exists, but there
2047 * is no existence map, then the m->vmp_absent case above handles
2048 * the ZF case when the pager can't provide the page
2049 */
2050 #if TRACEFAULTPAGE
2051 dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2052 #endif
2053 if (object == first_object) {
2054 first_m = m;
2055 } else {
2056 assert(m == VM_PAGE_NULL);
2057 }
2058
2059 next_object = object->shadow;
2060
2061 if (next_object == VM_OBJECT_NULL) {
2062 /*
2063 * we've hit the bottom of the shadow chain,
2064 * fill the page in the top object with zeros.
2065 */
2066 assert(!must_be_resident);
2067
2068 if (object != first_object) {
2069 vm_object_paging_end(object);
2070 vm_object_unlock(object);
2071
2072 object = first_object;
2073 offset = first_offset;
2074 vm_object_lock(object);
2075 }
2076 m = first_m;
2077 assert(VM_PAGE_OBJECT(m) == object);
2078 first_m = VM_PAGE_NULL;
2079
2080 /*
2081 * check for any conditions that prevent
2082 * us from creating a new zero-fill page
2083 * vm_fault_check will do all of the
2084 * fault cleanup in the case of an error condition
2085 * including resetting the thread_interrupt_level
2086 */
2087 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2088
2089 if (error != VM_FAULT_SUCCESS) {
2090 return error;
2091 }
2092
2093 if (m == VM_PAGE_NULL) {
2094 m = vm_page_grab_options(grab_options);
2095
2096 if (m == VM_PAGE_NULL) {
2097 vm_fault_cleanup(object, VM_PAGE_NULL);
2098 thread_interrupt_level(interruptible_state);
2099
2100 return VM_FAULT_MEMORY_SHORTAGE;
2101 }
2102 vm_page_insert(m, object, vm_object_trunc_page(offset));
2103 }
2104 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2105 m->vmp_absent = TRUE;
2106 clear_absent_on_error = true;
2107 }
2108
2109 my_fault = vm_fault_zero_page(m, no_zero_fill);
2110
2111 break;
2112 } else {
2113 /*
2114 * Move on to the next object. Lock the next
2115 * object before unlocking the current one.
2116 */
2117 if ((object != first_object) || must_be_resident) {
2118 vm_object_paging_end(object);
2119 }
2120
2121 offset += object->vo_shadow_offset;
2122 fault_info->lo_offset += object->vo_shadow_offset;
2123 fault_info->hi_offset += object->vo_shadow_offset;
2124 access_required = VM_PROT_READ;
2125
2126 vm_object_lock(next_object);
2127 vm_object_unlock(object);
2128
2129 object = next_object;
2130 vm_object_paging_begin(object);
2131 }
2132 }
2133
2134 /*
2135 * PAGE HAS BEEN FOUND.
2136 *
2137 * This page (m) is:
2138 * busy, so that we can play with it;
2139 * not absent, so that nobody else will fill it;
2140 * possibly eligible for pageout;
2141 *
2142 * The top-level page (first_m) is:
2143 * VM_PAGE_NULL if the page was found in the
2144 * top-level object;
2145 * busy, not absent, and ineligible for pageout.
2146 *
2147 * The current object (object) is locked. A paging
2148 * reference is held for the current and top-level
2149 * objects.
2150 */
2151
2152 #if TRACEFAULTPAGE
2153 dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2154 #endif
2155 #if EXTRA_ASSERTIONS
2156 assert(m->vmp_busy && !m->vmp_absent);
2157 assert((first_m == VM_PAGE_NULL) ||
2158 (first_m->vmp_busy && !first_m->vmp_absent &&
2159 !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2160 #endif /* EXTRA_ASSERTIONS */
2161
2162 /*
2163 * If the page is being written, but isn't
2164 * already owned by the top-level object,
2165 * we have to copy it into a new page owned
2166 * by the top-level object.
2167 */
2168 if (object != first_object) {
2169 #if TRACEFAULTPAGE
2170 dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2171 #endif
2172 if (fault_type & VM_PROT_WRITE) {
2173 vm_page_t copy_m;
2174
2175 /*
2176 * We only really need to copy if we
2177 * want to write it.
2178 */
2179 assert(!must_be_resident);
2180
2181 /*
2182 * If we try to collapse first_object at this
2183 * point, we may deadlock when we try to get
2184 * the lock on an intermediate object (since we
2185 * have the bottom object locked). We can't
2186 * unlock the bottom object, because the page
2187 * we found may move (by collapse) if we do.
2188 *
2189 * Instead, we first copy the page. Then, when
2190 * we have no more use for the bottom object,
2191 * we unlock it and try to collapse.
2192 *
2193 * Note that we copy the page even if we didn't
2194 * need to... that's the breaks.
2195 */
2196
2197 /*
2198 * Allocate a page for the copy
2199 */
2200 copy_m = vm_page_grab_options(grab_options);
2201
2202 if (copy_m == VM_PAGE_NULL) {
2203 vm_fault_page_release_page(m, &clear_absent_on_error);
2204
2205 vm_fault_cleanup(object, first_m);
2206 thread_interrupt_level(interruptible_state);
2207
2208 return VM_FAULT_MEMORY_SHORTAGE;
2209 }
2210
2211 vm_page_copy(m, copy_m);
2212
2213 /*
2214 * If another map is truly sharing this
2215 * page with us, we have to flush all
2216 * uses of the original page, since we
2217 * can't distinguish those which want the
2218 * original from those which need the
2219 * new copy.
2220 *
2221 * XXXO If we know that only one map has
2222 * access to this page, then we could
2223 * avoid the pmap_disconnect() call.
2224 */
2225 if (m->vmp_pmapped) {
2226 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2227 }
2228
2229 if (m->vmp_clustered) {
2230 VM_PAGE_COUNT_AS_PAGEIN(m);
2231 VM_PAGE_CONSUME_CLUSTERED(m);
2232 }
2233 assert(!m->vmp_cleaning);
2234
2235 /*
2236 * We no longer need the old page or object.
2237 */
2238 vm_fault_page_release_page(m, &clear_absent_on_error);
2239
2240 /*
2241 * This check helps with marking the object as having a sequential pattern.
2242 * Normally we'll miss doing this below because this fault is about COW to
2243 * the first_object, i.e. bring the page in from disk, push to the object above, but
2244 * don't update the file object's sequential pattern.
2245 */
2246 if (object->internal == FALSE) {
2247 vm_fault_is_sequential(object, offset, fault_info->behavior);
2248 }
2249
2250 vm_object_paging_end(object);
2251 vm_object_unlock(object);
2252
2253 my_fault = DBG_COW_FAULT;
2254 counter_inc(&vm_statistics_cow_faults);
2255 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2256 counter_inc(&current_task()->cow_faults);
2257
2258 object = first_object;
2259 offset = first_offset;
2260
2261 vm_object_lock(object);
2262 /*
2263 * get rid of the place holder
2264 * page that we soldered in earlier
2265 */
2266 VM_PAGE_FREE(first_m);
2267 first_m = VM_PAGE_NULL;
2268
2269 /*
2270 * and replace it with the
2271 * page we just copied into
2272 */
2273 assert(copy_m->vmp_busy);
2274 vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2275 SET_PAGE_DIRTY(copy_m, TRUE);
2276
2277 m = copy_m;
2278 /*
2279 * Now that we've gotten the copy out of the
2280 * way, let's try to collapse the top object.
2281 * But we have to play ugly games with
2282 * paging_in_progress to do that...
2283 */
2284 vm_object_paging_end(object);
2285 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2286 vm_object_paging_begin(object);
2287 } else {
2288 *protection &= (~VM_PROT_WRITE);
2289 }
2290 }
2291 /*
2292 * Now check whether the page needs to be pushed into the
2293 * copy object. The use of asymmetric copy on write for
2294 * shared temporary objects means that we may do two copies to
2295 * satisfy the fault; one above to get the page from a
2296 * shadowed object, and one here to push it into the copy.
2297 */
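/*
 * Sketch of the two copies on the object chain (illustration only;
 * "m" is the page the fault found):
 *
 *	copy_object        first_object --shadow--> ... --> object
 *	     ^                   |
 *	     +------ vo_copy ----+
 *
 *	copy #1 (above): m pulled from object up into first_object
 *	copy #2 (below): m pushed out to copy_object, so the snapshot
 *	                 that copy_object represents is preserved
 *	                 before the write is allowed to proceed
 */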
2298 try_failed_count = 0;
2299
2300 while ((copy_object = first_object->vo_copy) != VM_OBJECT_NULL) {
2301 vm_object_offset_t copy_offset;
2302 vm_page_t copy_m;
2303
2304 #if TRACEFAULTPAGE
2305 dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2306 #endif
2307 /*
2308 * If the page is being written, but hasn't been
2309 * copied to the copy-object, we have to copy it there.
2310 */
2311 if ((fault_type & VM_PROT_WRITE) == 0) {
2312 *protection &= ~VM_PROT_WRITE;
2313 break;
2314 }
2315
2316 /*
2317 * If the page was guaranteed to be resident,
2318 * we must have already performed the copy.
2319 */
2320 if (must_be_resident) {
2321 break;
2322 }
2323
2324 /*
2325 * Try to get the lock on the copy_object.
2326 */
2327 if (!vm_object_lock_try(copy_object)) {
2328 vm_object_unlock(object);
2329 try_failed_count++;
2330
2331 mutex_pause(try_failed_count); /* wait a bit */
2332 vm_object_lock(object);
2333
2334 continue;
2335 }
2336 try_failed_count = 0;
2337
2338 /*
2339 * Make another reference to the copy-object,
2340 * to keep it from disappearing during the
2341 * copy.
2342 */
2343 vm_object_reference_locked(copy_object);
2344
2345 /*
2346 * Does the page exist in the copy?
2347 */
2348 copy_offset = first_offset - copy_object->vo_shadow_offset;
2349 copy_offset = vm_object_trunc_page(copy_offset);
2350
2351 if (copy_object->vo_size <= copy_offset) {
2352 /*
2353 * Copy object doesn't cover this page -- do nothing.
2354 */
2355 ;
2356 } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2357 /*
2358 * Page currently exists in the copy object
2359 */
2360 if (copy_m->vmp_busy) {
2361 /*
2362 * If the page is being brought
2363 * in, wait for it and then retry.
2364 */
2365 vm_fault_page_release_page(m, &clear_absent_on_error);
2366
2367 /*
2368 * take an extra ref so object won't die
2369 */
2370 vm_object_reference_locked(copy_object);
2371 vm_object_unlock(copy_object);
2372 vm_fault_cleanup(object, first_m);
2373
2374 vm_object_lock(copy_object);
2375 vm_object_lock_assert_exclusive(copy_object);
2376 os_ref_release_live_locked_raw(&copy_object->ref_count,
2377 &vm_object_refgrp);
2378 copy_m = vm_page_lookup(copy_object, copy_offset);
2379
2380 if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2381 wait_result = vm_page_sleep(copy_object, copy_m, interruptible, LCK_SLEEP_UNLOCK);
2382 vm_object_deallocate(copy_object);
2383
2384 goto backoff;
2385 } else {
2386 vm_object_unlock(copy_object);
2387 vm_object_deallocate(copy_object);
2388 thread_interrupt_level(interruptible_state);
2389
2390 return VM_FAULT_RETRY;
2391 }
2392 }
2393 } else if (!PAGED_OUT(copy_object, copy_offset)) {
2394 /*
2395 * If PAGED_OUT is TRUE, then the page used to exist
2396 * in the copy-object, and has already been paged out.
2397 * We don't need to repeat this. If PAGED_OUT is
2398 * FALSE, then either we don't know (!pager_created,
2399 * for example) or it hasn't been paged out.
2400 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2401 * We must copy the page to the copy object.
2402 *
2403 * Allocate a page for the copy
2404 */
2405 copy_m = vm_page_alloc(copy_object, copy_offset);
2406
2407 if (copy_m == VM_PAGE_NULL) {
2408 vm_fault_page_release_page(m, &clear_absent_on_error);
2409
2410 vm_object_lock_assert_exclusive(copy_object);
2411 os_ref_release_live_locked_raw(&copy_object->ref_count,
2412 &vm_object_refgrp);
2413
2414 vm_object_unlock(copy_object);
2415 vm_fault_cleanup(object, first_m);
2416 thread_interrupt_level(interruptible_state);
2417
2418 return VM_FAULT_MEMORY_SHORTAGE;
2419 }
2420 /*
2421 * Must copy page into copy-object.
2422 */
2423 vm_page_copy(m, copy_m);
2424
2425 /*
2426 * If the old page was in use by any users
2427 * of the copy-object, it must be removed
2428 * from all pmaps. (We can't know which
2429 * pmaps use it.)
2430 */
2431 if (m->vmp_pmapped) {
2432 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2433 }
2434
2435 if (m->vmp_clustered) {
2436 VM_PAGE_COUNT_AS_PAGEIN(m);
2437 VM_PAGE_CONSUME_CLUSTERED(m);
2438 }
2439 /*
2440 * If there's a pager, then immediately
2441 * page out this page, using the "initialize"
2442 * option. Else, we use the copy.
2443 */
2444 if ((!copy_object->pager_ready)
2445 || vm_object_compressor_pager_state_get(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2446 ) {
2447 vm_page_lockspin_queues();
2448 assert(!m->vmp_cleaning);
2449 vm_page_activate(copy_m);
2450 vm_page_unlock_queues();
2451
2452 SET_PAGE_DIRTY(copy_m, TRUE);
2453 vm_page_wakeup_done(copy_object, copy_m);
2454 } else {
2455 assert(copy_m->vmp_busy == TRUE);
2456 assert(!m->vmp_cleaning);
2457
2458 /*
2459 * dirty is protected by the object lock
2460 */
2461 SET_PAGE_DIRTY(copy_m, TRUE);
2462
2463 /*
2464 * The page is already ready for pageout:
2465 * not on pageout queues and busy.
2466 * Unlock everything except the
2467 * copy_object itself.
2468 */
2469 vm_object_unlock(object);
2470
2471 /*
2472 * Write the page to the copy-object,
2473 * flushing it from the kernel.
2474 */
2475 vm_pageout_initialize_page(copy_m);
2476
2477 /*
2478 * Since the pageout may have
2479 * temporarily dropped the
2480 * copy_object's lock, we
2481 * check whether we'll have
2482 * to deallocate the hard way.
2483 */
2484 if ((copy_object->shadow != object) ||
2485 (os_ref_get_count_raw(&copy_object->ref_count) == 1)) {
2486 vm_object_unlock(copy_object);
2487 vm_object_deallocate(copy_object);
2488 vm_object_lock(object);
2489
2490 continue;
2491 }
2492 /*
2493 * Pick back up the old object's
2494 * lock. [It is safe to do so,
2495 * since it must be deeper in the
2496 * object tree.]
2497 */
2498 vm_object_lock(object);
2499 }
2500
2501 /*
2502 * Because we're pushing a page upward
2503 * in the object tree, we must restart
2504 * any faults that are waiting here.
2505 * [Note that this is an expansion of
2506 * vm_page_wakeup() that uses the THREAD_RESTART
2507 * wait result]. Can't turn off the page's
2508 * busy bit because we're not done with it.
2509 */
2510 if (m->vmp_wanted) {
2511 m->vmp_wanted = FALSE;
2512 thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2513 }
2514 }
2515 /*
2516 * The reference count on copy_object must be
2517 * at least 2: one for our extra reference,
2518 * and at least one from the outside world
2519 * (we checked that when we last locked
2520 * copy_object).
2521 */
2522 vm_object_lock_assert_exclusive(copy_object);
2523 os_ref_release_live_locked_raw(&copy_object->ref_count,
2524 &vm_object_refgrp);
2525
2526 vm_object_unlock(copy_object);
2527
2528 break;
2529 }
2530
2531 done:
2532 *result_page = m;
2533 *top_page = first_m;
2534
2535 if (m != VM_PAGE_NULL) {
2536 assert(VM_PAGE_OBJECT(m) == object);
2537
2538 retval = VM_FAULT_SUCCESS;
2539
2540 if (my_fault == DBG_PAGEIN_FAULT) {
2541 VM_PAGE_COUNT_AS_PAGEIN(m);
2542
2543 if (object->internal) {
2544 my_fault = DBG_PAGEIND_FAULT;
2545 } else {
2546 my_fault = DBG_PAGEINV_FAULT;
2547 }
2548
2549 /*
2550 * evaluate access pattern and update state;
2551 * vm_fault_deactivate_behind depends on the
2552 * state being up to date
2553 */
2554 vm_fault_is_sequential(object, offset, fault_info->behavior);
2555 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2556 } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2557 /*
2558 * we weren't called from vm_fault, so handle the
2559 * accounting here for hits in the cache
2560 */
2561 if (m->vmp_clustered) {
2562 VM_PAGE_COUNT_AS_PAGEIN(m);
2563 VM_PAGE_CONSUME_CLUSTERED(m);
2564 }
2565 vm_fault_is_sequential(object, offset, fault_info->behavior);
2566 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2567 } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2568 VM_STAT_DECOMPRESSIONS();
2569 }
2570 if (type_of_fault) {
2571 *type_of_fault = my_fault;
2572 }
2573 } else {
2574 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2575 retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2576 assert(first_m == VM_PAGE_NULL);
2577 assert(object == first_object);
2578 }
2579
2580 thread_interrupt_level(interruptible_state);
2581
2582 #if TRACEFAULTPAGE
2583 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
2584 #endif
2585 return retval;
2586
2587 backoff:
2588 thread_interrupt_level(interruptible_state);
2589
2590 if (wait_result == THREAD_INTERRUPTED) {
2591 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2592 return VM_FAULT_INTERRUPTED;
2593 }
2594 return VM_FAULT_RETRY;
2595 }
2596
2597 #if MACH_ASSERT && (XNU_PLATFORM_WatchOS || __x86_64__)
2598 #define PANIC_ON_CS_KILLED_DEFAULT true
2599 #else
2600 #define PANIC_ON_CS_KILLED_DEFAULT false
2601 #endif
2602 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2603 PANIC_ON_CS_KILLED_DEFAULT);
2604
2605 extern int proc_selfpid(void);
2606 extern char *proc_name_address(struct proc *p);
2607 extern const char *proc_best_name(struct proc *);
2608 unsigned long cs_enter_tainted_rejected = 0;
2609 unsigned long cs_enter_tainted_accepted = 0;
2610
2611 /*
2612 * CODE SIGNING:
2613 * When soft faulting a page, we have to validate the page if:
2614 * 1. the page is being mapped in user space
2615 * 2. the page hasn't already been found to be "tainted"
2616 * 3. the page belongs to a code-signed object
2617 * 4. the page has not been validated yet or has been mapped for write.
2618 */
2619 static bool
2620 vm_fault_cs_need_validation(
2621 pmap_t pmap,
2622 vm_page_t page,
2623 vm_object_t page_obj,
2624 vm_map_size_t fault_page_size,
2625 vm_map_offset_t fault_phys_offset)
2626 {
2627 if (pmap == kernel_pmap) {
2628 /* 1 - not user space */
2629 return false;
2630 }
2631 if (!page_obj->code_signed) {
2632 /* 3 - page does not belong to a code-signed object */
2633 return false;
2634 }
2635 if (fault_page_size == PAGE_SIZE) {
2636 /* looking at the whole page */
2637 assertf(fault_phys_offset == 0,
2638 "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2639 (uint64_t)fault_page_size,
2640 (uint64_t)fault_phys_offset);
2641 if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2642 /* 2 - page is all tainted */
2643 return false;
2644 }
2645 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2646 !page->vmp_wpmapped) {
2647 /* 4 - already fully validated and never mapped writable */
2648 return false;
2649 }
2650 } else {
2651 /* looking at a specific sub-page */
2652 if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2653 /* 2 - sub-page was already marked as tainted */
2654 return false;
2655 }
2656 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2657 !page->vmp_wpmapped) {
2658 /* 4 - already validated and never mapped writable */
2659 return false;
2660 }
2661 }
2662 /* page needs to be validated */
2663 return true;
2664 }
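/*
 * The checks above collapse to a small decision table (a summary of
 * the code, not additional policy):
 *
 *	kernel_pmap mapping                  -> no validation (rule 1)
 *	!page_obj->code_signed               -> no validation (rule 3)
 *	(sub)page already fully tainted      -> no validation (rule 2)
 *	validated and never mapped writable  -> no validation (rule 4)
 *	anything else                        -> validate
 */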
2665
2666
2667 static bool
2668 vm_fault_cs_page_immutable(
2669 vm_page_t m,
2670 vm_map_size_t fault_page_size,
2671 vm_map_offset_t fault_phys_offset,
2672 vm_prot_t prot __unused)
2673 {
2674 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2675 /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2676 return true;
2677 }
2678 return false;
2679 }
2680
2681 static bool
2682 vm_fault_cs_page_nx(
2683 vm_page_t m,
2684 vm_map_size_t fault_page_size,
2685 vm_map_offset_t fault_phys_offset)
2686 {
2687 return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2688 }
2689
2690 /*
2691 * Check if the page being entered into the pmap violates code signing.
2692 */
2693 static kern_return_t
2694 vm_fault_cs_check_violation(
2695 bool cs_bypass,
2696 vm_object_t object,
2697 vm_page_t m,
2698 pmap_t pmap,
2699 vm_prot_t prot,
2700 vm_prot_t caller_prot,
2701 vm_map_size_t fault_page_size,
2702 vm_map_offset_t fault_phys_offset,
2703 vm_object_fault_info_t fault_info,
2704 bool map_is_switched,
2705 bool map_is_switch_protected,
2706 bool *cs_violation)
2707 {
2708 #if !CODE_SIGNING_MONITOR
2709 #pragma unused(caller_prot)
2710 #pragma unused(fault_info)
2711 #endif /* !CODE_SIGNING_MONITOR */
2712
2713 int cs_enforcement_enabled;
2714 if (!cs_bypass &&
2715 vm_fault_cs_need_validation(pmap, m, object,
2716 fault_page_size, fault_phys_offset)) {
2717 vm_object_lock_assert_exclusive(object);
2718
2719 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2720 vm_cs_revalidates++;
2721 }
2722
2723 /* VM map is locked, so 1 ref will remain on VM object -
2724 * so no harm if vm_page_validate_cs drops the object lock */
2725
2726 #if CODE_SIGNING_MONITOR
2727 if (fault_info->csm_associated &&
2728 csm_enabled() &&
2729 !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2730 !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
2731 !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
2732 (prot & VM_PROT_EXECUTE) &&
2733 (caller_prot & VM_PROT_EXECUTE)) {
2734 /*
2735 * When we have a code signing monitor, the monitor will evaluate the code signature
2736 * for any executable page mapping. No need for the VM to also validate the page.
2737 * In the code signing monitor we trust :)
2738 */
2739 vm_cs_defer_to_csm++;
2740 } else {
2741 vm_cs_defer_to_csm_not++;
2742 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2743 }
2744 #else /* CODE_SIGNING_MONITOR */
2745 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2746 #endif /* CODE_SIGNING_MONITOR */
2747 }
2748
2749 /* If the map is switched, and is switch-protected, we must protect
2750 * some pages from being write-faulted: immutable pages because by
2751 * definition they may not be written, and executable pages because that
2752 * would provide a way to inject unsigned code.
2753 * If the page is immutable, we can simply return. However, we can't
2754 * immediately determine whether a page is executable anywhere. But,
2755 * we can disconnect it everywhere and remove the executable protection
2756 * from the current map. We do that below right before we do the
2757 * PMAP_ENTER.
2758 */
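/*
 * "We do that below" lands in the caller's PMAP_ENTER path: once a
 * violation marks the page for disconnection, the page is unmapped
 * from every pmap and entered without execute rights, roughly
 * (hedged sketch, not the literal caller code):
 *
 *	if (must_disconnect) {
 *		pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
 *		prot &= ~VM_PROT_EXECUTE;
 *	}
 */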
2759 if (pmap == kernel_pmap) {
2760 /* kernel fault: cs_enforcement does not apply */
2761 cs_enforcement_enabled = 0;
2762 } else {
2763 cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2764 }
2765
2766 if (cs_enforcement_enabled && map_is_switched &&
2767 map_is_switch_protected &&
2768 vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2769 (prot & VM_PROT_WRITE)) {
2770 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2771 return KERN_CODESIGN_ERROR;
2772 }
2773
2774 if (cs_enforcement_enabled &&
2775 vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2776 (prot & VM_PROT_EXECUTE)) {
2777 if (cs_debug) {
2778 printf("page marked to be NX, not letting it be mapped EXEC\n");
2779 }
2780 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2781 return KERN_CODESIGN_ERROR;
2782 }
2783
2784 /* A page could be tainted, or pose a risk of being tainted later.
2785 * Check whether the receiving process wants it, and make it feel
2786 * the consequences (that happens in cs_invalid_page()).
2787 * For CS Enforcement, two other conditions will
2788 * cause that page to be tainted as well:
2789 * - pmapping an unsigned page executable - this means unsigned code;
2790 * - writeable mapping of a validated page - the content of that page
2791 * can be changed without the kernel noticing, therefore unsigned
2792 * code can be created
2793 */
2794 if (cs_bypass) {
2795 /* code-signing is bypassed */
2796 *cs_violation = FALSE;
2797 } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2798 /* tainted page */
2799 *cs_violation = TRUE;
2800 } else if (!cs_enforcement_enabled) {
2801 /* no further code-signing enforcement */
2802 *cs_violation = FALSE;
2803 } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2804 ((prot & VM_PROT_WRITE) ||
2805 m->vmp_wpmapped)) {
2806 /*
2807 * The page should be immutable, but is in danger of being
2808 * modified.
2809 * This is the case where we want policy from the code
2810 * directory - is the page immutable or not? For now we have
2811 * to assume that code pages will be immutable, data pages not.
2812 * We'll assume a page is a code page if it has a code directory
2813 * and we fault for execution.
2814 * That is good enough since if we faulted the code page for
2815 * writing in another map before, it is wpmapped; if we fault
2816 * it for writing in this map later it will also be faulted for
2817 * executing at the same time; and if we fault for writing in
2818 * another map later, we will disconnect it from this pmap so
2819 * we'll notice the change.
2820 */
2821 *cs_violation = TRUE;
2822 } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2823 (prot & VM_PROT_EXECUTE)
2824 #if CODE_SIGNING_MONITOR
2825 /*
2826 * Executable pages will be validated by the code signing monitor. If the
2827 * code signing monitor is turned off, then this is a code-signing violation.
2828 */
2829 && !csm_enabled()
2830 #endif /* CODE_SIGNING_MONITOR */
2831 ) {
2832 *cs_violation = TRUE;
2833 } else {
2834 *cs_violation = FALSE;
2835 }
2836 return KERN_SUCCESS;
2837 }
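/*
 * Net effect of the ladder above (summary, not additional policy): a
 * page is treated as a violation when it is tainted, when an immutable
 * page is about to become writable (or was already mapped writable),
 * or when an unvalidated page is being mapped executable with no code
 * signing monitor to vouch for it; bypassed or unenforced faults never
 * violate.
 */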
2838
2839 /*
2840 * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2841 * @param must_disconnect This value will be set to true if the caller must disconnect
2842 * this page.
2843 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2844 */
2845 static kern_return_t
2846 vm_fault_cs_handle_violation(
2847 vm_object_t object,
2848 vm_page_t m,
2849 pmap_t pmap,
2850 vm_prot_t prot,
2851 vm_map_offset_t vaddr,
2852 vm_map_size_t fault_page_size,
2853 vm_map_offset_t fault_phys_offset,
2854 bool map_is_switched,
2855 bool map_is_switch_protected,
2856 bool *must_disconnect)
2857 {
2858 #if !MACH_ASSERT
2859 #pragma unused(pmap)
2860 #pragma unused(map_is_switch_protected)
2861 #endif /* !MACH_ASSERT */
2862 /*
2863 * We will have a tainted page. Have to handle the special case
2864 * of a switched map now. If the map is not switched, standard
2865 * procedure applies - call cs_invalid_page().
2866 * If the map is switched, the real owner is invalid already.
2867 * There is no point in invalidating the switching process since
2868 * it will not be executing from the map. So we don't call
2869 * cs_invalid_page() in that case.
2870 */
2871 boolean_t reject_page, cs_killed;
2872 kern_return_t kr;
2873 if (map_is_switched) {
2874 assert(pmap == vm_map_pmap(current_thread()->map));
2875 assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2876 reject_page = FALSE;
2877 } else {
2878 if (cs_debug > 5) {
2879 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2880 object->code_signed ? "yes" : "no",
2881 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2882 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2883 m->vmp_wpmapped ? "yes" : "no",
2884 (int)prot);
2885 }
2886 reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2887 }
2888
2889 if (reject_page) {
2890 /* reject the invalid page: abort the page fault */
2891 int pid;
2892 const char *procname;
2893 task_t task;
2894 vm_object_t file_object, shadow;
2895 vm_object_offset_t file_offset;
2896 char *pathname, *filename;
2897 vm_size_t pathname_len, filename_len;
2898 boolean_t truncated_path;
2899 #define __PATH_MAX 1024
2900 struct timespec mtime, cs_mtime;
2901 int shadow_depth;
2902 os_reason_t codesigning_exit_reason = OS_REASON_NULL;
2903
2904 kr = KERN_CODESIGN_ERROR;
2905 cs_enter_tainted_rejected++;
2906
2907 /* get process name and pid */
2908 procname = "?";
2909 task = current_task();
2910 pid = proc_selfpid();
2911 if (get_bsdtask_info(task) != NULL) {
2912 procname = proc_name_address(get_bsdtask_info(task));
2913 }
2914
2915 /* get file's VM object */
2916 file_object = object;
2917 file_offset = m->vmp_offset;
2918 for (shadow = file_object->shadow,
2919 shadow_depth = 0;
2920 shadow != VM_OBJECT_NULL;
2921 shadow = file_object->shadow,
2922 shadow_depth++) {
2923 vm_object_lock_shared(shadow);
2924 if (file_object != object) {
2925 vm_object_unlock(file_object);
2926 }
2927 file_offset += file_object->vo_shadow_offset;
2928 file_object = shadow;
2929 }
2930
2931 mtime.tv_sec = 0;
2932 mtime.tv_nsec = 0;
2933 cs_mtime.tv_sec = 0;
2934 cs_mtime.tv_nsec = 0;
2935
2936 /* get file's pathname and/or filename */
2937 pathname = NULL;
2938 filename = NULL;
2939 pathname_len = 0;
2940 filename_len = 0;
2941 truncated_path = FALSE;
2942 /* no pager -> no file -> no pathname, use "<nil>" in that case */
2943 if (file_object->pager != NULL) {
2944 pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2945 if (pathname) {
2946 pathname[0] = '\0';
2947 pathname_len = __PATH_MAX;
2948 filename = pathname + pathname_len;
2949 filename_len = __PATH_MAX;
2950
2951 if (vnode_pager_get_object_name(file_object->pager,
2952 pathname,
2953 pathname_len,
2954 filename,
2955 filename_len,
2956 &truncated_path) == KERN_SUCCESS) {
2957 /* safety first... */
2958 pathname[__PATH_MAX - 1] = '\0';
2959 filename[__PATH_MAX - 1] = '\0';
2960
2961 vnode_pager_get_object_mtime(file_object->pager,
2962 &mtime,
2963 &cs_mtime);
2964 } else {
2965 kfree_data(pathname, __PATH_MAX * 2);
2966 pathname = NULL;
2967 filename = NULL;
2968 pathname_len = 0;
2969 filename_len = 0;
2970 truncated_path = FALSE;
2971 }
2972 }
2973 }
2974 printf("CODE SIGNING: process %d[%s]: "
2975 "rejecting invalid page at address 0x%llx "
2976 "from offset 0x%llx in file \"%s%s%s\" "
2977 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
2978 "(signed:%d validated:%d tainted:%d nx:%d "
2979 "wpmapped:%d dirty:%d depth:%d)\n",
2980 pid, procname, (addr64_t) vaddr,
2981 file_offset,
2982 (pathname ? pathname : "<nil>"),
2983 (truncated_path ? "/.../" : ""),
2984 (truncated_path ? filename : ""),
2985 cs_mtime.tv_sec, cs_mtime.tv_nsec,
2986 ((cs_mtime.tv_sec == mtime.tv_sec &&
2987 cs_mtime.tv_nsec == mtime.tv_nsec)
2988 ? "=="
2989 : "!="),
2990 mtime.tv_sec, mtime.tv_nsec,
2991 object->code_signed,
2992 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
2993 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
2994 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
2995 m->vmp_wpmapped,
2996 m->vmp_dirty,
2997 shadow_depth);
2998
2999 /*
3000 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
3001 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
3002 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
3003 * will deal with the segmentation fault.
3004 */
3005 if (cs_killed) {
3006 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
3007 pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3008
3009 codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3010 if (codesigning_exit_reason == NULL) {
3011 printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
3012 } else {
3013 mach_vm_address_t data_addr = 0;
3014 struct codesigning_exit_reason_info *ceri = NULL;
3015 uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
3016
3017 if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
3018 printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
3019 } else {
3020 if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
3021 EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
3022 ceri = (struct codesigning_exit_reason_info *)data_addr;
3023 static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
3024
3025 ceri->ceri_virt_addr = vaddr;
3026 ceri->ceri_file_offset = file_offset;
3027 if (pathname) {
3028 strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
3029 } else {
3030 ceri->ceri_pathname[0] = '\0';
3031 }
3032 if (filename) {
3033 strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
3034 } else {
3035 ceri->ceri_filename[0] = '\0';
3036 }
3037 ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
3038 ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
3039 ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
3040 ceri->ceri_page_modtime_secs = mtime.tv_sec;
3041 ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
3042 ceri->ceri_object_codesigned = (object->code_signed);
3043 ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
3044 ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
3045 ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
3046 ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
3047 ceri->ceri_page_slid = 0;
3048 ceri->ceri_page_dirty = (m->vmp_dirty);
3049 ceri->ceri_page_shadow_depth = shadow_depth;
3050 } else {
3051 #if DEBUG || DEVELOPMENT
3052 panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
3053 #else
3054 printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
3055 #endif /* DEBUG || DEVELOPMENT */
3056 /* Free the buffer */
3057 os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
3058 }
3059 }
3060 }
3061
3062 set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
3063 }
3064 if (panic_on_cs_killed &&
3065 object->object_is_shared_cache) {
3066 char *tainted_contents;
3067 vm_map_offset_t src_vaddr;
3068 src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
3069 tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
3070 bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
3071 printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
3072 panic("CODE SIGNING: process %d[%s]: "
3073 "rejecting invalid page (phys#0x%x) at address 0x%llx "
3074 "from offset 0x%llx in file \"%s%s%s\" "
3075 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3076 "(signed:%d validated:%d tainted:%d nx:%d"
3077 "wpmapped:%d dirty:%d depth:%d)\n",
3078 pid, procname,
3079 VM_PAGE_GET_PHYS_PAGE(m),
3080 (addr64_t) vaddr,
3081 file_offset,
3082 (pathname ? pathname : "<nil>"),
3083 (truncated_path ? "/.../" : ""),
3084 (truncated_path ? filename : ""),
3085 cs_mtime.tv_sec, cs_mtime.tv_nsec,
3086 ((cs_mtime.tv_sec == mtime.tv_sec &&
3087 cs_mtime.tv_nsec == mtime.tv_nsec)
3088 ? "=="
3089 : "!="),
3090 mtime.tv_sec, mtime.tv_nsec,
3091 object->code_signed,
3092 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3093 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3094 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3095 m->vmp_wpmapped,
3096 m->vmp_dirty,
3097 shadow_depth);
3098 }
3099
3100 if (file_object != object) {
3101 vm_object_unlock(file_object);
3102 }
3103 if (pathname_len != 0) {
3104 kfree_data(pathname, __PATH_MAX * 2);
3105 pathname = NULL;
3106 filename = NULL;
3107 }
3108 } else {
3109 /* proceed with the invalid page */
3110 kr = KERN_SUCCESS;
3111 if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3112 !object->code_signed) {
3113 /*
3114 * This page has not been (fully) validated but
3115 * does not belong to a code-signed object
3116 * so it should not be forcefully considered
3117 * as tainted.
3118 * We're just concerned about it here because
3119 * we've been asked to "execute" it but that
3120 * does not mean that it should cause other
3121 * accesses to fail.
3122 * This happens when a debugger sets a
3123 * breakpoint and we then execute code in
3124 * that page. Marking the page as "tainted"
3125 * would cause any inspection tool ("leaks",
3126 * "vmmap", "CrashReporter", ...) to get killed
3127 * due to code-signing violation on that page,
3128 * even though they're just reading it and not
3129 * executing from it.
3130 */
3131 } else {
3132 /*
3133 * Page might have been tainted before or not;
3134 * now it definitively is. If the page wasn't
3135 * tainted, we must disconnect it from all
3136 * pmaps later, to force existing mappings
3137 * through that code path for re-consideration
3138 * of the validity of that page.
3139 */
3140 if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3141 *must_disconnect = TRUE;
3142 VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3143 }
3144 }
3145 cs_enter_tainted_accepted++;
3146 }
3147 if (kr != KERN_SUCCESS) {
3148 if (cs_debug) {
3149 printf("CODESIGNING: vm_fault_enter(0x%llx): "
3150 "*** INVALID PAGE ***\n",
3151 (long long)vaddr);
3152 }
3153 #if !SECURE_KERNEL
3154 if (cs_enforcement_panic) {
3155 panic("CODESIGNING: panicking on invalid page");
3156 }
3157 #endif
3158 }
3159 return kr;
3160 }
3161
3162 /*
3163 * Check that the code signature is valid for the given page being inserted into
3164 * the pmap.
3165 *
3166 * @param must_disconnect This value will be set to true if the caller must disconnect
3167 * this page.
3168 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3169 */
3170 static kern_return_t
3171 vm_fault_validate_cs(
3172 bool cs_bypass,
3173 vm_object_t object,
3174 vm_page_t m,
3175 pmap_t pmap,
3176 vm_map_offset_t vaddr,
3177 vm_prot_t prot,
3178 vm_prot_t caller_prot,
3179 vm_map_size_t fault_page_size,
3180 vm_map_offset_t fault_phys_offset,
3181 vm_object_fault_info_t fault_info,
3182 bool *must_disconnect)
3183 {
3184 bool map_is_switched, map_is_switch_protected, cs_violation;
3185 kern_return_t kr;
3186 /* Validate code signature if necessary. */
3187 map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3188 (pmap == vm_map_pmap(current_thread()->map)));
3189 map_is_switch_protected = current_thread()->map->switch_protect;
3190 kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3191 prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3192 map_is_switched, map_is_switch_protected, &cs_violation);
3193 if (kr != KERN_SUCCESS) {
3194 return kr;
3195 }
3196 if (cs_violation) {
3197 kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3198 fault_page_size, fault_phys_offset,
3199 map_is_switched, map_is_switch_protected, must_disconnect);
3200 }
3201 return kr;
3202 }
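/*
 * Shape of a call from the fault-enter path (hedged sketch; the
 * argument values are placeholders for what the real caller computes):
 *
 *	bool must_disconnect = false;
 *	kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
 *	    prot, caller_prot, fault_page_size, fault_phys_offset,
 *	    fault_info, &must_disconnect);
 *	if (kr != KERN_SUCCESS) {
 *		... abort the fault: the page failed code signing ...
 *	}
 *	if (must_disconnect) {
 *		... pmap_disconnect() the page before entering it ...
 *	}
 */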
3203
3204 /*
3205 * Enqueue the page on the appropriate paging queue.
3206 */
3207 static void
3208 vm_fault_enqueue_page(
3209 vm_object_t object,
3210 vm_page_t m,
3211 bool wired,
3212 bool change_wiring,
3213 vm_tag_t wire_tag,
3214 bool no_cache,
3215 int *type_of_fault,
3216 kern_return_t kr)
3217 {
3218 assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3219 boolean_t page_queues_locked = FALSE;
3220 boolean_t previously_pmapped = m->vmp_pmapped;
3221 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
3222 MACRO_BEGIN \
3223 if (! page_queues_locked) { \
3224 page_queues_locked = TRUE; \
3225 vm_page_lockspin_queues(); \
3226 } \
3227 MACRO_END
3228 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
3229 MACRO_BEGIN \
3230 if (page_queues_locked) { \
3231 page_queues_locked = FALSE; \
3232 vm_page_unlock_queues(); \
3233 } \
3234 MACRO_END
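/*
 * These helpers let the queue-placement logic below take the page
 * queues lock lazily, at most once, and release it exactly once at the
 * end, regardless of which of the many branches actually needed it.
 */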
3235
3236 vm_page_update_special_state(m);
3237 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3238 /*
3239 * Compressor pages are neither wired
3240 * nor pageable and should never change.
3241 */
3242 assert(object == compressor_object);
3243 } else if (change_wiring) {
3244 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3245
3246 if (wired) {
3247 if (kr == KERN_SUCCESS) {
3248 vm_page_wire(m, wire_tag, TRUE);
3249 }
3250 } else {
3251 vm_page_unwire(m, TRUE);
3252 }
3253 /* we keep the page queues lock, if we need it later */
3254 } else {
3255 if (object->internal == TRUE) {
3256 /*
3257 * don't allow anonymous pages on
3258 * the speculative queues
3259 */
3260 no_cache = FALSE;
3261 }
3262 if (kr != KERN_SUCCESS) {
3263 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3264 vm_page_deactivate(m);
3265 /* we keep the page queues lock, if we need it later */
3266 } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3267 (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3268 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3269 ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3270 !VM_PAGE_WIRED(m)) {
3271 if (vm_page_local_q &&
3272 (*type_of_fault == DBG_COW_FAULT ||
3273 *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3274 struct vpl *lq;
3275 uint32_t lid;
3276
3277 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3278
3279 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3280 vm_object_lock_assert_exclusive(object);
3281
3282 /*
3283 * we got a local queue to stuff this
3284 * new page on...
3285 * it's safe to manipulate local and
3286 * local_id at this point since we're
3287 * behind an exclusive object lock and
3288 * the page is not on any global queue.
3289 *
3290 * we'll use the current cpu number to
3291 * select the queue; note that we don't
3292 * need to disable preemption... we're
3293 * going to be behind the local queue's
3294 * lock to do the real work
3295 */
3296 lid = cpu_number();
3297
3298 lq = zpercpu_get_cpu(vm_page_local_q, lid);
3299
3300 VPL_LOCK(&lq->vpl_lock);
3301
3302 vm_page_check_pageable_safe(m);
3303 vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3304 m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3305 m->vmp_local_id = lid;
3306 lq->vpl_count++;
3307
3308 if (object->internal) {
3309 lq->vpl_internal_count++;
3310 } else {
3311 lq->vpl_external_count++;
3312 }
3313
3314 VPL_UNLOCK(&lq->vpl_lock);
3315
3316 if (lq->vpl_count > vm_page_local_q_soft_limit) {
3317 /*
3318 * we're beyond the soft limit
3319 * for the local queue
3320 * vm_page_reactivate_local will
3321 * 'try' to take the global page
3322 * queue lock... if it can't
3323 * that's ok... we'll let the
3324 * queue continue to grow up
3325 * to the hard limit... at that
3326 * point we'll wait for the
3327 * lock... once we've got the
3328 * lock, we'll transfer all of
3329 * the pages from the local
3330 * queue to the global active
3331 * queue
3332 */
3333 vm_page_reactivate_local(lid, FALSE, FALSE);
3334 }
3335 } else {
3336 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3337
3338 /*
3339 * test again now that we hold the
3340 * page queue lock
3341 */
3342 if (!VM_PAGE_WIRED(m)) {
3343 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3344 vm_page_queues_remove(m, FALSE);
3345
3346 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3347 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3348 }
3349
3350 if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3351 no_cache) {
3352 /*
3353 * If this is a no_cache mapping
3354 * and the page has never been
3355 * mapped before or was
3356 * previously a no_cache page,
3357 * then we want to leave pages
3358 * in the speculative state so
3359 * that they can be readily
3360 * recycled if free memory runs
3361 * low. Otherwise the page is
3362 * activated as normal.
3363 */
3364
3365 if (no_cache &&
3366 (!previously_pmapped ||
3367 m->vmp_no_cache)) {
3368 m->vmp_no_cache = TRUE;
3369
3370 if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3371 vm_page_speculate(m, FALSE);
3372 }
3373 } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3374 vm_page_activate(m);
3375 }
3376 }
3377 }
3378 /* we keep the page queues lock, if we need it later */
3379 }
3380 }
3381 }
3382 /* we're done with the page queues lock, if we ever took it */
3383 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3384 }
3385
3386 /*
3387 * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3388 * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys
3389 * before being inserted into the pmap.
3390 */
3391 static bool
3392 vm_fault_enter_set_mapped(
3393 vm_object_t object,
3394 vm_page_t m,
3395 vm_prot_t prot,
3396 vm_prot_t fault_type)
3397 {
3398 bool page_needs_sync = false;
3399 /*
3400 * NOTE: we may only hold the vm_object lock SHARED
3401 * at this point, so we need the phys_page lock to
3402 * properly serialize updating the pmapped and
3403 * xpmapped bits
3404 */
3405 if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3406 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3407
3408 pmap_lock_phys_page(phys_page);
3409 m->vmp_pmapped = TRUE;
3410
3411 if (!m->vmp_xpmapped) {
3412 m->vmp_xpmapped = TRUE;
3413
3414 pmap_unlock_phys_page(phys_page);
3415
3416 if (!object->internal) {
3417 OSAddAtomic(1, &vm_page_xpmapped_external_count);
3418 }
3419
3420 #if defined(__arm64__)
3421 page_needs_sync = true;
3422 #else
3423 if (object->internal &&
3424 object->pager != NULL) {
3425 /*
3426 * This page could have been
3427 * uncompressed by the
3428 * compressor pager and its
3429 * contents might be only in
3430 * the data cache.
3431 * Since it's being mapped for
3432 * "execute" for the fist time,
3433 * make sure the icache is in
3434 * sync.
3435 */
3436 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3437 page_needs_sync = true;
3438 }
3439 #endif
3440 } else {
3441 pmap_unlock_phys_page(phys_page);
3442 }
3443 } else {
3444 if (m->vmp_pmapped == FALSE) {
3445 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3446
3447 pmap_lock_phys_page(phys_page);
3448 m->vmp_pmapped = TRUE;
3449 pmap_unlock_phys_page(phys_page);
3450 }
3451 }
3452
3453 if (fault_type & VM_PROT_WRITE) {
3454 if (m->vmp_wpmapped == FALSE) {
3455 vm_object_lock_assert_exclusive(object);
3456 if (!object->internal && object->pager) {
3457 task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3458 }
3459 m->vmp_wpmapped = TRUE;
3460 }
3461 }
3462 return page_needs_sync;
3463 }
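/*
 * Illustrative interleaving showing why the phys_page lock is taken
 * above (a sketch; the two threads are hypothetical, not from this
 * file). Both threads may hold the vm_object lock SHARED:
 *
 *	thread A                           thread B
 *	pmap_lock_phys_page(pp)
 *	sees vmp_xpmapped == FALSE         ...blocks on the phys lock...
 *	sets vmp_xpmapped = TRUE
 *	pmap_unlock_phys_page(pp)          sees vmp_xpmapped == TRUE
 *	counts/syncs exactly once          skips the count and the sync
 *
 * Without that lock, both threads could observe vmp_xpmapped == FALSE
 * and double-count vm_page_xpmapped_external_count or sync twice.
 */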
3464
3465 /*
3466 * wrappers for pmap_enter_options()
3467 */
3468 kern_return_t
3469 pmap_enter_object_options_check(
3470 pmap_t pmap,
3471 vm_map_address_t virtual_address,
3472 vm_map_offset_t fault_phys_offset,
3473 vm_object_t obj,
3474 ppnum_t pn,
3475 vm_prot_t protection,
3476 vm_prot_t fault_type,
3477 boolean_t wired,
3478 unsigned int options)
3479 {
3480 unsigned int flags = 0;
3481 unsigned int extra_options = 0;
3482
3483 if (obj->internal) {
3484 extra_options |= PMAP_OPTIONS_INTERNAL;
3485 }
3486 pmap_paddr_t physical_address = (pmap_paddr_t)ptoa(pn) + fault_phys_offset;
3487
3488
3489 return pmap_enter_options_addr(pmap,
3490 virtual_address,
3491 physical_address,
3492 protection,
3493 fault_type,
3494 flags,
3495 wired,
3496 options | extra_options,
3497 NULL,
3498 PMAP_MAPPING_TYPE_INFER);
3499 }
3500
3501 kern_return_t
3502 pmap_enter_options_check(
3503 pmap_t pmap,
3504 vm_map_address_t virtual_address,
3505 vm_map_offset_t fault_phys_offset,
3506 vm_page_t page,
3507 vm_prot_t protection,
3508 vm_prot_t fault_type,
3509 boolean_t wired,
3510 unsigned int options)
3511 {
3512 if (page->vmp_error) {
3513 return KERN_MEMORY_FAILURE;
3514 }
3515 vm_object_t obj = VM_PAGE_OBJECT(page);
3516 if (page->vmp_reusable || obj->all_reusable) {
3517 options |= PMAP_OPTIONS_REUSABLE;
3518 }
3519 return pmap_enter_object_options_check(
3520 pmap,
3521 virtual_address,
3522 fault_phys_offset,
3523 obj,
3524 VM_PAGE_GET_PHYS_PAGE(page),
3525 protection,
3526 fault_type,
3527 wired,
3528 options);
3529 }
3530
3531 kern_return_t
3532 pmap_enter_check(
3533 pmap_t pmap,
3534 vm_map_address_t virtual_address,
3535 vm_page_t page,
3536 vm_prot_t protection,
3537 vm_prot_t fault_type,
3538 boolean_t wired)
3539 {
3540 return pmap_enter_options_check(pmap,
3541 virtual_address,
3542 0 /* fault_phys_offset */,
3543 page,
3544 protection,
3545 fault_type,
3546 wired,
3547 0 /* options */);
3548 }
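/*
 * Illustrative use of the wrappers above (a sketch, not an actual call
 * site in this file): enter page "m" read-only and unwired:
 *
 *	kr = pmap_enter_check(pmap, vaddr, m, VM_PROT_READ,
 *	    VM_PROT_READ, FALSE);
 *
 * pmap_enter_check() forwards to pmap_enter_options_check() with a zero
 * fault_phys_offset and no options; that layer rejects vmp_error pages
 * and adds PMAP_OPTIONS_REUSABLE when appropriate, and
 * pmap_enter_object_options_check() then computes the physical address
 * and makes the actual pmap_enter_options_addr() call.
 */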
3549
3550 /*
3551 * Try to enter the given page into the pmap.
3552 * Will retry without execute permission if the code signing monitor is enabled and
3553 * we encounter a codesigning failure on a non-execute fault.
3554 */
3555 static kern_return_t
3556 vm_fault_attempt_pmap_enter(
3557 pmap_t pmap,
3558 vm_map_offset_t vaddr,
3559 vm_map_size_t fault_page_size,
3560 vm_map_offset_t fault_phys_offset,
3561 vm_page_t m,
3562 vm_prot_t *prot,
3563 vm_prot_t caller_prot,
3564 vm_prot_t fault_type,
3565 bool wired,
3566 int pmap_options)
3567 {
3568 #if !CODE_SIGNING_MONITOR
3569 #pragma unused(caller_prot)
3570 #endif /* !CODE_SIGNING_MONITOR */
3571
3572 kern_return_t kr;
3573 if (fault_page_size != PAGE_SIZE) {
3574 DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3575 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3576 fault_phys_offset < PAGE_SIZE),
3577 "0x%llx\n", (uint64_t)fault_phys_offset);
3578 } else {
3579 assertf(fault_phys_offset == 0,
3580 "0x%llx\n", (uint64_t)fault_phys_offset);
3581 }
3582
3583 kr = pmap_enter_options_check(pmap, vaddr,
3584 fault_phys_offset,
3585 m, *prot, fault_type,
3586 wired, pmap_options);
3587
3588 #if CODE_SIGNING_MONITOR
3589 /*
3590 * Retry without execute permission if we encountered a codesigning
3591 * failure on a non-execute fault. This allows applications which
3592 * don't actually need to execute code to still map it for read access.
3593 */
3594 if (kr == KERN_CODESIGN_ERROR &&
3595 csm_enabled() &&
3596 (*prot & VM_PROT_EXECUTE) &&
3597 !(caller_prot & VM_PROT_EXECUTE)) {
3598 *prot &= ~VM_PROT_EXECUTE;
3599 kr = pmap_enter_options_check(pmap, vaddr,
3600 fault_phys_offset,
3601 m, *prot, fault_type,
3602 wired, pmap_options);
3603 }
3604 #endif /* CODE_SIGNING_MONITOR */
3605
3606 return kr;
3607 }
3608
3609 /*
3610 * Enter the given page into the pmap.
3611 * The map must be locked shared.
3612 * The vm object must NOT be locked.
3613 *
3614 * @param need_retry if not null, avoid making a (potentially) blocking call into
3615 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3616 */
3617 static kern_return_t
3618 vm_fault_pmap_enter(
3619 pmap_t pmap,
3620 vm_map_offset_t vaddr,
3621 vm_map_size_t fault_page_size,
3622 vm_map_offset_t fault_phys_offset,
3623 vm_page_t m,
3624 vm_prot_t *prot,
3625 vm_prot_t caller_prot,
3626 vm_prot_t fault_type,
3627 bool wired,
3628 int pmap_options,
3629 boolean_t *need_retry)
3630 {
3631 kern_return_t kr;
3632 if (need_retry != NULL) {
3633 /*
3634 * Although we don't hold a lock on this object, we hold a lock
3635 * on the top object in the chain. To prevent a deadlock, we
3636 * can't allow the pmap layer to block.
3637 */
3638 pmap_options |= PMAP_OPTIONS_NOWAIT;
3639 }
3640 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3641 fault_page_size, fault_phys_offset,
3642 m, prot, caller_prot, fault_type, wired, pmap_options);
3643 if (kr == KERN_RESOURCE_SHORTAGE) {
3644 if (need_retry) {
3645 /*
3646 * There's nothing we can do here since we hold the
3647 * lock on the top object in the chain. The caller
3648 * will need to deal with this by dropping that lock and retrying.
3649 */
3650 *need_retry = TRUE;
3651 vm_pmap_enter_retried++;
3652 }
3653 }
3654 return kr;
3655 }
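/*
 * Sketch of the need_retry protocol as a caller would implement it
 * (vm_fault_internal() below does the real version of this):
 *
 *	boolean_t need_retry = FALSE;
 *	kr = vm_fault_pmap_enter(..., &need_retry);
 *	...release all locks...
 *	if (need_retry) {
 *		(void)pmap_enter_options(pmap, vaddr, 0, 0, 0, 0, 0,
 *		    PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
 *		goto RetryFault;
 *	}
 *
 * i.e. the page table is expanded with no locks held, then the fault
 * is re-driven so the mapping can be entered without blocking.
 */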
3656
3657 /*
3658 * Enter the given page into the pmap.
3659 * The vm map must be locked shared.
3660 * The vm object must be locked exclusive, unless this is a soft fault.
3661 * For a soft fault, the object must be locked shared or exclusive.
3662 *
3663 * @param need_retry if not null, avoid making a (potentially) blocking call into
3664 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3665 */
3666 static kern_return_t
3667 vm_fault_pmap_enter_with_object_lock(
3668 vm_object_t object,
3669 pmap_t pmap,
3670 vm_map_offset_t vaddr,
3671 vm_map_size_t fault_page_size,
3672 vm_map_offset_t fault_phys_offset,
3673 vm_page_t m,
3674 vm_prot_t *prot,
3675 vm_prot_t caller_prot,
3676 vm_prot_t fault_type,
3677 bool wired,
3678 int pmap_options,
3679 boolean_t *need_retry,
3680 uint8_t *object_lock_type)
3681 {
3682 kern_return_t kr;
3683 /*
3684 * Prevent a deadlock by not
3685 * holding the object lock if we need to wait for a page in
3686 * pmap_enter() - <rdar://problem/7138958>
3687 */
3688 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3689 fault_page_size, fault_phys_offset,
3690 m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3691 #if __x86_64__
3692 if (kr == KERN_INVALID_ARGUMENT &&
3693 pmap == PMAP_NULL &&
3694 wired) {
3695 /*
3696 * Wiring a page in a pmap-less VM map:
3697 * VMware's "vmmon" kernel extension does this
3698 * to grab pages.
3699 * Let it proceed even though the PMAP_ENTER() failed.
3700 */
3701 kr = KERN_SUCCESS;
3702 }
3703 #endif /* __x86_64__ */
3704
3705 if (kr == KERN_RESOURCE_SHORTAGE) {
3706 if (need_retry) {
3707 /*
3708 * this will be non-null in the case where we hold the lock
3709 * on the top-object in this chain... we can't just drop
3710 * the lock on the object we're inserting the page into
3711 * and recall the PMAP_ENTER since we can still cause
3712 * a deadlock if one of the critical paths tries to
3713 * acquire the lock on the top-object and we're blocked
3714 * in PMAP_ENTER waiting for memory... our only recourse
3715 * is to deal with it at a higher level where we can
3716 * drop both locks.
3717 */
3718 *need_retry = TRUE;
3719 vm_pmap_enter_retried++;
3720 goto done;
3721 }
3722 /*
3723 * The nonblocking version of pmap_enter did not succeed,
3724 * and we don't need to drop other locks and retry
3725 * at the level above us, so
3726 * use the blocking version instead. This requires marking
3727 * the page busy and unlocking the object.
3728 */
3729 boolean_t was_busy = m->vmp_busy;
3730
3731 vm_object_lock_assert_exclusive(object);
3732
3733 m->vmp_busy = TRUE;
3734 vm_object_unlock(object);
3735
3736 kr = pmap_enter_options_check(pmap, vaddr,
3737 fault_phys_offset,
3738 m, *prot, fault_type,
3739 wired, pmap_options);
3740
3741 assert(VM_PAGE_OBJECT(m) == object);
3742
3743 /* Take the object lock again. */
3744 vm_object_lock(object);
3745
3746 /* If the page was busy, someone else will wake it up.
3747 * Otherwise, we have to do it now. */
3748 assert(m->vmp_busy);
3749 if (!was_busy) {
3750 vm_page_wakeup_done(object, m);
3751 }
3752 vm_pmap_enter_blocked++;
3753 }
3754
3755 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
3756 if ((*prot & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
3757 if (*object_lock_type == OBJECT_LOCK_SHARED) {
3758 boolean_t was_busy = m->vmp_busy;
3759 m->vmp_busy = TRUE;
3760
3761 *object_lock_type = OBJECT_LOCK_EXCLUSIVE;
3762
3763 if (vm_object_lock_upgrade(object) == FALSE) {
3764 vm_object_lock(object);
3765 }
3766
3767 if (!was_busy) {
3768 vm_page_wakeup_done(object, m);
3769 }
3770 }
3771 vm_object_lock_assert_exclusive(object);
3772 vm_page_lockspin_queues();
3773 m->vmp_unmodified_ro = false;
3774 vm_page_unlock_queues();
3775 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
3776
3777 vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(m), m->vmp_offset);
3778 }
3779 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3780 #pragma unused(object_lock_type)
3781 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3782
3783 done:
3784 return kr;
3785 }
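/*
 * The blocking fallback above follows the usual pattern for making a
 * potentially blocking call with the object lock dropped (sketch):
 *
 *	m->vmp_busy = TRUE;        keep the page from being freed,
 *	                           replaced or paged out while unlocked
 *	vm_object_unlock(object);
 *	...blocking pmap_enter_options_check()...
 *	vm_object_lock(object);
 *	wake any waiters, unless the page was already busy on entry, in
 *	which case whoever set the busy bit first will do the wakeup
 */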
3786
3787 /*
3788 * Prepare to enter a page into the pmap by checking CS, protection bits,
3789 * and setting mapped bits on the page_t.
3790 * Does not modify the page's paging queue.
3791 *
3792 * page queue lock must NOT be held
3793 * m->vmp_object must be locked
3794 *
3795 * NOTE: m->vmp_object could be locked "shared" only if we are called
3796 * from vm_fault() as part of a soft fault.
3797 */
3798 static kern_return_t
3799 vm_fault_enter_prepare(
3800 vm_page_t m,
3801 pmap_t pmap,
3802 vm_map_offset_t vaddr,
3803 vm_prot_t *prot,
3804 vm_prot_t caller_prot,
3805 vm_map_size_t fault_page_size,
3806 vm_map_offset_t fault_phys_offset,
3807 vm_prot_t fault_type,
3808 vm_object_fault_info_t fault_info,
3809 int *type_of_fault,
3810 bool *page_needs_data_sync)
3811 {
3812 kern_return_t kr;
3813 bool is_tainted = false;
3814 vm_object_t object;
3815 boolean_t cs_bypass = fault_info->cs_bypass;
3816
3817 object = VM_PAGE_OBJECT(m);
3818
3819 vm_object_lock_assert_held(object);
3820
3821 #if KASAN
3822 if (pmap == kernel_pmap) {
3823 kasan_notify_address(vaddr, PAGE_SIZE);
3824 }
3825 #endif
3826
3827 #if CODE_SIGNING_MONITOR
3828 if (csm_address_space_exempt(pmap) == KERN_SUCCESS) {
3829 cs_bypass = TRUE;
3830 }
3831 #endif
3832
3833 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3834
3835 if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3836 vm_object_lock_assert_exclusive(object);
3837 } else if ((fault_type & VM_PROT_WRITE) == 0 &&
3838 !fault_info->fi_change_wiring &&
3839 (!m->vmp_wpmapped
3840 #if VM_OBJECT_ACCESS_TRACKING
3841 || object->access_tracking
3842 #endif /* VM_OBJECT_ACCESS_TRACKING */
3843 )) {
3844 /*
3845 * This is not a "write" fault, so we
3846 * might not have taken the object lock
3847 * exclusively and we might not be able
3848 * to update the "wpmapped" bit in
3849 * vm_fault_enter().
3850 * Let's just grant read access to
3851 * the page for now and we'll
3852 * soft-fault again if we need write
3853 * access later...
3854 */
3855
3856 /* This had better not be a JIT page. */
3857 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3858 /*
3859 * This pmap enforces extra constraints for this set of
3860 * protections, so we can't modify them.
3861 */
3862 if (!cs_bypass) {
3863 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x !cs_bypass",
3864 __FUNCTION__, pmap, (uint64_t)vaddr,
3865 *prot, fault_info->pmap_options);
3866 }
3867 } else {
3868 *prot &= ~VM_PROT_WRITE;
3869 }
3870 }
3871 if (m->vmp_pmapped == FALSE) {
3872 if (m->vmp_clustered) {
3873 if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3874 /*
3875 * found it in the cache, but this
3876 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3877 * so it must have come in as part of
3878 * a cluster... account 1 pagein against it
3879 */
3880 if (object->internal) {
3881 *type_of_fault = DBG_PAGEIND_FAULT;
3882 } else {
3883 *type_of_fault = DBG_PAGEINV_FAULT;
3884 }
3885
3886 VM_PAGE_COUNT_AS_PAGEIN(m);
3887 }
3888 VM_PAGE_CONSUME_CLUSTERED(m);
3889 }
3890 }
3891
3892 if (*type_of_fault != DBG_COW_FAULT) {
3893 DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3894
3895 if (pmap == kernel_pmap) {
3896 DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3897 }
3898 }
3899
3900 kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3901 *prot, caller_prot, fault_page_size, fault_phys_offset,
3902 fault_info, &is_tainted);
3903 if (kr == KERN_SUCCESS) {
3904 /*
3905 * We either have a good page, or a tainted page that has been accepted by the process.
3906 * In both cases the page will be entered into the pmap.
3907 */
3908 *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
3909 if ((fault_type & VM_PROT_WRITE) && is_tainted) {
3910 /*
3911 * This page is tainted but we're inserting it anyway.
3912 * Since it's writeable, we need to disconnect it from other pmaps
3913 * now so those processes can take note.
3914 */
3915
3916 /*
3917 * We can only get here
3918 * because of the CSE logic
3919 */
3920 assert(pmap_get_vm_map_cs_enforced(pmap));
3921 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
3922 /*
3923 * If we are faulting for a write, we can clear
3924 * the execute bit - that will ensure the page is
3925 * checked again before being executable, which
3926 * protects against a map switch.
3927 * This only happens the first time the page
3928 * gets tainted, so we won't get stuck here
3929 * to make an already writeable page executable.
3930 */
3931 if (!cs_bypass) {
3932 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3933 /*
3934 * This pmap enforces extra constraints
3935 * for this set of protections, so we
3936 * can't change the protections.
3937 */
3938 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
3939 __FUNCTION__, pmap,
3940 (uint64_t)vaddr, *prot,
3941 fault_info->pmap_options);
3942 }
3943 *prot &= ~VM_PROT_EXECUTE;
3944 }
3945 }
3946 assert(VM_PAGE_OBJECT(m) == object);
3947
3948 #if VM_OBJECT_ACCESS_TRACKING
3949 if (object->access_tracking) {
3950 DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
3951 if (fault_type & VM_PROT_WRITE) {
3952 object->access_tracking_writes++;
3953 vm_object_access_tracking_writes++;
3954 } else {
3955 object->access_tracking_reads++;
3956 vm_object_access_tracking_reads++;
3957 }
3958 }
3959 #endif /* VM_OBJECT_ACCESS_TRACKING */
3960 }
3961
3962 return kr;
3963 }
3964
3965 /*
3966 * page queue lock must NOT be held
3967 * m->vmp_object must be locked
3968 *
3969 * NOTE: m->vmp_object could be locked "shared" only if we are called
3970 * from vm_fault() as part of a soft fault. If so, we must be
3971 * careful not to modify the VM object in any way that is not
3972 * legal under a shared lock...
3973 */
3974 kern_return_t
3975 vm_fault_enter(
3976 vm_page_t m,
3977 pmap_t pmap,
3978 vm_map_offset_t vaddr,
3979 vm_map_size_t fault_page_size,
3980 vm_map_offset_t fault_phys_offset,
3981 vm_prot_t prot,
3982 vm_prot_t caller_prot,
3983 boolean_t wired,
3984 vm_tag_t wire_tag,
3985 vm_object_fault_info_t fault_info,
3986 boolean_t *need_retry,
3987 int *type_of_fault,
3988 uint8_t *object_lock_type)
3989 {
3990 kern_return_t kr;
3991 vm_object_t object;
3992 bool page_needs_data_sync;
3993 vm_prot_t fault_type;
3994 int pmap_options = fault_info->pmap_options;
3995
3996 if (vm_page_is_guard(m)) {
3997 return KERN_SUCCESS;
3998 }
3999
4000 fault_type = fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot;
4001
4002 assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
4003 kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
4004 fault_page_size, fault_phys_offset, fault_type,
4005 fault_info, type_of_fault, &page_needs_data_sync);
4006 object = VM_PAGE_OBJECT(m);
4007
4008 vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
4009
4010 if (kr == KERN_SUCCESS) {
4011 if (page_needs_data_sync) {
4012 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
4013 }
4014
4015 if (fault_info->fi_xnu_user_debug && !object->code_signed) {
4016 pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
4017 }
4018
4019
4020 kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
4021 fault_page_size, fault_phys_offset, m,
4022 &prot, caller_prot, fault_type, wired, pmap_options, need_retry, object_lock_type);
4023 }
4024
4025 return kr;
4026 }
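/*
 * Summary of the vm_fault_enter() pipeline above (informal outline):
 *
 *	vm_fault_enter_prepare()    CS validation, protection adjustment,
 *	                            pmapped/xpmapped/wpmapped bookkeeping
 *	vm_fault_enqueue_page()     place the page on the proper paging queue
 *	pmap_sync_page_data_phys()  only if an i-cache sync is needed
 *	vm_fault_pmap_enter_with_object_lock()
 *	                            the actual pmap entry, including the
 *	                            nonblocking/blocking retry logic
 */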
4027
4028 kern_return_t
4029 vm_pre_fault_with_info(
4030 vm_map_t map,
4031 vm_map_offset_t vaddr,
4032 vm_prot_t prot,
4033 vm_object_fault_info_t fault_info)
4034 {
4035 assert(fault_info != NULL);
4036 if (pmap_find_phys(map->pmap, vaddr) == 0) {
4037 return vm_fault_internal(map,
4038 vaddr, /* vaddr */
4039 prot, /* fault_type */
4040 VM_KERN_MEMORY_NONE, /* tag - not wiring */
4041 NULL, /* caller_pmap */
4042 0, /* caller_pmap_addr */
4043 NULL,
4044 fault_info);
4045 }
4046 return KERN_SUCCESS;
4047 }
4048
4049 /*
4050 * Fault on the given vaddr iff the page is not already entered in the pmap.
4051 */
4052 kern_return_t
4053 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
4054 {
4055 struct vm_object_fault_info fault_info = {
4056 .interruptible = THREAD_UNINT,
4057 };
4058 return vm_pre_fault_with_info(current_map(), vaddr, prot, &fault_info);
4059 }
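/*
 * Example (hypothetical caller; "uaddr" is illustrative): warm up a
 * user address so a later access doesn't have to take the fault in a
 * less convenient context:
 *
 *	kern_return_t kr = vm_pre_fault(uaddr, VM_PROT_READ);
 *
 * KERN_SUCCESS means the page was either already resident in the pmap
 * (pmap_find_phys() != 0) or the fault was taken and resolved here.
 */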
4060
4061 /*
4062 * Routine: vm_fault
4063 * Purpose:
4064 * Handle page faults, including pseudo-faults
4065 * used to change the wiring status of pages.
4066 * Returns:
4067 * Explicit continuations have been removed.
4068 * Implementation:
4069 * vm_fault and vm_fault_page save mucho state
4070 * in the moral equivalent of a closure. The state
4071 * structure is allocated when first entering vm_fault
4072 * and deallocated when leaving vm_fault.
4073 */
4074
4075 extern uint64_t get_current_unique_pid(void);
4076
4077 unsigned long vm_fault_collapse_total = 0;
4078 unsigned long vm_fault_collapse_skipped = 0;
4079
4080
4081 kern_return_t
4082 vm_fault_external(
4083 vm_map_t map,
4084 vm_map_offset_t vaddr,
4085 vm_prot_t fault_type,
4086 boolean_t change_wiring,
4087 int interruptible,
4088 pmap_t caller_pmap,
4089 vm_map_offset_t caller_pmap_addr)
4090 {
4091 struct vm_object_fault_info fault_info = {
4092 .interruptible = interruptible,
4093 .fi_change_wiring = change_wiring,
4094 };
4095
4096 return vm_fault_internal(map, vaddr, fault_type,
4097 change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
4098 caller_pmap, caller_pmap_addr,
4099 NULL, &fault_info);
4100 }
4101
4102 kern_return_t
4103 vm_fault(
4104 vm_map_t map,
4105 vm_map_offset_t vaddr,
4106 vm_prot_t fault_type,
4107 boolean_t change_wiring,
4108 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4109 int interruptible,
4110 pmap_t caller_pmap,
4111 vm_map_offset_t caller_pmap_addr)
4112 {
4113 struct vm_object_fault_info fault_info = {
4114 .interruptible = interruptible,
4115 .fi_change_wiring = change_wiring,
4116 };
4117
4118 return vm_fault_internal(map, vaddr, fault_type, wire_tag,
4119 caller_pmap, caller_pmap_addr,
4120 NULL, &fault_info);
4121 }
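/*
 * vm_fault() above differs from vm_fault_external() only in that the
 * caller supplies the wiring tag explicitly, instead of having it
 * derived via vm_tag_bt() when change_wiring is set.
 */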
4122
4123 static boolean_t
4124 current_proc_is_privileged(void)
4125 {
4126 return csproc_get_platform_binary(current_proc());
4127 }
4128
4129 uint64_t vm_copied_on_read = 0;
4130
4131 /*
4132 * Cleanup after a vm_fault_enter.
4133 * At this point, the fault should either have failed (kr != KERN_SUCCESS)
4134 * or the page should be in the pmap and on the correct paging queue.
4135 *
4136 * Precondition:
4137 * map must be locked shared.
4138 * m_object must be locked.
4139 * If top_object != VM_OBJECT_NULL, it must be locked.
4140 * real_map must be locked.
4141 *
4142 * Postcondition:
4143 * map will be unlocked
4144 * m_object will be unlocked
4145 * top_object will be unlocked
4146 * If real_map != map, it will be unlocked
4147 */
4148 static void
4149 vm_fault_complete(
4150 vm_map_t map,
4151 vm_map_t real_map,
4152 vm_object_t object,
4153 vm_object_t m_object,
4154 vm_page_t m,
4155 vm_map_offset_t offset,
4156 vm_map_offset_t trace_real_vaddr,
4157 vm_object_fault_info_t fault_info,
4158 vm_prot_t caller_prot,
4159 #if CONFIG_DTRACE
4160 vm_map_offset_t real_vaddr,
4161 #else
4162 __unused vm_map_offset_t real_vaddr,
4163 #endif /* CONFIG_DTRACE */
4164 int type_of_fault,
4165 boolean_t need_retry,
4166 kern_return_t kr,
4167 ppnum_t *physpage_p,
4168 vm_prot_t prot,
4169 vm_object_t top_object,
4170 boolean_t need_collapse,
4171 vm_map_offset_t cur_offset,
4172 vm_prot_t fault_type,
4173 vm_object_t *written_on_object,
4174 memory_object_t *written_on_pager,
4175 vm_object_offset_t *written_on_offset)
4176 {
4177 int event_code = 0;
4178 vm_map_lock_assert_shared(map);
4179 vm_object_lock_assert_held(m_object);
4180 if (top_object != VM_OBJECT_NULL) {
4181 vm_object_lock_assert_held(top_object);
4182 }
4183 vm_map_lock_assert_held(real_map);
4184
4185 if (m_object->internal) {
4186 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
4187 } else if (m_object->object_is_shared_cache) {
4188 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
4189 } else {
4190 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
4191 }
4192 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid());
4193 if (need_retry == FALSE) {
4194 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid());
4195 }
4196 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
4197 if (kr == KERN_SUCCESS &&
4198 physpage_p != NULL) {
4199 /* for vm_map_wire_and_extract() */
4200 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
4201 if (prot & VM_PROT_WRITE) {
4202 vm_object_lock_assert_exclusive(m_object);
4203 m->vmp_dirty = TRUE;
4204 }
4205 }
4206
4207 if (top_object != VM_OBJECT_NULL) {
4208 /*
4209 * It's safe to drop the top object
4210 * now that we've done our
4211 * vm_fault_enter(). Any other fault
4212 * in progress for that virtual
4213 * address will either find our page
4214 * and translation or put in a new page
4215 * and translation.
4216 */
4217 vm_object_unlock(top_object);
4218 top_object = VM_OBJECT_NULL;
4219 }
4220
4221 if (need_collapse == TRUE) {
4222 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
4223 }
4224
4225 if (need_retry == FALSE &&
4226 (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
4227 /*
4228 * evaluate access pattern and update state
4229 * vm_fault_deactivate_behind depends on the
4230 * state being up to date
4231 */
4232 vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
4233
4234 vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
4235 }
4236 /*
4237 * That's it, clean up and return.
4238 */
4239 if (m->vmp_busy) {
4240 vm_object_lock_assert_exclusive(m_object);
4241 vm_page_wakeup_done(m_object, m);
4242 }
4243
4244 if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
4245 vm_object_paging_begin(m_object);
4246
4247 assert(*written_on_object == VM_OBJECT_NULL);
4248 *written_on_object = m_object;
4249 *written_on_pager = m_object->pager;
4250 *written_on_offset = m_object->paging_offset + m->vmp_offset;
4251 }
4252 vm_object_unlock(object);
4253
4254 vm_map_unlock_read(map);
4255 if (real_map != map) {
4256 vm_map_unlock(real_map);
4257 }
4258 }
4259
4260 static inline int
4261 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
4262 {
4263 if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4264 return DBG_COR_FAULT;
4265 }
4266 return type_of_fault;
4267 }
4268
4269 uint64_t vm_fault_resilient_media_initiate = 0;
4270 uint64_t vm_fault_resilient_media_retry = 0;
4271 uint64_t vm_fault_resilient_media_proceed = 0;
4272 uint64_t vm_fault_resilient_media_release = 0;
4273 uint64_t vm_fault_resilient_media_abort1 = 0;
4274 uint64_t vm_fault_resilient_media_abort2 = 0;
4275
4276 #if MACH_ASSERT
4277 int vm_fault_resilient_media_inject_error1_rate = 0;
4278 int vm_fault_resilient_media_inject_error1 = 0;
4279 int vm_fault_resilient_media_inject_error2_rate = 0;
4280 int vm_fault_resilient_media_inject_error2 = 0;
4281 int vm_fault_resilient_media_inject_error3_rate = 0;
4282 int vm_fault_resilient_media_inject_error3 = 0;
4283 #endif /* MACH_ASSERT */
4284
4285 kern_return_t
4286 vm_fault_internal(
4287 vm_map_t map,
4288 vm_map_offset_t vaddr,
4289 vm_prot_t caller_prot,
4290 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4291 pmap_t caller_pmap,
4292 vm_map_offset_t caller_pmap_addr,
4293 ppnum_t *physpage_p,
4294 vm_object_fault_info_t fault_info)
4295 {
4296 vm_map_version_t version; /* Map version for verification */
4297 boolean_t wired; /* Should mapping be wired down? */
4298 vm_object_t object; /* Top-level object */
4299 vm_object_offset_t offset; /* Top-level offset */
4300 vm_prot_t prot; /* Protection for mapping */
4301 vm_object_t old_copy_object; /* Saved copy object */
4302 uint32_t old_copy_version;
4303 vm_page_t result_page; /* Result of vm_fault_page */
4304 vm_page_t top_page; /* Placeholder page */
4305 kern_return_t kr;
4306
4307 vm_page_t m; /* Fast access to result_page */
4308 kern_return_t error_code;
4309 vm_object_t cur_object;
4310 vm_object_t m_object = NULL;
4311 vm_object_offset_t cur_offset;
4312 vm_page_t cur_m;
4313 vm_object_t new_object;
4314 int type_of_fault;
4315 pmap_t pmap;
4316 wait_interrupt_t interruptible_state;
4317 vm_map_t real_map = map;
4318 vm_map_t original_map = map;
4319 bool object_locks_dropped = FALSE;
4320 vm_prot_t fault_type;
4321 vm_prot_t original_fault_type;
4322 bool need_collapse = FALSE;
4323 boolean_t need_retry = FALSE;
4324 boolean_t *need_retry_ptr = NULL;
4325 uint8_t object_lock_type = 0;
4326 uint8_t cur_object_lock_type;
4327 vm_object_t top_object = VM_OBJECT_NULL;
4328 vm_object_t written_on_object = VM_OBJECT_NULL;
4329 memory_object_t written_on_pager = NULL;
4330 vm_object_offset_t written_on_offset = 0;
4331 int throttle_delay;
4332 int compressed_count_delta;
4333 uint8_t grab_options;
4334 bool need_copy;
4335 bool need_copy_on_read;
4336 vm_map_offset_t trace_vaddr;
4337 vm_map_offset_t trace_real_vaddr;
4338 vm_map_size_t fault_page_size;
4339 vm_map_size_t fault_page_mask;
4340 int fault_page_shift;
4341 vm_map_offset_t fault_phys_offset;
4342 vm_map_offset_t real_vaddr;
4343 bool resilient_media_retry = false;
4344 bool resilient_media_ref_transfer = false;
4345 vm_object_t resilient_media_object = VM_OBJECT_NULL;
4346 vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1;
4347 bool page_needs_data_sync = false;
4348 /*
4349 * Was the VM object contended when vm_map_lookup_and_lock_object locked it?
4350 * If so, the zero fill path will drop the lock.
4351 * NB: Ideally we would always drop the lock rather than rely on
4352 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4353 */
4354 bool object_is_contended = false;
4355
4356
4357 real_vaddr = vaddr;
4358 trace_real_vaddr = vaddr;
4359
4360 /*
4361 * Some (kernel) submaps are marked with "should never fault".
4362 *
4363 * We do this for two reasons:
4364 * - PGZ which is inside the zone map range can't go down the normal
4365 * lookup path (vm_map_lookup_entry() would panic).
4366 *
4367 * - we want guard pages to avoid having to use fictitious pages at all,
4368 * to prevent ZFOD pages from being created.
4369 *
4370 * We also want to capture the fault address easily so that the zone
4371 * allocator can present an enhanced panic log.
4372 */
4373 if (map->never_faults || (pgz_owned(vaddr) && map->pmap == kernel_pmap)) {
4374 assert(map->pmap == kernel_pmap);
4375 return KERN_INVALID_ADDRESS;
4376 }
4377
4378 if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4379 fault_phys_offset = (vm_map_offset_t)-1;
4380 fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4381 fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4382 fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4383 if (fault_page_size < PAGE_SIZE) {
4384 DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4385 vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4386 }
4387 } else {
4388 fault_phys_offset = 0;
4389 fault_page_size = PAGE_SIZE;
4390 fault_page_mask = PAGE_MASK;
4391 fault_page_shift = PAGE_SHIFT;
4392 vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4393 }
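/*
 * Worked example (assuming a 16K kernel PAGE_SIZE and a 4K-page map):
 * fault_page_size == 0x1000 and fault_page_mask == 0xfff, so a fault
 * at 0x1003a10 is truncated to vaddr 0x1003000. fault_phys_offset is
 * initialized to -1 here and recomputed after the map lookup to select
 * the 4K sub-page within the 16K physical page.
 */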
4394
4395 if (map == kernel_map) {
4396 trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4397 trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4398 } else {
4399 trace_vaddr = vaddr;
4400 }
4401
4402 KDBG_RELEASE(
4403 (VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_START,
4404 ((uint64_t)trace_vaddr >> 32),
4405 trace_vaddr,
4406 (map == kernel_map));
4407
4408 if (get_preemption_level() != 0) {
4409 KDBG_RELEASE(
4410 (VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
4411 ((uint64_t)trace_vaddr >> 32),
4412 trace_vaddr,
4413 KERN_FAILURE);
4414
4415 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4416 return KERN_FAILURE;
4417 }
4418
4419 thread_t cthread = current_thread();
4420
4421 if (cthread->th_vm_faults_disabled) {
4422 KDBG_RELEASE(
4423 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4424 ((uint64_t)trace_vaddr >> 32),
4425 trace_vaddr,
4426 KERN_FAILURE);
4427 ktriage_record(thread_tid(cthread),
4428 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
4429 KDBG_TRIAGE_RESERVED,
4430 KDBG_TRIAGE_VM_FAULTS_DISABLED),
4431 0 /* arg */);
4432 return KERN_FAILURE;
4433 }
4434
4435 bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4436 uint64_t fstart = 0;
4437
4438 if (rtfault) {
4439 fstart = mach_continuous_time();
4440 }
4441
4442 assert(fault_info != NULL);
4443 interruptible_state = thread_interrupt_level(fault_info->interruptible);
4444
4445 fault_type = (fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot);
4446
4447 counter_inc(&vm_statistics_faults);
4448 counter_inc(&current_task()->faults);
4449 original_fault_type = fault_type;
4450
4451 need_copy = FALSE;
4452 if (fault_type & VM_PROT_WRITE) {
4453 need_copy = TRUE;
4454 }
4455
4456 if (need_copy || fault_info->fi_change_wiring) {
4457 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4458 } else {
4459 object_lock_type = OBJECT_LOCK_SHARED;
4460 }
4461
4462 cur_object_lock_type = OBJECT_LOCK_SHARED;
4463
4464 if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4465 if (compressor_map) {
4466 if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4467 panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4468 }
4469 }
4470 }
4471 RetryFault:
4472 assert(written_on_object == VM_OBJECT_NULL);
4473
4474 /*
4475 * assume we will hit a page in the cache;
4476 * otherwise, explicitly override with
4477 * the real fault type once we determine it
4478 */
4479 type_of_fault = DBG_CACHE_HIT_FAULT;
4480
4481 /*
4482 * Find the backing store object and offset into
4483 * it to begin the search.
4484 */
4485 fault_type = original_fault_type;
4486 map = original_map;
4487 vm_map_lock_read(map);
4488
4489 if (resilient_media_retry) {
4490 /*
4491 * If we have to insert a fake zero-filled page to hide
4492 * a media failure to provide the real page, we need to
4493 * resolve any pending copy-on-write on this mapping.
4494 * VM_PROT_COPY tells vm_map_lookup_and_lock_object() to deal
4495 * with that even if this is not a "write" fault.
4496 */
4497 need_copy = TRUE;
4498 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4499 vm_fault_resilient_media_retry++;
4500 }
4501
4502 kr = vm_map_lookup_and_lock_object(&map, vaddr,
4503 (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4504 object_lock_type, &version,
4505 &object, &offset, &prot, &wired,
4506 fault_info,
4507 &real_map,
4508 &object_is_contended);
4509 object_is_contended = false; /* avoid unsafe optimization */
4510
4511 if (kr != KERN_SUCCESS) {
4512 vm_map_unlock_read(map);
4513 /*
4514 * This can be seen in a crash report if indeed the
4515 * thread is crashing due to an invalid access in a non-existent
4516 * range.
4517 * Turning this OFF for now because it is noisy and not always fatal,
4518 * e.g. prefaulting.
4519 *
4520 * if (kr == KERN_INVALID_ADDRESS) {
4521 * ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4522 * }
4523 */
4524 goto done;
4525 }
4526
4527
4528 pmap = real_map->pmap;
4529 fault_info->io_sync = FALSE;
4530 fault_info->mark_zf_absent = FALSE;
4531 fault_info->batch_pmap_op = FALSE;
4532
4533 if (resilient_media_retry) {
4534 /*
4535 * We're retrying this fault after having detected a media
4536 * failure from a "resilient_media" mapping.
4537 * Check that the mapping is still pointing at the object
4538 * that just failed to provide a page.
4539 */
4540 assert(resilient_media_object != VM_OBJECT_NULL);
4541 assert(resilient_media_offset != (vm_object_offset_t)-1);
4542 if ((object != VM_OBJECT_NULL &&
4543 object == resilient_media_object &&
4544 offset == resilient_media_offset &&
4545 fault_info->resilient_media)
4546 #if MACH_ASSERT
4547 && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4548 (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4549 #endif /* MACH_ASSERT */
4550 ) {
4551 /*
4552 * This mapping still points at the same object
4553 * and is still "resilient_media": proceed in
4554 * "recovery-from-media-failure" mode, where we'll
4555 * insert a zero-filled page in the top object.
4556 */
4557 // printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4558 vm_fault_resilient_media_proceed++;
4559 } else {
4560 /* not recovering: reset state and retry fault */
4561 // printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info->resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4562 vm_object_unlock(object);
4563 if (real_map != map) {
4564 vm_map_unlock(real_map);
4565 }
4566 vm_map_unlock_read(map);
4567 /* release our extra reference on failed object */
4568 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4569 vm_object_deallocate(resilient_media_object);
4570 resilient_media_object = VM_OBJECT_NULL;
4571 resilient_media_offset = (vm_object_offset_t)-1;
4572 resilient_media_retry = false;
4573 vm_fault_resilient_media_abort1++;
4574 goto RetryFault;
4575 }
4576 } else {
4577 assert(resilient_media_object == VM_OBJECT_NULL);
4578 resilient_media_offset = (vm_object_offset_t)-1;
4579 }
4580
4581 /*
4582 * If the page is wired, we must fault for the current protection
4583 * value, to avoid further faults.
4584 */
4585 if (wired) {
4586 fault_type = prot | VM_PROT_WRITE;
4587 }
4588 if (wired || need_copy) {
4589 /*
4590 * since we're treating this fault as a 'write'
4591 * we must hold the top object lock exclusively
4592 */
4593 if (object_lock_type == OBJECT_LOCK_SHARED) {
4594 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4595
4596 if (vm_object_lock_upgrade(object) == FALSE) {
4597 /*
4598 * couldn't upgrade, so explicitly
4599 * take the lock exclusively
4600 */
4601 vm_object_lock(object);
4602 }
4603 }
4604 }
4605
4606 #if VM_FAULT_CLASSIFY
4607 /*
4608 * Temporary data gathering code
4609 */
4610 vm_fault_classify(object, offset, fault_type);
4611 #endif
4612 /*
4613 * Fast fault code. The basic idea is to do as much as
4614 * possible while holding the map lock and object locks.
4615 * Busy pages are not used until the object lock has to
4616 * be dropped to do something (copy, zero fill, pmap enter).
4617 * Similarly, paging references aren't acquired until that
4618 * point, and object references aren't used.
4619 *
4620 * If we can figure out what to do
4621 * (zero fill, copy on write, pmap enter) while holding
4622 * the locks, then it gets done. Otherwise, we give up,
4623 * and use the original fault path (which doesn't hold
4624 * the map lock, and relies on busy pages).
4625 * The give up cases include:
4626 * - Have to talk to pager.
4627 * - Page is busy, absent or in error.
4628 * - Pager has locked out desired access.
4629 * - Fault needs to be restarted.
4630 * - Have to push page into copy object.
4631 *
4632 * The code is an infinite loop that moves one level down
4633 * the shadow chain each time. cur_object and cur_offset
4634 * refer to the current object being examined. object and offset
4635 * are the original object from the map. The loop is at the
4636 * top level if and only if object and cur_object are the same.
4637 *
4638 * Invariants: Map lock is held throughout. Lock is held on
4639 * original object and cur_object (if different) when
4640 * continuing or exiting loop.
4641 *
4642 */
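/*
 * The main loop below, in outline (an informal sketch, not code):
 *
 *	cur_object = object; cur_offset = offset;
 *	while (TRUE) {
 *		m = vm_page_lookup(cur_object, cur_offset);
 *		if (m is present and usable)
 *			FastPmapEnter or shadow-chain copy-on-write;
 *		if (m is busy/error/absent/guard or a pager must be asked)
 *			break;   fall back to the slow vm_fault_page() path
 *		descend one level: cur_object = shadow, adjust cur_offset;
 *	}
 */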
4643
4644 #if defined(__arm64__)
4645 /*
4646 * Fail if reading an execute-only page in a
4647 * pmap that enforces execute-only protection.
4648 */
4649 if (fault_type == VM_PROT_READ &&
4650 (prot & VM_PROT_EXECUTE) &&
4651 !(prot & VM_PROT_READ) &&
4652 pmap_enforces_execute_only(pmap)) {
4653 vm_object_unlock(object);
4654 vm_map_unlock_read(map);
4655 if (real_map != map) {
4656 vm_map_unlock(real_map);
4657 }
4658 kr = KERN_PROTECTION_FAILURE;
4659 goto done;
4660 }
4661 #endif
4662
4663 fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
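/*
 * e.g. with a 16K PAGE_SIZE and a 4K-page map, an offset of 0x5a000
 * yields fault_phys_offset = 0x5a000 - 0x58000 = 0x2000, i.e. the
 * third 4K sub-page of the 16K physical page; when the map page size
 * equals PAGE_SIZE this is always 0.
 */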
4664
4665 /*
4666 * If this page is to be inserted in a copy delay object
4667 * for writing, and if the object has a copy, then the
4668 * copy delay strategy is implemented in the slow fault page.
4669 */
4670 if ((object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
4671 object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) &&
4672 object->vo_copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4673 if (resilient_media_retry && object && object->internal) {
4674 /*
4675 * We're handling a "resilient media retry" and we
4676 * just want to insert of zero-filled page in this
4677 * top object (if there's not already a page there),
4678 * so this is not a real "write" and we want to stay
4679 * on this code path.
4680 */
4681 } else {
4682 goto handle_copy_delay;
4683 }
4684 }
4685
4686 cur_object = object;
4687 cur_offset = offset;
4688
4689 grab_options = 0;
4690 #if CONFIG_SECLUDED_MEMORY
4691 if (object->can_grab_secluded) {
4692 grab_options |= VM_PAGE_GRAB_SECLUDED;
4693 }
4694 #endif /* CONFIG_SECLUDED_MEMORY */
4695
4696 while (TRUE) {
4697 if (!cur_object->pager_created &&
4698 cur_object->phys_contiguous) { /* superpage */
4699 break;
4700 }
4701
4702 if (cur_object->blocked_access) {
4703 /*
4704 * Access to this VM object has been blocked.
4705 * Let the slow path handle it.
4706 */
4707 break;
4708 }
4709
4710 m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4711 m_object = NULL;
4712
4713 if (m != VM_PAGE_NULL) {
4714 m_object = cur_object;
4715
4716 if (m->vmp_busy) {
4717 wait_result_t result;
4718
4719 /*
4720 * in order to vm_page_sleep(), we must
4721 * have the object that 'm' belongs to locked exclusively
4722 */
4723 if (object != cur_object) {
4724 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4725 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4726
4727 if (vm_object_lock_upgrade(cur_object) == FALSE) {
4728 /*
4729 * couldn't upgrade so go do a full retry
4730 * immediately since we can no longer be
4731 * certain about cur_object (since we
4732 * don't hold a reference on it)...
4733 * first drop the top object lock
4734 */
4735 vm_object_unlock(object);
4736
4737 vm_map_unlock_read(map);
4738 if (real_map != map) {
4739 vm_map_unlock(real_map);
4740 }
4741
4742 goto RetryFault;
4743 }
4744 }
4745 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4746 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4747
4748 if (vm_object_lock_upgrade(object) == FALSE) {
4749 /*
4750 * couldn't upgrade, so explicitly take the lock
4751 * exclusively and go relookup the page since we
4752 * will have dropped the object lock and
4753 * a different thread could have inserted
4754 * a page at this offset
4755 * no need for a full retry since we're
4756 * at the top level of the object chain
4757 */
4758 vm_object_lock(object);
4759
4760 continue;
4761 }
4762 }
4763 if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4764 /*
4765 * m->vmp_busy == TRUE and the object is locked exclusively.
4766 * If m->vmp_q_state is still VM_PAGE_ON_PAGEOUT_Q after we acquire the
4767 * queues lock, we are guaranteed that it is stable on
4768 * the pageout queue and therefore reclaimable
4769 *
4770 * NOTE: this is only true for the internal pageout queue
4771 * in the compressor world
4772 */
4773 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4774
4775 vm_page_lock_queues();
4776
4777 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4778 vm_pageout_throttle_up(m);
4779 vm_page_unlock_queues();
4780
4781 vm_page_wakeup_done(m_object, m);
4782 goto reclaimed_from_pageout;
4783 }
4784 vm_page_unlock_queues();
4785 }
4786 if (object != cur_object) {
4787 vm_object_unlock(object);
4788 }
4789
4790 vm_map_unlock_read(map);
4791 if (real_map != map) {
4792 vm_map_unlock(real_map);
4793 }
4794
4795 result = vm_page_sleep(cur_object, m, fault_info->interruptible, LCK_SLEEP_UNLOCK);
4796 if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4797 goto RetryFault;
4798 }
4799
4800 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4801 kr = KERN_ABORTED;
4802 goto done;
4803 }
4804 reclaimed_from_pageout:
4805 if (m->vmp_laundry) {
4806 if (object != cur_object) {
4807 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4808 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4809
4810 vm_object_unlock(object);
4811 vm_object_unlock(cur_object);
4812
4813 vm_map_unlock_read(map);
4814 if (real_map != map) {
4815 vm_map_unlock(real_map);
4816 }
4817
4818 goto RetryFault;
4819 }
4820 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4821 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4822
4823 if (vm_object_lock_upgrade(object) == FALSE) {
4824 /*
4825 * couldn't upgrade, so explicitly take the lock
4826 * exclusively and go relookup the page since we
4827 * will have dropped the object lock and
4828 * a different thread could have inserted
4829 * a page at this offset
4830 * no need for a full retry since we're
4831 * at the top level of the object chain
4832 */
4833 vm_object_lock(object);
4834
4835 continue;
4836 }
4837 }
4838 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4839 vm_pageout_steal_laundry(m, FALSE);
4840 }
4841
4842
4843 if (vm_page_is_guard(m)) {
4844 /*
4845 * Guard page: let the slow path deal with it
4846 */
4847 break;
4848 }
4849 if (m->vmp_unusual && (m->vmp_error || m->vmp_restart ||
4850 vm_page_is_private(m) || m->vmp_absent)) {
4851 /*
4852 * Unusual case... let the slow path deal with it
4853 */
4854 break;
4855 }
4856 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4857 if (object != cur_object) {
4858 vm_object_unlock(object);
4859 }
4860 vm_map_unlock_read(map);
4861 if (real_map != map) {
4862 vm_map_unlock(real_map);
4863 }
4864 vm_object_unlock(cur_object);
4865 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4866 kr = KERN_MEMORY_ERROR;
4867 goto done;
4868 }
4869 assert(m_object == VM_PAGE_OBJECT(m));
4870
4871 if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4872 PAGE_SIZE, 0) ||
4873 (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4874 upgrade_lock_and_retry:
4875 /*
4876 * We might need to validate this page
4877 * against its code signature, so we
4878 * want to hold the VM object exclusively.
4879 */
4880 if (object != cur_object) {
4881 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4882 vm_object_unlock(object);
4883 vm_object_unlock(cur_object);
4884
4885 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4886
4887 vm_map_unlock_read(map);
4888 if (real_map != map) {
4889 vm_map_unlock(real_map);
4890 }
4891
4892 goto RetryFault;
4893 }
4894 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4895 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4896
4897 if (vm_object_lock_upgrade(object) == FALSE) {
4898 /*
4899 * couldn't upgrade, so explicitly take the lock
4900 * exclusively and go relookup the page since we
4901 * will have dropped the object lock and
4902 * a different thread could have inserted
4903 * a page at this offset
4904 * no need for a full retry since we're
4905 * at the top level of the object chain
4906 */
4907 vm_object_lock(object);
4908
4909 continue;
4910 }
4911 }
4912 }
4913 /*
4914 * Two cases of map-in faults:
4915 * - At top level w/o copy object.
4916 * - Read fault anywhere.
4917 * --> must disallow write.
4918 */
4919
4920 if (object == cur_object && object->vo_copy == VM_OBJECT_NULL) {
4921 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
4922 if ((fault_type & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
4923 assert(cur_object == VM_PAGE_OBJECT(m));
4924 assert(cur_object->internal);
4925 vm_object_lock_assert_exclusive(cur_object);
4926 vm_page_lockspin_queues();
4927 m->vmp_unmodified_ro = false;
4928 vm_page_unlock_queues();
4929 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
4930 vm_object_compressor_pager_state_clr(cur_object, m->vmp_offset);
4931 }
4932 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
4933 goto FastPmapEnter;
4934 }
4935
4936 if (!need_copy &&
4937 !fault_info->no_copy_on_read &&
4938 cur_object != object &&
4939 !cur_object->internal &&
4940 !cur_object->pager_trusted &&
4941 vm_protect_privileged_from_untrusted &&
4942 !cur_object->code_signed &&
4943 current_proc_is_privileged()) {
4944 /*
4945 * We're faulting on a page in "object" and
4946 * went down the shadow chain to "cur_object"
4947 * to find out that "cur_object"'s pager
4948 * is not "trusted", i.e. we can not trust it
4949 * to always return the same contents.
4950 * Since the target is a "privileged" process,
4951 * let's treat this as a copy-on-read fault, as
4952 * if it was a copy-on-write fault.
4953 * Once "object" gets a copy of this page, it
4954 * won't have to rely on "cur_object" to
4955 * provide the contents again.
4956 *
4957 * This is done by setting "need_copy" and
4958 * retrying the fault from the top with the
4959 * appropriate locking.
4960 *
4961 * Special case: if the mapping is executable
4962 * and the untrusted object is code-signed and
4963 * the process is "cs_enforced", we do not
4964 * copy-on-read because that would break
4965 * code-signing enforcement expectations (an
4966 * executable page must belong to a code-signed
4967 * object) and we can rely on code-signing
4968 * to re-validate the page if it gets evicted
4969 * and paged back in.
4970 */
4971 // printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
4972 vm_copied_on_read++;
4973 need_copy = TRUE;
4974
4975 vm_object_unlock(object);
4976 vm_object_unlock(cur_object);
4977 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4978 vm_map_unlock_read(map);
4979 if (real_map != map) {
4980 vm_map_unlock(real_map);
4981 }
4982 goto RetryFault;
4983 }
4984
4985 if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
4986 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
4987 /*
4988 * For a protection that the pmap cares
4989 * about, we must hand over the full
4990 * set of protections (so that the pmap
4991 * layer can apply any desired policy).
4992 * This means that cs_bypass must be
4993 * set, as this can force us to pass
4994 * RWX.
4995 */
4996 if (!fault_info->cs_bypass) {
4997 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
4998 __FUNCTION__, pmap,
4999 (uint64_t)vaddr, prot,
5000 fault_info->pmap_options);
5001 }
5002 } else {
5003 prot &= ~VM_PROT_WRITE;
5004 }
5005
5006 if (object != cur_object) {
5007 /*
5008 * We still need to hold the top object
5009 * lock here to prevent a race between
5010 * a read fault (taking only "shared"
5011 * locks) and a write fault (taking
5012 * an "exclusive" lock on the top
5013 * object).
5014 * Otherwise, as soon as we release the
5015 * top lock, the write fault could
5016 * proceed and actually complete before
5017 * the read fault, and the copied page's
5018 * translation could then be overwritten
5019 * by the read fault's translation for
5020 * the original page.
5021 *
5022 * Let's just record what the top object
5023 * is and we'll release it later.
5024 */
5025 top_object = object;
5026
5027 /*
5028 * switch to the object that has the new page
5029 */
5030 object = cur_object;
5031 object_lock_type = cur_object_lock_type;
5032 }
5033 FastPmapEnter:
5034 assert(m_object == VM_PAGE_OBJECT(m));
5035
5036 if (resilient_media_retry && (prot & VM_PROT_WRITE)) {
5037 /*
5038 * We might have bypassed some copy-on-write
5039 * mechanism to get here (theoretically inserting
5040 * a zero-filled page in the top object to avoid
5041 * raising an exception on an unavailable page at
5042 * the bottom of the shadow chain).
5043 * So let's not grant write access to this page yet.
5044 * If write access is needed, the next fault should
5045 * handle any copy-on-write obligations.
5046 */
5047 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5048 /*
5049 * For a protection that the pmap cares
5050 * about, we must hand over the full
5051 * set of protections (so that the pmap
5052 * layer can apply any desired policy).
5053 * This means that cs_bypass must be
5054 * set, as this can force us to pass
5055 * RWX.
5056 */
5057 if (!fault_info->cs_bypass) {
5058 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5059 __FUNCTION__, pmap,
5060 (uint64_t)vaddr, prot,
5061 fault_info->pmap_options);
5062 }
5063 } else {
5064 prot &= ~VM_PROT_WRITE;
5065 }
5066 }
5067
5068 /*
5069 * prepare for the pmap_enter...
5070 * object and map are both locked
5071 * m contains valid data
5072 * object == m->vmp_object
5073 * cur_object == NULL or it's been unlocked
5074 * no paging references on either object or cur_object
5075 */
5076 if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5077 need_retry_ptr = &need_retry;
5078 } else {
5079 need_retry_ptr = NULL;
5080 }
5081
5082 if (fault_page_size < PAGE_SIZE) {
5083 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
5084 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
5085 fault_phys_offset < PAGE_SIZE),
5086 "0x%llx\n", (uint64_t)fault_phys_offset);
5087 } else {
5088 assertf(fault_phys_offset == 0,
5089 "0x%llx\n", (uint64_t)fault_phys_offset);
5090 }
5091
5092 if (__improbable(rtfault &&
5093 !m->vmp_realtime &&
5094 vm_pageout_protect_realtime)) {
5095 vm_page_lock_queues();
5096 if (!m->vmp_realtime) {
5097 m->vmp_realtime = true;
5098 vm_page_realtime_count++;
5099 }
5100 vm_page_unlock_queues();
5101 }
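/*
 * The unlocked vmp_realtime test above plus the re-test under
 * vm_page_lock_queues() is a double-checked pattern: it avoids
 * taking the queues lock in the common (already-marked) case while
 * still guaranteeing vm_page_realtime_count is bumped only once
 * per page.
 */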
5102 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
5103 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
5104 if (caller_pmap) {
5105 kr = vm_fault_enter(m,
5106 caller_pmap,
5107 caller_pmap_addr,
5108 fault_page_size,
5109 fault_phys_offset,
5110 prot,
5111 caller_prot,
5112 wired,
5113 wire_tag,
5114 fault_info,
5115 need_retry_ptr,
5116 &type_of_fault,
5117 &object_lock_type);
5118 } else {
5119 kr = vm_fault_enter(m,
5120 pmap,
5121 vaddr,
5122 fault_page_size,
5123 fault_phys_offset,
5124 prot,
5125 caller_prot,
5126 wired,
5127 wire_tag,
5128 fault_info,
5129 need_retry_ptr,
5130 &type_of_fault,
5131 &object_lock_type);
5132 }
5133
5134 vm_fault_complete(
5135 map,
5136 real_map,
5137 object,
5138 m_object,
5139 m,
5140 offset,
5141 trace_real_vaddr,
5142 fault_info,
5143 caller_prot,
5144 real_vaddr,
5145 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
5146 need_retry,
5147 kr,
5148 physpage_p,
5149 prot,
5150 top_object,
5151 need_collapse,
5152 cur_offset,
5153 fault_type,
5154 &written_on_object,
5155 &written_on_pager,
5156 &written_on_offset);
5157 top_object = VM_OBJECT_NULL;
5158 if (need_retry == TRUE) {
5159 /*
5160 * vm_fault_enter couldn't complete the PMAP_ENTER...
5161 * at this point we don't hold any locks so it's safe
5162 * to ask the pmap layer to expand the page table to
5163 * accommodate this mapping... once expanded, we'll
5164 * re-drive the fault which should result in vm_fault_enter
5165 * being able to successfully enter the mapping this time around
5166 */
5167 (void)pmap_enter_options(
5168 pmap, vaddr, 0, 0, 0, 0, 0,
5169 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5170
5171 need_retry = FALSE;
5172 goto RetryFault;
5173 }
5174 goto done;
5175 }
5176 /*
5177 * COPY ON WRITE FAULT
5178 */
5179 assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
5180
5181 /*
5182 * If objects match, then
5183 * object->vo_copy must not be NULL (else control
5184 * would be in the previous code block), and we
5185 * have a potential push into the copy object
5186 * which we can't cope with here.
5187 */
5188 if (cur_object == object) {
5189 /*
5190 * must take the slow path to
5191 * deal with the copy push
5192 */
5193 break;
5194 }
5195
5196 /*
5197 * This is now a shadow based copy on write
5198 * fault -- it requires a copy up the shadow
5199 * chain.
5200 */
5201 assert(m_object == VM_PAGE_OBJECT(m));
5202
5203 if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
5204 vm_fault_cs_need_validation(NULL, m, m_object,
5205 PAGE_SIZE, 0)) {
5206 goto upgrade_lock_and_retry;
5207 }
5208
5209 #if MACH_ASSERT
5210 if (resilient_media_retry &&
5211 vm_fault_resilient_media_inject_error2_rate != 0 &&
5212 (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
5213 /* inject an error */
5214 cur_m = m;
5215 m = VM_PAGE_NULL;
5216 m_object = VM_OBJECT_NULL;
5217 break;
5218 }
5219 #endif /* MACH_ASSERT */
5220 /*
5221 * Allocate a page in the original top level
5222 * object. Give up if allocate fails. Also
5223 * need to remember current page, as it's the
5224 * source of the copy.
5225 *
5226 * at this point we hold locks on both
5227 * object and cur_object... no need to take
5228 * paging refs or mark pages BUSY since
5229 * we don't drop either object lock until
5230 * the page has been copied and inserted
5231 */
5232
5233
5234 cur_m = m;
5235 m = vm_page_grab_options(grab_options);
5236 m_object = NULL;
5237
5238 if (m == VM_PAGE_NULL) {
5239 /*
5240 * no free page currently available...
5241 * must take the slow path
5242 */
5243 break;
5244 }
5245
5246 /*
5247 * Now do the copy. Mark the source page busy...
5248 *
5249 * NOTE: This code holds the map lock across
5250 * the page copy.
5251 */
5252 vm_page_copy(cur_m, m);
5253 vm_page_insert(m, object, vm_object_trunc_page(offset));
5254 if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
5255 DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5256 }
5257 m_object = object;
5258 SET_PAGE_DIRTY(m, FALSE);
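/*
 * The freshly copied page holds data that exists nowhere on backing
 * store yet, so it is marked dirty above to keep the pageout code
 * from reclaiming it without first paging/compressing it out.
 */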
5259
5260 /*
5261 * Now cope with the source page and object
5262 */
5263 if (os_ref_get_count_raw(&object->ref_count) > 1 &&
5264 cur_m->vmp_pmapped) {
5265 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5266 } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
5267 /*
5268 * We've copied the full 16K page but we're
5269 * about to call vm_fault_enter() only for
5270 * the 4K chunk we're faulting on. The other
5271 * three 4K chunks in that page could still
5272 * be pmapped in this pmap.
5273 * Since the VM object layer thinks that the
5274 * entire page has been dealt with and the
5275 * original page might no longer be needed,
5276 * it might collapse/bypass the original VM
5277 * object and free its pages, which would be
5278 * bad (and would trigger pmap_verify_free()
5279 * assertions) if the other 4K chunks are still
5280 * pmapped.
5281 */
5282 /*
5283 * XXX FBDP TODO4K: to be revisited
5284 * Technically, we need to pmap_disconnect()
5285 * only the target pmap's mappings for the 4K
5286 * chunks of this 16K VM page. If other pmaps
5287 * have PTEs on these chunks, that means that
5288 * the associated VM map must have a reference
5289 * on the VM object, so no need to worry about
5290 * those.
5291 * pmap_protect() for each 4K chunk would be
5292 * better but we'd have to check which chunks
5293 * are actually mapped before and after this
5294 * one.
5295 * A full-blown pmap_disconnect() is easier
5296 * for now but not efficient.
5297 */
5298 DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
5299 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5300 }
5301
5302 if (cur_m->vmp_clustered) {
5303 VM_PAGE_COUNT_AS_PAGEIN(cur_m);
5304 VM_PAGE_CONSUME_CLUSTERED(cur_m);
5305 vm_fault_is_sequential(cur_object, cur_offset, fault_info->behavior);
5306 }
5307 need_collapse = TRUE;
5308
5309 if (!cur_object->internal &&
5310 cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5311 /*
5312 * The object from which we've just
5313 * copied a page is most probably backed
5314 * by a vnode. We don't want to waste too
5315 * much time trying to collapse the VM objects
5316 * and create a bottleneck when several tasks
5317 * map the same file.
5318 */
5319 if (cur_object->vo_copy == object) {
5320 /*
5321 * Shared mapping or no COW yet.
5322 * We can never collapse a copy
5323 * object into its backing object.
5324 */
5325 need_collapse = FALSE;
5326 } else if (cur_object->vo_copy == object->shadow &&
5327 object->shadow->resident_page_count == 0) {
5328 /*
5329 * Shared mapping after a COW occurred.
5330 */
5331 need_collapse = FALSE;
5332 }
5333 }
5334 vm_object_unlock(cur_object);
5335
5336 if (need_collapse == FALSE) {
5337 vm_fault_collapse_skipped++;
5338 }
5339 vm_fault_collapse_total++;
5340
5341 type_of_fault = DBG_COW_FAULT;
5342 counter_inc(&vm_statistics_cow_faults);
5343 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5344 counter_inc(&current_task()->cow_faults);
5345
5346 goto FastPmapEnter;
5347 } else {
5348 /*
5349 * No page at cur_object, cur_offset... m == NULL
5350 */
5351 if (cur_object->pager_created) {
5352 vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5353
5354 if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5355 int my_fault_type;
5356 vm_compressor_options_t c_flags = C_DONT_BLOCK;
5357 bool insert_cur_object = FALSE;
5358
5359 /*
5360 * May have to talk to a pager...
5361 * if so, take the slow path by
5362 * doing a 'break' from the while (TRUE) loop
5363 *
5364 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5365 * if the compressor is active and the page exists there
5366 */
5367 if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5368 break;
5369 }
5370
5371 if (map == kernel_map || real_map == kernel_map) {
5372 /*
5373 * can't call into the compressor with the kernel_map
5374 * lock held, since the compressor may try to operate
5375 * on the kernel map in order to return an empty c_segment
5376 */
5377 break;
5378 }
5379 if (object != cur_object) {
5380 if (fault_type & VM_PROT_WRITE) {
5381 c_flags |= C_KEEP;
5382 } else {
5383 insert_cur_object = TRUE;
5384 }
5385 }
5386 if (insert_cur_object == TRUE) {
5387 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5388 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5389
5390 if (vm_object_lock_upgrade(cur_object) == FALSE) {
5391 /*
5392 * couldn't upgrade so go do a full retry
5393 * immediately since we can no longer be
5394 * certain about cur_object (since we
5395 * don't hold a reference on it)...
5396 * first drop the top object lock
5397 */
5398 vm_object_unlock(object);
5399
5400 vm_map_unlock_read(map);
5401 if (real_map != map) {
5402 vm_map_unlock(real_map);
5403 }
5404
5405 goto RetryFault;
5406 }
5407 }
5408 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
5409 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5410
5411 if (object != cur_object) {
5412 /*
5413 * we can't go for the upgrade on the top
5414 * lock since the upgrade may block waiting
5415 * for readers to drain... since we hold
5416 * cur_object locked at this point, waiting
5417 * for the readers to drain would represent
5418 * a lock order inversion since the lock order
5419 * for objects is the reference order in the
5420 * shadow chain
5421 */
5422 vm_object_unlock(object);
5423 vm_object_unlock(cur_object);
5424
5425 vm_map_unlock_read(map);
5426 if (real_map != map) {
5427 vm_map_unlock(real_map);
5428 }
5429
5430 goto RetryFault;
5431 }
5432 if (vm_object_lock_upgrade(object) == FALSE) {
5433 /*
5434 * couldn't upgrade, so explicitly take the lock
5435 * exclusively and go relookup the page since we
5436 * will have dropped the object lock and
5437 * a different thread could have inserted
5438 * a page at this offset
5439 * no need for a full retry since we're
5440 * at the top level of the object chain
5441 */
5442 vm_object_lock(object);
5443
5444 continue;
5445 }
5446 }
5447
5448 m = vm_page_grab_options(grab_options);
5449 m_object = NULL;
5450
5451 if (m == VM_PAGE_NULL) {
5452 /*
5453 * no free page currently available...
5454 * must take the slow path
5455 */
5456 break;
5457 }
5458
5459 /*
5460 * The object is and remains locked
5461 * so no need to take a
5462 * "paging_in_progress" reference.
5463 */
5464 bool shared_lock;
5465 if ((object == cur_object &&
5466 object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5467 (object != cur_object &&
5468 cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5469 shared_lock = FALSE;
5470 } else {
5471 shared_lock = TRUE;
5472 }
5473
5474 kr = vm_compressor_pager_get(
5475 cur_object->pager,
5476 (vm_object_trunc_page(cur_offset)
5477 + cur_object->paging_offset),
5478 VM_PAGE_GET_PHYS_PAGE(m),
5479 &my_fault_type,
5480 c_flags,
5481 &compressed_count_delta);
5482
5483 vm_compressor_pager_count(
5484 cur_object->pager,
5485 compressed_count_delta,
5486 shared_lock,
5487 cur_object);
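/*
 * Sketch of the contract assumed here: vm_compressor_pager_get()
 * decompresses cur_object's data at the (page-aligned) offset
 * directly into the physical page backing "m", and reports through
 * "compressed_count_delta" how many compressed slots went away
 * (with C_KEEP the compressed copy is retained, so the delta stays
 * 0).  vm_compressor_pager_count() then folds that delta into the
 * pager's compressed-page accounting, using "shared_lock" to pick
 * the appropriate atomicity for the update.
 */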
5488
5489 if (kr != KERN_SUCCESS) {
5490 vm_page_release(m, FALSE);
5491 m = VM_PAGE_NULL;
5492 }
5493 /*
5494 * If vm_compressor_pager_get() returns
5495 * KERN_MEMORY_FAILURE, then the
5496 * compressed data is permanently lost,
5497 * so return this error immediately.
5498 */
5499 if (kr == KERN_MEMORY_FAILURE) {
5500 if (object != cur_object) {
5501 vm_object_unlock(cur_object);
5502 }
5503 vm_object_unlock(object);
5504 vm_map_unlock_read(map);
5505 if (real_map != map) {
5506 vm_map_unlock(real_map);
5507 }
5508
5509 goto done;
5510 } else if (kr != KERN_SUCCESS) {
5511 break;
5512 }
5513 m->vmp_dirty = TRUE;
5514 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5515 if ((fault_type & VM_PROT_WRITE) == 0) {
5516 prot &= ~VM_PROT_WRITE;
5517 /*
5518 * The page, m, has yet to be inserted
5519 * into an object. So we are fine with
5520 * the object/cur_object lock being held
5521 * shared.
5522 */
5523 vm_page_lockspin_queues();
5524 m->vmp_unmodified_ro = true;
5525 vm_page_unlock_queues();
5526 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
5527 }
5528 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5529
5530 /*
5531 * If the object is purgeable, its
5532 * owner's purgeable ledgers will be
5533 * updated in vm_page_insert() but the
5534 * page was also accounted for in a
5535 * "compressed purgeable" ledger, so
5536 * update that now.
5537 */
5538 if (object != cur_object &&
5539 !insert_cur_object) {
5540 /*
5541 * We're not going to insert
5542 * the decompressed page into
5543 * the object it came from.
5544 *
5545 * We're dealing with a
5546 * copy-on-write fault on
5547 * "object".
5548 * We're going to decompress
5549 * the page directly into the
5550 * target "object" while
5551 * keeping the compressed
5552 * page for "cur_object", so
5553 * no ledger update in that
5554 * case.
5555 */
5556 } else if (((cur_object->purgable ==
5557 VM_PURGABLE_DENY) &&
5558 (!cur_object->vo_ledger_tag)) ||
5559 (cur_object->vo_owner ==
5560 NULL)) {
5561 /*
5562 * "cur_object" is not purgeable
5563 * and is not ledger-tagged, or
5564 * there's no owner for it,
5565 * so no owner's ledgers to
5566 * update.
5567 */
5568 } else {
5569 /*
5570 * One less compressed
5571 * purgeable/tagged page for
5572 * cur_object's owner.
5573 */
5574 if (compressed_count_delta) {
5575 vm_object_owner_compressed_update(
5576 cur_object,
5577 -1);
5578 }
5579 }
5580
5581 if (insert_cur_object) {
5582 vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5583 m_object = cur_object;
5584 } else {
5585 vm_page_insert(m, object, vm_object_trunc_page(offset));
5586 m_object = object;
5587 }
5588
5589 if (!HAS_DEFAULT_CACHEABILITY(m_object->wimg_bits & VM_WIMG_MASK)) {
5590 /*
5591 * If the page is not cacheable,
5592 * we can't let its contents
5593 * linger in the data cache
5594 * after the decompression.
5595 */
5596 pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5597 }
5598
5599 type_of_fault = my_fault_type;
5600
5601 VM_STAT_DECOMPRESSIONS();
5602
5603 if (cur_object != object) {
5604 if (insert_cur_object) {
5605 top_object = object;
5606 /*
5607 * switch to the object that has the new page
5608 */
5609 object = cur_object;
5610 object_lock_type = cur_object_lock_type;
5611 } else {
5612 vm_object_unlock(cur_object);
5613 cur_object = object;
5614 }
5615 }
5616 goto FastPmapEnter;
5617 }
5618 /*
5619 * existence map present and indicates
5620 * that the pager doesn't have this page
5621 */
5622 }
5623 if (cur_object->shadow == VM_OBJECT_NULL ||
5624 resilient_media_retry) {
5625 /*
5626 * Zero fill fault. Page gets
5627 * inserted into the original object.
5628 */
5629 if (cur_object->shadow_severed ||
5630 VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5631 cur_object == compressor_object ||
5632 is_kernel_object(cur_object)) {
5633 if (object != cur_object) {
5634 vm_object_unlock(cur_object);
5635 }
5636 vm_object_unlock(object);
5637
5638 vm_map_unlock_read(map);
5639 if (real_map != map) {
5640 vm_map_unlock(real_map);
5641 }
5642 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5643 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5644 }
5645
5646 if (cur_object->shadow_severed) {
5647 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5648 }
5649
5650 kr = KERN_MEMORY_ERROR;
5651 goto done;
5652 }
5653 if (cur_object != object) {
5654 vm_object_unlock(cur_object);
5655
5656 cur_object = object;
5657 }
5658 if (object_lock_type == OBJECT_LOCK_SHARED) {
5659 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5660
5661 if (vm_object_lock_upgrade(object) == FALSE) {
5662 /*
5663 * couldn't upgrade so do a full retry on the fault
5664 * since we dropped the object lock which
5665 * could allow another thread to insert
5666 * a page at this offset
5667 */
5668 vm_map_unlock_read(map);
5669 if (real_map != map) {
5670 vm_map_unlock(real_map);
5671 }
5672
5673 goto RetryFault;
5674 }
5675 }
5676 if (!object->internal) {
5677 panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5678 }
5679 #if MACH_ASSERT
5680 if (resilient_media_retry &&
5681 vm_fault_resilient_media_inject_error3_rate != 0 &&
5682 (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5683 /* inject an error */
5684 m_object = NULL;
5685 break;
5686 }
5687 #endif /* MACH_ASSERT */
5688 m = vm_page_alloc(object, vm_object_trunc_page(offset));
5689 m_object = NULL;
5690
5691 if (m == VM_PAGE_NULL) {
5692 /*
5693 * no free page currently available...
5694 * must take the slow path
5695 */
5696 break;
5697 }
5698 m_object = object;
5699
5700 if ((prot & VM_PROT_WRITE) &&
5701 !(fault_type & VM_PROT_WRITE) &&
5702 object->vo_copy != VM_OBJECT_NULL) {
5703 /*
5704 * This is not a write fault and
5705 * we might have a copy-on-write
5706 * obligation to honor (copy object or
5707 * "needs_copy" map entry), so do not
5708 * give write access yet.
5709 * We'll need to catch the first write
5710 * to resolve the copy-on-write by
5711 * pushing this page to a copy object
5712 * or making a shadow object.
5713 */
5714 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5715 /*
5716 * This pmap enforces extra
5717 * constraints for this set of
5718 * protections, so we can't
5719 * change the protections.
5720 * We would expect code-signing
5721 * to be bypassed in this case.
5722 */
5723 if (!fault_info->cs_bypass) {
5724 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5725 __FUNCTION__,
5726 pmap,
5727 (uint64_t)vaddr,
5728 prot,
5729 fault_info->pmap_options);
5730 }
5731 } else {
5732 prot &= ~VM_PROT_WRITE;
5733 }
5734 }
5735 if (resilient_media_retry) {
5736 /*
5737 * Not a real write, so no reason to assert.
5738 * We've just allocated a new page for this
5739 * <object,offset> so we know nobody has any
5740 * PTE pointing at any previous version of this
5741 * page and no copy-on-write is involved here.
5742 * We're just inserting a page of zeroes at this
5743 * stage of the shadow chain because the pager
5744 * for the lowest object in the shadow chain
5745 * said it could not provide that page and we
5746 * want to avoid failing the fault and causing
5747 * a crash on this "resilient_media" mapping.
5748 */
5749 } else {
5750 assertf(!((fault_type & VM_PROT_WRITE) && object->vo_copy),
5751 "map %p va 0x%llx wrong path for write fault (fault_type 0x%x) on object %p with copy %p\n",
5752 map, (uint64_t)vaddr, fault_type, object, object->vo_copy);
5753 }
5754
5755 vm_object_t saved_copy_object;
5756 uint32_t saved_copy_version;
5757 saved_copy_object = object->vo_copy;
5758 saved_copy_version = object->vo_copy_version;
5759
5760 /*
5761 * Zeroing the page and entering it into the pmap
5762 * represents a significant amount of the zero fill fault handler's work.
5763 *
5764 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5765 * now that we've inserted the page into the vm object.
5766 * Before dropping the lock, we need to check protection bits and set the
5767 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5768 * zero it, and do the pmap enter. We'll need to reacquire the lock
5769 * to clear the busy bit and wake up any waiters.
5770 */
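/*
 * A minimal sketch of that optimistic scheme, assuming "object" is
 * locked here and that vo_copy_version is bumped whenever vo_copy
 * changes:
 *
 *	saved = { object->vo_copy, object->vo_copy_version };
 *	... mark page busy, unlock, zero fill, pmap enter ...
 *	vm_object_lock(object);
 *	if (object->vo_copy != saved.copy ||
 *	    object->vo_copy_version != saved.version)
 *		prot &= ~VM_PROT_WRITE;    (re-fault to resolve CoW)
 *
 * Comparing the version as well as the pointer catches the case
 * where the copy object was torn down and a new one was allocated
 * at the same address while the lock was dropped.
 */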
5771 vm_fault_cs_clear(m);
5772 m->vmp_pmapped = TRUE;
5773 if (map->no_zero_fill) {
5774 type_of_fault = DBG_NZF_PAGE_FAULT;
5775 } else {
5776 type_of_fault = DBG_ZERO_FILL_FAULT;
5777 }
5778 {
5779 pmap_t destination_pmap;
5780 vm_map_offset_t destination_pmap_vaddr;
5781 vm_prot_t enter_fault_type;
5782 if (caller_pmap) {
5783 destination_pmap = caller_pmap;
5784 destination_pmap_vaddr = caller_pmap_addr;
5785 } else {
5786 destination_pmap = pmap;
5787 destination_pmap_vaddr = vaddr;
5788 }
5789 if (fault_info->fi_change_wiring) {
5790 enter_fault_type = VM_PROT_NONE;
5791 } else {
5792 enter_fault_type = caller_prot;
5793 }
5794 assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5795 kr = vm_fault_enter_prepare(m,
5796 destination_pmap,
5797 destination_pmap_vaddr,
5798 &prot,
5799 caller_prot,
5800 fault_page_size,
5801 fault_phys_offset,
5802 enter_fault_type,
5803 fault_info,
5804 &type_of_fault,
5805 &page_needs_data_sync);
5806 if (kr != KERN_SUCCESS) {
5807 goto zero_fill_cleanup;
5808 }
5809
5810 if (object_is_contended) {
5811 /*
5812 * At this point the page is in the vm object, but not on a paging queue.
5813 * Since it's accessible to another thread but its contents are invalid
5814 * (it hasn't been zeroed), mark it busy before dropping the object lock.
5815 */
5816 m->vmp_busy = TRUE;
5817 vm_object_paging_begin(object); /* keep object alive */
5818 vm_object_unlock(object);
5819 }
5820 if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5821 /*
5822 * Now zero fill page...
5823 * the page is probably going to
5824 * be written soon, so don't bother
5825 * to clear the modified bit
5826 *
5827 * NOTE: This code holds the map
5828 * lock across the zero fill.
5829 */
5830 vm_page_zero_fill(m);
5833 counter_inc(&vm_statistics_zero_fill_count);
5834 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5835 }
5836
5837 if (object_is_contended) {
5838 /*
5839 * It's not safe to do the pmap_enter() without holding
5840 * the object lock because its "vo_copy" could change.
5841 */
5842 object_is_contended = false; /* get out of that code path */
5843
5844 vm_object_lock(object);
5845 vm_object_paging_end(object);
5846 if (object->vo_copy != saved_copy_object ||
5847 object->vo_copy_version != saved_copy_version) {
5848 /*
5849 * The COPY_DELAY copy-on-write situation for
5850 * this VM object has changed while it was
5851 * unlocked, so do not grant write access to
5852 * this page.
5853 * The write access will fault again and we'll
5854 * resolve the copy-on-write then.
5855 */
5856 if (pmap_has_prot_policy(pmap,
5857 fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE,
5858 prot)) {
5859 /* we should not do CoW on pmap_has_prot_policy mappings */
5860 panic("%s: map %p va 0x%llx obj %p,%u saved %p,%u: unexpected CoW",
5861 __FUNCTION__,
5862 map, (uint64_t)vaddr,
5863 object, object->vo_copy_version,
5864 saved_copy_object, saved_copy_version);
5865 } else {
5866 /* the pmap layer is OK with changing the PTE's prot */
5867 prot &= ~VM_PROT_WRITE;
5868 }
5869 }
5870 }
5871
5872 if (page_needs_data_sync) {
5873 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
5874 }
5875
5876 if (top_object != VM_OBJECT_NULL) {
5877 need_retry_ptr = &need_retry;
5878 } else {
5879 need_retry_ptr = NULL;
5880 }
5881 if (fault_info->fi_xnu_user_debug &&
5882 !object->code_signed) {
5883 fault_info->pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
5884 }
5885 if (object_is_contended) {
5886 panic("object_is_contended");
5887 kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
5888 fault_page_size, fault_phys_offset,
5889 m, &prot, caller_prot, enter_fault_type, wired,
5890 fault_info->pmap_options, need_retry_ptr);
5891 vm_object_lock(object);
5892 assertf(!((prot & VM_PROT_WRITE) && object->vo_copy),
5893 "prot 0x%x object %p copy %p\n",
5894 prot, object, object->vo_copy);
5895 } else {
5896 kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
5897 fault_page_size, fault_phys_offset,
5898 m, &prot, caller_prot, enter_fault_type, wired,
5899 fault_info->pmap_options, need_retry_ptr, &object_lock_type);
5900 }
5901 }
5902 zero_fill_cleanup:
5903 if (!VM_DYNAMIC_PAGING_ENABLED() &&
5904 (object->purgable == VM_PURGABLE_DENY ||
5905 object->purgable == VM_PURGABLE_NONVOLATILE ||
5906 object->purgable == VM_PURGABLE_VOLATILE)) {
5907 vm_page_lockspin_queues();
5908 if (!VM_DYNAMIC_PAGING_ENABLED()) {
5909 vm_fault_enqueue_throttled_locked(m);
5910 }
5911 vm_page_unlock_queues();
5912 }
5913 vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, &type_of_fault, kr);
5914
5915 if (__improbable(rtfault &&
5916 !m->vmp_realtime &&
5917 vm_pageout_protect_realtime)) {
5918 vm_page_lock_queues();
5919 if (!m->vmp_realtime) {
5920 m->vmp_realtime = true;
5921 vm_page_realtime_count++;
5922 }
5923 vm_page_unlock_queues();
5924 }
5925 vm_fault_complete(
5926 map,
5927 real_map,
5928 object,
5929 m_object,
5930 m,
5931 offset,
5932 trace_real_vaddr,
5933 fault_info,
5934 caller_prot,
5935 real_vaddr,
5936 type_of_fault,
5937 need_retry,
5938 kr,
5939 physpage_p,
5940 prot,
5941 top_object,
5942 need_collapse,
5943 cur_offset,
5944 fault_type,
5945 &written_on_object,
5946 &written_on_pager,
5947 &written_on_offset);
5948 top_object = VM_OBJECT_NULL;
5949 if (need_retry == TRUE) {
5950 /*
5951 * vm_fault_enter couldn't complete the PMAP_ENTER...
5952 * at this point we don't hold any locks so it's safe
5953 * to ask the pmap layer to expand the page table to
5954 * accommodate this mapping... once expanded, we'll
5955 * re-drive the fault which should result in vm_fault_enter
5956 * being able to successfully enter the mapping this time around
5957 */
5958 (void)pmap_enter_options(
5959 pmap, vaddr, 0, 0, 0, 0, 0,
5960 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5961
5962 need_retry = FALSE;
5963 goto RetryFault;
5964 }
5965 goto done;
5966 }
5967 /*
5968 * On to the next level in the shadow chain
5969 */
5970 cur_offset += cur_object->vo_shadow_offset;
5971 new_object = cur_object->shadow;
5972 fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
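/*
 * Worked example of the arithmetic above, assuming a 16K PAGE_SIZE
 * with 4K mappings: if adding vo_shadow_offset leaves cur_offset at
 * 0x5000, vm_object_trunc_page() yields 0x4000 and fault_phys_offset
 * becomes 0x1000, i.e. we will look up the 16K page at offset 0x4000
 * in the next object and fault on its second 4K chunk.
 */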
5973
5974 /*
5975 * take the new_object's lock with the indicated state
5976 */
5977 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5978 vm_object_lock_shared(new_object);
5979 } else {
5980 vm_object_lock(new_object);
5981 }
5982
5983 if (cur_object != object) {
5984 vm_object_unlock(cur_object);
5985 }
5986
5987 cur_object = new_object;
5988
5989 continue;
5990 }
5991 }
5992 /*
5993 * Cleanup from fast fault failure. Drop any object
5994 * lock other than original and drop map lock.
5995 */
5996 if (object != cur_object) {
5997 vm_object_unlock(cur_object);
5998 }
5999
6000 /*
6001 * must own the object lock exclusively at this point
6002 */
6003 if (object_lock_type == OBJECT_LOCK_SHARED) {
6004 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
6005
6006 if (vm_object_lock_upgrade(object) == FALSE) {
6007 /*
6008 * couldn't upgrade, so explicitly
6009 * take the lock exclusively
6010 * no need to retry the fault at this
6011 * point since "vm_fault_page" will
6012 * completely re-evaluate the state
6013 */
6014 vm_object_lock(object);
6015 }
6016 }
6017
6018 handle_copy_delay:
6019 vm_map_unlock_read(map);
6020 if (real_map != map) {
6021 vm_map_unlock(real_map);
6022 }
6023
6024 if (__improbable(object == compressor_object ||
6025 is_kernel_object(object))) {
6026 /*
6027 * These objects are explicitly managed and populated by the
6028 * kernel. The virtual ranges backed by these objects should
6029 * either have wired pages or "holes" that are not supposed to
6030 * be accessed at all until they get explicitly populated.
6031 * We should never have to resolve a fault on a mapping backed
6032 * by one of these VM objects and providing a zero-filled page
6033 * would be wrong here, so let's fail the fault and let the
6034 * caller crash or recover.
6035 */
6036 vm_object_unlock(object);
6037 kr = KERN_MEMORY_ERROR;
6038 goto done;
6039 }
6040
6041 resilient_media_ref_transfer = false;
6042 if (resilient_media_retry) {
6043 /*
6044 * We could get here if we failed to get a free page
6045 * to zero-fill and had to take the slow path again.
6046 * Reset our "recovery-from-failed-media" state.
6047 */
6048 assert(resilient_media_object != VM_OBJECT_NULL);
6049 assert(resilient_media_offset != (vm_object_offset_t)-1);
6050 /* release our extra reference on failed object */
6051 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6052 if (object == resilient_media_object) {
6053 /*
6054 * We're holding "object"'s lock, so we can't release
6055 * our extra reference at this point.
6056 * We need an extra reference on "object" anyway
6057 * (see below), so let's just transfer this reference.
6058 */
6059 resilient_media_ref_transfer = true;
6060 } else {
6061 vm_object_deallocate(resilient_media_object);
6062 }
6063 resilient_media_object = VM_OBJECT_NULL;
6064 resilient_media_offset = (vm_object_offset_t)-1;
6065 resilient_media_retry = false;
6066 vm_fault_resilient_media_abort2++;
6067 }
6068
6069 /*
6070 * Make a reference to this object to
6071 * prevent its disposal while we are messing with
6072 * it. Once we have the reference, the map is free
6073 * to be diddled. Since objects reference their
6074 * shadows (and copies), they will stay around as well.
6075 */
6076 if (resilient_media_ref_transfer) {
6077 /* we already have an extra reference on this object */
6078 resilient_media_ref_transfer = false;
6079 } else {
6080 vm_object_reference_locked(object);
6081 }
6082 vm_object_paging_begin(object);
6083
6084 set_thread_pagein_error(cthread, 0);
6085 error_code = 0;
6086
6087 result_page = VM_PAGE_NULL;
6088 vm_fault_return_t err = vm_fault_page(object, offset, fault_type,
6089 (fault_info->fi_change_wiring && !wired),
6090 FALSE, /* page not looked up */
6091 &prot, &result_page, &top_page,
6092 &type_of_fault,
6093 &error_code, map->no_zero_fill,
6094 fault_info);
6095
6096 /*
6097 * if err != VM_FAULT_SUCCESS, then the paging reference
6098 * has been dropped and the object unlocked... the ref_count
6099 * is still held
6100 *
6101 * if err == VM_FAULT_SUCCESS, then the paging reference
6102 * is still held along with the ref_count on the original object
6103 *
6104 * the object is returned locked with a paging reference
6105 *
6106 * if top_page != NULL, then it's BUSY and the
6107 * object it belongs to has a paging reference
6108 * but is returned unlocked
6109 */
6110 if (err != VM_FAULT_SUCCESS &&
6111 err != VM_FAULT_SUCCESS_NO_VM_PAGE) {
6112 if (err == VM_FAULT_MEMORY_ERROR &&
6113 fault_info->resilient_media) {
6114 assertf(object->internal, "object %p", object);
6115 /*
6116 * This fault failed but the mapping was
6117 * "media resilient", so we'll retry the fault in
6118 * recovery mode to get a zero-filled page in the
6119 * top object.
6120 * Keep the reference on the failing object so
6121 * that we can check that the mapping is still
6122 * pointing to it when we retry the fault.
6123 */
6124 // printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
6125 assert(!resilient_media_retry); /* no double retry */
6126 assert(resilient_media_object == VM_OBJECT_NULL);
6127 assert(resilient_media_offset == (vm_object_offset_t)-1);
6128 resilient_media_retry = true;
6129 resilient_media_object = object;
6130 resilient_media_offset = offset;
6131 // printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
6132 vm_fault_resilient_media_initiate++;
6133 goto RetryFault;
6134 } else {
6135 /*
6136 * we didn't succeed, lose the object reference
6137 * immediately.
6138 */
6139 vm_object_deallocate(object);
6140 object = VM_OBJECT_NULL; /* no longer valid */
6141 }
6142
6143 /*
6144 * See why we failed, and take corrective action.
6145 */
6146 switch (err) {
6147 case VM_FAULT_SUCCESS:
6148 case VM_FAULT_SUCCESS_NO_VM_PAGE:
6149 /* These aren't possible but needed to make the switch exhaustive */
6150 break;
6151 case VM_FAULT_MEMORY_SHORTAGE:
6152 if (vm_page_wait((fault_info->fi_change_wiring) ?
6153 THREAD_UNINT :
6154 THREAD_ABORTSAFE)) {
6155 goto RetryFault;
6156 }
6157 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_MEMORY_SHORTAGE), 0 /* arg */);
6158 OS_FALLTHROUGH;
6159 case VM_FAULT_INTERRUPTED:
6160 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
6161 kr = KERN_ABORTED;
6162 goto done;
6163 case VM_FAULT_RETRY:
6164 goto RetryFault;
6165 case VM_FAULT_MEMORY_ERROR:
6166 if (error_code) {
6167 kr = error_code;
6168 } else {
6169 kr = KERN_MEMORY_ERROR;
6170 }
6171 goto done;
6172 case VM_FAULT_BUSY:
6173 kr = KERN_ALREADY_WAITING;
6174 goto done;
6175 }
6176 }
6177 m = result_page;
6178 m_object = NULL;
6179
6180 if (m != VM_PAGE_NULL) {
6181 m_object = VM_PAGE_OBJECT(m);
6182 assert((fault_info->fi_change_wiring && !wired) ?
6183 (top_page == VM_PAGE_NULL) :
6184 ((top_page == VM_PAGE_NULL) == (m_object == object)));
6185 }
6186
6187 /*
6188 * What to do with the resulting page from vm_fault_page
6189 * if it doesn't get entered into the physical map:
6190 */
6191 #define RELEASE_PAGE(m) \
6192 MACRO_BEGIN \
6193 vm_page_wakeup_done(VM_PAGE_OBJECT(m), m); \
6194 if ( !VM_PAGE_PAGEABLE(m)) { \
6195 vm_page_lockspin_queues(); \
6196 if ( !VM_PAGE_PAGEABLE(m)) \
6197 vm_page_activate(m); \
6198 vm_page_unlock_queues(); \
6199 } \
6200 MACRO_END
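/*
 * Note on RELEASE_PAGE: besides clearing the busy bit and waking any
 * waiters, it activates a page that is not yet on a pageout queue so
 * that an abandoned page remains visible to the pageout daemon
 * instead of being stranded off-queue.
 */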
6201
6202
6203 object_locks_dropped = FALSE;
6204 /*
6205 * We must verify that the maps have not changed
6206 * since our last lookup. vm_map_verify() needs the
6207 * map lock (shared) but we are holding object locks.
6208 * So we do a try_lock() first and, if that fails, we
6209 * drop the object locks and go in for the map lock again.
6210 */
6211 if (m != VM_PAGE_NULL) {
6212 old_copy_object = m_object->vo_copy;
6213 old_copy_version = m_object->vo_copy_version;
6214 } else {
6215 old_copy_object = VM_OBJECT_NULL;
6216 old_copy_version = 0;
6217 }
6218 if (!vm_map_try_lock_read(original_map)) {
6219 if (m != VM_PAGE_NULL) {
6220 vm_object_unlock(m_object);
6221 } else {
6222 vm_object_unlock(object);
6223 }
6224
6225 object_locks_dropped = TRUE;
6226
6227 vm_map_lock_read(original_map);
6228 }
6229
6230 if ((map != original_map) || !vm_map_verify(map, &version)) {
6231 if (object_locks_dropped == FALSE) {
6232 if (m != VM_PAGE_NULL) {
6233 vm_object_unlock(m_object);
6234 } else {
6235 vm_object_unlock(object);
6236 }
6237
6238 object_locks_dropped = TRUE;
6239 }
6240
6241 /*
6242 * no object locks are held at this point
6243 */
6244 vm_object_t retry_object;
6245 vm_object_offset_t retry_offset;
6246 vm_prot_t retry_prot;
6247
6248 /*
6249 * To avoid trying to write_lock the map while another
6250 * thread has it read_locked (in vm_map_pageable), we
6251 * do not try for write permission. If the page is
6252 * still writable, we will get write permission. If it
6253 * is not, or has been marked needs_copy, we enter the
6254 * mapping without write permission, and will merely
6255 * take another fault.
6256 */
6257 map = original_map;
6258
6259 kr = vm_map_lookup_and_lock_object(&map, vaddr,
6260 fault_type & ~VM_PROT_WRITE,
6261 OBJECT_LOCK_EXCLUSIVE, &version,
6262 &retry_object, &retry_offset, &retry_prot,
6263 &wired,
6264 fault_info,
6265 &real_map,
6266 NULL);
6267 pmap = real_map->pmap;
6268
6269 if (kr != KERN_SUCCESS) {
6270 vm_map_unlock_read(map);
6271
6272 if (m != VM_PAGE_NULL) {
6273 assert(VM_PAGE_OBJECT(m) == m_object);
6274
6275 /*
6276 * retake the lock so that
6277 * we can drop the paging reference
6278 * in vm_fault_cleanup and do the
6279 * vm_page_wakeup_done() in RELEASE_PAGE
6280 */
6281 vm_object_lock(m_object);
6282
6283 RELEASE_PAGE(m);
6284
6285 vm_fault_cleanup(m_object, top_page);
6286 } else {
6287 /*
6288 * retake the lock so that
6289 * we can drop the paging reference
6290 * in vm_fault_cleanup
6291 */
6292 vm_object_lock(object);
6293
6294 vm_fault_cleanup(object, top_page);
6295 }
6296 vm_object_deallocate(object);
6297
6298 if (kr == KERN_INVALID_ADDRESS) {
6299 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
6300 }
6301 goto done;
6302 }
6303 vm_object_unlock(retry_object);
6304
6305 if ((retry_object != object) || (retry_offset != offset)) {
6306 vm_map_unlock_read(map);
6307 if (real_map != map) {
6308 vm_map_unlock(real_map);
6309 }
6310
6311 if (m != VM_PAGE_NULL) {
6312 assert(VM_PAGE_OBJECT(m) == m_object);
6313
6314 /*
6315 * retake the lock so that
6316 * we can drop the paging reference
6317 * in vm_fault_cleanup and do the
6318 * vm_page_wakeup_done() in RELEASE_PAGE
6319 */
6320 vm_object_lock(m_object);
6321
6322 RELEASE_PAGE(m);
6323
6324 vm_fault_cleanup(m_object, top_page);
6325 } else {
6326 /*
6327 * retake the lock so that
6328 * we can drop the paging reference
6329 * in vm_fault_cleanup
6330 */
6331 vm_object_lock(object);
6332
6333 vm_fault_cleanup(object, top_page);
6334 }
6335 vm_object_deallocate(object);
6336
6337 goto RetryFault;
6338 }
6339 /*
6340 * Check whether the protection has changed or the object
6341 * has been copied while we left the map unlocked.
6342 */
6343 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
6344 /* If the pmap layer cares, pass the full set. */
6345 prot = retry_prot;
6346 } else {
6347 prot &= retry_prot;
6348 }
6349 }
6350
6351 if (object_locks_dropped == TRUE) {
6352 if (m != VM_PAGE_NULL) {
6353 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6354 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6355 vm_object_lock(m_object);
6356 } else {
6357 vm_object_lock(object);
6358 }
6359
6360 object_locks_dropped = FALSE;
6361 }
6362
6363 if ((prot & VM_PROT_WRITE) &&
6364 m != VM_PAGE_NULL &&
6365 (m_object->vo_copy != old_copy_object ||
6366 m_object->vo_copy_version != old_copy_version)) {
6367 /*
6368 * The copy object changed while the top-level object
6369 * was unlocked, so take away write permission.
6370 */
6371 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
6372 /*
6373 * This pmap enforces extra constraints for this set
6374 * of protections, so we can't change the protections.
6375 * This mapping should have been setup to avoid
6376 * copy-on-write since that requires removing write
6377 * access.
6378 */
6379 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x m%p obj %p copyobj %p",
6380 __FUNCTION__, pmap, (uint64_t)vaddr, prot,
6381 fault_info->pmap_options,
6382 m, m_object, m_object->vo_copy);
6383 }
6384 prot &= ~VM_PROT_WRITE;
6385 }
6386
6387 if (!need_copy &&
6388 !fault_info->no_copy_on_read &&
6389 m != VM_PAGE_NULL &&
6390 VM_PAGE_OBJECT(m) != object &&
6391 !VM_PAGE_OBJECT(m)->pager_trusted &&
6392 vm_protect_privileged_from_untrusted &&
6393 !VM_PAGE_OBJECT(m)->code_signed &&
6394 current_proc_is_privileged()) {
6395 /*
6396 * We found the page we want in an "untrusted" VM object
6397 * down the shadow chain. Since the target is "privileged"
6398 * we want to perform a copy-on-read of that page, so that the
6399 * mapped object gets a stable copy and does not have to
6400 * rely on the "untrusted" object to provide the same
6401 * contents if the page gets reclaimed and has to be paged
6402 * in again later on.
6403 *
6404 * Special case: if the mapping is executable and the untrusted
6405 * object is code-signed and the process is "cs_enforced", we
6406 * do not copy-on-read because that would break code-signing
6407 * enforcement expectations (an executable page must belong
6408 * to a code-signed object) and we can rely on code-signing
6409 * to re-validate the page if it gets evicted and paged back in.
6410 */
6411 // printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
6412 vm_copied_on_read++;
6413 need_copy_on_read = TRUE;
6414 need_copy = TRUE;
6415 } else {
6416 need_copy_on_read = FALSE;
6417 }
6418
6419 /*
6420 * If we want to wire down this page, but no longer have
6421 * adequate permissions, we must start all over.
6422 * If we decided to copy-on-read, we must also start all over.
6423 */
6424 if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
6425 need_copy_on_read) {
6426 vm_map_unlock_read(map);
6427 if (real_map != map) {
6428 vm_map_unlock(real_map);
6429 }
6430
6431 if (m != VM_PAGE_NULL) {
6432 assert(VM_PAGE_OBJECT(m) == m_object);
6433
6434 RELEASE_PAGE(m);
6435
6436 vm_fault_cleanup(m_object, top_page);
6437 } else {
6438 vm_fault_cleanup(object, top_page);
6439 }
6440
6441 vm_object_deallocate(object);
6442
6443 goto RetryFault;
6444 }
6445 if (m != VM_PAGE_NULL) {
6446 /*
6447 * Put this page into the physical map.
6448 * We had to do the unlock above because pmap_enter
6449 * may cause other faults. The page may be on
6450 * the pageout queues. If the pageout daemon comes
6451 * across the page, it will remove it from the queues.
6452 */
6453 if (fault_page_size < PAGE_SIZE) {
6454 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
6455 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
6456 fault_phys_offset < PAGE_SIZE),
6457 "0x%llx\n", (uint64_t)fault_phys_offset);
6458 } else {
6459 assertf(fault_phys_offset == 0,
6460 "0x%llx\n", (uint64_t)fault_phys_offset);
6461 }
6462 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6463 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6464 if (caller_pmap) {
6465 kr = vm_fault_enter(m,
6466 caller_pmap,
6467 caller_pmap_addr,
6468 fault_page_size,
6469 fault_phys_offset,
6470 prot,
6471 caller_prot,
6472 wired,
6473 wire_tag,
6474 fault_info,
6475 NULL,
6476 &type_of_fault,
6477 &object_lock_type);
6478 } else {
6479 kr = vm_fault_enter(m,
6480 pmap,
6481 vaddr,
6482 fault_page_size,
6483 fault_phys_offset,
6484 prot,
6485 caller_prot,
6486 wired,
6487 wire_tag,
6488 fault_info,
6489 NULL,
6490 &type_of_fault,
6491 &object_lock_type);
6492 }
6493 assert(VM_PAGE_OBJECT(m) == m_object);
6494
6495 {
6496 int event_code = 0;
6497
6498 if (m_object->internal) {
6499 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6500 } else if (m_object->object_is_shared_cache) {
6501 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6502 } else {
6503 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6504 }
6505
6506 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid());
6507 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid());
6508
6509 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
6510 }
6511 if (kr != KERN_SUCCESS) {
6512 /* abort this page fault */
6513 vm_map_unlock_read(map);
6514 if (real_map != map) {
6515 vm_map_unlock(real_map);
6516 }
6517 vm_page_wakeup_done(m_object, m);
6518 vm_fault_cleanup(m_object, top_page);
6519 vm_object_deallocate(object);
6520 goto done;
6521 }
6522 if (physpage_p != NULL) {
6523 /* for vm_map_wire_and_extract() */
6524 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6525 if (prot & VM_PROT_WRITE) {
6526 vm_object_lock_assert_exclusive(m_object);
6527 m->vmp_dirty = TRUE;
6528 }
6529 }
6530 } else {
6531 vm_map_entry_t entry;
6532 vm_map_offset_t laddr;
6533 vm_map_offset_t ldelta, hdelta;
6534
6535 /*
6536 * do a pmap block mapping from the physical address
6537 * in the object
6538 */
6539
6540 if (real_map != map) {
6541 vm_map_unlock(real_map);
6542 }
6543
6544 if (original_map != map) {
6545 vm_map_unlock_read(map);
6546 vm_map_lock_read(original_map);
6547 map = original_map;
6548 }
6549 real_map = map;
6550
6551 laddr = vaddr;
6552 hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
6553
6554 while (vm_map_lookup_entry(map, laddr, &entry)) {
6555 if (ldelta > (laddr - entry->vme_start)) {
6556 ldelta = laddr - entry->vme_start;
6557 }
6558 if (hdelta > (entry->vme_end - laddr)) {
6559 hdelta = entry->vme_end - laddr;
6560 }
6561 if (entry->is_sub_map) {
6562 vm_map_t sub_map;
6563 bool use_pmap;
6564
6565 laddr = ((laddr - entry->vme_start)
6566 + VME_OFFSET(entry));
6567 vm_map_lock_read(VME_SUBMAP(entry));
6568 sub_map = VME_SUBMAP(entry);
6569 use_pmap = entry->use_pmap;
6570 entry = VM_MAP_ENTRY_NULL; /* not valid after unlock */
6571 if (map != real_map) {
6572 vm_map_unlock_read(map);
6573 }
6574 if (use_pmap) {
6575 vm_map_unlock_read(real_map);
6576 real_map = sub_map;
6577 }
6578 map = sub_map;
6579 } else {
6580 break;
6581 }
6582 }
6583
6584 if (vm_map_lookup_entry(map, laddr, &entry) &&
6585 (!entry->is_sub_map) &&
6586 (object != VM_OBJECT_NULL) &&
6587 (VME_OBJECT(entry) == object)) {
6588 uint16_t superpage;
6589
6590 if (!object->pager_created &&
6591 object->phys_contiguous &&
6592 VME_OFFSET(entry) == 0 &&
6593 (entry->vme_end - entry->vme_start == object->vo_size) &&
6594 VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6595 superpage = VM_MEM_SUPERPAGE;
6596 } else {
6597 superpage = 0;
6598 }
6599
6600 if (superpage && physpage_p) {
6601 /* for vm_map_wire_and_extract() */
6602 *physpage_p = (ppnum_t)
6603 ((((vm_map_offset_t)
6604 object->vo_shadow_offset)
6605 + VME_OFFSET(entry)
6606 + (laddr - entry->vme_start))
6607 >> PAGE_SHIFT);
6608 }
6609
6610 /*
6611 * Set up a block mapped area
6612 */
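/*
 * Worked example of the range computed below: the lookup loop above
 * clamped ldelta/hdelta to the distances from the faulting address
 * to the enclosing entry's start and end, so the block mapping spans
 * [vaddr - ldelta, vaddr + hdelta) (or the caller_pmap_addr
 * equivalent), covering (ldelta + hdelta) >> fault_page_shift pages
 * of the physically contiguous object around the fault.
 */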
6613 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6614 pmap_t block_map_pmap;
6615 addr64_t block_map_va;
6616 pmap_paddr_t block_map_pa = (pmap_paddr_t)(((vm_map_offset_t)(object->vo_shadow_offset)) +
6617 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta);
6618 int block_map_wimg = VM_WIMG_MASK & (int)object->wimg_bits;
6619 if (caller_pmap) {
6620 block_map_pmap = caller_pmap;
6621 block_map_va = (addr64_t)(caller_pmap_addr - ldelta);
6622 } else {
6623 block_map_pmap = real_map->pmap;
6624 block_map_va = (addr64_t)(vaddr - ldelta);
6625 }
6626 kr = pmap_map_block_addr(block_map_pmap,
6627 block_map_va,
6628 block_map_pa,
6629 (uint32_t)((ldelta + hdelta) >> fault_page_shift),
6630 prot,
6631 block_map_wimg | superpage,
6632 0);
6633
6634 if (kr != KERN_SUCCESS) {
6635 goto cleanup;
6636 }
6637 }
6638 }
6639
6640 /*
6641 * Success
6642 */
6643 kr = KERN_SUCCESS;
6644
6645 /*
6646 * TODO: could most of the done cases just use cleanup?
6647 */
6648 cleanup:
6649 /*
6650 * Unlock everything, and return
6651 */
6652 vm_map_unlock_read(map);
6653 if (real_map != map) {
6654 vm_map_unlock(real_map);
6655 }
6656
6657 if (m != VM_PAGE_NULL) {
6658 if (__improbable(rtfault &&
6659 !m->vmp_realtime &&
6660 vm_pageout_protect_realtime)) {
6661 vm_page_lock_queues();
6662 if (!m->vmp_realtime) {
6663 m->vmp_realtime = true;
6664 vm_page_realtime_count++;
6665 }
6666 vm_page_unlock_queues();
6667 }
6668 assert(VM_PAGE_OBJECT(m) == m_object);
6669
6670 if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6671 vm_object_paging_begin(m_object);
6672
6673 assert(written_on_object == VM_OBJECT_NULL);
6674 written_on_object = m_object;
6675 written_on_pager = m_object->pager;
6676 written_on_offset = m_object->paging_offset + m->vmp_offset;
6677 }
6678 vm_page_wakeup_done(m_object, m);
6679
6680 vm_fault_cleanup(m_object, top_page);
6681 } else {
6682 vm_fault_cleanup(object, top_page);
6683 }
6684
6685 vm_object_deallocate(object);
6686
6687 #undef RELEASE_PAGE
6688
6689 done:
6690 thread_interrupt_level(interruptible_state);
6691
6692 if (resilient_media_object != VM_OBJECT_NULL) {
6693 assert(resilient_media_retry);
6694 assert(resilient_media_offset != (vm_object_offset_t)-1);
6695 /* release extra reference on failed object */
6696 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6697 vm_object_deallocate(resilient_media_object);
6698 resilient_media_object = VM_OBJECT_NULL;
6699 resilient_media_offset = (vm_object_offset_t)-1;
6700 resilient_media_retry = false;
6701 vm_fault_resilient_media_release++;
6702 }
6703 assert(!resilient_media_retry);
6704
6705 /*
6706 * Only I/O throttle on faults which cause a pagein/swapin.
6707 */
6708 if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6709 throttle_lowpri_io(1);
6710 } else {
6711 if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6712 if ((throttle_delay = vm_page_throttled(TRUE))) {
6713 if (vm_debug_events) {
6714 if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6715 VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6716 } else if (type_of_fault == DBG_COW_FAULT) {
6717 VM_DEBUG_EVENT(vmf_cowdelay, DBG_VM_FAULT_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6718 } else {
6719 VM_DEBUG_EVENT(vmf_zfdelay, DBG_VM_FAULT_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6720 }
6721 }
6722 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
6723 }
6724 }
6725 }
6726
6727 if (written_on_object) {
6728 vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6729
6730 vm_object_lock(written_on_object);
6731 vm_object_paging_end(written_on_object);
6732 vm_object_unlock(written_on_object);
6733
6734 written_on_object = VM_OBJECT_NULL;
6735 }
6736
6737 if (rtfault) {
6738 vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6739 }
6740
6741 KDBG_RELEASE(
6742 (VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
6743 ((uint64_t)trace_vaddr >> 32),
6744 trace_vaddr,
6745 kr,
6746 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault));
6747
6748 if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6749 DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6750 }
6751
6752 return kr;
6753 }
6754
6755 /*
6756 * vm_fault_wire:
6757 *
6758 * Wire down a range of virtual addresses in a map.
6759 */
6760 kern_return_t
6761 vm_fault_wire(
6762 vm_map_t map,
6763 vm_map_entry_t entry,
6764 vm_prot_t prot,
6765 vm_tag_t wire_tag,
6766 pmap_t pmap,
6767 vm_map_offset_t pmap_addr,
6768 ppnum_t *physpage_p)
6769 {
6770 vm_map_offset_t va;
6771 vm_map_offset_t end_addr = entry->vme_end;
6772 kern_return_t rc;
6773 vm_map_size_t effective_page_size;
6774
6775 assert(entry->in_transition);
6776
6777 if (!entry->is_sub_map &&
6778 VME_OBJECT(entry) != VM_OBJECT_NULL &&
6779 VME_OBJECT(entry)->phys_contiguous) {
6780 return KERN_SUCCESS;
6781 }
6782
6783 /*
6784 * Inform the physical mapping system that the
6785 * range of addresses may not fault, so that
6786 * page tables and such can be locked down as well.
6787 */
6788
6789 pmap_pageable(pmap, pmap_addr,
6790 pmap_addr + (end_addr - entry->vme_start), FALSE);
6791
6792 /*
6793 * We simulate a fault to get the page and enter it
6794 * in the physical map.
6795 */
6796
6797 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
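/*
 * Example: wiring a 4K-page submap on a 16K-page kernel gives
 * effective_page_size == 4K, so each 4K chunk gets its own
 * vm_fault_wire_fast() attempt; when the map and kernel page sizes
 * match, the loop simply advances one page at a time.
 */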
6798 for (va = entry->vme_start;
6799 va < end_addr;
6800 va += effective_page_size) {
6801 rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6802 pmap_addr + (va - entry->vme_start),
6803 physpage_p);
6804 if (rc != KERN_SUCCESS) {
6805 struct vm_object_fault_info fault_info = {
6806 .interruptible = (pmap == kernel_pmap) ? THREAD_UNINT : THREAD_ABORTSAFE,
6807 .behavior = VM_BEHAVIOR_SEQUENTIAL,
6808 .fi_change_wiring = true,
6809 };
6810 if (os_sub_overflow(end_addr, va, &fault_info.cluster_size)) {
6811 fault_info.cluster_size = UPL_SIZE_MAX;
6812 }
6813 rc = vm_fault_internal(map, va, prot, wire_tag,
6814 pmap,
6815 (pmap_addr +
6816 (va - entry->vme_start)),
6817 physpage_p,
6818 &fault_info);
6819 DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6820 }
6821
6822 if (rc != KERN_SUCCESS) {
6823 struct vm_map_entry tmp_entry = *entry;
6824
6825 /* unwire wired pages */
6826 tmp_entry.vme_end = va;
6827 vm_fault_unwire(map, &tmp_entry, FALSE,
6828 pmap, pmap_addr, tmp_entry.vme_end);
6829
6830 return rc;
6831 }
6832 }
6833 return KERN_SUCCESS;
6834 }
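/*
 * A minimal usage sketch (not compiled; assumes "entry" is an
 * in_transition entry of a read-locked "map" and "tag" is a caller
 * supplied vm_tag_t):
 *
 *	kr = vm_fault_wire(map, entry, VM_PROT_DEFAULT, tag,
 *	    map->pmap, entry->vme_start, NULL);
 *	...
 *	vm_fault_unwire(map, entry, FALSE,
 *	    map->pmap, entry->vme_start, entry->vme_end);
 *
 * On failure, vm_fault_wire() has already unwired the prefix it
 * managed to wire, so callers only pair a successful wire with a
 * later vm_fault_unwire() over the same range.
 */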
6835
6836 /*
6837 * vm_fault_unwire:
6838 *
6839 * Unwire a range of virtual addresses in a map.
6840 */
6841 void
6842 vm_fault_unwire(
6843 vm_map_t map,
6844 vm_map_entry_t entry,
6845 boolean_t deallocate,
6846 pmap_t pmap,
6847 vm_map_offset_t pmap_addr,
6848 vm_map_offset_t end_addr)
6849 {
6850 vm_map_offset_t va;
6851 vm_object_t object;
6852 struct vm_object_fault_info fault_info = {
6853 .interruptible = THREAD_UNINT,
6854 };
6855 unsigned int unwired_pages;
6856 vm_map_size_t effective_page_size;
6857
6858 object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6859
6860 /*
6861 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6862 * do anything since such memory is wired by default. So we don't have
6863 * anything to undo here.
6864 */
6865
6866 if (object != VM_OBJECT_NULL && object->phys_contiguous) {
6867 return;
6868 }
6869
6870 fault_info.interruptible = THREAD_UNINT;
6871 fault_info.behavior = entry->behavior;
6872 fault_info.user_tag = VME_ALIAS(entry);
6873 if (entry->iokit_acct ||
6874 (!entry->is_sub_map && !entry->use_pmap)) {
6875 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
6876 }
6877 fault_info.lo_offset = VME_OFFSET(entry);
6878 fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
6879 fault_info.no_cache = entry->no_cache;
6880 fault_info.stealth = TRUE;
6881 if (entry->vme_xnu_user_debug) {
6882 /*
6883 * Modified code-signed executable region: wired pages must
6884 * have been copied, so they should be XNU_USER_DEBUG rather
6885 * than XNU_USER_EXEC.
6886 */
6887 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6888 }
6889
6890 unwired_pages = 0;
6891
6892 /*
6893 * Since the pages are wired down, we must be able to
6894 * get their mappings from the physical map system.
6895 */
6896
6897 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6898 for (va = entry->vme_start;
6899 va < end_addr;
6900 va += effective_page_size) {
6901 if (object == VM_OBJECT_NULL) {
6902 if (pmap) {
6903 pmap_change_wiring(pmap,
6904 pmap_addr + (va - entry->vme_start), FALSE);
6905 }
6906 (void) vm_fault(map, va, VM_PROT_NONE,
6907 TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
6908 } else {
6909 vm_prot_t prot;
6910 vm_page_t result_page;
6911 vm_page_t top_page;
6912 vm_object_t result_object;
6913 vm_fault_return_t result;
6914
6915 /* cap cluster size at maximum UPL size */
6916 upl_size_t cluster_size;
6917 if (os_sub_overflow(end_addr, va, &cluster_size)) {
6918 cluster_size = UPL_SIZE_MAX;
6919 }
6920 fault_info.cluster_size = cluster_size;
6921
6922 do {
6923 prot = VM_PROT_NONE;
6924
6925 vm_object_lock(object);
6926 vm_object_paging_begin(object);
6927 result_page = VM_PAGE_NULL;
6928 result = vm_fault_page(
6929 object,
6930 (VME_OFFSET(entry) +
6931 (va - entry->vme_start)),
6932 VM_PROT_NONE, TRUE,
6933 FALSE, /* page not looked up */
6934 &prot, &result_page, &top_page,
6935 (int *)0,
6936 NULL, map->no_zero_fill,
6937 &fault_info);
6938 } while (result == VM_FAULT_RETRY);
6939
6940 /*
6941 * If this was a mapping to a file on a device that has been forcibly
6942 * unmounted, then we won't get a page back from vm_fault_page(). Just
6943 * move on to the next one in case the remaining pages are mapped from
6944 * different objects. During a forced unmount, the object is terminated
6945 * so the alive flag will be false if this happens. A forced unmount
6946 * will occur when an external disk is unplugged before the user does an
6947 * eject, so we don't want to panic in that situation.
6948 */
6949
6950 if (result == VM_FAULT_MEMORY_ERROR) {
6951 if (!object->alive) {
6952 continue;
6953 }
6954 if (!object->internal && object->pager == NULL) {
6955 continue;
6956 }
6957 }
6958
6959 if (result == VM_FAULT_MEMORY_ERROR &&
6960 is_kernel_object(object)) {
6961 /*
6962 * This must have been allocated with
6963 * KMA_KOBJECT and KMA_VAONLY and there's
6964 * no physical page at this offset.
6965 * We're done (no page to free).
6966 */
6967 assert(deallocate);
6968 continue;
6969 }
6970
6971 if (result != VM_FAULT_SUCCESS) {
6972 panic("vm_fault_unwire: failure");
6973 }
6974
6975 result_object = VM_PAGE_OBJECT(result_page);
6976
6977 if (deallocate) {
6978 assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
6979 vm_page_fictitious_addr);
6980 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
6981 if (VM_PAGE_WIRED(result_page)) {
6982 unwired_pages++;
6983 }
6984 VM_PAGE_FREE(result_page);
6985 } else {
6986 if (pmap && !vm_page_is_guard(result_page)) {
6987 pmap_change_wiring(pmap,
6988 pmap_addr + (va - entry->vme_start), FALSE);
6989 }
6990
6991
6992 if (VM_PAGE_WIRED(result_page)) {
6993 vm_page_lockspin_queues();
6994 vm_page_unwire(result_page, TRUE);
6995 vm_page_unlock_queues();
6996 unwired_pages++;
6997 }
6998 if (entry->zero_wired_pages &&
6999 (entry->protection & VM_PROT_WRITE) &&
7000 #if __arm64e__
7001 !entry->used_for_tpro &&
7002 #endif /* __arm64e__ */
7003 !entry->used_for_jit) {
7004 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
7005 }
7006
7007 vm_page_wakeup_done(result_object, result_page);
7008 }
7009 vm_fault_cleanup(result_object, top_page);
7010 }
7011 }
7012
7013 /*
7014 * Inform the physical mapping system that the range
7015 * of addresses may fault, so that page tables and
7016 * such may be unwired themselves.
7017 */
7018
7019 pmap_pageable(pmap, pmap_addr,
7020 pmap_addr + (end_addr - entry->vme_start), TRUE);
7021
7022 if (is_kernel_object(object)) {
7023 /*
7024 * Would like to make user_tag in vm_object_fault_info a
7025 * vm_tag_t (unsigned short), but user_tag derives its value from
7026 * VME_ALIAS(entry) in a few places, and VME_ALIAS, in turn, casts
7027 * to an _unsigned int_ that non-fault_info paths rely on in many
7028 * places throughout the code.
7029 *
7030 * So, for now, an explicit truncation to unsigned short (vm_tag_t).
7031 */
7032 assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
7033 "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
7034 vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages), NULL);
7035 }
7036 }
7037
7038 /*
7039 * vm_fault_wire_fast:
7040 *
7041 * Handle common case of a wire down page fault at the given address.
7042 * If successful, the page is inserted into the associated physical map.
7043 * The map entry is passed in to avoid the overhead of a map lookup.
7044 *
7045 * NOTE: the given address should be truncated to the
7046 * proper page address.
7047 *
7048 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
7049 * a standard error specifying why the fault is fatal is returned.
7050 *
7051 * The map in question must be referenced, and remains so.
7052 * Caller has a read lock on the map.
7053 *
7054 * This is a stripped version of vm_fault() for wiring pages. Anything
7055 * other than the common case will return KERN_FAILURE, and the caller
7056 * is expected to call vm_fault().
7057 */
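/*
 * Illustrative caller sketch (hypothetical, not a real call site):
 * per the contract above, wiring code tries this fast path first and
 * falls back to the general fault path on KERN_FAILURE, e.g.:
 *
 *	kr = vm_fault_wire_fast(map, va, prot, wire_tag, entry,
 *	    pmap, pmap_addr, NULL);
 *	if (kr != KERN_SUCCESS) {
 *		kr = vm_fault(map, va, prot, TRUE, wire_tag,
 *		    THREAD_UNINT, pmap, pmap_addr);
 *	}
 */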
7058 static kern_return_t
7059 vm_fault_wire_fast(
7060 __unused vm_map_t map,
7061 vm_map_offset_t va,
7062 __unused vm_prot_t caller_prot,
7063 vm_tag_t wire_tag,
7064 vm_map_entry_t entry,
7065 pmap_t pmap,
7066 vm_map_offset_t pmap_addr,
7067 ppnum_t *physpage_p)
7068 {
7069 vm_object_t object;
7070 vm_object_offset_t offset;
7071 vm_page_t m;
7072 vm_prot_t prot;
7073 thread_t thread = current_thread();
7074 int type_of_fault;
7075 kern_return_t kr;
7076 vm_map_size_t fault_page_size;
7077 vm_map_offset_t fault_phys_offset;
7078 struct vm_object_fault_info fault_info = {
7079 .interruptible = THREAD_UNINT,
7080 };
7081 uint8_t object_lock_type = 0;
7082
7083 counter_inc(&vm_statistics_faults);
7084
7085 if (thread != THREAD_NULL) {
7086 counter_inc(&get_threadtask(thread)->faults);
7087 }
7088
7089 /*
7090 * Recovery actions
7091 */
7092
7093 #undef RELEASE_PAGE
7094 #define RELEASE_PAGE(m) { \
7095 vm_page_wakeup_done(VM_PAGE_OBJECT(m), m); \
7096 vm_page_lockspin_queues(); \
7097 vm_page_unwire(m, TRUE); \
7098 vm_page_unlock_queues(); \
7099 }
7100
7101
7102 #undef UNLOCK_THINGS
7103 #define UNLOCK_THINGS { \
7104 vm_object_paging_end(object); \
7105 vm_object_unlock(object); \
7106 }
7107
7108 #undef UNLOCK_AND_DEALLOCATE
7109 #define UNLOCK_AND_DEALLOCATE { \
7110 UNLOCK_THINGS; \
7111 vm_object_deallocate(object); \
7112 }
7113 /*
7114 * Give up and have caller do things the hard way.
7115 */
7116
7117 #define GIVE_UP { \
7118 UNLOCK_AND_DEALLOCATE; \
7119 return(KERN_FAILURE); \
7120 }
7121
7122
7123 /*
7124 * If this entry is not directly to a vm_object, bail out.
7125 */
7126 if (entry->is_sub_map) {
7127 assert(physpage_p == NULL);
7128 return KERN_FAILURE;
7129 }
7130
7131 /*
7132 * Find the backing store object and offset into it.
7133 */
7134
7135 object = VME_OBJECT(entry);
7136 offset = (va - entry->vme_start) + VME_OFFSET(entry);
7137 prot = entry->protection;
7138
7139 /*
7140 * Make a reference to this object to prevent its
7141 * disposal while we are messing with it.
7142 */
7143
7144 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
7145 vm_object_lock(object);
7146 vm_object_reference_locked(object);
7147 vm_object_paging_begin(object);
7148
7149 /*
7150 * INVARIANTS (through entire routine):
7151 *
7152 * 1) At all times, we must either have the object
7153 * lock or a busy page in some object to prevent
7154 * some other thread from trying to bring in
7155 * the same page.
7156 *
7157 * 2) Once we have a busy page, we must remove it from
7158 * the pageout queues, so that the pageout daemon
7159 * will not grab it away.
7160 *
7161 */
7162
7163 /*
7164 * Look for page in top-level object. If it's not there or
7165 * there's something going on, give up.
7166 */
7167 m = vm_page_lookup(object, vm_object_trunc_page(offset));
7168 if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
7169 (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
7170 GIVE_UP;
7171 }
7172 if (vm_page_is_guard(m)) {
7173 /*
7174 * Guard pages are fictitious pages and are never
7175 * entered into a pmap, so let's say it's been wired...
7176 */
7177 kr = KERN_SUCCESS;
7178 goto done;
7179 }
7180
7181 /*
7182 * Wire the page down now. All bail outs beyond this
7183 * point must unwire the page.
7184 */
7185
7186 vm_page_lockspin_queues();
7187 vm_page_wire(m, wire_tag, TRUE);
7188 vm_page_unlock_queues();
7189
7190 /*
7191 * Mark page busy for other threads.
7192 */
7193 assert(!m->vmp_busy);
7194 m->vmp_busy = TRUE;
7195 assert(!m->vmp_absent);
7196
7197 /*
7198 * Give up if the page is being written and there's a copy object
7199 */
7200 if ((object->vo_copy != VM_OBJECT_NULL) && (prot & VM_PROT_WRITE)) {
7201 RELEASE_PAGE(m);
7202 GIVE_UP;
7203 }
7204
7205 fault_info.user_tag = VME_ALIAS(entry);
7206 fault_info.pmap_options = 0;
7207 if (entry->iokit_acct ||
7208 (!entry->is_sub_map && !entry->use_pmap)) {
7209 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
7210 }
7211 if (entry->vme_xnu_user_debug) {
7212 /*
7213 * Modified code-signed executable region: wiring will
7214 * copy the pages, so they should be XNU_USER_DEBUG rather
7215 * than XNU_USER_EXEC.
7216 */
7217 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
7218 }
7219
7220 if (entry->translated_allow_execute) {
7221 fault_info.pmap_options |= PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE;
7222 }
7223
7224 fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
7225 fault_phys_offset = offset - vm_object_trunc_page(offset);
7226
7227 /*
7228 * Put this page into the physical map.
7229 */
7230 type_of_fault = DBG_CACHE_HIT_FAULT;
7231 assert3p(VM_PAGE_OBJECT(m), ==, object);
7232 kr = vm_fault_enter(m,
7233 pmap,
7234 pmap_addr,
7235 fault_page_size,
7236 fault_phys_offset,
7237 prot,
7238 prot,
7239 TRUE, /* wired */
7240 wire_tag,
7241 &fault_info,
7242 NULL,
7243 &type_of_fault,
7244 &object_lock_type); /* Exclusive lock mode. Will remain unchanged.*/
7245 if (kr != KERN_SUCCESS) {
7246 RELEASE_PAGE(m);
7247 GIVE_UP;
7248 }
7249
7250
7251 done:
7252 /*
7253 * Unlock everything, and return
7254 */
7255
7256 if (physpage_p) {
7257 /* for vm_map_wire_and_extract() */
7258 if (kr == KERN_SUCCESS) {
7259 assert3p(object, ==, VM_PAGE_OBJECT(m));
7260 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
7261 if (prot & VM_PROT_WRITE) {
7262 vm_object_lock_assert_exclusive(object);
7263 m->vmp_dirty = TRUE;
7264 }
7265 } else {
7266 *physpage_p = 0;
7267 }
7268 }
7269
7270 if (m->vmp_busy) {
7271 vm_page_wakeup_done(object, m);
7272 }
7273
7274 UNLOCK_AND_DEALLOCATE;
7275
7276 return kr;
7277 }
7278
7279 /*
7280 * Routine: vm_fault_copy_cleanup
7281 * Purpose:
7282 * Release a page used by vm_fault_copy.
7283 */
7284
7285 static void
7286 vm_fault_copy_cleanup(
7287 vm_page_t page,
7288 vm_page_t top_page)
7289 {
7290 vm_object_t object = VM_PAGE_OBJECT(page);
7291
7292 vm_object_lock(object);
7293 vm_page_wakeup_done(object, page);
7294 if (!VM_PAGE_PAGEABLE(page)) {
7295 vm_page_lockspin_queues();
7296 if (!VM_PAGE_PAGEABLE(page)) {
7297 vm_page_activate(page);
7298 }
7299 vm_page_unlock_queues();
7300 }
7301 vm_fault_cleanup(object, top_page);
7302 }
7303
7304 static void
7305 vm_fault_copy_dst_cleanup(
7306 vm_page_t page)
7307 {
7308 vm_object_t object;
7309
7310 if (page != VM_PAGE_NULL) {
7311 object = VM_PAGE_OBJECT(page);
7312 vm_object_lock(object);
7313 vm_page_lockspin_queues();
7314 vm_page_unwire(page, TRUE);
7315 vm_page_unlock_queues();
7316 vm_object_paging_end(object);
7317 vm_object_unlock(object);
7318 }
7319 }
7320
7321 /*
7322 * Routine: vm_fault_copy
7323 *
7324 * Purpose:
7325 * Copy pages from one virtual memory object to another --
7326 * neither the source nor destination pages need be resident.
7327 *
7328 * Before actually copying a page, the version associated with
7329 * the destination address map will be verified.
7330 *
7331 * In/out conditions:
7332 * The caller must hold a reference, but not a lock, to
7333 * each of the source and destination objects and to the
7334 * destination map.
7335 *
7336 * Results:
7337 * Returns KERN_SUCCESS if no errors were encountered in
7338 * reading or writing the data. Returns KERN_INTERRUPTED if
7339 * the operation was interrupted (only possible if the
7340 * "interruptible" argument is asserted). Other return values
7341 * indicate a permanent error in copying the data.
7342 *
7343 * The actual amount of data copied will be returned in the
7344 * "copy_size" argument. In the event that the destination map
7345 * verification failed, this amount may be less than the amount
7346 * requested.
7347 */
7348 kern_return_t
7349 vm_fault_copy(
7350 vm_object_t src_object,
7351 vm_object_offset_t src_offset,
7352 vm_map_size_t *copy_size, /* INOUT */
7353 vm_object_t dst_object,
7354 vm_object_offset_t dst_offset,
7355 vm_map_t dst_map,
7356 vm_map_version_t *dst_version,
7357 int interruptible)
7358 {
7359 vm_page_t result_page;
7360
7361 vm_page_t src_page;
7362 vm_page_t src_top_page;
7363 vm_prot_t src_prot;
7364
7365 vm_page_t dst_page;
7366 vm_page_t dst_top_page;
7367 vm_prot_t dst_prot;
7368
7369 vm_map_size_t amount_left;
7370 vm_object_t old_copy_object;
7371 uint32_t old_copy_version;
7372 vm_object_t result_page_object = NULL;
7373 kern_return_t error = 0;
7374 vm_fault_return_t result;
7375
7376 vm_map_size_t part_size;
7377 struct vm_object_fault_info fault_info_src = {};
7378 struct vm_object_fault_info fault_info_dst = {};
7379
7380 /*
7381 * In order not to confuse the clustered pageins, align
7382 * the different offsets on a page boundary.
7383 */
7384
7385 #define RETURN(x) \
7386 MACRO_BEGIN \
7387 *copy_size -= amount_left; \
7388 MACRO_RETURN(x); \
7389 MACRO_END
7390
7391 amount_left = *copy_size;
7392
7393 fault_info_src.interruptible = interruptible;
7394 fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
7395 fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
7396 fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
7397 fault_info_src.stealth = TRUE;
7398
7399 fault_info_dst.interruptible = interruptible;
7400 fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
7401 fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
7402 fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
7403 fault_info_dst.stealth = TRUE;
7404
7405 do { /* while (amount_left > 0) */
7406 /*
7407 * There may be a deadlock if both source and destination
7408 * pages are the same. To avoid this deadlock, the copy must
7409 * start by getting the destination page in order to apply
7410 * COW semantics if any.
7411 */
7412
7413 RetryDestinationFault:;
7414
7415 dst_prot = VM_PROT_WRITE | VM_PROT_READ;
7416
7417 vm_object_lock(dst_object);
7418 vm_object_paging_begin(dst_object);
7419
7420 /* cap cluster size at maximum UPL size */
7421 upl_size_t cluster_size;
7422 if (os_convert_overflow(amount_left, &cluster_size)) {
7423 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7424 }
7425 fault_info_dst.cluster_size = cluster_size;
7426
7427 dst_page = VM_PAGE_NULL;
7428 result = vm_fault_page(dst_object,
7429 vm_object_trunc_page(dst_offset),
7430 VM_PROT_WRITE | VM_PROT_READ,
7431 FALSE,
7432 FALSE, /* page not looked up */
7433 &dst_prot, &dst_page, &dst_top_page,
7434 (int *)0,
7435 &error,
7436 dst_map->no_zero_fill,
7437 &fault_info_dst);
7438 switch (result) {
7439 case VM_FAULT_SUCCESS:
7440 break;
7441 case VM_FAULT_RETRY:
7442 goto RetryDestinationFault;
7443 case VM_FAULT_MEMORY_SHORTAGE:
7444 if (vm_page_wait(interruptible)) {
7445 goto RetryDestinationFault;
7446 }
7447 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_COPY_MEMORY_SHORTAGE), 0 /* arg */);
7448 OS_FALLTHROUGH;
7449 case VM_FAULT_INTERRUPTED:
7450 RETURN(MACH_SEND_INTERRUPTED);
7451 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7452 /* success but no VM page: fail the copy */
7453 vm_object_paging_end(dst_object);
7454 vm_object_unlock(dst_object);
7455 OS_FALLTHROUGH;
7456 case VM_FAULT_MEMORY_ERROR:
7457 if (error) {
7458 return error;
7459 } else {
7460 return KERN_MEMORY_ERROR;
7461 }
7462 default:
7463 panic("vm_fault_copy: unexpected error 0x%x from "
7464 "vm_fault_page()\n", result);
7465 }
7466 assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
7467
7468 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7469 old_copy_object = dst_object->vo_copy;
7470 old_copy_version = dst_object->vo_copy_version;
7471
7472 /*
7473 * There exists the possibility that the source and
7474 * destination page are the same. But we can't
7475 * easily determine that now. If they are the
7476 * same, the call to vm_fault_page() for the
7477 * destination page will deadlock. To prevent this we
7478 * wire the page so we can drop busy without having
7479 * the page daemon steal the page. We clean up the
7480 * top page but keep the paging reference on the object
7481 * holding the dest page so it doesn't go away.
7482 */
7483
7484 vm_page_lockspin_queues();
7485 vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
7486 vm_page_unlock_queues();
7487 vm_page_wakeup_done(dst_object, dst_page);
7488 vm_object_unlock(dst_object);
7489
7490 if (dst_top_page != VM_PAGE_NULL) {
7491 vm_object_lock(dst_object);
7492 VM_PAGE_FREE(dst_top_page);
7493 vm_object_paging_end(dst_object);
7494 vm_object_unlock(dst_object);
7495 }
7496
7497 RetrySourceFault:;
7498
7499 if (src_object == VM_OBJECT_NULL) {
7500 /*
7501 * No source object. We will just
7502 * zero-fill the page in dst_object.
7503 */
7504 src_page = VM_PAGE_NULL;
7505 result_page = VM_PAGE_NULL;
7506 } else {
7507 vm_object_lock(src_object);
7508 src_page = vm_page_lookup(src_object,
7509 vm_object_trunc_page(src_offset));
7510 if (src_page == dst_page) {
7511 src_prot = dst_prot;
7512 result_page = VM_PAGE_NULL;
7513 } else {
7514 src_prot = VM_PROT_READ;
7515 vm_object_paging_begin(src_object);
7516
7517 /* cap cluster size at maximum UPL size */
7518 if (os_convert_overflow(amount_left, &cluster_size)) {
7519 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7520 }
7521 fault_info_src.cluster_size = cluster_size;
7522
7523 result_page = VM_PAGE_NULL;
7524 result = vm_fault_page(
7525 src_object,
7526 vm_object_trunc_page(src_offset),
7527 VM_PROT_READ, FALSE,
7528 FALSE, /* page not looked up */
7529 &src_prot,
7530 &result_page, &src_top_page,
7531 (int *)0, &error, FALSE,
7532 &fault_info_src);
7533
7534 switch (result) {
7535 case VM_FAULT_SUCCESS:
7536 break;
7537 case VM_FAULT_RETRY:
7538 goto RetrySourceFault;
7539 case VM_FAULT_MEMORY_SHORTAGE:
7540 if (vm_page_wait(interruptible)) {
7541 goto RetrySourceFault;
7542 }
7543 OS_FALLTHROUGH;
7544 case VM_FAULT_INTERRUPTED:
7545 vm_fault_copy_dst_cleanup(dst_page);
7546 RETURN(MACH_SEND_INTERRUPTED);
7547 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7548 /* success but no VM page: fail */
7549 vm_object_paging_end(src_object);
7550 vm_object_unlock(src_object);
7551 OS_FALLTHROUGH;
7552 case VM_FAULT_MEMORY_ERROR:
7553 vm_fault_copy_dst_cleanup(dst_page);
7554 if (error) {
7555 return error;
7556 } else {
7557 return KERN_MEMORY_ERROR;
7558 }
7559 default:
7560 panic("vm_fault_copy(2): unexpected "
7561 "error 0x%x from "
7562 "vm_fault_page()\n", result);
7563 }
7564
7565 result_page_object = VM_PAGE_OBJECT(result_page);
7566 assert((src_top_page == VM_PAGE_NULL) ==
7567 (result_page_object == src_object));
7568 }
7569 assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7570 vm_object_unlock(result_page_object);
7571 }
7572
7573 vm_map_lock_read(dst_map);
7574
7575 if (!vm_map_verify(dst_map, dst_version)) {
7576 vm_map_unlock_read(dst_map);
7577 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7578 vm_fault_copy_cleanup(result_page, src_top_page);
7579 }
7580 vm_fault_copy_dst_cleanup(dst_page);
7581 break;
7582 }
7583 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7584
7585 vm_object_lock(dst_object);
7586
7587 if ((dst_object->vo_copy != old_copy_object ||
7588 dst_object->vo_copy_version != old_copy_version)) {
7589 vm_object_unlock(dst_object);
7590 vm_map_unlock_read(dst_map);
7591 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7592 vm_fault_copy_cleanup(result_page, src_top_page);
7593 }
7594 vm_fault_copy_dst_cleanup(dst_page);
7595 break;
7596 }
7597 vm_object_unlock(dst_object);
7598
7599 /*
7600 * Copy the page, and note that it is dirty
7601 * immediately.
7602 */
7603
7604 if (!page_aligned(src_offset) ||
7605 !page_aligned(dst_offset) ||
7606 !page_aligned(amount_left)) {
7607 vm_object_offset_t src_po,
7608 dst_po;
7609
7610 src_po = src_offset - vm_object_trunc_page(src_offset);
7611 dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7612
7613 if (dst_po > src_po) {
7614 part_size = PAGE_SIZE - dst_po;
7615 } else {
7616 part_size = PAGE_SIZE - src_po;
7617 }
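/*
 * Worked example (hypothetical offsets, 4K pages): with
 * src_po = 0x600 and dst_po = 0x200, dst_po is not greater
 * than src_po, so part_size = PAGE_SIZE - src_po = 0xa00
 * bytes: the copy can only run to the end of the source page.
 */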
7618 if (part_size > (amount_left)) {
7619 part_size = amount_left;
7620 }
7621
7622 if (result_page == VM_PAGE_NULL) {
7623 assert((vm_offset_t) dst_po == dst_po);
7624 assert((vm_size_t) part_size == part_size);
7625 vm_page_part_zero_fill(dst_page,
7626 (vm_offset_t) dst_po,
7627 (vm_size_t) part_size);
7628 } else {
7629 assert((vm_offset_t) src_po == src_po);
7630 assert((vm_offset_t) dst_po == dst_po);
7631 assert((vm_size_t) part_size == part_size);
7632 vm_page_part_copy(result_page,
7633 (vm_offset_t) src_po,
7634 dst_page,
7635 (vm_offset_t) dst_po,
7636 (vm_size_t)part_size);
7637 if (!dst_page->vmp_dirty) {
7638 vm_object_lock(dst_object);
7639 SET_PAGE_DIRTY(dst_page, TRUE);
7640 vm_object_unlock(dst_object);
7641 }
7642 }
7643 } else {
7644 part_size = PAGE_SIZE;
7645
7646 if (result_page == VM_PAGE_NULL) {
7647 vm_page_zero_fill(dst_page);
7650 } else {
7651 vm_object_lock(result_page_object);
7652 vm_page_copy(result_page, dst_page);
7653 vm_object_unlock(result_page_object);
7654
7655 if (!dst_page->vmp_dirty) {
7656 vm_object_lock(dst_object);
7657 SET_PAGE_DIRTY(dst_page, TRUE);
7658 vm_object_unlock(dst_object);
7659 }
7660 }
7661 }
7662
7663 /*
7664 * Unlock everything, and return
7665 */
7666
7667 vm_map_unlock_read(dst_map);
7668
7669 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7670 vm_fault_copy_cleanup(result_page, src_top_page);
7671 }
7672 vm_fault_copy_dst_cleanup(dst_page);
7673
7674 amount_left -= part_size;
7675 src_offset += part_size;
7676 dst_offset += part_size;
7677 } while (amount_left > 0);
7678
7679 RETURN(KERN_SUCCESS);
7680 #undef RETURN
7681
7682 /*NOTREACHED*/
7683 }
7684
7685 #if VM_FAULT_CLASSIFY
7686 /*
7687 * Temporary statistics gathering support.
7688 */
7689
7690 /*
7691 * Statistics arrays:
7692 */
7693 #define VM_FAULT_TYPES_MAX 5
7694 #define VM_FAULT_LEVEL_MAX 8
7695
7696 int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7697
7698 #define VM_FAULT_TYPE_ZERO_FILL 0
7699 #define VM_FAULT_TYPE_MAP_IN 1
7700 #define VM_FAULT_TYPE_PAGER 2
7701 #define VM_FAULT_TYPE_COPY 3
7702 #define VM_FAULT_TYPE_OTHER 4
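/*
 * Reading the table (illustrative): the second index is the shadow
 * chain depth at which vm_fault_classify() resolved the fault, so
 * vm_fault_stats[VM_FAULT_TYPE_ZERO_FILL][0] counts zero-fill faults
 * satisfied at the top-level object.
 */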
7703
7704
7705 void
7706 vm_fault_classify(vm_object_t object,
7707 vm_object_offset_t offset,
7708 vm_prot_t fault_type)
7709 {
7710 int type, level = 0;
7711 vm_page_t m;
7712
7713 while (TRUE) {
7714 m = vm_page_lookup(object, offset);
7715 if (m != VM_PAGE_NULL) {
7716 if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7717 type = VM_FAULT_TYPE_OTHER;
7718 break;
7719 }
7720 if (((fault_type & VM_PROT_WRITE) == 0) ||
7721 ((level == 0) && object->vo_copy == VM_OBJECT_NULL)) {
7722 type = VM_FAULT_TYPE_MAP_IN;
7723 break;
7724 }
7725 type = VM_FAULT_TYPE_COPY;
7726 break;
7727 } else {
7728 if (object->pager_created) {
7729 type = VM_FAULT_TYPE_PAGER;
7730 break;
7731 }
7732 if (object->shadow == VM_OBJECT_NULL) {
7733 type = VM_FAULT_TYPE_ZERO_FILL;
7734 break;
7735 }
7736
7737 offset += object->vo_shadow_offset;
7738 object = object->shadow;
7739 level++;
7740 continue;
7741 }
7742 }
7743
7744 if (level >= VM_FAULT_LEVEL_MAX) {
7745 level = VM_FAULT_LEVEL_MAX - 1;
7746 }
7747
7748 vm_fault_stats[type][level] += 1;
7749
7750 return;
7751 }
7752
7753 /* cleanup routine to call from debugger */
7754
7755 void
7756 vm_fault_classify_init(void)
7757 {
7758 int type, level;
7759
7760 for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7761 for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7762 vm_fault_stats[type][level] = 0;
7763 }
7764 }
7765
7766 return;
7767 }
7768 #endif /* VM_FAULT_CLASSIFY */
7769
7770 static inline bool
7771 object_supports_coredump(const vm_object_t object)
7772 {
7773 switch (object->wimg_bits & VM_WIMG_MASK) {
7774 case VM_WIMG_DEFAULT:
7775 return true;
7776 default:
7777 return false;
7778 }
7779 }
7780
7781 vm_offset_t
7782 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, bool multi_cpu)
7783 {
7784 vm_map_entry_t entry;
7785 vm_object_t object;
7786 vm_offset_t object_offset;
7787 vm_page_t m;
7788 int compressor_external_state, compressed_count_delta;
7789 vm_compressor_options_t compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7790 int my_fault_type = VM_PROT_READ;
7791 kern_return_t kr;
7792 int effective_page_mask, effective_page_size;
7793 int my_cpu_no = cpu_number();
7794 ppnum_t decomp_ppnum;
7795 addr64_t decomp_paddr;
7796
7797 if (multi_cpu) {
7798 compressor_flags |= C_KDP_MULTICPU;
7799 }
7800
7801 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7802 effective_page_mask = VM_MAP_PAGE_MASK(map);
7803 effective_page_size = VM_MAP_PAGE_SIZE(map);
7804 } else {
7805 effective_page_mask = PAGE_MASK;
7806 effective_page_size = PAGE_SIZE;
7807 }
7808
7809 if (not_in_kdp) {
7810 panic("kdp_lightweight_fault called from outside of debugger context");
7811 }
7812
7813 assert(map != VM_MAP_NULL);
7814
7815 assert((cur_target_addr & effective_page_mask) == 0);
7816 if ((cur_target_addr & effective_page_mask) != 0) {
7817 return 0;
7818 }
7819
7820 if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7821 return 0;
7822 }
7823
7824 if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7825 return 0;
7826 }
7827
7828 if (entry->is_sub_map) {
7829 return 0;
7830 }
7831
7832 object = VME_OBJECT(entry);
7833 if (object == VM_OBJECT_NULL) {
7834 return 0;
7835 }
7836
7837 object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7838
7839 while (TRUE) {
7840 if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7841 return 0;
7842 }
7843
7844 if (object->pager_created && (object->paging_in_progress ||
7845 object->activity_in_progress)) {
7846 return 0;
7847 }
7848
7849 m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
7850
7851 if (m != VM_PAGE_NULL) {
7852 if (!object_supports_coredump(object)) {
7853 return 0;
7854 }
7855
7856 if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done ||
7857 m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_cleaning ||
7858 m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
7859 return 0;
7860 }
7861
7862 assert(!vm_page_is_private(m));
7863 if (vm_page_is_private(m)) {
7864 return 0;
7865 }
7866
7867 assert(!vm_page_is_fictitious(m));
7868 if (vm_page_is_fictitious(m)) {
7869 return 0;
7870 }
7871
7872 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
7873 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
7874 return 0;
7875 }
7876
7877 return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
7878 }
7879
7880 compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
7881
7882 if (multi_cpu) {
7883 assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum != NULL);
7884 assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr != NULL);
7885 decomp_ppnum = vm_compressor_kdp_state.kc_decompressed_pages_ppnum[my_cpu_no];
7886 decomp_paddr = vm_compressor_kdp_state.kc_decompressed_pages_paddr[my_cpu_no];
7887 } else {
7888 decomp_ppnum = vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum;
7889 decomp_paddr = vm_compressor_kdp_state.kc_panic_decompressed_page_paddr;
7890 }
7891
7892 if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
7893 if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
7894 kr = vm_compressor_pager_get(object->pager,
7895 vm_object_trunc_page(object_offset + object->paging_offset),
7896 decomp_ppnum, &my_fault_type,
7897 compressor_flags, &compressed_count_delta);
7898 if (kr == KERN_SUCCESS) {
7899 return decomp_paddr;
7900 } else {
7901 return 0;
7902 }
7903 }
7904 }
7905
7906 if (object->shadow == VM_OBJECT_NULL) {
7907 return 0;
7908 }
7909
7910 object_offset += object->vo_shadow_offset;
7911 object = object->shadow;
7912 }
7913 }
7914
7915 /*
7916 * vm_page_validate_cs_fast():
7917 * Performs a few quick checks to determine if the page's code signature
7918 * really needs to be fully validated. It could:
7919 * 1. have been modified (i.e. automatically tainted),
7920 * 2. have already been validated,
7921 * 3. have already been found to be tainted,
7922 * 4. no longer have a backing store.
7923 * Returns FALSE if the page needs to be fully validated.
7924 */
7925 static boolean_t
7926 vm_page_validate_cs_fast(
7927 vm_page_t page,
7928 vm_map_size_t fault_page_size,
7929 vm_map_offset_t fault_phys_offset)
7930 {
7931 vm_object_t object;
7932
7933 object = VM_PAGE_OBJECT(page);
7934 vm_object_lock_assert_held(object);
7935
7936 if (page->vmp_wpmapped &&
7937 !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7938 /*
7939 * This page was mapped for "write" access sometime in the
7940 * past and could still be modifiable in the future.
7941 * Consider it tainted.
7942 * [ If the page was already found to be "tainted", no
7943 * need to re-validate. ]
7944 */
7945 vm_object_lock_assert_exclusive(object);
7946 VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
7947 VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
7948 if (cs_debug) {
7949 printf("CODESIGNING: %s: "
7950 "page %p obj %p off 0x%llx "
7951 "was modified\n",
7952 __FUNCTION__,
7953 page, object, page->vmp_offset);
7954 }
7955 vm_cs_validated_dirtied++;
7956 }
7957
7958 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
7959 VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
7960 return TRUE;
7961 }
7962 vm_object_lock_assert_exclusive(object);
7963
7964 #if CHECK_CS_VALIDATION_BITMAP
7965 kern_return_t kr;
7966
7967 kr = vnode_pager_cs_check_validation_bitmap(
7968 object->pager,
7969 page->vmp_offset + object->paging_offset,
7970 CS_BITMAP_CHECK);
7971 if (kr == KERN_SUCCESS) {
7972 page->vmp_cs_validated = VMP_CS_ALL_TRUE;
7973 page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
7974 vm_cs_bitmap_validated++;
7975 return TRUE;
7976 }
7977 #endif /* CHECK_CS_VALIDATION_BITMAP */
7978
7979 if (!object->alive || object->terminating || object->pager == NULL) {
7980 /*
7981 * The object is terminating and we don't have its pager
7982 * so we can't validate the data...
7983 */
7984 return TRUE;
7985 }
7986
7987 /* we need to really validate this page */
7988 vm_object_lock_assert_exclusive(object);
7989 return FALSE;
7990 }
7991
7992 void
7993 vm_page_validate_cs_mapped_slow(
7994 vm_page_t page,
7995 const void *kaddr)
7996 {
7997 vm_object_t object;
7998 memory_object_offset_t mo_offset;
7999 memory_object_t pager;
8000 struct vnode *vnode;
8001 int validated, tainted, nx;
8002
8003 assert(page->vmp_busy);
8004 object = VM_PAGE_OBJECT(page);
8005 vm_object_lock_assert_exclusive(object);
8006
8007 vm_cs_validates++;
8008
8009 /*
8010 * Since we get here to validate a page that was brought in by
8011 * the pager, we know that this pager is all set up and ready
8012 * by now.
8013 */
8014 assert(object->code_signed);
8015 assert(!object->internal);
8016 assert(object->pager != NULL);
8017 assert(object->pager_ready);
8018
8019 pager = object->pager;
8020 assert(object->paging_in_progress);
8021 vnode = vnode_pager_lookup_vnode(pager);
8022 mo_offset = page->vmp_offset + object->paging_offset;
8023
8024 /* verify the SHA1 hash for this page */
8025 validated = 0;
8026 tainted = 0;
8027 nx = 0;
8028 cs_validate_page(vnode,
8029 pager,
8030 mo_offset,
8031 (const void *)((const char *)kaddr),
8032 &validated,
8033 &tainted,
8034 &nx);
8035
8036 page->vmp_cs_validated |= validated;
8037 page->vmp_cs_tainted |= tainted;
8038 page->vmp_cs_nx |= nx;
8039
8040 #if CHECK_CS_VALIDATION_BITMAP
8041 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
8042 page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
8043 vnode_pager_cs_check_validation_bitmap(object->pager,
8044 mo_offset,
8045 CS_BITMAP_SET);
8046 }
8047 #endif /* CHECK_CS_VALIDATION_BITMAP */
8048 }
8049
8050 void
8051 vm_page_validate_cs_mapped(
8052 vm_page_t page,
8053 vm_map_size_t fault_page_size,
8054 vm_map_offset_t fault_phys_offset,
8055 const void *kaddr)
8056 {
8057 if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8058 vm_page_validate_cs_mapped_slow(page, kaddr);
8059 }
8060 }
8061
8062 static void
8063 vm_page_map_and_validate_cs(
8064 vm_object_t object,
8065 vm_page_t page)
8066 {
8067 vm_object_offset_t offset;
8068 vm_map_offset_t koffset;
8069 vm_map_size_t ksize;
8070 vm_offset_t kaddr;
8071 kern_return_t kr;
8072 boolean_t busy_page;
8073 boolean_t need_unmap;
8074
8075 vm_object_lock_assert_exclusive(object);
8076
8077 assert(object->code_signed);
8078 offset = page->vmp_offset;
8079
8080 busy_page = page->vmp_busy;
8081 if (!busy_page) {
8082 /* keep page busy while we map (and unlock) the VM object */
8083 page->vmp_busy = TRUE;
8084 }
8085
8086 /*
8087 * Take a paging reference on the VM object
8088 * to protect it from collapse or bypass,
8089 * and keep it from disappearing too.
8090 */
8091 vm_object_paging_begin(object);
8092
8093 /* map the page in the kernel address space */
8094 ksize = PAGE_SIZE_64;
8095 koffset = 0;
8096 need_unmap = FALSE;
8097 kr = vm_paging_map_object(page,
8098 object,
8099 offset,
8100 VM_PROT_READ,
8101 FALSE, /* can't unlock object ! */
8102 &ksize,
8103 &koffset,
8104 &need_unmap);
8105 if (kr != KERN_SUCCESS) {
8106 panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
8107 }
8108 kaddr = CAST_DOWN(vm_offset_t, koffset);
8109
8110 /* validate the mapped page */
8111 vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
8112
8113 assert(page->vmp_busy);
8114 assert(object == VM_PAGE_OBJECT(page));
8115 vm_object_lock_assert_exclusive(object);
8116
8117 if (!busy_page) {
8118 vm_page_wakeup_done(object, page);
8119 }
8120 if (need_unmap) {
8121 /* unmap the map from the kernel address space */
8122 vm_paging_unmap_object(object, koffset, koffset + ksize);
8123 koffset = 0;
8124 ksize = 0;
8125 kaddr = 0;
8126 }
8127 vm_object_paging_end(object);
8128 }
8129
8130 void
8131 vm_page_validate_cs(
8132 vm_page_t page,
8133 vm_map_size_t fault_page_size,
8134 vm_map_offset_t fault_phys_offset)
8135 {
8136 vm_object_t object;
8137
8138 object = VM_PAGE_OBJECT(page);
8139 vm_object_lock_assert_held(object);
8140
8141 if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8142 return;
8143 }
8144 vm_page_map_and_validate_cs(object, page);
8145 }
8146
8147 void
8148 vm_page_validate_cs_mapped_chunk(
8149 vm_page_t page,
8150 const void *kaddr,
8151 vm_offset_t chunk_offset,
8152 vm_size_t chunk_size,
8153 boolean_t *validated_p,
8154 unsigned *tainted_p)
8155 {
8156 vm_object_t object;
8157 vm_object_offset_t offset, offset_in_page;
8158 memory_object_t pager;
8159 struct vnode *vnode;
8160 boolean_t validated;
8161 unsigned tainted;
8162
8163 *validated_p = FALSE;
8164 *tainted_p = 0;
8165
8166 assert(page->vmp_busy);
8167 object = VM_PAGE_OBJECT(page);
8168 vm_object_lock_assert_exclusive(object);
8169
8170 assert(object->code_signed);
8171 offset = page->vmp_offset;
8172
8173 if (!object->alive || object->terminating || object->pager == NULL) {
8174 /*
8175 * The object is terminating and we don't have its pager
8176 * so we can't validate the data...
8177 */
8178 return;
8179 }
8180 /*
8181 * Since we get here to validate a page that was brought in by
8182 * the pager, we know that this pager is all set up and ready
8183 * by now.
8184 */
8185 assert(!object->internal);
8186 assert(object->pager != NULL);
8187 assert(object->pager_ready);
8188
8189 pager = object->pager;
8190 assert(object->paging_in_progress);
8191 vnode = vnode_pager_lookup_vnode(pager);
8192
8193 /* verify the signature for this chunk */
8194 offset_in_page = chunk_offset;
8195 assert(offset_in_page < PAGE_SIZE);
8196
8197 tainted = 0;
8198 validated = cs_validate_range(vnode,
8199 pager,
8200 (object->paging_offset +
8201 offset +
8202 offset_in_page),
8203 (const void *)((const char *)kaddr
8204 + offset_in_page),
8205 chunk_size,
8206 &tainted);
8207 if (validated) {
8208 *validated_p = TRUE;
8209 }
8210 if (tainted) {
8211 *tainted_p = tainted;
8212 }
8213 }
8214
8215 static void
8216 vm_rtfrecord_lock(void)
8217 {
8218 lck_spin_lock(&vm_rtfr_slock);
8219 }
8220
8221 static void
8222 vm_rtfrecord_unlock(void)
8223 {
8224 lck_spin_unlock(&vm_rtfr_slock);
8225 }
8226
8227 unsigned int
8228 vmrtfaultinfo_bufsz(void)
8229 {
8230 return vmrtf_num_records * sizeof(vm_rtfault_record_t);
8231 }
8232
8233 #include <kern/backtrace.h>
8234
8235 __attribute__((noinline))
8236 static void
8237 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
8238 {
8239 uint64_t fend = mach_continuous_time();
8240
8241 uint64_t cfpc = 0;
8242 uint64_t ctid = cthread->thread_id;
8243 uint64_t cupid = get_current_unique_pid();
8244
8245 uintptr_t bpc = 0;
8246 errno_t btr = 0;
8247
8248 /*
8249 * Capture a single-frame backtrace. This extracts just the program
8250 * counter at the point of the fault, and should not use copyin to get
8251 * Rosetta save state.
8252 */
8253 struct backtrace_control ctl = {
8254 .btc_user_thread = cthread,
8255 .btc_user_copy = backtrace_user_copy_error,
8256 };
8257 unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
8258 if ((btr == 0) && (bfrs > 0)) {
8259 cfpc = bpc;
8260 }
8261
8262 assert((fstart != 0) && fend >= fstart);
8263 vm_rtfrecord_lock();
8264 assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
8265
8266 vmrtfrs.vmrtf_total++;
8267 vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
8268
8269 cvmr->rtfabstime = fstart;
8270 cvmr->rtfduration = fend - fstart;
8271 cvmr->rtfaddr = fault_vaddr;
8272 cvmr->rtfpc = cfpc;
8273 cvmr->rtftype = type_of_fault;
8274 cvmr->rtfupid = cupid;
8275 cvmr->rtftid = ctid;
8276
8277 if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
8278 vmrtfrs.vmrtfr_curi = 0;
8279 }
8280
8281 vm_rtfrecord_unlock();
8282 }
8283
8284 int
8285 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
8286 {
8287 vm_rtfault_record_t *cvmrd = vrecords;
8288 size_t residue = vrecordsz;
8289 size_t numextracted = 0;
8290 boolean_t early_exit = FALSE;
8291
8292 vm_rtfrecord_lock();
8293
8294 for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
8295 if (residue < sizeof(vm_rtfault_record_t)) {
8296 early_exit = TRUE;
8297 break;
8298 }
8299
8300 if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
8301 #if DEVELOPMENT || DEBUG
8302 if (isroot == FALSE) {
8303 continue;
8304 }
8305 #else
8306 continue;
8307 #endif /* DEVELOPMENT || DEBUG */
8308 }
8309
8310 *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
8311 cvmrd++;
8312 residue -= sizeof(vm_rtfault_record_t);
8313 numextracted++;
8314 }
8315
8316 vm_rtfrecord_unlock();
8317
8318 *vmrtfrv = numextracted;
8319 return early_exit;
8320 }
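/*
 * Illustrative consumer sketch (hypothetical, names assumed): a
 * sysctl-style caller would size its buffer with vmrtfaultinfo_bufsz()
 * before extracting the records for a given unique pid:
 *
 *	unsigned long nextracted = 0;
 *	unsigned int bufsz = vmrtfaultinfo_bufsz();
 *	vm_rtfault_record_t *buf = kalloc_data(bufsz, Z_WAITOK | Z_ZERO);
 *	(void) vmrtf_extract(target_upid, FALSE, bufsz, buf, &nextracted);
 */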
8321
8322 /*
8323 * Only allow one diagnosis to be in flight at a time, to avoid
8324 * creating too much additional memory usage.
8325 */
8326 static volatile uint_t vmtc_diagnosing;
8327 unsigned int vmtc_total = 0;
8328
8329 /*
8330 * Type used to update telemetry for the diagnosis counts.
8331 */
8332 CA_EVENT(vmtc_telemetry,
8333 CA_INT, vmtc_num_byte, /* number of corrupt bytes found */
8334 CA_BOOL, vmtc_undiagnosed, /* undiagnosed because more than 1 at a time */
8335 CA_BOOL, vmtc_not_eligible, /* the page didn't qualify */
8336 CA_BOOL, vmtc_copyin_fail, /* unable to copy in the page */
8337 CA_BOOL, vmtc_not_found, /* no corruption found even though CS failed */
8338 CA_BOOL, vmtc_one_bit_flip, /* single bit flip */
8339 CA_BOOL, vmtc_testing); /* caused on purpose by testing */
8340
8341 #if DEVELOPMENT || DEBUG
8342 /*
8343 * Buffers used to compare before/after page contents.
8344 * Stashed to aid when debugging crashes.
8345 */
8346 static size_t vmtc_last_buffer_size = 0;
8347 static uint64_t *vmtc_last_before_buffer = NULL;
8348 static uint64_t *vmtc_last_after_buffer = NULL;
8349
8350 /*
8351 * Needed to record corruptions due to testing.
8352 */
8353 static uintptr_t corruption_test_va = 0;
8354 #endif /* DEVELOPMENT || DEBUG */
8355
8356 /*
8357 * Stash a copy of data from a possibly corrupt page.
8358 */
8359 static uint64_t *
8360 vmtc_get_page_data(
8361 vm_map_offset_t code_addr,
8362 vm_page_t page)
8363 {
8364 uint64_t *buffer = NULL;
8365 addr64_t buffer_paddr;
8366 addr64_t page_paddr;
8367 extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
8368 uint_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8369
8370 /*
8371 * Need an aligned buffer to do a physical copy.
8372 */
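/*
 * Note: the "size - 1" argument below is kernel_memory_allocate()'s
 * alignment mask, so the buffer comes back aligned to "size" and a
 * single bcopy_phys() of "size" bytes cannot straddle a page
 * boundary in the buffer.
 */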
8373 if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
8374 size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
8375 return NULL;
8376 }
8377 buffer_paddr = kvtophys((vm_offset_t)buffer);
8378 page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
8379
8380 /* adjust the page start address if we need only 4K of a 16K page */
8381 if (size < PAGE_SIZE) {
8382 uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
8383 page_paddr += subpage_start;
8384 }
8385
8386 bcopy_phys(page_paddr, buffer_paddr, size);
8387 return buffer;
8388 }
8389
8390 /*
8391 * Set things up so we can diagnose a potential text page corruption.
8392 */
8393 static uint64_t *
8394 vmtc_text_page_diagnose_setup(
8395 vm_map_offset_t code_addr,
8396 vm_page_t page,
8397 CA_EVENT_TYPE(vmtc_telemetry) *event)
8398 {
8399 uint64_t *buffer = NULL;
8400
8401 /*
8402 * If another is being diagnosed, skip this one.
8403 */
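/*
 * OSCompareAndSwap(0, 1, &vmtc_diagnosing) atomically claims the
 * single diagnosis slot; it returns false if another thread already
 * swapped 0 -> 1, in which case this corruption goes undiagnosed.
 */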
8404 if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
8405 event->vmtc_undiagnosed = true;
8406 return NULL;
8407 }
8408
8409 /*
8410 * Get the contents of the corrupt page.
8411 */
8412 buffer = vmtc_get_page_data(code_addr, page);
8413 if (buffer == NULL) {
8414 event->vmtc_copyin_fail = true;
8415 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8416 panic("Bad compare and swap in setup!");
8417 }
8418 return NULL;
8419 }
8420 return buffer;
8421 }
8422
8423 /*
8424 * Diagnose the text page by comparing its contents with
8425 * the one we've previously saved.
8426 */
8427 static void
8428 vmtc_text_page_diagnose(
8429 vm_map_offset_t code_addr,
8430 uint64_t *old_code_buffer,
8431 CA_EVENT_TYPE(vmtc_telemetry) *event)
8432 {
8433 uint64_t *new_code_buffer;
8434 size_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8435 uint_t count = (uint_t)size / sizeof(uint64_t);
8436 uint_t diff_count = 0;
8437 bool bit_flip = false;
8438 uint_t b;
8439 uint64_t *new;
8440 uint64_t *old;
8441
8442 new_code_buffer = kalloc_data(size, Z_WAITOK);
8443 assert(new_code_buffer != NULL);
8444 if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
8445 /* copyin error, so undo things */
8446 event->vmtc_copyin_fail = true;
8447 goto done;
8448 }
8449
8450 new = new_code_buffer;
8451 old = old_code_buffer;
8452 for (; count-- > 0; ++new, ++old) {
8453 if (*new == *old) {
8454 continue;
8455 }
8456
8457 /*
8458 * On first diff, check for a single bit flip
8459 */
8460 if (diff_count == 0) {
8461 uint64_t x = (*new ^ *old);
8462 assert(x != 0);
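/*
 * Power-of-two check: x has exactly one bit set iff
 * x != 0 and (x & (x - 1)) == 0. For example, x = 0x40
 * gives 0x40 & 0x3f == 0 (a one-bit flip), while
 * x = 0x41 gives 0x41 & 0x40 != 0.
 */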
8463 if ((x & (x - 1)) == 0) {
8464 bit_flip = true;
8465 ++diff_count;
8466 continue;
8467 }
8468 }
8469
8470 /*
8471 * count up the number of different bytes.
8472 */
8473 for (b = 0; b < sizeof(uint64_t); ++b) {
8474 char *n = (char *)new;
8475 char *o = (char *)old;
8476 if (n[b] != o[b]) {
8477 ++diff_count;
8478 }
8479 }
8480 }
8481
8482 if (diff_count > 1) {
8483 bit_flip = false;
8484 }
8485
8486 if (diff_count == 0) {
8487 event->vmtc_not_found = true;
8488 } else {
8489 event->vmtc_num_byte = diff_count;
8490 }
8491 if (bit_flip) {
8492 event->vmtc_one_bit_flip = true;
8493 }
8494
8495 done:
8496 /*
8497 * Free up the code copy buffers, but save the last
8498 * set on development / debug kernels in case they
8499 * can provide evidence for debugging memory stomps.
8500 */
8501 #if DEVELOPMENT || DEBUG
8502 if (vmtc_last_before_buffer != NULL) {
8503 kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
8504 }
8505 if (vmtc_last_after_buffer != NULL) {
8506 kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
8507 }
8508 vmtc_last_before_buffer = old_code_buffer;
8509 vmtc_last_after_buffer = new_code_buffer;
8510 vmtc_last_buffer_size = size;
8511 #else /* DEVELOPMENT || DEBUG */
8512 kfree_data(new_code_buffer, size);
8513 kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
8514 #endif /* DEVELOPMENT || DEBUG */
8515
8516 /*
8517 * We're finished, so clear the diagnosing flag.
8518 */
8519 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8520 panic("Bad compare and swap in diagnose!");
8521 }
8522 }
8523
8524 /*
8525 * For the given map, virt address, find the object, offset, and page.
8526 * This has to lookup the map entry, verify protections, walk any shadow chains.
8527 * If found, returns with the object locked.
8528 */
8529 static kern_return_t
8530 vmtc_revalidate_lookup(
8531 vm_map_t map,
8532 vm_map_offset_t vaddr,
8533 vm_object_t *ret_object,
8534 vm_object_offset_t *ret_offset,
8535 vm_page_t *ret_page,
8536 vm_prot_t *ret_prot)
8537 {
8538 vm_object_t object;
8539 vm_object_offset_t offset;
8540 vm_page_t page;
8541 kern_return_t kr = KERN_SUCCESS;
8542 uint8_t object_lock_type = OBJECT_LOCK_EXCLUSIVE;
8543 vm_map_version_t version;
8544 boolean_t wired;
8545 struct vm_object_fault_info fault_info = {
8546 .interruptible = THREAD_UNINT
8547 };
8548 vm_map_t real_map = NULL;
8549 vm_prot_t prot;
8550 vm_object_t shadow;
8551
8552 /*
8553 * Find the object/offset for the given location/map.
8554 * Note this returns with the object locked.
8555 */
8556 restart:
8557 vm_map_lock_read(map);
8558 object = VM_OBJECT_NULL; /* in case we come around the restart path */
8559 kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
8560 object_lock_type, &version, &object, &offset, &prot, &wired,
8561 &fault_info, &real_map, NULL);
8562 vm_map_unlock_read(map);
8563 if (real_map != NULL && real_map != map) {
8564 vm_map_unlock(real_map);
8565 }
8566
8567 /*
8568 * If there's no page here, fail.
8569 */
8570 if (kr != KERN_SUCCESS || object == NULL) {
8571 kr = KERN_FAILURE;
8572 goto done;
8573 }
8574
8575 /*
8576 * Chase down any shadow chains to find the actual page.
8577 */
8578 for (;;) {
8579 /*
8580 * See if the page is on the current object.
8581 */
8582 page = vm_page_lookup(object, vm_object_trunc_page(offset));
8583 if (page != NULL) {
8584 /* restart the lookup */
8585 if (page->vmp_restart) {
8586 vm_object_unlock(object);
8587 goto restart;
8588 }
8589
8590 /*
8591 * If this page is busy, we need to wait for it.
8592 */
8593 if (page->vmp_busy) {
8594 vm_page_sleep(object, page, THREAD_INTERRUPTIBLE, LCK_SLEEP_UNLOCK);
8595 goto restart;
8596 }
8597 break;
8598 }
8599
8600 /*
8601 * If the object doesn't have the page and
8602 * has no shadow, then we can quit.
8603 */
8604 shadow = object->shadow;
8605 if (shadow == NULL) {
8606 kr = KERN_FAILURE;
8607 goto done;
8608 }
8609
8610 /*
8611 * Move to the next object
8612 */
8613 offset += object->vo_shadow_offset;
8614 vm_object_lock(shadow);
8615 vm_object_unlock(object);
8616 object = shadow;
8617 shadow = VM_OBJECT_NULL;
8618 }
8619 *ret_object = object;
8620 *ret_offset = vm_object_trunc_page(offset);
8621 *ret_page = page;
8622 *ret_prot = prot;
8623
8624 done:
8625 if (kr != KERN_SUCCESS && object != NULL) {
8626 vm_object_unlock(object);
8627 }
8628 return kr;
8629 }
8630
8631 /*
8632 * Check if a page is wired, needs extra locking.
8633 */
8634 static bool
8635 is_page_wired(vm_page_t page)
8636 {
8637 bool result;
8638 vm_page_lock_queues();
8639 result = VM_PAGE_WIRED(page);
8640 vm_page_unlock_queues();
8641 return result;
8642 }
8643
8644 /*
8645 * A fatal process error has occurred in the given task.
8646 * Recheck the code signing of the text page at the given
8647 * address to check for a text page corruption.
8648 *
8649 * Returns KERN_FAILURE if a page was found to be corrupt
8650 * by failing to match its code signature. KERN_SUCCESS
8651 * means the page is either valid or we don't have the
8652 * information to say it's corrupt.
8653 */
8654 kern_return_t
8655 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8656 {
8657 kern_return_t kr;
8658 vm_map_t map;
8659 vm_object_t object = NULL;
8660 vm_object_offset_t offset;
8661 vm_page_t page = NULL;
8662 struct vnode *vnode;
8663 uint64_t *diagnose_buffer = NULL;
8664 CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8665 ca_event_t ca_event = NULL;
8666 vm_prot_t prot;
8667
8668 map = task->map;
8669 if (task->map == NULL) {
8670 return KERN_SUCCESS;
8671 }
8672
8673 kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page, &prot);
8674 if (kr != KERN_SUCCESS) {
8675 goto done;
8676 }
8677
8678 /*
8679 * The page must be executable.
8680 */
8681 if (!(prot & VM_PROT_EXECUTE)) {
8682 goto done;
8683 }
8684
8685 /*
8686 * The object needs to have a pager.
8687 */
8688 if (object->pager == NULL) {
8689 goto done;
8690 }
8691
8692 /*
8693 * Needs to be a vnode backed page to have a signature.
8694 */
8695 vnode = vnode_pager_lookup_vnode(object->pager);
8696 if (vnode == NULL) {
8697 goto done;
8698 }
8699
8700 /*
8701 * Object checks to see if we should proceed.
8702 */
8703 if (!object->code_signed || /* no code signature to check */
8704 object->internal || /* internal objects aren't signed */
8705 object->terminating || /* the object and its pages are already going away */
8706 !object->pager_ready) { /* this shouldn't happen, but the check doesn't hurt */
8707 goto done;
8708 }
8709
8710
8711 /*
8712 * Check the code signature of the page in question.
8713 */
8714 vm_page_map_and_validate_cs(object, page);
8715
8716 /*
8717 * At this point:
8718 * vmp_cs_validated |= validated (set if a code signature exists)
8719 * vmp_cs_tainted |= tainted (set if code signature violation)
8720 * vmp_cs_nx |= nx; ??
8721 *
8722 * if vmp_pmapped then have to pmap_disconnect..
8723 * other flags to check on object or page?
8724 */
8725 if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
8726 #if DEBUG || DEVELOPMENT
8727 /*
8728 * On development builds, a boot-arg can be used to cause
8729 * a panic, instead of a quiet repair.
8730 */
8731 if (vmtc_panic_instead) {
8732 panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
8733 }
8734 #endif /* DEBUG || DEVELOPMENT */
8735
8736 /*
8737 * We're going to invalidate this page. Grab a copy of it for comparison.
8738 */
8739 ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
8740 event = ca_event->data;
8741 diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);
8742
8743 /*
8744 * Invalidate, i.e. toss, the corrupted page.
8745 */
8746 if (!page->vmp_cleaning &&
8747 !page->vmp_laundry &&
8748 !vm_page_is_fictitious(page) &&
8749 !page->vmp_precious &&
8750 !page->vmp_absent &&
8751 !VMP_ERROR_GET(page) &&
8752 !page->vmp_dirty &&
8753 !is_page_wired(page)) {
8754 if (page->vmp_pmapped) {
8755 int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
8756 if (refmod & VM_MEM_MODIFIED) {
8757 SET_PAGE_DIRTY(page, FALSE);
8758 }
8759 if (refmod & VM_MEM_REFERENCED) {
8760 page->vmp_reference = TRUE;
8761 }
8762 }
8763 /* If the page seems intentionally modified, don't trash it. */
8764 if (!page->vmp_dirty) {
8765 VM_PAGE_FREE(page);
8766 } else {
8767 event->vmtc_not_eligible = true;
8768 }
8769 } else {
8770 event->vmtc_not_eligible = true;
8771 }
8772 vm_object_unlock(object);
8773 object = VM_OBJECT_NULL;
8774
8775 /*
8776 * Now try to diagnose the type of failure by faulting
8777 * in a new copy and diff'ing it with what we saved.
8778 */
8779 if (diagnose_buffer != NULL) {
8780 vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
8781 }
8782 #if DEBUG || DEVELOPMENT
8783 if (corruption_test_va != 0) {
8784 corruption_test_va = 0;
8785 event->vmtc_testing = true;
8786 }
8787 #endif /* DEBUG || DEVELOPMENT */
8788 ktriage_record(thread_tid(current_thread()),
8789 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
8790 0 /* arg */);
8791 CA_EVENT_SEND(ca_event);
8792 printf("Text page corruption detected for pid %d\n", proc_selfpid());
8793 ++vmtc_total;
8794 return KERN_FAILURE; /* failure means we definitely found a corrupt page */
8795 }
8796 done:
8797 if (object != NULL) {
8798 vm_object_unlock(object);
8799 }
8800 return KERN_SUCCESS;
8801 }
8802
8803 #if DEBUG || DEVELOPMENT
8804 /*
8805 * For implementing unit tests - ask the pmap to corrupt a text page.
8806 * We have to find the page, to get the physical address, then invoke
8807 * the pmap.
8808 */
8809 extern kern_return_t vm_corrupt_text_addr(uintptr_t);
8810
8811 kern_return_t
8812 vm_corrupt_text_addr(uintptr_t va)
8813 {
8814 task_t task = current_task();
8815 vm_map_t map;
8816 kern_return_t kr = KERN_SUCCESS;
8817 vm_object_t object = VM_OBJECT_NULL;
8818 vm_object_offset_t offset;
8819 vm_page_t page = NULL;
8820 pmap_paddr_t pa;
8821 vm_prot_t prot;
8822
8823 map = task->map;
8824 if (task->map == NULL) {
8825 printf("corrupt_text_addr: no map\n");
8826 return KERN_FAILURE;
8827 }
8828
8829 kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page, &prot);
8830 if (kr != KERN_SUCCESS) {
8831 printf("corrupt_text_addr: page lookup failed\n");
8832 return kr;
8833 }
8834 if (!(prot & VM_PROT_EXECUTE)) {
8835 printf("corrupt_text_addr: page not executable\n");
8836 return KERN_FAILURE;
8837 }
8838
8839 /* get the physical address to use */
8840 pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));
8841
8842 /*
8843 * Check we have something we can work with.
8844 * Due to racing with pageout as we enter the sysctl,
8845 * it's theoretically possible to have the page disappear, just
8846 * before the lookup.
8847 *
8848 * That's unlikely to happen often, but I've filed radar 72857482
8849 * to bubble the error here up to the sysctl result and have the
8850 * test not FAIL in that case.
8851 */
8852 if (page->vmp_busy) {
8853 printf("corrupt_text_addr: vmp_busy\n");
8854 kr = KERN_FAILURE;
8855 }
8856 if (page->vmp_cleaning) {
8857 printf("corrupt_text_addr: vmp_cleaning\n");
8858 kr = KERN_FAILURE;
8859 }
8860 if (page->vmp_laundry) {
8861 printf("corrupt_text_addr: vmp_laundry\n");
8862 kr = KERN_FAILURE;
8863 }
8864 if (vm_page_is_fictitious(page)) {
8865 printf("corrupt_text_addr: vmp_fictitious\n");
8866 kr = KERN_FAILURE;
8867 }
8868 if (page->vmp_precious) {
8869 printf("corrupt_text_addr: vmp_precious\n");
8870 kr = KERN_FAILURE;
8871 }
8872 if (page->vmp_absent) {
8873 printf("corrupt_text_addr: vmp_absent\n");
8874 kr = KERN_FAILURE;
8875 }
8876 if (VMP_ERROR_GET(page)) {
8877 printf("corrupt_text_addr: vmp_error\n");
8878 kr = KERN_FAILURE;
8879 }
8880 if (page->vmp_dirty) {
8881 printf("corrupt_text_addr: vmp_dirty\n");
8882 kr = KERN_FAILURE;
8883 }
8884 if (is_page_wired(page)) {
8885 printf("corrupt_text_addr: wired\n");
8886 kr = KERN_FAILURE;
8887 }
8888 if (!page->vmp_pmapped) {
8889 printf("corrupt_text_addr: !vmp_pmapped\n");
8890 kr = KERN_FAILURE;
8891 }
8892
8893 if (kr == KERN_SUCCESS) {
8894 printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
8895 kr = pmap_test_text_corruption(pa);
8896 if (kr != KERN_SUCCESS) {
8897 printf("corrupt_text_addr: pmap error %d\n", kr);
8898 } else {
8899 corruption_test_va = va;
8900 }
8901 } else {
8902 printf("corrupt_text_addr: object %p\n", object);
8903 printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
8904 printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
8905 printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
8906 printf("corrupt_text_addr: vm_page_t %p\n", page);
8907 printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
8908 printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
8909 }
8910
8911 if (object != VM_OBJECT_NULL) {
8912 vm_object_unlock(object);
8913 }
8914 return kr;
8915 }
8916
8917 #endif /* DEBUG || DEVELOPMENT */
8918