1 /*
2 * Copyright (c) 2000-2021 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or [email protected]
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: vm_fault.c
60 * Author: Avadis Tevanian, Jr., Michael Wayne Young
61 *
62 * Page fault handling module.
63 */
64
65 #include <libkern/OSAtomic.h>
66
67 #include <mach/mach_types.h>
68 #include <mach/kern_return.h>
69 #include <mach/message.h> /* for error codes */
70 #include <mach/vm_param.h>
71 #include <mach/vm_behavior.h>
72 #include <mach/memory_object.h>
73 /* For memory_object_data_{request,unlock} */
74 #include <mach/sdt.h>
75
76 #include <kern/kern_types.h>
77 #include <kern/host_statistics.h>
78 #include <kern/counter.h>
79 #include <kern/task.h>
80 #include <kern/thread.h>
81 #include <kern/sched_prim.h>
82 #include <kern/host.h>
83 #include <kern/mach_param.h>
84 #include <kern/macro_help.h>
85 #include <kern/zalloc_internal.h>
86 #include <kern/misc_protos.h>
87 #include <kern/policy_internal.h>
88 #include <kern/exc_guard.h>
89
90 #include <vm/vm_compressor_internal.h>
91 #include <vm/vm_compressor_pager_internal.h>
92 #include <vm/vm_dyld_pager_internal.h>
93 #include <vm/vm_fault_internal.h>
94 #include <vm/vm_map_internal.h>
95 #include <vm/vm_object_internal.h>
96 #include <vm/vm_page_internal.h>
97 #include <vm/vm_kern_internal.h>
98 #include <vm/pmap.h>
99 #include <vm/vm_pageout_internal.h>
100 #include <vm/vm_protos_internal.h>
101 #include <vm/vm_external.h>
102 #include <vm/memory_object.h>
103 #include <vm/vm_purgeable_internal.h> /* Needed by some vm_page.h macros */
104 #include <vm/vm_shared_region.h>
105 #include <vm/vm_page_internal.h>
106
107 #include <sys/codesign.h>
108 #include <sys/code_signing.h>
109 #include <sys/kdebug.h>
110 #include <sys/kdebug_triage.h>
111 #include <sys/reason.h>
112 #include <sys/signalvar.h>
113
114 #include <san/kasan.h>
115 #include <libkern/coreanalytics/coreanalytics.h>
116
117 #define VM_FAULT_CLASSIFY 0
118
119 #define TRACEFAULTPAGE 0 /* (TEST/DEBUG) */
120
121 int vm_protect_privileged_from_untrusted = 1;
122
123 /*
124 * Enforce a maximum number of concurrent PageIns per vm-object to prevent
125 * high-I/O-volume tasks from saturating storage and starving the rest of the
126 * system.
127 *
128 * TODO: This throttling mechanism may be more naturally done by the pager,
129 * filesystem, or storage layers, which will have better information about how
130 * much concurrency the backing store can reasonably support.
131 */
132 TUNABLE(uint16_t, vm_object_pagein_throttle, "vm_object_pagein_throttle", 16);
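/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * how the limit above is consulted further down in vm_fault_page().  An
 * external object with more than vm_object_pagein_throttle pageins already
 * in flight makes the faulting thread wait for the count to drain before
 * issuing its own request.  The default can be overridden with the
 * "vm_object_pagein_throttle" boot-arg.
 */
#if 0 /* example only, not compiled; "object" is the faulting object */
if (!object->internal && !object->phys_contiguous &&
    object->paging_in_progress > vm_object_pagein_throttle) {
	/* wait for outstanding pageins on this object to drain, then retry */
}
#endif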
133
134 /*
135 * We apply a hard throttle to the demand-zero rate of tasks that we believe are running out of control; it
136 * kicks in when swap space runs out. 64-bit programs have massive address spaces and, if they're buggy, can leak
137 * enormous amounts of memory and run the system completely out of swap space. If this happens, we
138 * impose a hard throttle on them to prevent them from taking the last bit of memory left. This helps
139 * keep the UI active so that the user has a chance to kill the offending task before the system
140 * completely hangs.
141 *
142 * The hard throttle is only applied when the system is nearly completely out of swap space and is only applied
143 * to tasks that appear to be bloated. When swap runs out, any task using more than vm_hard_throttle_threshold
144 * will be throttled. The throttling is done by giving the thread that's trying to demand zero a page a
145 * delay of HARD_THROTTLE_DELAY microseconds before being allowed to try the page fault again.
146 */
147
148 extern void throttle_lowpri_io(int);
149
150 extern struct vnode *vnode_pager_lookup_vnode(memory_object_t);
151
152 uint64_t vm_hard_throttle_threshold;
153
154 #if DEBUG || DEVELOPMENT
155 static bool vmtc_panic_instead = false;
156 int panic_object_not_alive = 1;
157 #endif /* DEBUG || DEVELOPMENT */
158
159 OS_ALWAYS_INLINE
160 boolean_t
161 NEED_TO_HARD_THROTTLE_THIS_TASK(void)
162 {
163 return vm_wants_task_throttled(current_task()) ||
164 ((vm_page_free_count < vm_page_throttle_limit ||
165 HARD_THROTTLE_LIMIT_REACHED()) &&
166 proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO) >= THROTTLE_LEVEL_THROTTLED);
167 }
168
169
170 /*
171 * XXX: For now, vm faults cannot be recursively disabled. If the need for
172 * nested code that disables faults arises, the implementation can be modified
173 * to track a disabled-count.
174 */
175
176 OS_ALWAYS_INLINE
177 void
178 vm_fault_disable(void)
179 {
180 thread_t t = current_thread();
181 assert(!t->th_vm_faults_disabled);
182 t->th_vm_faults_disabled = true;
183 act_set_debug_assert();
184 }
185
186 OS_ALWAYS_INLINE
187 void
188 vm_fault_enable(void)
189 {
190 thread_t t = current_thread();
191 assert(t->th_vm_faults_disabled);
192 t->th_vm_faults_disabled = false;
193 }
194
195 OS_ALWAYS_INLINE
196 bool
197 vm_fault_get_disabled(void)
198 {
199 thread_t t = current_thread();
200 return t->th_vm_faults_disabled;
201 }
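/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the intended, non-nested pairing of the helpers above.  Because faults
 * cannot currently be recursively disabled, a second vm_fault_disable()
 * before the matching vm_fault_enable() would trip the assertion.
 * "do_work_that_must_not_fault" is a hypothetical placeholder.
 */
#if 0 /* example only, not compiled */
vm_fault_disable();                     /* asserts faults were enabled */
assert(vm_fault_get_disabled());
do_work_that_must_not_fault();          /* hypothetical */
vm_fault_enable();                      /* asserts faults were disabled */
#endif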
202
203 #define HARD_THROTTLE_DELAY 10000 /* 10000 us == 10 ms */
204 #define SOFT_THROTTLE_DELAY 200 /* 200 us == .2 ms */
205
206 #define VM_PAGE_CREATION_THROTTLE_PERIOD_SECS 6
207 #define VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC 20000
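/*
 * Worked example (editorial note): with the defaults above, the page
 * creation throttle in vm_page_throttled() can only trip when free memory
 * is constrained and the thread has created more than
 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC
 * = 6 * 20000 = 120,000 pages, and then only if its observed creation rate
 * is at least 20,000 pages per second over the elapsed interval.
 */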
208
209
210 #define VM_STAT_DECOMPRESSIONS() \
211 MACRO_BEGIN \
212 counter_inc(&vm_statistics_decompressions); \
213 current_thread()->decompressions++; \
214 MACRO_END
215
216 boolean_t current_thread_aborted(void);
217
218 /* Forward declarations of internal routines. */
219 static kern_return_t vm_fault_wire_fast(
220 vm_map_t map,
221 vm_map_offset_t va,
222 vm_prot_t prot,
223 vm_tag_t wire_tag,
224 vm_map_entry_t entry,
225 pmap_t pmap,
226 vm_map_offset_t pmap_addr,
227 ppnum_t *physpage_p);
228
229 static kern_return_t vm_fault_internal(
230 vm_map_t map,
231 vm_map_offset_t vaddr,
232 vm_prot_t caller_prot,
233 vm_tag_t wire_tag,
234 pmap_t pmap,
235 vm_map_offset_t pmap_addr,
236 ppnum_t *physpage_p,
237 vm_object_fault_info_t fault_info);
238
239 static void vm_fault_copy_cleanup(
240 vm_page_t page,
241 vm_page_t top_page);
242
243 static void vm_fault_copy_dst_cleanup(
244 vm_page_t page);
245
246 #if VM_FAULT_CLASSIFY
247 extern void vm_fault_classify(vm_object_t object,
248 vm_object_offset_t offset,
249 vm_prot_t fault_type);
250
251 extern void vm_fault_classify_init(void);
252 #endif
253
254 unsigned long vm_pmap_enter_blocked = 0;
255 unsigned long vm_pmap_enter_retried = 0;
256
257 unsigned long vm_cs_validates = 0;
258 unsigned long vm_cs_revalidates = 0;
259 unsigned long vm_cs_query_modified = 0;
260 unsigned long vm_cs_validated_dirtied = 0;
261 unsigned long vm_cs_bitmap_validated = 0;
262
263 #if CODE_SIGNING_MONITOR
264 uint64_t vm_cs_defer_to_csm = 0;
265 uint64_t vm_cs_defer_to_csm_not = 0;
266 #endif /* CODE_SIGNING_MONITOR */
267
268 extern char *kdp_compressor_decompressed_page;
269 extern addr64_t kdp_compressor_decompressed_page_paddr;
270 extern ppnum_t kdp_compressor_decompressed_page_ppnum;
271
272 struct vmrtfr {
273 int vmrtfr_maxi;
274 int vmrtfr_curi;
275 int64_t vmrtf_total;
276 vm_rtfault_record_t *vm_rtf_records;
277 } vmrtfrs;
278 #define VMRTF_DEFAULT_BUFSIZE (4096)
279 #define VMRTF_NUM_RECORDS_DEFAULT (VMRTF_DEFAULT_BUFSIZE / sizeof(vm_rtfault_record_t))
280 TUNABLE(int, vmrtf_num_records, "vm_rtfault_records", VMRTF_NUM_RECORDS_DEFAULT);
281
282 static void vm_rtfrecord_lock(void);
283 static void vm_rtfrecord_unlock(void);
284 static void vm_record_rtfault(thread_t, uint64_t, vm_map_offset_t, int);
285
286 extern lck_grp_t vm_page_lck_grp_bucket;
287 extern lck_attr_t vm_page_lck_attr;
288 LCK_SPIN_DECLARE_ATTR(vm_rtfr_slock, &vm_page_lck_grp_bucket, &vm_page_lck_attr);
289
290 #if DEVELOPMENT || DEBUG
291 extern int madvise_free_debug;
292 extern int madvise_free_debug_sometimes;
293 #endif /* DEVELOPMENT || DEBUG */
294
295 extern int vm_pageout_protect_realtime;
296
297 #if CONFIG_FREEZE
298 #endif /* CONFIG_FREEZE */
299
300 /*
301 * Routine: vm_fault_init
302 * Purpose:
303 * Initialize our private data structures.
304 */
305 __startup_func
306 void
307 vm_fault_init(void)
308 {
309 int i, vm_compressor_temp;
310 boolean_t need_default_val = TRUE;
311 /*
312 * Choose a value for the hard throttle threshold based on the amount of ram. The threshold is
313 * computed as a percentage of available memory, and the percentage used is scaled inversely with
314 * the amount of memory. The percentage runs between 10% and 35%. We use 35% for small memory systems
315 * and reduce the value down to 10% for very large memory configurations. This helps give us a
316 * definition of a memory hog that makes more sense relative to the amount of ram in the machine.
317 * The formula here simply uses the number of gigabytes of ram to adjust the percentage.
318 */
319
320 vm_hard_throttle_threshold = sane_size * (35 - MIN((int)(sane_size / (1024 * 1024 * 1024)), 25)) / 100;
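/*
 * Worked examples (editorial note): with 8 GB of RAM the percentage is
 * 35 - MIN(8, 25) = 27, giving a threshold of roughly 2.2 GB; with 64 GB
 * of RAM the percentage bottoms out at 35 - 25 = 10, i.e. about 6.4 GB.
 */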
321
322 /*
323 * Configure compressed pager behavior. A boot arg takes precedence over a device tree entry.
324 */
325
326 if (PE_parse_boot_argn("vm_compressor", &vm_compressor_temp, sizeof(vm_compressor_temp))) {
327 for (i = 0; i < VM_PAGER_MAX_MODES; i++) {
328 if (((vm_compressor_temp & (1 << i)) == vm_compressor_temp)) {
329 need_default_val = FALSE;
330 vm_compressor_mode = vm_compressor_temp;
331 break;
332 }
333 }
334 if (need_default_val) {
335 printf("Ignoring \"vm_compressor\" boot arg %d\n", vm_compressor_temp);
336 }
337 }
338 #if CONFIG_FREEZE
339 if (need_default_val) {
340 if (osenvironment_is_diagnostics()) {
341 printf("osenvironment == \"diagnostics\". Setting \"vm_compressor_mode\" to in-core compressor only\n");
342 vm_compressor_mode = VM_PAGER_COMPRESSOR_NO_SWAP;
343 need_default_val = false;
344 }
345 }
346 #endif /* CONFIG_FREEZE */
347 if (need_default_val) {
348 /* If no boot arg or incorrect boot arg, try device tree. */
349 PE_get_default("kern.vm_compressor", &vm_compressor_mode, sizeof(vm_compressor_mode));
350 }
351 printf("\"vm_compressor_mode\" is %d\n", vm_compressor_mode);
352 vm_config_init();
353
354 PE_parse_boot_argn("vm_protect_privileged_from_untrusted",
355 &vm_protect_privileged_from_untrusted,
356 sizeof(vm_protect_privileged_from_untrusted));
357
358 #if DEBUG || DEVELOPMENT
359 (void)PE_parse_boot_argn("text_corruption_panic", &vmtc_panic_instead, sizeof(vmtc_panic_instead));
360
361 if (kern_feature_override(KF_MADVISE_FREE_DEBUG_OVRD)) {
362 madvise_free_debug = 0;
363 madvise_free_debug_sometimes = 0;
364 }
365
366 PE_parse_boot_argn("panic_object_not_alive", &panic_object_not_alive, sizeof(panic_object_not_alive));
367 #endif /* DEBUG || DEVELOPMENT */
368 }
369
370 __startup_func
371 static void
372 vm_rtfault_record_init(void)
373 {
374 size_t size;
375
376 vmrtf_num_records = MAX(vmrtf_num_records, 1);
377 size = vmrtf_num_records * sizeof(vm_rtfault_record_t);
378 vmrtfrs.vm_rtf_records = zalloc_permanent_tag(size,
379 ZALIGN(vm_rtfault_record_t), VM_KERN_MEMORY_DIAG);
380 vmrtfrs.vmrtfr_maxi = vmrtf_num_records - 1;
381 }
382 STARTUP(ZALLOC, STARTUP_RANK_MIDDLE, vm_rtfault_record_init);
383
384 /*
385 * Routine: vm_fault_cleanup
386 * Purpose:
387 * Clean up the result of vm_fault_page.
388 * Results:
389 * The paging reference for "object" is released.
390 * "object" is unlocked.
391 * If "top_page" is not null, "top_page" is
392 * freed and the paging reference for the object
393 * containing it is released.
394 *
395 * In/out conditions:
396 * "object" must be locked.
397 */
398 void
399 vm_fault_cleanup(
400 vm_object_t object,
401 vm_page_t top_page)
402 {
403 thread_pri_floor_t token = {
404 .thread = THREAD_NULL
405 };
406 if (top_page != VM_PAGE_NULL &&
407 top_page->vmp_busy) {
408 /*
409 * We busied the top page. Apply a priority floor before dropping the
410 * current object (and therefore the rw-lock boost) to avoid
411 * inversions due to another thread sleeping on the top-level page.
412 *
413 * TODO: Register a page-worker token when busying the top-level page instead
414 * (rdar://154313767)
415 */
416 token = thread_priority_floor_start();
417 }
418
419 vm_object_paging_end(object);
420 vm_object_unlock(object);
421
422 if (top_page != VM_PAGE_NULL) {
423 object = VM_PAGE_OBJECT(top_page);
424
425 vm_object_lock(object);
426 VM_PAGE_FREE(top_page);
427 vm_object_paging_end(object);
428 vm_object_unlock(object);
429 }
430 if (token.thread != THREAD_NULL) {
431 thread_priority_floor_end(&token);
432 }
433 }
434
435 #define ALIGNED(x) (((x) & (PAGE_SIZE_64 - 1)) == 0)
436
437
438 TUNABLE(bool, vm_page_deactivate_behind, "vm_deactivate_behind", true);
439 TUNABLE(uint32_t, vm_page_deactivate_behind_min_resident_ratio, "vm_deactivate_behind_min_resident_ratio", 3);
440 /*
441 * default sizes given VM_BEHAVIOR_DEFAULT reference behavior
442 */
443 #define VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW 128
444 #define VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER 16 /* don't make this too big... */
445 /* we use it to size an array on the stack */
446
447 int vm_default_behind = VM_DEFAULT_DEACTIVATE_BEHIND_WINDOW;
448
449 #define MAX_SEQUENTIAL_RUN (1024 * 1024 * 1024)
450
451 /*
452 * vm_page_is_sequential
453 *
454 * Determine if sequential access is in progress
455 * in accordance with the behavior specified.
456 * Update state to indicate current access pattern.
457 *
458 * object must have at least the shared lock held
459 */
460 static
461 void
462 vm_fault_is_sequential(
463 vm_object_t object,
464 vm_object_offset_t offset,
465 vm_behavior_t behavior)
466 {
467 vm_object_offset_t last_alloc;
468 int sequential;
469 int orig_sequential;
470
471 last_alloc = object->last_alloc;
472 sequential = object->sequential;
473 orig_sequential = sequential;
474
475 offset = vm_object_trunc_page(offset);
476 if (offset == last_alloc && behavior != VM_BEHAVIOR_RANDOM) {
477 /* re-faulting in the same page: no change in behavior */
478 return;
479 }
480
481 switch (behavior) {
482 case VM_BEHAVIOR_RANDOM:
483 /*
484 * reset indicator of sequential behavior
485 */
486 sequential = 0;
487 break;
488
489 case VM_BEHAVIOR_SEQUENTIAL:
490 if (offset && last_alloc == offset - PAGE_SIZE_64) {
491 /*
492 * advance indicator of sequential behavior
493 */
494 if (sequential < MAX_SEQUENTIAL_RUN) {
495 sequential += PAGE_SIZE;
496 }
497 } else {
498 /*
499 * reset indicator of sequential behavior
500 */
501 sequential = 0;
502 }
503 break;
504
505 case VM_BEHAVIOR_RSEQNTL:
506 if (last_alloc && last_alloc == offset + PAGE_SIZE_64) {
507 /*
508 * advance indicator of sequential behavior
509 */
510 if (sequential > -MAX_SEQUENTIAL_RUN) {
511 sequential -= PAGE_SIZE;
512 }
513 } else {
514 /*
515 * reset indicator of sequential behavior
516 */
517 sequential = 0;
518 }
519 break;
520
521 case VM_BEHAVIOR_DEFAULT:
522 default:
523 if (offset && last_alloc == (offset - PAGE_SIZE_64)) {
524 /*
525 * advance indicator of sequential behavior
526 */
527 if (sequential < 0) {
528 sequential = 0;
529 }
530 if (sequential < MAX_SEQUENTIAL_RUN) {
531 sequential += PAGE_SIZE;
532 }
533 } else if (last_alloc && last_alloc == (offset + PAGE_SIZE_64)) {
534 /*
535 * advance indicator of sequential behavior
536 */
537 if (sequential > 0) {
538 sequential = 0;
539 }
540 if (sequential > -MAX_SEQUENTIAL_RUN) {
541 sequential -= PAGE_SIZE;
542 }
543 } else {
544 /*
545 * reset indicator of sequential behavior
546 */
547 sequential = 0;
548 }
549 break;
550 }
551 if (sequential != orig_sequential) {
552 if (!OSCompareAndSwap(orig_sequential, sequential, (UInt32 *)&object->sequential)) {
553 /*
554 * if someone else has already updated object->sequential
555 * don't bother trying to update it or object->last_alloc
556 */
557 return;
558 }
559 }
560 /*
561 * I'd like to do this with a OSCompareAndSwap64, but that
562 * doesn't exist for PPC... however, it shouldn't matter
563 * that much... last_alloc is maintained so that we can determine
564 * if a sequential access pattern is taking place... if only
565 * one thread is banging on this object, no problem with the unprotected
566 * update... if 2 or more threads are banging away, we run the risk of
567 * someone seeing a mangled update... however, in the face of multiple
568 * accesses, no sequential access pattern can develop anyway, so we
569 * haven't lost any real info.
570 */
571 object->last_alloc = offset;
572 }
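/*
 * Illustrative walk-through (editorial note, assuming 4 KB pages and a
 * freshly created object): with VM_BEHAVIOR_DEFAULT, faults at offsets
 * 0x0000, 0x1000 and 0x2000 in turn leave object->sequential at 0,
 * +PAGE_SIZE and +2*PAGE_SIZE, since each offset is exactly one page past
 * last_alloc.  A subsequent fault at, say, 0x9000 matches neither the
 * forward nor the backward test and resets object->sequential to 0.  The
 * run length saturates at +/- MAX_SEQUENTIAL_RUN.
 */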
573
574 #if DEVELOPMENT || DEBUG
575 SCALABLE_COUNTER_DEFINE(vm_page_deactivate_behind_count);
576 #endif /* DEVELOPMENT || DEBUG */
577
578 /*
579 * @func vm_fault_deactivate_behind
580 *
581 * @description
582 * Determine if sequential access is in progress
583 * in accordance with the behavior specified. If
584 * so, compute a potential page to deactivate and
585 * deactivate it.
586 *
587 * object must be locked.
588 *
589 * @returns the number of deactivated pages
590 */
591 static
592 uint32_t
593 vm_fault_deactivate_behind(
594 vm_object_t object,
595 vm_object_offset_t offset,
596 vm_behavior_t behavior)
597 {
598 uint32_t pages_in_run = 0;
599 uint32_t max_pages_in_run = 0;
600 int32_t sequential_run;
601 vm_behavior_t sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
602 vm_object_offset_t run_offset = 0;
603 vm_object_offset_t pg_offset = 0;
604 vm_page_t m;
605 vm_page_t page_run[VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER];
606
607 #if TRACEFAULTPAGE
608 dbgTrace(0xBEEF0018, (unsigned int) object, (unsigned int) vm_fault_deactivate_behind); /* (TEST/DEBUG) */
609 #endif
610 if (is_kernel_object(object) ||
611 !vm_page_deactivate_behind ||
612 (vm_object_trunc_page(offset) != offset) ||
613 (object->resident_page_count <
614 vm_page_active_count / vm_page_deactivate_behind_min_resident_ratio)) {
615 /*
616 * Do not deactivate pages from the kernel object: they
617 * are not intended to become pageable.
618 * or we've disabled the deactivate behind mechanism
619 * or we are dealing with an offset that is not aligned to
620 * the system's PAGE_SIZE because in that case we will
621 * handle the deactivation on the aligned offset and, thus,
622 * the full PAGE_SIZE page once. This helps us avoid the redundant
623 * deactivates and the extra faults.
624 *
625 * Objects need only participate in backwards
626 * deactivation if they are exceedingly large (i.e. their
627 * resident pages are liable to comprise a substantially large
628 * portion of the active queue and push out the rest of the
629 * system's working set).
630 */
631 return 0;
632 }
633
634 KDBG_FILTERED(VMDBG_CODE(DBG_VM_FAULT_DEACTIVATE_BEHIND) | DBG_FUNC_START,
635 VM_KERNEL_ADDRHIDE(object), offset, behavior);
636
637 if ((sequential_run = object->sequential)) {
638 if (sequential_run < 0) {
639 sequential_behavior = VM_BEHAVIOR_RSEQNTL;
640 sequential_run = 0 - sequential_run;
641 } else {
642 sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
643 }
644 }
645 switch (behavior) {
646 case VM_BEHAVIOR_RANDOM:
647 break;
648 case VM_BEHAVIOR_SEQUENTIAL:
649 if (sequential_run >= (int)PAGE_SIZE) {
650 run_offset = 0 - PAGE_SIZE_64;
651 max_pages_in_run = 1;
652 }
653 break;
654 case VM_BEHAVIOR_RSEQNTL:
655 if (sequential_run >= (int)PAGE_SIZE) {
656 run_offset = PAGE_SIZE_64;
657 max_pages_in_run = 1;
658 }
659 break;
660 case VM_BEHAVIOR_DEFAULT:
661 default:
662 { vm_object_offset_t behind = vm_default_behind * PAGE_SIZE_64;
663
664 /*
665 * determine if the run of sequential accesses has been
666 * long enough on an object with default access behavior
667 * to consider it for deactivation
668 */
669 if ((uint64_t)sequential_run >= behind && (sequential_run % (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER * PAGE_SIZE)) == 0) {
670 /*
671 * the comparisons between offset and behind are done
672 * in this kind of odd fashion in order to prevent wrap around
673 * at the end points
674 */
675 if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
676 if (offset >= behind) {
677 run_offset = 0 - behind;
678 pg_offset = PAGE_SIZE_64;
679 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
680 }
681 } else {
682 if (offset < -behind) {
683 run_offset = behind;
684 pg_offset = 0 - PAGE_SIZE_64;
685 max_pages_in_run = VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER;
686 }
687 }
688 }
689 break;}
690 }
691 for (unsigned n = 0; n < max_pages_in_run; n++) {
692 m = vm_page_lookup(object, offset + run_offset + (n * pg_offset));
693
694 if (m && !m->vmp_laundry && !m->vmp_busy && !m->vmp_no_cache &&
695 (m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
696 !vm_page_is_fictitious(m) && !m->vmp_absent) {
697 page_run[pages_in_run++] = m;
698
699 /*
700 * by not passing in a pmap_flush_context we will forgo any TLB flushing, local or otherwise...
701 *
702 * a TLB flush isn't really needed here since at worst we'll miss the reference bit being
703 * updated in the PTE if a remote processor still has this mapping cached in its TLB when the
704 * new reference happens. If no further references happen on the page after that remote TLB flushes
705 * we'll see a clean, non-referenced page when it eventually gets pulled out of the inactive queue
706 * by pageout_scan, which is just fine since the last reference would have happened quite far
707 * in the past (TLB caches don't hang around for very long), and of course could just as easily
708 * have happened before we did the deactivate_behind.
709 */
710 pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m), VM_MEM_REFERENCED, PMAP_OPTIONS_NOFLUSH, (void *)NULL);
711 }
712 }
713
714 if (pages_in_run) {
715 vm_page_lockspin_queues();
716
717 for (unsigned n = 0; n < pages_in_run; n++) {
718 m = page_run[n];
719
720 vm_page_deactivate_internal(m, FALSE);
721
722 #if DEVELOPMENT || DEBUG
723 counter_inc(&vm_page_deactivate_behind_count);
724 #endif /* DEVELOPMENT || DEBUG */
725
726 #if TRACEFAULTPAGE
727 dbgTrace(0xBEEF0019, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
728 #endif
729 }
730 vm_page_unlock_queues();
731 }
732
733 KDBG_FILTERED(VMDBG_CODE(DBG_VM_FAULT_DEACTIVATE_BEHIND) | DBG_FUNC_END,
734 pages_in_run);
735
736 return pages_in_run;
737 }
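/*
 * Worked example (editorial note, assuming 4 KB pages and the defaults
 * above): with VM_BEHAVIOR_DEFAULT, deactivation only starts once the
 * forward sequential run reaches vm_default_behind * PAGE_SIZE = 128 pages
 * (512 KB), and then only on every 16-page
 * (VM_DEFAULT_DEACTIVATE_BEHIND_CLUSTER) boundary of the run.  At that
 * point the cluster of 16 pages starting 128 pages behind the faulting
 * offset is looked up and, where eligible, deactivated.
 */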
738
739
740 #if (DEVELOPMENT || DEBUG)
741 uint32_t vm_page_creation_throttled_hard = 0;
742 uint32_t vm_page_creation_throttled_soft = 0;
743 uint64_t vm_page_creation_throttle_avoided = 0;
744 #endif /* DEVELOPMENT || DEBUG */
745
746 static int
747 vm_page_throttled(boolean_t page_kept)
748 {
749 clock_sec_t elapsed_sec;
750 clock_sec_t tv_sec;
751 clock_usec_t tv_usec;
752 task_t curtask = current_task_early();
753
754 thread_t thread = current_thread();
755
756 if (thread->options & TH_OPT_VMPRIV) {
757 return 0;
758 }
759
760 if (curtask && !curtask->active) {
761 return 0;
762 }
763
764 if (thread->t_page_creation_throttled) {
765 thread->t_page_creation_throttled = 0;
766
767 if (page_kept == FALSE) {
768 goto no_throttle;
769 }
770 }
771 if (NEED_TO_HARD_THROTTLE_THIS_TASK()) {
772 #if (DEVELOPMENT || DEBUG)
773 thread->t_page_creation_throttled_hard++;
774 OSAddAtomic(1, &vm_page_creation_throttled_hard);
775 #endif /* DEVELOPMENT || DEBUG */
776 return HARD_THROTTLE_DELAY;
777 }
778
779 if ((vm_page_free_count < vm_page_throttle_limit || (VM_CONFIG_COMPRESSOR_IS_PRESENT && SWAPPER_NEEDS_TO_UNTHROTTLE())) &&
780 thread->t_page_creation_count > (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS * VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC)) {
781 if (vm_page_free_wanted == 0 && vm_page_free_wanted_privileged == 0) {
782 #if (DEVELOPMENT || DEBUG)
783 OSAddAtomic64(1, &vm_page_creation_throttle_avoided);
784 #endif
785 goto no_throttle;
786 }
787 clock_get_system_microtime(&tv_sec, &tv_usec);
788
789 elapsed_sec = tv_sec - thread->t_page_creation_time;
790
791 if (elapsed_sec <= VM_PAGE_CREATION_THROTTLE_PERIOD_SECS ||
792 (thread->t_page_creation_count / elapsed_sec) >= VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC) {
793 if (elapsed_sec >= (3 * VM_PAGE_CREATION_THROTTLE_PERIOD_SECS)) {
794 /*
795 * we'll reset our stats to give a well behaved app
796 * that was unlucky enough to accumulate a bunch of pages
797 * over a long period of time a chance to get out of
798 * the throttled state... we reset the counter and timestamp
799 * so that if it stays under the rate limit for the next second
800 * it will be back in our good graces... if it exceeds it, it
801 * will remain in the throttled state
802 */
803 thread->t_page_creation_time = tv_sec;
804 thread->t_page_creation_count = VM_PAGE_CREATION_THROTTLE_RATE_PER_SEC * (VM_PAGE_CREATION_THROTTLE_PERIOD_SECS - 1);
805 }
806 VM_PAGEOUT_DEBUG(vm_page_throttle_count, 1);
807
808 thread->t_page_creation_throttled = 1;
809
810 if (VM_CONFIG_COMPRESSOR_IS_PRESENT && HARD_THROTTLE_LIMIT_REACHED()) {
811 #if (DEVELOPMENT || DEBUG)
812 thread->t_page_creation_throttled_hard++;
813 OSAddAtomic(1, &vm_page_creation_throttled_hard);
814 #endif /* DEVELOPMENT || DEBUG */
815 return HARD_THROTTLE_DELAY;
816 } else {
817 #if (DEVELOPMENT || DEBUG)
818 thread->t_page_creation_throttled_soft++;
819 OSAddAtomic(1, &vm_page_creation_throttled_soft);
820 #endif /* DEVELOPMENT || DEBUG */
821 return SOFT_THROTTLE_DELAY;
822 }
823 }
824 thread->t_page_creation_time = tv_sec;
825 thread->t_page_creation_count = 0;
826 }
827 no_throttle:
828 thread->t_page_creation_count++;
829
830 return 0;
831 }
832
833 extern boolean_t vm_pageout_running;
834 static __attribute__((noinline, not_tail_called)) void
835 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(
836 int throttle_delay)
837 {
838 /* make sure vm_pageout_scan() gets to work while we're throttled */
839 if (!vm_pageout_running) {
840 thread_wakeup((event_t)&vm_page_free_wanted);
841 }
842 delay(throttle_delay);
843 }
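/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the pattern the zero-fill path follows, per vm_fault_check() below.  A
 * non-zero return from vm_page_throttled() is a delay in microseconds; the
 * helper above wakes vm_pageout_scan() if necessary and sleeps for that
 * long before the fault is retried.
 */
#if 0 /* example only, not compiled */
int throttle_delay;

if ((throttle_delay = vm_page_throttled(FALSE))) {
	__VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
	/* ...then return VM_FAULT_MEMORY_SHORTAGE so the caller retries */
}
#endif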
844
845
846 /*
847 * check for various conditions that would
848 * prevent us from creating a ZF page...
849 * cleanup is based on being called from vm_fault_page
850 *
851 * object must be locked
852 * object == m->vmp_object
853 */
854 static vm_fault_return_t
855 vm_fault_check(vm_object_t object, vm_page_t m, vm_page_t first_m, wait_interrupt_t interruptible_state, boolean_t page_throttle)
856 {
857 int throttle_delay;
858
859 if (object->shadow_severed ||
860 VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
861 /*
862 * Either:
863 * 1. the shadow chain was severed,
864 * 2. the purgeable object is volatile or empty and is marked
865 * to fault on access while volatile.
866 * Just have to return an error at this point
867 */
868 if (m != VM_PAGE_NULL) {
869 VM_PAGE_FREE(m);
870 }
871 vm_fault_cleanup(object, first_m);
872
873 thread_interrupt_level(interruptible_state);
874
875 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
876 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
877 }
878
879 if (object->shadow_severed) {
880 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
881 }
882 return VM_FAULT_MEMORY_ERROR;
883 }
884 if (page_throttle == TRUE) {
885 if ((throttle_delay = vm_page_throttled(FALSE))) {
886 /*
887 * we're throttling zero-fills...
888 * treat this as if we couldn't grab a page
889 */
890 if (m != VM_PAGE_NULL) {
891 VM_PAGE_FREE(m);
892 }
893 vm_fault_cleanup(object, first_m);
894
895 VM_DEBUG_EVENT(vmf_check_zfdelay, DBG_VM_FAULT_CHECK_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
896
897 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
898
899 if (current_thread_aborted()) {
900 thread_interrupt_level(interruptible_state);
901 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
902 return VM_FAULT_INTERRUPTED;
903 }
904 thread_interrupt_level(interruptible_state);
905
906 return VM_FAULT_MEMORY_SHORTAGE;
907 }
908 }
909 return VM_FAULT_SUCCESS;
910 }
911
912 /*
913 * Clear the code signing bits on the given page_t
914 */
915 static void
916 vm_fault_cs_clear(vm_page_t m)
917 {
918 m->vmp_cs_validated = VMP_CS_ALL_FALSE;
919 m->vmp_cs_tainted = VMP_CS_ALL_FALSE;
920 m->vmp_cs_nx = VMP_CS_ALL_FALSE;
921 }
922
923 /*
924 * Enqueues the given page on the throttled queue.
925 * The caller must hold the vm_page_queue_lock and it will be held on return.
926 */
927 static void
928 vm_fault_enqueue_throttled_locked(vm_page_t m)
929 {
930 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_OWNED);
931 assert(!VM_PAGE_WIRED(m));
932
933 /*
934 * can't be on the pageout queue since we don't
935 * have a pager to try and clean to
936 */
937 vm_page_queues_remove(m, TRUE);
938 vm_page_check_pageable_safe(m);
939 vm_page_queue_enter(&vm_page_queue_throttled, m, vmp_pageq);
940 m->vmp_q_state = VM_PAGE_ON_THROTTLED_Q;
941 vm_page_throttled_count++;
942 }
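/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the locking protocol followed by the zero-fill path below when dynamic
 * paging is not enabled.  The page queues lock is taken by the caller and
 * is still held when the helper returns.
 */
#if 0 /* example only, not compiled */
vm_page_lockspin_queues();
if (!VM_DYNAMIC_PAGING_ENABLED()) {
	vm_fault_enqueue_throttled_locked(m);
}
vm_page_unlock_queues();
#endif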
943
944 /*
945 * do the work to zero fill a page and
946 * inject it into the correct paging queue
947 *
948 * m->vmp_object must be locked
949 * page queue lock must NOT be held
950 */
951 static int
952 vm_fault_zero_page(vm_page_t m, boolean_t no_zero_fill)
953 {
954 int my_fault = DBG_ZERO_FILL_FAULT;
955 vm_object_t object;
956
957 object = VM_PAGE_OBJECT(m);
958
959 /*
960 * This is a zero-fill page fault...
961 *
962 * Checking the page lock is a waste of
963 * time; this page was absent, so
964 * it can't be page locked by a pager.
965 *
966 * we also consider it undefined
967 * with respect to instruction
968 * execution. i.e. it is the responsibility
969 * of higher layers to call for an instruction
970 * sync after changing the contents and before
971 * sending a program into this area. We
972 * choose this approach for performance
973 */
974 vm_fault_cs_clear(m);
975 m->vmp_pmapped = TRUE;
976
977 if (no_zero_fill == TRUE) {
978 my_fault = DBG_NZF_PAGE_FAULT;
979
980 if (m->vmp_absent && m->vmp_busy) {
981 return my_fault;
982 }
983 } else {
984 vm_page_zero_fill(
985 m
986 );
987
988 counter_inc(&vm_statistics_zero_fill_count);
989 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
990 }
991 assert(!m->vmp_laundry);
992 assert(!is_kernel_object(object));
993 //assert(m->vmp_pageq.next == 0 && m->vmp_pageq.prev == 0);
994 if (!VM_DYNAMIC_PAGING_ENABLED() &&
995 (object->purgable == VM_PURGABLE_DENY ||
996 object->purgable == VM_PURGABLE_NONVOLATILE ||
997 object->purgable == VM_PURGABLE_VOLATILE)) {
998 vm_page_lockspin_queues();
999 if (!VM_DYNAMIC_PAGING_ENABLED()) {
1000 vm_fault_enqueue_throttled_locked(m);
1001 }
1002 vm_page_unlock_queues();
1003 }
1004 return my_fault;
1005 }
1006
1007 /*
1008 * Recovery actions for vm_fault_page
1009 */
1010 __attribute__((always_inline))
1011 static void
1012 vm_fault_page_release_page(
1013 vm_page_t m, /* Page to release */
1014 bool *clear_absent_on_error /* IN/OUT */)
1015 {
1016 vm_page_wakeup_done(VM_PAGE_OBJECT(m), m);
1017 if (!VM_PAGE_PAGEABLE(m)) {
1018 vm_page_lockspin_queues();
1019 if (*clear_absent_on_error && m->vmp_absent) {
1020 vm_page_zero_fill(
1021 m
1022 );
1023 counter_inc(&vm_statistics_zero_fill_count);
1024 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
1025 m->vmp_absent = false;
1026 }
1027 if (!VM_PAGE_PAGEABLE(m)) {
1028 if (VM_CONFIG_COMPRESSOR_IS_ACTIVE) {
1029 vm_page_deactivate(m);
1030 } else {
1031 vm_page_activate(m);
1032 }
1033 }
1034 vm_page_unlock_queues();
1035 }
1036 *clear_absent_on_error = false;
1037 }
1038 /*
1039 * Routine: vm_fault_page
1040 * Purpose:
1041 * Find the resident page for the virtual memory
1042 * specified by the given virtual memory object
1043 * and offset.
1044 * Additional arguments:
1045 * The required permissions for the page are given
1046 * in "fault_type". Desired permissions are included
1047 * in "protection".
1048 * fault_info is passed along to determine pagein cluster
1049 * limits... it contains the expected reference pattern,
1050 * cluster size if available, etc...
1051 *
1052 * If the desired page is known to be resident (for
1053 * example, because it was previously wired down), asserting
1054 * the "unwiring" parameter will speed the search.
1055 *
1056 * If the operation can be interrupted (by thread_abort
1057 * or thread_terminate), then the "interruptible"
1058 * parameter should be asserted.
1059 *
1060 * Results:
1061 * The page containing the proper data is returned
1062 * in "result_page".
1063 *
1064 * In/out conditions:
1065 * The source object must be locked and referenced,
1066 * and must donate one paging reference. The reference
1067 * is not affected. The paging reference and lock are
1068 * consumed.
1069 *
1070 * If the call succeeds, the object in which "result_page"
1071 * resides is left locked and holding a paging reference.
1072 * If this is not the original object, a busy page in the
1073 * original object is returned in "top_page", to prevent other
1074 * callers from pursuing this same data, along with a paging
1075 * reference for the original object. The "top_page" should
1076 * be destroyed when this guarantee is no longer required.
1077 * The "result_page" is also left busy. It is not removed
1078 * from the pageout queues.
1079 * Special Case:
1080 * A return value of VM_FAULT_SUCCESS_NO_PAGE means that the
1081 * fault succeeded but there's no VM page (i.e. the VM object
1082 * does not actually hold VM pages, but device memory or
1083 * large pages). The object is still locked and we still hold a
1084 * paging_in_progress reference.
1085 */
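/*
 * Illustrative sketch (editorial addition, not part of the original source):
 * the calling convention described above, with error handling and the
 * release of the busy result page elided.  The variable names are
 * hypothetical; the object must be locked, referenced and carry a paging
 * reference before the call, and vm_fault_cleanup() consumes the paging
 * references on success.
 */
#if 0 /* example only, not compiled */
/* object, offset, prot, fault_info: hypothetical locals set up by the caller */
vm_page_t result_page, top_page;
kern_return_t error_code;
vm_fault_return_t fr;

vm_object_lock(object);
vm_object_paging_begin(object);
fr = vm_fault_page(object, offset, VM_PROT_READ,
    FALSE,              /* must_be_resident */
    FALSE,              /* caller_lookup */
    &prot, &result_page, &top_page,
    NULL,               /* type_of_fault */
    &error_code,
    FALSE,              /* no_zero_fill */
    &fault_info);
if (fr == VM_FAULT_SUCCESS && result_page != VM_PAGE_NULL) {
	/* ... use the busy result_page, then release it ... */
	vm_fault_cleanup(VM_PAGE_OBJECT(result_page), top_page);
}
#endif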
1086 unsigned int vm_fault_page_blocked_access = 0;
1087 unsigned int vm_fault_page_forced_retry = 0;
1088
1089 vm_fault_return_t
1090 vm_fault_page(
1091 /* Arguments: */
1092 vm_object_t first_object, /* Object to begin search */
1093 vm_object_offset_t first_offset, /* Offset into object */
1094 vm_prot_t fault_type, /* What access is requested */
1095 boolean_t must_be_resident,/* Must page be resident? */
1096 boolean_t caller_lookup, /* caller looked up page */
1097 /* Modifies in place: */
1098 vm_prot_t *protection, /* Protection for mapping */
1099 vm_page_t *result_page, /* Page found, if successful */
1100 /* Returns: */
1101 vm_page_t *top_page, /* Page in top object, if
1102 * not result_page. */
1103 int *type_of_fault, /* if non-null, fill in with type of fault
1104 * COW, zero-fill, etc... returned in trace point */
1105 /* More arguments: */
1106 kern_return_t *error_code, /* code if page is in error */
1107 boolean_t no_zero_fill, /* don't zero fill absent pages */
1108 vm_object_fault_info_t fault_info)
1109 {
1110 vm_page_t m;
1111 vm_object_t object;
1112 vm_object_offset_t offset;
1113 vm_page_t first_m;
1114 vm_object_t next_object;
1115 vm_object_t copy_object;
1116 boolean_t look_for_page;
1117 boolean_t force_fault_retry = FALSE;
1118 vm_prot_t access_required = fault_type;
1119 vm_prot_t wants_copy_flag;
1120 kern_return_t wait_result;
1121 wait_interrupt_t interruptible_state;
1122 boolean_t data_already_requested = FALSE;
1123 vm_behavior_t orig_behavior;
1124 vm_size_t orig_cluster_size;
1125 vm_fault_return_t error;
1126 int my_fault;
1127 uint32_t try_failed_count;
1128 wait_interrupt_t interruptible; /* how may the fault be interrupted? */
1129 int external_state = VM_EXTERNAL_STATE_UNKNOWN;
1130 memory_object_t pager;
1131 vm_fault_return_t retval;
1132 vm_grab_options_t grab_options;
1133 bool clear_absent_on_error = false;
1134
1135 /*
1136 * MUST_ASK_PAGER() evaluates to TRUE if the page specified by object/offset is
1137 * marked as paged out in the compressor pager or the pager doesn't exist.
1138 * Note also that if the pager for an internal object
1139 * has not been created, the pager is not invoked regardless of the value
1140 * of MUST_ASK_PAGER().
1141 *
1142 * PAGED_OUT() evaluates to TRUE if the page specified by the object/offset
1143 * is marked as paged out in the compressor pager.
1144 * PAGED_OUT() is used to determine if a page has already been pushed
1145 * into a copy object in order to avoid a redundant page out operation.
1146 */
1147 #define MUST_ASK_PAGER(o, f, s) \
1148 ((s = vm_object_compressor_pager_state_get((o), (f))) != VM_EXTERNAL_STATE_ABSENT)
1149
1150 #define PAGED_OUT(o, f) \
1151 (vm_object_compressor_pager_state_get((o), (f)) == VM_EXTERNAL_STATE_EXISTS)
1152
1153 #if TRACEFAULTPAGE
1154 dbgTrace(0xBEEF0002, (unsigned int) first_object, (unsigned int) first_offset); /* (TEST/DEBUG) */
1155 #endif
1156
1157 interruptible = fault_info->interruptible;
1158 interruptible_state = thread_interrupt_level(interruptible);
1159
1160 /*
1161 * INVARIANTS (through entire routine):
1162 *
1163 * 1) At all times, we must either have the object
1164 * lock or a busy page in some object to prevent
1165 * some other thread from trying to bring in
1166 * the same page.
1167 *
1168 * Note that we cannot hold any locks during the
1169 * pager access or when waiting for memory, so
1170 * we use a busy page then.
1171 *
1172 * 2) To prevent another thread from racing us down the
1173 * shadow chain and entering a new page in the top
1174 * object before we do, we must keep a busy page in
1175 * the top object while following the shadow chain.
1176 *
1177 * 3) We must increment paging_in_progress on any object
1178 * for which we have a busy page before dropping
1179 * the object lock
1180 *
1181 * 4) We leave busy pages on the pageout queues.
1182 * If the pageout daemon comes across a busy page,
1183 * it will remove the page from the pageout queues.
1184 */
1185
1186 object = first_object;
1187 offset = first_offset;
1188 first_m = VM_PAGE_NULL;
1189 access_required = fault_type;
1190
1191 /*
1192 * default type of fault
1193 */
1194 my_fault = DBG_CACHE_HIT_FAULT;
1195 thread_pri_floor_t token;
1196 bool drop_floor = false;
1197
1198 while (TRUE) {
1199 #if TRACEFAULTPAGE
1200 dbgTrace(0xBEEF0003, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1201 #endif
1202
1203 grab_options = vm_page_grab_options_for_object(object);
1204
1205 if (!object->alive) {
1206 /*
1207 * object is no longer valid
1208 * clean up and return error
1209 */
1210 #if DEVELOPMENT || DEBUG
1211 printf("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
1212 if (panic_object_not_alive) {
1213 panic("FBDP rdar://93769854 %s:%d object %p internal %d pager %p (%s) copy %p shadow %p alive %d terminating %d named %d ref %d shadow_severed %d\n", __FUNCTION__, __LINE__, object, object->internal, object->pager, object->pager ? object->pager->mo_pager_ops->memory_object_pager_name : "?", object->vo_copy, object->shadow, object->alive, object->terminating, object->named, os_ref_get_count_raw(&object->ref_count), object->shadow_severed);
1214 }
1215 #endif /* DEVELOPMENT || DEBUG */
1216 vm_fault_cleanup(object, first_m);
1217 thread_interrupt_level(interruptible_state);
1218
1219 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_NOT_ALIVE), 0 /* arg */);
1220 return VM_FAULT_MEMORY_ERROR;
1221 }
1222
1223 if (!object->pager_created && object->phys_contiguous) {
1224 /*
1225 * A physically-contiguous object without a pager:
1226 * must be a "large page" object. We do not deal
1227 * with VM pages for this object.
1228 */
1229 caller_lookup = FALSE;
1230 m = VM_PAGE_NULL;
1231 goto phys_contig_object;
1232 }
1233
1234 if (object->blocked_access) {
1235 /*
1236 * Access to this VM object has been blocked.
1237 * Replace our "paging_in_progress" reference with
1238 * a "activity_in_progress" reference and wait for
1239 * access to be unblocked.
1240 */
1241 caller_lookup = FALSE; /* no longer valid after sleep */
1242 vm_object_activity_begin(object);
1243 vm_object_paging_end(object);
1244 while (object->blocked_access) {
1245 vm_object_sleep(object,
1246 VM_OBJECT_EVENT_UNBLOCKED,
1247 THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
1248 }
1249 vm_fault_page_blocked_access++;
1250 vm_object_paging_begin(object);
1251 vm_object_activity_end(object);
1252 }
1253
1254 /*
1255 * See whether the page at 'offset' is resident
1256 */
1257 if (caller_lookup == TRUE) {
1258 /*
1259 * The caller has already looked up the page
1260 * and gave us the result in "result_page".
1261 * We can use this for the first lookup but
1262 * it loses its validity as soon as we unlock
1263 * the object.
1264 */
1265 m = *result_page;
1266 caller_lookup = FALSE; /* no longer valid after that */
1267 } else {
1268 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1269 }
1270 #if TRACEFAULTPAGE
1271 dbgTrace(0xBEEF0004, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1272 #endif
1273 if (m != VM_PAGE_NULL) {
1274 if (m->vmp_busy) {
1275 /*
1276 * The page is being brought in,
1277 * wait for it and then retry.
1278 */
1279 #if TRACEFAULTPAGE
1280 dbgTrace(0xBEEF0005, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1281 #endif
1282 if (fault_info->fi_no_sleep) {
1283 /* Caller has requested not to sleep on busy pages */
1284 vm_fault_cleanup(object, first_m);
1285 thread_interrupt_level(interruptible_state);
1286 return VM_FAULT_BUSY;
1287 }
1288
1289 wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_DEFAULT);
1290
1291 if (wait_result != THREAD_AWAKENED) {
1292 vm_fault_cleanup(object, first_m);
1293 thread_interrupt_level(interruptible_state);
1294
1295 if (wait_result == THREAD_RESTART) {
1296 return VM_FAULT_RETRY;
1297 } else {
1298 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
1299 return VM_FAULT_INTERRUPTED;
1300 }
1301 }
1302 continue;
1303 }
1304 if (m->vmp_laundry) {
1305 m->vmp_free_when_done = FALSE;
1306
1307 if (!m->vmp_cleaning) {
1308 vm_pageout_steal_laundry(m, FALSE);
1309 }
1310 }
1311 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
1312 if (vm_page_is_guard(m)) {
1313 /*
1314 * Guard page: off limits !
1315 */
1316 if (fault_type == VM_PROT_NONE) {
1317 /*
1318 * The fault is not requesting any
1319 * access to the guard page, so it must
1320 * be just to wire or unwire it.
1321 * Let's pretend it succeeded...
1322 */
1323 m->vmp_busy = TRUE;
1324 *result_page = m;
1325 assert(first_m == VM_PAGE_NULL);
1326 *top_page = first_m;
1327 if (type_of_fault) {
1328 *type_of_fault = DBG_GUARD_FAULT;
1329 }
1330 thread_interrupt_level(interruptible_state);
1331 return VM_FAULT_SUCCESS;
1332 } else {
1333 /*
1334 * The fault requests access to the
1335 * guard page: let's deny that !
1336 */
1337 vm_fault_cleanup(object, first_m);
1338 thread_interrupt_level(interruptible_state);
1339 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_GUARDPAGE_FAULT), 0 /* arg */);
1340 return VM_FAULT_MEMORY_ERROR;
1341 }
1342 }
1343
1344
1345 if (m->vmp_error) {
1346 /*
1347 * The page is in error, give up now.
1348 */
1349 #if TRACEFAULTPAGE
1350 dbgTrace(0xBEEF0006, (unsigned int) m, (unsigned int) error_code); /* (TEST/DEBUG) */
1351 #endif
1352 if (error_code) {
1353 *error_code = KERN_MEMORY_ERROR;
1354 }
1355 VM_PAGE_FREE(m);
1356
1357 vm_fault_cleanup(object, first_m);
1358 thread_interrupt_level(interruptible_state);
1359
1360 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_ERROR), 0 /* arg */);
1361 return VM_FAULT_MEMORY_ERROR;
1362 }
1363 if (m->vmp_restart) {
1364 /*
1365 * The pager wants us to restart
1366 * at the top of the chain,
1367 * typically because it has moved the
1368 * page to another pager, then do so.
1369 */
1370 #if TRACEFAULTPAGE
1371 dbgTrace(0xBEEF0007, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1372 #endif
1373 VM_PAGE_FREE(m);
1374
1375 vm_fault_cleanup(object, first_m);
1376 thread_interrupt_level(interruptible_state);
1377
1378 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PAGE_HAS_RESTART), 0 /* arg */);
1379 return VM_FAULT_RETRY;
1380 }
1381 if (m->vmp_absent) {
1382 /*
1383 * The page isn't busy, but is absent,
1384 * therefore it's deemed "unavailable".
1385 *
1386 * Remove the non-existent page (unless it's
1387 * in the top object) and move on down to the
1388 * next object (if there is one).
1389 */
1390 #if TRACEFAULTPAGE
1391 dbgTrace(0xBEEF0008, (unsigned int) m, (unsigned int) object->shadow); /* (TEST/DEBUG) */
1392 #endif
1393 next_object = object->shadow;
1394
1395 if (next_object == VM_OBJECT_NULL) {
1396 /*
1397 * Absent page at bottom of shadow
1398 * chain; zero fill the page we left
1399 * busy in the first object, and free
1400 * the absent page.
1401 */
1402 assert(!must_be_resident);
1403
1404 /*
1405 * check for any conditions that prevent
1406 * us from creating a new zero-fill page
1407 * vm_fault_check will do all of the
1408 * fault cleanup in the case of an error condition
1409 * including resetting the thread_interrupt_level
1410 */
1411 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
1412
1413 if (error != VM_FAULT_SUCCESS) {
1414 return error;
1415 }
1416
1417 if (object != first_object) {
1418 /*
1419 * free the absent page we just found
1420 */
1421 VM_PAGE_FREE(m);
1422
1423 /*
1424 * drop reference and lock on current object
1425 */
1426 vm_object_paging_end(object);
1427 vm_object_unlock(object);
1428
1429 /*
1430 * grab the original page we
1431 * 'soldered' in place and
1432 * retake lock on 'first_object'
1433 */
1434 m = first_m;
1435 first_m = VM_PAGE_NULL;
1436
1437 object = first_object;
1438 offset = first_offset;
1439
1440 vm_object_lock(object);
1441 } else {
1442 /*
1443 * we're going to use the absent page we just found
1444 * so convert it to a 'busy' page
1445 */
1446 m->vmp_absent = FALSE;
1447 m->vmp_busy = TRUE;
1448 }
1449 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
1450 m->vmp_absent = TRUE;
1451 clear_absent_on_error = true;
1452 }
1453 /*
1454 * zero-fill the page and put it on
1455 * the correct paging queue
1456 */
1457 my_fault = vm_fault_zero_page(m, no_zero_fill);
1458
1459 break;
1460 } else {
1461 if (must_be_resident) {
1462 vm_object_paging_end(object);
1463 } else if (object != first_object) {
1464 vm_object_paging_end(object);
1465 VM_PAGE_FREE(m);
1466 } else {
1467 first_m = m;
1468 m->vmp_absent = FALSE;
1469 m->vmp_busy = TRUE;
1470
1471 vm_page_lockspin_queues();
1472 vm_page_queues_remove(m, FALSE);
1473 vm_page_unlock_queues();
1474 }
1475
1476 offset += object->vo_shadow_offset;
1477 fault_info->lo_offset += object->vo_shadow_offset;
1478 fault_info->hi_offset += object->vo_shadow_offset;
1479 access_required = VM_PROT_READ;
1480
1481 vm_object_lock(next_object);
1482 vm_object_unlock(object);
1483 object = next_object;
1484 vm_object_paging_begin(object);
1485
1486 /*
1487 * reset to default type of fault
1488 */
1489 my_fault = DBG_CACHE_HIT_FAULT;
1490
1491 continue;
1492 }
1493 }
1494 if ((m->vmp_cleaning)
1495 && ((object != first_object) || (object->vo_copy != VM_OBJECT_NULL))
1496 && (fault_type & VM_PROT_WRITE)) {
1497 /*
1498 * This is a copy-on-write fault that will
1499 * cause us to revoke access to this page, but
1500 * this page is in the process of being cleaned
1501 * in a clustered pageout. We must wait until
1502 * the cleaning operation completes before
1503 * revoking access to the original page,
1504 * otherwise we might attempt to remove a
1505 * wired mapping.
1506 */
1507 #if TRACEFAULTPAGE
1508 dbgTrace(0xBEEF0009, (unsigned int) m, (unsigned int) offset); /* (TEST/DEBUG) */
1509 #endif
1510 /*
1511 * take an extra ref so that object won't die
1512 */
1513 vm_object_reference_locked(object);
1514
1515 vm_fault_cleanup(object, first_m);
1516
1517 vm_object_lock(object);
1518 assert(os_ref_get_count_raw(&object->ref_count) > 0);
1519
1520 m = vm_page_lookup(object, vm_object_trunc_page(offset));
1521
1522 if (m != VM_PAGE_NULL && m->vmp_cleaning) {
1523 wait_result = vm_page_sleep(object, m, interruptible, LCK_SLEEP_UNLOCK);
1524 vm_object_deallocate(object);
1525 goto backoff;
1526 } else {
1527 vm_object_unlock(object);
1528
1529 vm_object_deallocate(object);
1530 thread_interrupt_level(interruptible_state);
1531
1532 return VM_FAULT_RETRY;
1533 }
1534 }
1535 if (type_of_fault == NULL && (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) &&
1536 !(fault_info != NULL && fault_info->stealth)) {
1537 /*
1538 * If we were passed a non-NULL pointer for
1539 * "type_of_fault", than we came from
1540 * vm_fault... we'll let it deal with
1541 * this condition, since it
1542 * needs to see m->vmp_speculative to correctly
1543 * account the pageins, otherwise...
1544 * take it off the speculative queue, we'll
1545 * let the caller of vm_fault_page deal
1546 * with getting it onto the correct queue
1547 *
1548 * If the caller specified in fault_info that
1549 * it wants a "stealth" fault, we also leave
1550 * the page in the speculative queue.
1551 */
1552 vm_page_lockspin_queues();
1553 if (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) {
1554 vm_page_queues_remove(m, FALSE);
1555 }
1556 vm_page_unlock_queues();
1557 }
1558 assert(object == VM_PAGE_OBJECT(m));
1559
1560 if (object->code_signed) {
1561 /*
1562 * CODE SIGNING:
1563 * We just paged in a page from a signed
1564 * memory object but we don't need to
1565 * validate it now. We'll validate it if and
1566 * when it gets mapped into a user address
1567 * space for the first time or when the page
1568 * gets copied to another object as a result
1569 * of a copy-on-write.
1570 */
1571 }
1572
1573 /*
1574 * We mark the page busy and leave it on
1575 * the pageout queues. If the pageout
1576 * daemon comes across it, then it will
1577 * remove the page from the queue, but not the object
1578 */
1579 #if TRACEFAULTPAGE
1580 dbgTrace(0xBEEF000B, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1581 #endif
1582 assert(!m->vmp_busy);
1583 assert(!m->vmp_absent);
1584
1585 m->vmp_busy = TRUE;
1586 break;
1587 }
1588
1589 /*
1590 * we get here when there is no page present in the object at
1591 * the offset we're interested in... we'll allocate a page
1592 * at this point if the pager associated with
1593 * this object can provide the data or we're the top object...
1594 * object is locked; m == NULL
1595 */
1596
1597 if (must_be_resident) {
1598 if (fault_type == VM_PROT_NONE &&
1599 is_kernel_object(object)) {
1600 /*
1601 * We've been called from vm_fault_unwire()
1602 * while removing a map entry that was allocated
1603 * with KMA_KOBJECT and KMA_VAONLY. This page
1604 * is not present and there's nothing more to
1605 * do here (nothing to unwire).
1606 */
1607 vm_fault_cleanup(object, first_m);
1608 thread_interrupt_level(interruptible_state);
1609
1610 return VM_FAULT_MEMORY_ERROR;
1611 }
1612
1613 goto dont_look_for_page;
1614 }
1615
1616 /* Don't expect to fault pages into the kernel object. */
1617 assert(!is_kernel_object(object));
1618
1619 look_for_page = (object->pager_created && (MUST_ASK_PAGER(object, offset, external_state) == TRUE));
1620
1621 #if TRACEFAULTPAGE
1622 dbgTrace(0xBEEF000C, (unsigned int) look_for_page, (unsigned int) object); /* (TEST/DEBUG) */
1623 #endif
1624 if (!look_for_page && object == first_object && !object->phys_contiguous) {
1625 /*
1626 * Allocate a new page for this object/offset pair as a placeholder
1627 */
1628 m = vm_page_grab_options(grab_options);
1629 #if TRACEFAULTPAGE
1630 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1631 #endif
1632 if (m == VM_PAGE_NULL) {
1633 vm_fault_cleanup(object, first_m);
1634 thread_interrupt_level(interruptible_state);
1635
1636 return VM_FAULT_MEMORY_SHORTAGE;
1637 }
1638
1639 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1640 vm_page_insert_internal(m, object,
1641 vm_object_trunc_page(offset),
1642 VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1643 } else {
1644 vm_page_insert(m, object, vm_object_trunc_page(offset));
1645 }
1646 }
1647 if (look_for_page) {
1648 kern_return_t rc;
1649 int my_fault_type;
1650
1651 /*
1652 * If the memory manager is not ready, we
1653 * cannot make requests.
1654 */
1655 if (!object->pager_ready) {
1656 #if TRACEFAULTPAGE
1657 dbgTrace(0xBEEF000E, (unsigned int) 0, (unsigned int) 0); /* (TEST/DEBUG) */
1658 #endif
1659 if (m != VM_PAGE_NULL) {
1660 VM_PAGE_FREE(m);
1661 }
1662
1663 /*
1664 * take an extra ref so object won't die
1665 */
1666 vm_object_reference_locked(object);
1667 vm_fault_cleanup(object, first_m);
1668
1669 vm_object_lock(object);
1670 assert(os_ref_get_count_raw(&object->ref_count) > 0);
1671
1672 if (!object->pager_ready) {
1673 wait_result = vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY, interruptible, LCK_SLEEP_UNLOCK);
1674 vm_object_deallocate(object);
1675
1676 goto backoff;
1677 } else {
1678 vm_object_unlock(object);
1679 vm_object_deallocate(object);
1680 thread_interrupt_level(interruptible_state);
1681
1682 return VM_FAULT_RETRY;
1683 }
1684 }
1685 if (!object->internal && !object->phys_contiguous && object->paging_in_progress > vm_object_pagein_throttle) {
1686 /*
1687 * If there are too many outstanding page
1688 * requests pending on this external object, we
1689 * wait for them to be resolved now.
1690 */
1691 #if TRACEFAULTPAGE
1692 dbgTrace(0xBEEF0010, (unsigned int) m, (unsigned int) 0); /* (TEST/DEBUG) */
1693 #endif
1694 if (m != VM_PAGE_NULL) {
1695 VM_PAGE_FREE(m);
1696 }
1697 /*
1698 * take an extra ref so object won't die
1699 */
1700 vm_object_reference_locked(object);
1701
1702 vm_fault_cleanup(object, first_m);
1703
1704 vm_object_lock(object);
1705 assert(os_ref_get_count_raw(&object->ref_count) > 0);
1706
1707 if (object->paging_in_progress >= vm_object_pagein_throttle) {
1708 wait_result = vm_object_paging_throttle_wait(object, interruptible);
1709 vm_object_unlock(object);
1710 vm_object_deallocate(object);
1711 goto backoff;
1712 } else {
1713 vm_object_unlock(object);
1714 vm_object_deallocate(object);
1715 thread_interrupt_level(interruptible_state);
1716
1717 return VM_FAULT_RETRY;
1718 }
1719 }
1720 if (object->internal) {
1721 int compressed_count_delta;
1722 vm_compressor_options_t c_flags = 0;
1723
1724 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
1725
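/*
 * Anonymous (internal) object: any non-resident data lives in the
 * VM compressor.  Install a busy, absent placeholder page and have
 * the compressor pager decompress directly into it via
 * vm_compressor_pager_get() below, without going through a
 * memory_object_data_request().
 */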
1726 if (m == VM_PAGE_NULL) {
1727 /*
1728 * Allocate a new page for this object/offset pair as a placeholder
1729 */
1730 m = vm_page_grab_options(grab_options);
1731 #if TRACEFAULTPAGE
1732 dbgTrace(0xBEEF000D, (unsigned int) m, (unsigned int) object); /* (TEST/DEBUG) */
1733 #endif
1734 if (m == VM_PAGE_NULL) {
1735 vm_fault_cleanup(object, first_m);
1736 thread_interrupt_level(interruptible_state);
1737
1738 return VM_FAULT_MEMORY_SHORTAGE;
1739 }
1740
1741 m->vmp_absent = TRUE;
1742 if (fault_info && fault_info->batch_pmap_op == TRUE) {
1743 vm_page_insert_internal(m, object, vm_object_trunc_page(offset), VM_KERN_MEMORY_NONE, FALSE, TRUE, TRUE, FALSE, NULL);
1744 } else {
1745 vm_page_insert(m, object, vm_object_trunc_page(offset));
1746 }
1747 }
1748 assert(m->vmp_busy);
1749
1750 m->vmp_absent = TRUE;
1751 pager = object->pager;
1752
1753 assert(object->paging_in_progress > 0);
1754
1755 page_worker_token_t pw_token;
1756 #if PAGE_SLEEP_WITH_INHERITOR
1757 page_worker_register_worker((event_t)m, &pw_token);
1758 #endif /* PAGE_SLEEP_WITH_INHERITOR */
1759
1760 vm_object_unlock(object);
1761 rc = vm_compressor_pager_get(
1762 pager,
1763 offset + object->paging_offset,
1764 VM_PAGE_GET_PHYS_PAGE(m),
1765 &my_fault_type,
1766 c_flags,
1767 &compressed_count_delta);
1768
1769 if (type_of_fault == NULL) {
1770 int throttle_delay;
1771
1772 /*
1773 * we weren't called from vm_fault, so we
1774 * need to apply page creation throttling
1775 * do it before we re-acquire any locks
1776 */
1777 if (my_fault_type == DBG_COMPRESSOR_FAULT) {
1778 if ((throttle_delay = vm_page_throttled(TRUE))) {
1779 VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 1, 0);
1780 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
1781 }
1782 }
1783 }
1784 vm_object_lock(object);
1785 assert(object->paging_in_progress > 0);
1786
1787 vm_compressor_pager_count(
1788 pager,
1789 compressed_count_delta,
1790 FALSE, /* shared_lock */
1791 object);
1792
1793 switch (rc) {
1794 case KERN_SUCCESS:
1795 m->vmp_absent = FALSE;
1796 m->vmp_dirty = TRUE;
1797 if (!HAS_DEFAULT_CACHEABILITY(object->wimg_bits &
1798 VM_WIMG_MASK)) {
1799 /*
1800 * If the page is not cacheable,
1801 * we can't let its contents
1802 * linger in the data cache
1803 * after the decompression.
1804 */
1805 pmap_sync_page_attributes_phys(
1806 VM_PAGE_GET_PHYS_PAGE(m));
1807 } else {
1808 m->vmp_written_by_kernel = TRUE;
1809 }
1810 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
1811 if ((fault_type & VM_PROT_WRITE) == 0) {
1812 vm_object_lock_assert_exclusive(object);
1813 vm_page_lockspin_queues();
1814 m->vmp_unmodified_ro = true;
1815 vm_page_unlock_queues();
1816 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
1817 *protection &= ~VM_PROT_WRITE;
1818 }
1819 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
1820
1821 /*
1822 * If the object is purgeable, its
1823 * owner's purgeable ledgers have been
1824 * updated in vm_page_insert() but the
1825 * page was also accounted for in a
1826 * "compressed purgeable" ledger, so
1827 * update that now.
1828 */
1829 if (((object->purgable !=
1830 VM_PURGABLE_DENY) ||
1831 object->vo_ledger_tag) &&
1832 (object->vo_owner !=
1833 NULL)) {
1834 /*
1835 * One less compressed
1836 * purgeable/tagged page.
1837 */
1838 if (compressed_count_delta) {
1839 vm_object_owner_compressed_update(
1840 object,
1841 -1);
1842 }
1843 }
1844
1845 break;
1846 case KERN_MEMORY_FAILURE:
1847 m->vmp_unusual = TRUE;
1848 m->vmp_error = TRUE;
1849 m->vmp_absent = FALSE;
1850 break;
1851 case KERN_MEMORY_ERROR:
1852 assert(m->vmp_absent);
1853 break;
1854 default:
1855 panic("vm_fault_page(): unexpected "
1856 "error %d from "
1857 "vm_compressor_pager_get()\n",
1858 rc);
1859 }
1860 vm_page_wakeup_done_with_inheritor(object, m, &pw_token);
1861
1862 rc = KERN_SUCCESS;
1863 goto data_requested;
1864 }
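/*
 * Not an internal object: this is a pagein from an external
 * (e.g. file-backed) pager, so drop our placeholder page, if any,
 * and ask the memory manager for the data via
 * memory_object_data_request() below.
 */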
1865 my_fault_type = DBG_PAGEIN_FAULT;
1866
1867 if (m != VM_PAGE_NULL) {
1868 VM_PAGE_FREE(m);
1869 m = VM_PAGE_NULL;
1870 }
1871
1872 #if TRACEFAULTPAGE
1873 dbgTrace(0xBEEF0012, (unsigned int) object, (unsigned int) 0); /* (TEST/DEBUG) */
1874 #endif
1875
1876 /*
1877 * It's possible someone called vm_object_destroy while we weren't
1878 * holding the object lock. If that has happened, then bail out
1879 * here.
1880 */
1881
1882 pager = object->pager;
1883
1884 if (pager == MEMORY_OBJECT_NULL) {
1885 vm_fault_cleanup(object, first_m);
1886 thread_interrupt_level(interruptible_state);
1887
1888 static const enum vm_subsys_error_codes object_destroy_errors[VM_OBJECT_DESTROY_MAX + 1] = {
1889 [VM_OBJECT_DESTROY_UNKNOWN_REASON] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER,
1890 [VM_OBJECT_DESTROY_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNMOUNT,
1891 [VM_OBJECT_DESTROY_FORCED_UNMOUNT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_FORCED_UNMOUNT,
1892 [VM_OBJECT_DESTROY_UNGRAFT] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_UNGRAFT,
1893 [VM_OBJECT_DESTROY_PAGER] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_DEALLOC_PAGER,
1894 [VM_OBJECT_DESTROY_RECLAIM] = KDBG_TRIAGE_VM_OBJECT_NO_PAGER_RECLAIM,
1895 };
1896 enum vm_subsys_error_codes kdbg_code = object_destroy_errors[(vm_object_destroy_reason_t)object->no_pager_reason];
1897 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, kdbg_code), 0 /* arg */);
1898 return VM_FAULT_MEMORY_ERROR;
1899 }
1900
1901 /*
1902 * We have an absent page in place for the faulting offset,
1903 * so we can release the object lock.
1904 */
1905
1906 if (object->object_is_shared_cache || pager->mo_pager_ops == &dyld_pager_ops) {
1907 token = thread_priority_floor_start();
1908 /*
1909 * A non-native shared cache object might
1910 * be getting set up in parallel with this
1911 * fault and so we can't assume that this
1912 * check will be valid after we drop the
1913 * object lock below.
1914 *
1915 * FIXME: This should utilize @c page_worker_register_worker()
1916 * (rdar://153586539)
1917 */
1918 drop_floor = true;
1919 }
1920
1921 vm_object_unlock(object);
1922
1923 /*
1924 * If this object uses a copy_call strategy,
1925 * and we are interested in a copy of this object
1926 * (having gotten here only by following a
1927 * shadow chain), then tell the memory manager
1928 * via a flag added to the desired_access
1929 * parameter, so that it can detect a race
1930 * between our walking down the shadow chain
1931 * and its pushing pages up into a copy of
1932 * the object that it manages.
1933 */
1934 if (object->copy_strategy == MEMORY_OBJECT_COPY_CALL && object != first_object) {
1935 wants_copy_flag = VM_PROT_WANTS_COPY;
1936 } else {
1937 wants_copy_flag = VM_PROT_NONE;
1938 }
1939
1940 if (object->vo_copy == first_object) {
1941 /*
1942 * if we issue the memory_object_data_request in
1943 * this state, we are subject to a deadlock with
1944 * the underlying filesystem if it is trying to
1945 * shrink the file resulting in a push of pages
1946 * into the copy object... that push will stall
1947 * on the placeholder page, and if the pushing thread
1948 * is holding a lock that is required on the pagein
1949 * path (such as a truncate lock), we'll deadlock...
1950 * to avoid this potential deadlock, we throw away
1951 * our placeholder page before calling memory_object_data_request
1952 * and force this thread to retry the vm_fault_page after
1953 * we have issued the I/O. the second time through this path
1954 * we will find the page already in the cache (presumably still
1955 * busy waiting for the I/O to complete) and then complete
1956 * the fault w/o having to go through memory_object_data_request again
1957 */
1958 assert(first_m != VM_PAGE_NULL);
1959 assert(VM_PAGE_OBJECT(first_m) == first_object);
1960
1961 vm_object_lock(first_object);
1962 VM_PAGE_FREE(first_m);
1963 vm_object_paging_end(first_object);
1964 vm_object_unlock(first_object);
1965
1966 first_m = VM_PAGE_NULL;
1967 force_fault_retry = TRUE;
1968
1969 vm_fault_page_forced_retry++;
1970 }
1971
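/*
 * If we already asked the pager for this page on an earlier pass
 * through this loop, limit the retry to a single-page, random-access
 * request so the pager doesn't re-read a whole cluster; the caller's
 * original behavior and cluster_size are restored right after the
 * request below.
 */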
1972 if (data_already_requested == TRUE) {
1973 orig_behavior = fault_info->behavior;
1974 orig_cluster_size = fault_info->cluster_size;
1975
1976 fault_info->behavior = VM_BEHAVIOR_RANDOM;
1977 fault_info->cluster_size = PAGE_SIZE;
1978 }
1979 /*
1980 * Call the memory manager to retrieve the data.
1981 */
1982 rc = memory_object_data_request(
1983 pager,
1984 vm_object_trunc_page(offset) + object->paging_offset,
1985 PAGE_SIZE,
1986 access_required | wants_copy_flag,
1987 (memory_object_fault_info_t)fault_info);
1988
1989 if (data_already_requested == TRUE) {
1990 fault_info->behavior = orig_behavior;
1991 fault_info->cluster_size = orig_cluster_size;
1992 } else {
1993 data_already_requested = TRUE;
1994 }
1995
1996 DTRACE_VM2(maj_fault, int, 1, (uint64_t *), NULL);
1997 #if TRACEFAULTPAGE
1998 dbgTrace(0xBEEF0013, (unsigned int) object, (unsigned int) rc); /* (TEST/DEBUG) */
1999 #endif
2000 vm_object_lock(object);
2001
2002 if (drop_floor) {
2003 thread_priority_floor_end(&token);
2004 drop_floor = false;
2005 }
2006
2007 data_requested:
2008 if (rc != KERN_SUCCESS) {
2009 vm_fault_cleanup(object, first_m);
2010 thread_interrupt_level(interruptible_state);
2011
2012 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NO_DATA), 0 /* arg */);
2013
2014 if (rc == MACH_SEND_INTERRUPTED) {
2015 return VM_FAULT_INTERRUPTED;
2016 } else if (rc == KERN_ALREADY_WAITING) {
2017 return VM_FAULT_BUSY;
2018 } else {
2019 return VM_FAULT_MEMORY_ERROR;
2020 }
2021 } else {
2022 clock_sec_t tv_sec;
2023 clock_usec_t tv_usec;
2024
2025 if (my_fault_type == DBG_PAGEIN_FAULT) {
2026 clock_get_system_microtime(&tv_sec, &tv_usec);
2027 current_thread()->t_page_creation_time = tv_sec;
2028 current_thread()->t_page_creation_count = 0;
2029 }
2030 }
2031 if ((interruptible != THREAD_UNINT) && (current_thread()->sched_flags & TH_SFLAG_ABORT)) {
2032 vm_fault_cleanup(object, first_m);
2033 thread_interrupt_level(interruptible_state);
2034
2035 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2036 return VM_FAULT_INTERRUPTED;
2037 }
2038 if (force_fault_retry == TRUE) {
2039 vm_fault_cleanup(object, first_m);
2040 thread_interrupt_level(interruptible_state);
2041
2042 return VM_FAULT_RETRY;
2043 }
2044 if (m == VM_PAGE_NULL && object->phys_contiguous) {
2045 /*
2046 * No page here means that the object we
2047 * initially looked up was "physically
2048 * contiguous" (i.e. device memory). However,
2049 * with Virtual VRAM, the object might not
2050 * be backed by that device memory anymore,
2051 * so we're done here only if the object is
2052 * still "phys_contiguous".
2053 * Otherwise, if the object is no longer
2054 * "phys_contiguous", we need to retry the
2055 * page fault against the object's new backing
2056 * store (different memory object).
2057 */
2058 phys_contig_object:
2059 assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
2060 assert(object == first_object);
2061 goto done;
2062 }
2063 /*
2064 * potentially a pagein fault
2065 * if we make it through the state checks
2066 * above, then we'll count it as such
2067 */
2068 my_fault = my_fault_type;
2069
2070 /*
2071 * Retry with same object/offset, since new data may
2072 * be in a different page (i.e., m is meaningless at
2073 * this point).
2074 */
2075 continue;
2076 }
2077 dont_look_for_page:
2078 /*
2079 * We get here if the object has no pager, or an existence map
2080 * exists and indicates the page isn't present on the pager
2081 * or we're unwiring a page. If a pager exists, but there
2082 * is no existence map, then the m->vmp_absent case above handles
2083 * the ZF case when the pager can't provide the page
2084 */
2085 #if TRACEFAULTPAGE
2086 dbgTrace(0xBEEF0014, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2087 #endif
2088 if (object == first_object) {
2089 first_m = m;
2090 } else {
2091 assert(m == VM_PAGE_NULL);
2092 }
2093
2094 next_object = object->shadow;
2095
2096 if (next_object == VM_OBJECT_NULL) {
2097 /*
2098 * we've hit the bottom of the shadow chain,
2099 * fill the page in the top object with zeros.
2100 */
2101 assert(!must_be_resident);
2102
2103 if (object != first_object) {
2104 vm_object_paging_end(object);
2105 vm_object_unlock(object);
2106
2107 object = first_object;
2108 offset = first_offset;
2109 vm_object_lock(object);
2110 }
2111 m = first_m;
2112 assert(VM_PAGE_OBJECT(m) == object);
2113 first_m = VM_PAGE_NULL;
2114
2115 /*
2116 * check for any conditions that prevent
2117 * us from creating a new zero-fill page
2118 * vm_fault_check will do all of the
2119 * fault cleanup in the case of an error condition
2120 * including resetting the thread_interrupt_level
2121 */
2122 error = vm_fault_check(object, m, first_m, interruptible_state, (type_of_fault == NULL) ? TRUE : FALSE);
2123
2124 if (error != VM_FAULT_SUCCESS) {
2125 return error;
2126 }
2127
2128 if (m == VM_PAGE_NULL) {
2129 m = vm_page_grab_options(grab_options);
2130
2131 if (m == VM_PAGE_NULL) {
2132 vm_fault_cleanup(object, VM_PAGE_NULL);
2133 thread_interrupt_level(interruptible_state);
2134
2135 return VM_FAULT_MEMORY_SHORTAGE;
2136 }
2137 vm_page_insert(m, object, vm_object_trunc_page(offset));
2138 }
2139 if (fault_info->mark_zf_absent && no_zero_fill == TRUE) {
2140 m->vmp_absent = TRUE;
2141 clear_absent_on_error = true;
2142 }
2143
2144 my_fault = vm_fault_zero_page(m, no_zero_fill);
2145
2146 break;
2147 } else {
2148 /*
2149 * Move on to the next object. Lock the next
2150 * object before unlocking the current one.
2151 */
2152 if ((object != first_object) || must_be_resident) {
2153 vm_object_paging_end(object);
2154 }
2155
2156 offset += object->vo_shadow_offset;
2157 fault_info->lo_offset += object->vo_shadow_offset;
2158 fault_info->hi_offset += object->vo_shadow_offset;
2159 access_required = VM_PROT_READ;
2160
2161 vm_object_lock(next_object);
2162 vm_object_unlock(object);
2163
2164 object = next_object;
2165 vm_object_paging_begin(object);
2166 }
2167 }
2168
2169 /*
2170 * PAGE HAS BEEN FOUND.
2171 *
2172 * This page (m) is:
2173 * busy, so that we can play with it;
2174 * not absent, so that nobody else will fill it;
2175 * possibly eligible for pageout;
2176 *
2177 * The top-level page (first_m) is:
2178 * VM_PAGE_NULL if the page was found in the
2179 * top-level object;
2180 * busy, not absent, and ineligible for pageout.
2181 *
2182 * The current object (object) is locked. A paging
2183 * reference is held for the current and top-level
2184 * objects.
2185 */
2186
2187 #if TRACEFAULTPAGE
2188 dbgTrace(0xBEEF0015, (unsigned int) object, (unsigned int) m); /* (TEST/DEBUG) */
2189 #endif
2190 #if EXTRA_ASSERTIONS
2191 assert(m->vmp_busy && !m->vmp_absent);
2192 assert((first_m == VM_PAGE_NULL) ||
2193 (first_m->vmp_busy && !first_m->vmp_absent &&
2194 !first_m->vmp_active && !first_m->vmp_inactive && !first_m->vmp_secluded));
2195 #endif /* EXTRA_ASSERTIONS */
2196
2197 /*
2198 * If the page is being written, but isn't
2199 * already owned by the top-level object,
2200 * we have to copy it into a new page owned
2201 * by the top-level object.
2202 */
2203 if (object != first_object) {
2204 #if TRACEFAULTPAGE
2205 dbgTrace(0xBEEF0016, (unsigned int) object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2206 #endif
2207 if (fault_type & VM_PROT_WRITE) {
2208 vm_page_t copy_m;
2209
2210 /*
2211 * We only really need to copy if we
2212 * want to write it.
2213 */
2214 assert(!must_be_resident);
2215
2216 /*
2217 * If we try to collapse first_object at this
2218 * point, we may deadlock when we try to get
2219 * the lock on an intermediate object (since we
2220 * have the bottom object locked). We can't
2221 * unlock the bottom object, because the page
2222 * we found may move (by collapse) if we do.
2223 *
2224 * Instead, we first copy the page. Then, when
2225 * we have no more use for the bottom object,
2226 * we unlock it and try to collapse.
2227 *
2228 * Note that we copy the page even if we didn't
2229 * need to... that's the breaks.
2230 */
2231
2232 /*
2233 * Allocate a page for the copy
2234 */
2235 copy_m = vm_page_grab_options(grab_options);
2236
2237 if (copy_m == VM_PAGE_NULL) {
2238 vm_fault_page_release_page(m, &clear_absent_on_error);
2239
2240 vm_fault_cleanup(object, first_m);
2241 thread_interrupt_level(interruptible_state);
2242
2243 return VM_FAULT_MEMORY_SHORTAGE;
2244 }
2245
2246 vm_page_copy(m, copy_m);
2247
2248 /*
2249 * If another map is truly sharing this
2250 * page with us, we have to flush all
2251 * uses of the original page, since we
2252 * can't distinguish those which want the
2253 * original from those which need the
2254 * new copy.
2255 *
2256 * XXXO If we know that only one map has
2257 * access to this page, then we could
2258 * avoid the pmap_disconnect() call.
2259 */
2260 if (m->vmp_pmapped) {
2261 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2262 }
2263
2264 if (m->vmp_clustered) {
2265 VM_PAGE_COUNT_AS_PAGEIN(m);
2266 VM_PAGE_CONSUME_CLUSTERED(m);
2267 }
2268 assert(!m->vmp_cleaning);
2269
2270 /*
2271 * We no longer need the old page or object.
2272 */
2273 vm_fault_page_release_page(m, &clear_absent_on_error);
2274
2275 /*
2276 * This check helps with marking the object as having a sequential pattern
2277 * Normally we'll miss doing this below because this fault is about COW to
2278 * the first_object i.e. bring page in from disk, push to object above but
2279 * don't update the file object's sequential pattern.
2280 */
2281 if (object->internal == FALSE) {
2282 vm_fault_is_sequential(object, offset, fault_info->behavior);
2283 }
2284
2285 vm_object_paging_end(object);
2286 vm_object_unlock(object);
2287
2288 my_fault = DBG_COW_FAULT;
2289 counter_inc(&vm_statistics_cow_faults);
2290 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
2291 counter_inc(&current_task()->cow_faults);
2292
2293 object = first_object;
2294 offset = first_offset;
2295
2296 vm_object_lock(object);
2297 /*
2298 * get rid of the place holder
2299 * page that we soldered in earlier
2300 */
2301 VM_PAGE_FREE(first_m);
2302 first_m = VM_PAGE_NULL;
2303
2304 /*
2305 * and replace it with the
2306 * page we just copied into
2307 */
2308 assert(copy_m->vmp_busy);
2309 vm_page_insert(copy_m, object, vm_object_trunc_page(offset));
2310 SET_PAGE_DIRTY(copy_m, TRUE);
2311
2312 m = copy_m;
2313 /*
2314 * Now that we've gotten the copy out of the
2315 * way, let's try to collapse the top object.
2316 * But we have to play ugly games with
2317 * paging_in_progress to do that...
2318 */
2319 vm_object_paging_end(object);
2320 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
2321 vm_object_paging_begin(object);
2322 } else {
2323 *protection &= (~VM_PROT_WRITE);
2324 }
2325 }
2326 /*
2327 * Now check whether the page needs to be pushed into the
2328 * copy object. The use of asymmetric copy on write for
2329 * shared temporary objects means that we may do two copies to
2330 * satisfy the fault; one above to get the page from a
2331 * shadowed object, and one here to push it into the copy.
2332 */
2333 try_failed_count = 0;
2334
2335 while ((copy_object = first_object->vo_copy) != VM_OBJECT_NULL) {
2336 vm_object_offset_t copy_offset;
2337 vm_page_t copy_m;
2338
2339 #if TRACEFAULTPAGE
2340 dbgTrace(0xBEEF0017, (unsigned int) copy_object, (unsigned int) fault_type); /* (TEST/DEBUG) */
2341 #endif
2342 /*
2343 * If the page is being written, but hasn't been
2344 * copied to the copy-object, we have to copy it there.
2345 */
2346 if ((fault_type & VM_PROT_WRITE) == 0) {
2347 *protection &= ~VM_PROT_WRITE;
2348 break;
2349 }
2350
2351 /*
2352 * If the page was guaranteed to be resident,
2353 * we must have already performed the copy.
2354 */
2355 if (must_be_resident) {
2356 break;
2357 }
2358
2359 /*
2360 * Try to get the lock on the copy_object.
2361 */
2362 if (!vm_object_lock_try(copy_object)) {
2363 vm_object_unlock(object);
2364 try_failed_count++;
2365
2366 mutex_pause(try_failed_count); /* wait a bit */
2367 vm_object_lock(object);
2368
2369 continue;
2370 }
2371 try_failed_count = 0;
2372
2373 /*
2374 * Make another reference to the copy-object,
2375 * to keep it from disappearing during the
2376 * copy.
2377 */
2378 vm_object_reference_locked(copy_object);
2379
2380 /*
2381 * Does the page exist in the copy?
2382 */
2383 copy_offset = first_offset - copy_object->vo_shadow_offset;
2384 copy_offset = vm_object_trunc_page(copy_offset);
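/*
 * copy_offset is now first_offset translated into copy_object's
 * space (copy_object shadows first_object at vo_shadow_offset), so
 * we can check whether the copy object covers this offset and
 * whether it already has a page there.
 */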
2385
2386 if (copy_object->vo_size <= copy_offset) {
2387 /*
2388 * Copy object doesn't cover this page -- do nothing.
2389 */
2390 ;
2391 } else if ((copy_m = vm_page_lookup(copy_object, copy_offset)) != VM_PAGE_NULL) {
2392 /*
2393 * Page currently exists in the copy object
2394 */
2395 if (copy_m->vmp_busy) {
2396 /*
2397 * If the page is being brought
2398 * in, wait for it and then retry.
2399 */
2400 vm_fault_page_release_page(m, &clear_absent_on_error);
2401
2402 /*
2403 * take an extra ref so object won't die
2404 */
2405 vm_object_reference_locked(copy_object);
2406 vm_object_unlock(copy_object);
2407 vm_fault_cleanup(object, first_m);
2408
2409 vm_object_lock(copy_object);
2410 vm_object_lock_assert_exclusive(copy_object);
2411 os_ref_release_live_locked_raw(&copy_object->ref_count,
2412 &vm_object_refgrp);
2413 copy_m = vm_page_lookup(copy_object, copy_offset);
2414
2415 if (copy_m != VM_PAGE_NULL && copy_m->vmp_busy) {
2416 wait_result = vm_page_sleep(copy_object, copy_m, interruptible, LCK_SLEEP_UNLOCK);
2417 vm_object_deallocate(copy_object);
2418
2419 goto backoff;
2420 } else {
2421 vm_object_unlock(copy_object);
2422 vm_object_deallocate(copy_object);
2423 thread_interrupt_level(interruptible_state);
2424
2425 return VM_FAULT_RETRY;
2426 }
2427 }
2428 } else if (!PAGED_OUT(copy_object, copy_offset)) {
2429 /*
2430 * If PAGED_OUT is TRUE, then the page used to exist
2431 * in the copy-object, and has already been paged out.
2432 * We don't need to repeat this. If PAGED_OUT is
2433 * FALSE, then either we don't know (!pager_created,
2434 * for example) or it hasn't been paged out.
2435 * (VM_EXTERNAL_STATE_UNKNOWN||VM_EXTERNAL_STATE_ABSENT)
2436 * We must copy the page to the copy object.
2437 *
2438 * Allocate a page for the copy
2439 */
2440 copy_m = vm_page_grab_options(grab_options);
2441
2442 if (copy_m == VM_PAGE_NULL) {
2443 vm_fault_page_release_page(m, &clear_absent_on_error);
2444
2445 vm_object_lock_assert_exclusive(copy_object);
2446 os_ref_release_live_locked_raw(&copy_object->ref_count,
2447 &vm_object_refgrp);
2448
2449 vm_object_unlock(copy_object);
2450 vm_fault_cleanup(object, first_m);
2451 thread_interrupt_level(interruptible_state);
2452
2453 return VM_FAULT_MEMORY_SHORTAGE;
2454 }
2455
2456 /*
2457 * Must copy page into copy-object.
2458 */
2459 vm_page_insert(copy_m, copy_object, copy_offset);
2460 vm_page_copy(m, copy_m);
2461
2462 /*
2463 * If the old page was in use by any users
2464 * of the copy-object, it must be removed
2465 * from all pmaps. (We can't know which
2466 * pmaps use it.)
2467 */
2468 if (m->vmp_pmapped) {
2469 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
2470 }
2471
2472 if (m->vmp_clustered) {
2473 VM_PAGE_COUNT_AS_PAGEIN(m);
2474 VM_PAGE_CONSUME_CLUSTERED(m);
2475 }
2476 /*
2477 * If there's a pager, then immediately
2478 * page out this page, using the "initialize"
2479 * option. Else, we use the copy.
2480 */
2481 if ((!copy_object->pager_ready)
2482 || vm_object_compressor_pager_state_get(copy_object, copy_offset) == VM_EXTERNAL_STATE_ABSENT
2483 ) {
2484 vm_page_lockspin_queues();
2485 assert(!m->vmp_cleaning);
2486 vm_page_activate(copy_m);
2487 vm_page_unlock_queues();
2488
2489 SET_PAGE_DIRTY(copy_m, TRUE);
2490 vm_page_wakeup_done(copy_object, copy_m);
2491 } else {
2492 assert(copy_m->vmp_busy == TRUE);
2493 assert(!m->vmp_cleaning);
2494
2495 /*
2496 * dirty is protected by the object lock
2497 */
2498 SET_PAGE_DIRTY(copy_m, TRUE);
2499
2500 /*
2501 * The page is already ready for pageout:
2502 * not on pageout queues and busy.
2503 * Unlock everything except the
2504 * copy_object itself.
2505 */
2506 vm_object_unlock(object);
2507
2508 /*
2509 * Write the page to the copy-object,
2510 * flushing it from the kernel.
2511 */
2512 vm_pageout_initialize_page(copy_m);
2513
2514 /*
2515 * Since the pageout may have
2516 * temporarily dropped the
2517 * copy_object's lock, we
2518 * check whether we'll have
2519 * to deallocate the hard way.
2520 */
2521 if ((copy_object->shadow != object) ||
2522 (os_ref_get_count_raw(&copy_object->ref_count) == 1)) {
2523 vm_object_unlock(copy_object);
2524 vm_object_deallocate(copy_object);
2525 vm_object_lock(object);
2526
2527 continue;
2528 }
2529 /*
2530 * Pick back up the old object's
2531 * lock. [It is safe to do so,
2532 * since it must be deeper in the
2533 * object tree.]
2534 */
2535 vm_object_lock(object);
2536 }
2537
2538 /*
2539 * Because we're pushing a page upward
2540 * in the object tree, we must restart
2541 * any faults that are waiting here.
2542 * [Note that this is an expansion of
2543 * vm_page_wakeup() that uses the THREAD_RESTART
2544 * wait result]. Can't turn off the page's
2545 * busy bit because we're not done with it.
2546 */
2547 if (m->vmp_wanted) {
2548 m->vmp_wanted = FALSE;
2549 thread_wakeup_with_result((event_t) m, THREAD_RESTART);
2550 }
2551 }
2552 /*
2553 * The reference count on copy_object must be
2554 * at least 2: one for our extra reference,
2555 * and at least one from the outside world
2556 * (we checked that when we last locked
2557 * copy_object).
2558 */
2559 vm_object_lock_assert_exclusive(copy_object);
2560 os_ref_release_live_locked_raw(&copy_object->ref_count,
2561 &vm_object_refgrp);
2562
2563 vm_object_unlock(copy_object);
2564
2565 break;
2566 }
2567
2568 done:
2569 *result_page = m;
2570 *top_page = first_m;
2571
2572 if (m != VM_PAGE_NULL) {
2573 assert(VM_PAGE_OBJECT(m) == object);
2574
2575 retval = VM_FAULT_SUCCESS;
2576
2577 if (my_fault == DBG_PAGEIN_FAULT) {
2578 VM_PAGE_COUNT_AS_PAGEIN(m);
2579
2580 if (object->internal) {
2581 my_fault = DBG_PAGEIND_FAULT;
2582 } else {
2583 my_fault = DBG_PAGEINV_FAULT;
2584 }
2585
2586 /*
2587 * evaluate access pattern and update state
2588 * vm_fault_deactivate_behind depends on the
2589 * state being up to date
2590 */
2591 vm_fault_is_sequential(object, offset, fault_info->behavior);
2592 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2593 } else if (type_of_fault == NULL && my_fault == DBG_CACHE_HIT_FAULT) {
2594 /*
2595 * we weren't called from vm_fault, so handle the
2596 * accounting here for hits in the cache
2597 */
2598 if (m->vmp_clustered) {
2599 VM_PAGE_COUNT_AS_PAGEIN(m);
2600 VM_PAGE_CONSUME_CLUSTERED(m);
2601 }
2602 vm_fault_is_sequential(object, offset, fault_info->behavior);
2603 vm_fault_deactivate_behind(object, offset, fault_info->behavior);
2604 } else if (my_fault == DBG_COMPRESSOR_FAULT || my_fault == DBG_COMPRESSOR_SWAPIN_FAULT) {
2605 VM_STAT_DECOMPRESSIONS();
2606 }
2607 if (type_of_fault) {
2608 *type_of_fault = my_fault;
2609 }
2610 } else {
2611 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_SUCCESS_NO_PAGE), 0 /* arg */);
2612 retval = VM_FAULT_SUCCESS_NO_VM_PAGE;
2613 assert(first_m == VM_PAGE_NULL);
2614 assert(object == first_object);
2615 }
2616
2617 thread_interrupt_level(interruptible_state);
2618
2619 #if TRACEFAULTPAGE
2620 dbgTrace(0xBEEF001A, (unsigned int) VM_FAULT_SUCCESS, 0); /* (TEST/DEBUG) */
2621 #endif
2622 return retval;
2623
2624 backoff:
2625 thread_interrupt_level(interruptible_state);
2626
2627 if (wait_result == THREAD_INTERRUPTED) {
2628 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
2629 return VM_FAULT_INTERRUPTED;
2630 }
2631 return VM_FAULT_RETRY;
2632 }
2633
2634 #if MACH_ASSERT && (XNU_PLATFORM_WatchOS || __x86_64__)
2635 #define PANIC_ON_CS_KILLED_DEFAULT true
2636 #else
2637 #define PANIC_ON_CS_KILLED_DEFAULT false
2638 #endif
2639 static TUNABLE(bool, panic_on_cs_killed, "panic_on_cs_killed",
2640 PANIC_ON_CS_KILLED_DEFAULT);
2641
2642 extern int proc_selfpid(void);
2643 extern char *proc_name_address(struct proc *p);
2644 extern const char *proc_best_name(struct proc *);
2645 unsigned long cs_enter_tainted_rejected = 0;
2646 unsigned long cs_enter_tainted_accepted = 0;
2647
2648 /*
2649 * CODE SIGNING:
2650 * When soft faulting a page, we have to validate the page if:
2651 * 1. the page is being mapped in user space
2652 * 2. the page hasn't already been found to be "tainted"
2653 * 3. the page belongs to a code-signed object
2654 * 4. the page has not been validated yet or has been mapped for write.
2655 */
2656 static bool
2657 vm_fault_cs_need_validation(
2658 pmap_t pmap,
2659 vm_page_t page,
2660 vm_object_t page_obj,
2661 vm_map_size_t fault_page_size,
2662 vm_map_offset_t fault_phys_offset)
2663 {
2664 if (pmap == kernel_pmap) {
2665 /* 1 - not user space */
2666 return false;
2667 }
2668 if (!page_obj->code_signed) {
2669 /* 3 - page does not belong to a code-signed object */
2670 return false;
2671 }
2672 if (fault_page_size == PAGE_SIZE) {
2673 /* looking at the whole page */
2674 assertf(fault_phys_offset == 0,
2675 "fault_page_size 0x%llx fault_phys_offset 0x%llx\n",
2676 (uint64_t)fault_page_size,
2677 (uint64_t)fault_phys_offset);
2678 if (page->vmp_cs_tainted == VMP_CS_ALL_TRUE) {
2679 /* 2 - page is all tainted */
2680 return false;
2681 }
2682 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
2683 !page->vmp_wpmapped) {
2684 /* 4 - already fully validated and never mapped writable */
2685 return false;
2686 }
2687 } else {
2688 /* looking at a specific sub-page */
2689 if (VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
2690 /* 2 - sub-page was already marked as tainted */
2691 return false;
2692 }
2693 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) &&
2694 !page->vmp_wpmapped) {
2695 /* 4 - already validated and never mapped writable */
2696 return false;
2697 }
2698 }
2699 /* page needs to be validated */
2700 return true;
2701 }
2702
2703
2704 static bool
2705 vm_fault_cs_page_immutable(
2706 vm_page_t m,
2707 vm_map_size_t fault_page_size,
2708 vm_map_offset_t fault_phys_offset,
2709 vm_prot_t prot __unused)
2710 {
2711 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)
2712 /*&& ((prot) & VM_PROT_EXECUTE)*/) {
2713 return true;
2714 }
2715 return false;
2716 }
2717
2718 static bool
2719 vm_fault_cs_page_nx(
2720 vm_page_t m,
2721 vm_map_size_t fault_page_size,
2722 vm_map_offset_t fault_phys_offset)
2723 {
2724 return VMP_CS_NX(m, fault_page_size, fault_phys_offset);
2725 }
2726
2727 /*
2728 * Check if the page being entered into the pmap violates code signing.
2729 */
2730 static kern_return_t
2731 vm_fault_cs_check_violation(
2732 bool cs_bypass,
2733 vm_object_t object,
2734 vm_page_t m,
2735 pmap_t pmap,
2736 vm_prot_t prot,
2737 vm_prot_t caller_prot,
2738 vm_map_size_t fault_page_size,
2739 vm_map_offset_t fault_phys_offset,
2740 vm_object_fault_info_t fault_info,
2741 bool map_is_switched,
2742 bool map_is_switch_protected,
2743 bool *cs_violation)
2744 {
2745 #if !CODE_SIGNING_MONITOR
2746 #pragma unused(caller_prot)
2747 #pragma unused(fault_info)
2748 #endif /* !CODE_SIGNING_MONITOR */
2749
2750 int cs_enforcement_enabled;
2751 if (!cs_bypass &&
2752 vm_fault_cs_need_validation(pmap, m, object,
2753 fault_page_size, fault_phys_offset)) {
2754 vm_object_lock_assert_exclusive(object);
2755
2756 if (VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset)) {
2757 vm_cs_revalidates++;
2758 }
2759
2760 /* VM map is locked, so 1 ref will remain on VM object -
2761 * so no harm if vm_page_validate_cs drops the object lock */
2762
2763 #if CODE_SIGNING_MONITOR
2764 if (fault_info->csm_associated &&
2765 csm_enabled() &&
2766 !VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2767 !VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) &&
2768 !VMP_CS_NX(m, fault_page_size, fault_phys_offset) &&
2769 (prot & VM_PROT_EXECUTE) &&
2770 (caller_prot & VM_PROT_EXECUTE)) {
2771 /*
2772 * When we have a code signing monitor, the monitor will evaluate the code signature
2773 * for any executable page mapping. No need for the VM to also validate the page.
2774 * In the code signing monitor we trust :)
2775 */
2776 vm_cs_defer_to_csm++;
2777 } else {
2778 vm_cs_defer_to_csm_not++;
2779 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2780 }
2781 #else /* CODE_SIGNING_MONITOR */
2782 vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
2783 #endif /* CODE_SIGNING_MONITOR */
2784 }
2785
2786 /* If the map is switched, and is switch-protected, we must protect
2787 * some pages from being write-faulted: immutable pages because by
2788 * definition they may not be written, and executable pages because that
2789 * would provide a way to inject unsigned code.
2790 * If the page is immutable, we can simply return. However, we can't
2791 * immediately determine whether a page is executable anywhere. But,
2792 * we can disconnect it everywhere and remove the executable protection
2793 * from the current map. We do that below right before we do the
2794 * PMAP_ENTER.
2795 */
2796 if (pmap == kernel_pmap) {
2797 /* kernel fault: cs_enforcement does not apply */
2798 cs_enforcement_enabled = 0;
2799 } else {
2800 cs_enforcement_enabled = pmap_get_vm_map_cs_enforced(pmap);
2801 }
2802
2803 if (cs_enforcement_enabled && map_is_switched &&
2804 map_is_switch_protected &&
2805 vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2806 (prot & VM_PROT_WRITE)) {
2807 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_IMMUTABLE_PAGE_WRITE), 0 /* arg */);
2808 return KERN_CODESIGN_ERROR;
2809 }
2810
2811 if (cs_enforcement_enabled &&
2812 vm_fault_cs_page_nx(m, fault_page_size, fault_phys_offset) &&
2813 (prot & VM_PROT_EXECUTE)) {
2814 if (cs_debug) {
2815 printf("page marked to be NX, not letting it be mapped EXEC\n");
2816 }
2817 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAILED_NX_PAGE_EXEC_MAPPING), 0 /* arg */);
2818 return KERN_CODESIGN_ERROR;
2819 }
2820
2821 /* A page could be tainted, or pose a risk of being tainted later.
2822 * Check whether the receiving process wants it, and make it feel
2823 * the consequences (that happens in cs_invalid_page()).
2824 * For CS Enforcement, two other conditions will
2825 * cause that page to be tainted as well:
2826 * - pmapping an unsigned page executable - this means unsigned code;
2827 * - writeable mapping of a validated page - the content of that page
2828 * can be changed without the kernel noticing, therefore unsigned
2829 * code can be created
2830 */
2831 if (cs_bypass) {
2832 /* code-signing is bypassed */
2833 *cs_violation = FALSE;
2834 } else if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
2835 /* tainted page */
2836 *cs_violation = TRUE;
2837 } else if (!cs_enforcement_enabled) {
2838 /* no further code-signing enforcement */
2839 *cs_violation = FALSE;
2840 } else if (vm_fault_cs_page_immutable(m, fault_page_size, fault_phys_offset, prot) &&
2841 ((prot & VM_PROT_WRITE) ||
2842 m->vmp_wpmapped)) {
2843 /*
2844 * The page should be immutable, but is in danger of being
2845 * modified.
2846 * This is the case where we want policy from the code
2847 * directory - is the page immutable or not? For now we have
2848 * to assume that code pages will be immutable, data pages not.
2849 * We'll assume a page is a code page if it has a code directory
2850 * and we fault for execution.
2851 * That is good enough since if we faulted the code page for
2852 * writing in another map before, it is wpmapped; if we fault
2853 * it for writing in this map later it will also be faulted for
2854 * executing at the same time; and if we fault for writing in
2855 * another map later, we will disconnect it from this pmap so
2856 * we'll notice the change.
2857 */
2858 *cs_violation = TRUE;
2859 } else if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
2860 (prot & VM_PROT_EXECUTE)
2861 #if CODE_SIGNING_MONITOR
2862 /*
2863 * Executable pages will be validated by the code signing monitor. If the
2864 * code signing monitor is turned off, then this is a code-signing violation.
2865 */
2866 && !csm_enabled()
2867 #endif /* CODE_SIGNING_MONITOR */
2868 ) {
2869 *cs_violation = TRUE;
2870 } else {
2871 *cs_violation = FALSE;
2872 }
2873 return KERN_SUCCESS;
2874 }
2875
2876 /*
2877 * Handles a code signing violation by either rejecting the page or forcing a disconnect.
2878 * @param must_disconnect This value will be set to true if the caller must disconnect
2879 * this page.
2880 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
2881 */
2882 static kern_return_t
2883 vm_fault_cs_handle_violation(
2884 vm_object_t object,
2885 vm_page_t m,
2886 pmap_t pmap,
2887 vm_prot_t prot,
2888 vm_map_offset_t vaddr,
2889 vm_map_size_t fault_page_size,
2890 vm_map_offset_t fault_phys_offset,
2891 bool map_is_switched,
2892 bool map_is_switch_protected,
2893 bool *must_disconnect)
2894 {
2895 #if !MACH_ASSERT
2896 #pragma unused(pmap)
2897 #pragma unused(map_is_switch_protected)
2898 #endif /* !MACH_ASSERT */
2899 /*
2900 * We will have a tainted page. Have to handle the special case
2901 * of a switched map now. If the map is not switched, standard
2902 * procedure applies - call cs_invalid_page().
2903 * If the map is switched, the real owner is invalid already.
2904 * There is no point in invalidating the switching process since
2905 * it will not be executing from the map. So we don't call
2906 * cs_invalid_page() in that case.
2907 */
2908 boolean_t reject_page, cs_killed;
2909 kern_return_t kr;
2910 if (map_is_switched) {
2911 assert(pmap == vm_map_pmap(current_thread()->map));
2912 assert(!(prot & VM_PROT_WRITE) || (map_is_switch_protected == FALSE));
2913 reject_page = FALSE;
2914 } else {
2915 if (cs_debug > 5) {
2916 printf("vm_fault: signed: %s validate: %s tainted: %s wpmapped: %s prot: 0x%x\n",
2917 object->code_signed ? "yes" : "no",
2918 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2919 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset) ? "yes" : "no",
2920 m->vmp_wpmapped ? "yes" : "no",
2921 (int)prot);
2922 }
2923 reject_page = cs_invalid_page((addr64_t) vaddr, &cs_killed);
2924 }
2925
2926 if (reject_page) {
2927 /* reject the invalid page: abort the page fault */
2928 int pid;
2929 const char *procname;
2930 task_t task;
2931 vm_object_t file_object, shadow;
2932 vm_object_offset_t file_offset;
2933 char *pathname, *filename;
2934 vm_size_t pathname_len, filename_len;
2935 boolean_t truncated_path;
2936 #define __PATH_MAX 1024
2937 struct timespec mtime, cs_mtime;
2938 int shadow_depth;
2939 os_reason_t codesigning_exit_reason = OS_REASON_NULL;
2940
2941 kr = KERN_CODESIGN_ERROR;
2942 cs_enter_tainted_rejected++;
2943
2944 /* get process name and pid */
2945 procname = "?";
2946 task = current_task();
2947 pid = proc_selfpid();
2948 if (get_bsdtask_info(task) != NULL) {
2949 procname = proc_name_address(get_bsdtask_info(task));
2950 }
2951
2952 /* get file's VM object */
2953 file_object = object;
2954 file_offset = m->vmp_offset;
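/*
 * Walk down the shadow chain, accumulating each object's
 * vo_shadow_offset, until we reach the bottom object -- the one
 * backed by the file's pager -- and the page's offset within it.
 */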
2955 for (shadow = file_object->shadow,
2956 shadow_depth = 0;
2957 shadow != VM_OBJECT_NULL;
2958 shadow = file_object->shadow,
2959 shadow_depth++) {
2960 vm_object_lock_shared(shadow);
2961 if (file_object != object) {
2962 vm_object_unlock(file_object);
2963 }
2964 file_offset += file_object->vo_shadow_offset;
2965 file_object = shadow;
2966 }
2967
2968 mtime.tv_sec = 0;
2969 mtime.tv_nsec = 0;
2970 cs_mtime.tv_sec = 0;
2971 cs_mtime.tv_nsec = 0;
2972
2973 /* get file's pathname and/or filename */
2974 pathname = NULL;
2975 filename = NULL;
2976 pathname_len = 0;
2977 filename_len = 0;
2978 truncated_path = FALSE;
2979 /* no pager -> no file -> no pathname, use "<nil>" in that case */
2980 if (file_object->pager != NULL) {
2981 pathname = kalloc_data(__PATH_MAX * 2, Z_WAITOK);
2982 if (pathname) {
2983 pathname[0] = '\0';
2984 pathname_len = __PATH_MAX;
2985 filename = pathname + pathname_len;
2986 filename_len = __PATH_MAX;
2987
2988 if (vnode_pager_get_object_name(file_object->pager,
2989 pathname,
2990 pathname_len,
2991 filename,
2992 filename_len,
2993 &truncated_path) == KERN_SUCCESS) {
2994 /* safety first... */
2995 pathname[__PATH_MAX - 1] = '\0';
2996 filename[__PATH_MAX - 1] = '\0';
2997
2998 vnode_pager_get_object_mtime(file_object->pager,
2999 &mtime,
3000 &cs_mtime);
3001 } else {
3002 kfree_data(pathname, __PATH_MAX * 2);
3003 pathname = NULL;
3004 filename = NULL;
3005 pathname_len = 0;
3006 filename_len = 0;
3007 truncated_path = FALSE;
3008 }
3009 }
3010 }
3011 printf("CODE SIGNING: process %d[%s]: "
3012 "rejecting invalid page at address 0x%llx "
3013 "from offset 0x%llx in file \"%s%s%s\" "
3014 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3015 "(signed:%d validated:%d tainted:%d nx:%d "
3016 "wpmapped:%d dirty:%d depth:%d)\n",
3017 pid, procname, (addr64_t) vaddr,
3018 file_offset,
3019 (pathname ? pathname : "<nil>"),
3020 (truncated_path ? "/.../" : ""),
3021 (truncated_path ? filename : ""),
3022 cs_mtime.tv_sec, cs_mtime.tv_nsec,
3023 ((cs_mtime.tv_sec == mtime.tv_sec &&
3024 cs_mtime.tv_nsec == mtime.tv_nsec)
3025 ? "=="
3026 : "!="),
3027 mtime.tv_sec, mtime.tv_nsec,
3028 object->code_signed,
3029 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3030 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3031 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3032 m->vmp_wpmapped,
3033 m->vmp_dirty,
3034 shadow_depth);
3035
3036 /*
3037 * We currently only generate an exit reason if cs_invalid_page directly killed a process. If cs_invalid_page
3038 * did not kill the process (more the case on desktop), vm_fault_enter will not satisfy the fault and whether the
3039 * process dies is dependent on whether there is a signal handler registered for SIGSEGV and how that handler
3040 * will deal with the segmentation fault.
3041 */
3042 if (cs_killed) {
3043 KDBG(BSDDBG_CODE(DBG_BSD_PROC, BSD_PROC_EXITREASON_CREATE) | DBG_FUNC_NONE,
3044 pid, OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3045
3046 codesigning_exit_reason = os_reason_create(OS_REASON_CODESIGNING, CODESIGNING_EXIT_REASON_INVALID_PAGE);
3047 if (codesigning_exit_reason == NULL) {
3048 printf("vm_fault_enter: failed to allocate codesigning exit reason\n");
3049 } else {
3050 mach_vm_address_t data_addr = 0;
3051 struct codesigning_exit_reason_info *ceri = NULL;
3052 uint32_t reason_buffer_size_estimate = kcdata_estimate_required_buffer_size(1, sizeof(*ceri));
3053
3054 if (os_reason_alloc_buffer_noblock(codesigning_exit_reason, reason_buffer_size_estimate)) {
3055 printf("vm_fault_enter: failed to allocate buffer for codesigning exit reason\n");
3056 } else {
3057 if (KERN_SUCCESS == kcdata_get_memory_addr(&codesigning_exit_reason->osr_kcd_descriptor,
3058 EXIT_REASON_CODESIGNING_INFO, sizeof(*ceri), &data_addr)) {
3059 ceri = (struct codesigning_exit_reason_info *)data_addr;
3060 static_assert(__PATH_MAX == sizeof(ceri->ceri_pathname));
3061
3062 ceri->ceri_virt_addr = vaddr;
3063 ceri->ceri_file_offset = file_offset;
3064 if (pathname) {
3065 strncpy((char *)&ceri->ceri_pathname, pathname, sizeof(ceri->ceri_pathname));
3066 } else {
3067 ceri->ceri_pathname[0] = '\0';
3068 }
3069 if (filename) {
3070 strncpy((char *)&ceri->ceri_filename, filename, sizeof(ceri->ceri_filename));
3071 } else {
3072 ceri->ceri_filename[0] = '\0';
3073 }
3074 ceri->ceri_path_truncated = (truncated_path ? 1 : 0);
3075 ceri->ceri_codesig_modtime_secs = cs_mtime.tv_sec;
3076 ceri->ceri_codesig_modtime_nsecs = cs_mtime.tv_nsec;
3077 ceri->ceri_page_modtime_secs = mtime.tv_sec;
3078 ceri->ceri_page_modtime_nsecs = mtime.tv_nsec;
3079 ceri->ceri_object_codesigned = (object->code_signed);
3080 ceri->ceri_page_codesig_validated = VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset);
3081 ceri->ceri_page_codesig_tainted = VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset);
3082 ceri->ceri_page_codesig_nx = VMP_CS_NX(m, fault_page_size, fault_phys_offset);
3083 ceri->ceri_page_wpmapped = (m->vmp_wpmapped);
3084 ceri->ceri_page_slid = 0;
3085 ceri->ceri_page_dirty = (m->vmp_dirty);
3086 ceri->ceri_page_shadow_depth = shadow_depth;
3087 } else {
3088 #if DEBUG || DEVELOPMENT
3089 panic("vm_fault_enter: failed to allocate kcdata for codesigning exit reason");
3090 #else
3091 printf("vm_fault_enter: failed to allocate kcdata for codesigning exit reason\n");
3092 #endif /* DEBUG || DEVELOPMENT */
3093 /* Free the buffer */
3094 os_reason_alloc_buffer_noblock(codesigning_exit_reason, 0);
3095 }
3096 }
3097 }
3098
3099 set_thread_exit_reason(current_thread(), codesigning_exit_reason, FALSE);
3100 }
3101 if (panic_on_cs_killed &&
3102 object->object_is_shared_cache) {
3103 char *tainted_contents;
3104 vm_map_offset_t src_vaddr;
3105 src_vaddr = (vm_map_offset_t) phystokv((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m) << PAGE_SHIFT);
3106 tainted_contents = kalloc_data(PAGE_SIZE, Z_WAITOK);
3107 bcopy((const char *)src_vaddr, tainted_contents, PAGE_SIZE);
3108 printf("CODE SIGNING: tainted page %p phys 0x%x phystokv 0x%llx copied to %p\n", m, VM_PAGE_GET_PHYS_PAGE(m), (uint64_t)src_vaddr, tainted_contents);
3109 panic("CODE SIGNING: process %d[%s]: "
3110 "rejecting invalid page (phys#0x%x) at address 0x%llx "
3111 "from offset 0x%llx in file \"%s%s%s\" "
3112 "(cs_mtime:%lu.%ld %s mtime:%lu.%ld) "
3113 "(signed:%d validated:%d tainted:%d nx:%d"
3114 "wpmapped:%d dirty:%d depth:%d)\n",
3115 pid, procname,
3116 VM_PAGE_GET_PHYS_PAGE(m),
3117 (addr64_t) vaddr,
3118 file_offset,
3119 (pathname ? pathname : "<nil>"),
3120 (truncated_path ? "/.../" : ""),
3121 (truncated_path ? filename : ""),
3122 cs_mtime.tv_sec, cs_mtime.tv_nsec,
3123 ((cs_mtime.tv_sec == mtime.tv_sec &&
3124 cs_mtime.tv_nsec == mtime.tv_nsec)
3125 ? "=="
3126 : "!="),
3127 mtime.tv_sec, mtime.tv_nsec,
3128 object->code_signed,
3129 VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset),
3130 VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset),
3131 VMP_CS_NX(m, fault_page_size, fault_phys_offset),
3132 m->vmp_wpmapped,
3133 m->vmp_dirty,
3134 shadow_depth);
3135 }
3136
3137 if (file_object != object) {
3138 vm_object_unlock(file_object);
3139 }
3140 if (pathname_len != 0) {
3141 kfree_data(pathname, __PATH_MAX * 2);
3142 pathname = NULL;
3143 filename = NULL;
3144 }
3145 } else {
3146 /* proceed with the invalid page */
3147 kr = KERN_SUCCESS;
3148 if (!VMP_CS_VALIDATED(m, fault_page_size, fault_phys_offset) &&
3149 !object->code_signed) {
3150 /*
3151 * This page has not been (fully) validated but
3152 * does not belong to a code-signed object
3153 * so it should not be forcefully considered
3154 * as tainted.
3155 * We're just concerned about it here because
3156 * we've been asked to "execute" it but that
3157 * does not mean that it should cause other
3158 * accesses to fail.
3159 * This happens when a debugger sets a
3160 * breakpoint and we then execute code in
3161 * that page. Marking the page as "tainted"
3162 * would cause any inspection tool ("leaks",
3163 * "vmmap", "CrashReporter", ...) to get killed
3164 * due to code-signing violation on that page,
3165 * even though they're just reading it and not
3166 * executing from it.
3167 */
3168 } else {
3169 /*
3170 * Page might have been tainted before or not;
3171 * now it definitively is. If the page wasn't
3172 * tainted, we must disconnect it from all
3173 * pmaps later, to force existing mappings
3174 * through that code path for re-consideration
3175 * of the validity of that page.
3176 */
3177 if (!VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
3178 *must_disconnect = TRUE;
3179 VMP_CS_SET_TAINTED(m, fault_page_size, fault_phys_offset, TRUE);
3180 }
3181 }
3182 cs_enter_tainted_accepted++;
3183 }
3184 if (kr != KERN_SUCCESS) {
3185 if (cs_debug) {
3186 printf("CODESIGNING: vm_fault_enter(0x%llx): "
3187 "*** INVALID PAGE ***\n",
3188 (long long)vaddr);
3189 }
3190 #if !SECURE_KERNEL
3191 if (cs_enforcement_panic) {
3192 panic("CODESIGNING: panicking on invalid page");
3193 }
3194 #endif
3195 }
3196 return kr;
3197 }
3198
3199 /*
3200 * Check that the code signature is valid for the given page being inserted into
3201 * the pmap.
3202 *
3203 * @param must_disconnect This value will be set to true if the caller must disconnect
3204 * this page.
3205 * @return If this function does not return KERN_SUCCESS, the caller must abort the page fault.
3206 */
3207 static kern_return_t
3208 vm_fault_validate_cs(
3209 bool cs_bypass,
3210 vm_object_t object,
3211 vm_page_t m,
3212 pmap_t pmap,
3213 vm_map_offset_t vaddr,
3214 vm_prot_t prot,
3215 vm_prot_t caller_prot,
3216 vm_map_size_t fault_page_size,
3217 vm_map_offset_t fault_phys_offset,
3218 vm_object_fault_info_t fault_info,
3219 bool *must_disconnect)
3220 {
3221 bool map_is_switched, map_is_switch_protected, cs_violation;
3222 kern_return_t kr;
3223 /* Validate code signature if necessary. */
3224 map_is_switched = ((pmap != vm_map_pmap(current_task()->map)) &&
3225 (pmap == vm_map_pmap(current_thread()->map)));
3226 map_is_switch_protected = current_thread()->map->switch_protect;
3227 kr = vm_fault_cs_check_violation(cs_bypass, object, m, pmap,
3228 prot, caller_prot, fault_page_size, fault_phys_offset, fault_info,
3229 map_is_switched, map_is_switch_protected, &cs_violation);
3230 if (kr != KERN_SUCCESS) {
3231 return kr;
3232 }
3233 if (cs_violation) {
3234 kr = vm_fault_cs_handle_violation(object, m, pmap, prot, vaddr,
3235 fault_page_size, fault_phys_offset,
3236 map_is_switched, map_is_switch_protected, must_disconnect);
3237 }
3238 return kr;
3239 }
3240
3241 /*
3242 * Enqueue the page on the appropriate paging queue.
3243 */
3244 static void
3245 vm_fault_enqueue_page(
3246 vm_object_t object,
3247 vm_page_t m,
3248 bool wired,
3249 bool change_wiring,
3250 vm_tag_t wire_tag,
3251 bool no_cache,
3252 int *type_of_fault,
3253 kern_return_t kr)
3254 {
3255 assert((m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) || object != compressor_object);
3256 boolean_t page_queues_locked = FALSE;
3257 boolean_t previously_pmapped = m->vmp_pmapped;
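/*
 * Helper macros to take the page queues lock lazily: only the paths
 * below that actually need it grab it, and it is dropped once at the
 * end if it was ever taken.
 */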
3258 #define __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED() \
3259 MACRO_BEGIN \
3260 if (! page_queues_locked) { \
3261 page_queues_locked = TRUE; \
3262 vm_page_lockspin_queues(); \
3263 } \
3264 MACRO_END
3265 #define __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED() \
3266 MACRO_BEGIN \
3267 if (page_queues_locked) { \
3268 page_queues_locked = FALSE; \
3269 vm_page_unlock_queues(); \
3270 } \
3271 MACRO_END
3272
3273 vm_page_update_special_state(m);
3274 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
3275 /*
3276 * Compressor pages are neither wired
3277 * nor pageable and should never change.
3278 */
3279 assert(object == compressor_object);
3280 } else if (change_wiring) {
3281 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3282
3283 if (wired) {
3284 if (kr == KERN_SUCCESS) {
3285 vm_page_wire(m, wire_tag, TRUE);
3286 }
3287 } else {
3288 vm_page_unwire(m, TRUE);
3289 }
3290 /* we keep the page queues lock, if we need it later */
3291 } else {
3292 if (object->internal == TRUE) {
3293 /*
3294 * don't allow anonymous pages on
3295 * the speculative queues
3296 */
3297 no_cache = FALSE;
3298 }
3299 if (kr != KERN_SUCCESS) {
3300 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3301 vm_page_deactivate(m);
3302 /* we keep the page queues lock, if we need it later */
3303 } else if (((m->vmp_q_state == VM_PAGE_NOT_ON_Q) ||
3304 (m->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3305 (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) ||
3306 ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) && no_cache)) &&
3307 !VM_PAGE_WIRED(m)) {
3308 if (vm_page_local_q &&
3309 (*type_of_fault == DBG_COW_FAULT ||
3310 *type_of_fault == DBG_ZERO_FILL_FAULT)) {
3311 struct vpl *lq;
3312 uint32_t lid;
3313
3314 assert(m->vmp_q_state == VM_PAGE_NOT_ON_Q);
3315
3316 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3317 vm_object_lock_assert_exclusive(object);
3318
3319 /*
3320 * we got a local queue to stuff this
3321 * new page on...
3322 * it's safe to manipulate local and
3323 * local_id at this point since we're
3324 * behind an exclusive object lock and
3325 * the page is not on any global queue.
3326 *
3327 * we'll use the current cpu number to
3328 * select the queue... note that we don't
3329 * need to disable preemption... we're
3330 * going to be behind the local queue's
3331 * lock to do the real work
3332 */
3333 lid = cpu_number();
3334
3335 lq = zpercpu_get_cpu(vm_page_local_q, lid);
3336
3337 VPL_LOCK(&lq->vpl_lock);
3338
3339 vm_page_check_pageable_safe(m);
3340 vm_page_queue_enter(&lq->vpl_queue, m, vmp_pageq);
3341 m->vmp_q_state = VM_PAGE_ON_ACTIVE_LOCAL_Q;
3342 m->vmp_local_id = (uint16_t)lid;
3343 lq->vpl_count++;
3344
3345 if (object->internal) {
3346 lq->vpl_internal_count++;
3347 } else {
3348 lq->vpl_external_count++;
3349 }
3350
3351 VPL_UNLOCK(&lq->vpl_lock);
3352
3353 if (lq->vpl_count > vm_page_local_q_soft_limit) {
3354 /*
3355 * we're beyond the soft limit
3356 * for the local queue
3357 * vm_page_reactivate_local will
3358 * 'try' to take the global page
3359 * queue lock... if it can't
3360 * that's ok... we'll let the
3361 * queue continue to grow up
3362 * to the hard limit... at that
3363 * point we'll wait for the
3364 * lock... once we've got the
3365 * lock, we'll transfer all of
3366 * the pages from the local
3367 * queue to the global active
3368 * queue
3369 */
3370 vm_page_reactivate_local(lid, FALSE, FALSE);
3371 }
3372 } else {
3373 __VM_PAGE_LOCKSPIN_QUEUES_IF_NEEDED();
3374
3375 /*
3376 * test again now that we hold the
3377 * page queue lock
3378 */
3379 if (!VM_PAGE_WIRED(m)) {
3380 if (m->vmp_q_state == VM_PAGE_ON_INACTIVE_CLEANED_Q) {
3381 vm_page_queues_remove(m, FALSE);
3382
3383 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_reactivated, 1);
3384 VM_PAGEOUT_DEBUG(vm_pageout_cleaned_fault_reactivated, 1);
3385 }
3386
3387 if (!VM_PAGE_ACTIVE_OR_INACTIVE(m) ||
3388 no_cache) {
3389 /*
3390 * If this is a no_cache mapping
3391 * and the page has never been
3392 * mapped before or was
3393 * previously a no_cache page,
3394 * then we want to leave pages
3395 * in the speculative state so
3396 * that they can be readily
3397 * recycled if free memory runs
3398 * low. Otherwise the page is
3399 * activated as normal.
3400 */
3401
3402 if (no_cache &&
3403 (!previously_pmapped ||
3404 m->vmp_no_cache)) {
3405 m->vmp_no_cache = TRUE;
3406
3407 if (m->vmp_q_state != VM_PAGE_ON_SPECULATIVE_Q) {
3408 vm_page_speculate(m, FALSE);
3409 }
3410 } else if (!VM_PAGE_ACTIVE_OR_INACTIVE(m)) {
3411 vm_page_activate(m);
3412 }
3413 }
3414 }
3415 /* we keep the page queues lock, if we need it later */
3416 }
3417 }
3418 }
3419 /* we're done with the page queues lock, if we ever took it */
3420 __VM_PAGE_UNLOCK_QUEUES_IF_NEEDED();
3421 }
3422
3423 /*
3424 * Sets the pmapped, xpmapped, and wpmapped bits on the vm_page_t and updates accounting.
3425 * @return true if the page needs to be sync'ed via pmap_sync_page_data_phys()
3426 * before being inserted into the pmap.
3427 */
3428 static bool
3429 vm_fault_enter_set_mapped(
3430 vm_object_t object,
3431 vm_page_t m,
3432 vm_prot_t prot,
3433 vm_prot_t fault_type)
3434 {
3435 bool page_needs_sync = false;
3436 /*
3437 * NOTE: we may only hold the vm_object lock SHARED
3438 * at this point, so we need the phys_page lock to
3439 * properly serialize updating the pmapped and
3440 * xpmapped bits
3441 */
3442 if ((prot & VM_PROT_EXECUTE) && !m->vmp_xpmapped) {
3443 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3444
3445 pmap_lock_phys_page(phys_page);
3446 m->vmp_pmapped = TRUE;
3447
3448 if (!m->vmp_xpmapped) {
3449 m->vmp_xpmapped = TRUE;
3450
3451 pmap_unlock_phys_page(phys_page);
3452
3453 if (!object->internal) {
3454 OSAddAtomic(1, &vm_page_xpmapped_external_count);
3455 }
3456
3457 #if defined(__arm64__)
3458 page_needs_sync = true;
3459 #else
3460 if (object->internal &&
3461 object->pager != NULL) {
3462 /*
3463 * This page could have been
3464 * uncompressed by the
3465 * compressor pager and its
3466 * contents might be only in
3467 * the data cache.
3468 * Since it's being mapped for
3469 * "execute" for the first time,
3470 * make sure the icache is in
3471 * sync.
3472 */
3473 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
3474 page_needs_sync = true;
3475 }
3476 #endif
3477 } else {
3478 pmap_unlock_phys_page(phys_page);
3479 }
3480 } else {
3481 if (m->vmp_pmapped == FALSE) {
3482 ppnum_t phys_page = VM_PAGE_GET_PHYS_PAGE(m);
3483
3484 pmap_lock_phys_page(phys_page);
3485 m->vmp_pmapped = TRUE;
3486 pmap_unlock_phys_page(phys_page);
3487 }
3488 }
3489
3490 if (fault_type & VM_PROT_WRITE) {
3491 if (m->vmp_wpmapped == FALSE) {
3492 vm_object_lock_assert_exclusive(object);
3493 if (!object->internal && object->pager) {
3494 task_update_logical_writes(current_task(), PAGE_SIZE, TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
3495 }
3496 m->vmp_wpmapped = TRUE;
3497 }
3498 }
3499 return page_needs_sync;
3500 }
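/*
 * Editorial sketch (not from the original source): callers are expected to
 * consume the return value by syncing the physical page before it is entered
 * into the pmap, as vm_fault_enter() does further below:
 *
 *	page_needs_data_sync = vm_fault_enter_set_mapped(object, m, prot, fault_type);
 *	if (page_needs_data_sync) {
 *		pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
 *	}
 */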
3501
3502
3503 static inline kern_return_t
3504 vm_fault_pmap_validate_page(
3505 pmap_t pmap __unused,
3506 vm_page_t m __unused,
3507 vm_map_offset_t vaddr __unused,
3508 vm_prot_t prot __unused,
3509 vm_object_fault_info_t fault_info __unused,
3510 bool *page_sleep_needed)
3511 {
3512 assert(page_sleep_needed != NULL);
3513 *page_sleep_needed = false;
3514 #if CONFIG_SPTM
3515 /*
3516 * Reject the executable or debug mapping if the page is already wired for I/O. The SPTM's security
3517 * model doesn't allow us to reliably use executable pages for I/O due to both CS integrity
3518 * protections and the possibility that the pages may be dynamically retyped while wired for I/O.
3519 * This check is required to happen under the VM object lock in order to synchronize with the
3520 * complementary check on the I/O wiring path in vm_page_do_delayed_work().
3521 */
3522 if (__improbable((m->vmp_cleaning || m->vmp_iopl_wired) &&
3523 pmap_will_retype(pmap, vaddr, VM_PAGE_GET_PHYS_PAGE(m), prot, fault_info->pmap_options |
3524 ((fault_info->fi_xnu_user_debug && !VM_PAGE_OBJECT(m)->code_signed) ? PMAP_OPTIONS_XNU_USER_DEBUG : 0),
3525 PMAP_MAPPING_TYPE_INFER))) {
3526 if (__improbable(m->vmp_iopl_wired)) {
3527 vm_map_guard_exception(vaddr, kGUARD_EXC_SEC_EXEC_ON_IOPL_PAGE);
3528 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
3529 KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_EXEC_ON_IOPL_PAGE), (uintptr_t)vaddr);
3530 return KERN_PROTECTION_FAILURE;
3531 }
3532 *page_sleep_needed = m->vmp_cleaning;
3533 }
3534 #endif /* CONFIG_SPTM */
3535 return KERN_SUCCESS;
3536 }
3537
3538 /*
3539 * wrappers for pmap_enter_options()
3540 */
3541 kern_return_t
3542 pmap_enter_object_options_check(
3543 pmap_t pmap,
3544 vm_map_address_t virtual_address,
3545 vm_map_offset_t fault_phys_offset,
3546 vm_object_t obj,
3547 ppnum_t pn,
3548 vm_prot_t protection,
3549 vm_prot_t fault_type,
3550 boolean_t wired,
3551 unsigned int options)
3552 {
3553 unsigned int flags = 0;
3554 unsigned int extra_options = 0;
3555
3556 if (obj->internal) {
3557 extra_options |= PMAP_OPTIONS_INTERNAL;
3558 }
3559 pmap_paddr_t physical_address = (pmap_paddr_t)ptoa(pn) + fault_phys_offset;
3560
3561
3562 return pmap_enter_options_addr(pmap,
3563 virtual_address,
3564 physical_address,
3565 protection,
3566 fault_type,
3567 flags,
3568 wired,
3569 options | extra_options,
3570 NULL,
3571 PMAP_MAPPING_TYPE_INFER);
3572 }
3573
3574 kern_return_t
3575 pmap_enter_options_check(
3576 pmap_t pmap,
3577 vm_map_address_t virtual_address,
3578 vm_map_offset_t fault_phys_offset,
3579 vm_page_t page,
3580 vm_prot_t protection,
3581 vm_prot_t fault_type,
3582 boolean_t wired,
3583 unsigned int options)
3584 {
3585 if (page->vmp_error) {
3586 return KERN_MEMORY_FAILURE;
3587 }
3588 vm_object_t obj = VM_PAGE_OBJECT(page);
3589 if (page->vmp_reusable || obj->all_reusable) {
3590 options |= PMAP_OPTIONS_REUSABLE;
3591 }
3592 assert(page->vmp_pmapped);
3593 if (fault_type & VM_PROT_WRITE) {
3594 if (pmap == kernel_pmap) {
3595 /*
3596 * The kernel sometimes needs to map a page to provide its
3597 * initial contents but that does not mean that the page is
3598 * actually dirty/modified, so let's not assert that it's been
3599 * "wpmapped".
3600 */
3601 } else {
3602 assert(page->vmp_wpmapped);
3603 }
3604 }
3605 return pmap_enter_object_options_check(
3606 pmap,
3607 virtual_address,
3608 fault_phys_offset,
3609 obj,
3610 VM_PAGE_GET_PHYS_PAGE(page),
3611 protection,
3612 fault_type,
3613 wired,
3614 options);
3615 }
3616
3617 kern_return_t
3618 pmap_enter_check(
3619 pmap_t pmap,
3620 vm_map_address_t virtual_address,
3621 vm_page_t page,
3622 vm_prot_t protection,
3623 vm_prot_t fault_type,
3624 boolean_t wired)
3625 {
3626 return pmap_enter_options_check(pmap,
3627 virtual_address,
3628 0 /* fault_phys_offset */,
3629 page,
3630 protection,
3631 fault_type,
3632 wired,
3633 0 /* options */);
3634 }
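/*
 * Editorial sketch (not from the original source): the wrappers above layer
 * onto one another, each filling in defaults before the final pmap call:
 *
 *	pmap_enter_check(pmap, va, page, prot, fault_type, wired)
 *	  -> pmap_enter_options_check(pmap, va, 0, page, prot, fault_type, wired, 0)
 *	    -> pmap_enter_object_options_check(pmap, va, 0, VM_PAGE_OBJECT(page),
 *	           VM_PAGE_GET_PHYS_PAGE(page), prot, fault_type, wired, options)
 *	      -> pmap_enter_options_addr(...)
 */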
3635
3636 /*
3637 * Try to enter the given page into the pmap.
3638 * Will retry without execute permission if the code signing monitor is enabled and
3639 * we encounter a codesigning failure on a non-execute fault.
3640 */
3641 static kern_return_t
3642 vm_fault_attempt_pmap_enter(
3643 pmap_t pmap,
3644 vm_map_offset_t vaddr,
3645 vm_map_size_t fault_page_size,
3646 vm_map_offset_t fault_phys_offset,
3647 vm_page_t m,
3648 vm_prot_t *prot,
3649 vm_prot_t caller_prot,
3650 vm_prot_t fault_type,
3651 bool wired,
3652 int pmap_options)
3653 {
3654 #if !CODE_SIGNING_MONITOR
3655 #pragma unused(caller_prot)
3656 #endif /* !CODE_SIGNING_MONITOR */
3657
3658 kern_return_t kr;
3659 if (fault_page_size != PAGE_SIZE) {
3660 DEBUG4K_FAULT("pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x fault_type 0x%x\n", pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, *prot, fault_type);
3661 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
3662 fault_phys_offset < PAGE_SIZE),
3663 "0x%llx\n", (uint64_t)fault_phys_offset);
3664 } else {
3665 assertf(fault_phys_offset == 0,
3666 "0x%llx\n", (uint64_t)fault_phys_offset);
3667 }
3668
3669 kr = pmap_enter_options_check(pmap, vaddr,
3670 fault_phys_offset,
3671 m, *prot, fault_type,
3672 wired, pmap_options);
3673
3674 #if CODE_SIGNING_MONITOR
3675 /*
3676 * Retry without execute permission if we encountered a codesigning
3677 * failure on a non-execute fault. This allows applications which
3678 * don't actually need to execute code to still map it for read access.
3679 */
3680 if (kr == KERN_CODESIGN_ERROR &&
3681 csm_enabled() &&
3682 (*prot & VM_PROT_EXECUTE) &&
3683 !(caller_prot & VM_PROT_EXECUTE)) {
3684 *prot &= ~VM_PROT_EXECUTE;
3685 kr = pmap_enter_options_check(pmap, vaddr,
3686 fault_phys_offset,
3687 m, *prot, fault_type,
3688 wired, pmap_options);
3689 }
3690 #endif /* CODE_SIGNING_MONITOR */
3691
3692 return kr;
3693 }
3694
3695 /*
3696 * Enter the given page into the pmap.
3697 * The map must be locked shared.
3698 * The vm object must NOT be locked.
3699 *
3700 * @param need_retry if not null, avoid making a (potentially) blocking call into
3701 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3702 */
3703 static kern_return_t
3704 vm_fault_pmap_enter(
3705 pmap_t pmap,
3706 vm_map_offset_t vaddr,
3707 vm_map_size_t fault_page_size,
3708 vm_map_offset_t fault_phys_offset,
3709 vm_page_t m,
3710 vm_prot_t *prot,
3711 vm_prot_t caller_prot,
3712 vm_prot_t fault_type,
3713 bool wired,
3714 int pmap_options,
3715 boolean_t *need_retry)
3716 {
3717 kern_return_t kr;
3718 if (need_retry != NULL) {
3719 /*
3720 * Although we don't hold a lock on this object, we hold a lock
3721 * on the top object in the chain. To prevent a deadlock, we
3722 * can't allow the pmap layer to block.
3723 */
3724 pmap_options |= PMAP_OPTIONS_NOWAIT;
3725 }
3726 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3727 fault_page_size, fault_phys_offset,
3728 m, prot, caller_prot, fault_type, wired, pmap_options);
3729 if (kr == KERN_RESOURCE_SHORTAGE) {
3730 if (need_retry) {
3731 /*
3732 * There's nothing we can do here since we hold the
3733 * lock on the top object in the chain. The caller
3734 * will need to deal with this by dropping that lock and retrying.
3735 */
3736 *need_retry = TRUE;
3737 vm_pmap_enter_retried++;
3738 }
3739 }
3740 return kr;
3741 }
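/*
 * Editorial sketch (not from the original source): with a non-NULL need_retry,
 * PMAP_OPTIONS_NOWAIT is forced and a KERN_RESOURCE_SHORTAGE from the pmap
 * layer surfaces as *need_retry == TRUE instead of blocking:
 *
 *	boolean_t need_retry = FALSE;
 *	kr = vm_fault_pmap_enter(pmap, vaddr, fault_page_size, fault_phys_offset,
 *	    m, &prot, caller_prot, fault_type, wired, pmap_options, &need_retry);
 *	if (need_retry) {
 *		// drop the top-object lock, then retry the fault from the top
 *	}
 */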
3742
3743 /*
3744 * Enter the given page into the pmap.
3745 * The vm map must be locked shared.
3746 * The vm object must be locked exclusive, unless this is a soft fault.
3747 * For a soft fault, the object must be locked shared or exclusive.
3748 *
3749 * @param need_retry if not null, avoid making a (potentially) blocking call into
3750 * the pmap layer. When such a call would be necessary, return true in this boolean instead.
3751 */
3752 static kern_return_t
3753 vm_fault_pmap_enter_with_object_lock(
3754 vm_object_t object,
3755 pmap_t pmap,
3756 vm_map_offset_t vaddr,
3757 vm_map_size_t fault_page_size,
3758 vm_map_offset_t fault_phys_offset,
3759 vm_page_t m,
3760 vm_prot_t *prot,
3761 vm_prot_t caller_prot,
3762 vm_prot_t fault_type,
3763 bool wired,
3764 int pmap_options,
3765 boolean_t *need_retry,
3766 uint8_t *object_lock_type)
3767 {
3768 kern_return_t kr;
3769 /*
3770 * Prevent a deadlock by not
3771 * holding the object lock if we need to wait for a page in
3772 * pmap_enter() - <rdar://problem/7138958>
3773 */
3774 kr = vm_fault_attempt_pmap_enter(pmap, vaddr,
3775 fault_page_size, fault_phys_offset,
3776 m, prot, caller_prot, fault_type, wired, pmap_options | PMAP_OPTIONS_NOWAIT);
3777 #if __x86_64__
3778 if (kr == KERN_INVALID_ARGUMENT &&
3779 pmap == PMAP_NULL &&
3780 wired) {
3781 /*
3782 * Wiring a page in a pmap-less VM map:
3783 * VMware's "vmmon" kernel extension does this
3784 * to grab pages.
3785 * Let it proceed even though the PMAP_ENTER() failed.
3786 */
3787 kr = KERN_SUCCESS;
3788 }
3789 #endif /* __x86_64__ */
3790
3791 if (kr == KERN_RESOURCE_SHORTAGE) {
3792 if (need_retry) {
3793 /*
3794 * this will be non-null in the case where we hold the lock
3795 * on the top-object in this chain... we can't just drop
3796 * the lock on the object we're inserting the page into
3797 * and recall the PMAP_ENTER since we can still cause
3798 * a deadlock if one of the critical paths tries to
3799 * acquire the lock on the top-object and we're blocked
3800 * in PMAP_ENTER waiting for memory... our only recourse
3801 * is to deal with it at a higher level where we can
3802 * drop both locks.
3803 */
3804 *need_retry = TRUE;
3805 vm_pmap_enter_retried++;
3806 goto done;
3807 }
3808 /*
3809 * The nonblocking version of pmap_enter did not succeed,
3810 * and we don't need to drop other locks and retry
3811 * at the level above us, so
3812 * use the blocking version instead. This requires marking
3813 * the page busy and unlocking the object.
3814 */
3815 boolean_t was_busy = m->vmp_busy;
3816
3817 vm_object_lock_assert_exclusive(object);
3818
3819 m->vmp_busy = TRUE;
3820 vm_object_unlock(object);
3821
3822 kr = pmap_enter_options_check(pmap, vaddr,
3823 fault_phys_offset,
3824 m, *prot, fault_type,
3825 wired, pmap_options);
3826
3827 assert(VM_PAGE_OBJECT(m) == object);
3828
3829 /* Take the object lock again. */
3830 vm_object_lock(object);
3831
3832 /* If the page was busy, someone else will wake it up.
3833 * Otherwise, we have to do it now. */
3834 assert(m->vmp_busy);
3835 if (!was_busy) {
3836 vm_page_wakeup_done(object, m);
3837 }
3838 vm_pmap_enter_blocked++;
3839 }
3840
3841 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
3842 if ((*prot & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
3843 if (*object_lock_type == OBJECT_LOCK_SHARED) {
3844 boolean_t was_busy = m->vmp_busy;
3845 m->vmp_busy = TRUE;
3846
3847 *object_lock_type = OBJECT_LOCK_EXCLUSIVE;
3848
3849 if (vm_object_lock_upgrade(object) == FALSE) {
3850 vm_object_lock(object);
3851 }
3852
3853 if (!was_busy) {
3854 vm_page_wakeup_done(object, m);
3855 }
3856 }
3857 vm_object_lock_assert_exclusive(object);
3858 vm_page_lockspin_queues();
3859 m->vmp_unmodified_ro = false;
3860 vm_page_unlock_queues();
3861 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
3862
3863 vm_object_compressor_pager_state_clr(VM_PAGE_OBJECT(m), m->vmp_offset);
3864 }
3865 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3866 #pragma unused(object_lock_type)
3867 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
3868
3869 done:
3870 return kr;
3871 }
3872
3873 /*
3874 * Prepare to enter a page into the pmap by checking CS, protection bits,
3875 * and setting mapped bits on the page_t.
3876 * Does not modify the page's paging queue.
3877 *
3878 * page queue lock must NOT be held
3879 * m->vmp_object must be locked
3880 *
3881 * NOTE: m->vmp_object could be locked "shared" only if we are called
3882 * from vm_fault() as part of a soft fault.
3883 */
3884 static kern_return_t
3885 vm_fault_enter_prepare(
3886 vm_page_t m,
3887 pmap_t pmap,
3888 vm_map_offset_t vaddr,
3889 vm_prot_t *prot,
3890 vm_prot_t caller_prot,
3891 vm_map_size_t fault_page_size,
3892 vm_map_offset_t fault_phys_offset,
3893 vm_prot_t fault_type,
3894 vm_object_fault_info_t fault_info,
3895 int *type_of_fault,
3896 bool *page_needs_data_sync,
3897 bool *page_needs_sleep)
3898 {
3899 kern_return_t kr;
3900 bool is_tainted = false;
3901 vm_object_t object;
3902 boolean_t cs_bypass = fault_info->cs_bypass;
3903
3904 object = VM_PAGE_OBJECT(m);
3905
3906 vm_object_lock_assert_held(object);
3907
3908 #if KASAN
3909 if (pmap == kernel_pmap) {
3910 kasan_notify_address(vaddr, PAGE_SIZE);
3911 }
3912 #endif
3913
3914 #if CODE_SIGNING_MONITOR
3915 if (csm_address_space_exempt(pmap) == KERN_SUCCESS) {
3916 cs_bypass = TRUE;
3917 }
3918 #endif
3919
3920 LCK_MTX_ASSERT(&vm_page_queue_lock, LCK_MTX_ASSERT_NOTOWNED);
3921
3922 if (*type_of_fault == DBG_ZERO_FILL_FAULT) {
3923 vm_object_lock_assert_exclusive(object);
3924 } else if ((fault_type & VM_PROT_WRITE) == 0 &&
3925 !fault_info->fi_change_wiring &&
3926 (!m->vmp_wpmapped
3927 #if VM_OBJECT_ACCESS_TRACKING
3928 || object->access_tracking
3929 #endif /* VM_OBJECT_ACCESS_TRACKING */
3930 )) {
3931 /*
3932 * This is not a "write" fault, so we
3933 * might not have taken the object lock
3934 * exclusively and we might not be able
3935 * to update the "wpmapped" bit in
3936 * vm_fault_enter().
3937 * Let's just grant read access to
3938 * the page for now and we'll
3939 * soft-fault again if we need write
3940 * access later...
3941 */
3942
3943 /* This had better not be a JIT page. */
3944 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
3945 /*
3946 * This pmap enforces extra constraints for this set of
3947 * protections, so we can't modify them.
3948 */
3949 if (!cs_bypass) {
3950 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x !cs_bypass",
3951 __FUNCTION__, pmap, (uint64_t)vaddr,
3952 *prot, fault_info->pmap_options);
3953 }
3954 } else {
3955 *prot &= ~VM_PROT_WRITE;
3956 }
3957 }
3958 if (m->vmp_pmapped == FALSE) {
3959 if (m->vmp_clustered) {
3960 if (*type_of_fault == DBG_CACHE_HIT_FAULT) {
3961 /*
3962 * found it in the cache, but this
3963 * is the first fault-in of the page (m->vmp_pmapped == FALSE)
3964 * so it must have come in as part of
3965 * a cluster... account 1 pagein against it
3966 */
3967 if (object->internal) {
3968 *type_of_fault = DBG_PAGEIND_FAULT;
3969 } else {
3970 *type_of_fault = DBG_PAGEINV_FAULT;
3971 }
3972
3973 VM_PAGE_COUNT_AS_PAGEIN(m);
3974 }
3975 VM_PAGE_CONSUME_CLUSTERED(m);
3976 }
3977 }
3978
3979 if (*type_of_fault != DBG_COW_FAULT) {
3980 DTRACE_VM2(as_fault, int, 1, (uint64_t *), NULL);
3981
3982 if (pmap == kernel_pmap) {
3983 DTRACE_VM2(kernel_asflt, int, 1, (uint64_t *), NULL);
3984 }
3985 }
3986
3987 kr = vm_fault_pmap_validate_page(pmap, m, vaddr, *prot, fault_info, page_needs_sleep);
3988 if (__improbable((kr != KERN_SUCCESS) || *page_needs_sleep)) {
3989 return kr;
3990 }
3991 kr = vm_fault_validate_cs(cs_bypass, object, m, pmap, vaddr,
3992 *prot, caller_prot, fault_page_size, fault_phys_offset,
3993 fault_info, &is_tainted);
3994 if (kr == KERN_SUCCESS) {
3995 /*
3996 * We either have a good page, or a tainted page that has been accepted by the process.
3997 * In both cases the page will be entered into the pmap.
3998 */
3999 *page_needs_data_sync = vm_fault_enter_set_mapped(object, m, *prot, fault_type);
4000 if ((fault_type & VM_PROT_WRITE) && is_tainted) {
4001 /*
4002 * This page is tainted but we're inserting it anyways.
4003 * Since it's writeable, we need to disconnect it from other pmaps
4004 * now so those processes can take note.
4005 */
4006
4007 /*
4008 * We can only get here
4009 * because of the CSE logic
4010 */
4011 assert(pmap_get_vm_map_cs_enforced(pmap));
4012 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
4013 /*
4014 * If we are faulting for a write, we can clear
4015 * the execute bit - that will ensure the page is
4016 * checked again before being executable, which
4017 * protects against a map switch.
4018 * This only happens the first time the page
4019 * gets tainted, so we won't get stuck here
4020 * to make an already writeable page executable.
4021 */
4022 if (!cs_bypass) {
4023 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, *prot)) {
4024 /*
4025 * This pmap enforces extra constraints
4026 * for this set of protections, so we
4027 * can't change the protections.
4028 */
4029 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
4030 __FUNCTION__, pmap,
4031 (uint64_t)vaddr, *prot,
4032 fault_info->pmap_options);
4033 }
4034 *prot &= ~VM_PROT_EXECUTE;
4035 }
4036 }
4037 assert(VM_PAGE_OBJECT(m) == object);
4038
4039 #if VM_OBJECT_ACCESS_TRACKING
4040 if (object->access_tracking) {
4041 DTRACE_VM2(access_tracking, vm_map_offset_t, vaddr, int, fault_type);
4042 if (fault_type & VM_PROT_WRITE) {
4043 object->access_tracking_writes++;
4044 vm_object_access_tracking_writes++;
4045 } else {
4046 object->access_tracking_reads++;
4047 vm_object_access_tracking_reads++;
4048 }
4049 }
4050 #endif /* VM_OBJECT_ACCESS_TRACKING */
4051 }
4052
4053 return kr;
4054 }
4055
4056 /*
4057 * page queue lock must NOT be held
4058 * m->vmp_object must be locked
4059 *
4060 * NOTE: m->vmp_object could be locked "shared" only if we are called
4061 * from vm_fault() as part of a soft fault. If so, we must be
4062 * careful not to modify the VM object in any way that is not
4063 * legal under a shared lock...
4064 */
4065 kern_return_t
4066 vm_fault_enter(
4067 vm_page_t m,
4068 pmap_t pmap,
4069 vm_map_offset_t vaddr,
4070 vm_map_size_t fault_page_size,
4071 vm_map_offset_t fault_phys_offset,
4072 vm_prot_t prot,
4073 vm_prot_t caller_prot,
4074 boolean_t wired,
4075 vm_tag_t wire_tag,
4076 vm_object_fault_info_t fault_info,
4077 boolean_t *need_retry,
4078 int *type_of_fault,
4079 uint8_t *object_lock_type,
4080 bool *page_needs_sleep)
4081 {
4082 kern_return_t kr;
4083 vm_object_t object;
4084 bool page_needs_data_sync;
4085 vm_prot_t fault_type;
4086 int pmap_options = fault_info->pmap_options;
4087
4088 if (vm_page_is_guard(m)) {
4089 return KERN_SUCCESS;
4090 }
4091
4092 fault_type = fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot;
4093
4094 assertf(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL, "m=%p", m);
4095 kr = vm_fault_enter_prepare(m, pmap, vaddr, &prot, caller_prot,
4096 fault_page_size, fault_phys_offset, fault_type,
4097 fault_info, type_of_fault, &page_needs_data_sync, page_needs_sleep);
4098 object = VM_PAGE_OBJECT(m);
4099
4100 vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, type_of_fault, kr);
4101
4102 if (__probable((kr == KERN_SUCCESS) && !(*page_needs_sleep))) {
4103 if (page_needs_data_sync) {
4104 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
4105 }
4106
4107 if (fault_info->fi_xnu_user_debug && !object->code_signed) {
4108 pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
4109 }
4110
4111
4112 kr = vm_fault_pmap_enter_with_object_lock(object, pmap, vaddr,
4113 fault_page_size, fault_phys_offset, m,
4114 &prot, caller_prot, fault_type, wired, pmap_options, need_retry, object_lock_type);
4115 }
4116
4117 return kr;
4118 }
4119
4120 kern_return_t
4121 vm_pre_fault_with_info(
4122 vm_map_t map,
4123 vm_map_offset_t vaddr,
4124 vm_prot_t prot,
4125 vm_object_fault_info_t fault_info)
4126 {
4127 assert(fault_info != NULL);
4128 if (pmap_find_phys(map->pmap, vaddr) == 0) {
4129 return vm_fault_internal(map,
4130 vaddr, /* vaddr */
4131 prot, /* fault_type */
4132 VM_KERN_MEMORY_NONE, /* tag - not wiring */
4133 NULL, /* caller_pmap */
4134 0, /* caller_pmap_addr */
4135 NULL,
4136 fault_info);
4137 }
4138 return KERN_SUCCESS;
4139 }
4140
4141 /*
4142 * Fault on the given vaddr iff the page is not already entered in the pmap.
4143 */
4144 kern_return_t
4145 vm_pre_fault(vm_map_offset_t vaddr, vm_prot_t prot)
4146 {
4147 struct vm_object_fault_info fault_info = {
4148 .interruptible = THREAD_UNINT,
4149 };
4150 return vm_pre_fault_with_info(current_map(), vaddr, prot, &fault_info);
4151 }
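/*
 * Editorial sketch (not from the original source): a typical use of
 * vm_pre_fault() is to populate a translation before touching user memory,
 * so that the subsequent access does not have to take a real fault.
 * The variable name below is hypothetical:
 *
 *	kern_return_t kr = vm_pre_fault(uaddr, VM_PROT_READ | VM_PROT_WRITE);
 *	if (kr != KERN_SUCCESS) {
 *		// the address cannot be faulted in; bail out
 *	}
 *
 * The fault is skipped entirely when pmap_find_phys() already reports a
 * valid translation for the address.
 */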
4152
4153 /*
4154 * Routine: vm_fault
4155 * Purpose:
4156 * Handle page faults, including pseudo-faults
4157 * used to change the wiring status of pages.
4158 * Returns:
4159 * Explicit continuations have been removed.
4160 * Implementation:
4161 * vm_fault and vm_fault_page save mucho state
4162 * in the moral equivalent of a closure. The state
4163 * structure is allocated when first entering vm_fault
4164 * and deallocated when leaving vm_fault.
4165 */
4166
4167 extern uint64_t get_current_unique_pid(void);
4168
4169 unsigned long vm_fault_collapse_total = 0;
4170 unsigned long vm_fault_collapse_skipped = 0;
4171
4172
4173 kern_return_t
4174 vm_fault_external(
4175 vm_map_t map,
4176 vm_map_offset_t vaddr,
4177 vm_prot_t fault_type,
4178 boolean_t change_wiring,
4179 int interruptible,
4180 pmap_t caller_pmap,
4181 vm_map_offset_t caller_pmap_addr)
4182 {
4183 struct vm_object_fault_info fault_info = {
4184 .interruptible = interruptible,
4185 .fi_change_wiring = change_wiring,
4186 };
4187
4188 return vm_fault_internal(map, vaddr, fault_type,
4189 change_wiring ? vm_tag_bt() : VM_KERN_MEMORY_NONE,
4190 caller_pmap, caller_pmap_addr,
4191 NULL, &fault_info);
4192 }
4193
4194 kern_return_t
4195 vm_fault(
4196 vm_map_t map,
4197 vm_map_offset_t vaddr,
4198 vm_prot_t fault_type,
4199 boolean_t change_wiring,
4200 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4201 int interruptible,
4202 pmap_t caller_pmap,
4203 vm_map_offset_t caller_pmap_addr)
4204 {
4205 struct vm_object_fault_info fault_info = {
4206 .interruptible = interruptible,
4207 .fi_change_wiring = change_wiring,
4208 };
4209
4210 return vm_fault_internal(map, vaddr, fault_type, wire_tag,
4211 caller_pmap, caller_pmap_addr,
4212 NULL, &fault_info);
4213 }
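/*
 * Editorial sketch (not from the original source): when vm_fault() is used as
 * a pseudo-fault to wire a page (change_wiring == TRUE), the caller must pass
 * a wire tag other than VM_KERN_MEMORY_NONE, per the parameter comment above.
 * VM_KERN_MEMORY_OSFMK below is only an example tag:
 *
 *	kr = vm_fault(map, vaddr, VM_PROT_READ | VM_PROT_WRITE,
 *	    TRUE,                  // change_wiring
 *	    VM_KERN_MEMORY_OSFMK,  // wire_tag
 *	    THREAD_UNINT,          // interruptible
 *	    NULL, 0);              // caller_pmap, caller_pmap_addr
 */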
4214
4215 static boolean_t
4216 current_proc_is_privileged(void)
4217 {
4218 return csproc_get_platform_binary(current_proc());
4219 }
4220
4221 uint64_t vm_copied_on_read = 0;
4222 uint64_t vm_copied_on_read_kernel_map = 0;
4223 uint64_t vm_copied_on_read_platform_map = 0;
4224
4225 /*
4226 * Cleanup after a vm_fault_enter.
4227 * At this point, the fault should either have failed (kr != KERN_SUCCESS)
4228 * or the page should be in the pmap and on the correct paging queue.
4229 *
4230 * Precondition:
4231 * map must be locked shared.
4232 * m_object must be locked.
4233 * If top_object != VM_OBJECT_NULL, it must be locked.
4234 * real_map must be locked.
4235 *
4236 * Postcondition:
4237 * map will be unlocked
4238 * m_object will be unlocked
4239 * top_object will be unlocked
4240 * If real_map != map, it will be unlocked
4241 */
4242 static void
4243 vm_fault_complete(
4244 vm_map_t map,
4245 vm_map_t real_map,
4246 vm_object_t object,
4247 vm_object_t m_object,
4248 vm_page_t m,
4249 vm_map_offset_t offset,
4250 vm_map_offset_t trace_real_vaddr,
4251 vm_object_fault_info_t fault_info,
4252 vm_prot_t caller_prot,
4253 #if CONFIG_DTRACE
4254 vm_map_offset_t real_vaddr,
4255 #else
4256 __unused vm_map_offset_t real_vaddr,
4257 #endif /* CONFIG_DTRACE */
4258 int type_of_fault,
4259 boolean_t need_retry,
4260 kern_return_t kr,
4261 ppnum_t *physpage_p,
4262 vm_prot_t prot,
4263 vm_object_t top_object,
4264 boolean_t need_collapse,
4265 vm_map_offset_t cur_offset,
4266 vm_prot_t fault_type,
4267 vm_object_t *written_on_object,
4268 memory_object_t *written_on_pager,
4269 vm_object_offset_t *written_on_offset)
4270 {
4271 int event_code = 0;
4272 vm_map_lock_assert_shared(map);
4273 vm_object_lock_assert_held(m_object);
4274 if (top_object != VM_OBJECT_NULL) {
4275 vm_object_lock_assert_held(top_object);
4276 }
4277 vm_map_lock_assert_held(real_map);
4278
4279 if (m_object->internal) {
4280 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
4281 } else if (m_object->object_is_shared_cache) {
4282 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
4283 } else {
4284 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
4285 }
4286 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | type_of_fault, m->vmp_offset, get_current_unique_pid());
4287 if (need_retry == FALSE) {
4288 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_FAST), get_current_unique_pid());
4289 }
4290 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
4291 if (kr == KERN_SUCCESS &&
4292 physpage_p != NULL) {
4293 /* for vm_map_wire_and_extract() */
4294 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
4295 if (prot & VM_PROT_WRITE) {
4296 vm_object_lock_assert_exclusive(m_object);
4297 m->vmp_dirty = TRUE;
4298 }
4299 }
4300
4301 if (top_object != VM_OBJECT_NULL) {
4302 /*
4303 * It's safe to drop the top object
4304 * now that we've done our
4305 * vm_fault_enter(). Any other fault
4306 * in progress for that virtual
4307 * address will either find our page
4308 * and translation or put in a new page
4309 * and translation.
4310 */
4311 vm_object_unlock(top_object);
4312 top_object = VM_OBJECT_NULL;
4313 }
4314
4315 if (need_collapse == TRUE) {
4316 vm_object_collapse(object, vm_object_trunc_page(offset), TRUE);
4317 }
4318
4319 if (need_retry == FALSE &&
4320 (type_of_fault == DBG_PAGEIND_FAULT || type_of_fault == DBG_PAGEINV_FAULT || type_of_fault == DBG_CACHE_HIT_FAULT)) {
4321 /*
4322 * evaluate access pattern and update state
4323 * vm_fault_deactivate_behind depends on the
4324 * state being up to date
4325 */
4326 vm_fault_is_sequential(m_object, cur_offset, fault_info->behavior);
4327
4328 vm_fault_deactivate_behind(m_object, cur_offset, fault_info->behavior);
4329 }
4330 /*
4331 * That's it, clean up and return.
4332 */
4333 if (m->vmp_busy) {
4334 vm_object_lock_assert_exclusive(m_object);
4335 vm_page_wakeup_done(m_object, m);
4336 }
4337
4338 if (need_retry == FALSE && !m_object->internal && (fault_type & VM_PROT_WRITE)) {
4339 vm_object_paging_begin(m_object);
4340
4341 assert(*written_on_object == VM_OBJECT_NULL);
4342 *written_on_object = m_object;
4343 *written_on_pager = m_object->pager;
4344 *written_on_offset = m_object->paging_offset + m->vmp_offset;
4345 }
4346 vm_object_unlock(object);
4347
4348 vm_map_unlock_read(map);
4349 if (real_map != map) {
4350 vm_map_unlock(real_map);
4351 }
4352 }
4353
4354 static inline int
4355 vm_fault_type_for_tracing(boolean_t need_copy_on_read, int type_of_fault)
4356 {
4357 if (need_copy_on_read && type_of_fault == DBG_COW_FAULT) {
4358 return DBG_COR_FAULT;
4359 }
4360 return type_of_fault;
4361 }
4362
4363 uint64_t vm_fault_resilient_media_initiate = 0;
4364 uint64_t vm_fault_resilient_media_retry = 0;
4365 uint64_t vm_fault_resilient_media_proceed = 0;
4366 uint64_t vm_fault_resilient_media_release = 0;
4367 uint64_t vm_fault_resilient_media_abort1 = 0;
4368 uint64_t vm_fault_resilient_media_abort2 = 0;
4369
4370 #if MACH_ASSERT
4371 int vm_fault_resilient_media_inject_error1_rate = 0;
4372 int vm_fault_resilient_media_inject_error1 = 0;
4373 int vm_fault_resilient_media_inject_error2_rate = 0;
4374 int vm_fault_resilient_media_inject_error2 = 0;
4375 int vm_fault_resilient_media_inject_error3_rate = 0;
4376 int vm_fault_resilient_media_inject_error3 = 0;
4377 #endif /* MACH_ASSERT */
4378
4379 kern_return_t
4380 vm_fault_internal(
4381 vm_map_t map,
4382 vm_map_offset_t vaddr,
4383 vm_prot_t caller_prot,
4384 vm_tag_t wire_tag, /* if wiring must pass tag != VM_KERN_MEMORY_NONE */
4385 pmap_t caller_pmap,
4386 vm_map_offset_t caller_pmap_addr,
4387 ppnum_t *physpage_p,
4388 vm_object_fault_info_t fault_info)
4389 {
4390 vm_map_version_t version; /* Map version for verification */
4391 boolean_t wired; /* Should mapping be wired down? */
4392 vm_object_t object; /* Top-level object */
4393 vm_object_offset_t offset; /* Top-level offset */
4394 vm_prot_t prot; /* Protection for mapping */
4395 vm_object_t old_copy_object; /* Saved copy object */
4396 uint32_t old_copy_version;
4397 vm_page_t result_page; /* Result of vm_fault_page */
4398 vm_page_t top_page; /* Placeholder page */
4399 kern_return_t kr;
4400
4401 vm_page_t m; /* Fast access to result_page */
4402 kern_return_t error_code;
4403 vm_object_t cur_object;
4404 vm_object_t m_object = NULL;
4405 vm_object_offset_t cur_offset;
4406 vm_page_t cur_m;
4407 vm_object_t new_object;
4408 int type_of_fault;
4409 pmap_t pmap;
4410 wait_interrupt_t interruptible_state;
4411 vm_map_t real_map = map;
4412 vm_map_t original_map = map;
4413 bool object_locks_dropped = FALSE;
4414 vm_prot_t fault_type;
4415 vm_prot_t original_fault_type;
4416 bool need_collapse = FALSE;
4417 boolean_t need_retry = FALSE;
4418 boolean_t *need_retry_ptr = NULL;
4419 uint8_t object_lock_type = 0;
4420 uint8_t cur_object_lock_type;
4421 vm_object_t top_object = VM_OBJECT_NULL;
4422 vm_object_t written_on_object = VM_OBJECT_NULL;
4423 memory_object_t written_on_pager = NULL;
4424 vm_object_offset_t written_on_offset = 0;
4425 int throttle_delay;
4426 int compressed_count_delta;
4427 vm_grab_options_t grab_options;
4428 bool need_copy;
4429 bool need_copy_on_read;
4430 vm_map_offset_t trace_vaddr;
4431 vm_map_offset_t trace_real_vaddr;
4432 vm_map_size_t fault_page_size;
4433 vm_map_size_t fault_page_mask;
4434 int fault_page_shift;
4435 vm_map_offset_t fault_phys_offset;
4436 vm_map_offset_t real_vaddr;
4437 bool resilient_media_retry = false;
4438 bool resilient_media_ref_transfer = false;
4439 vm_object_t resilient_media_object = VM_OBJECT_NULL;
4440 vm_object_offset_t resilient_media_offset = (vm_object_offset_t)-1;
4441 bool page_needs_data_sync = false;
4442 /*
4443 * Was the VM object contended when vm_map_lookup_and_lock_object locked it?
4444 * If so, the zero fill path will drop the lock.
4445 * NB: Ideally we would always drop the lock rather than rely on
4446 * this heuristic, but vm_object_unlock currently takes > 30 cycles.
4447 */
4448 bool object_is_contended = false;
4449
4450 vmlp_api_start(VM_FAULT_INTERNAL);
4451
4452
4453 real_vaddr = vaddr;
4454 trace_real_vaddr = vaddr;
4455
4456 /*
4457 * Some (kernel) submaps are marked with "should never fault", so that
4458 * guard pages in such submaps do not need to use fictitious
4459 * placeholders at all, while not causing ZFOD pages to be made
4460 * (which is the default behavior otherwise).
4461 *
4462 * We also want to capture the fault address easily so that the zone
4463 * allocator might present an enhanced panic log.
4464 */
4465 if (map->never_faults) {
4466 assert(map->pmap == kernel_pmap);
4467 vmlp_api_end(VM_FAULT_INTERNAL, KERN_INVALID_ADDRESS);
4468 return KERN_INVALID_ADDRESS;
4469 }
4470
4471 if (VM_MAP_PAGE_SIZE(original_map) < PAGE_SIZE) {
4472 fault_phys_offset = (vm_map_offset_t)-1;
4473 fault_page_size = VM_MAP_PAGE_SIZE(original_map);
4474 fault_page_mask = VM_MAP_PAGE_MASK(original_map);
4475 fault_page_shift = VM_MAP_PAGE_SHIFT(original_map);
4476 if (fault_page_size < PAGE_SIZE) {
4477 DEBUG4K_FAULT("map %p vaddr 0x%llx caller_prot 0x%x\n", map, (uint64_t)trace_real_vaddr, caller_prot);
4478 vaddr = vm_map_trunc_page(vaddr, fault_page_mask);
4479 }
4480 } else {
4481 fault_phys_offset = 0;
4482 fault_page_size = PAGE_SIZE;
4483 fault_page_mask = PAGE_MASK;
4484 fault_page_shift = PAGE_SHIFT;
4485 vaddr = vm_map_trunc_page(vaddr, PAGE_MASK);
4486 }
4487
4488 if (map == kernel_map) {
4489 trace_vaddr = VM_KERNEL_ADDRHIDE(vaddr);
4490 trace_real_vaddr = VM_KERNEL_ADDRHIDE(trace_real_vaddr);
4491 } else {
4492 trace_vaddr = vaddr;
4493 }
4494
4495 KDBG_RELEASE(
4496 (VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_START,
4497 ((uint64_t)trace_vaddr >> 32),
4498 trace_vaddr,
4499 (map == kernel_map));
4500
4501 if (get_preemption_level() != 0) {
4502 KDBG_RELEASE(
4503 (VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
4504 ((uint64_t)trace_vaddr >> 32),
4505 trace_vaddr,
4506 KERN_FAILURE);
4507
4508 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_NONZERO_PREEMPTION_LEVEL), 0 /* arg */);
4509 vmlp_api_end(VM_FAULT_INTERNAL, KERN_FAILURE);
4510 return KERN_FAILURE;
4511 }
4512
4513 thread_t cthread = current_thread();
4514
4515 if (cthread->th_vm_faults_disabled) {
4516 KDBG_RELEASE(
4517 (MACHDBG_CODE(DBG_MACH_VM, 2)) | DBG_FUNC_END,
4518 ((uint64_t)trace_vaddr >> 32),
4519 trace_vaddr,
4520 KERN_FAILURE);
4521 ktriage_record(thread_tid(cthread),
4522 KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM,
4523 KDBG_TRIAGE_RESERVED,
4524 KDBG_TRIAGE_VM_FAULTS_DISABLED),
4525 0 /* arg */);
4526 vmlp_api_end(VM_FAULT_INTERNAL, KERN_FAILURE);
4527 return KERN_FAILURE;
4528 }
4529
4530 bool rtfault = (cthread->sched_mode == TH_MODE_REALTIME);
4531 bool page_sleep_needed = false;
4532 uint64_t fstart = 0;
4533
4534 if (rtfault) {
4535 fstart = mach_continuous_time();
4536 }
4537
4538 assert(fault_info != NULL);
4539 interruptible_state = thread_interrupt_level(fault_info->interruptible);
4540
4541 fault_type = (fault_info->fi_change_wiring ? VM_PROT_NONE : caller_prot);
4542
4543 counter_inc(&vm_statistics_faults);
4544 counter_inc(¤t_task()->faults);
4545 original_fault_type = fault_type;
4546
4547 need_copy = FALSE;
4548 if (fault_type & VM_PROT_WRITE) {
4549 need_copy = TRUE;
4550 }
4551
4552 if (need_copy || fault_info->fi_change_wiring) {
4553 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4554 } else {
4555 object_lock_type = OBJECT_LOCK_SHARED;
4556 }
4557
4558 cur_object_lock_type = OBJECT_LOCK_SHARED;
4559
4560 if ((map == kernel_map) && (caller_prot & VM_PROT_WRITE)) {
4561 if (compressor_map) {
4562 if ((vaddr >= vm_map_min(compressor_map)) && (vaddr < vm_map_max(compressor_map))) {
4563 panic("Write fault on compressor map, va: %p type: %u bounds: %p->%p", (void *) vaddr, caller_prot, (void *) vm_map_min(compressor_map), (void *) vm_map_max(compressor_map));
4564 }
4565 }
4566 }
4567 RetryFault:
4568 assert(written_on_object == VM_OBJECT_NULL);
4569
4570 /*
4571 * assume we will hit a page in the cache
4572 * otherwise, explicitly override with
4573 * the real fault type once we determine it
4574 */
4575 type_of_fault = DBG_CACHE_HIT_FAULT;
4576
4577 /*
4578 * Find the backing store object and offset into
4579 * it to begin the search.
4580 */
4581 fault_type = original_fault_type;
4582 map = original_map;
4583 vm_map_lock_read(map);
4584
4585 if (resilient_media_retry) {
4586 /*
4587 * If we have to insert a fake zero-filled page to hide
4588 * a media failure to provide the real page, we need to
4589 * resolve any pending copy-on-write on this mapping.
4590 * VM_PROT_COPY tells vm_map_lookup_and_lock_object() to deal
4591 * with that even if this is not a "write" fault.
4592 */
4593 need_copy = TRUE;
4594 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4595 vm_fault_resilient_media_retry++;
4596 }
4597
4598 kr = vm_map_lookup_and_lock_object(&map, vaddr,
4599 (fault_type | (need_copy ? VM_PROT_COPY : 0)),
4600 object_lock_type, &version,
4601 &object, &offset, &prot, &wired,
4602 fault_info,
4603 &real_map,
4604 &object_is_contended);
4605 object_is_contended = false; /* avoid unsafe optimization */
4606
4607 if (kr != KERN_SUCCESS) {
4608 vm_map_unlock_read(map);
4609 /*
4610 * This can be seen in a crash report if indeed the
4611 * thread is crashing due to an invalid access in a non-existent
4612 * range.
4613 * Turning this OFF for now because it is noisy and not always fatal
4614 * eg prefaulting.
4615 *
4616 * if (kr == KERN_INVALID_ADDRESS) {
4617 * ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0);
4618 * }
4619 */
4620 goto done;
4621 }
4622
4623 pmap = real_map->pmap;
4624 fault_info->io_sync = FALSE;
4625 fault_info->mark_zf_absent = FALSE;
4626 fault_info->batch_pmap_op = FALSE;
4627
4628
4629 if (resilient_media_retry) {
4630 /*
4631 * We're retrying this fault after having detected a media
4632 * failure from a "resilient_media" mapping.
4633 * Check that the mapping is still pointing at the object
4634 * that just failed to provide a page.
4635 */
4636 assert(resilient_media_object != VM_OBJECT_NULL);
4637 assert(resilient_media_offset != (vm_object_offset_t)-1);
4638 if ((object != VM_OBJECT_NULL &&
4639 object == resilient_media_object &&
4640 offset == resilient_media_offset &&
4641 fault_info->resilient_media)
4642 #if MACH_ASSERT
4643 && (vm_fault_resilient_media_inject_error1_rate == 0 ||
4644 (++vm_fault_resilient_media_inject_error1 % vm_fault_resilient_media_inject_error1_rate) != 0)
4645 #endif /* MACH_ASSERT */
4646 ) {
4647 /*
4648 * This mapping still points at the same object
4649 * and is still "resilient_media": proceed in
4650 * "recovery-from-media-failure" mode, where we'll
4651 * insert a zero-filled page in the top object.
4652 */
4653 // printf("RESILIENT_MEDIA %s:%d recovering for object %p offset 0x%llx\n", __FUNCTION__, __LINE__, object, offset);
4654 vm_fault_resilient_media_proceed++;
4655 } else {
4656 /* not recovering: reset state and retry fault */
4657 // printf("RESILIENT_MEDIA %s:%d no recovery resilient %d object %p/%p offset 0x%llx/0x%llx\n", __FUNCTION__, __LINE__, fault_info->resilient_media, object, resilient_media_object, offset, resilient_media_offset);
4658 vm_object_unlock(object);
4659 if (real_map != map) {
4660 vm_map_unlock(real_map);
4661 }
4662 vm_map_unlock_read(map);
4663 /* release our extra reference on failed object */
4664 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
4665 vm_object_deallocate(resilient_media_object);
4666 resilient_media_object = VM_OBJECT_NULL;
4667 resilient_media_offset = (vm_object_offset_t)-1;
4668 resilient_media_retry = false;
4669 vm_fault_resilient_media_abort1++;
4670 goto RetryFault;
4671 }
4672 } else {
4673 assert(resilient_media_object == VM_OBJECT_NULL);
4674 resilient_media_offset = (vm_object_offset_t)-1;
4675 }
4676
4677 /*
4678 * If the page is wired, we must fault for the current protection
4679 * value, to avoid further faults.
4680 */
4681 if (wired) {
4682 fault_type = prot | VM_PROT_WRITE;
4683 }
4684 if (wired || need_copy) {
4685 /*
4686 * since we're treating this fault as a 'write'
4687 * we must hold the top object lock exclusively
4688 */
4689 if (object_lock_type == OBJECT_LOCK_SHARED) {
4690 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4691
4692 if (vm_object_lock_upgrade(object) == FALSE) {
4693 /*
4694 * couldn't upgrade, so explicitly
4695 * take the lock exclusively
4696 */
4697 vm_object_lock(object);
4698 }
4699 }
4700 }
4701
4702 #if VM_FAULT_CLASSIFY
4703 /*
4704 * Temporary data gathering code
4705 */
4706 vm_fault_classify(object, offset, fault_type);
4707 #endif
4708 /*
4709 * Fast fault code. The basic idea is to do as much as
4710 * possible while holding the map lock and object locks.
4711 * Busy pages are not used until the object lock has to
4712 * be dropped to do something (copy, zero fill, pmap enter).
4713 * Similarly, paging references aren't acquired until that
4714 * point, and object references aren't used.
4715 *
4716 * If we can figure out what to do
4717 * (zero fill, copy on write, pmap enter) while holding
4718 * the locks, then it gets done. Otherwise, we give up,
4719 * and use the original fault path (which doesn't hold
4720 * the map lock, and relies on busy pages).
4721 * The give up cases include:
4722 * - Have to talk to pager.
4723 * - Page is busy, absent or in error.
4724 * - Pager has locked out desired access.
4725 * - Fault needs to be restarted.
4726 * - Have to push page into copy object.
4727 *
4728 * The code is an infinite loop that moves one level down
4729 * the shadow chain each time. cur_object and cur_offset
4730 * refer to the current object being examined. object and offset
4731 * are the original object from the map. The loop is at the
4732 * top level if and only if object and cur_object are the same.
4733 *
4734 * Invariants: Map lock is held throughout. Lock is held on
4735 * original object and cur_object (if different) when
4736 * continuing or exiting loop.
4737 *
4738 */
4739
4740 #if defined(__arm64__)
4741 /*
4742 * Fail if reading an execute-only page in a
4743 * pmap that enforces execute-only protection.
4744 */
4745 if (fault_type == VM_PROT_READ &&
4746 (prot & VM_PROT_EXECUTE) &&
4747 !(prot & VM_PROT_READ) &&
4748 pmap_enforces_execute_only(pmap)) {
4749 vm_object_unlock(object);
4750 vm_map_unlock_read(map);
4751 if (real_map != map) {
4752 vm_map_unlock(real_map);
4753 }
4754 kr = KERN_PROTECTION_FAILURE;
4755 goto done;
4756 }
4757 #endif
4758
4759 fault_phys_offset = (vm_map_offset_t)offset - vm_map_trunc_page((vm_map_offset_t)offset, PAGE_MASK);
4760
4761 /*
4762 * If this page is to be inserted in a copy delay object
4763 * for writing, and if the object has a copy, then the
4764 * copy delay strategy is implemented in the slow fault path.
4765 */
4766 if ((object->copy_strategy == MEMORY_OBJECT_COPY_DELAY ||
4767 object->copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) &&
4768 object->vo_copy != VM_OBJECT_NULL && (fault_type & VM_PROT_WRITE)) {
4769 if (resilient_media_retry && object && object->internal) {
4770 /*
4771 * We're handling a "resilient media retry" and we
4772 * just want to insert a zero-filled page in this
4773 * top object (if there's not already a page there),
4774 * so this is not a real "write" and we want to stay
4775 * on this code path.
4776 */
4777 } else {
4778 goto handle_copy_delay;
4779 }
4780 }
4781
4782 cur_object = object;
4783 cur_offset = offset;
4784
4785 grab_options = vm_page_grab_options_for_object(object);
4786
4787 while (TRUE) {
4788 if (!cur_object->pager_created &&
4789 cur_object->phys_contiguous) { /* superpage */
4790 break;
4791 }
4792
4793 if (cur_object->blocked_access) {
4794 /*
4795 * Access to this VM object has been blocked.
4796 * Let the slow path handle it.
4797 */
4798 break;
4799 }
4800
4801 m = vm_page_lookup(cur_object, vm_object_trunc_page(cur_offset));
4802 m_object = NULL;
4803
4804 if (m != VM_PAGE_NULL) {
4805 m_object = cur_object;
4806
4807 if (__improbable(page_sleep_needed)) {
4808 /*
4809 * If a prior iteration of the loop requested vm_page_sleep(), re-validate the page
4810 * to see if it's still needed.
4811 */
4812 kr = vm_fault_pmap_validate_page(pmap, m, vaddr, prot, fault_info, &page_sleep_needed);
4813 if (__improbable(kr != KERN_SUCCESS)) {
4814 vm_map_unlock_read(map);
4815 if (real_map != map) {
4816 vm_map_unlock(real_map);
4817 }
4818 goto done;
4819 }
4820 }
4821 if (m->vmp_busy || page_sleep_needed) {
4822 page_sleep_needed = false;
4823 wait_result_t result;
4824
4825 /*
4826 * in order to vm_page_sleep(), we must
4827 * have the object that 'm' belongs to locked exclusively
4828 */
4829 if (object != cur_object) {
4830 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4831 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4832
4833 if (vm_object_lock_upgrade(cur_object) == FALSE) {
4834 /*
4835 * couldn't upgrade so go do a full retry
4836 * immediately since we can no longer be
4837 * certain about cur_object (since we
4838 * don't hold a reference on it)...
4839 * first drop the top object lock
4840 */
4841 vm_object_unlock(object);
4842
4843 vm_map_unlock_read(map);
4844 if (real_map != map) {
4845 vm_map_unlock(real_map);
4846 }
4847
4848 goto RetryFault;
4849 }
4850 }
4851 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4852 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4853
4854 if (vm_object_lock_upgrade(object) == FALSE) {
4855 /*
4856 * couldn't upgrade, so explicitly take the lock
4857 * exclusively and go relookup the page since we
4858 * will have dropped the object lock and
4859 * a different thread could have inserted
4860 * a page at this offset
4861 * no need for a full retry since we're
4862 * at the top level of the object chain
4863 */
4864 vm_object_lock(object);
4865
4866 continue;
4867 }
4868 }
4869 if ((m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) && m_object->internal) {
4870 /*
4871 * m->vmp_busy == TRUE and the object is locked exclusively;
4872 * if m->pageout_queue == TRUE after we acquire the
4873 * queues lock, we are guaranteed that it is stable on
4874 * the pageout queue and therefore reclaimable
4875 *
4876 * NOTE: this is only true for the internal pageout queue
4877 * in the compressor world
4878 */
4879 assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
4880
4881 vm_page_lock_queues();
4882
4883 if (m->vmp_q_state == VM_PAGE_ON_PAGEOUT_Q) {
4884 vm_pageout_throttle_up(m);
4885 vm_page_unlock_queues();
4886
4887 vm_page_wakeup_done(m_object, m);
4888 goto reclaimed_from_pageout;
4889 }
4890 vm_page_unlock_queues();
4891 }
4892 if (object != cur_object) {
4893 vm_object_unlock(object);
4894 }
4895
4896 vm_map_unlock_read(map);
4897 if (real_map != map) {
4898 vm_map_unlock(real_map);
4899 }
4900
4901 result = vm_page_sleep(cur_object, m, fault_info->interruptible, LCK_SLEEP_UNLOCK);
4902 if (result == THREAD_AWAKENED || result == THREAD_RESTART) {
4903 goto RetryFault;
4904 }
4905
4906 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_BUSYPAGE_WAIT_INTERRUPTED), 0 /* arg */);
4907 kr = KERN_ABORTED;
4908 goto done;
4909 }
4910 reclaimed_from_pageout:
4911 if (m->vmp_laundry) {
4912 if (object != cur_object) {
4913 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4914 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4915
4916 vm_object_unlock(object);
4917 vm_object_unlock(cur_object);
4918
4919 vm_map_unlock_read(map);
4920 if (real_map != map) {
4921 vm_map_unlock(real_map);
4922 }
4923
4924 goto RetryFault;
4925 }
4926 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
4927 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4928
4929 if (vm_object_lock_upgrade(object) == FALSE) {
4930 /*
4931 * couldn't upgrade, so explicitly take the lock
4932 * exclusively and go relookup the page since we
4933 * will have dropped the object lock and
4934 * a different thread could have inserted
4935 * a page at this offset
4936 * no need for a full retry since we're
4937 * at the top level of the object chain
4938 */
4939 vm_object_lock(object);
4940
4941 continue;
4942 }
4943 }
4944 vm_object_lock_assert_exclusive(VM_PAGE_OBJECT(m));
4945 vm_pageout_steal_laundry(m, FALSE);
4946 }
4947
4948
4949 if (vm_page_is_guard(m)) {
4950 /*
4951 * Guard page: let the slow path deal with it
4952 */
4953 break;
4954 }
4955 if (m->vmp_unusual && (m->vmp_error || m->vmp_restart ||
4956 vm_page_is_private(m) || m->vmp_absent)) {
4957 /*
4958 * Unusual case... let the slow path deal with it
4959 */
4960 break;
4961 }
4962 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(m_object)) {
4963 if (object != cur_object) {
4964 vm_object_unlock(object);
4965 }
4966 vm_map_unlock_read(map);
4967 if (real_map != map) {
4968 vm_map_unlock(real_map);
4969 }
4970 vm_object_unlock(cur_object);
4971 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
4972 kr = KERN_MEMORY_ERROR;
4973 goto done;
4974 }
4975 assert(m_object == VM_PAGE_OBJECT(m));
4976
4977 if (vm_fault_cs_need_validation(map->pmap, m, m_object,
4978 PAGE_SIZE, 0) ||
4979 (physpage_p != NULL && (prot & VM_PROT_WRITE))) {
4980 upgrade_lock_and_retry:
4981 /*
4982 * We might need to validate this page
4983 * against its code signature, so we
4984 * want to hold the VM object exclusively.
4985 */
4986 if (object != cur_object) {
4987 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
4988 vm_object_unlock(object);
4989 vm_object_unlock(cur_object);
4990
4991 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4992
4993 vm_map_unlock_read(map);
4994 if (real_map != map) {
4995 vm_map_unlock(real_map);
4996 }
4997
4998 goto RetryFault;
4999 }
5000 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
5001 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5002
5003 if (vm_object_lock_upgrade(object) == FALSE) {
5004 /*
5005 * couldn't upgrade, so explicitly take the lock
5006 * exclusively and go relookup the page since we
5007 * will have dropped the object lock and
5008 * a different thread could have inserted
5009 * a page at this offset
5010 * no need for a full retry since we're
5011 * at the top level of the object chain
5012 */
5013 vm_object_lock(object);
5014
5015 continue;
5016 }
5017 }
5018 }
5019 /*
5020 * Two cases of map in faults:
5021 * - At top level w/o copy object.
5022 * - Read fault anywhere.
5023 * --> must disallow write.
5024 */
5025
5026 if (object == cur_object && object->vo_copy == VM_OBJECT_NULL) {
5027 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5028 if ((fault_type & VM_PROT_WRITE) && m->vmp_unmodified_ro) {
5029 assert(cur_object == VM_PAGE_OBJECT(m));
5030 assert(cur_object->internal);
5031 vm_object_lock_assert_exclusive(cur_object);
5032 vm_page_lockspin_queues();
5033 m->vmp_unmodified_ro = false;
5034 vm_page_unlock_queues();
5035 os_atomic_dec(&compressor_ro_uncompressed, relaxed);
5036 vm_object_compressor_pager_state_clr(cur_object, m->vmp_offset);
5037 }
5038 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5039 goto FastPmapEnter;
5040 }
5041
5042 if (!need_copy &&
5043 !fault_info->no_copy_on_read &&
5044 cur_object != object &&
5045 !cur_object->internal &&
5046 !cur_object->pager_trusted &&
5047 !cur_object->code_signed &&
5048 vm_protect_privileged_from_untrusted &&
5049 (current_proc_is_privileged() ||
5050 vm_kernel_map_is_kernel(map) ||
5051 vm_map_is_platform_binary(map))) {
5052 /*
5053 * We're faulting on a page in "object" and
5054 * went down the shadow chain to "cur_object"
5055 * to find out that "cur_object"'s pager
5056 * is not "trusted", i.e. we can not trust it
5057 * to always return the same contents.
5058 * Since the target is a "privileged" process,
5059 * let's treat this as a copy-on-read fault, as
5060 * if it was a copy-on-write fault.
5061 * Once "object" gets a copy of this page, it
5062 * won't have to rely on "cur_object" to
5063 * provide the contents again.
5064 *
5065 * This is done by setting "need_copy" and
5066 * retrying the fault from the top with the
5067 * appropriate locking.
5068 *
5069 * Special case: if the mapping is executable
5070 * and the untrusted object is code-signed and
5071 * the process is "cs_enforced", we do not
5072 * copy-on-read because that would break
5073 * code-signing enforcement expectations (an
5074 * executable page must belong to a code-signed
5075 * object) and we can rely on code-signing
5076 * to re-validate the page if it gets evicted
5077 * and paged back in.
5078 */
5079 // printf("COPY-ON-READ %s:%d map %p va 0x%llx page %p object %p offset 0x%llx UNTRUSTED: need copy-on-read!\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5080 vm_copied_on_read++;
5081 if (!current_proc_is_privileged()) {
5082 /* not a privileged proc but still copy-on-read... */
5083 if (vm_kernel_map_is_kernel(map)) {
5084 /* ... because target map is a kernel map */
5085 vm_copied_on_read_kernel_map++;
5086 } else {
5087 /* ... because target map is "platform" */
5088 vm_copied_on_read_platform_map++;
5089 }
5090 }
5091 need_copy = TRUE;
5092
5093 vm_object_unlock(object);
5094 vm_object_unlock(cur_object);
5095 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5096 vm_map_unlock_read(map);
5097 if (real_map != map) {
5098 vm_map_unlock(real_map);
5099 }
5100 goto RetryFault;
5101 }
5102
5103 if (!(fault_type & VM_PROT_WRITE) && !need_copy) {
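/*
 * Read fault with no copy obligation pending: enter the page from
 * whichever object it was found in, but with write permission
 * stripped (unless the pmap mandates a fixed protection policy),
 * so that a later write re-drives the fault and takes the
 * copy-on-write path.
 */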
5104 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5105 /*
5106 * For a protection that the pmap cares
5107 * about, we must hand over the full
5108 * set of protections (so that the pmap
5109 * layer can apply any desired policy).
5110 * This means that cs_bypass must be
5111 * set, as this can force us to pass
5112 * RWX.
5113 */
5114 if (!fault_info->cs_bypass) {
5115 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5116 __FUNCTION__, pmap,
5117 (uint64_t)vaddr, prot,
5118 fault_info->pmap_options);
5119 }
5120 } else {
5121 prot &= ~VM_PROT_WRITE;
5122 }
5123
5124 if (object != cur_object) {
5125 /*
5126 * We still need to hold the top object
5127 * lock here to prevent a race between
5128 * a read fault (taking only "shared"
5129 * locks) and a write fault (taking
5130 * an "exclusive" lock on the top
5131 * object).
5132 * Otherwise, as soon as we release the
5133 * top lock, the write fault could
5134 * proceed and actually complete before
5135 * the read fault, and the copied page's
5136 * translation could then be overwritten
5137 * by the read fault's translation for
5138 * the original page.
5139 *
5140 * Let's just record what the top object
5141 * is and we'll release it later.
5142 */
5143 top_object = object;
5144
5145 /*
5146 * switch to the object that has the new page
5147 */
5148 object = cur_object;
5149 object_lock_type = cur_object_lock_type;
5150 }
5151 FastPmapEnter:
5152 assert(m_object == VM_PAGE_OBJECT(m));
5153
5154 if (resilient_media_retry && (prot & VM_PROT_WRITE)) {
5155 /*
5156 * We might have bypassed some copy-on-write
5157 * mechanism to get here (theoretically inserting
5158 * a zero-filled page in the top object to avoid
5159 * raising an exception on an unavailable page at
5160 * the bottom of the shadow chain).
5161 * So let's not grant write access to this page yet.
5162 * If write access is needed, the next fault should
5163 * handle any copy-on-write obligations.
5164 */
5165 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5166 /*
5167 * For a protection that the pmap cares
5168 * about, we must hand over the full
5169 * set of protections (so that the pmap
5170 * layer can apply any desired policy).
5171 * This means that cs_bypass must be
5172 * set, as this can force us to pass
5173 * RWX.
5174 */
5175 if (!fault_info->cs_bypass) {
5176 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5177 __FUNCTION__, pmap,
5178 (uint64_t)vaddr, prot,
5179 fault_info->pmap_options);
5180 }
5181 } else {
5182 prot &= ~VM_PROT_WRITE;
5183 }
5184 }
5185
5186 /*
5187 * prepare for the pmap_enter...
5188 * object and map are both locked
5189 * m contains valid data
5190 * object == m->vmp_object
5191 * cur_object == NULL or it's been unlocked
5192 * no paging references on either object or cur_object
5193 */
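/*
 * Only ask vm_fault_enter() to report a pmap-level "retry" (instead
 * of blocking in the pmap) while we still hold the top object's lock
 * or only a shared lock on "object"; on retry we drop everything,
 * pre-expand the page table below and re-drive the fault.
 */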
5194 if (top_object != VM_OBJECT_NULL || object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5195 need_retry_ptr = &need_retry;
5196 } else {
5197 need_retry_ptr = NULL;
5198 }
5199
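/*
 * For sub-page mappings (e.g. a 4K map on a 16K-page kernel),
 * "fault_phys_offset" selects which 4K chunk of the larger physical
 * page backs this mapping; it must be 4K-aligned and fall within
 * the page.
 */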
5200 if (fault_page_size < PAGE_SIZE) {
5201 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx caller pmap %p va 0x%llx pa 0x%llx (0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, caller_pmap, (uint64_t)caller_pmap_addr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
5202 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
5203 fault_phys_offset < PAGE_SIZE),
5204 "0x%llx\n", (uint64_t)fault_phys_offset);
5205 } else {
5206 assertf(fault_phys_offset == 0,
5207 "0x%llx\n", (uint64_t)fault_phys_offset);
5208 }
5209
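/*
 * Pages faulted in by realtime threads are tagged as such so that,
 * with "vm_pageout_protect_realtime" enabled, the pageout code can
 * presumably avoid reclaiming them out from under those threads.
 */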
5210 if (__improbable(rtfault &&
5211 !m->vmp_realtime &&
5212 vm_pageout_protect_realtime)) {
5213 vm_page_lock_queues();
5214 if (!m->vmp_realtime) {
5215 m->vmp_realtime = true;
5216 VM_COUNTER_INC(&vm_page_realtime_count);
5217 }
5218 vm_page_unlock_queues();
5219 }
5220 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p object=%p", m, m_object, object);
5221 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
5222 if (caller_pmap) {
5223 kr = vm_fault_enter(m,
5224 caller_pmap,
5225 caller_pmap_addr,
5226 fault_page_size,
5227 fault_phys_offset,
5228 prot,
5229 caller_prot,
5230 wired,
5231 wire_tag,
5232 fault_info,
5233 need_retry_ptr,
5234 &type_of_fault,
5235 &object_lock_type,
5236 &page_sleep_needed);
5237 } else {
5238 kr = vm_fault_enter(m,
5239 pmap,
5240 vaddr,
5241 fault_page_size,
5242 fault_phys_offset,
5243 prot,
5244 caller_prot,
5245 wired,
5246 wire_tag,
5247 fault_info,
5248 need_retry_ptr,
5249 &type_of_fault,
5250 &object_lock_type,
5251 &page_sleep_needed);
5252 }
5253
5254 vm_fault_complete(
5255 map,
5256 real_map,
5257 object,
5258 m_object,
5259 m,
5260 offset,
5261 trace_real_vaddr,
5262 fault_info,
5263 caller_prot,
5264 real_vaddr,
5265 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault),
5266 need_retry,
5267 kr,
5268 physpage_p,
5269 prot,
5270 top_object,
5271 need_collapse,
5272 cur_offset,
5273 fault_type,
5274 &written_on_object,
5275 &written_on_pager,
5276 &written_on_offset);
5277 top_object = VM_OBJECT_NULL;
5278 if (need_retry == TRUE) {
5279 /*
5280 * vm_fault_enter couldn't complete the PMAP_ENTER...
5281 * at this point we don't hold any locks so it's safe
5282 * to ask the pmap layer to expand the page table to
5283 * accommodate this mapping... once expanded, we'll
5284 * re-drive the fault which should result in vm_fault_enter
5285 * being able to successfully enter the mapping this time around
5286 */
5287 (void)pmap_enter_options(
5288 pmap, vaddr, 0, 0, 0, 0, 0,
5289 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
5290
5291 need_retry = FALSE;
5292 goto RetryFault;
5293 }
5294 if (page_sleep_needed) {
5295 goto RetryFault;
5296 }
5297 goto done;
5298 }
5299 /*
5300 * COPY ON WRITE FAULT
5301 */
5302 assert(object_lock_type == OBJECT_LOCK_EXCLUSIVE);
5303
5304 /*
5305 * If objects match, then
5306 * object->vo_copy must not be NULL (else control
5307 * would be in previous code block), and we
5308 * have a potential push into the copy object,
5309 * which we can't cope with here.
5310 */
5311 if (cur_object == object) {
5312 /*
5313 * must take the slow path to
5314 * deal with the copy push
5315 */
5316 break;
5317 }
5318
5319 /*
5320 * This is now a shadow based copy on write
5321 * fault -- it requires a copy up the shadow
5322 * chain.
5323 */
5324 assert(m_object == VM_PAGE_OBJECT(m));
5325
5326 if ((cur_object_lock_type == OBJECT_LOCK_SHARED) &&
5327 vm_fault_cs_need_validation(NULL, m, m_object,
5328 PAGE_SIZE, 0)) {
5329 goto upgrade_lock_and_retry;
5330 }
5331
5332 #if MACH_ASSERT
5333 if (resilient_media_retry &&
5334 vm_fault_resilient_media_inject_error2_rate != 0 &&
5335 (++vm_fault_resilient_media_inject_error2 % vm_fault_resilient_media_inject_error2_rate) == 0) {
5336 /* inject an error */
5337 cur_m = m;
5338 m = VM_PAGE_NULL;
5339 m_object = VM_OBJECT_NULL;
5340 break;
5341 }
5342 #endif /* MACH_ASSERT */
5343 /*
5344 * Allocate a page in the original top level
5345 * object. Give up if the allocation fails. Also
5346 * need to remember current page, as it's the
5347 * source of the copy.
5348 *
5349 * at this point we hold locks on both
5350 * object and cur_object... no need to take
5351 * paging refs or mark pages BUSY since
5352 * we don't drop either object lock until
5353 * the page has been copied and inserted
5354 */
5355 cur_m = m;
5356 m = vm_page_grab_options(grab_options);
5357 m_object = NULL;
5358
5359 if (m == VM_PAGE_NULL) {
5360 /*
5361 * no free page currently available...
5362 * must take the slow path
5363 */
5364 break;
5365 }
5366
5367 /*
5368 * Now do the copy. Mark the source page busy...
5369 *
5370 * NOTE: This code holds the map lock across
5371 * the page copy.
5372 */
5373 vm_page_copy(cur_m, m);
5374 vm_page_insert(m, object, vm_object_trunc_page(offset));
5375 if (VM_MAP_PAGE_MASK(map) != PAGE_MASK) {
5376 DEBUG4K_FAULT("map %p vaddr 0x%llx page %p [%p 0x%llx] copied to %p [%p 0x%llx]\n", map, (uint64_t)vaddr, cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
5377 }
5378 m_object = object;
5379 SET_PAGE_DIRTY(m, FALSE);
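/*
 * The copy now sits at the top of the shadow chain and is marked
 * dirty so it is treated as modified anonymous memory; the original
 * page stays in "cur_object" for any other mappings that still
 * reference it.
 */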
5380
5381 /*
5382 * Now cope with the source page and object
5383 */
5384 if (os_ref_get_count_raw(&object->ref_count) > 1 &&
5385 cur_m->vmp_pmapped) {
5386 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5387 } else if (VM_MAP_PAGE_SIZE(map) < PAGE_SIZE) {
5388 /*
5389 * We've copied the full 16K page but we're
5390 * about to call vm_fault_enter() only for
5391 * the 4K chunk we're faulting on. The other
5392 * three 4K chunks in that page could still
5393 * be pmapped in this pmap.
5394 * Since the VM object layer thinks that the
5395 * entire page has been dealt with and the
5396 * original page might no longer be needed,
5397 * it might collapse/bypass the original VM
5398 * object and free its pages, which would be
5399 * bad (and would trigger pmap_verify_free()
5400 * assertions) if the other 4K chunks are still
5401 * pmapped.
5402 */
5403 /*
5404 * XXX FBDP TODO4K: to be revisited
5405 * Technically, we need to pmap_disconnect()
5406 * only the target pmap's mappings for the 4K
5407 * chunks of this 16K VM page. If other pmaps
5408 * have PTEs on these chunks, that means that
5409 * the associated VM map must have a reference
5410 * on the VM object, so no need to worry about
5411 * those.
5412 * pmap_protect() for each 4K chunk would be
5413 * better but we'd have to check which chunks
5414 * are actually mapped before and after this
5415 * one.
5416 * A full-blown pmap_disconnect() is easier
5417 * for now but not efficient.
5418 */
5419 DEBUG4K_FAULT("pmap_disconnect() page %p object %p offset 0x%llx phys 0x%x\n", cur_m, VM_PAGE_OBJECT(cur_m), cur_m->vmp_offset, VM_PAGE_GET_PHYS_PAGE(cur_m));
5420 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(cur_m));
5421 }
5422
5423 if (cur_m->vmp_clustered) {
5424 VM_PAGE_COUNT_AS_PAGEIN(cur_m);
5425 VM_PAGE_CONSUME_CLUSTERED(cur_m);
5426 vm_fault_is_sequential(cur_object, cur_offset, fault_info->behavior);
5427 }
5428 need_collapse = TRUE;
5429
5430 if (!cur_object->internal &&
5431 cur_object->copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
5432 /*
5433 * The object from which we've just
5434 * copied a page is most probably backed
5435 * by a vnode. We don't want to waste too
5436 * much time trying to collapse the VM objects
5437 * and create a bottleneck when several tasks
5438 * map the same file.
5439 */
5440 if (cur_object->vo_copy == object) {
5441 /*
5442 * Shared mapping or no COW yet.
5443 * We can never collapse a copy
5444 * object into its backing object.
5445 */
5446 need_collapse = FALSE;
5447 } else if (cur_object->vo_copy == object->shadow &&
5448 object->shadow->resident_page_count == 0) {
5449 /*
5450 * Shared mapping after a COW occurred.
5451 */
5452 need_collapse = FALSE;
5453 }
5454 }
5455 vm_object_unlock(cur_object);
5456
5457 if (need_collapse == FALSE) {
5458 vm_fault_collapse_skipped++;
5459 }
5460 vm_fault_collapse_total++;
5461
5462 type_of_fault = DBG_COW_FAULT;
5463 counter_inc(&vm_statistics_cow_faults);
5464 DTRACE_VM2(cow_fault, int, 1, (uint64_t *), NULL);
5465 counter_inc(¤t_task()->cow_faults);
5466
5467 goto FastPmapEnter;
5468 } else {
5469 /*
5470 * No page at cur_object, cur_offset... m == NULL
5471 */
5472 if (cur_object->pager_created) {
5473 vm_external_state_t compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
5474
5475 if (MUST_ASK_PAGER(cur_object, cur_offset, compressor_external_state) == TRUE) {
5476 int my_fault_type;
5477 vm_compressor_options_t c_flags = C_DONT_BLOCK;
5478 bool insert_cur_object = FALSE;
5479
5480 /*
5481 * May have to talk to a pager...
5482 * if so, take the slow path by
5483 * doing a 'break' from the while (TRUE) loop
5484 *
5485 * external_state will only be set to VM_EXTERNAL_STATE_EXISTS
5486 * if the compressor is active and the page exists there
5487 */
5488 if (compressor_external_state != VM_EXTERNAL_STATE_EXISTS) {
5489 break;
5490 }
5491
5492 if (map == kernel_map || real_map == kernel_map) {
5493 /*
5494 * can't call into the compressor with the kernel_map
5495 * lock held, since the compressor may try to operate
5496 * on the kernel map in order to return an empty c_segment
5497 */
5498 break;
5499 }
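/*
 * When the compressed page lives in a shadow object ("cur_object" !=
 * "object"): a write fault will decompress into the top object and
 * keep the compressed copy in place (C_KEEP) for other mappings,
 * while a read fault can decompress straight into "cur_object" and
 * let the compressed copy go.
 */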
5500 if (object != cur_object) {
5501 if (fault_type & VM_PROT_WRITE) {
5502 c_flags |= C_KEEP;
5503 } else {
5504 insert_cur_object = TRUE;
5505 }
5506 }
5507 if (insert_cur_object == TRUE) {
5508 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
5509 cur_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5510
5511 if (vm_object_lock_upgrade(cur_object) == FALSE) {
5512 /*
5513 * couldn't upgrade so go do a full retry
5514 * immediately since we can no longer be
5515 * certain about cur_object (since we
5516 * don't hold a reference on it)...
5517 * first drop the top object lock
5518 */
5519 vm_object_unlock(object);
5520
5521 vm_map_unlock_read(map);
5522 if (real_map != map) {
5523 vm_map_unlock(real_map);
5524 }
5525
5526 goto RetryFault;
5527 }
5528 }
5529 } else if (object_lock_type == OBJECT_LOCK_SHARED) {
5530 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5531
5532 if (object != cur_object) {
5533 /*
5534 * we can't go for the upgrade on the top
5535 * lock since the upgrade may block waiting
5536 * for readers to drain... since we hold
5537 * cur_object locked at this point, waiting
5538 * for the readers to drain would represent
5539 * a lock order inversion since the lock order
5540 * for objects is the reference order in the
5541 * shadow chain
5542 */
5543 vm_object_unlock(object);
5544 vm_object_unlock(cur_object);
5545
5546 vm_map_unlock_read(map);
5547 if (real_map != map) {
5548 vm_map_unlock(real_map);
5549 }
5550
5551 goto RetryFault;
5552 }
5553 if (vm_object_lock_upgrade(object) == FALSE) {
5554 /*
5555 * couldn't upgrade, so explicitly take the lock
5556 * exclusively and go relookup the page since we
5557 * will have dropped the object lock and
5558 * a different thread could have inserted
5559 * a page at this offset;
5560 * no need for a full retry since we're
5561 * at the top level of the object chain
5562 */
5563 vm_object_lock(object);
5564
5565 continue;
5566 }
5567 }
5568
5569 m = vm_page_grab_options(grab_options);
5570 m_object = NULL;
5571
5572 if (m == VM_PAGE_NULL) {
5573 /*
5574 * no free page currently available...
5575 * must take the slow path
5576 */
5577 break;
5578 }
5579
5580 /*
5581 * The object is and remains locked
5582 * so no need to take a
5583 * "paging_in_progress" reference.
5584 */
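/*
 * Tell vm_compressor_pager_count() whether the object owning the
 * compressed page is only share-locked, so it can update the
 * compressed-page accounting without assuming exclusive access.
 */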
5585 bool shared_lock;
5586 if ((object == cur_object &&
5587 object_lock_type == OBJECT_LOCK_EXCLUSIVE) ||
5588 (object != cur_object &&
5589 cur_object_lock_type == OBJECT_LOCK_EXCLUSIVE)) {
5590 shared_lock = FALSE;
5591 } else {
5592 shared_lock = TRUE;
5593 }
5594
5595 kr = vm_compressor_pager_get(
5596 cur_object->pager,
5597 (vm_object_trunc_page(cur_offset)
5598 + cur_object->paging_offset),
5599 VM_PAGE_GET_PHYS_PAGE(m),
5600 &my_fault_type,
5601 c_flags,
5602 &compressed_count_delta);
5603
5604 vm_compressor_pager_count(
5605 cur_object->pager,
5606 compressed_count_delta,
5607 shared_lock,
5608 cur_object);
5609
5610 if (kr != KERN_SUCCESS) {
5611 vm_page_release(m,
5612 VMP_RELEASE_NONE);
5613 m = VM_PAGE_NULL;
5614 }
5615 /*
5616 * If vm_compressor_pager_get() returns
5617 * KERN_MEMORY_FAILURE, then the
5618 * compressed data is permanently lost,
5619 * so return this error immediately.
5620 */
5621 if (kr == KERN_MEMORY_FAILURE) {
5622 if (object != cur_object) {
5623 vm_object_unlock(cur_object);
5624 }
5625 vm_object_unlock(object);
5626 vm_map_unlock_read(map);
5627 if (real_map != map) {
5628 vm_map_unlock(real_map);
5629 }
5630
5631 goto done;
5632 } else if (kr != KERN_SUCCESS) {
5633 break;
5634 }
5635 m->vmp_dirty = TRUE;
5636 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
5637 if ((fault_type & VM_PROT_WRITE) == 0) {
5638 prot &= ~VM_PROT_WRITE;
5639 /*
5640 * The page, m, has yet to be inserted
5641 * into an object. So we are fine with
5642 * the object/cur_object lock being held
5643 * shared.
5644 */
5645 vm_page_lockspin_queues();
5646 m->vmp_unmodified_ro = true;
5647 vm_page_unlock_queues();
5648 os_atomic_inc(&compressor_ro_uncompressed, relaxed);
5649 }
5650 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
5651
5652 /*
5653 * If the object is purgeable, its
5654 * owner's purgeable ledgers will be
5655 * updated in vm_page_insert() but the
5656 * page was also accounted for in a
5657 * "compressed purgeable" ledger, so
5658 * update that now.
5659 */
5660 if (object != cur_object &&
5661 !insert_cur_object) {
5662 /*
5663 * We're not going to insert
5664 * the decompressed page into
5665 * the object it came from.
5666 *
5667 * We're dealing with a
5668 * copy-on-write fault on
5669 * "object".
5670 * We're going to decompress
5671 * the page directly into the
5672 * target "object" while
5673 * keeping the compressed
5674 * page for "cur_object", so
5675 * no ledger update in that
5676 * case.
5677 */
5678 } else if (((cur_object->purgable ==
5679 VM_PURGABLE_DENY) &&
5680 (!cur_object->vo_ledger_tag)) ||
5681 (cur_object->vo_owner ==
5682 NULL)) {
5683 /*
5684 * "cur_object" is not purgeable
5685 * and is not ledger-tagged, or
5686 * there's no owner for it,
5687 * so no owner's ledgers to
5688 * update.
5689 */
5690 } else {
5691 /*
5692 * One less compressed
5693 * purgeable/tagged page for
5694 * cur_object's owner.
5695 */
5696 if (compressed_count_delta) {
5697 vm_object_owner_compressed_update(
5698 cur_object,
5699 -1);
5700 }
5701 }
5702
5703 if (insert_cur_object) {
5704 vm_page_insert(m, cur_object, vm_object_trunc_page(cur_offset));
5705 m_object = cur_object;
5706 } else {
5707 vm_page_insert(m, object, vm_object_trunc_page(offset));
5708 m_object = object;
5709 }
5710
5711 if (!HAS_DEFAULT_CACHEABILITY(m_object->wimg_bits & VM_WIMG_MASK)) {
5712 /*
5713 * If the page is not cacheable,
5714 * we can't let its contents
5715 * linger in the data cache
5716 * after the decompression.
5717 */
5718 pmap_sync_page_attributes_phys(VM_PAGE_GET_PHYS_PAGE(m));
5719 }
5720
5721 type_of_fault = my_fault_type;
5722
5723 VM_STAT_DECOMPRESSIONS();
5724
5725 if (cur_object != object) {
5726 if (insert_cur_object) {
5727 top_object = object;
5728 /*
5729 * switch to the object that has the new page
5730 */
5731 object = cur_object;
5732 object_lock_type = cur_object_lock_type;
5733 } else {
5734 vm_object_unlock(cur_object);
5735 cur_object = object;
5736 }
5737 }
5738 goto FastPmapEnter;
5739 }
5740 /*
5741 * existence map present and indicates
5742 * that the pager doesn't have this page
5743 */
5744 }
5745 if (cur_object->shadow == VM_OBJECT_NULL ||
5746 resilient_media_retry) {
5747 /*
5748 * Zero fill fault. Page gets
5749 * inserted into the original object.
5750 */
5751 if (cur_object->shadow_severed ||
5752 VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object) ||
5753 cur_object == compressor_object ||
5754 is_kernel_object(cur_object)) {
5755 if (object != cur_object) {
5756 vm_object_unlock(cur_object);
5757 }
5758 vm_object_unlock(object);
5759
5760 vm_map_unlock_read(map);
5761 if (real_map != map) {
5762 vm_map_unlock(real_map);
5763 }
5764 if (VM_OBJECT_PURGEABLE_FAULT_ERROR(cur_object)) {
5765 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_PURGEABLE_FAULT_ERROR), 0 /* arg */);
5766 }
5767
5768 if (cur_object->shadow_severed) {
5769 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_OBJECT_SHADOW_SEVERED), 0 /* arg */);
5770 }
5771
5772 kr = KERN_MEMORY_ERROR;
5773 goto done;
5774 }
5775 if (cur_object != object) {
5776 vm_object_unlock(cur_object);
5777
5778 cur_object = object;
5779 }
5780 if (object_lock_type == OBJECT_LOCK_SHARED) {
5781 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5782
5783 if (vm_object_lock_upgrade(object) == FALSE) {
5784 /*
5785 * couldn't upgrade so do a full retry on the fault
5786 * since we dropped the object lock which
5787 * could allow another thread to insert
5788 * a page at this offset
5789 */
5790 vm_map_unlock_read(map);
5791 if (real_map != map) {
5792 vm_map_unlock(real_map);
5793 }
5794
5795 goto RetryFault;
5796 }
5797 }
5798 if (!object->internal) {
5799 panic("%s:%d should not zero-fill page at offset 0x%llx in external object %p", __FUNCTION__, __LINE__, (uint64_t)offset, object);
5800 }
5801 #if MACH_ASSERT
5802 if (resilient_media_retry &&
5803 vm_fault_resilient_media_inject_error3_rate != 0 &&
5804 (++vm_fault_resilient_media_inject_error3 % vm_fault_resilient_media_inject_error3_rate) == 0) {
5805 /* inject an error */
5806 m_object = NULL;
5807 break;
5808 }
5809 #endif /* MACH_ASSERT */
5810
5811 m = vm_page_grab_options(grab_options);
5812 m_object = NULL;
5813
5814 if (m == VM_PAGE_NULL) {
5815 /*
5816 * no free page currently available...
5817 * must take the slow path
5818 */
5819 break;
5820 }
5821 m_object = object;
5822 vm_page_insert(m, m_object, vm_object_trunc_page(offset));
5823
5824 if ((prot & VM_PROT_WRITE) &&
5825 !(fault_type & VM_PROT_WRITE) &&
5826 object->vo_copy != VM_OBJECT_NULL) {
5827 /*
5828 * This is not a write fault and
5829 * we might have a copy-on-write
5830 * obligation to honor (copy object or
5831 * "needs_copy" map entry), so do not
5832 * give write access yet.
5833 * We'll need to catch the first write
5834 * to resolve the copy-on-write by
5835 * pushing this page to a copy object
5836 * or making a shadow object.
5837 */
5838 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
5839 /*
5840 * This pmap enforces extra
5841 * constraints for this set of
5842 * protections, so we can't
5843 * change the protections.
5844 * We would expect code-signing
5845 * to be bypassed in this case.
5846 */
5847 if (!fault_info->cs_bypass) {
5848 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x",
5849 __FUNCTION__,
5850 pmap,
5851 (uint64_t)vaddr,
5852 prot,
5853 fault_info->pmap_options);
5854 }
5855 } else {
5856 prot &= ~VM_PROT_WRITE;
5857 }
5858 }
5859 if (resilient_media_retry) {
5860 /*
5861 * Not a real write, so no reason to assert.
5862 * We've just allocated a new page for this
5863 * <object,offset> so we know nobody has any
5864 * PTE pointing at any previous version of this
5865 * page and no copy-on-write is involved here.
5866 * We're just inserting a page of zeroes at this
5867 * stage of the shadow chain because the pager
5868 * for the lowest object in the shadow chain
5869 * said it could not provide that page and we
5870 * want to avoid failing the fault and causing
5871 * a crash on this "resilient_media" mapping.
5872 */
5873 } else {
5874 assertf(!((fault_type & VM_PROT_WRITE) && object->vo_copy),
5875 "map %p va 0x%llx wrong path for write fault (fault_type 0x%x) on object %p with copy %p\n",
5876 map, (uint64_t)vaddr, fault_type, object, object->vo_copy);
5877 }
5878
5879 vm_object_t saved_copy_object;
5880 uint32_t saved_copy_version;
5881 saved_copy_object = object->vo_copy;
5882 saved_copy_version = object->vo_copy_version;
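/*
 * Snapshot the copy object and its version while we still hold the
 * object lock; if the lock is dropped below to zero the page
 * (contended case), these are re-checked before granting write
 * access, in case a copy-on-write situation appeared in the meantime.
 */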
5883
5884 /*
5885 * Zeroing the page and entering it into the pmap
5886 * represents a significant amount of the zero fill fault handler's work.
5887 *
5888 * To improve fault scalability, we'll drop the object lock, if it appears contended,
5889 * now that we've inserted the page into the vm object.
5890 * Before dropping the lock, we need to check protection bits and set the
5891 * mapped bits on the page. Then we can mark the page busy, drop the lock,
5892 * zero it, and do the pmap enter. We'll need to reacquire the lock
5893 * to clear the busy bit and wake up any waiters.
5894 */
5895 vm_fault_cs_clear(m);
5896 m->vmp_pmapped = TRUE;
5897 if (map->no_zero_fill) {
5898 type_of_fault = DBG_NZF_PAGE_FAULT;
5899 } else {
5900 type_of_fault = DBG_ZERO_FILL_FAULT;
5901 }
5902 {
5903 pmap_t destination_pmap;
5904 vm_map_offset_t destination_pmap_vaddr;
5905 vm_prot_t enter_fault_type;
5906 if (caller_pmap) {
5907 destination_pmap = caller_pmap;
5908 destination_pmap_vaddr = caller_pmap_addr;
5909 } else {
5910 destination_pmap = pmap;
5911 destination_pmap_vaddr = vaddr;
5912 }
5913 if (fault_info->fi_change_wiring) {
5914 enter_fault_type = VM_PROT_NONE;
5915 } else {
5916 enter_fault_type = caller_prot;
5917 }
5918 assertf(VM_PAGE_OBJECT(m) == object, "m=%p object=%p", m, object);
5919 kr = vm_fault_enter_prepare(m,
5920 destination_pmap,
5921 destination_pmap_vaddr,
5922 &prot,
5923 caller_prot,
5924 fault_page_size,
5925 fault_phys_offset,
5926 enter_fault_type,
5927 fault_info,
5928 &type_of_fault,
5929 &page_needs_data_sync,
5930 &page_sleep_needed);
5931
5932 assert(!page_sleep_needed);
5933 if (kr != KERN_SUCCESS) {
5934 goto zero_fill_cleanup;
5935 }
5936
5937 if (object_is_contended) {
5938 /*
5939 * At this point the page is in the vm object, but not on a paging queue.
5940 * Since it's accessible to another thread but its contents are invalid
5941 * (it hasn't been zeroed), mark it busy before dropping the object lock.
5942 */
5943 m->vmp_busy = TRUE;
5944 vm_object_paging_begin(object); /* keep object alive */
5945 vm_object_unlock(object);
5946 }
5947 if (type_of_fault == DBG_ZERO_FILL_FAULT) {
5948 /*
5949 * Now zero fill page...
5950 * the page is probably going to
5951 * be written soon, so don't bother
5952 * to clear the modified bit
5953 *
5954 * NOTE: This code holds the map
5955 * lock across the zero fill.
5956 */
5957 vm_page_zero_fill(
5958 m
5959 );
5960 counter_inc(&vm_statistics_zero_fill_count);
5961 DTRACE_VM2(zfod, int, 1, (uint64_t *), NULL);
5962 }
5963
5964 if (object_is_contended) {
5965 /*
5966 * It's not safe to do the pmap_enter() without holding
5967 * the object lock because its "vo_copy" could change.
5968 */
5969 object_is_contended = false; /* get out of that code path */
5970
5971 vm_object_lock(object);
5972 vm_object_paging_end(object);
5973 if (object->vo_copy != saved_copy_object ||
5974 object->vo_copy_version != saved_copy_version) {
5975 /*
5976 * The COPY_DELAY copy-on-write situation for
5977 * this VM object has changed while it was
5978 * unlocked, so do not grant write access to
5979 * this page.
5980 * The write access will fault again and we'll
5981 * resolve the copy-on-write then.
5982 */
5983 if (pmap_has_prot_policy(pmap,
5984 fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE,
5985 prot)) {
5986 /* we should not do CoW on pmap_has_prot_policy mappings */
5987 panic("%s: map %p va 0x%llx obj %p,%u saved %p,%u: unexpected CoW",
5988 __FUNCTION__,
5989 map, (uint64_t)vaddr,
5990 object, object->vo_copy_version,
5991 saved_copy_object, saved_copy_version);
5992 } else {
5993 /* the pmap layer is OK with changing the PTE's prot */
5994 prot &= ~VM_PROT_WRITE;
5995 }
5996 }
5997 }
5998
5999 if (page_needs_data_sync) {
6000 pmap_sync_page_data_phys(VM_PAGE_GET_PHYS_PAGE(m));
6001 }
6002
6003 if (top_object != VM_OBJECT_NULL) {
6004 need_retry_ptr = &need_retry;
6005 } else {
6006 need_retry_ptr = NULL;
6007 }
6008 if (fault_info->fi_xnu_user_debug &&
6009 !object->code_signed) {
6010 fault_info->pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
6011 }
6012 if (object_is_contended) {
6013 panic("object_is_contended");
6014 kr = vm_fault_pmap_enter(destination_pmap, destination_pmap_vaddr,
6015 fault_page_size, fault_phys_offset,
6016 m, &prot, caller_prot, enter_fault_type, wired,
6017 fault_info->pmap_options, need_retry_ptr);
6018 vm_object_lock(object);
6019 assertf(!((prot & VM_PROT_WRITE) && object->vo_copy),
6020 "prot 0x%x object %p copy %p\n",
6021 prot, object, object->vo_copy);
6022 } else {
6023 kr = vm_fault_pmap_enter_with_object_lock(object, destination_pmap, destination_pmap_vaddr,
6024 fault_page_size, fault_phys_offset,
6025 m, &prot, caller_prot, enter_fault_type, wired,
6026 fault_info->pmap_options, need_retry_ptr, &object_lock_type);
6027 }
6028 }
6029 zero_fill_cleanup:
6030 if (!VM_DYNAMIC_PAGING_ENABLED() &&
6031 (object->purgable == VM_PURGABLE_DENY ||
6032 object->purgable == VM_PURGABLE_NONVOLATILE ||
6033 object->purgable == VM_PURGABLE_VOLATILE)) {
6034 vm_page_lockspin_queues();
6035 if (!VM_DYNAMIC_PAGING_ENABLED()) {
6036 vm_fault_enqueue_throttled_locked(m);
6037 }
6038 vm_page_unlock_queues();
6039 }
6040 vm_fault_enqueue_page(object, m, wired, fault_info->fi_change_wiring, wire_tag, fault_info->no_cache, &type_of_fault, kr);
6041
6042 if (__improbable(rtfault &&
6043 !m->vmp_realtime &&
6044 vm_pageout_protect_realtime)) {
6045 vm_page_lock_queues();
6046 if (!m->vmp_realtime) {
6047 m->vmp_realtime = true;
6048 VM_COUNTER_INC(&vm_page_realtime_count);
6049 }
6050 vm_page_unlock_queues();
6051 }
6052 vm_fault_complete(
6053 map,
6054 real_map,
6055 object,
6056 m_object,
6057 m,
6058 offset,
6059 trace_real_vaddr,
6060 fault_info,
6061 caller_prot,
6062 real_vaddr,
6063 type_of_fault,
6064 need_retry,
6065 kr,
6066 physpage_p,
6067 prot,
6068 top_object,
6069 need_collapse,
6070 cur_offset,
6071 fault_type,
6072 &written_on_object,
6073 &written_on_pager,
6074 &written_on_offset);
6075 top_object = VM_OBJECT_NULL;
6076 if (need_retry == TRUE) {
6077 /*
6078 * vm_fault_enter couldn't complete the PMAP_ENTER...
6079 * at this point we don't hold any locks so it's safe
6080 * to ask the pmap layer to expand the page table to
6081 * accommodate this mapping... once expanded, we'll
6082 * re-drive the fault which should result in vm_fault_enter
6083 * being able to successfully enter the mapping this time around
6084 */
6085 (void)pmap_enter_options(
6086 pmap, vaddr, 0, 0, 0, 0, 0,
6087 PMAP_OPTIONS_NOENTER, NULL, PMAP_MAPPING_TYPE_INFER);
6088
6089 need_retry = FALSE;
6090 goto RetryFault;
6091 }
6092 goto done;
6093 }
6094 /*
6095 * On to the next level in the shadow chain
6096 */
6097 cur_offset += cur_object->vo_shadow_offset;
6098 new_object = cur_object->shadow;
6099 fault_phys_offset = cur_offset - vm_object_trunc_page(cur_offset);
6100
6101 /*
6102 * take the new_object's lock with the indicated state
6103 */
6104 if (cur_object_lock_type == OBJECT_LOCK_SHARED) {
6105 vm_object_lock_shared(new_object);
6106 } else {
6107 vm_object_lock(new_object);
6108 }
6109
6110 if (cur_object != object) {
6111 vm_object_unlock(cur_object);
6112 }
6113
6114 cur_object = new_object;
6115
6116 continue;
6117 }
6118 }
6119 /*
6120 * Cleanup from fast fault failure. Drop any object
6121 * lock other than original and drop map lock.
6122 */
6123 if (object != cur_object) {
6124 vm_object_unlock(cur_object);
6125 }
6126
6127 /*
6128 * must own the object lock exclusively at this point
6129 */
6130 if (object_lock_type == OBJECT_LOCK_SHARED) {
6131 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
6132
6133 if (vm_object_lock_upgrade(object) == FALSE) {
6134 /*
6135 * couldn't upgrade, so explicitly
6136 * take the lock exclusively
6137 * no need to retry the fault at this
6138 * point since "vm_fault_page" will
6139 * completely re-evaluate the state
6140 */
6141 vm_object_lock(object);
6142 }
6143 }
6144
6145 handle_copy_delay:
6146 vm_map_unlock_read(map);
6147 if (real_map != map) {
6148 vm_map_unlock(real_map);
6149 }
6150
6151 if (__improbable(object == compressor_object ||
6152 is_kernel_object(object))) {
6153 /*
6154 * These objects are explicitly managed and populated by the
6155 * kernel. The virtual ranges backed by these objects should
6156 * either have wired pages or "holes" that are not supposed to
6157 * be accessed at all until they get explicitly populated.
6158 * We should never have to resolve a fault on a mapping backed
6159 * by one of these VM objects and providing a zero-filled page
6160 * would be wrong here, so let's fail the fault and let the
6161 * caller crash or recover.
6162 */
6163 vm_object_unlock(object);
6164 kr = KERN_MEMORY_ERROR;
6165 goto done;
6166 }
6167
6168 resilient_media_ref_transfer = false;
6169 if (resilient_media_retry) {
6170 /*
6171 * We could get here if we failed to get a free page
6172 * to zero-fill and had to take the slow path again.
6173 * Reset our "recovery-from-failed-media" state.
6174 */
6175 assert(resilient_media_object != VM_OBJECT_NULL);
6176 assert(resilient_media_offset != (vm_object_offset_t)-1);
6177 /* release our extra reference on failed object */
6178 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6179 if (object == resilient_media_object) {
6180 /*
6181 * We're holding "object"'s lock, so we can't release
6182 * our extra reference at this point.
6183 * We need an extra reference on "object" anyway
6184 * (see below), so let's just transfer this reference.
6185 */
6186 resilient_media_ref_transfer = true;
6187 } else {
6188 vm_object_deallocate(resilient_media_object);
6189 }
6190 resilient_media_object = VM_OBJECT_NULL;
6191 resilient_media_offset = (vm_object_offset_t)-1;
6192 resilient_media_retry = false;
6193 vm_fault_resilient_media_abort2++;
6194 }
6195
6196 /*
6197 * Make a reference to this object to
6198 * prevent its disposal while we are messing with
6199 * it. Once we have the reference, the map is free
6200 * to be diddled. Since objects reference their
6201 * shadows (and copies), they will stay around as well.
6202 */
6203 if (resilient_media_ref_transfer) {
6204 /* we already have an extra reference on this object */
6205 resilient_media_ref_transfer = false;
6206 } else {
6207 vm_object_reference_locked(object);
6208 }
6209 vm_object_paging_begin(object);
6210
6211 set_thread_pagein_error(cthread, 0);
6212 error_code = 0;
6213
6214 result_page = VM_PAGE_NULL;
6215 vm_fault_return_t err = vm_fault_page(object, offset, fault_type,
6216 (fault_info->fi_change_wiring && !wired),
6217 FALSE, /* page not looked up */
6218 &prot, &result_page, &top_page,
6219 &type_of_fault,
6220 &error_code, map->no_zero_fill,
6221 fault_info);
6222
6223 /*
6224 * if kr != VM_FAULT_SUCCESS, then the paging reference
6225 * has been dropped and the object unlocked... the ref_count
6226 * is still held
6227 *
6228 * if kr == VM_FAULT_SUCCESS, then the paging reference
6229 * is still held along with the ref_count on the original object
6230 *
6231 * the object is returned locked with a paging reference
6232 *
6233 * if top_page != NULL, then it's BUSY and the
6234 * object it belongs to has a paging reference
6235 * but is returned unlocked
6236 */
6237 if (err != VM_FAULT_SUCCESS &&
6238 err != VM_FAULT_SUCCESS_NO_VM_PAGE) {
6239 if (err == VM_FAULT_MEMORY_ERROR &&
6240 fault_info->resilient_media) {
6241 assertf(object->internal, "object %p", object);
6242 /*
6243 * This fault failed but the mapping was
6244 * "media resilient", so we'll retry the fault in
6245 * recovery mode to get a zero-filled page in the
6246 * top object.
6247 * Keep the reference on the failing object so
6248 * that we can check that the mapping is still
6249 * pointing to it when we retry the fault.
6250 */
6251 // printf("RESILIENT_MEDIA %s:%d: object %p offset 0x%llx recover from media error 0x%x kr 0x%x top_page %p result_page %p\n", __FUNCTION__, __LINE__, object, offset, error_code, kr, top_page, result_page);
6252 assert(!resilient_media_retry); /* no double retry */
6253 assert(resilient_media_object == VM_OBJECT_NULL);
6254 assert(resilient_media_offset == (vm_object_offset_t)-1);
6255 resilient_media_retry = true;
6256 resilient_media_object = object;
6257 resilient_media_offset = offset;
6258 // printf("FBDP %s:%d resilient_media_object %p offset 0x%llx kept reference\n", __FUNCTION__, __LINE__, resilient_media_object, resilient_media_offset);
6259 vm_fault_resilient_media_initiate++;
6260 goto RetryFault;
6261 } else {
6262 /*
6263 * we didn't succeed, lose the object reference
6264 * immediately.
6265 */
6266 vm_object_deallocate(object);
6267 object = VM_OBJECT_NULL; /* no longer valid */
6268 }
6269
6270 /*
6271 * See why we failed, and take corrective action.
6272 */
6273 switch (err) {
6274 case VM_FAULT_SUCCESS:
6275 case VM_FAULT_SUCCESS_NO_VM_PAGE:
6276 /* These aren't possible but needed to make the switch exhaustive */
6277 break;
6278 case VM_FAULT_MEMORY_SHORTAGE:
6279 if (vm_page_wait((fault_info->fi_change_wiring) ?
6280 THREAD_UNINT :
6281 THREAD_ABORTSAFE)) {
6282 goto RetryFault;
6283 }
6284 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_MEMORY_SHORTAGE), 0 /* arg */);
6285 OS_FALLTHROUGH;
6286 case VM_FAULT_INTERRUPTED:
6287 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_INTERRUPTED), 0 /* arg */);
6288 kr = KERN_ABORTED;
6289 goto done;
6290 case VM_FAULT_RETRY:
6291 goto RetryFault;
6292 case VM_FAULT_MEMORY_ERROR:
6293 if (error_code) {
6294 kr = error_code;
6295 } else {
6296 kr = KERN_MEMORY_ERROR;
6297 }
6298 goto done;
6299 case VM_FAULT_BUSY:
6300 kr = KERN_ALREADY_WAITING;
6301 goto done;
6302 }
6303 }
6304 m = result_page;
6305 m_object = NULL;
6306
6307 if (m != VM_PAGE_NULL) {
6308 m_object = VM_PAGE_OBJECT(m);
6309 assert((fault_info->fi_change_wiring && !wired) ?
6310 (top_page == VM_PAGE_NULL) :
6311 ((top_page == VM_PAGE_NULL) == (m_object == object)));
6312 }
6313
6314 /*
6315 * What to do with the resulting page from vm_fault_page
6316 * if it doesn't get entered into the physical map:
6317 */
6318 #define RELEASE_PAGE(m) \
6319 MACRO_BEGIN \
6320 vm_page_wakeup_done(VM_PAGE_OBJECT(m), m); \
6321 if ( !VM_PAGE_PAGEABLE(m)) { \
6322 vm_page_lockspin_queues(); \
6323 if ( !VM_PAGE_PAGEABLE(m)) \
6324 vm_page_activate(m); \
6325 vm_page_unlock_queues(); \
6326 } \
6327 MACRO_END
6328
6329
6330 object_locks_dropped = FALSE;
6331 /*
6332 * We must verify that the maps have not changed
6333 * since our last lookup. vm_map_verify() needs the
6334 * map lock (shared) but we are holding object locks.
6335 * So we do a try_lock() first and, if that fails, we
6336 * drop the object locks and go in for the map lock again.
6337 */
6338 if (m != VM_PAGE_NULL) {
6339 old_copy_object = m_object->vo_copy;
6340 old_copy_version = m_object->vo_copy_version;
6341 } else {
6342 old_copy_object = VM_OBJECT_NULL;
6343 old_copy_version = 0;
6344 }
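/*
 * These "old copy" values let us detect, after retaking the map lock,
 * whether a copy object appeared or changed while the top object was
 * unlocked; if so, write access is withheld below and the
 * copy-on-write gets resolved by a later fault.
 */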
6345 if (!vm_map_try_lock_read(original_map)) {
6346 if (m != VM_PAGE_NULL) {
6347 vm_object_unlock(m_object);
6348 } else {
6349 vm_object_unlock(object);
6350 }
6351
6352 object_locks_dropped = TRUE;
6353
6354 vm_map_lock_read(original_map);
6355 }
6356
6357 if ((map != original_map) || !vm_map_verify(map, &version)) {
6358 if (object_locks_dropped == FALSE) {
6359 if (m != VM_PAGE_NULL) {
6360 vm_object_unlock(m_object);
6361 } else {
6362 vm_object_unlock(object);
6363 }
6364
6365 object_locks_dropped = TRUE;
6366 }
6367
6368 /*
6369 * no object locks are held at this point
6370 */
6371 vm_object_t retry_object;
6372 vm_object_offset_t retry_offset;
6373 vm_prot_t retry_prot;
6374
6375 /*
6376 * To avoid trying to write_lock the map while another
6377 * thread has it read_locked (in vm_map_pageable), we
6378 * do not try for write permission. If the page is
6379 * still writable, we will get write permission. If it
6380 * is not, or has been marked needs_copy, we enter the
6381 * mapping without write permission, and will merely
6382 * take another fault.
6383 */
6384 map = original_map;
6385
6386 kr = vm_map_lookup_and_lock_object(&map, vaddr,
6387 fault_type & ~VM_PROT_WRITE,
6388 OBJECT_LOCK_EXCLUSIVE, &version,
6389 &retry_object, &retry_offset, &retry_prot,
6390 &wired,
6391 fault_info,
6392 &real_map,
6393 NULL);
6394 pmap = real_map->pmap;
6395
6396 if (kr != KERN_SUCCESS) {
6397 vm_map_unlock_read(map);
6398
6399 if (m != VM_PAGE_NULL) {
6400 assert(VM_PAGE_OBJECT(m) == m_object);
6401
6402 /*
6403 * retake the lock so that
6404 * we can drop the paging reference
6405 * in vm_fault_cleanup and do the
6406 * vm_page_wakeup_done() in RELEASE_PAGE
6407 */
6408 vm_object_lock(m_object);
6409
6410 RELEASE_PAGE(m);
6411
6412 vm_fault_cleanup(m_object, top_page);
6413 } else {
6414 /*
6415 * retake the lock so that
6416 * we can drop the paging reference
6417 * in vm_fault_cleanup
6418 */
6419 vm_object_lock(object);
6420
6421 vm_fault_cleanup(object, top_page);
6422 }
6423 vm_object_deallocate(object);
6424
6425 if (kr == KERN_INVALID_ADDRESS) {
6426 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_ADDRESS_NOT_FOUND), 0 /* arg */);
6427 }
6428 goto done;
6429 }
6430 vm_object_unlock(retry_object);
6431
6432 if ((retry_object != object) || (retry_offset != offset)) {
6433 vm_map_unlock_read(map);
6434 if (real_map != map) {
6435 vm_map_unlock(real_map);
6436 }
6437
6438 if (m != VM_PAGE_NULL) {
6439 assert(VM_PAGE_OBJECT(m) == m_object);
6440
6441 /*
6442 * retake the lock so that
6443 * we can drop the paging reference
6444 * in vm_fault_cleanup and do the
6445 * vm_page_wakeup_done() in RELEASE_PAGE
6446 */
6447 vm_object_lock(m_object);
6448
6449 RELEASE_PAGE(m);
6450
6451 vm_fault_cleanup(m_object, top_page);
6452 } else {
6453 /*
6454 * retake the lock so that
6455 * we can drop the paging reference
6456 * in vm_fault_cleanup
6457 */
6458 vm_object_lock(object);
6459
6460 vm_fault_cleanup(object, top_page);
6461 }
6462 vm_object_deallocate(object);
6463
6464 goto RetryFault;
6465 }
6466 /*
6467 * Check whether the protection has changed or the object
6468 * has been copied while we left the map unlocked.
6469 */
6470 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, retry_prot)) {
6471 /* If the pmap layer cares, pass the full set. */
6472 prot = retry_prot;
6473 } else {
6474 prot &= retry_prot;
6475 }
6476 }
6477
6478 if (object_locks_dropped == TRUE) {
6479 if (m != VM_PAGE_NULL) {
6480 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6481 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6482 vm_object_lock(m_object);
6483 } else {
6484 vm_object_lock(object);
6485 }
6486
6487 object_locks_dropped = FALSE;
6488 }
6489
6490 if ((prot & VM_PROT_WRITE) &&
6491 m != VM_PAGE_NULL &&
6492 (m_object->vo_copy != old_copy_object ||
6493 m_object->vo_copy_version != old_copy_version)) {
6494 /*
6495 * The copy object changed while the top-level object
6496 * was unlocked, so take away write permission.
6497 */
6498 if (pmap_has_prot_policy(pmap, fault_info->pmap_options & PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE, prot)) {
6499 /*
6500 * This pmap enforces extra constraints for this set
6501 * of protections, so we can't change the protections.
6502 * This mapping should have been setup to avoid
6503 * copy-on-write since that requires removing write
6504 * access.
6505 */
6506 panic("%s: pmap %p vaddr 0x%llx prot 0x%x options 0x%x m%p obj %p copyobj %p",
6507 __FUNCTION__, pmap, (uint64_t)vaddr, prot,
6508 fault_info->pmap_options,
6509 m, m_object, m_object->vo_copy);
6510 }
6511 prot &= ~VM_PROT_WRITE;
6512 }
6513
6514 if (!need_copy &&
6515 !fault_info->no_copy_on_read &&
6516 m != VM_PAGE_NULL &&
6517 VM_PAGE_OBJECT(m) != object &&
6518 !VM_PAGE_OBJECT(m)->pager_trusted &&
6519 vm_protect_privileged_from_untrusted &&
6520 !VM_PAGE_OBJECT(m)->code_signed &&
6521 current_proc_is_privileged()) {
6522 /*
6523 * We found the page we want in an "untrusted" VM object
6524 * down the shadow chain. Since the target is "privileged"
6525 * we want to perform a copy-on-read of that page, so that the
6526 * mapped object gets a stable copy and does not have to
6527 * rely on the "untrusted" object to provide the same
6528 * contents if the page gets reclaimed and has to be paged
6529 * in again later on.
6530 *
6531 * Special case: if the mapping is executable and the untrusted
6532 * object is code-signed and the process is "cs_enforced", we
6533 * do not copy-on-read because that would break code-signing
6534 * enforcement expectations (an executable page must belong
6535 * to a code-signed object) and we can rely on code-signing
6536 * to re-validate the page if it gets evicted and paged back in.
6537 */
6538 // printf("COPY-ON-READ %s:%d map %p vaddr 0x%llx obj %p offset 0x%llx found page %p (obj %p offset 0x%llx) UNTRUSTED -> need copy-on-read\n", __FUNCTION__, __LINE__, map, (uint64_t)vaddr, object, offset, m, VM_PAGE_OBJECT(m), m->vmp_offset);
6539 vm_copied_on_read++;
6540 need_copy_on_read = TRUE;
6541 need_copy = TRUE;
6542 } else {
6543 need_copy_on_read = FALSE;
6544 }
6545
6546 /*
6547 * If we want to wire down this page, but no longer have
6548 * adequate permissions, we must start all over.
6549 * If we decided to copy-on-read, we must also start all over.
6550 */
6551 if ((wired && (fault_type != (prot | VM_PROT_WRITE))) ||
6552 need_copy_on_read) {
6553 vm_map_unlock_read(map);
6554 if (real_map != map) {
6555 vm_map_unlock(real_map);
6556 }
6557
6558 if (m != VM_PAGE_NULL) {
6559 assert(VM_PAGE_OBJECT(m) == m_object);
6560
6561 RELEASE_PAGE(m);
6562
6563 vm_fault_cleanup(m_object, top_page);
6564 } else {
6565 vm_fault_cleanup(object, top_page);
6566 }
6567
6568 vm_object_deallocate(object);
6569
6570 goto RetryFault;
6571 }
6572 if (m != VM_PAGE_NULL) {
6573 /*
6574 * Put this page into the physical map.
6575 * We had to do the unlock above because pmap_enter
6576 * may cause other faults. The page may be on
6577 * the pageout queues. If the pageout daemon comes
6578 * across the page, it will remove it from the queues.
6579 */
6580 if (fault_page_size < PAGE_SIZE) {
6581 DEBUG4K_FAULT("map %p original %p pmap %p va 0x%llx pa 0x%llx(0x%llx+0x%llx) prot 0x%x caller_prot 0x%x\n", map, original_map, pmap, (uint64_t)vaddr, (uint64_t)((((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT) + fault_phys_offset), (uint64_t)(((pmap_paddr_t)VM_PAGE_GET_PHYS_PAGE(m)) << PAGE_SHIFT), (uint64_t)fault_phys_offset, prot, caller_prot);
6582 assertf((!(fault_phys_offset & FOURK_PAGE_MASK) &&
6583 fault_phys_offset < PAGE_SIZE),
6584 "0x%llx\n", (uint64_t)fault_phys_offset);
6585 } else {
6586 assertf(fault_phys_offset == 0,
6587 "0x%llx\n", (uint64_t)fault_phys_offset);
6588 }
6589 assertf(VM_PAGE_OBJECT(m) == m_object, "m=%p m_object=%p", m, m_object);
6590 assert(VM_PAGE_OBJECT(m) != VM_OBJECT_NULL);
6591 if (caller_pmap) {
6592 kr = vm_fault_enter(m,
6593 caller_pmap,
6594 caller_pmap_addr,
6595 fault_page_size,
6596 fault_phys_offset,
6597 prot,
6598 caller_prot,
6599 wired,
6600 wire_tag,
6601 fault_info,
6602 NULL,
6603 &type_of_fault,
6604 &object_lock_type,
6605 &page_sleep_needed);
6606 } else {
6607 kr = vm_fault_enter(m,
6608 pmap,
6609 vaddr,
6610 fault_page_size,
6611 fault_phys_offset,
6612 prot,
6613 caller_prot,
6614 wired,
6615 wire_tag,
6616 fault_info,
6617 NULL,
6618 &type_of_fault,
6619 &object_lock_type,
6620 &page_sleep_needed);
6621 }
6622 assert(VM_PAGE_OBJECT(m) == m_object);
6623
6624 {
6625 int event_code = 0;
6626
6627 if (m_object->internal) {
6628 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_INTERNAL));
6629 } else if (m_object->object_is_shared_cache) {
6630 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_SHAREDCACHE));
6631 } else {
6632 event_code = (MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_ADDR_EXTERNAL));
6633 }
6634
6635 KDBG_RELEASE(event_code | DBG_FUNC_NONE, trace_real_vaddr, (fault_info->user_tag << 16) | (caller_prot << 8) | vm_fault_type_for_tracing(need_copy_on_read, type_of_fault), m->vmp_offset, get_current_unique_pid());
6636 KDBG_FILTERED(MACHDBG_CODE(DBG_MACH_WORKINGSET, VM_REAL_FAULT_SLOW), get_current_unique_pid());
6637
6638 DTRACE_VM6(real_fault, vm_map_offset_t, real_vaddr, vm_map_offset_t, m->vmp_offset, int, event_code, int, caller_prot, int, type_of_fault, int, fault_info->user_tag);
6639 }
6640 if ((kr != KERN_SUCCESS) || page_sleep_needed) {
6641 /* abort this page fault */
6642 vm_map_unlock_read(map);
6643 if (real_map != map) {
6644 vm_map_unlock(real_map);
6645 }
6646 vm_page_wakeup_done(m_object, m);
6647 vm_fault_cleanup(m_object, top_page);
6648 vm_object_deallocate(object);
6649 if (kr != KERN_SUCCESS) {
6650 goto done;
6651 } else {
6652 goto RetryFault;
6653 }
6654 }
6655 if (physpage_p != NULL) {
6656 /* for vm_map_wire_and_extract() */
6657 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
6658 if (prot & VM_PROT_WRITE) {
6659 vm_object_lock_assert_exclusive(m_object);
6660 m->vmp_dirty = TRUE;
6661 }
6662 }
6663 } else {
6664 vm_map_entry_t entry;
6665 vm_map_offset_t laddr;
6666 vm_map_offset_t ldelta, hdelta;
6667
6668 /*
6669 * do a pmap block mapping from the physical address
6670 * in the object
6671 */
6672
6673 if (real_map != map) {
6674 vm_map_unlock(real_map);
6675 }
6676
6677 if (original_map != map) {
6678 vm_map_unlock_read(map);
6679 vm_map_lock_read(original_map);
6680 map = original_map;
6681 }
6682 real_map = map;
6683
6684 laddr = vaddr;
6685 hdelta = ldelta = (vm_map_offset_t)0xFFFFFFFFFFFFF000ULL;
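/*
 * "ldelta"/"hdelta" track how far the faulting address can extend
 * downwards/upwards while staying inside every map entry visited
 * (including submaps); they bound the size of the block mapping
 * set up below.
 */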
6686
6687 while (vm_map_lookup_entry(map, laddr, &entry)) {
6688 if (ldelta > (laddr - entry->vme_start)) {
6689 ldelta = laddr - entry->vme_start;
6690 }
6691 if (hdelta > (entry->vme_end - laddr)) {
6692 hdelta = entry->vme_end - laddr;
6693 }
6694 if (entry->is_sub_map) {
6695 vm_map_t sub_map;
6696 bool use_pmap;
6697
6698 laddr = ((laddr - entry->vme_start)
6699 + VME_OFFSET(entry));
6700 vm_map_lock_read(VME_SUBMAP(entry));
6701 sub_map = VME_SUBMAP(entry);
6702 use_pmap = entry->use_pmap;
6703 entry = VM_MAP_ENTRY_NULL; /* not valid after unlock */
6704 if (map != real_map) {
6705 vm_map_unlock_read(map);
6706 }
6707 if (use_pmap) {
6708 vm_map_unlock_read(real_map);
6709 real_map = sub_map;
6710 }
6711 map = sub_map;
6712 } else {
6713 break;
6714 }
6715 }
6716
6717 if (vm_map_lookup_entry(map, laddr, &entry) &&
6718 (!entry->is_sub_map) &&
6719 (object != VM_OBJECT_NULL) &&
6720 (VME_OBJECT(entry) == object)) {
6721 uint16_t superpage;
6722
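/*
 * A physically contiguous object that exactly backs this map entry,
 * with the entry aligned to the object's size, can be entered as a
 * superpage mapping.
 */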
6723 if (!object->pager_created &&
6724 object->phys_contiguous &&
6725 VME_OFFSET(entry) == 0 &&
6726 (entry->vme_end - entry->vme_start == object->vo_size) &&
6727 VM_MAP_PAGE_ALIGNED(entry->vme_start, (object->vo_size - 1))) {
6728 superpage = VM_MEM_SUPERPAGE;
6729 } else {
6730 superpage = 0;
6731 }
6732
6733 if (superpage && physpage_p) {
6734 /* for vm_map_wire_and_extract() */
6735 *physpage_p = (ppnum_t)
6736 ((((vm_map_offset_t)
6737 object->vo_shadow_offset)
6738 + VME_OFFSET(entry)
6739 + (laddr - entry->vme_start))
6740 >> PAGE_SHIFT);
6741 }
6742
6743 /*
6744 * Set up a block mapped area
6745 */
6746 assert((uint32_t)((ldelta + hdelta) >> fault_page_shift) == ((ldelta + hdelta) >> fault_page_shift));
6747 pmap_t block_map_pmap;
6748 addr64_t block_map_va;
6749 pmap_paddr_t block_map_pa = (pmap_paddr_t)(((vm_map_offset_t)(object->vo_shadow_offset)) +
6750 VME_OFFSET(entry) + (laddr - entry->vme_start) - ldelta);
6751 int block_map_wimg = VM_WIMG_MASK & (int)object->wimg_bits;
6752 if (caller_pmap) {
6753 block_map_pmap = caller_pmap;
6754 block_map_va = (addr64_t)(caller_pmap_addr - ldelta);
6755 } else {
6756 block_map_pmap = real_map->pmap;
6757 block_map_va = (addr64_t)(vaddr - ldelta);
6758 }
6759 kr = pmap_map_block_addr(block_map_pmap,
6760 block_map_va,
6761 block_map_pa,
6762 (uint32_t)((ldelta + hdelta) >> fault_page_shift),
6763 prot,
6764 block_map_wimg | superpage,
6765 0);
6766
6767 if (kr != KERN_SUCCESS) {
6768 goto cleanup;
6769 }
6770 }
6771 }
6772
6773 /*
6774 * Success
6775 */
6776 kr = KERN_SUCCESS;
6777
6778 /*
6779 * TODO: could most of the done cases just use cleanup?
6780 */
6781 cleanup:
6782 /*
6783 * Unlock everything, and return
6784 */
6785 vm_map_unlock_read(map);
6786 if (real_map != map) {
6787 vm_map_unlock(real_map);
6788 }
6789
6790 if (m != VM_PAGE_NULL) {
6791 if (__improbable(rtfault &&
6792 !m->vmp_realtime &&
6793 vm_pageout_protect_realtime)) {
6794 vm_page_lock_queues();
6795 if (!m->vmp_realtime) {
6796 m->vmp_realtime = true;
6797 VM_COUNTER_INC(&vm_page_realtime_count);
6798 }
6799 vm_page_unlock_queues();
6800 }
6801 assert(VM_PAGE_OBJECT(m) == m_object);
6802
6803 if (!m_object->internal && (fault_type & VM_PROT_WRITE)) {
6804 vm_object_paging_begin(m_object);
6805
6806 assert(written_on_object == VM_OBJECT_NULL);
6807 written_on_object = m_object;
6808 written_on_pager = m_object->pager;
6809 written_on_offset = m_object->paging_offset + m->vmp_offset;
6810 }
6811 vm_page_wakeup_done(m_object, m);
6812
6813 vm_fault_cleanup(m_object, top_page);
6814 } else {
6815 vm_fault_cleanup(object, top_page);
6816 }
6817
6818 vm_object_deallocate(object);
6819
6820 #undef RELEASE_PAGE
6821
6822 done:
6823 thread_interrupt_level(interruptible_state);
6824
6825 if (resilient_media_object != VM_OBJECT_NULL) {
6826 assert(resilient_media_retry);
6827 assert(resilient_media_offset != (vm_object_offset_t)-1);
6828 /* release extra reference on failed object */
6829 // printf("FBDP %s:%d resilient_media_object %p deallocate\n", __FUNCTION__, __LINE__, resilient_media_object);
6830 vm_object_deallocate(resilient_media_object);
6831 resilient_media_object = VM_OBJECT_NULL;
6832 resilient_media_offset = (vm_object_offset_t)-1;
6833 resilient_media_retry = false;
6834 vm_fault_resilient_media_release++;
6835 }
6836 assert(!resilient_media_retry);
6837
6838 /*
6839 * Only I/O throttle on faults which cause a pagein/swapin.
6840 */
6841 if ((type_of_fault == DBG_PAGEIND_FAULT) || (type_of_fault == DBG_PAGEINV_FAULT) || (type_of_fault == DBG_COMPRESSOR_SWAPIN_FAULT)) {
6842 throttle_lowpri_io(1);
6843 } else {
6844 if (kr == KERN_SUCCESS && type_of_fault != DBG_CACHE_HIT_FAULT && type_of_fault != DBG_GUARD_FAULT) {
6845 if ((throttle_delay = vm_page_throttled(TRUE))) {
6846 if (vm_debug_events) {
6847 if (type_of_fault == DBG_COMPRESSOR_FAULT) {
6848 VM_DEBUG_EVENT(vmf_compressordelay, DBG_VM_FAULT_COMPRESSORDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6849 } else if (type_of_fault == DBG_COW_FAULT) {
6850 VM_DEBUG_EVENT(vmf_cowdelay, DBG_VM_FAULT_COWDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6851 } else {
6852 VM_DEBUG_EVENT(vmf_zfdelay, DBG_VM_FAULT_ZFDELAY, DBG_FUNC_NONE, throttle_delay, 0, 0, 0);
6853 }
6854 }
6855 __VM_FAULT_THROTTLE_FOR_PAGEOUT_SCAN__(throttle_delay);
6856 }
6857 }
6858 }
6859
6860 if (written_on_object) {
6861 vnode_pager_dirtied(written_on_pager, written_on_offset, written_on_offset + PAGE_SIZE_64);
6862
6863 vm_object_lock(written_on_object);
6864 vm_object_paging_end(written_on_object);
6865 vm_object_unlock(written_on_object);
6866
6867 written_on_object = VM_OBJECT_NULL;
6868 }
6869
6870 if (rtfault) {
6871 vm_record_rtfault(cthread, fstart, trace_vaddr, type_of_fault);
6872 }
6873
6874 KDBG_RELEASE(
6875 (VMDBG_CODE(DBG_VM_FAULT_INTERNAL)) | DBG_FUNC_END,
6876 ((uint64_t)trace_vaddr >> 32),
6877 trace_vaddr,
6878 kr,
6879 vm_fault_type_for_tracing(need_copy_on_read, type_of_fault));
6880
6881 if (fault_page_size < PAGE_SIZE && kr != KERN_SUCCESS) {
6882 DEBUG4K_FAULT("map %p original %p vaddr 0x%llx -> 0x%x\n", map, original_map, (uint64_t)trace_real_vaddr, kr);
6883 }
6884
6885 vmlp_api_end(VM_FAULT_INTERNAL, KERN_FAILURE);
6886 return kr;
6887 }
6888
6889 /*
6890 * vm_fault_wire:
6891 *
6892 * Wire down a range of virtual addresses in a map.
6893 */
6894 kern_return_t
6895 vm_fault_wire(
6896 vm_map_t map,
6897 vm_map_entry_t entry,
6898 vm_prot_t prot,
6899 vm_tag_t wire_tag,
6900 pmap_t pmap,
6901 vm_map_offset_t pmap_addr,
6902 ppnum_t *physpage_p)
6903 {
6904 vm_map_offset_t va;
6905 vm_map_offset_t end_addr = entry->vme_end;
6906 kern_return_t rc;
6907 vm_map_size_t effective_page_size;
6908
6909 assert(entry->in_transition);
6910
6911 if (!entry->is_sub_map &&
6912 VME_OBJECT(entry) != VM_OBJECT_NULL &&
6913 VME_OBJECT(entry)->phys_contiguous) {
6914 return KERN_SUCCESS;
6915 }
6916
6917 /*
6918 * Inform the physical mapping system that the
6919 * range of addresses may not fault, so that
6920 * page tables and such can be locked down as well.
6921 */
6922
6923 pmap_pageable(pmap, pmap_addr,
6924 pmap_addr + (end_addr - entry->vme_start), FALSE);
6925
6926 /*
6927 * We simulate a fault to get the page and enter it
6928 * in the physical map.
6929 */
6930
6931 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
6932 for (va = entry->vme_start;
6933 va < end_addr;
6934 va += effective_page_size) {
6935 rc = vm_fault_wire_fast(map, va, prot, wire_tag, entry, pmap,
6936 pmap_addr + (va - entry->vme_start),
6937 physpage_p);
6938 if (rc != KERN_SUCCESS) {
6939 struct vm_object_fault_info fault_info = {
6940 .interruptible = (pmap == kernel_pmap) ? THREAD_UNINT : THREAD_ABORTSAFE,
6941 .behavior = VM_BEHAVIOR_SEQUENTIAL,
6942 .fi_change_wiring = true,
6943 };
6944 if (os_sub_overflow(end_addr, va, &fault_info.cluster_size)) {
6945 fault_info.cluster_size = UPL_SIZE_MAX;
6946 }
6947 rc = vm_fault_internal(map, va, prot, wire_tag,
6948 pmap,
6949 (pmap_addr +
6950 (va - entry->vme_start)),
6951 physpage_p,
6952 &fault_info);
6953 DTRACE_VM2(softlock, int, 1, (uint64_t *), NULL);
6954 }
6955
6956 if (rc != KERN_SUCCESS) {
6957 struct vm_map_entry tmp_entry = *entry;
6958
6959 /* unwire wired pages */
6960 tmp_entry.vme_end = va;
6961 vm_fault_unwire(map, &tmp_entry, FALSE,
6962 pmap, pmap_addr, tmp_entry.vme_end);
6963
6964 return rc;
6965 }
6966 }
6967 return KERN_SUCCESS;
6968 }
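/*
 * Illustrative sketch (simplified, not from this file): a wiring path such
 * as vm_map_wire() typically marks the entry in_transition, drops the map
 * lock, and then drives vm_fault_wire() over the entry, roughly:
 *
 *      entry->in_transition = TRUE;
 *      vm_map_unlock(map);
 *      rc = vm_fault_wire(map, entry, caller_prot, tag,
 *          map->pmap, entry->vme_start, NULL);
 *      vm_map_lock(map);
 *      entry->in_transition = FALSE;
 *      if (rc != KERN_SUCCESS) {
 *              ... back out the wire counts ...
 *      }
 *
 * "caller_prot" and "tag" are stand-ins for the caller's values; the sketch
 * only shows the calling convention assumed by the
 * assert(entry->in_transition) above.
 */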
6969
6970 /*
6971 * vm_fault_unwire:
6972 *
6973 * Unwire a range of virtual addresses in a map.
6974 */
6975 void
6976 vm_fault_unwire(
6977 vm_map_t map,
6978 vm_map_entry_t entry,
6979 boolean_t deallocate,
6980 pmap_t pmap,
6981 vm_map_offset_t pmap_addr,
6982 vm_map_offset_t end_addr)
6983 {
6984 vm_map_offset_t va;
6985 vm_object_t object;
6986 struct vm_object_fault_info fault_info = {
6987 .interruptible = THREAD_UNINT,
6988 };
6989 unsigned int unwired_pages;
6990 vm_map_size_t effective_page_size;
6991
6992 object = (entry->is_sub_map) ? VM_OBJECT_NULL : VME_OBJECT(entry);
6993
6994 /*
6995 * If it's marked phys_contiguous, then vm_fault_wire() didn't actually
6996 * do anything since such memory is wired by default. So we don't have
6997 * anything to undo here.
6998 */
6999
7000 if (object != VM_OBJECT_NULL && object->phys_contiguous) {
7001 return;
7002 }
7003
7004 fault_info.interruptible = THREAD_UNINT;
7005 fault_info.behavior = entry->behavior;
7006 fault_info.user_tag = VME_ALIAS(entry);
7007 if (entry->iokit_acct ||
7008 (!entry->is_sub_map && !entry->use_pmap)) {
7009 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
7010 }
7011 fault_info.lo_offset = VME_OFFSET(entry);
7012 fault_info.hi_offset = (entry->vme_end - entry->vme_start) + VME_OFFSET(entry);
7013 fault_info.no_cache = entry->no_cache;
7014 fault_info.stealth = TRUE;
7015 if (entry->vme_xnu_user_debug) {
7016 /*
7017 * Modified code-signed executable region: wired pages must
7018 * have been copied, so they should be XNU_USER_DEBUG rather
7019 * than XNU_USER_EXEC.
7020 */
7021 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
7022 }
7023
7024 unwired_pages = 0;
7025
7026 /*
7027 * Since the pages are wired down, we must be able to
7028 * get their mappings from the physical map system.
7029 */
7030
7031 effective_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
7032 for (va = entry->vme_start;
7033 va < end_addr;
7034 va += effective_page_size) {
7035 if (object == VM_OBJECT_NULL) {
7036 if (pmap) {
7037 pmap_change_wiring(pmap,
7038 pmap_addr + (va - entry->vme_start), FALSE);
7039 }
7040 (void) vm_fault(map, va, VM_PROT_NONE,
7041 TRUE, VM_KERN_MEMORY_NONE, THREAD_UNINT, pmap, pmap_addr);
7042 } else {
7043 vm_prot_t prot;
7044 vm_page_t result_page;
7045 vm_page_t top_page;
7046 vm_object_t result_object;
7047 vm_fault_return_t result;
7048
7049 /* cap cluster size at maximum UPL size */
7050 upl_size_t cluster_size;
7051 if (os_sub_overflow(end_addr, va, &cluster_size)) {
7052 cluster_size = UPL_SIZE_MAX;
7053 }
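/*
 * Worked example (illustrative): if end_addr - va is, say, 0x180000000
 * (6 GB still to process), the difference does not fit in the 32-bit
 * upl_size_t, os_sub_overflow() reports overflow, and the cluster is
 * clamped to UPL_SIZE_MAX; for a small remainder such as 0x8000 the
 * subtraction fits and cluster_size is simply 0x8000.
 */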
7054 fault_info.cluster_size = cluster_size;
7055
7056 do {
7057 prot = VM_PROT_NONE;
7058
7059 vm_object_lock(object);
7060 vm_object_paging_begin(object);
7061 result_page = VM_PAGE_NULL;
7062 result = vm_fault_page(
7063 object,
7064 (VME_OFFSET(entry) +
7065 (va - entry->vme_start)),
7066 VM_PROT_NONE, TRUE,
7067 FALSE, /* page not looked up */
7068 &prot, &result_page, &top_page,
7069 (int *)0,
7070 NULL, map->no_zero_fill,
7071 &fault_info);
7072 } while (result == VM_FAULT_RETRY);
7073
7074 /*
7075 * If this was a mapping to a file on a device that has been forcibly
7076 * unmounted, then we won't get a page back from vm_fault_page(). Just
7077 * move on to the next one in case the remaining pages are mapped from
7078 * different objects. During a forced unmount, the object is terminated
7079 * so the alive flag will be false if this happens. A forced unmount
7080 * will occur when an external disk is unplugged before the user does an
7081 * eject, so we don't want to panic in that situation.
7082 */
7083
7084 if (result == VM_FAULT_MEMORY_ERROR) {
7085 if (!object->alive) {
7086 continue;
7087 }
7088 if (!object->internal && object->pager == NULL) {
7089 continue;
7090 }
7091 }
7092
7093 if (result == VM_FAULT_MEMORY_ERROR &&
7094 is_kernel_object(object)) {
7095 /*
7096 * This must have been allocated with
7097 * KMA_KOBJECT and KMA_VAONLY and there's
7098 * no physical page at this offset.
7099 * We're done (no page to free).
7100 */
7101 assert(deallocate);
7102 continue;
7103 }
7104
7105 if (result != VM_FAULT_SUCCESS) {
7106 panic("vm_fault_unwire: failure");
7107 }
7108
7109 result_object = VM_PAGE_OBJECT(result_page);
7110
7111 if (deallocate) {
7112 assert(VM_PAGE_GET_PHYS_PAGE(result_page) !=
7113 vm_page_fictitious_addr);
7114 pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(result_page));
7115 if (VM_PAGE_WIRED(result_page)) {
7116 unwired_pages++;
7117 }
7118 VM_PAGE_FREE(result_page);
7119 } else {
7120 if (pmap && !vm_page_is_guard(result_page)) {
7121 pmap_change_wiring(pmap,
7122 pmap_addr + (va - entry->vme_start), FALSE);
7123 }
7124
7125
7126 if (VM_PAGE_WIRED(result_page)) {
7127 vm_page_lockspin_queues();
7128 vm_page_unwire(result_page, TRUE);
7129 vm_page_unlock_queues();
7130 unwired_pages++;
7131 }
7132 if (entry->zero_wired_pages &&
7133 (entry->protection & VM_PROT_WRITE) &&
7134 #if __arm64e__
7135 !entry->used_for_tpro &&
7136 #endif /* __arm64e__ */
7137 !entry->used_for_jit) {
7138 pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(result_page));
7139 }
7140
7141 vm_page_wakeup_done(result_object, result_page);
7142 }
7143 vm_fault_cleanup(result_object, top_page);
7144 }
7145 }
7146
7147 /*
7148 * Inform the physical mapping system that the range
7149 * of addresses may fault, so that page tables and
7150 * such may be unwired themselves.
7151 */
7152
7153 pmap_pageable(pmap, pmap_addr,
7154 pmap_addr + (end_addr - entry->vme_start), TRUE);
7155
7156 if (is_kernel_object(object)) {
7157 /*
7158 * We would like to make user_tag in vm_object_fault_info a
7159 * vm_tag_t (unsigned short), but user_tag derives its value from
7160 * VME_ALIAS(entry) in a few places, and VME_ALIAS, in turn, casts
7161 * to an _unsigned int_ which is used by non-fault_info paths
7162 * throughout the code in many places.
7163 *
7164 * So, for now, apply an explicit truncation to unsigned short (vm_tag_t).
7165 */
7166 assertf((fault_info.user_tag & VME_ALIAS_MASK) == fault_info.user_tag,
7167 "VM Tag truncated from 0x%x to 0x%x\n", fault_info.user_tag, (fault_info.user_tag & VME_ALIAS_MASK));
7168 vm_tag_update_size((vm_tag_t) fault_info.user_tag, -ptoa_64(unwired_pages), NULL);
7169 }
7170 }
7171
7172 /*
7173 * vm_fault_wire_fast:
7174 *
7175 * Handle common case of a wire down page fault at the given address.
7176 * If successful, the page is inserted into the associated physical map.
7177 * The map entry is passed in to avoid the overhead of a map lookup.
7178 *
7179 * NOTE: the given address should be truncated to the
7180 * proper page address.
7181 *
7182 * KERN_SUCCESS is returned if the page fault is handled; otherwise,
7183 * a standard error specifying why the fault is fatal is returned.
7184 *
7185 * The map in question must be referenced, and remains so.
7186 * Caller has a read lock on the map.
7187 *
7188 * This is a stripped version of vm_fault() for wiring pages. Anything
7189 * other than the common case will return KERN_FAILURE, and the caller
7190 * is expected to call vm_fault().
7191 */
7192 static kern_return_t
7193 vm_fault_wire_fast(
7194 __unused vm_map_t map,
7195 vm_map_offset_t va,
7196 __unused vm_prot_t caller_prot,
7197 vm_tag_t wire_tag,
7198 vm_map_entry_t entry,
7199 pmap_t pmap,
7200 vm_map_offset_t pmap_addr,
7201 ppnum_t *physpage_p)
7202 {
7203 vm_object_t object;
7204 vm_object_offset_t offset;
7205 vm_page_t m;
7206 vm_prot_t prot;
7207 thread_t thread = current_thread();
7208 int type_of_fault;
7209 kern_return_t kr;
7210 vm_map_size_t fault_page_size;
7211 vm_map_offset_t fault_phys_offset;
7212 struct vm_object_fault_info fault_info = {
7213 .interruptible = THREAD_UNINT,
7214 };
7215 uint8_t object_lock_type = 0;
7216
7217 counter_inc(&vm_statistics_faults);
7218
7219 if (thread != THREAD_NULL) {
7220 counter_inc(&get_threadtask(thread)->faults);
7221 }
7222
7223 /*
7224 * Recovery actions
7225 */
7226
7227 #undef RELEASE_PAGE
7228 #define RELEASE_PAGE(m) { \
7229 vm_page_wakeup_done(VM_PAGE_OBJECT(m), m); \
7230 vm_page_lockspin_queues(); \
7231 vm_page_unwire(m, TRUE); \
7232 vm_page_unlock_queues(); \
7233 }
7234
7235
7236 #undef UNLOCK_THINGS
7237 #define UNLOCK_THINGS { \
7238 vm_object_paging_end(object); \
7239 vm_object_unlock(object); \
7240 }
7241
7242 #undef UNLOCK_AND_DEALLOCATE
7243 #define UNLOCK_AND_DEALLOCATE { \
7244 UNLOCK_THINGS; \
7245 vm_object_deallocate(object); \
7246 }
7247 /*
7248 * Give up and have caller do things the hard way.
7249 */
7250
7251 #define GIVE_UP { \
7252 UNLOCK_AND_DEALLOCATE; \
7253 return(KERN_FAILURE); \
7254 }
7255
7256
7257 /*
7258 * If this entry is not directly to a vm_object, bail out.
7259 */
7260 if (entry->is_sub_map) {
7261 assert(physpage_p == NULL);
7262 return KERN_FAILURE;
7263 }
7264
7265 /*
7266 * Find the backing store object and offset into it.
7267 */
7268
7269 object = VME_OBJECT(entry);
7270 offset = (va - entry->vme_start) + VME_OFFSET(entry);
7271 prot = entry->protection;
7272
7273 /*
7274 * Make a reference to this object to prevent its
7275 * disposal while we are messing with it.
7276 */
7277
7278 object_lock_type = OBJECT_LOCK_EXCLUSIVE;
7279 vm_object_lock(object);
7280 vm_object_reference_locked(object);
7281 vm_object_paging_begin(object);
7282
7283 /*
7284 * INVARIANTS (through entire routine):
7285 *
7286 * 1) At all times, we must either have the object
7287 * lock or a busy page in some object to prevent
7288 * some other thread from trying to bring in
7289 * the same page.
7290 *
7291 * 2) Once we have a busy page, we must remove it from
7292 * the pageout queues, so that the pageout daemon
7293 * will not grab it away.
7294 *
7295 */
7296
7297 if (entry->needs_copy) {
7298 panic("attempting to wire needs_copy memory");
7299 }
7300
7301 /*
7302 * Since we don't have the machinery to resolve CoW obligations on the fast
7303 * path, if we might have to push pages to a copy, just give up.
7304 */
7305 if (object->vo_copy != VM_OBJECT_NULL) {
7306 GIVE_UP;
7307 }
7308
7309 /*
7310 * Look for page in top-level object. If it's not there or
7311 * there's something going on, give up.
7312 */
7313 m = vm_page_lookup(object, vm_object_trunc_page(offset));
7314 if ((m == VM_PAGE_NULL) || (m->vmp_busy) ||
7315 (m->vmp_unusual && (m->vmp_error || m->vmp_restart || m->vmp_absent))) {
7316 GIVE_UP;
7317 }
7318 if (vm_page_is_guard(m)) {
7319 /*
7320 * Guard pages are fictitious pages and are never
7321 * entered into a pmap, so let's say it's been wired...
7322 */
7323 kr = KERN_SUCCESS;
7324 goto done;
7325 }
7326
7327 /*
7328 * Wire the page down now. All bail outs beyond this
7329 * point must unwire the page.
7330 */
7331
7332 vm_page_lockspin_queues();
7333 vm_page_wire(m, wire_tag, TRUE);
7334 vm_page_unlock_queues();
7335
7336 /*
7337 * Mark page busy for other threads.
7338 */
7339 assert(!m->vmp_busy);
7340 m->vmp_busy = TRUE;
7341 assert(!m->vmp_absent);
7342
7343 fault_info.user_tag = VME_ALIAS(entry);
7344 fault_info.pmap_options = 0;
7345 if (entry->iokit_acct ||
7346 (!entry->is_sub_map && !entry->use_pmap)) {
7347 fault_info.pmap_options |= PMAP_OPTIONS_ALT_ACCT;
7348 }
7349 if (entry->vme_xnu_user_debug) {
7350 /*
7351 * Modified code-signed executable region: wiring will
7352 * copy the pages, so they should be XNU_USER_DEBUG rather
7353 * than XNU_USER_EXEC.
7354 */
7355 fault_info.pmap_options |= PMAP_OPTIONS_XNU_USER_DEBUG;
7356 }
7357
7358 if (entry->translated_allow_execute) {
7359 fault_info.pmap_options |= PMAP_OPTIONS_TRANSLATED_ALLOW_EXECUTE;
7360 }
7361
7362 fault_page_size = MIN(VM_MAP_PAGE_SIZE(map), PAGE_SIZE);
7363 fault_phys_offset = offset - vm_object_trunc_page(offset);
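/*
 * Illustrative example: for a 4K map (fault_page_size == 0x1000) on a
 * 16K-kernel-page system, an object offset of 0x5000 truncates to the
 * 16K boundary 0x4000, so fault_phys_offset == 0x1000, i.e. the second
 * 4K sub-page of the 16K physical page is the one being wired.
 */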
7364
7365 /*
7366 * Put this page into the physical map.
7367 */
7368 type_of_fault = DBG_CACHE_HIT_FAULT;
7369 assert3p(VM_PAGE_OBJECT(m), ==, object);
7370 bool page_sleep_needed = false;
7371 kr = vm_fault_enter(m,
7372 pmap,
7373 pmap_addr,
7374 fault_page_size,
7375 fault_phys_offset,
7376 prot,
7377 prot,
7378 TRUE, /* wired */
7379 wire_tag,
7380 &fault_info,
7381 NULL,
7382 &type_of_fault,
7383 &object_lock_type, /* Exclusive lock mode. Will remain unchanged.*/
7384 &page_sleep_needed);
7385 if ((kr != KERN_SUCCESS) || page_sleep_needed) {
7386 RELEASE_PAGE(m);
7387 GIVE_UP;
7388 }
7389
7390
7391 done:
7392 /*
7393 * Unlock everything, and return
7394 */
7395
7396 if (physpage_p) {
7397 /* for vm_map_wire_and_extract() */
7398 if (kr == KERN_SUCCESS) {
7399 assert3p(object, ==, VM_PAGE_OBJECT(m));
7400 *physpage_p = VM_PAGE_GET_PHYS_PAGE(m);
7401 if (prot & VM_PROT_WRITE) {
7402 vm_object_lock_assert_exclusive(object);
7403 m->vmp_dirty = TRUE;
7404 }
7405 } else {
7406 *physpage_p = 0;
7407 }
7408 }
7409
7410 if (m->vmp_busy) {
7411 vm_page_wakeup_done(object, m);
7412 }
7413
7414 UNLOCK_AND_DEALLOCATE;
7415
7416 return kr;
7417 }
7418
7419 /*
7420 * Routine: vm_fault_copy_cleanup
7421 * Purpose:
7422 * Release a page used by vm_fault_copy.
7423 */
7424
7425 static void
7426 vm_fault_copy_cleanup(
7427 vm_page_t page,
7428 vm_page_t top_page)
7429 {
7430 vm_object_t object = VM_PAGE_OBJECT(page);
7431
7432 vm_object_lock(object);
7433 vm_page_wakeup_done(object, page);
7434 if (!VM_PAGE_PAGEABLE(page)) {
7435 vm_page_lockspin_queues();
7436 if (!VM_PAGE_PAGEABLE(page)) {
7437 vm_page_activate(page);
7438 }
7439 vm_page_unlock_queues();
7440 }
7441 vm_fault_cleanup(object, top_page);
7442 }
7443
7444 static void
7445 vm_fault_copy_dst_cleanup(
7446 vm_page_t page)
7447 {
7448 vm_object_t object;
7449
7450 if (page != VM_PAGE_NULL) {
7451 object = VM_PAGE_OBJECT(page);
7452 vm_object_lock(object);
7453 vm_page_lockspin_queues();
7454 vm_page_unwire(page, TRUE);
7455 vm_page_unlock_queues();
7456 vm_object_paging_end(object);
7457 vm_object_unlock(object);
7458 }
7459 }
7460
7461 /*
7462 * Routine: vm_fault_copy
7463 *
7464 * Purpose:
7465 * Copy pages from one virtual memory object to another --
7466 * neither the source nor destination pages need be resident.
7467 *
7468 * Before actually copying a page, the version associated with
7469 * the destination address map will be verified.
7470 *
7471 * In/out conditions:
7472 * The caller must hold a reference, but not a lock, to
7473 * each of the source and destination objects and to the
7474 * destination map.
7475 *
7476 * Results:
7477 * Returns KERN_SUCCESS if no errors were encountered in
7478 * reading or writing the data. Returns KERN_INTERRUPTED if
7479 * the operation was interrupted (only possible if the
7480 * "interruptible" argument is asserted). Other return values
7481 * indicate a permanent error in copying the data.
7482 *
7483 * The actual amount of data copied will be returned in the
7484 * "copy_size" argument. In the event that the destination map
7485 * verification failed, this amount may be less than the amount
7486 * requested.
7487 */
7488 kern_return_t
7489 vm_fault_copy(
7490 vm_object_t src_object,
7491 vm_object_offset_t src_offset,
7492 vm_map_size_t *copy_size, /* INOUT */
7493 vm_object_t dst_object,
7494 vm_object_offset_t dst_offset,
7495 vm_map_t dst_map,
7496 vm_map_version_t *dst_version,
7497 int interruptible)
7498 {
7499 vm_page_t result_page;
7500
7501 vm_page_t src_page;
7502 vm_page_t src_top_page;
7503 vm_prot_t src_prot;
7504
7505 vm_page_t dst_page;
7506 vm_page_t dst_top_page;
7507 vm_prot_t dst_prot;
7508
7509 vm_map_size_t amount_left;
7510 vm_object_t old_copy_object;
7511 uint32_t old_copy_version;
7512 vm_object_t result_page_object = NULL;
7513 kern_return_t error = 0;
7514 vm_fault_return_t result;
7515
7516 vm_map_size_t part_size;
7517 struct vm_object_fault_info fault_info_src = {};
7518 struct vm_object_fault_info fault_info_dst = {};
7519
7520 vmlp_api_start(VM_FAULT_COPY);
7521 vmlp_range_event(dst_map, dst_offset, *copy_size);
7522
7523 /*
7524 * In order not to confuse the clustered pageins, align
7525 * the different offsets on a page boundary.
7526 */
7527
7528 #define RETURN(x) \
7529 MACRO_BEGIN \
7530 *copy_size -= amount_left; \
7531 vmlp_api_end(VM_FAULT_COPY, x); \
7532 MACRO_RETURN(x); \
7533 MACRO_END
7534
7535 amount_left = *copy_size;
7536
7537 fault_info_src.interruptible = interruptible;
7538 fault_info_src.behavior = VM_BEHAVIOR_SEQUENTIAL;
7539 fault_info_src.lo_offset = vm_object_trunc_page(src_offset);
7540 fault_info_src.hi_offset = fault_info_src.lo_offset + amount_left;
7541 fault_info_src.stealth = TRUE;
7542
7543 fault_info_dst.interruptible = interruptible;
7544 fault_info_dst.behavior = VM_BEHAVIOR_SEQUENTIAL;
7545 fault_info_dst.lo_offset = vm_object_trunc_page(dst_offset);
7546 fault_info_dst.hi_offset = fault_info_dst.lo_offset + amount_left;
7547 fault_info_dst.stealth = TRUE;
7548
7549 do { /* while (amount_left > 0) */
7550 /*
7551 * There may be a deadlock if both source and destination
7552 * pages are the same. To avoid this deadlock, the copy must
7553 * start by getting the destination page in order to apply
7554 * COW semantics if any.
7555 */
7556
7557 RetryDestinationFault:;
7558
7559 dst_prot = VM_PROT_WRITE | VM_PROT_READ;
7560
7561 vm_object_lock(dst_object);
7562 vm_object_paging_begin(dst_object);
7563
7564 /* cap cluster size at maximum UPL size */
7565 upl_size_t cluster_size;
7566 if (os_convert_overflow(amount_left, &cluster_size)) {
7567 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7568 }
7569 fault_info_dst.cluster_size = cluster_size;
7570
7571 dst_page = VM_PAGE_NULL;
7572 result = vm_fault_page(dst_object,
7573 vm_object_trunc_page(dst_offset),
7574 VM_PROT_WRITE | VM_PROT_READ,
7575 FALSE,
7576 FALSE, /* page not looked up */
7577 &dst_prot, &dst_page, &dst_top_page,
7578 (int *)0,
7579 &error,
7580 dst_map->no_zero_fill,
7581 &fault_info_dst);
7582 switch (result) {
7583 case VM_FAULT_SUCCESS:
7584 break;
7585 case VM_FAULT_RETRY:
7586 goto RetryDestinationFault;
7587 case VM_FAULT_MEMORY_SHORTAGE:
7588 if (vm_page_wait(interruptible)) {
7589 goto RetryDestinationFault;
7590 }
7591 ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_COPY_MEMORY_SHORTAGE), 0 /* arg */);
7592 OS_FALLTHROUGH;
7593 case VM_FAULT_INTERRUPTED:
7594 RETURN(MACH_SEND_INTERRUPTED);
7595 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7596 /* success but no VM page: fail the copy */
7597 vm_object_paging_end(dst_object);
7598 vm_object_unlock(dst_object);
7599 OS_FALLTHROUGH;
7600 case VM_FAULT_MEMORY_ERROR:
7601 if (error) {
7602 vmlp_api_end(VM_FAULT_COPY, error);
7603 return error;
7604 } else {
7605 vmlp_api_end(VM_FAULT_COPY, KERN_MEMORY_ERROR);
7606 return KERN_MEMORY_ERROR;
7607 }
7608 default:
7609 panic("vm_fault_copy: unexpected error 0x%x from "
7610 "vm_fault_page()\n", result);
7611 }
7612 assert((dst_prot & VM_PROT_WRITE) != VM_PROT_NONE);
7613
7614 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7615 old_copy_object = dst_object->vo_copy;
7616 old_copy_version = dst_object->vo_copy_version;
7617
7618 /*
7619 * There exists the possibility that the source and
7620 * destination page are the same. But we can't
7621 * easily determine that now. If they are the
7622 * same, the call to vm_fault_page() for the
7623 * destination page will deadlock. To prevent this we
7624 * wire the page so we can drop busy without having
7625 * the page daemon steal the page. We clean up the
7626 * top page but keep the paging reference on the object
7627 * holding the dest page so it doesn't go away.
7628 */
7629
7630 vm_page_lockspin_queues();
7631 vm_page_wire(dst_page, VM_KERN_MEMORY_OSFMK, TRUE);
7632 vm_page_unlock_queues();
7633 vm_page_wakeup_done(dst_object, dst_page);
7634 vm_object_unlock(dst_object);
7635
7636 if (dst_top_page != VM_PAGE_NULL) {
7637 vm_object_lock(dst_object);
7638 VM_PAGE_FREE(dst_top_page);
7639 vm_object_paging_end(dst_object);
7640 vm_object_unlock(dst_object);
7641 }
7642
7643 RetrySourceFault:;
7644
7645 if (src_object == VM_OBJECT_NULL) {
7646 /*
7647 * No source object. We will just
7648 * zero-fill the page in dst_object.
7649 */
7650 src_page = VM_PAGE_NULL;
7651 result_page = VM_PAGE_NULL;
7652 } else {
7653 vm_object_lock(src_object);
7654 src_page = vm_page_lookup(src_object,
7655 vm_object_trunc_page(src_offset));
7656 if (src_page == dst_page) {
7657 src_prot = dst_prot;
7658 result_page = VM_PAGE_NULL;
7659 } else {
7660 src_prot = VM_PROT_READ;
7661 vm_object_paging_begin(src_object);
7662
7663 /* cap cluster size at maximum UPL size */
7664 if (os_convert_overflow(amount_left, &cluster_size)) {
7665 cluster_size = 0 - (upl_size_t)PAGE_SIZE;
7666 }
7667 fault_info_src.cluster_size = cluster_size;
7668
7669 result_page = VM_PAGE_NULL;
7670 result = vm_fault_page(
7671 src_object,
7672 vm_object_trunc_page(src_offset),
7673 VM_PROT_READ, FALSE,
7674 FALSE, /* page not looked up */
7675 &src_prot,
7676 &result_page, &src_top_page,
7677 (int *)0, &error, FALSE,
7678 &fault_info_src);
7679
7680 switch (result) {
7681 case VM_FAULT_SUCCESS:
7682 break;
7683 case VM_FAULT_RETRY:
7684 goto RetrySourceFault;
7685 case VM_FAULT_MEMORY_SHORTAGE:
7686 if (vm_page_wait(interruptible)) {
7687 goto RetrySourceFault;
7688 }
7689 OS_FALLTHROUGH;
7690 case VM_FAULT_INTERRUPTED:
7691 vm_fault_copy_dst_cleanup(dst_page);
7692 RETURN(MACH_SEND_INTERRUPTED);
7693 case VM_FAULT_SUCCESS_NO_VM_PAGE:
7694 /* success but no VM page: fail */
7695 vm_object_paging_end(src_object);
7696 vm_object_unlock(src_object);
7697 OS_FALLTHROUGH;
7698 case VM_FAULT_MEMORY_ERROR:
7699 vm_fault_copy_dst_cleanup(dst_page);
7700 if (error) {
7701 vmlp_api_end(VM_FAULT_COPY, error);
7702 return error;
7703 } else {
7704 vmlp_api_end(VM_FAULT_COPY, KERN_MEMORY_ERROR);
7705 return KERN_MEMORY_ERROR;
7706 }
7707 default:
7708 panic("vm_fault_copy(2): unexpected "
7709 "error 0x%x from "
7710 "vm_fault_page()\n", result);
7711 }
7712
7713 result_page_object = VM_PAGE_OBJECT(result_page);
7714 assert((src_top_page == VM_PAGE_NULL) ==
7715 (result_page_object == src_object));
7716 }
7717 assert((src_prot & VM_PROT_READ) != VM_PROT_NONE);
7718 vm_object_unlock(result_page_object);
7719 }
7720
7721 vm_map_lock_read(dst_map);
7722
7723 if (!vm_map_verify(dst_map, dst_version)) {
7724 vm_map_unlock_read(dst_map);
7725 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7726 vm_fault_copy_cleanup(result_page, src_top_page);
7727 }
7728 vm_fault_copy_dst_cleanup(dst_page);
7729 break;
7730 }
7731 assert(dst_object == VM_PAGE_OBJECT(dst_page));
7732
7733 vm_object_lock(dst_object);
7734
7735 if ((dst_object->vo_copy != old_copy_object ||
7736 dst_object->vo_copy_version != old_copy_version)) {
7737 vm_object_unlock(dst_object);
7738 vm_map_unlock_read(dst_map);
7739 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7740 vm_fault_copy_cleanup(result_page, src_top_page);
7741 }
7742 vm_fault_copy_dst_cleanup(dst_page);
7743 break;
7744 }
7745 vm_object_unlock(dst_object);
7746
7747 /*
7748 * Copy the page, and note that it is dirty
7749 * immediately.
7750 */
7751
7752 if (!page_aligned(src_offset) ||
7753 !page_aligned(dst_offset) ||
7754 !page_aligned(amount_left)) {
7755 vm_object_offset_t src_po,
7756 dst_po;
7757
7758 src_po = src_offset - vm_object_trunc_page(src_offset);
7759 dst_po = dst_offset - vm_object_trunc_page(dst_offset);
7760
7761 if (dst_po > src_po) {
7762 part_size = PAGE_SIZE - dst_po;
7763 } else {
7764 part_size = PAGE_SIZE - src_po;
7765 }
7766 if (part_size > (amount_left)) {
7767 part_size = amount_left;
7768 }
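/*
 * Worked example (illustrative): with src_po == 0x200 and dst_po == 0x600,
 * part_size is PAGE_SIZE - 0x600, i.e. the copy is limited by whichever of
 * the two pages runs out first (here the destination); if only 0x100 bytes
 * remain to be copied, part_size is further clamped to 0x100 by the check
 * above.
 */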
7769
7770 if (result_page == VM_PAGE_NULL) {
7771 assert((vm_offset_t) dst_po == dst_po);
7772 assert((vm_size_t) part_size == part_size);
7773 vm_page_part_zero_fill(dst_page,
7774 (vm_offset_t) dst_po,
7775 (vm_size_t) part_size);
7776 } else {
7777 assert((vm_offset_t) src_po == src_po);
7778 assert((vm_offset_t) dst_po == dst_po);
7779 assert((vm_size_t) part_size == part_size);
7780 vm_page_part_copy(result_page,
7781 (vm_offset_t) src_po,
7782 dst_page,
7783 (vm_offset_t) dst_po,
7784 (vm_size_t)part_size);
7785 if (!dst_page->vmp_dirty) {
7786 vm_object_lock(dst_object);
7787 SET_PAGE_DIRTY(dst_page, TRUE);
7788 vm_object_unlock(dst_object);
7789 }
7790 }
7791 } else {
7792 part_size = PAGE_SIZE;
7793
7794 if (result_page == VM_PAGE_NULL) {
7795 vm_page_zero_fill(
7796 dst_page
7797 );
7798 } else {
7799 vm_object_lock(result_page_object);
7800 vm_page_copy(result_page, dst_page);
7801 vm_object_unlock(result_page_object);
7802
7803 if (!dst_page->vmp_dirty) {
7804 vm_object_lock(dst_object);
7805 SET_PAGE_DIRTY(dst_page, TRUE);
7806 vm_object_unlock(dst_object);
7807 }
7808 }
7809 }
7810
7811 /*
7812 * Unlock everything, and return
7813 */
7814
7815 vm_map_unlock_read(dst_map);
7816
7817 if (result_page != VM_PAGE_NULL && src_page != dst_page) {
7818 vm_fault_copy_cleanup(result_page, src_top_page);
7819 }
7820 vm_fault_copy_dst_cleanup(dst_page);
7821
7822 amount_left -= part_size;
7823 src_offset += part_size;
7824 dst_offset += part_size;
7825 } while (amount_left > 0);
7826
7827 RETURN(KERN_SUCCESS);
7828 #undef RETURN
7829
7830 /*NOTREACHED*/
7831 }
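/*
 * Illustrative sketch (hypothetical caller, not part of this file): an
 * overwrite-style copy path would snapshot the destination map version,
 * then loop on vm_fault_copy() until the requested size is consumed or
 * the version check fails, roughly:
 *
 *      vm_map_size_t sz = len;
 *      vm_map_version_t version;
 *      ... fill in "version" while holding the dst map lock ...
 *      kr = vm_fault_copy(src_object, src_offset, &sz,
 *          dst_object, dst_offset, dst_map, &version, THREAD_UNINT);
 *      if (kr == KERN_SUCCESS && sz < len) {
 *              ... the map changed underneath us: re-lookup and retry ...
 *      }
 *
 * "sz" coming back smaller than requested is how a failed vm_map_verify()
 * is reported, per the header comment above.
 */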
7832
7833 #if VM_FAULT_CLASSIFY
7834 /*
7835 * Temporary statistics gathering support.
7836 */
7837
7838 /*
7839 * Statistics arrays:
7840 */
7841 #define VM_FAULT_TYPES_MAX 5
7842 #define VM_FAULT_LEVEL_MAX 8
7843
7844 int vm_fault_stats[VM_FAULT_TYPES_MAX][VM_FAULT_LEVEL_MAX];
7845
7846 #define VM_FAULT_TYPE_ZERO_FILL 0
7847 #define VM_FAULT_TYPE_MAP_IN 1
7848 #define VM_FAULT_TYPE_PAGER 2
7849 #define VM_FAULT_TYPE_COPY 3
7850 #define VM_FAULT_TYPE_OTHER 4
7851
7852
7853 void
7854 vm_fault_classify(vm_object_t object,
7855 vm_object_offset_t offset,
7856 vm_prot_t fault_type)
7857 {
7858 int type, level = 0;
7859 vm_page_t m;
7860
7861 while (TRUE) {
7862 m = vm_page_lookup(object, offset);
7863 if (m != VM_PAGE_NULL) {
7864 if (m->vmp_busy || m->vmp_error || m->vmp_restart || m->vmp_absent) {
7865 type = VM_FAULT_TYPE_OTHER;
7866 break;
7867 }
7868 if (((fault_type & VM_PROT_WRITE) == 0) ||
7869 ((level == 0) && object->vo_copy == VM_OBJECT_NULL)) {
7870 type = VM_FAULT_TYPE_MAP_IN;
7871 break;
7872 }
7873 type = VM_FAULT_TYPE_COPY;
7874 break;
7875 } else {
7876 if (object->pager_created) {
7877 type = VM_FAULT_TYPE_PAGER;
7878 break;
7879 }
7880 if (object->shadow == VM_OBJECT_NULL) {
7881 type = VM_FAULT_TYPE_ZERO_FILL;
7882 break;
7883 }
7884
7885 offset += object->vo_shadow_offset;
7886 object = object->shadow;
7887 level++;
7888 continue;
7889 }
7890 }
7891
7892 if (level > VM_FAULT_LEVEL_MAX) {
7893 level = VM_FAULT_LEVEL_MAX;
7894 }
7895
7896 vm_fault_stats[type][level] += 1;
7897
7898 return;
7899 }
7900
7901 /* cleanup routine to call from debugger */
7902
7903 void
7904 vm_fault_classify_init(void)
7905 {
7906 int type, level;
7907
7908 for (type = 0; type < VM_FAULT_TYPES_MAX; type++) {
7909 for (level = 0; level < VM_FAULT_LEVEL_MAX; level++) {
7910 vm_fault_stats[type][level] = 0;
7911 }
7912 }
7913
7914 return;
7915 }
7916 #endif /* VM_FAULT_CLASSIFY */
7917
7918 static inline bool
7919 object_supports_coredump(const vm_object_t object)
7920 {
7921 switch (object->wimg_bits & VM_WIMG_MASK) {
7922 case VM_WIMG_DEFAULT:
7923 return true;
7924 default:
7925 return false;
7926 }
7927 }
7928
7929 vm_offset_t
7930 kdp_lightweight_fault(vm_map_t map, vm_offset_t cur_target_addr, bool multi_cpu)
7931 {
7932 vm_map_entry_t entry;
7933 vm_object_t object;
7934 vm_offset_t object_offset;
7935 vm_page_t m;
7936 int compressor_external_state, compressed_count_delta;
7937 vm_compressor_options_t compressor_flags = (C_DONT_BLOCK | C_KEEP | C_KDP);
7938 int my_fault_type = VM_PROT_READ;
7939 kern_return_t kr;
7940 int effective_page_mask, effective_page_size;
7941 int my_cpu_no = cpu_number();
7942 ppnum_t decomp_ppnum;
7943 addr64_t decomp_paddr;
7944
7945 vmlp_api_start(KDP_LIGHTWEIGHT_FAULT);
7946
7947 if (multi_cpu) {
7948 compressor_flags |= C_KDP_MULTICPU;
7949 }
7950
7951 if (VM_MAP_PAGE_SHIFT(map) < PAGE_SHIFT) {
7952 effective_page_mask = VM_MAP_PAGE_MASK(map);
7953 effective_page_size = VM_MAP_PAGE_SIZE(map);
7954 } else {
7955 effective_page_mask = PAGE_MASK;
7956 effective_page_size = PAGE_SIZE;
7957 }
7958
7959 if (not_in_kdp) {
7960 panic("kdp_lightweight_fault called from outside of debugger context");
7961 }
7962
7963 assert(map != VM_MAP_NULL);
7964
7965 assert((cur_target_addr & effective_page_mask) == 0);
7966 if ((cur_target_addr & effective_page_mask) != 0) {
7967 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
7968 return 0;
7969 }
7970
7971 if (kdp_lck_rw_lock_is_acquired_exclusive(&map->lock)) {
7972 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
7973 return 0;
7974 }
7975
7976 if (!vm_map_lookup_entry(map, cur_target_addr, &entry)) {
7977 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
7978 return 0;
7979 }
7980
7981 vmlp_range_event_entry(map, entry);
7982
7983 if (entry->is_sub_map) {
7984 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
7985 return 0;
7986 }
7987
7988 object = VME_OBJECT(entry);
7989 if (object == VM_OBJECT_NULL) {
7990 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
7991 return 0;
7992 }
7993
7994 object_offset = cur_target_addr - entry->vme_start + VME_OFFSET(entry);
7995
7996 while (TRUE) {
7997 if (kdp_lck_rw_lock_is_acquired_exclusive(&object->Lock)) {
7998 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
7999 return 0;
8000 }
8001
8002 if (object->pager_created && (object->paging_in_progress ||
8003 object->activity_in_progress)) {
8004 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8005 return 0;
8006 }
8007
8008 m = kdp_vm_page_lookup(object, vm_object_trunc_page(object_offset));
8009
8010 if (m != VM_PAGE_NULL) {
8011 if (!object_supports_coredump(object)) {
8012 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8013 return 0;
8014 }
8015
8016 if (m->vmp_laundry || m->vmp_busy || m->vmp_free_when_done ||
8017 m->vmp_absent || VMP_ERROR_GET(m) || m->vmp_cleaning ||
8018 m->vmp_overwriting || m->vmp_restart || m->vmp_unusual) {
8019 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8020 return 0;
8021 }
8022
8023 assert(!vm_page_is_private(m));
8024 if (vm_page_is_private(m)) {
8025 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8026 return 0;
8027 }
8028
8029 assert(!vm_page_is_fictitious(m));
8030 if (vm_page_is_fictitious(m)) {
8031 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8032 return 0;
8033 }
8034
8035 assert(m->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
8036 if (m->vmp_q_state == VM_PAGE_USED_BY_COMPRESSOR) {
8037 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8038 return 0;
8039 }
8040
8041 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, 0);
8042 return ptoa(VM_PAGE_GET_PHYS_PAGE(m));
8043 }
8044
8045 compressor_external_state = VM_EXTERNAL_STATE_UNKNOWN;
8046
8047 if (multi_cpu) {
8048 assert(vm_compressor_kdp_state.kc_decompressed_pages_ppnum != NULL);
8049 assert(vm_compressor_kdp_state.kc_decompressed_pages_paddr != NULL);
8050 decomp_ppnum = vm_compressor_kdp_state.kc_decompressed_pages_ppnum[my_cpu_no];
8051 decomp_paddr = vm_compressor_kdp_state.kc_decompressed_pages_paddr[my_cpu_no];
8052 } else {
8053 decomp_ppnum = vm_compressor_kdp_state.kc_panic_decompressed_page_ppnum;
8054 decomp_paddr = vm_compressor_kdp_state.kc_panic_decompressed_page_paddr;
8055 }
8056
8057 if (object->pager_created && MUST_ASK_PAGER(object, object_offset, compressor_external_state)) {
8058 if (compressor_external_state == VM_EXTERNAL_STATE_EXISTS) {
8059 kr = vm_compressor_pager_get(object->pager,
8060 vm_object_trunc_page(object_offset + object->paging_offset),
8061 decomp_ppnum, &my_fault_type,
8062 compressor_flags, &compressed_count_delta);
8063 if (kr == KERN_SUCCESS) {
8064 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, 0);
8065 return decomp_paddr;
8066 } else {
8067 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8068 return 0;
8069 }
8070 }
8071 }
8072
8073 if (object->shadow == VM_OBJECT_NULL) {
8074 vmlp_api_end(KDP_LIGHTWEIGHT_FAULT, -1);
8075 return 0;
8076 }
8077
8078 object_offset += object->vo_shadow_offset;
8079 object = object->shadow;
8080 }
8081 }
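/*
 * Illustrative sketch (hypothetical debugger-context caller): stackshot-style
 * code can use this to translate a user virtual address to a physical address
 * without taking locks or faulting, roughly:
 *
 *      vm_offset_t phys;
 *      phys = kdp_lightweight_fault(task->map,
 *          vm_map_trunc_page(uaddr, vm_map_page_mask(task->map)), false);
 *      if (phys == 0) {
 *              ... page not resident / not safely readable, skip it ...
 *      }
 *
 * A return of 0 always means "give up on this page"; the routine never
 * blocks, which is why every contended-lock check above bails out.
 */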
8082
8083 /*
8084 * vm_page_validate_cs_fast():
8085 * Performs a few quick checks to determine whether the page's code signature
8086 * really needs to be fully validated. The page could:
8087 * 1. have been modified (i.e. automatically tainted),
8088 * 2. have already been validated,
8089 * 3. have already been found to be tainted,
8090 * 4. no longer have a backing store.
8091 * Returns FALSE if the page needs to be fully validated.
8092 */
8093 static boolean_t
8094 vm_page_validate_cs_fast(
8095 vm_page_t page,
8096 vm_map_size_t fault_page_size,
8097 vm_map_offset_t fault_phys_offset)
8098 {
8099 vm_object_t object;
8100
8101 object = VM_PAGE_OBJECT(page);
8102 vm_object_lock_assert_held(object);
8103
8104 if (page->vmp_wpmapped &&
8105 !VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
8106 /*
8107 * This page was mapped for "write" access sometime in the
8108 * past and could still be modifiable in the future.
8109 * Consider it tainted.
8110 * [ If the page was already found to be "tainted", no
8111 * need to re-validate. ]
8112 */
8113 vm_object_lock_assert_exclusive(object);
8114 VMP_CS_SET_VALIDATED(page, fault_page_size, fault_phys_offset, TRUE);
8115 VMP_CS_SET_TAINTED(page, fault_page_size, fault_phys_offset, TRUE);
8116 if (cs_debug) {
8117 printf("CODESIGNING: %s: "
8118 "page %p obj %p off 0x%llx "
8119 "was modified\n",
8120 __FUNCTION__,
8121 page, object, page->vmp_offset);
8122 }
8123 vm_cs_validated_dirtied++;
8124 }
8125
8126 if (VMP_CS_VALIDATED(page, fault_page_size, fault_phys_offset) ||
8127 VMP_CS_TAINTED(page, fault_page_size, fault_phys_offset)) {
8128 return TRUE;
8129 }
8130 vm_object_lock_assert_exclusive(object);
8131
8132 #if CHECK_CS_VALIDATION_BITMAP
8133 kern_return_t kr;
8134
8135 kr = vnode_pager_cs_check_validation_bitmap(
8136 object->pager,
8137 page->vmp_offset + object->paging_offset,
8138 CS_BITMAP_CHECK);
8139 if (kr == KERN_SUCCESS) {
8140 page->vmp_cs_validated = VMP_CS_ALL_TRUE;
8141 page->vmp_cs_tainted = VMP_CS_ALL_FALSE;
8142 vm_cs_bitmap_validated++;
8143 return TRUE;
8144 }
8145 #endif /* CHECK_CS_VALIDATION_BITMAP */
8146
8147 if (!object->alive || object->terminating || object->pager == NULL) {
8148 /*
8149 * The object is terminating and we don't have its pager
8150 * so we can't validate the data...
8151 */
8152 return TRUE;
8153 }
8154
8155 /* we need to really validate this page */
8156 vm_object_lock_assert_exclusive(object);
8157 return FALSE;
8158 }
8159
8160 void
8161 vm_page_validate_cs_mapped_slow(
8162 vm_page_t page,
8163 const void *kaddr)
8164 {
8165 vm_object_t object;
8166 memory_object_offset_t mo_offset;
8167 memory_object_t pager;
8168 struct vnode *vnode;
8169 int validated, tainted, nx;
8170
8171 assert(page->vmp_busy);
8172 object = VM_PAGE_OBJECT(page);
8173 vm_object_lock_assert_exclusive(object);
8174
8175 vm_cs_validates++;
8176
8177 /*
8178 * Since we get here to validate a page that was brought in by
8179 * the pager, we know that this pager is all set up and ready
8180 * by now.
8181 */
8182 assert(object->code_signed);
8183 assert(!object->internal);
8184 assert(object->pager != NULL);
8185 assert(object->pager_ready);
8186
8187 pager = object->pager;
8188 assert(object->paging_in_progress);
8189 vnode = vnode_pager_lookup_vnode(pager);
8190 mo_offset = page->vmp_offset + object->paging_offset;
8191
8192 /* verify the SHA1 hash for this page */
8193 validated = 0;
8194 tainted = 0;
8195 nx = 0;
8196 cs_validate_page(vnode,
8197 pager,
8198 mo_offset,
8199 (const void *)((const char *)kaddr),
8200 &validated,
8201 &tainted,
8202 &nx);
8203
8204 page->vmp_cs_validated |= validated;
8205 page->vmp_cs_tainted |= tainted;
8206 page->vmp_cs_nx |= nx;
8207
8208 #if CHECK_CS_VALIDATION_BITMAP
8209 if (page->vmp_cs_validated == VMP_CS_ALL_TRUE &&
8210 page->vmp_cs_tainted == VMP_CS_ALL_FALSE) {
8211 vnode_pager_cs_check_validation_bitmap(object->pager,
8212 mo_offset,
8213 CS_BITMAP_SET);
8214 }
8215 #endif /* CHECK_CS_VALIDATION_BITMAP */
8216 }
8217
8218 void
8219 vm_page_validate_cs_mapped(
8220 vm_page_t page,
8221 vm_map_size_t fault_page_size,
8222 vm_map_offset_t fault_phys_offset,
8223 const void *kaddr)
8224 {
8225 if (!vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8226 vm_page_validate_cs_mapped_slow(page, kaddr);
8227 }
8228 }
8229
8230 static void
8231 vm_page_map_and_validate_cs(
8232 vm_object_t object,
8233 vm_page_t page)
8234 {
8235 vm_object_offset_t offset;
8236 vm_map_offset_t koffset;
8237 vm_map_size_t ksize;
8238 vm_offset_t kaddr;
8239 kern_return_t kr;
8240 boolean_t busy_page;
8241 boolean_t need_unmap;
8242
8243 vm_object_lock_assert_exclusive(object);
8244
8245 assert(object->code_signed);
8246 offset = page->vmp_offset;
8247
8248 busy_page = page->vmp_busy;
8249 if (!busy_page) {
8250 /* keep page busy while we map (and unlock) the VM object */
8251 page->vmp_busy = TRUE;
8252 }
8253
8254 /*
8255 * Take a paging reference on the VM object
8256 * to protect it from collapse or bypass,
8257 * and keep it from disappearing too.
8258 */
8259 vm_object_paging_begin(object);
8260
8261 /* map the page in the kernel address space */
8262 ksize = PAGE_SIZE_64;
8263 koffset = 0;
8264 need_unmap = FALSE;
8265 kr = vm_paging_map_object(page,
8266 object,
8267 offset,
8268 VM_PROT_READ,
8269 FALSE, /* can't unlock object ! */
8270 &ksize,
8271 &koffset,
8272 &need_unmap);
8273 if (kr != KERN_SUCCESS) {
8274 panic("%s: could not map page: 0x%x", __FUNCTION__, kr);
8275 }
8276 kaddr = CAST_DOWN(vm_offset_t, koffset);
8277
8278 /* validate the mapped page */
8279 vm_page_validate_cs_mapped_slow(page, (const void *) kaddr);
8280
8281 assert(page->vmp_busy);
8282 assert(object == VM_PAGE_OBJECT(page));
8283 vm_object_lock_assert_exclusive(object);
8284
8285 if (!busy_page) {
8286 vm_page_wakeup_done(object, page);
8287 }
8288 if (need_unmap) {
8289 /* unmap the map from the kernel address space */
8290 vm_paging_unmap_object(object, koffset, koffset + ksize);
8291 koffset = 0;
8292 ksize = 0;
8293 kaddr = 0;
8294 }
8295 vm_object_paging_end(object);
8296 }
8297
8298 void
8299 vm_page_validate_cs(
8300 vm_page_t page,
8301 vm_map_size_t fault_page_size,
8302 vm_map_offset_t fault_phys_offset)
8303 {
8304 vm_object_t object;
8305
8306 object = VM_PAGE_OBJECT(page);
8307 vm_object_lock_assert_held(object);
8308
8309 if (vm_page_validate_cs_fast(page, fault_page_size, fault_phys_offset)) {
8310 return;
8311 }
8312 vm_page_map_and_validate_cs(object, page);
8313 }
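/*
 * Illustrative sketch (simplified): the fault path calls this when a page
 * from a code-signed object is about to be mapped, then acts on the
 * resulting page bits, roughly:
 *
 *      if (object->code_signed && cs_enforcement_applies) {
 *              vm_page_validate_cs(m, fault_page_size, fault_phys_offset);
 *              if (VMP_CS_TAINTED(m, fault_page_size, fault_phys_offset)) {
 *                      ... reject the mapping / flag the process ...
 *              }
 *      }
 *
 * "cs_enforcement_applies" stands in for whatever policy check the caller
 * makes; the real enforcement logic lives in the fault-enter code-signing
 * path, not here.
 */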
8314
8315 void
8316 vm_page_validate_cs_mapped_chunk(
8317 vm_page_t page,
8318 const void *kaddr,
8319 vm_offset_t chunk_offset,
8320 vm_size_t chunk_size,
8321 boolean_t *validated_p,
8322 unsigned *tainted_p)
8323 {
8324 vm_object_t object;
8325 vm_object_offset_t offset, offset_in_page;
8326 memory_object_t pager;
8327 struct vnode *vnode;
8328 boolean_t validated;
8329 unsigned tainted;
8330
8331 *validated_p = FALSE;
8332 *tainted_p = 0;
8333
8334 assert(page->vmp_busy);
8335 object = VM_PAGE_OBJECT(page);
8336 vm_object_lock_assert_exclusive(object);
8337
8338 assert(object->code_signed);
8339 offset = page->vmp_offset;
8340
8341 if (!object->alive || object->terminating || object->pager == NULL) {
8342 /*
8343 * The object is terminating and we don't have its pager
8344 * so we can't validate the data...
8345 */
8346 return;
8347 }
8348 /*
8349 * Since we get here to validate a page that was brought in by
8350 * the pager, we know that this pager is all set up and ready
8351 * by now.
8352 */
8353 assert(!object->internal);
8354 assert(object->pager != NULL);
8355 assert(object->pager_ready);
8356
8357 pager = object->pager;
8358 assert(object->paging_in_progress);
8359 vnode = vnode_pager_lookup_vnode(pager);
8360
8361 /* verify the signature for this chunk */
8362 offset_in_page = chunk_offset;
8363 assert(offset_in_page < PAGE_SIZE);
8364
8365 tainted = 0;
8366 validated = cs_validate_range(vnode,
8367 pager,
8368 (object->paging_offset +
8369 offset +
8370 offset_in_page),
8371 (const void *)((const char *)kaddr
8372 + offset_in_page),
8373 chunk_size,
8374 &tainted);
8375 if (validated) {
8376 *validated_p = TRUE;
8377 }
8378 if (tainted) {
8379 *tainted_p = tainted;
8380 }
8381 }
8382
8383 static void
8384 vm_rtfrecord_lock(void)
8385 {
8386 lck_spin_lock(&vm_rtfr_slock);
8387 }
8388
8389 static void
8390 vm_rtfrecord_unlock(void)
8391 {
8392 lck_spin_unlock(&vm_rtfr_slock);
8393 }
8394
8395 unsigned int
8396 vmrtfaultinfo_bufsz(void)
8397 {
8398 return vmrtf_num_records * sizeof(vm_rtfault_record_t);
8399 }
8400
8401 #include <kern/backtrace.h>
8402
8403 __attribute__((noinline))
8404 static void
8405 vm_record_rtfault(thread_t cthread, uint64_t fstart, vm_map_offset_t fault_vaddr, int type_of_fault)
8406 {
8407 uint64_t fend = mach_continuous_time();
8408
8409 uint64_t cfpc = 0;
8410 uint64_t ctid = cthread->thread_id;
8411 uint64_t cupid = get_current_unique_pid();
8412
8413 uintptr_t bpc = 0;
8414 errno_t btr = 0;
8415
8416 /*
8417 * Capture a single-frame backtrace. This extracts just the program
8418 * counter at the point of the fault, and should not use copyin to get
8419 * Rosetta save state.
8420 */
8421 struct backtrace_control ctl = {
8422 .btc_user_thread = cthread,
8423 .btc_user_copy = backtrace_user_copy_error,
8424 };
8425 unsigned int bfrs = backtrace_user(&bpc, 1U, &ctl, NULL);
8426 if ((btr == 0) && (bfrs > 0)) {
8427 cfpc = bpc;
8428 }
8429
8430 assert((fstart != 0) && fend >= fstart);
8431 vm_rtfrecord_lock();
8432 assert(vmrtfrs.vmrtfr_curi <= vmrtfrs.vmrtfr_maxi);
8433
8434 vmrtfrs.vmrtf_total++;
8435 vm_rtfault_record_t *cvmr = &vmrtfrs.vm_rtf_records[vmrtfrs.vmrtfr_curi++];
8436
8437 cvmr->rtfabstime = fstart;
8438 cvmr->rtfduration = fend - fstart;
8439 cvmr->rtfaddr = fault_vaddr;
8440 cvmr->rtfpc = cfpc;
8441 cvmr->rtftype = type_of_fault;
8442 cvmr->rtfupid = cupid;
8443 cvmr->rtftid = ctid;
8444
8445 if (vmrtfrs.vmrtfr_curi > vmrtfrs.vmrtfr_maxi) {
8446 vmrtfrs.vmrtfr_curi = 0;
8447 }
8448
8449 vm_rtfrecord_unlock();
8450 }
8451
8452 int
8453 vmrtf_extract(uint64_t cupid, __unused boolean_t isroot, unsigned long vrecordsz, void *vrecords, unsigned long *vmrtfrv)
8454 {
8455 vm_rtfault_record_t *cvmrd = vrecords;
8456 size_t residue = vrecordsz;
8457 size_t numextracted = 0;
8458 boolean_t early_exit = FALSE;
8459
8460 vm_rtfrecord_lock();
8461
8462 for (int vmfi = 0; vmfi <= vmrtfrs.vmrtfr_maxi; vmfi++) {
8463 if (residue < sizeof(vm_rtfault_record_t)) {
8464 early_exit = TRUE;
8465 break;
8466 }
8467
8468 if (vmrtfrs.vm_rtf_records[vmfi].rtfupid != cupid) {
8469 #if DEVELOPMENT || DEBUG
8470 if (isroot == FALSE) {
8471 continue;
8472 }
8473 #else
8474 continue;
8475 #endif /* DEVELOPMENT || DEBUG */
8476 }
8477
8478 *cvmrd = vmrtfrs.vm_rtf_records[vmfi];
8479 cvmrd++;
8480 residue -= sizeof(vm_rtfault_record_t);
8481 numextracted++;
8482 }
8483
8484 vm_rtfrecord_unlock();
8485
8486 *vmrtfrv = numextracted;
8487 return early_exit;
8488 }
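/*
 * Illustrative sketch (hypothetical sysctl-style consumer, simplified):
 *
 *      unsigned long nrec = 0;
 *      unsigned int bufsz = vmrtfaultinfo_bufsz();
 *      vm_rtfault_record_t *buf = kalloc_data(bufsz, Z_WAITOK | Z_ZERO);
 *      (void) vmrtf_extract(get_current_unique_pid(), FALSE,
 *          bufsz, buf, &nrec);
 *      ... copy out nrec records, then kfree_data(buf, bufsz) ...
 *
 * The buffer should be a multiple of sizeof(vm_rtfault_record_t); any
 * residue smaller than one record ends the extraction early.
 */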
8489
8490 /*
8491 * Only allow one diagnosis to be in flight at a time, to avoid
8492 * creating too much additional memory usage.
8493 */
8494 static volatile uint_t vmtc_diagnosing;
8495 unsigned int vmtc_total = 0;
8496
8497 /*
8498 * Type used to update telemetry for the diagnosis counts.
8499 */
8500 CA_EVENT(vmtc_telemetry,
8501 CA_INT, vmtc_num_byte, /* number of corrupt bytes found */
8502 CA_BOOL, vmtc_undiagnosed, /* undiagnosed because more than 1 at a time */
8503 CA_BOOL, vmtc_not_eligible, /* the page didn't qualify */
8504 CA_BOOL, vmtc_copyin_fail, /* unable to copy in the page */
8505 CA_BOOL, vmtc_not_found, /* no corruption found even though CS failed */
8506 CA_BOOL, vmtc_one_bit_flip, /* single bit flip */
8507 CA_BOOL, vmtc_testing); /* caused on purpose by testing */
8508
8509 #if DEVELOPMENT || DEBUG
8510 /*
8511 * Buffers used to compare before/after page contents.
8512 * Stashed to aid when debugging crashes.
8513 */
8514 static size_t vmtc_last_buffer_size = 0;
8515 static uint64_t *vmtc_last_before_buffer = NULL;
8516 static uint64_t *vmtc_last_after_buffer = NULL;
8517
8518 /*
8519 * Needed to record corruptions due to testing.
8520 */
8521 static uintptr_t corruption_test_va = 0;
8522 #endif /* DEVELOPMENT || DEBUG */
8523
8524 /*
8525 * Stash a copy of data from a possibly corrupt page.
8526 */
8527 static uint64_t *
8528 vmtc_get_page_data(
8529 vm_map_offset_t code_addr,
8530 vm_page_t page)
8531 {
8532 uint64_t *buffer = NULL;
8533 addr64_t buffer_paddr;
8534 addr64_t page_paddr;
8535 extern void bcopy_phys(addr64_t from, addr64_t to, vm_size_t bytes);
8536 uint_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8537
8538 /*
8539 * Need an aligned buffer to do a physical copy.
8540 */
8541 if (kernel_memory_allocate(kernel_map, (vm_offset_t *)&buffer,
8542 size, size - 1, KMA_KOBJECT, VM_KERN_MEMORY_DIAG) != KERN_SUCCESS) {
8543 return NULL;
8544 }
8545 buffer_paddr = kvtophys((vm_offset_t)buffer);
8546 page_paddr = ptoa(VM_PAGE_GET_PHYS_PAGE(page));
8547
8548 /* adjust the page start address if we need only 4K of a 16K page */
8549 if (size < PAGE_SIZE) {
8550 uint_t subpage_start = ((code_addr & (PAGE_SIZE - 1)) & ~(size - 1));
8551 page_paddr += subpage_start;
8552 }
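/*
 * Worked example (illustrative): with 16K kernel pages (PAGE_SIZE == 0x4000)
 * and a 4K map (size == 0x1000), a code_addr ending in 0x7123 gives
 * (code_addr & 0x3fff) == 0x3123, and masking with ~0xfff yields
 * subpage_start == 0x3000, so only the faulting 4K quarter of the 16K
 * physical page is copied.
 */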
8553
8554 bcopy_phys(page_paddr, buffer_paddr, size);
8555 return buffer;
8556 }
8557
8558 /*
8559 * Set things up so we can diagnose a potential text page corruption.
8560 */
8561 static uint64_t *
8562 vmtc_text_page_diagnose_setup(
8563 vm_map_offset_t code_addr,
8564 vm_page_t page,
8565 CA_EVENT_TYPE(vmtc_telemetry) *event)
8566 {
8567 uint64_t *buffer = NULL;
8568
8569 /*
8570 * If another is being diagnosed, skip this one.
8571 */
8572 if (!OSCompareAndSwap(0, 1, &vmtc_diagnosing)) {
8573 event->vmtc_undiagnosed = true;
8574 return NULL;
8575 }
8576
8577 /*
8578 * Get the contents of the corrupt page.
8579 */
8580 buffer = vmtc_get_page_data(code_addr, page);
8581 if (buffer == NULL) {
8582 event->vmtc_copyin_fail = true;
8583 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8584 panic("Bad compare and swap in setup!");
8585 }
8586 return NULL;
8587 }
8588 return buffer;
8589 }
8590
8591 /*
8592 * Diagnose the text page by comparing its contents with
8593 * the one we've previously saved.
8594 */
8595 static void
8596 vmtc_text_page_diagnose(
8597 vm_map_offset_t code_addr,
8598 uint64_t *old_code_buffer,
8599 CA_EVENT_TYPE(vmtc_telemetry) *event)
8600 {
8601 uint64_t *new_code_buffer;
8602 size_t size = MIN(vm_map_page_size(current_map()), PAGE_SIZE);
8603 uint_t count = (uint_t)size / sizeof(uint64_t);
8604 uint_t diff_count = 0;
8605 bool bit_flip = false;
8606 uint_t b;
8607 uint64_t *new;
8608 uint64_t *old;
8609
8610 new_code_buffer = kalloc_data(size, Z_WAITOK);
8611 assert(new_code_buffer != NULL);
8612 if (copyin((user_addr_t)vm_map_trunc_page(code_addr, size - 1), new_code_buffer, size) != 0) {
8613 /* copyin error, so undo things */
8614 event->vmtc_copyin_fail = true;
8615 goto done;
8616 }
8617
8618 new = new_code_buffer;
8619 old = old_code_buffer;
8620 for (; count-- > 0; ++new, ++old) {
8621 if (*new == *old) {
8622 continue;
8623 }
8624
8625 /*
8626 * On first diff, check for a single bit flip
8627 */
8628 if (diff_count == 0) {
8629 uint64_t x = (*new ^ *old);
8630 assert(x != 0);
8631 if ((x & (x - 1)) == 0) {
8632 bit_flip = true;
8633 ++diff_count;
8634 continue;
8635 }
8636 }
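/*
 * Illustrative example: if *old == 0x4000000000000000 and
 * *new == 0x4000000000100000, then x == 0x100000 and (x & (x - 1)) == 0,
 * i.e. exactly one bit differs, so this qualifies as a single bit flip
 * unless later words also differ.
 */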
8637
8638 /*
8639 * count up the number of different bytes.
8640 */
8641 for (b = 0; b < sizeof(uint64_t); ++b) {
8642 char *n = (char *)new;
8643 char *o = (char *)old;
8644 if (n[b] != o[b]) {
8645 ++diff_count;
8646 }
8647 }
8648 }
8649
8650 if (diff_count > 1) {
8651 bit_flip = false;
8652 }
8653
8654 if (diff_count == 0) {
8655 event->vmtc_not_found = true;
8656 } else {
8657 event->vmtc_num_byte = diff_count;
8658 }
8659 if (bit_flip) {
8660 event->vmtc_one_bit_flip = true;
8661 }
8662
8663 done:
8664 /*
8665 * Free up the code copy buffers, but save the last
8666 * set on development / debug kernels in case they
8667 * can provide evidence for debugging memory stomps.
8668 */
8669 #if DEVELOPMENT || DEBUG
8670 if (vmtc_last_before_buffer != NULL) {
8671 kmem_free(kernel_map, (vm_offset_t)vmtc_last_before_buffer, vmtc_last_buffer_size);
8672 }
8673 if (vmtc_last_after_buffer != NULL) {
8674 kfree_data(vmtc_last_after_buffer, vmtc_last_buffer_size);
8675 }
8676 vmtc_last_before_buffer = old_code_buffer;
8677 vmtc_last_after_buffer = new_code_buffer;
8678 vmtc_last_buffer_size = size;
8679 #else /* DEVELOPMENT || DEBUG */
8680 kfree_data(new_code_buffer, size);
8681 kmem_free(kernel_map, (vm_offset_t)old_code_buffer, size);
8682 #endif /* DEVELOPMENT || DEBUG */
8683
8684 /*
8685 * We're finished, so clear the diagnosing flag.
8686 */
8687 if (!OSCompareAndSwap(1, 0, &vmtc_diagnosing)) {
8688 panic("Bad compare and swap in diagnose!");
8689 }
8690 }
8691
8692 /*
8693 * For the given map and virtual address, find the object, offset, and page.
8694 * This has to look up the map entry, verify protections, and walk any shadow chains.
8695 * If found, returns with the object locked.
8696 */
8697 static kern_return_t
8698 vmtc_revalidate_lookup(
8699 vm_map_t map,
8700 vm_map_offset_t vaddr,
8701 vm_object_t *ret_object,
8702 vm_object_offset_t *ret_offset,
8703 vm_page_t *ret_page,
8704 vm_prot_t *ret_prot)
8705 {
8706 vm_object_t object;
8707 vm_object_offset_t offset;
8708 vm_page_t page;
8709 kern_return_t kr = KERN_SUCCESS;
8710 uint8_t object_lock_type = OBJECT_LOCK_EXCLUSIVE;
8711 vm_map_version_t version;
8712 boolean_t wired;
8713 struct vm_object_fault_info fault_info = {
8714 .interruptible = THREAD_UNINT
8715 };
8716 vm_map_t real_map = NULL;
8717 vm_prot_t prot;
8718 vm_object_t shadow;
8719
8720 vmlp_api_start(VMTC_REVALIDATE_LOOKUP);
8721
8722 /*
8723 * Find the object/offset for the given location/map.
8724 * Note this returns with the object locked.
8725 */
8726 restart:
8727 vm_map_lock_read(map);
8728 object = VM_OBJECT_NULL; /* in case we come around the restart path */
8729 kr = vm_map_lookup_and_lock_object(&map, vaddr, VM_PROT_READ,
8730 object_lock_type, &version, &object, &offset, &prot, &wired,
8731 &fault_info, &real_map, NULL);
8732 vm_map_unlock_read(map);
8733 if (real_map != NULL && real_map != map) {
8734 vm_map_unlock(real_map);
8735 }
8736
8737 /*
8738 * If there's no page here, fail.
8739 */
8740 if (kr != KERN_SUCCESS || object == NULL) {
8741 kr = KERN_FAILURE;
8742 goto done;
8743 }
8744
8745 /*
8746 * Chase down any shadow chains to find the actual page.
8747 */
8748 for (;;) {
8749 /*
8750 * See if the page is on the current object.
8751 */
8752 page = vm_page_lookup(object, vm_object_trunc_page(offset));
8753 if (page != NULL) {
8754 /* restart the lookup */
8755 if (page->vmp_restart) {
8756 vm_object_unlock(object);
8757 goto restart;
8758 }
8759
8760 /*
8761 * If this page is busy, we need to wait for it.
8762 */
8763 if (page->vmp_busy) {
8764 vm_page_sleep(object, page, THREAD_INTERRUPTIBLE, LCK_SLEEP_UNLOCK);
8765 goto restart;
8766 }
8767 break;
8768 }
8769
8770 /*
8771 * If the object doesn't have the page and
8772 * has no shadow, then we can quit.
8773 */
8774 shadow = object->shadow;
8775 if (shadow == NULL) {
8776 kr = KERN_FAILURE;
8777 goto done;
8778 }
8779
8780 /*
8781 * Move to the next object
8782 */
8783 offset += object->vo_shadow_offset;
8784 vm_object_lock(shadow);
8785 vm_object_unlock(object);
8786 object = shadow;
8787 shadow = VM_OBJECT_NULL;
8788 }
8789 *ret_object = object;
8790 *ret_offset = vm_object_trunc_page(offset);
8791 *ret_page = page;
8792 *ret_prot = prot;
8793
8794 done:
8795 if (kr != KERN_SUCCESS && object != NULL) {
8796 vm_object_unlock(object);
8797 }
8798 vmlp_api_end(VMTC_REVALIDATE_LOOKUP, kr);
8799 return kr;
8800 }
8801
8802 /*
8803 * Check whether a page is wired; this requires taking the page queues lock.
8804 */
8805 static bool
8806 is_page_wired(vm_page_t page)
8807 {
8808 bool result;
8809 vm_page_lock_queues();
8810 result = VM_PAGE_WIRED(page);
8811 vm_page_unlock_queues();
8812 return result;
8813 }
8814
8815 /*
8816 * A fatal process error has occurred in the given task.
8817 * Recheck the code signing of the text page at the given
8818 * address to check for a text page corruption.
8819 *
8820 * Returns KERN_FAILURE if a page was found to be corrupt
8821 * by failing to match its code signature. KERN_SUCCESS
8822 * means the page is either valid or we don't have the
8823 * information to say it's corrupt.
8824 */
8825 kern_return_t
8826 revalidate_text_page(task_t task, vm_map_offset_t code_addr)
8827 {
8828 kern_return_t kr;
8829 vm_map_t map;
8830 vm_object_t object = NULL;
8831 vm_object_offset_t offset;
8832 vm_page_t page = NULL;
8833 struct vnode *vnode;
8834 uint64_t *diagnose_buffer = NULL;
8835 CA_EVENT_TYPE(vmtc_telemetry) * event = NULL;
8836 ca_event_t ca_event = NULL;
8837 vm_prot_t prot;
8838
8839 map = task->map;
8840 if (task->map == NULL) {
8841 return KERN_SUCCESS;
8842 }
8843
8844 kr = vmtc_revalidate_lookup(map, code_addr, &object, &offset, &page, &prot);
8845 if (kr != KERN_SUCCESS) {
8846 goto done;
8847 }
8848
8849 /*
8850 * The page must be executable.
8851 */
8852 if (!(prot & VM_PROT_EXECUTE)) {
8853 goto done;
8854 }
8855
8856 /*
8857 * The object needs to have a pager.
8858 */
8859 if (object->pager == NULL) {
8860 goto done;
8861 }
8862
8863 /*
8864 * Needs to be a vnode backed page to have a signature.
8865 */
8866 vnode = vnode_pager_lookup_vnode(object->pager);
8867 if (vnode == NULL) {
8868 goto done;
8869 }
8870
8871 /*
8872 * Object checks to see if we should proceed.
8873 */
8874 if (!object->code_signed || /* no code signature to check */
8875 object->internal || /* internal objects aren't signed */
8876 object->terminating || /* the object and its pages are already going away */
	    !object->pager_ready) { /* this shouldn't happen, but the check doesn't hurt */
		goto done;
	}


	/*
	 * Check the code signature of the page in question.
	 */
	vm_page_map_and_validate_cs(object, page);

	/*
	 * At this point:
	 * vmp_cs_validated |= validated (set if a code signature exists)
	 * vmp_cs_tainted |= tainted (set if code signature violation)
	 * vmp_cs_nx |= nx; ??
	 *
	 * if vmp_pmapped then have to pmap_disconnect..
	 * other flags to check on object or page?
	 */
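	/*
	 * vmp_cs_tainted is a per-page bitmask (one bit per code-signing
	 * sub-page when the VM page is larger than the CS page size), so
	 * compare against VMP_CS_ALL_FALSE rather than treating it as a
	 * simple boolean.
	 */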
	if (page->vmp_cs_tainted != VMP_CS_ALL_FALSE) {
#if DEBUG || DEVELOPMENT
		/*
		 * On development builds, a boot-arg can be used to cause
		 * a panic, instead of a quiet repair.
		 */
		if (vmtc_panic_instead) {
			panic("Text page corruption detected: vm_page_t 0x%llx", (long long)(uintptr_t)page);
		}
#endif /* DEBUG || DEVELOPMENT */

		/*
		 * We're going to invalidate this page. Grab a copy of it for comparison.
		 */
		ca_event = CA_EVENT_ALLOCATE(vmtc_telemetry);
		event = ca_event->data;
		diagnose_buffer = vmtc_text_page_diagnose_setup(code_addr, page, event);

		/*
		 * Invalidate, i.e. toss, the corrupted page.
		 */
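		/*
		 * Only a clean, resident, unwired page can safely be freed;
		 * the next access will then fault a fresh copy back in from
		 * the pager. Anything else is left in place and reported as
		 * not eligible.
		 */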
		if (!page->vmp_cleaning &&
		    !page->vmp_laundry &&
		    !vm_page_is_fictitious(page) &&
		    !page->vmp_precious &&
		    !page->vmp_absent &&
		    !VMP_ERROR_GET(page) &&
		    !page->vmp_dirty &&
		    !is_page_wired(page)) {
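			/*
			 * pmap_disconnect() removes all existing mappings of
			 * the page and returns the hardware referenced/modified
			 * bits, which are folded back into the vm_page before
			 * deciding whether it is really still clean.
			 */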
			if (page->vmp_pmapped) {
				int refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
				if (refmod & VM_MEM_MODIFIED) {
					SET_PAGE_DIRTY(page, FALSE);
				}
				if (refmod & VM_MEM_REFERENCED) {
					page->vmp_reference = TRUE;
				}
			}
			/* If the page seems intentionally modified, don't trash it. */
			if (!page->vmp_dirty) {
				VM_PAGE_FREE(page);
			} else {
				event->vmtc_not_eligible = true;
			}
		} else {
			event->vmtc_not_eligible = true;
		}
		vm_object_unlock(object);
		object = VM_OBJECT_NULL;

		/*
		 * Now try to diagnose the type of failure by faulting
		 * in a new copy and diff'ing it with what we saved.
		 */
		if (diagnose_buffer != NULL) {
			vmtc_text_page_diagnose(code_addr, diagnose_buffer, event);
		}
#if DEBUG || DEVELOPMENT
		if (corruption_test_va != 0) {
			corruption_test_va = 0;
			event->vmtc_testing = true;
		}
#endif /* DEBUG || DEVELOPMENT */
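		/*
		 * Leave a triage breadcrumb for this thread and emit the
		 * CoreAnalytics event before reporting the corruption to
		 * the caller.
		 */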
		ktriage_record(thread_tid(current_thread()),
		    KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_TEXT_CORRUPTION),
		    0 /* arg */);
		CA_EVENT_SEND(ca_event);
		printf("Text page corruption detected for pid %d\n", proc_selfpid());
		++vmtc_total;
		return KERN_FAILURE; /* failure means we definitely found a corrupt page */
	}
done:
	if (object != NULL) {
		vm_object_unlock(object);
	}
	return KERN_SUCCESS;
}

#if DEBUG || DEVELOPMENT
/*
 * For implementing unit tests - ask the pmap to corrupt a text page.
 * We have to find the page, to get the physical address, then invoke
 * the pmap.
 */
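/*
 * Invoked via a debug/development sysctl (see the racing note below);
 * the extern declaration here simply gives the definition a visible
 * prototype.
 */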
extern kern_return_t vm_corrupt_text_addr(uintptr_t);

kern_return_t
vm_corrupt_text_addr(uintptr_t va)
{
	task_t task = current_task();
	vm_map_t map;
	kern_return_t kr = KERN_SUCCESS;
	vm_object_t object = VM_OBJECT_NULL;
	vm_object_offset_t offset;
	vm_page_t page = NULL;
	pmap_paddr_t pa;
	vm_prot_t prot;

	map = task->map;
	if (task->map == NULL) {
		printf("corrupt_text_addr: no map\n");
		return KERN_FAILURE;
	}

	kr = vmtc_revalidate_lookup(map, (vm_map_offset_t)va, &object, &offset, &page, &prot);
	if (kr != KERN_SUCCESS) {
		printf("corrupt_text_addr: page lookup failed\n");
		return kr;
	}
	if (!(prot & VM_PROT_EXECUTE)) {
		printf("corrupt_text_addr: page not executable\n");
		return KERN_FAILURE;
	}

	/* get the physical address to use */
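	/*
	 * ptoa() turns the physical page number into a byte address; adding
	 * (va - vm_object_trunc_page(va)) re-applies the offset of va within
	 * its page, so pa points at the exact byte the test will corrupt.
	 */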
	pa = ptoa(VM_PAGE_GET_PHYS_PAGE(page)) + (va - vm_object_trunc_page(va));

	/*
	 * Check we have something we can work with.
	 * Due to racing with pageout as we enter the sysctl,
	 * it's theoretically possible to have the page disappear, just
	 * before the lookup.
	 *
	 * That's highly likely to happen often. I've filed a radar 72857482
	 * to bubble up the error here to the sysctl result and have the
	 * test not FAIL in that case.
	 */
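	/*
	 * Each disqualifying condition is checked and logged individually so
	 * a failing run of the test shows exactly why the page was unusable.
	 */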
	if (page->vmp_busy) {
		printf("corrupt_text_addr: vmp_busy\n");
		kr = KERN_FAILURE;
	}
	if (page->vmp_cleaning) {
		printf("corrupt_text_addr: vmp_cleaning\n");
		kr = KERN_FAILURE;
	}
	if (page->vmp_laundry) {
		printf("corrupt_text_addr: vmp_laundry\n");
		kr = KERN_FAILURE;
	}
	if (vm_page_is_fictitious(page)) {
		printf("corrupt_text_addr: vmp_fictitious\n");
		kr = KERN_FAILURE;
	}
	if (page->vmp_precious) {
		printf("corrupt_text_addr: vmp_precious\n");
		kr = KERN_FAILURE;
	}
	if (page->vmp_absent) {
		printf("corrupt_text_addr: vmp_absent\n");
		kr = KERN_FAILURE;
	}
	if (VMP_ERROR_GET(page)) {
		printf("corrupt_text_addr: vmp_error\n");
		kr = KERN_FAILURE;
	}
	if (page->vmp_dirty) {
		printf("corrupt_text_addr: vmp_dirty\n");
		kr = KERN_FAILURE;
	}
	if (is_page_wired(page)) {
		printf("corrupt_text_addr: wired\n");
		kr = KERN_FAILURE;
	}
	if (!page->vmp_pmapped) {
		printf("corrupt_text_addr: !vmp_pmapped\n");
		kr = KERN_FAILURE;
	}

	if (kr == KERN_SUCCESS) {
		printf("corrupt_text_addr: using physaddr 0x%llx\n", (long long)pa);
		kr = pmap_test_text_corruption(pa);
		if (kr != KERN_SUCCESS) {
			printf("corrupt_text_addr: pmap error %d\n", kr);
		} else {
			corruption_test_va = va;
		}
	} else {
		printf("corrupt_text_addr: object %p\n", object);
		printf("corrupt_text_addr: offset 0x%llx\n", (uint64_t)offset);
		printf("corrupt_text_addr: va 0x%llx\n", (uint64_t)va);
		printf("corrupt_text_addr: vm_object_trunc_page(va) 0x%llx\n", (uint64_t)vm_object_trunc_page(va));
		printf("corrupt_text_addr: vm_page_t %p\n", page);
		printf("corrupt_text_addr: ptoa(PHYS_PAGE) 0x%llx\n", (uint64_t)ptoa(VM_PAGE_GET_PHYS_PAGE(page)));
		printf("corrupt_text_addr: using physaddr 0x%llx\n", (uint64_t)pa);
	}

	if (object != VM_OBJECT_NULL) {
		vm_object_unlock(object);
	}
	return kr;
}

#endif /* DEBUG || DEVELOPMENT */
