/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug_triage.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 *
 */
int
get_map_nentries(
	vm_map_t map)
{
	return map->hdr.nentries;
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
	return vm_map_first_entry(map)->vme_start;
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
	return vm_map_last_entry(map)->vme_end;
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	.memory_object_reference = vnode_pager_reference,
	.memory_object_deallocate = vnode_pager_deallocate,
	.memory_object_init = vnode_pager_init,
	.memory_object_terminate = vnode_pager_terminate,
	.memory_object_data_request = vnode_pager_data_request,
	.memory_object_data_return = vnode_pager_data_return,
	.memory_object_data_initialize = vnode_pager_data_initialize,
	.memory_object_map = vnode_pager_map,
	.memory_object_last_unmap = vnode_pager_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "vnode pager"
};

typedef struct vnode_pager {
	/* mandatory generic header */
	struct memory_object vn_pgr_hdr;

	/* pager-specific */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define vn_pgr_hdr_ref vn_pgr_hdr.mo_ref
#else
	os_ref_atomic_t vn_pgr_hdr_ref;
#endif
	struct vnode *vnode_handle; /* vnode handle */
} *vnode_pager_t;


kern_return_t
vnode_pager_cluster_read( /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write( /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create( /* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup( /* forward */
	memory_object_t);

struct vnode *
vnode_pager_lookup_vnode( /* forward */
	memory_object_t);

ZONE_DEFINE_TYPE(vnode_pager_zone, "vnode pager structures",
    struct vnode_pager, ZC_NOENCRYPT);

#define VNODE_PAGER_NULL ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT 1


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL 0xffffffff
#define PAGER_INIT 0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL)==LEVEL){printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

extern int proc_resetpcontrol(int);


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32

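/*
 * Copy data between the memory object's resident pages and the caller's
 * uio by way of uiomove64(), gathering pages in runs of up to MAX_RUN.
 * A hole in the cache (or a page we don't want to handle) terminates the
 * copy.  If "mark_dirty" is set the pages are dirtied (and code-signed
 * pages are forced to revalidate); if "take_reference" is set the pages
 * are moved to the tail of the inactive queue to approximate LRU.
 * Returns the error from uiomove64(), or 0.
 */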
int
memory_object_control_uiomove(
	memory_object_control_t control,
	memory_object_offset_t offset,
	void * uio,
	int start_offset,
	int io_requested,
	int mark_dirty,
	int take_reference)
{
	vm_object_t object;
	vm_page_t dst_page;
	int xsize;
	int retval = 0;
	int cur_run;
	int cur_needed;
	int i;
	int orig_offset;
	vm_page_t page_run[MAX_RUN];
	int dirty_count; /* keeps track of number of pages dirtied as part of this uiomove */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	dirty_count = 0;
	while (io_requested && retval == 0) {
		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN) {
			cur_needed = MAX_RUN;
		}

		for (cur_run = 0; cur_run < cur_needed;) {
			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
				break;
			}


			if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
				/*
				 * someone else is playing with the page...  if we've
				 * already collected pages into this run, go ahead and
				 * process them now, since we can't block on this page
				 * while holding other pages in the BUSY state...
				 * otherwise, wait for the page to become available
				 */
				if (cur_run) {
					break;
				}
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			if (dst_page->vmp_laundry) {
				vm_pageout_steal_laundry(dst_page, FALSE);
			}

			if (mark_dirty) {
				if (dst_page->vmp_dirty == FALSE) {
					dirty_count++;
				}
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->vmp_cs_validated &&
				    !dst_page->vmp_cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE;

					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}
			}
			dst_page->vmp_busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0) {
			/*
			 * we hit a 'hole' in the cache or a page we
			 * don't want to try to handle, so bail at this
			 * point; we'll unlock the object below
			 */
			break;
		}
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
				xsize = io_requested;
			}

			if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
				break;
			}

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {
			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++) {
				vm_page_lru(page_run[i]);
			}

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 *
			 */
			if (dst_page->vmp_clustered) {
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
			}

			PAGE_WAKEUP_DONE(dst_page);
		}
		orig_offset = 0;
	}
	if (object->pager) {
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
	}
	vm_object_unlock(object);
	return retval;
}


/*
 *
 */
memory_object_t
vnode_pager_setup(
	struct vnode *vp,
	__unused memory_object_t pager)
{
	vnode_pager_t vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL) {
		panic("vnode_pager_setup: vnode_object_create() failed");
	}
	return (memory_object_t)vnode_object;
}

/*
 *
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
	vnode_pager_t vnode_object;
	kern_return_t kr;
	memory_object_attr_info_data_t attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->vn_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("vnode_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	memory_object_cluster_size_t data_cnt,
	memory_object_offset_t *resid_offset,
	int *io_error,
	__unused boolean_t dirty,
	__unused boolean_t kernel_copy,
	int upl_flags)
{
	vnode_pager_t vnode_object;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

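/*
 * memory_object_data_initialize() is not expected for a vnode pager;
 * reaching this entry point is a fatal error.
 */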
kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

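/*
 * Notify the backing vnode that the range [s_offset, e_offset) of its
 * memory object has been dirtied.  No-op for non-vnode memory objects.
 */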
void
vnode_pager_dirtied(
	memory_object_t mem_obj,
	vm_object_offset_t s_offset,
	vm_object_offset_t e_offset)
{
	vnode_pager_t vnode_object;

	if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
		vnode_object = vnode_pager_lookup(mem_obj);
		vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
	}
}

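/*
 * Report whether the vnode backing this memory object is currently in
 * use.  Non-vnode pagers are reported as in use and the call fails with
 * KERN_INVALID_ARGUMENT.
 */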
kern_return_t
vnode_pager_get_isinuse(
	memory_object_t mem_obj,
	uint32_t *isinuse)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

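/*
 * Return the throttled I/O limit for the device backing this memory
 * object's vnode.
 */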
kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t mem_obj,
	uint32_t *limit)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

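/*
 * Report whether the vnode backing this memory object lives on
 * solid-state storage.
 */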
kern_return_t
vnode_pager_get_isSSD(
	memory_object_t mem_obj,
	boolean_t *isSSD)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

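/*
 * Return the current size of the file backing this memory object.
 */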
kern_return_t
vnode_pager_get_object_size(
	memory_object_t mem_obj,
	memory_object_offset_t *length)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

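/*
 * Retrieve the path name and file name of the vnode backing this
 * memory object; *truncated_path_p reports whether the returned path
 * had to be truncated to fit the caller's buffer.
 */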
kern_return_t
vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char *pathname,
	vm_size_t pathname_len,
	char *filename,
	vm_size_t filename_len,
	boolean_t *truncated_path_p)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
	           pathname,
	           pathname_len,
	           filename,
	           filename_len,
	           truncated_path_p);
}

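/*
 * Return the modification time of the backing vnode, along with its
 * code-signing modification time (cs_mtime).
 */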
kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime)
{
	vnode_pager_t vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
	           mtime,
	           cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	int optype)
{
	vnode_pager_t vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 *
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t mem_obj,
	memory_object_offset_t offset,
	__unused memory_object_cluster_size_t length,
	__unused vm_prot_t desired_access,
	memory_object_fault_info_t fault_info)
{
	vnode_pager_t vnode_object;
	memory_object_offset_t base_offset;
	vm_size_t size;
	uint32_t io_streaming = 0;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
	    &base_offset, &size, &io_streaming,
	    fault_info) != KERN_SUCCESS) {
		size = PAGE_SIZE;
	}

	assert(offset >= base_offset &&
	    offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 *
 */
void
vnode_pager_reference(
	memory_object_t mem_obj)
{
	vnode_pager_t vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	os_ref_retain_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
}

/*
 *
 */
void
vnode_pager_deallocate(
	memory_object_t mem_obj)
{
	vnode_pager_t vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (os_ref_release_raw(&vnode_object->vn_pgr_hdr_ref, NULL) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
}

/*
 *
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 *
 */
kern_return_t
vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t prot)
{
	vnode_pager_t vnode_object;
	int ret;
	kern_return_t kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

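/*
 * Called when the last mapping of this memory object goes away;
 * tells UBC so it can drop its mapping reference on the vnode.
 */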
kern_return_t
vnode_pager_last_unmap(
	memory_object_t mem_obj)
{
	vnode_pager_t vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}



/*
 *
 */
void
vnode_pager_cluster_write(
	vnode_pager_t vnode_object,
	vm_object_offset_t offset,
	vm_size_t cnt,
	vm_object_offset_t * resid_offset,
	int * io_error,
	int upl_flags)
{
	vm_size_t size;
	int errno;

	if (upl_flags & UPL_MSYNC) {
		upl_flags |= UPL_VNODE_PAGER;

		if ((upl_flags & UPL_IOSYNC) && io_error) {
			upl_flags |= UPL_KEEPCACHED;
		}

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
			    NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ((upl_flags & UPL_KEEPCACHED)) {
				if ((*io_error = errno)) {
					break;
				}
			}
			cnt -= size;
			offset += size;
		}
		if (resid_offset) {
			*resid_offset = offset;
		}
	} else {
		vm_object_offset_t vnode_size;
		vm_object_offset_t base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * and then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size) {
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
			}
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
		    NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
		    (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}


/*
 *
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t vnode_object,
	vm_object_offset_t base_offset,
	vm_object_offset_t offset,
	uint32_t io_streaming,
	vm_size_t cnt)
{
	int local_error = 0;
	int kret;
	int flags = 0;

	assert(!(cnt & PAGE_MASK));

	if (io_streaming) {
		flags |= UPL_IOSTREAMING;
	}

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
	    (upl_t) NULL,
	    (upl_offset_t) (offset - base_offset),
	    base_offset,
	    (upl_size_t) cnt,
	    flags,
	    &local_error);
	/*
	 * This check should read "if (kret == PAGER_ABSENT)": 1 corresponds
	 * to PAGER_ABSENT, defined in bsd/vm/vm_pager.h.  However, we should
	 * not include that file here; it would be a layering violation.
	 */
	if (kret == 1) {
		int uplflags;
		upl_t upl = NULL;
		unsigned int count = 0;
		kern_return_t kr;

		uplflags = (UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE |
		    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
		    base_offset, (upl_size_t) cnt,
		    &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example). Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_VNODEPAGER_CLREAD_NO_UPL), 0 /* arg */);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 *
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	vnode_pager_t vnode_object;

	vnode_object = zalloc_flags(vnode_pager_zone, Z_WAITOK | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
	vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
	vnode_object->vnode_handle = vp;

	return vnode_object;
}

/*
 *
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t name)
{
	vnode_pager_t vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
	return vnode_object;
}


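/*
 * Like vnode_pager_lookup(), but tolerant of non-vnode pagers:
 * returns the backing vnode, or NULL if "name" is not a vnode pager.
 */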
struct vnode *
vnode_pager_lookup_vnode(
	memory_object_t name)
{
	vnode_pager_t vnode_object;
	vnode_object = (vnode_pager_t)name;
	if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
		return vnode_object->vnode_handle;
	} else {
		return NULL;
	}
}

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr(vm_map_entry_t entry, uintptr_t *vnodeaddr, uint32_t *vid);

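/*
 * Fill in proc_regioninfo for the region containing (or following)
 * address "arg" in the task's map.  Returns 1 if a region was found and
 * 0 otherwise; if the region is vnode-backed, the vnode's address and
 * vid are returned through "vnodeaddr" and "vid".
 */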
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;
	vm_map_offset_t start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;
	boolean_t do_region_footprint;
	int effective_page_shift, effective_page_size;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}

	effective_page_shift = vm_self_region_page_shift(map);
	effective_page_size = (1 << effective_page_shift);

	vm_map_reference(map);
	task_unlock(task);

	do_region_footprint = task_self_region_footprint();

	vm_map_lock_read(map);

	start = address;

	if (!vm_map_lookup_entry_allow_pgz(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			if (do_region_footprint &&
			    address == tmp_entry->vme_end) {
				ledger_amount_t ledger_resident;
				ledger_amount_t ledger_compressed;

				/*
				 * This request is right after the last valid
				 * memory region; instead of reporting the
				 * end of the address space, report a fake
				 * memory region to account for non-volatile
				 * purgeable and/or ledger-tagged memory
				 * owned by this task.
				 */
				task_ledgers_footprint(task->ledger,
				    &ledger_resident,
				    &ledger_compressed);
				if (ledger_resident + ledger_compressed == 0) {
					/* nothing to report */
					vm_map_unlock_read(map);
					vm_map_deallocate(map);
					return 0;
				}

				/* provide fake region for purgeable */
				pinfo->pri_offset = address;
				pinfo->pri_protection = VM_PROT_DEFAULT;
				pinfo->pri_max_protection = VM_PROT_DEFAULT;
				pinfo->pri_inheritance = VM_INHERIT_NONE;
				pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
				pinfo->pri_user_wired_count = 0;
				pinfo->pri_user_tag = -1;
				pinfo->pri_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out =
				    (uint32_t) (ledger_compressed / effective_page_size);
				pinfo->pri_pages_dirtied =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_ref_count = 1;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = SM_PRIVATE;
				pinfo->pri_private_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
				pinfo->pri_address = address;
				pinfo->pri_size =
				    (uint64_t) (ledger_resident + ledger_compressed);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared) {
			pinfo->pri_flags |= PROC_REGION_SHARED;
		}
	}


	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) {
		extended.share_mode = SM_PRIVATE;
	}

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);


	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 1;
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 1;
}

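/*
 * Like fill_procregioninfo(), but keeps walking forward from "arg"
 * until it finds a vnode-backed mapping; page statistics are not
 * computed for the region it reports.
 */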
int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry_allow_pgz(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared) {
					pinfo->pri_flags |= PROC_REGION_SHARED;
				}

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 0;
}

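/*
 * Starting at "offset", find the first vnode-backed region in the
 * task's map and return its vnode, vid, start address and length.
 * Returns 1 on success, 0 if no such region exists.
 */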
int
find_region_details(task_t task, vm_map_offset_t offset,
    uintptr_t *vnodeaddr, uint32_t *vid,
    uint64_t *start, uint64_t *len)
{
	vm_map_t map;
	vm_map_entry_t tmp_entry, entry;
	int rc = 0;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry_allow_pgz(map, offset, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			rc = 0;
			goto ret;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;
		*start = 0;
		*len = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
				*start = entry->vme_start;
				*len = entry->vme_end - entry->vme_start;
				rc = 1;
				goto ret;
			}
		}

		entry = entry->vme_next;
	}

ret:
	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return rc;
}

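/*
 * Walk to the bottom of the shadow chain for this map entry and, if the
 * terminal object is paged by the vnode pager, return the vnode's
 * address and vid.  Returns 1 on success, 0 otherwise.
 */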
static int
fill_vnodeinfoforaddr(
	vm_map_entry_t entry,
	uintptr_t * vnodeaddr,
	uint32_t * vid)
{
	vm_object_t top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;
	kern_return_t kr;
	int shadow_depth;


	if (entry->is_sub_map) {
		return 0;
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
			for (object = top_object, shadow_depth = 0;
			    object->shadow != VM_OBJECT_NULL;
			    object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return 0;
	} else if (object->internal) {
		vm_object_unlock(object);
		return 0;
	} else if (!object->pager_ready ||
	    object->terminating ||
	    !object->alive ||
	    object->pager == NULL) {
		vm_object_unlock(object);
		return 0;
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return 0;
			}
		} else {
			vm_object_unlock(object);
			return 0;
		}
	}
	vm_object_unlock(object);
	return 1;
}

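/*
 * Return the vnode (and its vid) backing this vnode pager, or
 * KERN_FAILURE if the pager has already dropped its vnode.
 */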
kern_return_t
vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t * vnodeaddr,
	uint32_t * vid)
{
	vnode_pager_t vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t mem_obj,
	uintptr_t *devvp)
{
	struct vnode *vp;
	uint32_t vid;

	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
		return KERN_FAILURE;
	}
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry.  If found, return with the
 * object locked, otherwise return VM_OBJECT_NULL with nothing locked.
 */

vm_object_t
find_vnode_object(
	vm_map_entry_t entry
	)
{
	vm_object_t top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;

	if (!entry->is_sub_map) {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */

		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object &&
			    !object->internal &&
			    object->pager_ready &&
			    !object->terminating &&
			    object->alive &&
			    object->pager != NULL) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for.  Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return VM_OBJECT_NULL.
				 */

				if (pager_ops == &vnode_pager_ops) {
					return object;  /* we return with the object locked */
				}
			}

			vm_object_unlock(object);
		}
	}

	return VM_OBJECT_NULL;
}
