1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
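/*
 * Glue between the BSD layer and the Mach VM subsystem: the vnode pager
 * (a memory object backed by a vnode) and the proc_info region-inspection
 * helpers built on top of it.
 */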

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>

#include <sys/kdebug_triage.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
        vm_map_t map)
{
        return map->hdr.nentries;
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
        .memory_object_reference = vnode_pager_reference,
        .memory_object_deallocate = vnode_pager_deallocate,
        .memory_object_init = vnode_pager_init,
        .memory_object_terminate = vnode_pager_terminate,
        .memory_object_data_request = vnode_pager_data_request,
        .memory_object_data_return = vnode_pager_data_return,
        .memory_object_data_initialize = vnode_pager_data_initialize,
        .memory_object_map = vnode_pager_map,
        .memory_object_last_unmap = vnode_pager_last_unmap,
        .memory_object_backing_object = NULL,
        .memory_object_pager_name = "vnode pager"
};

typedef struct vnode_pager {
        /* mandatory generic header */
        struct memory_object vn_pgr_hdr;

        /* pager-specific */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define vn_pgr_hdr_ref vn_pgr_hdr.mo_ref
#else
        os_ref_atomic_t vn_pgr_hdr_ref;
#endif
        struct vnode *vnode_handle; /* vnode handle */
} *vnode_pager_t;
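
/*
 * Note: when the generic memory_object header carries its own refcount
 * (MEMORY_OBJECT_HAS_REFCOUNT), vn_pgr_hdr_ref above aliases
 * vn_pgr_hdr.mo_ref; otherwise the pager keeps a private os_ref_atomic_t.
 */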


kern_return_t
vnode_pager_cluster_read(       /* forward */
        vnode_pager_t,
        vm_object_offset_t,
        vm_object_offset_t,
        uint32_t,
        vm_size_t);

void
vnode_pager_cluster_write(      /* forward */
        vnode_pager_t,
        vm_object_offset_t,
        vm_size_t,
        vm_object_offset_t *,
        int *,
        int);


vnode_pager_t
vnode_object_create(    /* forward */
        struct vnode *);

vnode_pager_t
vnode_pager_lookup(     /* forward */
        memory_object_t);

struct vnode *
vnode_pager_lookup_vnode(       /* forward */
        memory_object_t);

ZONE_DEFINE_TYPE(vnode_pager_zone, "vnode pager structures",
    struct vnode_pager, ZC_NOENCRYPT);

#define VNODE_PAGER_NULL ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT 1


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL 0xffffffff
#define PAGER_INIT 0x00000001
#define PAGER_PAGEIN 0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & (LEVEL)) == (LEVEL)) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

extern int proc_resetpcontrol(int);


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32
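/* MAX_RUN: upper bound on the number of pages gathered per batch below */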

int
memory_object_control_uiomove(
        memory_object_control_t control,
        memory_object_offset_t offset,
        void * uio,
        int start_offset,
        int io_requested,
        int mark_dirty,
        int take_reference)
{
        vm_object_t object;
        vm_page_t dst_page;
        int xsize;
        int retval = 0;
        int cur_run;
        int cur_needed;
        int i;
        int orig_offset;
        vm_page_t page_run[MAX_RUN];
        int dirty_count; /* keeps track of number of pages dirtied as part of this uiomove */

        object = memory_object_control_to_vm_object(control);
        if (object == VM_OBJECT_NULL) {
                return 0;
        }
        assert(!object->internal);

        vm_object_lock(object);

        if (mark_dirty && object->vo_copy != VM_OBJECT_NULL) {
                /*
                 * We can't modify the pages without honoring
                 * copy-on-write obligations first, so bail out of
                 * this optimized path and let the caller take the
                 * regular path.
                 */
                vm_object_unlock(object);
                return 0;
        }
        orig_offset = start_offset;

        dirty_count = 0;
        while (io_requested && retval == 0) {
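                /* pages spanned by what's left of the request, capped at MAX_RUN below */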
                cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

                if (cur_needed > MAX_RUN) {
                        cur_needed = MAX_RUN;
                }

                for (cur_run = 0; cur_run < cur_needed;) {
                        if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
                                break;
                        }


                        if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
                                /*
                                 * someone else is playing with the page... if we've
                                 * already collected pages into this run, go ahead
                                 * and process them now since we can't block on this
                                 * page while holding other pages in the BUSY state;
                                 * otherwise, wait for the page
                                 */
                                if (cur_run) {
                                        break;
                                }
                                vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
                                continue;
                        }
                        if (dst_page->vmp_laundry) {
                                vm_pageout_steal_laundry(dst_page, FALSE);
                        }

                        if (mark_dirty) {
                                if (dst_page->vmp_dirty == FALSE) {
                                        dirty_count++;
                                }
                                SET_PAGE_DIRTY(dst_page, FALSE);
                                if (dst_page->vmp_cs_validated &&
                                    !dst_page->vmp_cs_tainted) {
                                        /*
                                         * CODE SIGNING:
                                         * We're modifying a code-signed
                                         * page: force revalidation
                                         */
                                        dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE;

                                        VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

                                        pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
                                }
                        }
                        dst_page->vmp_busy = TRUE;

                        page_run[cur_run++] = dst_page;

                        offset += PAGE_SIZE_64;
                }
                if (cur_run == 0) {
                        /*
                         * we hit a 'hole' in the cache or a page we
                         * don't want to try to handle, so bail at this
                         * point; we'll unlock the object below
                         */
                        break;
                }
                vm_object_unlock(object);

                for (i = 0; i < cur_run; i++) {
                        dst_page = page_run[i];

                        if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
                                xsize = io_requested;
                        }

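                        /* copy between the page's physical address and the caller's uio buffer */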
                        if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
                                break;
                        }

                        io_requested -= xsize;
                        start_offset = 0;
                }
                vm_object_lock(object);

                /*
                 * if we have more than 1 page to work on
                 * in the current run, or the original request
                 * started at offset 0 of the page, or we're
                 * processing multiple batches, we will move
                 * the pages to the tail of the inactive queue
                 * to implement an LRU for read/write accesses
                 *
                 * the check for orig_offset == 0 is there to
                 * mitigate the cost of small (< page_size) requests
                 * to the same page (this way we only move it once)
                 */
                if (take_reference && (cur_run > 1 || orig_offset == 0)) {
                        vm_page_lockspin_queues();

                        for (i = 0; i < cur_run; i++) {
                                vm_page_lru(page_run[i]);
                        }

                        vm_page_unlock_queues();
                }
                for (i = 0; i < cur_run; i++) {
                        dst_page = page_run[i];

                        /*
                         * someone is explicitly referencing this page...
                         * update clustered and speculative state
                         */
                        if (dst_page->vmp_clustered) {
                                VM_PAGE_CONSUME_CLUSTERED(dst_page);
                        }

                        vm_page_wakeup_done(object, dst_page);
                }
                orig_offset = 0;
        }
        vm_object_unlock(object);
        return retval;
}


bool
memory_object_is_vnode_pager(
        memory_object_t mem_obj)
{
        if (mem_obj != NULL &&
            mem_obj->mo_pager_ops == &vnode_pager_ops) {
                return true;
        }
        return false;
}

/*
 * Create a vnode pager (memory object) for the given vnode.
 */
memory_object_t
vnode_pager_setup(
        struct vnode *vp,
        __unused memory_object_t pager)
{
        vnode_pager_t vnode_object;

        vnode_object = vnode_object_create(vp);
        if (vnode_object == VNODE_PAGER_NULL) {
                panic("vnode_pager_setup: vnode_object_create() failed");
        }
        return (memory_object_t)vnode_object;
}

/*
 * Attach the pager to its memory object control port and set the
 * object's caching attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
        memory_object_control_t control,
#if !DEBUG
        __unused
#endif
        memory_object_cluster_size_t pg_size)
{
        vnode_pager_t vnode_object;
        kern_return_t kr;
        memory_object_attr_info_data_t attributes;


        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

        if (control == MEMORY_OBJECT_CONTROL_NULL) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        memory_object_control_reference(control);

        vnode_object->vn_pgr_hdr.mo_control = control;

        attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
        /* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT)); */
        attributes.cluster_size = (1 << (PAGE_SHIFT));
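        /* the cluster size is fixed at a single page for now; see the CLUSTER_SHIFT TODO above */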
        attributes.may_cache_object = TRUE;
        attributes.temporary = TRUE;

        kr = memory_object_change_attributes(
                control,
                MEMORY_OBJECT_ATTRIBUTE_INFO,
                (memory_object_info_t) &attributes,
                MEMORY_OBJECT_ATTR_INFO_COUNT);
        if (kr != KERN_SUCCESS) {
                panic("vnode_pager_init: memory_object_change_attributes() failed");
        }

        return KERN_SUCCESS;
}

/*
 * Page out a range of dirty data: hand it to the cluster-write path.
 */
kern_return_t
vnode_pager_data_return(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        memory_object_cluster_size_t data_cnt,
        memory_object_offset_t *resid_offset,
        int *io_error,
        __unused boolean_t dirty,
        __unused boolean_t kernel_copy,
        int upl_flags)
{
        vnode_pager_t vnode_object;

        assertf(page_aligned(offset), "offset 0x%llx\n", offset);

        vnode_object = vnode_pager_lookup(mem_obj);

        vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

        return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
        __unused memory_object_t mem_obj,
        __unused memory_object_offset_t offset,
        __unused memory_object_cluster_size_t data_cnt)
{
        panic("vnode_pager_data_initialize");
        return KERN_FAILURE;
}

void
vnode_pager_dirtied(
        memory_object_t mem_obj,
        vm_object_offset_t s_offset,
        vm_object_offset_t e_offset)
{
        vnode_pager_t vnode_object;

        if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
                vnode_object = vnode_pager_lookup(mem_obj);
                vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
        }
}

kern_return_t
vnode_pager_get_isinuse(
        memory_object_t mem_obj,
        uint32_t *isinuse)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                *isinuse = 1;
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        *isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_throttle_io_limit(
        memory_object_t mem_obj,
        uint32_t *limit)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        (void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
        return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_isSSD(
        memory_object_t mem_obj,
        boolean_t *isSSD)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        *isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}

#if FBDP_DEBUG_OBJECT_NO_PAGER
kern_return_t
vnode_pager_get_forced_unmount(
        memory_object_t mem_obj,
        bool *forced_unmount)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        *forced_unmount = vnode_pager_forced_unmount(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

kern_return_t
vnode_pager_get_object_size(
        memory_object_t mem_obj,
        memory_object_offset_t *length)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                *length = 0;
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        *length = vnode_pager_get_filesize(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_name(
        memory_object_t mem_obj,
        char *pathname,
        vm_size_t pathname_len,
        char *filename,
        vm_size_t filename_len,
        boolean_t *truncated_path_p)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        return vnode_pager_get_name(vnode_object->vnode_handle,
                   pathname,
                   pathname_len,
                   filename,
                   filename_len,
                   truncated_path_p);
}

kern_return_t
vnode_pager_get_object_mtime(
        memory_object_t mem_obj,
        struct timespec *mtime,
        struct timespec *cs_mtime)
{
        vnode_pager_t vnode_object;

        if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);

        return vnode_pager_get_mtime(vnode_object->vnode_handle,
                   mtime,
                   cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        int optype)
{
        vnode_pager_t vnode_object;

        if (mem_obj == MEMORY_OBJECT_NULL ||
            mem_obj->mo_pager_ops != &vnode_pager_ops) {
                return KERN_INVALID_ARGUMENT;
        }

        vnode_object = vnode_pager_lookup(mem_obj);
        return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * Page in data for a fault: size a cluster around the faulting offset
 * and hand it to the cluster-read path.
 */
kern_return_t
vnode_pager_data_request(
        memory_object_t mem_obj,
        memory_object_offset_t offset,
        __unused memory_object_cluster_size_t length,
        __unused vm_prot_t desired_access,
        memory_object_fault_info_t fault_info)
{
        vnode_pager_t vnode_object;
        memory_object_offset_t base_offset;
        vm_size_t size;
        uint32_t io_streaming = 0;

        assertf(page_aligned(offset), "offset 0x%llx\n", offset);

        vnode_object = vnode_pager_lookup(mem_obj);

        size = MAX_UPL_TRANSFER_BYTES;
        base_offset = offset;
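
        /*
         * Let the VM layer size the cluster around the faulting offset;
         * if it can't, fall back to reading a single page.
         */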
        if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
            &base_offset, &size, &io_streaming,
            fault_info) != KERN_SUCCESS) {
                size = PAGE_SIZE;
        }

        assert(offset >= base_offset &&
            offset < base_offset + size);

        return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
        memory_object_t mem_obj)
{
        vnode_pager_t vnode_object;

        vnode_object = vnode_pager_lookup(mem_obj);
        os_ref_retain_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
}

/*
 * Release a reference on the pager; on the last release, drop the
 * vnode reference and free the pager structure.
 */
void
vnode_pager_deallocate(
        memory_object_t mem_obj)
{
        vnode_pager_t vnode_object;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

        vnode_object = vnode_pager_lookup(mem_obj);

        if (os_ref_release_raw(&vnode_object->vn_pgr_hdr_ref, NULL) == 0) {
                if (vnode_object->vnode_handle != NULL) {
                        vnode_pager_vrele(vnode_object->vnode_handle);
                }
                zfree(vnode_pager_zone, vnode_object);
        }
}

/*
 * Terminate the memory object; there is nothing to do here beyond the
 * debug trace.
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
        __unused
#endif
        memory_object_t mem_obj)
{
        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

        return KERN_SUCCESS;
}

/*
 * The memory object is being mapped: let UBC know.
 */
kern_return_t
vnode_pager_map(
        memory_object_t mem_obj,
        vm_prot_t prot)
{
        vnode_pager_t vnode_object;
        int ret;
        kern_return_t kr;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

        vnode_object = vnode_pager_lookup(mem_obj);

        ret = ubc_map(vnode_object->vnode_handle, prot);

        if (ret != 0) {
                kr = KERN_FAILURE;
        } else {
                kr = KERN_SUCCESS;
        }

        return kr;
}

kern_return_t
vnode_pager_last_unmap(
        memory_object_t mem_obj)
{
        vnode_pager_t vnode_object;

        PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

        vnode_object = vnode_pager_lookup(mem_obj);

        ubc_unmap(vnode_object->vnode_handle);
        return KERN_SUCCESS;
}



/*
 * Push a range of dirty pages to the vnode, honoring msync semantics
 * when requested.
 */
void
vnode_pager_cluster_write(
        vnode_pager_t vnode_object,
        vm_object_offset_t offset,
        vm_size_t cnt,
        vm_object_offset_t *resid_offset,
        int *io_error,
        int upl_flags)
{
        vm_size_t size;
        int errno;

        if (upl_flags & UPL_MSYNC) {
                upl_flags |= UPL_VNODE_PAGER;

                if ((upl_flags & UPL_IOSYNC) && io_error) {
                        upl_flags |= UPL_KEEPCACHED;
                }

                while (cnt) {
                        size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

                        assert((upl_size_t) size == size);
                        vnode_pageout(vnode_object->vnode_handle,
                            NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

                        if ((upl_flags & UPL_KEEPCACHED)) {
                                if ((*io_error = errno)) {
                                        break;
                                }
                        }
                        cnt -= size;
                        offset += size;
                }
                if (resid_offset) {
                        *resid_offset = offset;
                }
        } else {
                vm_object_offset_t vnode_size;
                vm_object_offset_t base_offset;

                /*
                 * this is the pageout path
                 */
                vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

                if (vnode_size > (offset + PAGE_SIZE)) {
                        /*
                         * preset the maximum size of the cluster
                         * and put us on a nice cluster boundary...
                         * then clip the size to ensure we don't
                         * request past the end of the underlying file
                         */
                        size = MAX_UPL_TRANSFER_BYTES;
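                        /* put the start of the cluster on a size-aligned (MAX_UPL_TRANSFER_BYTES) boundary */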
                        base_offset = offset & ~((signed)(size - 1));

                        if ((base_offset + size) > vnode_size) {
                                size = round_page(((vm_size_t)(vnode_size - base_offset)));
                        }
                } else {
                        /*
                         * we've been requested to page out a page beyond the current
                         * end of the 'file'... don't try to cluster in this case...
                         * we still need to send this page through because it might
                         * be marked precious and the underlying filesystem may need
                         * to do something with it (besides page it out)...
                         */
                        base_offset = offset;
                        size = PAGE_SIZE;
                }
                assert((upl_size_t) size == size);
                vnode_pageout(vnode_object->vnode_handle,
                    NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
                    (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
        }
}


/*
 * Read a cluster of pages from the vnode.
 */
kern_return_t
vnode_pager_cluster_read(
        vnode_pager_t vnode_object,
        vm_object_offset_t base_offset,
        vm_object_offset_t offset,
        uint32_t io_streaming,
        vm_size_t cnt)
{
        int local_error = 0;
        int kret;
        int flags = 0;

        assert(!(cnt & PAGE_MASK));

        if (io_streaming) {
                flags |= UPL_IOSTREAMING;
        }

        assert((upl_size_t) cnt == cnt);
        kret = vnode_pagein(vnode_object->vnode_handle,
            (upl_t) NULL,
            (upl_offset_t) (offset - base_offset),
            base_offset,
            (upl_size_t) cnt,
            flags,
            &local_error);
        /*
         * Ideally this would test (kret == PAGER_ABSENT), but PAGER_ABSENT
         * is defined in bsd/vm/vm_pager_xnu.h and including that file here
         * would be a layering violation; its value is 1.
         */
        if (kret == 1) {
                int uplflags;
                upl_t upl = NULL;
                unsigned int count = 0;
                kern_return_t kr;

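                /*
                 * The pagein failed (PAGER_ABSENT): build a UPL over the
                 * range and immediately abort it, presumably to clean up
                 * any pages left busy by the failed pagein, before
                 * failing the fault.
                 */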
                uplflags = (UPL_NO_SYNC |
                    UPL_CLEAN_IN_PLACE |
                    UPL_SET_INTERNAL);
                count = 0;
                assert((upl_size_t) cnt == cnt);
                kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
                    base_offset, (upl_size_t) cnt,
                    &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
                if (kr == KERN_SUCCESS) {
                        upl_abort(upl, 0);
                        upl_deallocate(upl);
                } else {
                        /*
                         * We couldn't gather the page list, probably
                         * because the memory object doesn't have a link
                         * to a VM object anymore (forced unmount, for
                         * example). Just return an error to the vm_fault()
                         * path and let it handle it.
                         */
                }

                ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_VNODEPAGER_CLREAD_NO_UPL), 0 /* arg */);
                return KERN_FAILURE;
        }

        return KERN_SUCCESS;
}

/*
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
        struct vnode *vp)
{
        vnode_pager_t vnode_object;

        vnode_object = zalloc_flags(vnode_pager_zone, Z_WAITOK | Z_NOFAIL);

        /*
         * The vm_map call takes both named entry ports and raw memory
         * objects in the same parameter. We need to make sure that
         * vm_map does not see this object as a named entry port. So,
         * we reserve the first word in the object for a fake ip_kotype
         * setting - that will tell vm_map to use it as a memory object.
         */
        vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
        vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
        vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

        os_ref_init_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
        vnode_object->vnode_handle = vp;

        return vnode_object;
}

/*
 * Convert a memory object back to its vnode pager, asserting that it
 * really is one.
 */
vnode_pager_t
vnode_pager_lookup(
        memory_object_t name)
{
        vnode_pager_t vnode_object;

        vnode_object = (vnode_pager_t)name;
        assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
        return vnode_object;
}


struct vnode *
vnode_pager_lookup_vnode(
        memory_object_t name)
{
        vnode_pager_t vnode_object;
        vnode_object = (vnode_pager_t)name;
        if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
                return vnode_object->vnode_handle;
        } else {
                return NULL;
        }
}

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr(vm_map_entry_t entry, uintptr_t *vnodeaddr, uint32_t *vid, bool *is_map_shared);

int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
        vm_map_t map;
        vm_map_offset_t address = (vm_map_offset_t)arg;
        vm_map_entry_t tmp_entry;
        vm_map_entry_t entry;
        vm_map_offset_t start;
        vm_region_extended_info_data_t extended;
        vm_region_top_info_data_t top;
        boolean_t do_region_footprint;
        int effective_page_shift, effective_page_size;

        task_lock(task);
        map = task->map;
        if (map == VM_MAP_NULL) {
                task_unlock(task);
                return 0;
        }

        effective_page_shift = vm_self_region_page_shift(map);
        effective_page_size = (1 << effective_page_shift);

        vm_map_reference(map);
        task_unlock(task);

        do_region_footprint = task_self_region_footprint();

        vm_map_lock_read(map);

        start = address;

        if (!vm_map_lookup_entry_allow_pgz(map, start, &tmp_entry)) {
                if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                        if (do_region_footprint &&
                            address == tmp_entry->vme_end) {
                                ledger_amount_t ledger_resident;
                                ledger_amount_t ledger_compressed;

                                /*
                                 * This request is right after the last valid
                                 * memory region; instead of reporting the
                                 * end of the address space, report a fake
                                 * memory region to account for non-volatile
                                 * purgeable and/or ledger-tagged memory
                                 * owned by this task.
                                 */
                                task_ledgers_footprint(task->ledger,
                                    &ledger_resident,
                                    &ledger_compressed);
                                if (ledger_resident + ledger_compressed == 0) {
                                        /* nothing to report */
                                        vm_map_unlock_read(map);
                                        vm_map_deallocate(map);
                                        return 0;
                                }

                                /* provide fake region for purgeable */
                                pinfo->pri_offset = address;
                                pinfo->pri_protection = VM_PROT_DEFAULT;
                                pinfo->pri_max_protection = VM_PROT_DEFAULT;
                                pinfo->pri_inheritance = VM_INHERIT_NONE;
                                pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
                                pinfo->pri_user_wired_count = 0;
                                pinfo->pri_user_tag = -1;
                                pinfo->pri_pages_resident =
                                    (uint32_t) (ledger_resident / effective_page_size);
                                pinfo->pri_pages_shared_now_private = 0;
                                pinfo->pri_pages_swapped_out =
                                    (uint32_t) (ledger_compressed / effective_page_size);
                                pinfo->pri_pages_dirtied =
                                    (uint32_t) (ledger_resident / effective_page_size);
                                pinfo->pri_ref_count = 1;
                                pinfo->pri_shadow_depth = 0;
                                pinfo->pri_share_mode = SM_PRIVATE;
                                pinfo->pri_private_pages_resident =
                                    (uint32_t) (ledger_resident / effective_page_size);
                                pinfo->pri_shared_pages_resident = 0;
                                pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
                                pinfo->pri_address = address;
                                pinfo->pri_size =
                                    (uint64_t) (ledger_resident + ledger_compressed);
                                pinfo->pri_depth = 0;

                                vm_map_unlock_read(map);
                                vm_map_deallocate(map);
                                return 1;
                        }
                        vm_map_unlock_read(map);
                        vm_map_deallocate(map);
                        return 0;
                }
        } else {
                entry = tmp_entry;
        }

        start = entry->vme_start;

        pinfo->pri_offset = VME_OFFSET(entry);
        pinfo->pri_protection = entry->protection;
        pinfo->pri_max_protection = entry->max_protection;
        pinfo->pri_inheritance = entry->inheritance;
        pinfo->pri_behavior = entry->behavior;
        pinfo->pri_user_wired_count = entry->user_wired_count;
        pinfo->pri_user_tag = VME_ALIAS(entry);

        if (entry->is_sub_map) {
                pinfo->pri_flags |= PROC_REGION_SUBMAP;
        } else {
                if (entry->is_shared) {
                        pinfo->pri_flags |= PROC_REGION_SHARED;
                }
        }


        extended.protection = entry->protection;
        extended.user_tag = VME_ALIAS(entry);
        extended.pages_resident = 0;
        extended.pages_swapped_out = 0;
        extended.pages_shared_now_private = 0;
        extended.pages_dirtied = 0;
        extended.external_pager = 0;
        extended.shadow_depth = 0;

        vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

        top.private_pages_resident = 0;
        top.shared_pages_resident = 0;
        vm_map_region_top_walk(entry, &top);


        pinfo->pri_pages_resident = extended.pages_resident;
        pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
        pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
        pinfo->pri_pages_dirtied = extended.pages_dirtied;
        pinfo->pri_ref_count = extended.ref_count;
        pinfo->pri_shadow_depth = extended.shadow_depth;
        pinfo->pri_share_mode = extended.share_mode;

        pinfo->pri_private_pages_resident = top.private_pages_resident;
        pinfo->pri_shared_pages_resident = top.shared_pages_resident;
        pinfo->pri_obj_id = top.obj_id;

        pinfo->pri_address = (uint64_t)start;
        pinfo->pri_size = (uint64_t)(entry->vme_end - start);
        pinfo->pri_depth = 0;

        if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
                *vnodeaddr = (uintptr_t)0;

                if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid, NULL) == 0) {
                        vm_map_unlock_read(map);
                        vm_map_deallocate(map);
                        return 1;
                }
        }

        vm_map_unlock_read(map);
        vm_map_deallocate(map);
        return 1;
}

int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
        vm_map_t map;
        vm_map_offset_t address = (vm_map_offset_t)arg;
        vm_map_entry_t tmp_entry;
        vm_map_entry_t entry;

        task_lock(task);
        map = task->map;
        if (map == VM_MAP_NULL) {
                task_unlock(task);
                return 0;
        }
        vm_map_reference(map);
        task_unlock(task);

        vm_map_lock_read(map);

        if (!vm_map_lookup_entry_allow_pgz(map, address, &tmp_entry)) {
                if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
                        vm_map_unlock_read(map);
                        vm_map_deallocate(map);
                        return 0;
                }
        } else {
                entry = tmp_entry;
        }

        while (entry != vm_map_to_entry(map)) {
                *vnodeaddr = 0;
                *vid = 0;

                if (entry->is_sub_map == 0) {
                        if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid, NULL)) {
                                pinfo->pri_offset = VME_OFFSET(entry);
                                pinfo->pri_protection = entry->protection;
                                pinfo->pri_max_protection = entry->max_protection;
                                pinfo->pri_inheritance = entry->inheritance;
                                pinfo->pri_behavior = entry->behavior;
                                pinfo->pri_user_wired_count = entry->user_wired_count;
                                pinfo->pri_user_tag = VME_ALIAS(entry);

                                if (entry->is_shared) {
                                        pinfo->pri_flags |= PROC_REGION_SHARED;
                                }

                                pinfo->pri_pages_resident = 0;
                                pinfo->pri_pages_shared_now_private = 0;
                                pinfo->pri_pages_swapped_out = 0;
                                pinfo->pri_pages_dirtied = 0;
                                pinfo->pri_ref_count = 0;
                                pinfo->pri_shadow_depth = 0;
                                pinfo->pri_share_mode = 0;

                                pinfo->pri_private_pages_resident = 0;
                                pinfo->pri_shared_pages_resident = 0;
                                pinfo->pri_obj_id = 0;

                                pinfo->pri_address = (uint64_t)entry->vme_start;
                                pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
                                pinfo->pri_depth = 0;

                                vm_map_unlock_read(map);
                                vm_map_deallocate(map);
                                return 1;
                        }
                }

                /* Keep searching for a vnode-backed mapping */
                entry = entry->vme_next;
        }

        vm_map_unlock_read(map);
        vm_map_deallocate(map);
        return 0;
}
extern int vnode_get(struct vnode *vp);
int
task_find_region_details(
        task_t task,
        vm_map_offset_t offset,
        find_region_details_options_t options,
        uintptr_t *vp_p,
        uint32_t *vid_p,
        bool *is_map_shared_p,
        uint64_t *start_p,
        uint64_t *len_p)
{
        vm_map_t map;
        vm_map_entry_t entry;
        int rc;

        rc = 0;
        *vp_p = 0;
        *vid_p = 0;
        *is_map_shared_p = false;
        *start_p = 0;
        *len_p = 0;
        if (options & ~FIND_REGION_DETAILS_OPTIONS_ALL) {
                return 0;
        }

        task_lock(task);
        map = task->map;
        if (map == VM_MAP_NULL) {
                task_unlock(task);
                return 0;
        }
        vm_map_reference(map);
        task_unlock(task);

        vm_map_lock_read(map);
        if (!vm_map_lookup_entry_allow_pgz(map, offset, &entry)) {
                if (options & FIND_REGION_DETAILS_AT_OFFSET) {
                        /* no mapping at this offset */
                        goto ret;
                }
                /* check next entry */
                entry = entry->vme_next;
                if (entry == vm_map_to_entry(map)) {
                        /* no next entry */
                        goto ret;
                }
        }

        for (;
            entry != vm_map_to_entry(map);
            entry = entry->vme_next) {
                if (entry->is_sub_map) {
                        /* fallthru to check next entry */
                } else if (fill_vnodeinfoforaddr(entry, vp_p, vid_p, is_map_shared_p)) {
                        if ((options & FIND_REGION_DETAILS_GET_VNODE) &&
                            vnode_get((struct vnode *)*vp_p)) {
                                /* tried but could not get an iocount */
                                *vp_p = 0;
                                *vid_p = 0;
                                if (options & FIND_REGION_DETAILS_AT_OFFSET) {
                                        /* done */
                                        break;
                                }
                                /* check next entry */
                                continue;
                        }
                        *start_p = entry->vme_start;
                        *len_p = entry->vme_end - entry->vme_start;
                        rc = 1; /* success */
                        break;
                }
                if (options & FIND_REGION_DETAILS_AT_OFFSET) {
                        /* no file mapping at this offset: done */
                        break;
                }
                /* check next entry */
        }

ret:
        vm_map_unlock_read(map);
        vm_map_deallocate(map);
        return rc;
}

static int
fill_vnodeinfoforaddr(
        vm_map_entry_t entry,
        uintptr_t *vnodeaddr,
        uint32_t *vid,
        bool *is_map_shared)
{
        vm_object_t top_object, object;
        memory_object_t memory_object;
        memory_object_pager_ops_t pager_ops;
        kern_return_t kr;
        int shadow_depth;


        if (entry->is_sub_map) {
                return 0;
        } else {
                /*
                 * The last object in the shadow chain has the
                 * relevant pager information.
                 */
                top_object = VME_OBJECT(entry);
                if (top_object == VM_OBJECT_NULL) {
                        object = VM_OBJECT_NULL;
                        shadow_depth = 0;
                } else {
                        vm_object_lock(top_object);
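                        /*
                         * Walk the shadow chain hand-over-hand, locking
                         * each shadow before unlocking its parent, so we
                         * end with only the last object locked.
                         */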
                        for (object = top_object, shadow_depth = 0;
                            object->shadow != VM_OBJECT_NULL;
                            object = object->shadow, shadow_depth++) {
                                vm_object_lock(object->shadow);
                                vm_object_unlock(object);
                        }
                }
        }

        if (object == VM_OBJECT_NULL) {
                return 0;
        } else if (object->internal) {
                vm_object_unlock(object);
                return 0;
        } else if (!object->pager_ready ||
            object->terminating ||
            !object->alive ||
            object->pager == NULL) {
                vm_object_unlock(object);
                return 0;
        } else {
                memory_object = object->pager;
                pager_ops = memory_object->mo_pager_ops;
                if (pager_ops == &vnode_pager_ops) {
                        kr = vnode_pager_get_object_vnode(
                                memory_object,
                                vnodeaddr, vid);
                        if (kr != KERN_SUCCESS) {
                                vm_object_unlock(object);
                                return 0;
                        }
                } else {
                        vm_object_unlock(object);
                        return 0;
                }
        }
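
        /*
         * With no shadow objects in between, the mapping still references
         * the pager's object directly.
         */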
        if (is_map_shared) {
                *is_map_shared = (shadow_depth == 0);
        }
        vm_object_unlock(object);
        return 1;
}

kern_return_t
vnode_pager_get_object_vnode(
        memory_object_t mem_obj,
        uintptr_t *vnodeaddr,
        uint32_t *vid)
{
        vnode_pager_t vnode_object;

        vnode_object = vnode_pager_lookup(mem_obj);
        if (vnode_object->vnode_handle) {
                *vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
                *vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

                return KERN_SUCCESS;
        }

        return KERN_FAILURE;
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
        memory_object_t mem_obj,
        uintptr_t *devvp)
{
        struct vnode *vp;
        uint32_t vid;

        if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
                return KERN_FAILURE;
        }
        *devvp = (uintptr_t)vnode_mountdevvp(vp);
        if (*devvp) {
                return KERN_SUCCESS;
        }
        return KERN_FAILURE;
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry. If found, return with the
 * object locked; otherwise return NULL with nothing locked.
 */

vm_object_t
find_vnode_object(
        vm_map_entry_t entry)
{
        vm_object_t top_object, object;
        memory_object_t memory_object;
        memory_object_pager_ops_t pager_ops;

        if (!entry->is_sub_map) {
                /*
                 * The last object in the shadow chain has the
                 * relevant pager information.
                 */

                top_object = VME_OBJECT(entry);

                if (top_object) {
                        vm_object_lock(top_object);

                        for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
                                vm_object_lock(object->shadow);
                                vm_object_unlock(object);
                        }

                        if (object &&
                            !object->internal &&
                            object->pager_ready &&
                            !object->terminating &&
                            object->alive &&
                            object->pager != NULL) {
                                memory_object = object->pager;
                                pager_ops = memory_object->mo_pager_ops;

                                /*
                                 * If this object points to the vnode_pager_ops, then we found what we're
                                 * looking for. Otherwise, this vm_map_entry doesn't have an underlying
                                 * vnode and so we fall through to the bottom and return NULL.
                                 */

                                if (pager_ops == &vnode_pager_ops) {
                                        return object; /* we return with the object locked */
                                }
                        }

                        vm_object_unlock(object);
                }
        }

        return VM_OBJECT_NULL;
}