/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map.h>
#include <vm/vm_pageout.h>
#include <vm/memory_object.h>
#include <vm/vm_protos.h>
#include <vm/vm_purgeable_internal.h>

#include <sys/kdebug_triage.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t);

vm_offset_t
get_map_start(
	vm_map_t);

vm_offset_t
get_map_end(
	vm_map_t);

/*
 * Return the number of entries in the given VM map.
 */
int
get_map_nentries(
	vm_map_t map)
{
	return map->hdr.nentries;
}

mach_vm_offset_t
mach_get_vm_start(vm_map_t map)
{
	return vm_map_first_entry(map)->vme_start;
}

mach_vm_offset_t
mach_get_vm_end(vm_map_t map)
{
	return vm_map_last_entry(map)->vme_end;
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	.memory_object_reference = vnode_pager_reference,
	.memory_object_deallocate = vnode_pager_deallocate,
	.memory_object_init = vnode_pager_init,
	.memory_object_terminate = vnode_pager_terminate,
	.memory_object_data_request = vnode_pager_data_request,
	.memory_object_data_return = vnode_pager_data_return,
	.memory_object_data_initialize = vnode_pager_data_initialize,
	.memory_object_data_unlock = vnode_pager_data_unlock,
	.memory_object_synchronize = vnode_pager_synchronize,
	.memory_object_map = vnode_pager_map,
	.memory_object_last_unmap = vnode_pager_last_unmap,
	.memory_object_data_reclaim = NULL,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "vnode pager"
};
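
/*
 * The VM layer never calls these routines directly; it dispatches through
 * the memory_object_pager_ops vtable above.  A minimal sketch of that
 * dispatch (assuming a pager-backed VM object):
 *
 *	memory_object_t pager = object->pager;
 *	(void) pager->mo_pager_ops->memory_object_data_request(pager,
 *	    offset, length, VM_PROT_READ, fault_info);
 *
 * resolves to vnode_pager_data_request() when the object is vnode-backed.
 */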

typedef struct vnode_pager {
	/* mandatory generic header */
	struct memory_object vn_pgr_hdr;

	/* pager-specific */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define vn_pgr_hdr_ref  vn_pgr_hdr.mo_ref
#else
	os_ref_atomic_t         vn_pgr_hdr_ref;
#endif
	struct vnode            *vnode_handle; /* vnode handle */
} *vnode_pager_t;
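
/*
 * The memory_object header must remain the first field:
 * vnode_pager_lookup() below recovers the vnode_pager_t from a
 * memory_object_t with a plain cast, e.g.
 *
 *	vnode_pager_t vnode_object = (vnode_pager_t)mem_obj;
 *
 * which is only valid while vn_pgr_hdr sits at offset 0.
 */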

kern_return_t
vnode_pager_cluster_read(               /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write(              /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);

vnode_pager_t
vnode_object_create(                    /* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup(                     /* forward */
	memory_object_t);

struct vnode *
vnode_pager_lookup_vnode(               /* forward */
	memory_object_t);

ZONE_DEFINE_TYPE(vnode_pager_zone, "vnode pager structures",
    struct vnode_pager, ZC_NOENCRYPT);

#define VNODE_PAGER_NULL        ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT   1

#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL               0xffffffff
#define PAGER_INIT              0x00000001
#define PAGER_PAGEIN            0x00000002

#define PAGER_DEBUG(LEVEL, A)   {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

extern int proc_resetpcontrol(int);

extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32
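
/*
 * memory_object_control_uiomove() is the UBC fast path for moving data
 * between a file's resident pages and a uio.  It gathers runs of up to
 * MAX_RUN pages, copies through their physical addresses with uiomove64(),
 * and, when mark_dirty is set, dirties the pages (forcing revalidation of
 * code-signed pages).  A hypothetical call-site sketch (names assumed for
 * illustration only):
 *
 *	retval = memory_object_control_uiomove(
 *	    ubc_getobject(vp, UBC_FLAGS_NONE),
 *	    f_offset, uio, page_offset, io_size, is_write, take_ref);
 *
 * If it returns 0 but the uio still has bytes left (a cache "hole"),
 * the caller is expected to fall back to the regular paging path.
 */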
int
memory_object_control_uiomove(
	memory_object_control_t control,
	memory_object_offset_t  offset,
	void                    *uio,
	int                     start_offset,
	int                     io_requested,
	int                     mark_dirty,
	int                     take_reference)
{
	vm_object_t             object;
	vm_page_t               dst_page;
	int                     xsize;
	int                     retval = 0;
	int                     cur_run;
	int                     cur_needed;
	int                     i;
	int                     orig_offset;
	vm_page_t               page_run[MAX_RUN];
	int                     dirty_count;    /* keeps track of number of pages dirtied as part of this uiomove */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so bail out
		 * of this optimized path and let the caller fall
		 * back to the regular path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	dirty_count = 0;
	while (io_requested && retval == 0) {
		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN) {
			cur_needed = MAX_RUN;
		}

		for (cur_run = 0; cur_run < cur_needed;) {
			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
				break;
			}

			if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
				/*
				 * someone else is playing with the page... if we've
				 * already collected pages into this run, go ahead
				 * and process them now, since we can't block on this
				 * page while holding other pages in the BUSY state;
				 * otherwise, wait for the page and retry
				 */
				if (cur_run) {
					break;
				}
				PAGE_SLEEP(object, dst_page, THREAD_UNINT);
				continue;
			}
			if (dst_page->vmp_laundry) {
				vm_pageout_steal_laundry(dst_page, FALSE);
			}

			if (mark_dirty) {
				if (dst_page->vmp_dirty == FALSE) {
					dirty_count++;
				}
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->vmp_cs_validated &&
				    !dst_page->vmp_cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE;

					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}
			}
			dst_page->vmp_busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0) {
			/*
			 * we hit a 'hole' in the cache or a page
			 * we don't want to try to handle, so bail
			 * at this point; we'll unlock the object below
			 */
			break;
		}
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
				xsize = io_requested;
			}

			if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
				break;
			}

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {
			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++) {
				vm_page_lru(page_run[i]);
			}

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->vmp_clustered) {
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
			}

			PAGE_WAKEUP_DONE(dst_page);
		}
		orig_offset = 0;
	}
	if (object->pager) {
		task_update_logical_writes(current_task(), (dirty_count * PAGE_SIZE), TASK_WRITE_DEFERRED, vnode_pager_lookup_vnode(object->pager));
	}
	vm_object_unlock(object);
	return retval;
}


/*
 * Create and return a memory object ("pager") for the given vnode;
 * panics if the pager structure cannot be created.
 */
memory_object_t
vnode_pager_setup(
	struct vnode    *vp,
	__unused memory_object_t pager)
{
	vnode_pager_t   vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL) {
		panic("vnode_pager_setup: vnode_object_create() failed");
	}
	return (memory_object_t)vnode_object;
}

/*
 * Initialize the pager: record its memory object control port and set
 * the default attributes on the backing VM object.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
	vnode_pager_t   vnode_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->vn_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("vnode_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * Pageout entry point: push the given range of dirty data back to the
 * vnode via the clustered write path.
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t data_cnt,
	memory_object_offset_t  *resid_offset,
	int                     *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	int                     upl_flags)
{
	vnode_pager_t   vnode_object;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

kern_return_t
vnode_pager_data_unlock(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   size,
	__unused vm_prot_t              desired_access)
{
	return KERN_FAILURE;
}

void
vnode_pager_dirtied(
	memory_object_t         mem_obj,
	vm_object_offset_t      s_offset,
	vm_object_offset_t      e_offset)
{
	vnode_pager_t   vnode_object;

	if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
		vnode_object = vnode_pager_lookup(mem_obj);
		vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
	}
}

kern_return_t
vnode_pager_get_isinuse(
	memory_object_t mem_obj,
	uint32_t        *isinuse)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t mem_obj,
	uint32_t        *limit)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_isSSD(
	memory_object_t mem_obj,
	boolean_t       *isSSD)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_size(
	memory_object_t         mem_obj,
	memory_object_offset_t  *length)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char            *pathname,
	vm_size_t       pathname_len,
	char            *filename,
	vm_size_t       filename_len,
	boolean_t       *truncated_path_p)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
	           pathname,
	           pathname_len,
	           filename,
	           filename_len,
	           truncated_path_p);
}

kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
	           mtime,
	           cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	int                     optype)
{
	vnode_pager_t   vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * Pagein entry point: read the faulting range of the vnode, expanding
 * the single-page request into a cluster when the VM layer allows it.
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	__unused memory_object_cluster_size_t   length,
	__unused vm_prot_t      desired_access,
	memory_object_fault_info_t      fault_info)
{
	vnode_pager_t           vnode_object;
	memory_object_offset_t  base_offset;
	vm_size_t               size;
	uint32_t                io_streaming = 0;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
	    &base_offset, &size, &io_streaming,
	    fault_info) != KERN_SUCCESS) {
		size = PAGE_SIZE;
	}

	assert(offset >= base_offset &&
	    offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}
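
/*
 * Example of the clustering above (numbers assumed for illustration,
 * with 4K pages): a single-page fault at offset 0x7000 may come back
 * from memory_object_cluster_size() with base_offset 0x4000 and size
 * 0x8000, expanding the pagein to the eight-page run [0x4000, 0xc000)
 * that still covers the faulting page, as the assert verifies.
 */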

/*
 * Take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t         mem_obj)
{
	vnode_pager_t   vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	os_ref_retain_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
}

/*
 * Release a reference on the pager; on the last release, drop the
 * vnode reference and free the pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t         mem_obj)
{
	vnode_pager_t   vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (os_ref_release_raw(&vnode_object->vn_pgr_hdr_ref, NULL) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
}

/*
 * Nothing to do at termination; resources are released when the last
 * pager reference is dropped in vnode_pager_deallocate().
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * memory_object_synchronize is no longer supported; any caller is a bug.
 */
kern_return_t
vnode_pager_synchronize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_size_t   length,
	__unused vm_sync_t              sync_flags)
{
	panic("vnode_pager_synchronize: memory_object_synchronize no longer supported");
	return KERN_FAILURE;
}

/*
 * Note that the object is being mapped and pass the mapping protection
 * down to the UBC layer.
 */
kern_return_t
vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t       prot)
{
	vnode_pager_t   vnode_object;
	int             ret;
	kern_return_t   kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

kern_return_t
vnode_pager_last_unmap(
	memory_object_t mem_obj)
{
	vnode_pager_t   vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

/*
 * Write the given range of the object back to the vnode: either as a
 * series of synchronous UPLs for msync-style requests (UPL_MSYNC), or
 * as a single clustered pageout aligned to a maximum-sized UPL boundary.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t           vnode_object,
	vm_object_offset_t      offset,
	vm_size_t               cnt,
	vm_object_offset_t      *resid_offset,
	int                     *io_error,
	int                     upl_flags)
{
	vm_size_t       size;
	int             errno;

	if (upl_flags & UPL_MSYNC) {
		upl_flags |= UPL_VNODE_PAGER;

		if ((upl_flags & UPL_IOSYNC) && io_error) {
			upl_flags |= UPL_KEEPCACHED;
		}

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
			    NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ((upl_flags & UPL_KEEPCACHED)) {
				if ((*io_error = errno)) {
					break;
				}
			}
			cnt -= size;
			offset += size;
		}
		if (resid_offset) {
			*resid_offset = offset;
		}
	} else {
		vm_object_offset_t      vnode_size;
		vm_object_offset_t      base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));

			if ((base_offset + size) > vnode_size) {
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
			}
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
		    NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
		    (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}
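
/*
 * Worked example of the pageout clustering above, assuming 4K pages and
 * MAX_UPL_TRANSFER_BYTES of 0x100000 (1MB): for a dirty page at offset
 * 0x141000 in a 4MB vnode, base_offset = 0x141000 & ~0xfffff = 0x100000,
 * so the page is sent at upl_offset 0x41000 within a 1MB cluster.  If the
 * vnode were only 0x180000 bytes long, size would be clipped to
 * round_page(0x180000 - 0x100000) = 0x80000 so the request never extends
 * past EOF.
 */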

/*
 * Issue a pagein for [base_offset, base_offset + cnt); "offset" is the
 * faulting offset within that cluster.  Returns KERN_FAILURE if the
 * backing store reports the range absent.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t           vnode_object,
	vm_object_offset_t      base_offset,
	vm_object_offset_t      offset,
	uint32_t                io_streaming,
	vm_size_t               cnt)
{
	int             local_error = 0;
	int             kret;
	int             flags = 0;

	assert(!(cnt & PAGE_MASK));

	if (io_streaming) {
		flags |= UPL_IOSTREAMING;
	}

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
	    (upl_t) NULL,
	    (upl_offset_t) (offset - base_offset),
	    base_offset,
	    (upl_size_t) cnt,
	    flags,
	    &local_error);
	/*
	 * A return value of 1 corresponds to PAGER_ABSENT as defined in
	 * bsd/vm/vm_pager.h; we can't include that header here without a
	 * layering violation, so the constant is used directly.
	 */
	if (kret == 1) {
		int     uplflags;
		upl_t   upl = NULL;
		unsigned int    count = 0;
		kern_return_t   kr;

		uplflags = (UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE |
		    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
		    base_offset, (upl_size_t) cnt,
		    &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example). Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		kernel_triage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_VNODEPAGER_CLREAD_NO_UPL), 0 /* arg */);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * Allocate and initialize a vnode pager structure for the given vnode.
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	vnode_pager_t   vnode_object;

	vnode_object = zalloc_flags(vnode_pager_zone, Z_WAITOK | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
	vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
	vnode_object->vnode_handle = vp;

	return vnode_object;
}
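
/*
 * vnode_object_create() is only called through vnode_pager_setup() above;
 * the BSD/UBC layer creates the pager when a vnode's pages are first
 * exposed to the VM system (the exact call site lives outside this file).
 * The reference initialized here is dropped via vnode_pager_deallocate().
 */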

/*
 * Convert a memory object to the vnode pager it embeds; asserts that
 * the object really is a vnode pager.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t  name)
{
	vnode_pager_t   vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
	return vnode_object;
}


struct vnode *
vnode_pager_lookup_vnode(
	memory_object_t  name)
{
	vnode_pager_t   vnode_object;
	vnode_object = (vnode_pager_t)name;
	if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
		return vnode_object->vnode_handle;
	} else {
		return NULL;
	}
}
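
/*
 * Unlike vnode_pager_lookup(), which asserts, vnode_pager_lookup_vnode()
 * tolerates foreign pagers and returns NULL, so it is safe to call on any
 * memory_object_t, e.g.
 *
 *	struct vnode *vp = vnode_pager_lookup_vnode(object->pager);
 *	if (vp != NULL) {
 *		... the object is vnode-backed ...
 *	}
 */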

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr(vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid);

int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;
	vm_map_offset_t start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;
	boolean_t do_region_footprint;
	int effective_page_shift, effective_page_size;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}

	effective_page_shift = vm_self_region_page_shift(map);
	effective_page_size = (1 << effective_page_shift);

	vm_map_reference(map);
	task_unlock(task);

	do_region_footprint = task_self_region_footprint();

	vm_map_lock_read(map);

	start = address;

	if (!vm_map_lookup_entry_allow_pgz(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			if (do_region_footprint &&
			    address == tmp_entry->vme_end) {
				ledger_amount_t ledger_resident;
				ledger_amount_t ledger_compressed;

				/*
				 * This request is right after the last valid
				 * memory region; instead of reporting the
				 * end of the address space, report a fake
				 * memory region to account for non-volatile
				 * purgeable and/or ledger-tagged memory
				 * owned by this task.
				 */
				task_ledgers_footprint(task->ledger,
				    &ledger_resident,
				    &ledger_compressed);
				if (ledger_resident + ledger_compressed == 0) {
					/* nothing to report */
					vm_map_unlock_read(map);
					vm_map_deallocate(map);
					return 0;
				}

				/* provide fake region for purgeable */
				pinfo->pri_offset = address;
				pinfo->pri_protection = VM_PROT_DEFAULT;
				pinfo->pri_max_protection = VM_PROT_DEFAULT;
				pinfo->pri_inheritance = VM_INHERIT_NONE;
				pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
				pinfo->pri_user_wired_count = 0;
				pinfo->pri_user_tag = -1;
				pinfo->pri_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out =
				    (uint32_t) (ledger_compressed / effective_page_size);
				pinfo->pri_pages_dirtied =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_ref_count = 1;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = SM_PRIVATE;
				pinfo->pri_private_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
				pinfo->pri_address = address;
				pinfo->pri_size =
				    (uint64_t) (ledger_resident + ledger_compressed);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared) {
			pinfo->pri_flags |= PROC_REGION_SHARED;
		}
	}

	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

	if (extended.external_pager && extended.ref_count == 2 && extended.share_mode == SM_SHARED) {
		extended.share_mode = SM_PRIVATE;
	}

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);

	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 1;
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 1;
}

int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry_allow_pgz(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared) {
					pinfo->pri_flags |= PROC_REGION_SHARED;
				}

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 0;
}

int
find_region_details(task_t task, vm_map_offset_t offset,
    uintptr_t *vnodeaddr, uint32_t *vid,
    uint64_t *start, uint64_t *len)
{
	vm_map_t map;
	vm_map_entry_t tmp_entry, entry;
	int rc = 0;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry_allow_pgz(map, offset, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			rc = 0;
			goto ret;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;
		*start = 0;
		*len = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid)) {
				*start = entry->vme_start;
				*len = entry->vme_end - entry->vme_start;
				rc = 1;
				goto ret;
			}
		}

		entry = entry->vme_next;
	}

ret:
	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return rc;
}

static int
fill_vnodeinfoforaddr(
	vm_map_entry_t  entry,
	uintptr_t       *vnodeaddr,
	uint32_t        *vid)
{
	vm_object_t     top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;
	kern_return_t   kr;
	int             shadow_depth;

	if (entry->is_sub_map) {
		return 0;
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
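			/*
			 * Walk to the last object in the shadow chain,
			 * hand-over-hand: lock the shadow before dropping
			 * the current object so the chain can't be torn
			 * down underneath us.
			 */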
			for (object = top_object, shadow_depth = 0;
			    object->shadow != VM_OBJECT_NULL;
			    object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return 0;
	} else if (object->internal) {
		vm_object_unlock(object);
		return 0;
	} else if (!object->pager_ready ||
	    object->terminating ||
	    !object->alive) {
		vm_object_unlock(object);
		return 0;
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return 0;
			}
		} else {
			vm_object_unlock(object);
			return 0;
		}
	}
	vm_object_unlock(object);
	return 1;
}

kern_return_t
vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t       *vnodeaddr,
	uint32_t        *vid)
{
	vnode_pager_t   vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t mem_obj,
	uintptr_t       *devvp)
{
	struct vnode    *vp;
	uint32_t        vid;

	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
		return KERN_FAILURE;
	}
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#endif

/*
 * Find the underlying vnode-backed object for the given vm_map_entry.
 * If found, return with the object locked; otherwise return
 * VM_OBJECT_NULL with nothing locked.
 */

vm_object_t
find_vnode_object(
	vm_map_entry_t  entry)
{
	vm_object_t     top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;

	if (!entry->is_sub_map) {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object && !object->internal && object->pager_ready && !object->terminating &&
			    object->alive) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for. Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return VM_OBJECT_NULL.
				 */
				if (pager_ops == &vnode_pager_ops) {
					return object; /* we return with the object locked */
				}
			}

			vm_object_unlock(object);
		}
	}

	return VM_OBJECT_NULL;
}