/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <sys/errno.h>

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/host_priv.h>
#include <mach/kern_return.h>
#include <mach/memory_object_control.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/policy.h>
#include <mach/upl.h>
#include <mach/thread_act.h>

#include <kern/assert.h>
#include <kern/host.h>
#include <kern/ledger.h>
#include <kern/thread.h>
#include <kern/ipc_kobject.h>
#include <os/refcnt.h>

#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>

#include <vm/vm_map_internal.h>
#include <vm/vm_pageout_internal.h>
#include <vm/memory_object_internal.h>
#include <vm/vm_pageout.h>
#include <vm/vm_protos_internal.h>
#include <vm/vm_purgeable_internal.h>
#include <vm/vm_ubc.h>
#include <vm/vm_page_internal.h>
#include <vm/vm_object_internal.h>

#include <sys/kdebug_triage.h>

/* BSD VM COMPONENT INTERFACES */
int
get_map_nentries(
	vm_map_t map)
{
	return map->hdr.nentries;
}

/*
 * BSD VNODE PAGER
 */

const struct memory_object_pager_ops vnode_pager_ops = {
	.memory_object_reference = vnode_pager_reference,
	.memory_object_deallocate = vnode_pager_deallocate,
	.memory_object_init = vnode_pager_init,
	.memory_object_terminate = vnode_pager_terminate,
	.memory_object_data_request = vnode_pager_data_request,
	.memory_object_data_return = vnode_pager_data_return,
	.memory_object_data_initialize = vnode_pager_data_initialize,
	.memory_object_map = vnode_pager_map,
	.memory_object_last_unmap = vnode_pager_last_unmap,
	.memory_object_backing_object = NULL,
	.memory_object_pager_name = "vnode pager"
};

typedef struct vnode_pager {
	/* mandatory generic header */
	struct memory_object vn_pgr_hdr;

	/* pager-specific */
#if MEMORY_OBJECT_HAS_REFCOUNT
#define vn_pgr_hdr_ref  vn_pgr_hdr.mo_ref
#else
	os_ref_atomic_t vn_pgr_hdr_ref;
#endif
	struct vnode    *vnode_handle;  /* vnode handle */
} *vnode_pager_t;
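/*
 * Note: the generic memory_object header must remain the first field of
 * struct vnode_pager; vnode_pager_lookup() below casts memory_object_t
 * pointers directly to vnode_pager_t and relies on this layout.
 */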


kern_return_t
vnode_pager_cluster_read( /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_object_offset_t,
	uint32_t,
	vm_size_t);

void
vnode_pager_cluster_write( /* forward */
	vnode_pager_t,
	vm_object_offset_t,
	vm_size_t,
	vm_object_offset_t *,
	int *,
	int);


vnode_pager_t
vnode_object_create( /* forward */
	struct vnode *);

vnode_pager_t
vnode_pager_lookup( /* forward */
	memory_object_t);

struct vnode *
vnode_pager_lookup_vnode( /* forward */
	memory_object_t);

ZONE_DEFINE_TYPE(vnode_pager_zone, "vnode pager structures",
    struct vnode_pager, ZC_NOENCRYPT);

#define VNODE_PAGER_NULL        ((vnode_pager_t) 0)

/* TODO: Should be set dynamically by vnode_pager_init() */
#define CLUSTER_SHIFT   1


#if DEBUG
int pagerdebug = 0;

#define PAGER_ALL       0xffffffff
#define PAGER_INIT      0x00000001
#define PAGER_PAGEIN    0x00000002

#define PAGER_DEBUG(LEVEL, A) {if ((pagerdebug & LEVEL) == LEVEL) {printf A;}}
#else
#define PAGER_DEBUG(LEVEL, A)
#endif

extern int proc_resetpcontrol(int);


extern int uiomove64(addr64_t, int, void *);
#define MAX_RUN 32

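/*
 * memory_object_control_uiomove:
 *
 * Fast-path copy between a file's resident UBC pages and a uio, without
 * building a UPL or going through the pager.  Pages are gathered into runs
 * of up to MAX_RUN and marked busy while the data is moved; with
 * 'mark_dirty' they are dirtied and, if code-signed, flagged for
 * revalidation.  A return of 0 with 'io_requested' not fully consumed
 * means we hit a hole or a page we can't handle, and the caller (the UBC
 * copy fast path in the cluster layer) falls back to regular cluster I/O.
 */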
int
memory_object_control_uiomove(
	memory_object_control_t control,
	memory_object_offset_t  offset,
	void                    *uio,
	int                     start_offset,
	int                     io_requested,
	int                     mark_dirty,
	int                     take_reference)
{
	vm_object_t     object;
	vm_page_t       dst_page;
	int             xsize;
	int             retval = 0;
	int             cur_run;
	int             cur_needed;
	int             i;
	int             orig_offset;
	vm_page_t       page_run[MAX_RUN];
	int             dirty_count;    /* keeps track of number of pages dirtied as part of this uiomove */

	object = memory_object_control_to_vm_object(control);
	if (object == VM_OBJECT_NULL) {
		return 0;
	}
	assert(!object->internal);

	vm_object_lock(object);

	if (mark_dirty && object->vo_copy != VM_OBJECT_NULL) {
		/*
		 * We can't modify the pages without honoring
		 * copy-on-write obligations first, so fall off
		 * this optimized path and fall back to the regular
		 * path.
		 */
		vm_object_unlock(object);
		return 0;
	}
	orig_offset = start_offset;

	dirty_count = 0;
	while (io_requested && retval == 0) {
		cur_needed = (start_offset + io_requested + (PAGE_SIZE - 1)) / PAGE_SIZE;

		if (cur_needed > MAX_RUN) {
			cur_needed = MAX_RUN;
		}

		for (cur_run = 0; cur_run < cur_needed;) {
			if (mark_dirty && object->vo_copy != VM_OBJECT_NULL) {
				/*
				 * We checked that this file-backed object did not have
				 * a copy object when we entered this routine but it now has
				 * one, so we can't stay on this optimized path.
				 * We can finish processing the pages we have already grabbed
				 * because they were made "busy" before the copy object was
				 * created so they can't have been seen through that copy
				 * object yet.
				 */
				break;
			}

			if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
				break;
			}

			if (__improbable(dst_page->vmp_error)) {
				retval = EIO;
				break;
			}
			if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
				/*
				 * Someone else is playing with the page: if we've
				 * already collected pages into this run, go ahead
				 * and process them now, since we can't block on this
				 * page while holding other pages in the BUSY state;
				 * otherwise we wait for the page here.
				 */
				if (cur_run) {
					break;
				}
				vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
				continue;
			}
			if (dst_page->vmp_laundry) {
				vm_pageout_steal_laundry(dst_page, FALSE);
			}
			if (__improbable(dst_page->vmp_absent)) {
				printf("absent page %p (obj %p offset 0x%llx) -> EIO",
				    dst_page, object, offset);
				retval = EIO;
				break;
			}

			if (mark_dirty) {
				if (dst_page->vmp_dirty == FALSE) {
					dirty_count++;
				}
				SET_PAGE_DIRTY(dst_page, FALSE);
				if (dst_page->vmp_cs_validated &&
				    !dst_page->vmp_cs_tainted) {
					/*
					 * CODE SIGNING:
					 * We're modifying a code-signed
					 * page: force revalidate
					 */
					dst_page->vmp_cs_validated = VMP_CS_ALL_FALSE;

					VM_PAGEOUT_DEBUG(vm_cs_validated_resets, 1);

					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
				}
			}
			dst_page->vmp_busy = TRUE;

			page_run[cur_run++] = dst_page;

			offset += PAGE_SIZE_64;
		}
		if (cur_run == 0) {
			/*
			 * we hit a 'hole' in the cache or
			 * a page we don't want to try to handle,
			 * so bail at this point
			 * we'll unlock the object below
			 */
			break;
		}
		vm_object_unlock(object);

		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			if ((xsize = PAGE_SIZE - start_offset) > io_requested) {
				xsize = io_requested;
			}

			/* Such physical pages should never be restricted pages */
			if (vm_page_is_restricted(dst_page)) {
				panic("%s: cannot uiomove64 into restricted page", __func__);
			}

			if ((retval = uiomove64((addr64_t)(((addr64_t)(VM_PAGE_GET_PHYS_PAGE(dst_page)) << PAGE_SHIFT) + start_offset), xsize, uio))) {
				break;
			}

			io_requested -= xsize;
			start_offset = 0;
		}
		vm_object_lock(object);

		/*
		 * if we have more than 1 page to work on
		 * in the current run, or the original request
		 * started at offset 0 of the page, or we're
		 * processing multiple batches, we will move
		 * the pages to the tail of the inactive queue
		 * to implement an LRU for read/write accesses
		 *
		 * the check for orig_offset == 0 is there to
		 * mitigate the cost of small (< page_size) requests
		 * to the same page (this way we only move it once)
		 */
		if (take_reference && (cur_run > 1 || orig_offset == 0)) {
			vm_page_lockspin_queues();

			for (i = 0; i < cur_run; i++) {
				vm_page_lru(page_run[i]);
			}

			vm_page_unlock_queues();
		}
		for (i = 0; i < cur_run; i++) {
			dst_page = page_run[i];

			/*
			 * someone is explicitly referencing this page...
			 * update clustered and speculative state
			 */
			if (dst_page->vmp_clustered) {
				VM_PAGE_CONSUME_CLUSTERED(dst_page);
			}

			vm_page_wakeup_done(object, dst_page);
		}
		orig_offset = 0;
	}
	vm_object_unlock(object);
	return retval;
}


bool
memory_object_is_vnode_pager(
	memory_object_t mem_obj)
{
	if (mem_obj != NULL &&
	    mem_obj->mo_pager_ops == &vnode_pager_ops) {
		return true;
	}
	return false;
}

/*
 * vnode_pager_setup: create a vnode pager memory object for 'vp'.
 */
memory_object_t
vnode_pager_setup(
	struct vnode    *vp,
	__unused memory_object_t pager)
{
	vnode_pager_t   vnode_object;

	vnode_object = vnode_object_create(vp);
	if (vnode_object == VNODE_PAGER_NULL) {
		panic("vnode_pager_setup: vnode_object_create() failed");
	}
	return (memory_object_t)vnode_object;
}

/*
 * vnode_pager_init: the VM layer is attaching this memory object to a
 * VM object; record the control port and set the default attributes.
 */
kern_return_t
vnode_pager_init(memory_object_t mem_obj,
    memory_object_control_t control,
#if !DEBUG
    __unused
#endif
    memory_object_cluster_size_t pg_size)
{
	vnode_pager_t   vnode_object;
	kern_return_t   kr;
	memory_object_attr_info_data_t  attributes;


	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_init: %p, %p, %lx\n", mem_obj, control, (unsigned long)pg_size));

	if (control == MEMORY_OBJECT_CONTROL_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	memory_object_control_reference(control);

	vnode_object->vn_pgr_hdr.mo_control = control;

	attributes.copy_strategy = MEMORY_OBJECT_COPY_DELAY;
	/* attributes.cluster_size = (1 << (CLUSTER_SHIFT + PAGE_SHIFT));*/
	attributes.cluster_size = (1 << (PAGE_SHIFT));
	attributes.may_cache_object = TRUE;
	attributes.temporary = TRUE;

	kr = memory_object_change_attributes(
		control,
		MEMORY_OBJECT_ATTRIBUTE_INFO,
		(memory_object_info_t) &attributes,
		MEMORY_OBJECT_ATTR_INFO_COUNT);
	if (kr != KERN_SUCCESS) {
		panic("vnode_pager_init: memory_object_change_attributes() failed");
	}

	return KERN_SUCCESS;
}

/*
 * vnode_pager_data_return: page out the given dirty range by pushing it
 * to the vnode through vnode_pager_cluster_write().
 */
kern_return_t
vnode_pager_data_return(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	memory_object_cluster_size_t    data_cnt,
	memory_object_offset_t  *resid_offset,
	int                     *io_error,
	__unused boolean_t      dirty,
	__unused boolean_t      kernel_copy,
	int                     upl_flags)
{
	vnode_pager_t   vnode_object;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	vnode_pager_cluster_write(vnode_object, offset, data_cnt, resid_offset, io_error, upl_flags);

	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_data_initialize(
	__unused memory_object_t        mem_obj,
	__unused memory_object_offset_t offset,
	__unused memory_object_cluster_size_t   data_cnt)
{
	panic("vnode_pager_data_initialize");
	return KERN_FAILURE;
}

void
vnode_pager_dirtied(
	memory_object_t         mem_obj,
	vm_object_offset_t      s_offset,
	vm_object_offset_t      e_offset)
{
	vnode_pager_t   vnode_object;

	if (mem_obj && mem_obj->mo_pager_ops == &vnode_pager_ops) {
		vnode_object = vnode_pager_lookup(mem_obj);
		vnode_pager_was_dirtied(vnode_object->vnode_handle, s_offset, e_offset);
	}
}

kern_return_t
vnode_pager_get_isinuse(
	memory_object_t mem_obj,
	uint32_t        *isinuse)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*isinuse = 1;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isinuse = vnode_pager_isinuse(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_throttle_io_limit(
	memory_object_t mem_obj,
	uint32_t        *limit)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	(void)vnode_pager_return_throttle_io_limit(vnode_object->vnode_handle, limit);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_isSSD(
	memory_object_t mem_obj,
	boolean_t       *isSSD)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*isSSD = vnode_pager_isSSD(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

#if FBDP_DEBUG_OBJECT_NO_PAGER
kern_return_t
vnode_pager_get_forced_unmount(
	memory_object_t mem_obj,
	bool            *forced_unmount)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*forced_unmount = vnode_pager_forced_unmount(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

kern_return_t
vnode_pager_get_object_size(
	memory_object_t         mem_obj,
	memory_object_offset_t  *length)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		*length = 0;
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	*length = vnode_pager_get_filesize(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}

kern_return_t
vnode_pager_get_object_name(
	memory_object_t mem_obj,
	char            *pathname,
	vm_size_t       pathname_len,
	char            *filename,
	vm_size_t       filename_len,
	boolean_t       *truncated_path_p)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_name(vnode_object->vnode_handle,
	           pathname,
	           pathname_len,
	           filename,
	           filename_len,
	           truncated_path_p);
}

kern_return_t
vnode_pager_get_object_mtime(
	memory_object_t mem_obj,
	struct timespec *mtime,
	struct timespec *cs_mtime)
{
	vnode_pager_t   vnode_object;

	if (mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);

	return vnode_pager_get_mtime(vnode_object->vnode_handle,
	           mtime,
	           cs_mtime);
}

#if CHECK_CS_VALIDATION_BITMAP
kern_return_t
vnode_pager_cs_check_validation_bitmap(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	int                     optype)
{
	vnode_pager_t   vnode_object;

	if (mem_obj == MEMORY_OBJECT_NULL ||
	    mem_obj->mo_pager_ops != &vnode_pager_ops) {
		return KERN_INVALID_ARGUMENT;
	}

	vnode_object = vnode_pager_lookup(mem_obj);
	return ubc_cs_check_validation_bitmap(vnode_object->vnode_handle, offset, optype);
}
#endif /* CHECK_CS_VALIDATION_BITMAP */

/*
 * vnode_pager_data_request: page in the cluster around 'offset', sized by
 * memory_object_cluster_size(), through vnode_pager_cluster_read().
 */
kern_return_t
vnode_pager_data_request(
	memory_object_t         mem_obj,
	memory_object_offset_t  offset,
	__unused memory_object_cluster_size_t   length,
	__unused vm_prot_t      desired_access,
	memory_object_fault_info_t      fault_info)
{
	vnode_pager_t           vnode_object;
	memory_object_offset_t  base_offset;
	vm_size_t               size;
	uint32_t                io_streaming = 0;

	assertf(page_aligned(offset), "offset 0x%llx\n", offset);

	vnode_object = vnode_pager_lookup(mem_obj);

	size = MAX_UPL_TRANSFER_BYTES;
	base_offset = offset;

	if (memory_object_cluster_size(vnode_object->vn_pgr_hdr.mo_control,
	    &base_offset, &size, &io_streaming,
	    fault_info) != KERN_SUCCESS) {
		size = PAGE_SIZE;
	}

	assert(offset >= base_offset &&
	    offset < base_offset + size);

	return vnode_pager_cluster_read(vnode_object, base_offset, offset, io_streaming, size);
}

/*
 * vnode_pager_reference: take an additional reference on the pager.
 */
void
vnode_pager_reference(
	memory_object_t mem_obj)
{
	vnode_pager_t   vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	os_ref_retain_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
}

/*
 * vnode_pager_deallocate: drop a reference; on the last release, release
 * the vnode and free the pager structure.
 */
void
vnode_pager_deallocate(
	memory_object_t mem_obj)
{
	vnode_pager_t   vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_deallocate: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	if (os_ref_release_raw(&vnode_object->vn_pgr_hdr_ref, NULL) == 0) {
		if (vnode_object->vnode_handle != NULL) {
			vnode_pager_vrele(vnode_object->vnode_handle);
		}
		zfree(vnode_pager_zone, vnode_object);
	}
}

/*
 * vnode_pager_terminate: nothing to tear down beyond the debug trace.
 */
kern_return_t
vnode_pager_terminate(
#if !DEBUG
	__unused
#endif
	memory_object_t mem_obj)
{
	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_terminate: %p\n", mem_obj));

	return KERN_SUCCESS;
}

/*
 * vnode_pager_map: the file is being memory-mapped; let the UBC layer
 * know via ubc_map().
 */
kern_return_t
vnode_pager_map(
	memory_object_t mem_obj,
	vm_prot_t       prot)
{
	vnode_pager_t   vnode_object;
	int             ret;
	kern_return_t   kr;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_map: %p %x\n", mem_obj, prot));

	vnode_object = vnode_pager_lookup(mem_obj);

	ret = ubc_map(vnode_object->vnode_handle, prot);

	if (ret != 0) {
		kr = KERN_FAILURE;
	} else {
		kr = KERN_SUCCESS;
	}

	return kr;
}

kern_return_t
vnode_pager_last_unmap(
	memory_object_t mem_obj)
{
	vnode_pager_t   vnode_object;

	PAGER_DEBUG(PAGER_ALL, ("vnode_pager_last_unmap: %p\n", mem_obj));

	vnode_object = vnode_pager_lookup(mem_obj);

	ubc_unmap(vnode_object->vnode_handle);
	return KERN_SUCCESS;
}



/*
 * vnode_pager_cluster_write: push a range of dirty data to the vnode.
 * For msync-driven requests (UPL_MSYNC) the range is pushed in
 * MAX_UPL_TRANSFER_BYTES chunks; otherwise this is the pageout path and
 * one cluster is built around 'offset', clipped to the file size.
 */
void
vnode_pager_cluster_write(
	vnode_pager_t           vnode_object,
	vm_object_offset_t      offset,
	vm_size_t               cnt,
	vm_object_offset_t      *resid_offset,
	int                     *io_error,
	int                     upl_flags)
{
	vm_size_t       size;
	int             errno;

	if (upl_flags & UPL_MSYNC) {
		upl_flags |= UPL_VNODE_PAGER;

		if ((upl_flags & UPL_IOSYNC) && io_error) {
			upl_flags |= UPL_KEEPCACHED;
		}

		while (cnt) {
			size = (cnt < MAX_UPL_TRANSFER_BYTES) ? cnt : MAX_UPL_TRANSFER_BYTES; /* effective max */

			assert((upl_size_t) size == size);
			vnode_pageout(vnode_object->vnode_handle,
			    NULL, (upl_offset_t)0, offset, (upl_size_t)size, upl_flags, &errno);

			if ((upl_flags & UPL_KEEPCACHED)) {
				if ((*io_error = errno)) {
					break;
				}
			}
			cnt -= size;
			offset += size;
		}
		if (resid_offset) {
			*resid_offset = offset;
		}
	} else {
		vm_object_offset_t      vnode_size;
		vm_object_offset_t      base_offset;

		/*
		 * this is the pageout path
		 */
		vnode_size = vnode_pager_get_filesize(vnode_object->vnode_handle);

		if (vnode_size > (offset + PAGE_SIZE)) {
			/*
			 * preset the maximum size of the cluster
			 * and put us on a nice cluster boundary...
			 * then clip the size to ensure we
			 * don't request past the end of the underlying file
			 */
			size = MAX_UPL_TRANSFER_BYTES;
			base_offset = offset & ~((signed)(size - 1));
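			/*
			 * Worked example (sketch, assuming a 256KB maximum
			 * UPL transfer; the constant is configuration-
			 * dependent but always a power of two, which the
			 * mask above relies on): offset 0x41000 rounds down
			 * to base_offset 0x40000, so the cluster spans
			 * 0x40000..0x80000 before the clipping below.
			 */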

			if ((base_offset + size) > vnode_size) {
				size = round_page(((vm_size_t)(vnode_size - base_offset)));
			}
		} else {
			/*
			 * we've been requested to page out a page beyond the current
			 * end of the 'file'... don't try to cluster in this case...
			 * we still need to send this page through because it might
			 * be marked precious and the underlying filesystem may need
			 * to do something with it (besides page it out)...
			 */
			base_offset = offset;
			size = PAGE_SIZE;
		}
		assert((upl_size_t) size == size);
		vnode_pageout(vnode_object->vnode_handle,
		    NULL, (upl_offset_t)(offset - base_offset), base_offset, (upl_size_t) size,
		    (upl_flags & UPL_IOSYNC) | UPL_VNODE_PAGER, NULL);
	}
}


/*
 * vnode_pager_cluster_read: page in 'cnt' bytes at 'base_offset' via
 * vnode_pagein(); on PAGER_ABSENT, gather and abort a UPL over the range
 * and return KERN_FAILURE so the vm_fault() path can handle it.
 */
kern_return_t
vnode_pager_cluster_read(
	vnode_pager_t           vnode_object,
	vm_object_offset_t      base_offset,
	vm_object_offset_t      offset,
	uint32_t                io_streaming,
	vm_size_t               cnt)
{
	int             local_error = 0;
	int             kret;
	int             flags = 0;

	assert(!(cnt & PAGE_MASK));

	if (io_streaming) {
		flags |= UPL_IOSTREAMING;
	}

	assert((upl_size_t) cnt == cnt);
	kret = vnode_pagein(vnode_object->vnode_handle,
	    (upl_t) NULL,
	    (upl_offset_t) (offset - base_offset),
	    base_offset,
	    (upl_size_t) cnt,
	    flags,
	    &local_error);
	/*
	 * if (kret == PAGER_ABSENT) {
	 * Need to work out the defs here: 1 corresponds to PAGER_ABSENT as
	 * defined in bsd/vm/vm_pager_xnu.h, but we should not include that
	 * file here since that would be a layering violation.
	 */
	if (kret == 1) {
		int             uplflags;
		upl_t           upl = NULL;
		unsigned int    count = 0;
		kern_return_t   kr;

		uplflags = (UPL_NO_SYNC |
		    UPL_CLEAN_IN_PLACE |
		    UPL_SET_INTERNAL);
		count = 0;
		assert((upl_size_t) cnt == cnt);
		kr = memory_object_upl_request(vnode_object->vn_pgr_hdr.mo_control,
		    base_offset, (upl_size_t) cnt,
		    &upl, NULL, &count, uplflags, VM_KERN_MEMORY_NONE);
		if (kr == KERN_SUCCESS) {
			upl_abort(upl, 0);
			upl_deallocate(upl);
		} else {
			/*
			 * We couldn't gather the page list, probably
			 * because the memory object doesn't have a link
			 * to a VM object anymore (forced unmount, for
			 * example). Just return an error to the vm_fault()
			 * path and let it handle it.
			 */
		}

		ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_VNODEPAGER_CLREAD_NO_UPL), 0 /* arg */);
		return KERN_FAILURE;
	}

	return KERN_SUCCESS;
}

/*
 * vnode_object_create: allocate and initialize a vnode pager for 'vp'.
 */
vnode_pager_t
vnode_object_create(
	struct vnode *vp)
{
	vnode_pager_t   vnode_object;

	vnode_object = zalloc_flags(vnode_pager_zone, Z_WAITOK | Z_NOFAIL);

	/*
	 * The vm_map call takes both named entry ports and raw memory
	 * objects in the same parameter. We need to make sure that
	 * vm_map does not see this object as a named entry port. So,
	 * we reserve the first word in the object for a fake ip_kotype
	 * setting - that will tell vm_map to use it as a memory object.
	 */
	vnode_object->vn_pgr_hdr.mo_ikot = IKOT_MEMORY_OBJECT;
	vnode_object->vn_pgr_hdr.mo_pager_ops = &vnode_pager_ops;
	vnode_object->vn_pgr_hdr.mo_control = MEMORY_OBJECT_CONTROL_NULL;

	os_ref_init_raw(&vnode_object->vn_pgr_hdr_ref, NULL);
	vnode_object->vnode_handle = vp;

	return vnode_object;
}

/*
 * vnode_pager_lookup: convert a memory_object_t back to its vnode pager.
 */
vnode_pager_t
vnode_pager_lookup(
	memory_object_t name)
{
	vnode_pager_t   vnode_object;

	vnode_object = (vnode_pager_t)name;
	assert(vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops);
	return vnode_object;
}


struct vnode *
vnode_pager_lookup_vnode(
	memory_object_t name)
{
	vnode_pager_t   vnode_object;
	vnode_object = (vnode_pager_t)name;
	if (vnode_object->vn_pgr_hdr.mo_pager_ops == &vnode_pager_ops) {
		return vnode_object->vnode_handle;
	} else {
		return NULL;
	}
}

/*********************** proc_info implementation *************/

#include <sys/bsdtask_info.h>

static int fill_vnodeinfoforaddr( vm_map_entry_t entry, uintptr_t * vnodeaddr, uint32_t * vid, bool *is_map_shared);

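/*
 * fill_procregioninfo: fill 'pinfo' for the map entry containing 'arg'
 * (or the next entry above it).  With the footprint option, a query just
 * past the last entry reports a fake region accounting for non-volatile
 * purgeable and ledger-tagged memory owned by the task.  Returns 1 on
 * success (with the backing vnode/vid when the mapping is file-backed),
 * 0 if there is nothing to report.
 */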
int
fill_procregioninfo(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;
	vm_map_offset_t start;
	vm_region_extended_info_data_t extended;
	vm_region_top_info_data_t top;
	boolean_t do_region_footprint;
	int effective_page_shift, effective_page_size;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}

	effective_page_shift = vm_self_region_page_shift(map);
	effective_page_size = (1 << effective_page_shift);

	vm_map_reference(map);
	task_unlock(task);

	do_region_footprint = task_self_region_footprint();

	vm_map_lock_read(map);

	start = address;

	if (!vm_map_lookup_entry_allow_pgz(map, start, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			if (do_region_footprint &&
			    address == tmp_entry->vme_end) {
				ledger_amount_t ledger_resident;
				ledger_amount_t ledger_compressed;

				/*
				 * This request is right after the last valid
				 * memory region; instead of reporting the
				 * end of the address space, report a fake
				 * memory region to account for non-volatile
				 * purgeable and/or ledger-tagged memory
				 * owned by this task.
				 */
				task_ledgers_footprint(task->ledger,
				    &ledger_resident,
				    &ledger_compressed);
				if (ledger_resident + ledger_compressed == 0) {
					/* nothing to report */
					vm_map_unlock_read(map);
					vm_map_deallocate(map);
					return 0;
				}

				/* provide fake region for purgeable */
				pinfo->pri_offset = address;
				pinfo->pri_protection = VM_PROT_DEFAULT;
				pinfo->pri_max_protection = VM_PROT_DEFAULT;
				pinfo->pri_inheritance = VM_INHERIT_NONE;
				pinfo->pri_behavior = VM_BEHAVIOR_DEFAULT;
				pinfo->pri_user_wired_count = 0;
				pinfo->pri_user_tag = -1;
				pinfo->pri_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out =
				    (uint32_t) (ledger_compressed / effective_page_size);
				pinfo->pri_pages_dirtied =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_ref_count = 1;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = SM_PRIVATE;
				pinfo->pri_private_pages_resident =
				    (uint32_t) (ledger_resident / effective_page_size);
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = VM_OBJECT_ID_FAKE(map, task_ledgers.purgeable_nonvolatile);
				pinfo->pri_address = address;
				pinfo->pri_size =
				    (uint64_t) (ledger_resident + ledger_compressed);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	start = entry->vme_start;

	pinfo->pri_offset = VME_OFFSET(entry);
	pinfo->pri_protection = entry->protection;
	pinfo->pri_max_protection = entry->max_protection;
	pinfo->pri_inheritance = entry->inheritance;
	pinfo->pri_behavior = entry->behavior;
	pinfo->pri_user_wired_count = entry->user_wired_count;
	pinfo->pri_user_tag = VME_ALIAS(entry);

	if (entry->is_sub_map) {
		pinfo->pri_flags |= PROC_REGION_SUBMAP;
	} else {
		if (entry->is_shared) {
			pinfo->pri_flags |= PROC_REGION_SHARED;
		}
	}


	extended.protection = entry->protection;
	extended.user_tag = VME_ALIAS(entry);
	extended.pages_resident = 0;
	extended.pages_swapped_out = 0;
	extended.pages_shared_now_private = 0;
	extended.pages_dirtied = 0;
	extended.external_pager = 0;
	extended.shadow_depth = 0;

	vm_map_region_walk(map, start, entry, VME_OFFSET(entry), entry->vme_end - start, &extended, TRUE, VM_REGION_EXTENDED_INFO_COUNT);

	top.private_pages_resident = 0;
	top.shared_pages_resident = 0;
	vm_map_region_top_walk(entry, &top);


	pinfo->pri_pages_resident = extended.pages_resident;
	pinfo->pri_pages_shared_now_private = extended.pages_shared_now_private;
	pinfo->pri_pages_swapped_out = extended.pages_swapped_out;
	pinfo->pri_pages_dirtied = extended.pages_dirtied;
	pinfo->pri_ref_count = extended.ref_count;
	pinfo->pri_shadow_depth = extended.shadow_depth;
	pinfo->pri_share_mode = extended.share_mode;

	pinfo->pri_private_pages_resident = top.private_pages_resident;
	pinfo->pri_shared_pages_resident = top.shared_pages_resident;
	pinfo->pri_obj_id = top.obj_id;

	pinfo->pri_address = (uint64_t)start;
	pinfo->pri_size = (uint64_t)(entry->vme_end - start);
	pinfo->pri_depth = 0;

	if ((vnodeaddr != 0) && (entry->is_sub_map == 0)) {
		*vnodeaddr = (uintptr_t)0;

		if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid, NULL) == 0) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 1;
		}
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 1;
}

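/*
 * fill_procregioninfo_onlymappedvnodes: like fill_procregioninfo, but
 * skips ahead to the first vnode-backed mapping at or above 'arg';
 * the page/object counters are left zeroed.
 */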
int
fill_procregioninfo_onlymappedvnodes(task_t task, uint64_t arg, struct proc_regioninfo_internal *pinfo, uintptr_t *vnodeaddr, uint32_t *vid)
{
	vm_map_t map;
	vm_map_offset_t address = (vm_map_offset_t)arg;
	vm_map_entry_t tmp_entry;
	vm_map_entry_t entry;

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);

	if (!vm_map_lookup_entry_allow_pgz(map, address, &tmp_entry)) {
		if ((entry = tmp_entry->vme_next) == vm_map_to_entry(map)) {
			vm_map_unlock_read(map);
			vm_map_deallocate(map);
			return 0;
		}
	} else {
		entry = tmp_entry;
	}

	while (entry != vm_map_to_entry(map)) {
		*vnodeaddr = 0;
		*vid = 0;

		if (entry->is_sub_map == 0) {
			if (fill_vnodeinfoforaddr(entry, vnodeaddr, vid, NULL)) {
				pinfo->pri_offset = VME_OFFSET(entry);
				pinfo->pri_protection = entry->protection;
				pinfo->pri_max_protection = entry->max_protection;
				pinfo->pri_inheritance = entry->inheritance;
				pinfo->pri_behavior = entry->behavior;
				pinfo->pri_user_wired_count = entry->user_wired_count;
				pinfo->pri_user_tag = VME_ALIAS(entry);

				if (entry->is_shared) {
					pinfo->pri_flags |= PROC_REGION_SHARED;
				}

				pinfo->pri_pages_resident = 0;
				pinfo->pri_pages_shared_now_private = 0;
				pinfo->pri_pages_swapped_out = 0;
				pinfo->pri_pages_dirtied = 0;
				pinfo->pri_ref_count = 0;
				pinfo->pri_shadow_depth = 0;
				pinfo->pri_share_mode = 0;

				pinfo->pri_private_pages_resident = 0;
				pinfo->pri_shared_pages_resident = 0;
				pinfo->pri_obj_id = 0;

				pinfo->pri_address = (uint64_t)entry->vme_start;
				pinfo->pri_size = (uint64_t)(entry->vme_end - entry->vme_start);
				pinfo->pri_depth = 0;

				vm_map_unlock_read(map);
				vm_map_deallocate(map);
				return 1;
			}
		}

		/* Keep searching for a vnode-backed mapping */
		entry = entry->vme_next;
	}

	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return 0;
}

extern int vnode_get(struct vnode *vp);
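/*
 * task_find_region_details: starting at 'offset' (or only exactly there,
 * with FIND_REGION_DETAILS_AT_OFFSET), find a vnode-backed mapping and
 * return its vnode, vid, start and length.  With
 * FIND_REGION_DETAILS_GET_VNODE, an iocount is taken on the vnode via
 * vnode_get() before returning.  Returns 1 on success, 0 otherwise.
 */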
int
task_find_region_details(
	task_t                  task,
	vm_map_offset_t         offset,
	find_region_details_options_t options,
	uintptr_t               *vp_p,
	uint32_t                *vid_p,
	bool                    *is_map_shared_p,
	uint64_t                *start_p,
	uint64_t                *len_p)
{
	vm_map_t        map;
	vm_map_entry_t  entry;
	int             rc;

	rc = 0;
	*vp_p = 0;
	*vid_p = 0;
	*is_map_shared_p = false;
	*start_p = 0;
	*len_p = 0;
	if (options & ~FIND_REGION_DETAILS_OPTIONS_ALL) {
		return 0;
	}

	task_lock(task);
	map = task->map;
	if (map == VM_MAP_NULL) {
		task_unlock(task);
		return 0;
	}
	vm_map_reference(map);
	task_unlock(task);

	vm_map_lock_read(map);
	if (!vm_map_lookup_entry_allow_pgz(map, offset, &entry)) {
		if (options & FIND_REGION_DETAILS_AT_OFFSET) {
			/* no mapping at this offset */
			goto ret;
		}
		/* check next entry */
		entry = entry->vme_next;
		if (entry == vm_map_to_entry(map)) {
			/* no next entry */
			goto ret;
		}
	}

	for (;
	    entry != vm_map_to_entry(map);
	    entry = entry->vme_next) {
		if (entry->is_sub_map) {
			/* fallthru to check next entry */
		} else if (fill_vnodeinfoforaddr(entry, vp_p, vid_p, is_map_shared_p)) {
			if ((options & FIND_REGION_DETAILS_GET_VNODE) &&
			    vnode_get((struct vnode *)*vp_p)) {
				/* tried but could not get an iocount */
				*vp_p = 0;
				*vid_p = 0;
				if (options & FIND_REGION_DETAILS_AT_OFFSET) {
					/* done */
					break;
				}
				/* check next entry */
				continue;
			}
			*start_p = entry->vme_start;
			*len_p = entry->vme_end - entry->vme_start;
			rc = 1; /* success */
			break;
		}
		if (options & FIND_REGION_DETAILS_AT_OFFSET) {
			/* no file mapping at this offset: done */
			break;
		}
		/* check next entry */
	}

ret:
	vm_map_unlock_read(map);
	vm_map_deallocate(map);
	return rc;
}

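/*
 * fill_vnodeinfoforaddr: walk the entry's shadow chain down to the
 * backing object and, if it is paged by the vnode pager, return the
 * vnode address and vid.  Returns 1 on success; no locks are held on
 * return.
 */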
static int
fill_vnodeinfoforaddr(
	vm_map_entry_t  entry,
	uintptr_t       *vnodeaddr,
	uint32_t        *vid,
	bool            *is_map_shared)
{
	vm_object_t     top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;
	kern_return_t   kr;
	int             shadow_depth;


	if (entry->is_sub_map) {
		return 0;
	} else {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */
		top_object = VME_OBJECT(entry);
		if (top_object == VM_OBJECT_NULL) {
			object = VM_OBJECT_NULL;
			shadow_depth = 0;
		} else {
			vm_object_lock(top_object);
			for (object = top_object, shadow_depth = 0;
			    object->shadow != VM_OBJECT_NULL;
			    object = object->shadow, shadow_depth++) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}
		}
	}

	if (object == VM_OBJECT_NULL) {
		return 0;
	} else if (object->internal) {
		vm_object_unlock(object);
		return 0;
	} else if (!object->pager_ready ||
	    object->terminating ||
	    !object->alive ||
	    object->pager == NULL) {
		vm_object_unlock(object);
		return 0;
	} else {
		memory_object = object->pager;
		pager_ops = memory_object->mo_pager_ops;
		if (pager_ops == &vnode_pager_ops) {
			kr = vnode_pager_get_object_vnode(
				memory_object,
				vnodeaddr, vid);
			if (kr != KERN_SUCCESS) {
				vm_object_unlock(object);
				return 0;
			}
		} else {
			vm_object_unlock(object);
			return 0;
		}
	}
	if (is_map_shared) {
		*is_map_shared = (shadow_depth == 0);
	}
	vm_object_unlock(object);
	return 1;
}

kern_return_t
vnode_pager_get_object_vnode(
	memory_object_t mem_obj,
	uintptr_t       *vnodeaddr,
	uint32_t        *vid)
{
	vnode_pager_t   vnode_object;

	vnode_object = vnode_pager_lookup(mem_obj);
	if (vnode_object->vnode_handle) {
		*vnodeaddr = (uintptr_t)vnode_object->vnode_handle;
		*vid = (uint32_t)vnode_vid((void *)vnode_object->vnode_handle);

		return KERN_SUCCESS;
	}

	return KERN_FAILURE;
}

#if CONFIG_IOSCHED
kern_return_t
vnode_pager_get_object_devvp(
	memory_object_t mem_obj,
	uintptr_t       *devvp)
{
	struct vnode    *vp;
	uint32_t        vid;

	if (vnode_pager_get_object_vnode(mem_obj, (uintptr_t *)&vp, (uint32_t *)&vid) != KERN_SUCCESS) {
		return KERN_FAILURE;
	}
	*devvp = (uintptr_t)vnode_mountdevvp(vp);
	if (*devvp) {
		return KERN_SUCCESS;
	}
	return KERN_FAILURE;
}
#endif

/*
 * Find the underlying vnode object for the given vm_map_entry. If found,
 * return with the object locked; otherwise return VM_OBJECT_NULL with
 * nothing locked.
 */

vm_object_t
find_vnode_object(
	vm_map_entry_t  entry
	)
{
	vm_object_t     top_object, object;
	memory_object_t memory_object;
	memory_object_pager_ops_t pager_ops;

	if (!entry->is_sub_map) {
		/*
		 * The last object in the shadow chain has the
		 * relevant pager information.
		 */

		top_object = VME_OBJECT(entry);

		if (top_object) {
			vm_object_lock(top_object);

			for (object = top_object; object->shadow != VM_OBJECT_NULL; object = object->shadow) {
				vm_object_lock(object->shadow);
				vm_object_unlock(object);
			}

			if (object &&
			    !object->internal &&
			    object->pager_ready &&
			    !object->terminating &&
			    object->alive &&
			    object->pager != NULL) {
				memory_object = object->pager;
				pager_ops = memory_object->mo_pager_ops;

				/*
				 * If this object points to the vnode_pager_ops, then we found what we're
				 * looking for. Otherwise, this vm_map_entry doesn't have an underlying
				 * vnode and so we fall through to the bottom and return NULL.
				 */

				if (pager_ops == &vnode_pager_ops) {
					return object; /* we return with the object locked */
				}
			}

			vm_object_unlock(object);
		}
	}

	return VM_OBJECT_NULL;
}