/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
	int             pmap_options;
};
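
/*
 * Illustrative sketch (assumed caller, not from this file): a fault
 * handler might fill in a vm_object_fault_info before asking
 * vm_object_cluster_size() (declared below) how much to page in.
 * "mapping_size" is a hypothetical bound:
 *
 *	struct vm_object_fault_info fault_info = {
 *	        .interruptible = THREAD_UNINT,
 *	        .behavior = VM_BEHAVIOR_SEQUENTIAL,
 *	        .lo_offset = 0,
 *	        .hi_offset = mapping_size,
 *	        .cluster_size = 0,
 *	};
 */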

#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner

struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_objects
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	/* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages
	                                           *  use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	    __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                        * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,         /* Purgeable when a token
	                                                * becomes ripe.
	                                                */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	/* This object is mapped
	 * in more than one place
	 * and hence cannot be
	 * coalesced */
	/* boolean_t */ terminating:1,
	/* Allows vm_object_lookup
	 * and vm_object_deallocate
	 * to special case their
	 * behavior when they are
	 * called as a result of
	 * page cleaning during
	 * object termination
	 */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention; by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction, UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	/* When a permanent object
	 * backing a COW goes away
	 * unexpectedly.  This bit
	 * allows vm_fault to return
	 * an error rather than a
	 * zero filled page.
	 */
	/* boolean_t */ phys_contiguous:1,
	/* Memory is wired and
	 * guaranteed physically
	 * contiguous.  However
	 * it is not device memory
	 * and obeys normal virtual
	 * memory rules w.r.t pmap
	 * access bits.
	 */
	/* boolean_t */ nophyscache:1,
	/* When mapped at the
	 * pmap level, don't allow
	 * primary caching. (for
	 * I/O)
	 */
	/* boolean_t */ _object5_unused_bits:1;

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned        int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	__unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
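
/*
 * Illustrative sketch (assumed declaration, not from this file): per the
 * alignment comment above "memq", a statically allocated vm_object must
 * sit on a 64-byte boundary so that packed page-queue pointers can point
 * back at the embedded memq:
 *
 *	static struct vm_object some_static_object __attribute__((aligned(64)));
 */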

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object;          /* the single kernel object */

extern const vm_object_t compressor_object;      /* the single compressor object */

extern const vm_object_t retired_pages_object;   /* holds VM pages which should never be used */

# define        VM_MSYNC_INITIALIZED                    0
# define        VM_MSYNC_SYNCHRONIZING                  1
# define        VM_MSYNC_DONE                           2


extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count));   \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
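
/*
 * Illustrative usage sketch (assumed caller, not from this file): the
 * START/END macros above open and close a C block, so any ADD/REMOVE/COUNT
 * updates must appear between the two, with the object locked exclusive:
 *
 *	vm_object_lock(object);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 *	vm_object_unlock(object);
 */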

#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	            ((object) == kernel_object ?                        \
	             &kernel_object_lck_attr :                          \
	             (((object) == compressor_object) ?                 \
	             &compressor_object_lck_attr :                      \
	              &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
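
/*
 * Illustrative locking sketch (assumed caller, not from this file): take
 * the lock shared on read-mostly paths and upgrade before mutating.
 * Assuming the usual lck_rw semantics, a failed upgrade has already
 * dropped the lock, so it must be retaken exclusive:
 *
 *	vm_object_lock_shared(object);
 *	...
 *	if (!vm_object_lock_upgrade(object)) {
 *	        vm_object_lock(object);
 *	}
 *	vm_object_lock_assert_exclusive(object);
 *	...
 *	vm_object_unlock(object);
 */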

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if     !MACH_ASSERT

#define vm_object_reference(object)                     \
MACRO_BEGIN                                             \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
MACRO_END

#endif  /* MACH_ASSERT */
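
/*
 * Illustrative sketch (assumed caller, not from this file): taking an
 * extra reference while the object lock is held exclusive, then dropping
 * it later via vm_object_deallocate() (declared below):
 *
 *	vm_object_lock(object);
 *	vm_object_reference_locked(object);
 *	vm_object_unlock(object);
 *	...
 *	vm_object_deallocate(object);
 */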

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);

__private_extern__ kern_return_t vm_object_release_name(
	vm_object_t     object,
	int             flags);

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t              object,
	int                      flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);



__private_extern__ kern_return_t        vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t     object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void            *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);



__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t object,
	int     reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t     object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
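
/*
 * Illustrative sketch (assumed caller, not from this file): a waiter
 * calls vm_object_sleep(), which records the event in "all_wanted" and
 * sleeps on the object lock; whoever makes the condition true calls
 * vm_object_wakeup() with the same event:
 *
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *	        vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *	                        THREAD_UNINT);
 *	}
 *	...
 *	vm_object_unlock(object);
 */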

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	           (object)->activity_in_progress);                     \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	                           VM_PIP_DEBUG_STACK_FRAMES);          \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define         vm_object_activity_begin(object)                        \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_activity_end(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_IN_PROGRESS);   \
	MACRO_END

#define         vm_object_paging_begin(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_paging_end(object)                            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                                 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}                                                               \
	MACRO_END
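
/*
 * Illustrative sketch (assumed caller, not from this file): a paging
 * reference brackets an operation so the object cannot be collapsed or
 * terminated while the operation is in flight; the object lock may be
 * dropped between begin and end:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue pager I/O ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */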

#define         vm_object_paging_wait(object, interruptible)            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	       (object)->activity_in_progress != 0) {                   \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_IN_PROGRESS,     \
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/        \
	/*XXX break; */                                                 \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/        \
	/*XXX break; */                                                 \
	}                                                               \
	MACRO_END


#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(!(object)->mapping_in_progress);                         \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	                 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);          \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t	_wr;                                    \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                              VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	                              (interruptible));                 \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/        \
	/*XXX break; */                                                 \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
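
/*
 * Worked example, assuming 4KB pages (PAGE_MASK == 0xFFF):
 *
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x2000) == 0x2000
 */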

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object)                                         \
	((((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */

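/*
 * Illustrative sketch (assumed caller, not from this file): resolving
 * which task's ledgers a purgeable or ledger-tagged object is billed to;
 * a disowned object is billed to the kernel:
 *
 *	task_t owner = VM_OBJECT_OWNER(object);
 *	if (owner != TASK_NULL) {
 *	        ... charge owner's ledgers ...
 *	}
 */
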
extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif  /* _VM_VM_OBJECT_H_ */