/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
	int             pmap_options;
};
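
/*
 * Illustrative sketch (not compiled here): the fault path fills in a
 * vm_object_fault_info and the cluster-sizing code consults it via
 * vm_object_cluster_size(), declared later in this header.  The field
 * values below are assumptions chosen for illustration only:
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.cluster_size = 0;    // let the VM choose a cluster size
 *	fault_info.lo_offset = 0;       // clustering window, in object offsets
 *	fault_info.hi_offset = object_size;
 *	fault_info.no_cache = FALSE;
 */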

#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq.
	 * The packed pointers are required to be on a 64-byte boundary,
	 * which means two things for the vm_object: (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment; (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects
	 * this is accomplished via the 'aligned' attribute; for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64-byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	                                        /* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages;
	                                           * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	    __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */
	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated.
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                         * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
	                                         * becomes ripe.
	                                         */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,           /* This object is mapped
	                                         * in more than one place
	                                         * and hence cannot be
	                                         * coalesced */
	/* boolean_t */ terminating:1,          /* Allows vm_object_lookup
	                                         * and vm_object_deallocate
	                                         * to special case their
	                                         * behavior when they are
	                                         * called as a result of
	                                         * page cleaning during
	                                         * object termination
	                                         */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction; UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,       /* When a permanent object
	                                         * backing a COW goes away
	                                         * unexpectedly.  This bit
	                                         * allows vm_fault to return
	                                         * an error rather than a
	                                         * zero filled page.
	                                         */
	/* boolean_t */ phys_contiguous:1,      /* Memory is wired and
	                                         * guaranteed physically
	                                         * contiguous.  However
	                                         * it is not device memory
	                                         * and obeys normal virtual
	                                         * memory rules w.r.t pmap
	                                         * access bits.
	                                         */
	/* boolean_t */ nophyscache:1,          /* When mapped at the
	                                         * pmap level, don't allow
	                                         * primary caching. (for
	                                         * I/O)
	                                         */
	/* boolean_t */ for_realtime:1;         /* Might be needed for
	                                         * realtime code path */

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * The following fields are not protected by any locks;
	 * they are updated via atomic compare and swap.
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))
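
/*
 * Usage sketch (illustrative, assumed control flow only): the fault path
 * can use this test to fail a fault on a purgeable object whose contents
 * were volatilized or emptied, rather than handing back a zero-filled page:
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *	        // the object's pages may have been reclaimed; report an error
 *	        return VM_FAULT_MEMORY_ERROR;
 *	}
 */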

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object;          /* the single kernel object */

extern const vm_object_t compressor_object;      /* the single compressor object */

extern const vm_object_t retired_pages_object;   /* holds VM pages which should never be used */

#define VM_MSYNC_INITIALIZED                    0
#define VM_MSYNC_SYNCHRONIZING                  1
#define VM_MSYNC_DONE                           2

extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count));   \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
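
/*
 * Usage sketch for the wired-page accounting macros above (illustrative
 * only; "mem" and "tag" are assumed to come from the caller).  The
 * START/END pair opens and closes a C block, and the ADD/REMOVE/COUNT
 * macros accumulate a delta inside it, which END folds into
 * wired_page_count and the vm_tag ledgers:
 *
 *	vm_object_lock(object);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);      // one page became wired
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 *	vm_object_unlock(object);
 */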

#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	            ((object) == kernel_object ?                        \
	             &kernel_object_lck_attr :                          \
	             (((object) == compressor_object) ?                 \
	              &compressor_object_lck_attr :                     \
	              &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
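
/*
 * Typical locking pattern (illustrative sketch only).  A reader may take
 * the lock shared and later upgrade it; an upgrade failure means the lock
 * was dropped during the attempt, mirroring the semantics of
 * lck_rw_lock_shared_to_exclusive(), and it must be re-taken exclusive:
 *
 *	vm_object_lock_shared(object);
 *	...
 *	if (!vm_object_lock_upgrade(object)) {
 *	        // the lock was lost during the upgrade attempt
 *	        vm_object_lock(object);
 *	}
 *	vm_object_lock_assert_exclusive(object);
 *	vm_object_unlock(object);
 */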


/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void         _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if     !MACH_ASSERT

#define vm_object_reference(object)                     \
MACRO_BEGIN                                             \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
MACRO_END

#endif  /* MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);
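
/*
 * Reference-counting sketch (illustrative): every vm_object_reference()
 * must be balanced by a vm_object_deallocate(), which may terminate and
 * reap the object when the last reference goes away:
 *
 *	vm_object_reference(object);    // +1; takes the lock internally
 *	... use the object ...
 *	vm_object_deallocate(object);   // -1; may terminate the object
 */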

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t             object,
	int                     flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length,
	boolean_t               always_shadow);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);


__private_extern__ kern_return_t        vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)(void *, vm_object_offset_t),
	void                    *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);


__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t     object,
	int             reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t     object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */
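
/*
 * Event-waiting sketch (illustrative; similar loops appear in the fault
 * path).  A thread needing the pager to become ready records interest in
 * the event and sleeps; the wakeup side wakes sleepers and clears the
 * "all_wanted" bit:
 *
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *	        (void) vm_object_sleep(object,
 *	                               VM_OBJECT_EVENT_PAGER_READY,
 *	                               THREAD_UNINT);
 *	        // the object lock was dropped and re-taken by the sleep
 *	}
 *	vm_object_unlock(object);
 */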

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	           (object)->activity_in_progress);                     \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	                           VM_PIP_DEBUG_STACK_FRAMES);          \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define         vm_object_activity_begin(object)                        \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_activity_end(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_IN_PROGRESS);   \
	MACRO_END

#define         vm_object_paging_begin(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_paging_end(object)                            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                                 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}                                                               \
	MACRO_END

#define         vm_object_paging_wait(object, interruptible)            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	       (object)->activity_in_progress != 0) {                   \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_IN_PROGRESS,     \
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/        \
	/*XXX break; */                                                 \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/        \
	/*XXX break; */                                                 \
	}                                                               \
	MACRO_END
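
/*
 * Paging-in-progress bracket (illustrative sketch, assumed control flow
 * only): operations that page data in or out hold a paging reference so
 * the object cannot be collapsed or terminated underneath them; waiters
 * use the macros above:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue the pager request ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);   // wakes vm_object_paging_wait()ers
 *	vm_object_unlock(object);
 */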

#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(!(object)->mapping_in_progress);                         \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	                 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);          \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t   _wr;                                    \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                              VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	                              (interruptible));                 \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/        \
	/*XXX break; */                                                 \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END


#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
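
/*
 * Worked example (assuming 16 KB pages, i.e. PAGE_MASK == 0x3FFF):
 *
 *	vm_object_trunc_page(0x4001ULL) == 0x4000   // round down to page base
 *	vm_object_round_page(0x4001ULL) == 0x8000   // round up to next page
 */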

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object)                                         \
	(((object) == VM_OBJECT_NULL ||                                 \
	  ((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */
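
/*
 * Resolution sketch for VM_OBJECT_OWNER() (illustrative):
 *
 *	task_t owner = VM_OBJECT_OWNER(object);
 *	// TASK_NULL   : not owned (not purgeable and no ledger tag)
 *	// kernel_task : the owner disowned it (VM_OBJECT_OWNER_DISOWNED)
 *	// otherwise   : the explicit owning task in vo_owner
 */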

extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32-bit and obviously never worked for
// 64-bit, so this should probably be a real 32-bit ID rather than a pointer.
// Current users just check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif  /* _VM_VM_OBJECT_H_ */