/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
	int             pmap_options;
};
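
/*
 * A minimal usage sketch (not part of this header): a fault path fills in
 * a vm_object_fault_info before asking vm_object_cluster_size() for a
 * cluster.  The particular field values below are illustrative
 * assumptions, not required settings.
 *
 *	struct vm_object_fault_info fault_info;
 *
 *	bzero(&fault_info, sizeof(fault_info));
 *	fault_info.interruptible = THREAD_UNINT;
 *	fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *	fault_info.lo_offset = offset_lo;	// hypothetical bounds
 *	fault_info.hi_offset = offset_hi;
 *	fault_info.cluster_size = 0;		// filled in by the cluster-size logic
 */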

#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner

struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_objects
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	/* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages
	                                           *  use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	    __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                        * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,         /* Purgeable when a token
	                                                * becomes ripe.
	                                                */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	/* This object is mapped
	 * in more than one place
	 * and hence cannot be
	 * coalesced */
	/* boolean_t */ terminating:1,
	/* Allows vm_object_lookup
	 * and vm_object_deallocate
	 * to special case their
	 * behavior when they are
	 * called as a result of
	 * page cleaning during
	 * object termination
	 */
	/* boolean_t */ named:1,                /* A "named" object enforces
	                                         * an internal naming
	                                         * convention by calling the
	                                         * right routines for
	                                         * allocation and destruction;
	                                         * UBC references against the
	                                         * vm_object are checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	/* When a permanent object
	 * backing a COW goes away
	 * unexpectedly.  This bit
	 * allows vm_fault to return
	 * an error rather than a
	 * zero filled page.
	 */
	/* boolean_t */ phys_contiguous:1,
	/* Memory is wired and
	 * guaranteed physically
	 * contiguous.  However
	 * it is not device memory
	 * and obeys normal virtual
	 * memory rules w.r.t pmap
	 * access bits.
	 */
	/* boolean_t */ nophyscache:1,
	/* When mapped at the
	 * pmap level, don't allow
	 * primary caching. (for
	 * I/O)
	 */
	/* boolean_t */ _object5_unused_bits:1;

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,            /*  */
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	__unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))
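
/*
 * Hedged usage sketch: a fault handler can use this predicate to fail the
 * fault on a volatile or empty purgeable object rather than handing back a
 * zero-filled page; the surrounding error path here is illustrative.
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *	        return VM_FAULT_MEMORY_ERROR;
 *	}
 */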

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object;          /* the single kernel object */

extern const vm_object_t compressor_object;      /* the single compressor object */

extern const vm_object_t retired_pages_object;   /* holds VM pages which should never be used */

# define        VM_MSYNC_INITIALIZED                    0
# define        VM_MSYNC_SYNCHRONIZING                  1
# define        VM_MSYNC_DONE                           2


extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count));   \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
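
/*
 * Sketch of the intended bracketing (the real call sites live in the VM
 * resident code): accumulate a wired-page delta between START and END
 * while the object lock is held; "object", "m" and "tag" are placeholders.
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, m);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */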


#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	            ((object) == kernel_object ?                        \
	             &kernel_object_lck_attr :                          \
	             (((object) == compressor_object) ?                 \
	             &compressor_object_lck_attr :                      \
	              &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
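
/*
 * A common pattern with these primitives, shown as a sketch: inspect under
 * the shared lock, then upgrade only if an update is needed.  Per lck_rw
 * semantics, a failed upgrade drops the lock, so it must be retaken
 * exclusive and any cached state revalidated; "needs_update" is a
 * hypothetical condition.
 *
 *	vm_object_lock_shared(object);
 *	...read-only inspection...
 *	if (needs_update && !vm_object_lock_upgrade(object)) {
 *	        vm_object_lock(object);	// upgrade failed: lock was dropped
 *	        ...revalidate...
 *	}
 *	...modify...
 *	vm_object_unlock(object);
 */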

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if     !MACH_ASSERT

#define vm_object_reference(object)                     \
MACRO_BEGIN                                             \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
MACRO_END

#endif  /* MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t              object,
	int                      flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length,
	boolean_t               always_shadow);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);



__private_extern__ kern_return_t        vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t     object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void            *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);



__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t object,
	int     reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t     object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
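
/*
 * The event protocol, as a sketch: a waiter re-checks its condition in a
 * loop and sleeps on one of the VM_OBJECT_EVENT_* values; the thread that
 * clears the condition issues the wakeup.  Both sides hold the object
 * lock exclusive (this mirrors the pager_ready wait in vm_object.c).
 *
 *	// waiter
 *	while (object->pager_created && !object->pager_ready) {
 *	        vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *	                        THREAD_UNINT);
 *	}
 *
 *	// initializer, once the pager will take requests
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */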

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	           (object)->activity_in_progress);                     \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	                           VM_PIP_DEBUG_STACK_FRAMES);          \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define         vm_object_activity_begin(object)                        \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_activity_end(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_IN_PROGRESS);   \
	MACRO_END

#define         vm_object_paging_begin(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_paging_end(object)                            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                                 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}                                                               \
	MACRO_END
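
/*
 * Sketch of the intended bracketing: take a paging reference so the object
 * cannot be collapsed or terminated while the pager is being consulted.
 * The object lock may be dropped between begin and end; the reference,
 * not the lock, is what pins the object.
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	...talk to the pager...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */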

#define         vm_object_paging_wait(object, interruptible)            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	       (object)->activity_in_progress != 0) {                   \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_IN_PROGRESS,     \
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END


#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(!(object)->mapping_in_progress);                         \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	                 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);          \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t   _wr;                                    \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                              VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	                              (interruptible));                 \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
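
/*
 * For example, with 4K pages (PAGE_MASK == 0xFFF):
 *	vm_object_trunc_page(0x1234) == 0x1000
 *	vm_object_round_page(0x1234) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000	(already page-aligned)
 */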

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object)                                         \
	(((object) == VM_OBJECT_NULL ||                                 \
	  ((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */
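
/*
 * Hedged usage sketch: resolve the accounting owner before charging
 * ledgers; a disowned object resolves to the kernel task.
 *
 *	task_t owner = VM_OBJECT_OWNER(object);
 *	if (owner != TASK_NULL) {
 *	        ...charge owner's ledgers...
 *	}
 */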

extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif  /* _VM_VM_OBJECT_H_ */