/* xref: /xnu-8796.101.5/osfmk/vm/vm_object.h (revision aca3beaa3dfbd42498b42c5e5ce20a938e6554e5) */
/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ csm_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	/* boolean_t */ fi_xnu_user_debug:1,
	    __vm_object_fault_info_unused_bits:22;
	int             pmap_options;
};


#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq.
	 * The packed pointers are required to be on a 64-byte boundary,
	 * which means two things for the vm_object: (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment; (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects
	 * this is accomplished via the 'aligned' attribute; for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64-byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	                                        /* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages
	                                           * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned short          paging_in_progress:16;
	unsigned short          vo_size_delta;
	unsigned int            activity_in_progress;

	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                         * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
	                                         * becomes ripe.
	                                         */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,           /* This object is mapped
	                                         * in more than one place
	                                         * and hence cannot be
	                                         * coalesced */
	/* boolean_t */ terminating:1,          /* Allows vm_object_lookup
	                                         * and vm_object_deallocate
	                                         * to special case their
	                                         * behavior when they are
	                                         * called as a result of
	                                         * page cleaning during
	                                         * object termination
	                                         */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction; UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,       /* When a permanent object
	                                         * backing a COW goes away
	                                         * unexpectedly.  This bit
	                                         * allows vm_fault to return
	                                         * an error rather than a
	                                         * zero filled page.
	                                         */
	/* boolean_t */ phys_contiguous:1,      /* Memory is wired and
	                                         * guaranteed physically
	                                         * contiguous.  However
	                                         * it is not device memory
	                                         * and obeys normal virtual
	                                         * memory rules w.r.t pmap
	                                         * access bits.
	                                         */
	/* boolean_t */ nophyscache:1,          /* When mapped at the
	                                         * pmap level, don't allow
	                                         * primary caching. (for
	                                         * I/O)
	                                         */
	/* boolean_t */ for_realtime:1;         /* Might be needed for realtime
	                                         * code path */

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
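
/*
 * Illustrative note (an assumption-labeled sketch, not code from this
 * file): the alignment contract described at the top of the struct means
 * a statically allocated vm_object would be declared along these lines,
 * so that the leading "memq" lands on a 64-byte boundary:
 *
 *	static struct vm_object some_static_object
 *	        __attribute__((aligned(64)));
 *
 * Zone-allocated objects get the same guarantee from the zone's element
 * size being rounded up to a multiple of 64 bytes.
 */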

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))

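/*
 * Illustrative sketch (not code from this file): a fault path would
 * typically consult the macro above before servicing a fault against a
 * purgeable object, failing with an error rather than handing back a
 * zero-filled page once the object is volatile or has already been
 * emptied:
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *	        return KERN_MEMORY_ERROR;
 *	}
 */
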
#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object;          /* the single kernel object */

extern const vm_object_t compressor_object;      /* the single compressor object */

extern const vm_object_t retired_pages_object;   /* holds VM pages which should never be used */

#define VM_MSYNC_INITIALIZED                    0
#define VM_MSYNC_SYNCHRONIZING                  1
#define VM_MSYNC_DONE                           2


extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count));   \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;


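/*
 * Illustrative sketch (assumed caller, not code from this file): the
 * START/END macros above open and close a C block that accumulates a
 * signed page-count delta; ADD/REMOVE/COUNT may only appear between
 * them.  "object", "mem" and "tag" are hypothetical locals:
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 *
 * END folds the accumulated delta into "wired_page_count" (asserting
 * against overflow) and keeps the wire-tag accounting in sync via
 * VM_OBJECT_WIRED() / VM_OBJECT_UNWIRED().
 */
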
#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	            ((object) == kernel_object ?                        \
	             &kernel_object_lck_attr :                          \
	             (((object) == compressor_object) ?                 \
	             &compressor_object_lck_attr :                      \
	              &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */

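/*
 * Illustrative locking sketch (assumptions, not code from this file):
 * the usual shared-then-upgrade pattern built from the primitives above.
 * If the upgrade fails, lck_rw semantics mean the shared hold was
 * dropped, so the lock must be retaken exclusively and any cached state
 * re-validated:
 *
 *	vm_object_lock_shared(object);
 *	... read-only inspection of the object ...
 *	if (!vm_object_lock_upgrade(object)) {
 *	        vm_object_lock(object);
 *	        ... re-validate: the object may have changed ...
 *	}
 *	... modify the object ...
 *	vm_object_unlock(object);
 */
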
/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

__private_extern__ void vm_object_set_size(
	vm_object_t             object,
	vm_object_size_t        outer_size,
	vm_object_size_t        inner_size);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if     !MACH_ASSERT

#define vm_object_reference(object)                     \
MACRO_BEGIN                                             \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
MACRO_END

#endif  /* MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);

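/*
 * Illustrative reference-counting sketch (not code from this file):
 * taking an extra reference while the object lock is held shared, and
 * dropping it later once the object is no longer needed.  This mirrors
 * what the !MACH_ASSERT vm_object_reference() macro above does:
 *
 *	vm_object_lock_shared(object);
 *	vm_object_reference_shared(object);
 *	vm_object_unlock(object);
 *	...
 *	vm_object_deallocate(object);   (drops the extra reference)
 */
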
__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	boolean_t               reusable_no_write,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t              object,
	int                      flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length,
	boolean_t               always_shadow);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);



__private_extern__ kern_return_t        vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t     object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void            *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);



__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t object,
	int     reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t     object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}

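/*
 * Illustrative wait/wakeup pairing (a sketch under assumptions, not code
 * from this file).  A waiter must loop: vm_object_sleep() records the
 * event in "all_wanted" and sleeps via lck_rw_sleep(), which drops and
 * then reacquires the object lock, so the condition has to be rechecked
 * on wakeup:
 *
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *	        (void) vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *	            THREAD_UNINT);
 *	}
 *	...
 *	vm_object_unlock(object);
 *
 * The waker, holding the lock exclusively, makes the condition true and
 * then notifies:
 *
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */
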
/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	           (object)->activity_in_progress);                     \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	                           VM_PIP_DEBUG_STACK_FRAMES);          \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define         vm_object_activity_begin(object)                        \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_activity_end(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_IN_PROGRESS);   \
	MACRO_END

#define         vm_object_paging_begin(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_paging_end(object)                            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                                 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}                                                               \
	MACRO_END

#define         vm_object_paging_wait(object, interruptible)            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	       (object)->activity_in_progress != 0) {                   \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_IN_PROGRESS,     \
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END


#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(!(object)->mapping_in_progress);                         \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	                 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);          \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                              VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	                              (interruptible));                 \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))

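/*
 * Worked example for the two macros above, assuming 4 KB pages
 * (PAGE_MASK == 0xFFF):
 *
 *	vm_object_round_page(0x1001) == 0x2000
 *	vm_object_round_page(0x1000) == 0x1000
 *	vm_object_trunc_page(0x1FFF) == 0x1000
 */
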
extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object)                                         \
	((object == VM_OBJECT_NULL ||                                   \
	  ((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */

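/*
 * Resolution of VM_OBJECT_OWNER(), restated from the macro above:
 *
 *	NULL object, or neither purgeable nor ledger-tagged,
 *	or vo_owner == TASK_NULL              -> TASK_NULL (not owned)
 *	vo_owner == VM_OBJECT_OWNER_DISOWNED  -> kernel_task
 *	otherwise                             -> vo_owner (explicit owner)
 */
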
extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32-bit and this never worked for
// 64-bit pointers, so it should probably be a real 32-bit ID rather than
// a truncated pointer.  Current users only check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif  /* _VM_VM_OBJECT_H_ */