/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern boolean_t vm_object_tracking_inited;
extern btlog_t *vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_BTDEPTH 7
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
	int             pmap_options;
};


#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner
struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq.
	 * The packed pointers are required to be on a 64-byte boundary,
	 * which means two things for the vm_object: (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment, and (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects this is
	 * accomplished via the 'aligned' attribute; for vm_objects in
	 * the zone pool, this is accomplished by rounding the size of
	 * the vm_object element to the nearest 64-byte size before
	 * creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t                Lock_owner;
#endif
	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	/* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages
	                                           *  use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	    __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                        * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,         /* Purgeable when a token
	                                                * becomes ripe.
	                                                */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	/* This object is mapped
	 * in more than one place
	 * and hence cannot be
	 * coalesced */
	/* boolean_t */ terminating:1,
	/* Allows vm_object_lookup
	 * and vm_object_deallocate
	 * to special case their
	 * behavior when they are
	 * called as a result of
	 * page cleaning during
	 * object termination
	 */
	/* boolean_t */ named:1,                /* Enforces an internal
	                                         * naming convention by
	                                         * calling the right routines
	                                         * for allocation and
	                                         * destruction; UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	/* When a permanent object
	 * backing a COW goes away
	 * unexpectedly.  This bit
	 * allows vm_fault to return
	 * an error rather than a
	 * zero filled page.
	 */
	/* boolean_t */ phys_contiguous:1,
	/* Memory is wired and
	 * guaranteed physically
	 * contiguous.  However
	 * it is not device memory
	 * and obeys normal virtual
	 * memory rules w.r.t pmap
	 * access bits.
	 */
	/* boolean_t */ nophyscache:1,
	/* When mapped at the
	 * pmap level, don't allow
	 * primary caching. (for
	 * I/O)
	 */
	/* boolean_t */ _object5_unused_bits:1;

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned        int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
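
/*
 * Illustrative sketch, not part of the original header: the two layout
 * invariants described in the comment at the top of "struct vm_object"
 * can be spot-checked at compile time with standard C11 facilities, e.g.:
 *
 *	#include <stddef.h>
 *	_Static_assert(offsetof(struct vm_object, memq) == 0,
 *	    "memq must be the first field of struct vm_object");
 *
 * The 64-byte alignment of heap instances is enforced by rounding the
 * zone element size, so it cannot be asserted on the type itself.
 */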

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern
vm_object_t     kernel_object;          /* the single kernel object */

extern
vm_object_t     compressor_object;      /* the single compressor object */

extern
vm_object_t     retired_pages_object;   /* holds VM pages which should never be used */

extern
unsigned int    vm_object_absent_max;   /* maximum number of absent pages
                                         *  at a time for each object */

# define        VM_MSYNC_INITIALIZED                    0
# define        VM_MSYNC_SYNCHRONIZING                  1
# define        VM_MSYNC_DONE                           2


extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE not defined
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count));   \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
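
/*
 * Illustrative usage sketch (an assumption drawn from the macro definitions
 * above, not code from this file): the UPDATE_START/END pair brackets a C
 * block in which the wired-page delta is accumulated, e.g.:
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	for (each page m being wired into object) {
 *	        VM_OBJECT_WIRED_PAGE_ADD(object, m);
 *	}
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 *
 * UPDATE_END then folds the accumulated __wireddelta into wired_page_count
 * and adjusts the tag-based wired-memory accounting in one place.
 */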



#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	            (((object) == kernel_object ||                      \
	              (object) == vm_submap_object) ?                   \
	             &kernel_object_lck_attr :                          \
	             (((object) == compressor_object) ?                 \
	             &compressor_object_lck_attr :                      \
	              &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
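
/*
 * Illustrative sketch (assumed usage, not code from this file): a common
 * pattern with these primitives is to take the object lock shared and
 * upgrade only when a modification turns out to be needed.  Note that an
 * upgrade can fail, in which case the shared hold has been dropped and the
 * lock must be re-taken exclusive and the decision re-checked:
 *
 *	vm_object_lock_shared(object);
 *	if (needs_update) {
 *	        if (!vm_object_lock_upgrade(object)) {
 *	                // lock was dropped; re-take exclusive, re-validate
 *	                vm_object_lock(object);
 *	        }
 *	        // ... modify object ...
 *	}
 *	vm_object_unlock(object);
 */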


/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if     !MACH_ASSERT

#define vm_object_reference(object)                     \
MACRO_BEGIN                                             \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
MACRO_END

#endif  /* !MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);

__private_extern__ kern_return_t vm_object_release_name(
	vm_object_t     object,
	int             flags);

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t              object,
	int                      flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             *_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);



__private_extern__ kern_return_t        vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t     object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void            *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);



__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t object,
	int     reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t     object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif /* CONFIG_IOSCHED */

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread()) {
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	}
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
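
/*
 * Illustrative sketch of the event protocol (assumed usage, not code from
 * this file): a waiter records interest in an event bit and sleeps on an
 * address derived from the object pointer plus the event number; the waker
 * signals the same address.  For example, waiting for the pager to become
 * ready:
 *
 *	vm_object_lock(object);                 // exclusive
 *	while (!object->pager_ready) {
 *	        vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *	                        THREAD_UNINT);
 *	}
 *	vm_object_unlock(object);
 *
 * and, on the other side, once the pager is ready:
 *
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */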

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	           (object)->activity_in_progress);                     \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	                           VM_PIP_DEBUG_STACK_FRAMES);          \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define         vm_object_activity_begin(object)                        \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_activity_end(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_IN_PROGRESS);   \
	MACRO_END

#define         vm_object_paging_begin(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_paging_end(object)                            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                                 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}                                                               \
	MACRO_END
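
/*
 * Illustrative sketch (assumed usage, not code from this file): a paging
 * reference brackets an operation that the object must survive, so that
 * vm_object_paging_wait() in a terminator blocks until the operation has
 * completed:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	// ... issue pagein/pageout against the object's pager ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);   // wakes any paging_wait()ers
 *	vm_object_unlock(object);
 */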

#define         vm_object_paging_wait(object, interruptible)            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	       (object)->activity_in_progress != 0) {                   \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_IN_PROGRESS,     \
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END


#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(!(object)->mapping_in_progress);                         \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	                 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);          \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t	_wr;                                    \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                              VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	                              (interruptible));                 \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
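
/*
 * Worked example (illustrative, assuming 4 KB pages so PAGE_MASK == 0xFFF):
 *
 *	vm_object_trunc_page(0x1A34) == 0x1000
 *	vm_object_round_page(0x1A34) == 0x2000
 *	vm_object_round_page(0x2000) == 0x2000  // already page-aligned
 */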

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object)                                         \
	((((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */
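
/*
 * Illustrative summary of VM_OBJECT_OWNER() (restating the macro above,
 * not code from this file):
 *
 *	task_t owner = VM_OBJECT_OWNER(object);
 *	// owner == TASK_NULL   -> not owned (non-purgeable with no ledger
 *	//                         tag, or no owner recorded)
 *	// owner == kernel_task -> object was disowned; the kernel absorbs
 *	//                         the accounting
 *	// otherwise            -> the task explicitly charged for it
 */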

extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32-bit and never worked for 64-bit,
// so this should probably be a real 32-bit ID rather than a pointer.
// Current users just check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif  /* _VM_VM_OBJECT_H_ */