/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>
#include <mach_pagemap.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ pmap_cs_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	    __vm_object_fault_info_unused_bits:23;
	int             pmap_options;
};


#define vo_size                         vo_un1.vou_size
#define vo_cache_pages_to_scan          vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset                vo_un2.vou_shadow_offset
#define vo_cache_ts                     vo_un2.vou_cache_ts
#define vo_owner                        vo_un2.vou_owner

struct vm_object {
	/*
	 * On 64-bit systems we pack the pointers hung off the memq.
	 * Those pointers have to be able to point back to the memq.
	 * The packed pointers are required to be on a 64-byte boundary,
	 * which means two things for the vm_object: (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment; (2) the vm_object must be
	 * aligned on a 64-byte boundary.  For static vm_objects
	 * this is accomplished via the 'aligned' attribute; for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element up to the nearest
	 * 64-byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

#if DEVELOPMENT || DEBUG
	thread_t                Lock_owner;
#endif
	union {
		vm_object_size_t  vou_size;     /* Object size (only valid if internal) */
		int               vou_cache_pages_to_scan;      /* pages yet to be visited in an
		                                                 * external object in cache
		                                                 */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	/* number of resident pages */
	unsigned int            wired_page_count; /* number of wired pages
	                                           *  use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *copy;          /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	    copy_strategy;                      /* How to handle data copy */

#if __LP64__
	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned int            paging_in_progress:16,
	    __object1_unused_bits:16;
	unsigned int            activity_in_progress;
#else /* __LP64__ */
	/*
	 * On 32-bit platforms, enlarging "activity_in_progress" would increase
	 * the size of "struct vm_object".  Since we don't know of any actual
	 * overflow of these counters on these platforms, let's keep the
	 * counters as 16-bit integers.
	 */
	unsigned short          paging_in_progress;
	unsigned short          activity_in_progress;
#endif /* __LP64__ */
	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:11,    /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                        * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,         /* Purgeable when a token
	                                                * becomes ripe.
	                                                */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	/* This object is mapped
	 * in more than one place
	 * and hence cannot be
	 * coalesced */
	/* boolean_t */ terminating:1,
	/* Allows vm_object_lookup
	 * and vm_object_deallocate
	 * to special case their
	 * behavior when they are
	 * called as a result of
	 * page cleaning during
	 * object termination
	 */
	/* boolean_t */ named:1,                /* A named object enforces an
	                                         * internal naming convention:
	                                         * by calling the right routines
	                                         * for allocation and
	                                         * destruction, UBC references
	                                         * against the vm_object are
	                                         * checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	/* When a permanent object
	 * backing a COW goes away
	 * unexpectedly.  This bit
	 * allows vm_fault to return
	 * an error rather than a
	 * zero filled page.
	 */
	/* boolean_t */ phys_contiguous:1,
	/* Memory is wired and
	 * guaranteed physically
	 * contiguous.  However
	 * it is not device memory
	 * and obeys normal virtual
	 * memory rules w.r.t pmap
	 * access bits.
	 */
	/* boolean_t */ nophyscache:1,
	/* When mapped at the
	 * pmap level, don't allow
	 * primary caching. (for
	 * I/O)
	 */
	/* boolean_t */ _object5_unused_bits:1;

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in     */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned        int
	    wimg_bits:8,                /* cache WIMG bits         */
	    code_signed:1,              /* pages are signed and should be
	                                 *  validated; the signatures are stored
	                                 *  with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,            /* skip wire-tag size accounting
	                                 * (see VM_OBJECT_WIRED_PAGE_UPDATE_END) */
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	__object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	__unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t        access_tracking_reads;
	uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef  VM_PIP_DEBUG
/*
 * Keep track of the stack traces for the first holders
 * of a "paging_in_progress" reference for this VM object.
 */
#define VM_PIP_DEBUG_STACK_FRAMES       25      /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS           10      /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif  /* VM_PIP_DEBUG  */

	queue_chain_t           objq;      /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq; /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
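
/*
 * A minimal sketch of the 64-byte alignment requirement described at the
 * top of "struct vm_object" (illustrative only; "my_static_object" is a
 * hypothetical name, assuming GCC/clang attribute syntax):
 *
 *	static struct vm_object my_static_object __attribute__((aligned(64)));
 *
 * Zone-allocated objects get the same guarantee by rounding the zone's
 * element size up to a multiple of 64 bytes.
 */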

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))
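
/*
 * A hedged usage sketch (not from the original header): a fault handler can
 * consult this macro to fail, rather than zero-fill, faults on purgeable
 * objects whose contents have been volatilized; the error code shown is one
 * plausible choice, not necessarily what the actual fault path returns:
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *	        return KERN_MEMORY_ERROR;
 *	}
 */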

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern
vm_object_t     kernel_object;          /* the single kernel object */

extern
vm_object_t     compressor_object;      /* the single compressor object */

extern
vm_object_t     retired_pages_object;   /* holds VM pages which should never be used */

extern
unsigned int    vm_object_absent_max;   /* maximum number of absent pages
                                         *  at a time for each object */

# define        VM_MSYNC_INITIALIZED                    0
# define        VM_MSYNC_SYNCHRONIZING                  1
# define        VM_MSYNC_DONE                           2


extern lck_grp_t                vm_map_lck_grp;
extern lck_attr_t               vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error "VM_TAG_ACTIVE_UPDATE must be defined"
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	            vm_object_t, wired_objq);                           \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	                     vm_object_t, wired_objq);                  \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
    MACRO_BEGIN                                                         \
    assert(VM_KERN_MEMORY_NONE != (tag));                               \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);                  \
    (object)->wire_tag = (tag);                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                        \
	VM_OBJECT_WIRED_ENQUEUE((object));                              \
    }                                                                   \
    MACRO_END

#define VM_OBJECT_UNWIRED(object)                                                       \
    MACRO_BEGIN                                                                         \
    if (!VM_TAG_ACTIVE_UPDATE) {                                                        \
	    VM_OBJECT_WIRED_DEQUEUE((object));                                          \
    }                                                                                   \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                                    \
	vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count));   \
	(object)->wire_tag = VM_KERN_MEMORY_NONE;                                       \
    }                                                                                   \
    MACRO_END

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                                       \
    MACRO_BEGIN                                                                         \
    {                                                                                   \
	int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                                    \
	if (__wireddelta) {                                                             \
	    boolean_t __overflow __assert_only =                                        \
	    os_add_overflow((object)->wired_page_count, __wireddelta,                   \
	                    &(object)->wired_page_count);                               \
	    assert(!__overflow);                                                        \
	    if (!(object)->pageout && !(object)->no_tag_update) {                       \
	        if (__wireddelta > 0) {                                                 \
	            assert(VM_KERN_MEMORY_NONE != (tag));                               \
	            if (VM_KERN_MEMORY_NONE == __waswired) {                            \
	                VM_OBJECT_WIRED((object), (tag));                               \
	            }                                                                   \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	        } else if (VM_KERN_MEMORY_NONE != __waswired) {                         \
	            assert(VM_KERN_MEMORY_NONE != (object)->wire_tag);                  \
	            vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta));      \
	            if (!(object)->wired_page_count) {                                  \
	                VM_OBJECT_UNWIRED((object));                                    \
	            }                                                                   \
	        }                                                                       \
	    }                                                                           \
	}                                                                               \
    }                                                                                   \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)               \
    __wireddelta += delta; \

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                     \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                  \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
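
/*
 * A hedged usage sketch (not from the original header): the UPDATE macros
 * open and close a C block in which PAGE_ADD/PAGE_REMOVE/PAGE_COUNT
 * accumulate a wired-page delta that is applied, with wire-tag accounting,
 * when the block ends.  Assuming "object" is locked, "m" is a vm_page_t and
 * "tag" is a vm_tag_t:
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, m);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */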

#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	            (((object) == kernel_object ||                      \
	              (object) == vm_submap_object) ?                   \
	             &kernel_object_lck_attr :                          \
	             (((object) == compressor_object) ?                 \
	             &compressor_object_lck_attr :                      \
	              &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#if MACH_ASSERT || DEBUG
#define vm_object_lock_assert_held(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	lck_rw_assert(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
#else  /* MACH_ASSERT || DEBUG */
#define vm_object_lock_assert_held(object)
#define vm_object_lock_assert_shared(object)
#define vm_object_lock_assert_exclusive(object)
#define vm_object_lock_assert_notheld(object)
#endif /* MACH_ASSERT || DEBUG */
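
/*
 * A minimal locking sketch (illustrative, not from the original header):
 * take the lock shared for read-only inspection and upgrade before
 * mutating; vm_object_lock_upgrade() may fail and drop the lock, in which
 * case the exclusive lock must be taken from scratch:
 *
 *	vm_object_lock_shared(object);
 *	// ... read-only inspection ...
 *	if (!vm_object_lock_upgrade(object)) {
 *	        vm_object_lock(object);	// upgrade failed, lock was dropped
 *	}
 *	vm_object_lock_assert_exclusive(object);
 *	// ... modify the object ...
 *	vm_object_unlock(object);
 */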


/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void    _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if     !MACH_ASSERT

#define vm_object_reference(object)                     \
MACRO_BEGIN                                             \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
MACRO_END

#endif  /* MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);
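
/*
 * A hedged lifetime sketch (illustrative): every reference taken with
 * vm_object_reference() (or its _locked/_shared variants) should eventually
 * be dropped with vm_object_deallocate(), which reclaims the object when
 * the last reference goes away:
 *
 *	vm_object_reference(object);
 *	// ... use the object ...
 *	vm_object_deallocate(object);
 */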

__private_extern__ kern_return_t vm_object_release_name(
	vm_object_t     object,
	int             flags);

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	struct pmap             *pmap,
/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t              object,
	int                      flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t        vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);



__private_extern__ kern_return_t        vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t     object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void            *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);



__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t adjust_vm_object_cache(
	vm_size_t oval,
	vm_size_t nval);

extern kern_return_t vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t object,
	int     reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t     object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t     object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_LOCK_IN_PROGRESS        4
#define VM_OBJECT_EVENT_UNCACHING               5
#define VM_OBJECT_EVENT_COPY_CALL               6
#define VM_OBJECT_EVENT_CACHING                 7
#define VM_OBJECT_EVENT_UNBLOCKED               8
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 9

#define VM_OBJECT_EVENT_MAX 10 /* 11 bits in "all_wanted", so 0->10 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

#if DEVELOPMENT || DEBUG
	if (object->Lock_owner != current_thread()) {
		panic("thread_sleep_vm_object: not owner - %p\n", object);
	}
	object->Lock_owner = 0;
#endif
	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
#if DEVELOPMENT || DEBUG
	object->Lock_owner = current_thread();
#endif
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t             object,
	int                     event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
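
/*
 * A hedged pairing sketch (illustrative): a waiter marks an event wanted
 * and sleeps via vm_object_sleep(), which drops and re-takes the object
 * lock around the block; the waker changes the condition and posts the
 * same event with vm_object_wakeup(), both under the exclusive lock:
 *
 *	// waiter (object locked exclusive)
 *	while (!object->pager_ready) {
 *	        vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *	                        THREAD_UNINT);
 *	}
 *
 *	// waker (object locked exclusive)
 *	object->pager_ready = TRUE;
 *	vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */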

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	           (object)->activity_in_progress);                     \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	                           VM_PIP_DEBUG_STACK_FRAMES);          \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define         vm_object_activity_begin(object)                        \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_activity_end(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_IN_PROGRESS);   \
	MACRO_END

#define         vm_object_paging_begin(object)                          \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object));\
	}                                                               \
	MACRO_END

#define         vm_object_paging_end(object)                            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object));\
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	                         VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                                 VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
	}                                                               \
	MACRO_END

#define         vm_object_paging_wait(object, interruptible)            \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	       (object)->activity_in_progress != 0) {                   \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_IN_PROGRESS,     \
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t  _wr;                                     \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                        VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,\
	                        (interruptible));                       \
                                                                        \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	MACRO_END
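
/*
 * A hedged pairing sketch (illustrative): paging references bracket work
 * that needs the object to stay stable (no collapse/terminate), and the
 * wait macros let another thread drain them before proceeding:
 *
 *	vm_object_paging_begin(object);
 *	// ... issue pagein/pageout work against the object ...
 *	vm_object_paging_end(object);
 *
 *	// elsewhere, e.g. before terminating the object:
 *	vm_object_paging_wait(object, THREAD_UNINT);
 */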

#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(! (object)->mapping_in_progress);                        \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	                 VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);          \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t	_wr;                                    \
                                                                        \
	        _wr = vm_object_sleep((object),                         \
	                              VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
	                              (interruptible));                 \
	/*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	/*XXX break; */                                 \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END


#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
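
/*
 * A worked example (illustrative, assuming 4 KB pages, PAGE_MASK == 0xFFF):
 *
 *	vm_object_round_page(0x1001) == 0x2000	// round up to the next boundary
 *	vm_object_trunc_page(0x1FFF) == 0x1000	// truncate down to the boundary
 */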

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER(object)                                         \
	((((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */                 \

extern void     vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);

// LP64todo: all the current tools are 32-bit, so this obviously never
// worked for 64-bit; it should probably be a real 32-bit ID rather than a
// pointer.  Current users just check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))
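/*
 * Illustrative note: because the ID is a permuted pointer truncated to
 * 32 bits, it is only meaningful for equality checks, e.g.:
 *
 *	if (VM_OBJECT_ID(o1) == VM_OBJECT_ID(o2)) { ... }
 */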

#endif  /* _VM_VM_OBJECT_H_ */