/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 * School of Computer Science
 * Carnegie Mellon University
 * Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    vm_object.h
 * Author:  Avadis Tevanian, Jr., Michael Wayne Young
 * Date:    1985
 *
 * Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 * Types defined:
 *
 *      vm_object_t             Virtual memory object.
 *      vm_object_fault_info_t  Used to determine cluster size.
 */

struct vm_object_fault_info {
    int                 interruptible;
    uint32_t            user_tag;
    vm_size_t           cluster_size;
    vm_behavior_t       behavior;
    vm_object_offset_t  lo_offset;
    vm_object_offset_t  hi_offset;
    unsigned int
    /* boolean_t */ no_cache:1,
    /* boolean_t */ stealth:1,
    /* boolean_t */ io_sync:1,
    /* boolean_t */ cs_bypass:1,
    /* boolean_t */ csm_associated:1,
    /* boolean_t */ mark_zf_absent:1,
    /* boolean_t */ batch_pmap_op:1,
    /* boolean_t */ resilient_media:1,
    /* boolean_t */ no_copy_on_read:1,
    /* boolean_t */ fi_xnu_user_debug:1,
    /* boolean_t */ fi_used_for_tpro:1,
    __vm_object_fault_info_unused_bits:21;
    int                 pmap_options;
};
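
/*
 * Illustrative sketch (not part of this interface): a fault-info record is
 * typically zero-filled and then selectively populated before being passed
 * to routines such as vm_object_cluster_size().  The field values below are
 * assumptions chosen only to show the shape of the structure; "object_size"
 * is a hypothetical variable.
 *
 *      struct vm_object_fault_info fault_info;
 *
 *      bzero(&fault_info, sizeof(fault_info));
 *      fault_info.interruptible = THREAD_UNINT;
 *      fault_info.behavior = VM_BEHAVIOR_SEQUENTIAL;
 *      fault_info.lo_offset = 0;
 *      fault_info.hi_offset = object_size;
 */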


#define vo_size                 vo_un1.vou_size
#define vo_cache_pages_to_scan  vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset        vo_un2.vou_shadow_offset
#define vo_cache_ts             vo_un2.vou_cache_ts
#define vo_owner                vo_un2.vou_owner

struct vm_object {
    /*
     * On 64-bit systems we pack the pointers hung off the memq.
     * Those pointers have to be able to point back to the memq.
     * The packed pointers are required to be on a 64-byte boundary,
     * which means two things for the vm_object: (1) the memq struct
     * has to be the first element of the structure so that we can
     * control its alignment; (2) the vm_object must be aligned on
     * a 64-byte boundary.  For static vm_objects this is accomplished
     * via the 'aligned' attribute; for vm_objects in the zone pool,
     * it is accomplished by rounding the size of the vm_object element
     * up to the nearest multiple of 64 bytes before creating the zone.
     */
    vm_page_queue_head_t memq;          /* Resident memory - must be first */
    lck_rw_t        Lock;               /* Synchronization */

    union {
        vm_object_size_t vou_size;      /* Object size (only valid if internal) */
        int vou_cache_pages_to_scan;    /* pages yet to be visited in an
                                         * external object in cache */
    } vo_un1;

    struct vm_page  *memq_hint;
    int             ref_count;          /* Number of references */
    unsigned int    resident_page_count;
                                        /* number of resident pages */
    unsigned int    wired_page_count;   /* number of wired pages;
                                         * use VM_OBJECT_WIRED_PAGE_UPDATE
                                         * macros to update */
    unsigned int    reusable_page_count;

    struct vm_object *vo_copy;          /* Object that should receive a copy
                                         * of my changed pages, for copy_delay,
                                         * or just the temporary object that
                                         * shadows this object, for copy_call. */
    struct vm_object *shadow;           /* My shadow */
    memory_object_t pager;              /* Where to get data */

    union {
        vm_object_offset_t vou_shadow_offset; /* Offset into shadow */
        clock_sec_t vou_cache_ts;       /* age of an external object
                                         * present in cache */
        task_t vou_owner;               /* If the object is purgeable or has
                                         * a "ledger_tag", this is the task
                                         * that owns it. */
    } vo_un2;

    vm_object_offset_t paging_offset;   /* Offset into memory object */
    memory_object_control_t pager_control; /* Where data comes back */

    memory_object_copy_strategy_t
                    copy_strategy;      /* How to handle data copy */

    /*
     * Some user processes (mostly VirtualMachine software) take a large
     * number of UPLs (via IOMemoryDescriptors) to wire pages in large
     * VM objects and overflow the 16-bit "activity_in_progress" counter.
     * Since we never enforced any limit there, let's give them 32 bits
     * for backwards compatibility's sake.
     */
    unsigned short  paging_in_progress:16;
    unsigned short  vo_size_delta;
    unsigned int    activity_in_progress;

    /* The memory object ports are being used (e.g., for pagein or
     * pageout) -- don't change any of these fields (i.e., don't
     * collapse, destroy or terminate) */

    unsigned int
    /* boolean_t array */ all_wanted:6, /* Bit array of "want to be awakened"
                                         * notations.  See VM_OBJECT_EVENT_*
                                         * items below */
    /* boolean_t */ pager_created:1,    /* Has pager been created? */
    /* boolean_t */ pager_initialized:1, /* Are fields ready to use? */
    /* boolean_t */ pager_ready:1,      /* Will pager take requests? */

    /* boolean_t */ pager_trusted:1,    /* The pager for this object is
                                         * trusted.  This is true for all
                                         * internal objects (backed by the
                                         * default pager) */
    /* boolean_t */ can_persist:1,      /* The kernel may keep the data for
                                         * this object (and rights to the
                                         * memory object) after all address
                                         * map references are deallocated? */
    /* boolean_t */ internal:1,         /* Created by the kernel (and
                                         * therefore, managed by the default
                                         * memory manager) */
    /* boolean_t */ private:1,          /* magic device_pager object, holds
                                         * private pages only */
    /* boolean_t */ pageout:1,          /* pageout object.  contains private
                                         * pages that refer to a real memory
                                         * object. */
    /* boolean_t */ alive:1,            /* Not yet terminated */

    /* boolean_t */ purgable:2,         /* Purgable state.  See VM_PURGABLE_* */
    /* boolean_t */ purgeable_only_by_kernel:1,
    /* boolean_t */ purgeable_when_ripe:1, /* Purgeable when a token becomes
                                         * ripe. */
    /* boolean_t */ shadowed:1,         /* Shadow may exist */
    /* boolean_t */ true_share:1,       /* This object is mapped in more than
                                         * one place and hence cannot be
                                         * coalesced */
    /* boolean_t */ terminating:1,      /* Allows vm_object_lookup and
                                         * vm_object_deallocate to special
                                         * case their behavior when they are
                                         * called as a result of page cleaning
                                         * during object termination */
    /* boolean_t */ named:1,            /* Enforces an internal naming
                                         * convention: by calling the right
                                         * routines for allocation and
                                         * destruction, UBC references against
                                         * the vm_object are checked. */
    /* boolean_t */ shadow_severed:1,   /* When a permanent object backing a
                                         * COW goes away unexpectedly.  This
                                         * bit allows vm_fault to return an
                                         * error rather than a zero filled
                                         * page. */
    /* boolean_t */ phys_contiguous:1,  /* Memory is wired and guaranteed
                                         * physically contiguous.  However it
                                         * is not device memory and obeys
                                         * normal virtual memory rules w.r.t.
                                         * pmap access bits. */
    /* boolean_t */ nophyscache:1,      /* When mapped at the pmap level,
                                         * don't allow primary caching.
                                         * (for I/O) */
    /* boolean_t */ for_realtime:1,     /* Might be needed for realtime code
                                         * path */
#if FBDP_DEBUG_OBJECT_NO_PAGER
    /* boolean_t */ fbdp_tracked:1,
    __object1_unused_bits:4;
#else /* FBDP_DEBUG_OBJECT_NO_PAGER */
    __object1_unused_bits:5;
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

    queue_chain_t   cached_list;        /* Attachment point for the list of
                                         * objects cached as a result of their
                                         * can_persist value */
    /*
     * The following fields are not protected by any locks;
     * they are updated via atomic compare and swap.
     */
    vm_object_offset_t last_alloc;      /* last allocation offset */
    vm_offset_t     cow_hint;           /* last page present in shadow
                                         * but not in object */
    int             sequential;         /* sequential access size */

    uint32_t        pages_created;
    uint32_t        pages_used;
    /* hold object lock when altering */
    unsigned int
        wimg_bits:8,                    /* cache WIMG bits */
        code_signed:1,                  /* pages are signed and should be
                                         * validated; the signatures are
                                         * stored with the pager */
        transposed:1,                   /* object was transposed with another */
        mapping_in_progress:1,          /* pager being mapped/unmapped */
        phantom_isssd:1,
        volatile_empty:1,
        volatile_fault:1,
        all_reusable:1,
        blocked_access:1,
        set_cache_attr:1,
        object_is_shared_cache:1,
        purgeable_queue_type:2,
        purgeable_queue_group:3,
        io_tracking:1,
        no_tag_update:1,                /* */
#if CONFIG_SECLUDED_MEMORY
        eligible_for_secluded:1,
        can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
        __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
        access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
        __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
        vo_ledger_tag:3,
        vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
    uint32_t        access_tracking_reads;
    uint32_t        access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

    uint8_t         scan_collisions;
    uint8_t         __object4_unused_bits[1];
    vm_tag_t        wire_tag;

#if CONFIG_PHANTOM_CACHE
    uint32_t        phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
    queue_head_t    uplq;               /* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
    /*
     * Keep track of the stack traces for the first holders
     * of a "paging_in_progress" reference for this VM object.
     */
#define VM_PIP_DEBUG_STACK_FRAMES 25    /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS     10    /* track that many references */
    struct __pip_backtrace {
        void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
    } pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

    queue_chain_t   objq;               /* object queue - currently used for
                                         * purgable queues */
    queue_chain_t   task_objq;          /* objects owned by task - protected
                                         * by task lock */

#if !VM_TAG_ACTIVE_UPDATE
    queue_chain_t   wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
    void *purgeable_owner_bt[16];
    task_t vo_purgeable_volatilizer;    /* who made it volatile? */
    void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object) \
	((object)->volatile_fault && \
	((object)->purgable == VM_PURGABLE_VOLATILE || \
	(object)->purgable == VM_PURGABLE_EMPTY))
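
/*
 * Illustrative sketch (assumed usage, not taken from this header): a fault
 * path can use this predicate, with the object lock held, to fail a fault on
 * a volatile or emptied purgeable object instead of supplying a page.  The
 * return value shown is only an example.
 *
 *      vm_object_lock(object);
 *      if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *              vm_object_unlock(object);
 *              return KERN_MEMORY_ERROR;
 *      }
 */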

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object_default; /* the default kernel object */

extern const vm_object_t compressor_object;     /* the single compressor object */

extern const vm_object_t retired_pages_object;  /* pages retired due to ECC,
                                                 * should never be used */

#define is_kernel_object(object) ((object) == kernel_object_default)

#define VM_MSYNC_INITIALIZED    0
#define VM_MSYNC_SYNCHRONIZING  1
#define VM_MSYNC_DONE           2


extern lck_grp_t  vm_map_lck_grp;
extern lck_attr_t vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object) \
    MACRO_BEGIN \
    lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
    assert(!(object)->wired_objq.next); \
    assert(!(object)->wired_objq.prev); \
    queue_enter(&vm_objects_wired, (object), \
        vm_object_t, wired_objq); \
    lck_spin_unlock(&vm_objects_wired_lock); \
    MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object) \
    MACRO_BEGIN \
    if ((object)->wired_objq.next) { \
        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
        queue_remove(&vm_objects_wired, (object), \
            vm_object_t, wired_objq); \
        lck_spin_unlock(&vm_objects_wired_lock); \
    } \
    MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag) \
    MACRO_BEGIN \
    assert(VM_KERN_MEMORY_NONE != (tag)); \
    assert(VM_KERN_MEMORY_NONE == (object)->wire_tag); \
    (object)->wire_tag = (tag); \
    if (!VM_TAG_ACTIVE_UPDATE) { \
        VM_OBJECT_WIRED_ENQUEUE((object)); \
    } \
    MACRO_END

#define VM_OBJECT_UNWIRED(object) \
    MACRO_BEGIN \
    if (!VM_TAG_ACTIVE_UPDATE) { \
        VM_OBJECT_WIRED_DEQUEUE((object)); \
    } \
    if (VM_KERN_MEMORY_NONE != (object)->wire_tag) { \
        vm_tag_update_size((object)->wire_tag, -ptoa_64((object)->wired_page_count), (object)); \
        (object)->wire_tag = VM_KERN_MEMORY_NONE; \
    } \
    MACRO_END

// These two macros start & end a C block.
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object) \
    MACRO_BEGIN \
    { \
    int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag) \
    if (__wireddelta) { \
        boolean_t __overflow __assert_only = \
            os_add_overflow((object)->wired_page_count, __wireddelta, \
            &(object)->wired_page_count); \
        assert(!__overflow); \
        if (!(object)->pageout && !(object)->no_tag_update) { \
            if (__wireddelta > 0) { \
                assert(VM_KERN_MEMORY_NONE != (tag)); \
                if (VM_KERN_MEMORY_NONE == __waswired) { \
                    VM_OBJECT_WIRED((object), (tag)); \
                } \
                vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
            } else if (VM_KERN_MEMORY_NONE != __waswired) { \
                assert(VM_KERN_MEMORY_NONE != (object)->wire_tag); \
                vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
                if (!(object)->wired_page_count) { \
                    VM_OBJECT_UNWIRED((object)); \
                } \
            } \
        } \
    } \
    } \
    MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta) \
    __wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m) \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m) \
    if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
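
/*
 * Illustrative sketch (assumed usage): the UPDATE_START/UPDATE_END pair
 * brackets a C block; ADD/REMOVE accumulate into the hidden __wireddelta
 * local, and END applies the delta to wired_page_count and the tag ledger.
 *
 *      VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *      VM_OBJECT_WIRED_PAGE_ADD(object, m);
 *      VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */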


#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t  vm_object_lck_grp;
extern lck_attr_t vm_object_lck_attr;
extern lck_attr_t kernel_object_lck_attr;
extern lck_attr_t compressor_object_lck_attr;

extern vm_object_t vm_pageout_scan_wants_object;

extern void      vm_object_lock(vm_object_t);
extern bool      vm_object_lock_check_contended(vm_object_t);
extern boolean_t vm_object_lock_try(vm_object_t);
extern boolean_t _vm_object_lock_try(vm_object_t);
extern boolean_t vm_object_lock_avoid(vm_object_t);
extern void      vm_object_lock_shared(vm_object_t);
extern boolean_t vm_object_lock_yield_shared(vm_object_t);
extern boolean_t vm_object_lock_try_shared(vm_object_t);
extern void      vm_object_unlock(vm_object_t);
extern boolean_t vm_object_lock_upgrade(vm_object_t);

/*
 * Object locking macros
 */

#define vm_object_lock_init(object) \
    lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
        (is_kernel_object(object) ? \
        &kernel_object_lck_attr : \
        (((object) == compressor_object) ? \
        &compressor_object_lck_attr : \
        &vm_object_lck_attr)))
#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#define vm_object_lock_assert_held(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
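
/*
 * Illustrative sketch (assumed usage): a reader that discovers it must
 * modify the object can try to upgrade its shared hold; on failure the
 * lock has been dropped and must be re-taken exclusively.
 *
 *      vm_object_lock_shared(object);
 *      ...
 *      if (!vm_object_lock_upgrade(object)) {
 *              vm_object_lock(object);
 *      }
 *      vm_object_lock_assert_exclusive(object);
 */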


/*
 * Declare procedures that operate on VM objects.
 */

__private_extern__ void vm_object_bootstrap(void);

__private_extern__ void vm_object_reaper_init(void);

__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size);

__private_extern__ void _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);
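
/*
 * Illustrative sketch (assumed usage): creating an object large enough to
 * back "size" bytes; callers typically round the size up to a page boundary
 * first.  "size" is a hypothetical variable.
 *
 *      vm_object_t object;
 *
 *      object = vm_object_allocate(vm_object_round_page(size));
 */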

__private_extern__ void vm_object_set_size(
    vm_object_t object,
    vm_object_size_t outer_size,
    vm_object_size_t inner_size);

#define vm_object_reference_locked(object) \
    MACRO_BEGIN \
    vm_object_t RLObject = (object); \
    vm_object_lock_assert_exclusive(object); \
    assert((RLObject)->ref_count > 0); \
    (RLObject)->ref_count++; \
    assert((RLObject)->ref_count > 1); \
    MACRO_END


#define vm_object_reference_shared(object) \
    MACRO_BEGIN \
    vm_object_t RLObject = (object); \
    vm_object_lock_assert_shared(object); \
    assert((RLObject)->ref_count > 0); \
    OSAddAtomic(1, &(RLObject)->ref_count); \
    assert((RLObject)->ref_count > 0); \
    MACRO_END
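
/*
 * Illustrative sketch (assumed usage): taking an extra reference while
 * already holding the object lock exclusively; the shared variant needs
 * only a shared hold because it bumps ref_count atomically.
 *
 *      vm_object_lock(object);
 *      vm_object_reference_locked(object);
 *      vm_object_unlock(object);
 *      ...
 *      vm_object_deallocate(object);   // drop the reference later
 */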


__private_extern__ void vm_object_reference(
    vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object) \
    MACRO_BEGIN \
    vm_object_t RObject = (object); \
    if (RObject) { \
        vm_object_lock_shared(RObject); \
        vm_object_reference_shared(RObject); \
        vm_object_unlock(RObject); \
    } \
    MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void vm_object_deallocate(
    vm_object_t object);

__private_extern__ void vm_object_pmap_protect(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    pmap_t pmap,
    vm_map_size_t pmap_page_size,
    vm_map_offset_t pmap_start,
    vm_prot_t prot);

__private_extern__ void vm_object_pmap_protect_options(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    pmap_t pmap,
    vm_map_size_t pmap_page_size,
    vm_map_offset_t pmap_start,
    vm_prot_t prot,
    int options);

__private_extern__ void vm_object_page_remove(
    vm_object_t object,
    vm_object_offset_t start,
    vm_object_offset_t end);

__private_extern__ void vm_object_deactivate_pages(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    boolean_t kill_page,
    boolean_t reusable_page,
    boolean_t reusable_no_write,
    struct pmap *pmap,
    /* XXX TODO4K: need pmap_page_size here too? */
    vm_map_offset_t pmap_offset);

__private_extern__ void vm_object_reuse_pages(
    vm_object_t object,
    vm_object_offset_t start_offset,
    vm_object_offset_t end_offset,
    boolean_t allow_partial_reuse);

__private_extern__ uint64_t vm_object_purge(
    vm_object_t object,
    int flags);

__private_extern__ kern_return_t vm_object_purgable_control(
    vm_object_t object,
    vm_purgable_t control,
    int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);

__private_extern__ boolean_t vm_object_coalesce(
    vm_object_t prev_object,
    vm_object_t next_object,
    vm_object_offset_t prev_offset,
    vm_object_offset_t next_offset,
    vm_object_size_t prev_size,
    vm_object_size_t next_size);

__private_extern__ boolean_t vm_object_shadow(
    vm_object_t *object,
    vm_object_offset_t *offset,
    vm_object_size_t length,
    boolean_t always_shadow);

__private_extern__ void vm_object_collapse(
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t can_bypass);

__private_extern__ boolean_t vm_object_copy_quickly(
    vm_object_t object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    boolean_t *_src_needs_copy,
    boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
    vm_object_t src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    bool forking,
    vm_object_t *dst_object,
    vm_object_offset_t *dst_offset,
    boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
    vm_object_t src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    boolean_t interruptible,
    vm_object_t *_result_object);

__private_extern__ vm_object_t vm_object_copy_delayed(
    vm_object_t src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    boolean_t src_object_shared);



__private_extern__ kern_return_t vm_object_destroy(
    vm_object_t object,
    kern_return_t reason);

__private_extern__ void vm_object_pager_create(
    vm_object_t object);

__private_extern__ void vm_object_compressor_pager_create(
    vm_object_t object);

__private_extern__ void vm_object_page_map(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_object_offset_t (*map_fn)
    (void *, vm_object_offset_t),
    void *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
    vm_object_t object,
    vm_object_offset_t offset,
    upl_size_t size,
    upl_t *upl,
    upl_page_info_t *page_info,
    unsigned int *count,
    upl_control_flags_t flags,
    vm_tag_t tag);

__private_extern__ kern_return_t vm_object_transpose(
    vm_object_t object1,
    vm_object_t object2,
    vm_object_size_t transpose_size);

__private_extern__ boolean_t vm_object_sync(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    boolean_t should_flush,
    boolean_t should_return,
    boolean_t should_iosync);

__private_extern__ kern_return_t vm_object_update(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_object_offset_t *error_offset,
    int *io_errno,
    memory_object_return_t should_return,
    int flags,
    vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    memory_object_return_t should_return,
    int flags,
    vm_prot_t prot);



__private_extern__ vm_object_t vm_object_memory_object_associate(
    memory_object_t pager,
    vm_object_t object,
    vm_object_size_t size,
    boolean_t check_named);


__private_extern__ void vm_object_cluster_size(
    vm_object_t object,
    vm_object_offset_t *start,
    vm_size_t *length,
    vm_object_fault_info_t fault_info,
    uint32_t *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
    vm_object_t object,
    vm_object_offset_t offset,
    ppnum_t phys_page,
    vm_size_t size);

__private_extern__ void vm_object_change_wimg_mode(
    vm_object_t object,
    unsigned int wimg_mode);

extern kern_return_t adjust_vm_object_cache(
    vm_size_t oval,
    vm_size_t nval);

extern kern_return_t vm_object_page_op(
    vm_object_t object,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t vm_object_range_op(
    vm_object_t object,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    uint32_t *range);


__private_extern__ void vm_object_reap_pages(
    vm_object_t object,
    int reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
    vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
    void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
    vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
    uint64_t      blkno;
    uint32_t      len;
    int           priority;
    struct vnode  *devvp;
    queue_chain_t io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 * Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_UNBLOCKED               4
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 5

#define VM_OBJECT_EVENT_MAX 5 /* 6 bits in "all_wanted", so 0->5 */

static __inline__ wait_result_t
vm_object_assert_wait(
    vm_object_t object,
    int event,
    wait_interrupt_t interruptible)
{
    wait_result_t wr;

    vm_object_lock_assert_exclusive(object);
    assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

    object->all_wanted |= 1 << event;
    wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
        interruptible);
    return wr;
}

static __inline__ wait_result_t
vm_object_wait(
    vm_object_t object,
    int event,
    wait_interrupt_t interruptible)
{
    wait_result_t wr;

    vm_object_assert_wait(object, event, interruptible);
    vm_object_unlock(object);
    wr = thread_block(THREAD_CONTINUE_NULL);
    return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
    vm_object_t object,
    event_t event,
    wait_interrupt_t interruptible)
{
    wait_result_t wr;

    wr = lck_rw_sleep(&object->Lock,
        LCK_SLEEP_PROMOTED_PRI,
        event,
        interruptible);
    return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
    vm_object_t object,
    int event,
    wait_interrupt_t interruptible)
{
    wait_result_t wr;

    vm_object_lock_assert_exclusive(object);
    assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

    object->all_wanted |= 1 << event;
    wr = thread_sleep_vm_object(object,
        (event_t)((vm_offset_t)object + (vm_offset_t)event),
        interruptible);
    return wr;
}

static __inline__ void
vm_object_wakeup(
    vm_object_t object,
    int event)
{
    vm_object_lock_assert_exclusive(object);
    assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

    if (object->all_wanted & (1 << event)) {
        thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
    }
    object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
    vm_object_t object,
    int event)
{
    vm_object_lock_assert_exclusive(object);
    assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

    object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
    vm_object_t object,
    int event)
{
    vm_object_lock_assert_held(object);
    assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

    return object->all_wanted & (1 << event);
}
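
/*
 * Illustrative sketch (assumed usage): a waiter records its interest in an
 * event and sleeps with vm_object_sleep(), which drops and re-takes the
 * object lock around the block; the thread that makes the event true calls
 * vm_object_wakeup() with the lock held exclusively.
 *
 *      // waiter
 *      vm_object_lock(object);
 *      while (!object->pager_ready) {
 *              vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *                  THREAD_UNINT);
 *      }
 *      vm_object_unlock(object);
 *
 *      // waker
 *      vm_object_lock(object);
 *      object->pager_ready = TRUE;
 *      vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 *      vm_object_unlock(object);
 */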

/*
 * Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object) \
    MACRO_BEGIN \
    int pip = ((object)->paging_in_progress + \
        (object)->activity_in_progress); \
    if (pip < VM_PIP_DEBUG_MAX_REFS) { \
        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
            VM_PIP_DEBUG_STACK_FRAMES); \
    } \
    MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

#define vm_object_activity_begin(object) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    VM_PIP_DEBUG_BEGIN((object)); \
    (object)->activity_in_progress++; \
    if ((object)->activity_in_progress == 0) { \
        panic("vm_object_activity_begin(%p): overflow\n", (object)); \
    } \
    MACRO_END

#define vm_object_activity_end(object) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    if ((object)->activity_in_progress == 0) { \
        panic("vm_object_activity_end(%p): underflow\n", (object)); \
    } \
    (object)->activity_in_progress--; \
    if ((object)->paging_in_progress == 0 && \
        (object)->activity_in_progress == 0) \
            vm_object_wakeup((object), \
                VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
    MACRO_END

#define vm_object_paging_begin(object) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    VM_PIP_DEBUG_BEGIN((object)); \
    (object)->paging_in_progress++; \
    if ((object)->paging_in_progress == 0) { \
        panic("vm_object_paging_begin(%p): overflow\n", (object)); \
    } \
    MACRO_END

#define vm_object_paging_end(object) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    if ((object)->paging_in_progress == 0) { \
        panic("vm_object_paging_end(%p): underflow\n", (object)); \
    } \
    (object)->paging_in_progress--; \
    if ((object)->paging_in_progress == 0) { \
        vm_object_wakeup((object), \
            VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS); \
        if ((object)->activity_in_progress == 0) \
            vm_object_wakeup((object), \
                VM_OBJECT_EVENT_PAGING_IN_PROGRESS); \
    } \
    MACRO_END

#define vm_object_paging_wait(object, interruptible) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    while ((object)->paging_in_progress != 0 || \
        (object)->activity_in_progress != 0) { \
        wait_result_t _wr; \
\
        _wr = vm_object_sleep((object), \
            VM_OBJECT_EVENT_PAGING_IN_PROGRESS, \
            (interruptible)); \
\
        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
        /*XXX break; */ \
    } \
    MACRO_END

#define vm_object_paging_only_wait(object, interruptible) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    while ((object)->paging_in_progress != 0) { \
        wait_result_t _wr; \
\
        _wr = vm_object_sleep((object), \
            VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS, \
            (interruptible)); \
\
        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
        /*XXX break; */ \
    } \
    MACRO_END
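
/*
 * Illustrative sketch (assumed usage): paging activity is bracketed so that
 * collapse/terminate can wait for it to drain; begin/end must be balanced
 * and both require the exclusive object lock.
 *
 *      vm_object_lock(object);
 *      vm_object_paging_begin(object);
 *      vm_object_unlock(object);
 *      ... issue the pagein or pageout ...
 *      vm_object_lock(object);
 *      vm_object_paging_end(object);
 *      vm_object_unlock(object);
 */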


#define vm_object_mapping_begin(object) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    assert(!(object)->mapping_in_progress); \
    (object)->mapping_in_progress = TRUE; \
    MACRO_END

#define vm_object_mapping_end(object) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    assert((object)->mapping_in_progress); \
    (object)->mapping_in_progress = FALSE; \
    vm_object_wakeup((object), \
        VM_OBJECT_EVENT_MAPPING_IN_PROGRESS); \
    MACRO_END

#define vm_object_mapping_wait(object, interruptible) \
    MACRO_BEGIN \
    vm_object_lock_assert_exclusive((object)); \
    while ((object)->mapping_in_progress) { \
        wait_result_t _wr; \
\
        _wr = vm_object_sleep((object), \
            VM_OBJECT_EVENT_MAPPING_IN_PROGRESS, \
            (interruptible)); \
        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
        /*XXX break; */ \
    } \
    assert(!(object)->mapping_in_progress); \
    MACRO_END



#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
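
/*
 * Example arithmetic, assuming 4K pages (PAGE_MASK == 0xFFF):
 *
 *      vm_object_trunc_page(0x1234) == 0x1000
 *      vm_object_round_page(0x1234) == 0x2000
 *      vm_object_round_page(0x1000) == 0x1000
 */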

extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int  vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED  ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object) \
    ((object == VM_OBJECT_NULL || \
    ((object)->purgable == VM_PURGABLE_DENY && \
    (object)->vo_ledger_tag == 0) || \
    (object)->vo_owner == TASK_NULL) \
    ? TASK_NULL /* not owned */ \
    : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \
    ? kernel_task /* disowned -> kernel */ \
    : (object)->vo_owner)) /* explicit owner */
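
/*
 * Illustrative sketch (assumed usage): resolving the owning task before
 * charging a ledger; a disowned object is charged to the kernel, and an
 * unowned object resolves to TASK_NULL.
 *
 *      task_t owner = VM_OBJECT_OWNER(object);
 *      if (owner != TASK_NULL) {
 *              ... charge owner's ledger ...
 *      }
 */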

extern void vm_object_ledger_tag_ledgers(
    vm_object_t object,
    int *ledger_idx_volatile,
    int *ledger_idx_nonvolatile,
    int *ledger_idx_volatile_compressed,
    int *ledger_idx_nonvolatile_compressed,
    boolean_t *do_footprint);
extern kern_return_t vm_object_ownership_change(
    vm_object_t object,
    int new_ledger_tag,
    task_t new_owner,
    int new_ledger_flags,
    boolean_t task_objq_locked);

// LP64todo: all the current tools are 32-bit, so this obviously never worked
// for 64-bit; it should probably be a real 32-bit ID rather than a pointer.
// Current users just check for equality.
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

#endif /* _VM_VM_OBJECT_H_ */
