/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  [email protected]
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	vm_object.h
 *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
 *	Date:	1985
 *
 *	Virtual memory object module definitions.
 */

#ifndef _VM_VM_OBJECT_H_
#define _VM_VM_OBJECT_H_

#include <debug.h>
#include <mach_assert.h>

#include <mach/kern_return.h>
#include <mach/boolean.h>
#include <mach/memory_object_types.h>
#include <mach/port.h>
#include <mach/vm_prot.h>
#include <mach/vm_param.h>
#include <mach/machine/vm_types.h>
#include <kern/queue.h>
#include <kern/locks.h>
#include <kern/assert.h>
#include <kern/misc_protos.h>
#include <kern/macro_help.h>
#include <ipc/ipc_types.h>
#include <vm/pmap.h>

#include <vm/vm_external.h>

#include <vm/vm_options.h>
#include <vm/vm_page.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS  50000
#define VM_OBJECT_TRACKING_OP_CREATED   1
#define VM_OBJECT_TRACKING_OP_MODIFIED  2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */

struct vm_page;

/*
 *	Types defined:
 *
 *	vm_object_t		Virtual memory object.
 *	vm_object_fault_info_t	Used to determine cluster size.
 */

struct vm_object_fault_info {
	int             interruptible;
	uint32_t        user_tag;
	vm_size_t       cluster_size;
	vm_behavior_t   behavior;
	vm_object_offset_t lo_offset;
	vm_object_offset_t hi_offset;
	unsigned int
	/* boolean_t */ no_cache:1,
	/* boolean_t */ stealth:1,
	/* boolean_t */ io_sync:1,
	/* boolean_t */ cs_bypass:1,
	/* boolean_t */ csm_associated:1,
	/* boolean_t */ mark_zf_absent:1,
	/* boolean_t */ batch_pmap_op:1,
	/* boolean_t */ resilient_media:1,
	/* boolean_t */ no_copy_on_read:1,
	/* boolean_t */ fi_xnu_user_debug:1,
	/* boolean_t */ fi_used_for_tpro:1,
	    __vm_object_fault_info_unused_bits:21;
	int             pmap_options;
};


#define vo_size                 vo_un1.vou_size
#define vo_cache_pages_to_scan  vo_un1.vou_cache_pages_to_scan
#define vo_shadow_offset        vo_un2.vou_shadow_offset
#define vo_cache_ts             vo_un2.vou_cache_ts
#define vo_owner                vo_un2.vou_owner

struct vm_object {
	/*
	 * on 64 bit systems we pack the pointers hung off the memq.
	 * those pointers have to be able to point back to the memq.
	 * the packed pointers are required to be on a 64 byte boundary
	 * which means 2 things for the vm_object...  (1) the memq
	 * struct has to be the first element of the structure so that
	 * we can control its alignment... (2) the vm_object must be
	 * aligned on a 64 byte boundary... for static vm_objects
	 * this is accomplished via the 'aligned' attribute... for
	 * vm_objects in the zone pool, this is accomplished by
	 * rounding the size of the vm_object element to the nearest
	 * 64 byte size before creating the zone.
	 */
	vm_page_queue_head_t    memq;           /* Resident memory - must be first */
	lck_rw_t                Lock;           /* Synchronization */

	union {
		vm_object_size_t vou_size;      /* Object size (only valid if internal) */
		int             vou_cache_pages_to_scan;  /* pages yet to be visited in an
		                                           * external object in cache
		                                           */
	} vo_un1;

	struct vm_page          *memq_hint;
	int                     ref_count;      /* Number of references */
	unsigned int            resident_page_count;
	                                        /* number of resident pages */
	unsigned int            wired_page_count;  /* number of wired pages
	                                            * use VM_OBJECT_WIRED_PAGE_UPDATE macros to update */
	unsigned int            reusable_page_count;

	struct vm_object        *vo_copy;       /* Object that should receive
	                                         * a copy of my changed pages,
	                                         * for copy_delay, or just the
	                                         * temporary object that
	                                         * shadows this object, for
	                                         * copy_call.
	                                         */
	uint32_t                vo_copy_version;
	uint32_t                __vo_unused_padding;
	struct vm_object        *shadow;        /* My shadow */
	memory_object_t         pager;          /* Where to get data */

	union {
		vm_object_offset_t vou_shadow_offset;   /* Offset into shadow */
		clock_sec_t     vou_cache_ts;   /* age of an external object
		                                 * present in cache
		                                 */
		task_t          vou_owner;      /* If the object is purgeable
		                                 * or has a "ledger_tag", this
		                                 * is the task that owns it.
		                                 */
	} vo_un2;

	vm_object_offset_t      paging_offset;  /* Offset into memory object */
	memory_object_control_t pager_control;  /* Where data comes back */

	memory_object_copy_strategy_t
	                        copy_strategy;  /* How to handle data copy */

	/*
	 * Some user processes (mostly VirtualMachine software) take a large
	 * number of UPLs (via IOMemoryDescriptors) to wire pages in large
	 * VM objects and overflow the 16-bit "activity_in_progress" counter.
	 * Since we never enforced any limit there, let's give them 32 bits
	 * for backwards compatibility's sake.
	 */
	unsigned short          paging_in_progress:16;
	unsigned short          vo_size_delta;
	unsigned int            activity_in_progress;

	/* The memory object ports are
	 * being used (e.g., for pagein
	 * or pageout) -- don't change
	 * any of these fields (i.e.,
	 * don't collapse, destroy or
	 * terminate)
	 */

	unsigned int
	/* boolean_t array */ all_wanted:6,     /* Bit array of "want to be
	                                         * awakened" notations.  See
	                                         * VM_OBJECT_EVENT_* items
	                                         * below */
	/* boolean_t */ pager_created:1,        /* Has pager been created? */
	/* boolean_t */ pager_initialized:1,    /* Are fields ready to use? */
	/* boolean_t */ pager_ready:1,          /* Will pager take requests? */

	/* boolean_t */ pager_trusted:1,        /* The pager for this object
	                                         * is trusted. This is true for
	                                         * all internal objects (backed
	                                         * by the default pager)
	                                         */
	/* boolean_t */ can_persist:1,          /* The kernel may keep the data
	                                         * for this object (and rights
	                                         * to the memory object) after
	                                         * all address map references
	                                         * are deallocated?
	                                         */
	/* boolean_t */ internal:1,             /* Created by the kernel (and
	                                         * therefore, managed by the
	                                         * default memory manager)
	                                         */
	/* boolean_t */ private:1,              /* magic device_pager object,
	                                         * holds private pages only */
	/* boolean_t */ pageout:1,              /* pageout object. contains
	                                         * private pages that refer to
	                                         * a real memory object. */
	/* boolean_t */ alive:1,                /* Not yet terminated */

	/* boolean_t */ purgable:2,             /* Purgable state.  See
	                                         * VM_PURGABLE_*
	                                         */
	/* boolean_t */ purgeable_only_by_kernel:1,
	/* boolean_t */ purgeable_when_ripe:1,  /* Purgeable when a token
	                                         * becomes ripe.
	                                         */
	/* boolean_t */ shadowed:1,             /* Shadow may exist */
	/* boolean_t */ true_share:1,
	                                        /* This object is mapped
	                                         * in more than one place
	                                         * and hence cannot be
	                                         * coalesced */
	/* boolean_t */ terminating:1,
	                                        /* Allows vm_object_lookup
	                                         * and vm_object_deallocate
	                                         * to special case their
	                                         * behavior when they are
	                                         * called as a result of
	                                         * page cleaning during
	                                         * object termination
	                                         */
	/* boolean_t */ named:1,                /* A "named" object enforces
	                                         * an internal naming
	                                         * convention: by calling the
	                                         * right routines for
	                                         * allocation and destruction,
	                                         * UBC references against the
	                                         * vm_object are checked.
	                                         */
	/* boolean_t */ shadow_severed:1,
	                                        /* When a permanent object
	                                         * backing a COW goes away
	                                         * unexpectedly.  This bit
	                                         * allows vm_fault to return
	                                         * an error rather than a
	                                         * zero filled page.
	                                         */
	/* boolean_t */ phys_contiguous:1,
	                                        /* Memory is wired and
	                                         * guaranteed physically
	                                         * contiguous.  However
	                                         * it is not device memory
	                                         * and obeys normal virtual
	                                         * memory rules w.r.t pmap
	                                         * access bits.
	                                         */
	/* boolean_t */ nophyscache:1,
	                                        /* When mapped at the
	                                         * pmap level, don't allow
	                                         * primary caching. (for
	                                         * I/O)
	                                         */
	/* boolean_t */ for_realtime:1,
	                                        /* Might be needed for realtime code path */
#if FBDP_DEBUG_OBJECT_NO_PAGER
	/* boolean_t */ fbdp_tracked:1,
	    __object1_unused_bits:4;
#else /* FBDP_DEBUG_OBJECT_NO_PAGER */
	    __object1_unused_bits:5;
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

	queue_chain_t           cached_list;    /* Attachment point for the
	                                         * list of objects cached as a
	                                         * result of their can_persist
	                                         * value
	                                         */
	/*
	 * the following fields are not protected by any locks
	 * they are updated via atomic compare and swap
	 */
	vm_object_offset_t      last_alloc;     /* last allocation offset */
	vm_offset_t             cow_hint;       /* last page present in */
	                                        /* shadow but not in object */
	int                     sequential;     /* sequential access size */

	uint32_t                pages_created;
	uint32_t                pages_used;
	/* hold object lock when altering */
	unsigned int
	    wimg_bits:8,                /* cache WIMG bits */
	    code_signed:1,              /* pages are signed and should be
	                                 * validated; the signatures are stored
	                                 * with the pager */
	    transposed:1,               /* object was transposed with another */
	    mapping_in_progress:1,      /* pager being mapped/unmapped */
	    phantom_isssd:1,
	    volatile_empty:1,
	    volatile_fault:1,
	    all_reusable:1,
	    blocked_access:1,
	    set_cache_attr:1,
	    object_is_shared_cache:1,
	    purgeable_queue_type:2,
	    purgeable_queue_group:3,
	    io_tracking:1,
	    no_tag_update:1,            /* */
#if CONFIG_SECLUDED_MEMORY
	    eligible_for_secluded:1,
	    can_grab_secluded:1,
#else /* CONFIG_SECLUDED_MEMORY */
	    __object3_unused_bits:2,
#endif /* CONFIG_SECLUDED_MEMORY */
#if VM_OBJECT_ACCESS_TRACKING
	    access_tracking:1,
#else /* VM_OBJECT_ACCESS_TRACKING */
	    __unused_access_tracking:1,
#endif /* VM_OBJECT_ACCESS_TRACKING */
	    vo_ledger_tag:3,
	    vo_no_footprint:1;

#if VM_OBJECT_ACCESS_TRACKING
	uint32_t                access_tracking_reads;
	uint32_t                access_tracking_writes;
#endif /* VM_OBJECT_ACCESS_TRACKING */

	uint8_t                 scan_collisions;
	uint8_t                 __object4_unused_bits[1];
	vm_tag_t                wire_tag;

#if CONFIG_PHANTOM_CACHE
	uint32_t                phantom_object_id;
#endif
#if CONFIG_IOSCHED || UPL_DEBUG
	queue_head_t            uplq;           /* List of outstanding upls */
#endif

#ifdef VM_PIP_DEBUG
	/*
	 * Keep track of the stack traces for the first holders
	 * of a "paging_in_progress" reference for this VM object.
	 */
#define VM_PIP_DEBUG_STACK_FRAMES 25    /* depth of each stack trace */
#define VM_PIP_DEBUG_MAX_REFS 10        /* track that many references */
	struct __pip_backtrace {
		void *pip_retaddr[VM_PIP_DEBUG_STACK_FRAMES];
	} pip_holders[VM_PIP_DEBUG_MAX_REFS];
#endif /* VM_PIP_DEBUG */

	queue_chain_t           objq;       /* object queue - currently used for purgable queues */
	queue_chain_t           task_objq;  /* objects owned by task - protected by task lock */

#if !VM_TAG_ACTIVE_UPDATE
	queue_chain_t           wired_objq;
#endif /* !VM_TAG_ACTIVE_UPDATE */

#if DEBUG
	void *purgeable_owner_bt[16];
	task_t vo_purgeable_volatilizer; /* who made it volatile? */
	void *purgeable_volatilizer_bt[16];
#endif /* DEBUG */
};
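
/*
 * A minimal compile-time sketch of the alignment contract described at
 * the top of struct vm_object (hypothetical check, shown only for
 * illustration; the real enforcement is the 'aligned' attribute and the
 * zone element rounding mentioned there):
 *
 *	_Static_assert(offsetof(struct vm_object, memq) == 0,
 *	    "memq must be first so packed pointers can point back to it");
 */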

#define VM_OBJECT_PURGEABLE_FAULT_ERROR(object)                         \
	((object)->volatile_fault &&                                    \
	 ((object)->purgable == VM_PURGABLE_VOLATILE ||                 \
	  (object)->purgable == VM_PURGABLE_EMPTY))
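
/*
 * Sketch of intended use: a fault against a volatile or empty purgeable
 * object whose pages were reclaimed ("volatile_fault" is set) should
 * fail rather than silently supply zero-filled pages; the exact error
 * returned here is illustrative:
 *
 *	if (VM_OBJECT_PURGEABLE_FAULT_ERROR(object)) {
 *		return KERN_MEMORY_ERROR;
 *	}
 */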

#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern const vm_object_t kernel_object_default; /* the default kernel object */

extern const vm_object_t compressor_object;     /* the single compressor object */

extern const vm_object_t retired_pages_object;  /* pages retired due to ECC, should never be used */

#define is_kernel_object(object) ((object) == kernel_object_default)

#define VM_MSYNC_INITIALIZED            0
#define VM_MSYNC_SYNCHRONIZING          1
#define VM_MSYNC_DONE                   2


extern lck_grp_t        vm_map_lck_grp;
extern lck_attr_t       vm_map_lck_attr;

#ifndef VM_TAG_ACTIVE_UPDATE
#error VM_TAG_ACTIVE_UPDATE
#endif

#if VM_TAG_ACTIVE_UPDATE
#define VM_OBJECT_WIRED_ENQUEUE(object) panic("VM_OBJECT_WIRED_ENQUEUE")
#define VM_OBJECT_WIRED_DEQUEUE(object) panic("VM_OBJECT_WIRED_DEQUEUE")
#else /* VM_TAG_ACTIVE_UPDATE */
#define VM_OBJECT_WIRED_ENQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	assert(!(object)->wired_objq.next);                             \
	assert(!(object)->wired_objq.prev);                             \
	queue_enter(&vm_objects_wired, (object),                        \
	    vm_object_t, wired_objq);                                   \
	lck_spin_unlock(&vm_objects_wired_lock);                        \
	MACRO_END
#define VM_OBJECT_WIRED_DEQUEUE(object)                                 \
	MACRO_BEGIN                                                     \
	if ((object)->wired_objq.next) {                                \
	        lck_spin_lock_grp(&vm_objects_wired_lock, &vm_page_lck_grp_bucket); \
	        queue_remove(&vm_objects_wired, (object),               \
	            vm_object_t, wired_objq);                           \
	        lck_spin_unlock(&vm_objects_wired_lock);                \
	}                                                               \
	MACRO_END
#endif /* VM_TAG_ACTIVE_UPDATE */

#define VM_OBJECT_WIRED(object, tag)                                    \
	MACRO_BEGIN                                                     \
	assert(VM_KERN_MEMORY_NONE != (tag));                           \
	assert(VM_KERN_MEMORY_NONE == (object)->wire_tag);              \
	(object)->wire_tag = (tag);                                     \
	if (!VM_TAG_ACTIVE_UPDATE) {                                    \
	        VM_OBJECT_WIRED_ENQUEUE((object));                      \
	}                                                               \
	MACRO_END

#define VM_OBJECT_UNWIRED(object)                                       \
	MACRO_BEGIN                                                     \
	if (!VM_TAG_ACTIVE_UPDATE) {                                    \
	        VM_OBJECT_WIRED_DEQUEUE((object));                      \
	}                                                               \
	if (VM_KERN_MEMORY_NONE != (object)->wire_tag) {                \
	        vm_tag_update_size((object)->wire_tag,                  \
	            -ptoa_64((object)->wired_page_count), (object));    \
	        (object)->wire_tag = VM_KERN_MEMORY_NONE;               \
	}                                                               \
	MACRO_END
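
/*
 * Example (sketch): tag an object's wired memory once, then clear the
 * tag when the wiring goes away.  VM_KERN_MEMORY_NONE marks an untagged
 * object, so WIRED asserts a fresh tag and UNWIRED credits the remaining
 * wired size back to the tag before resetting it:
 *
 *	VM_OBJECT_WIRED(object, VM_KERN_MEMORY_IOKIT);
 *	...
 *	VM_OBJECT_UNWIRED(object);
 */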

// These two macros start & end a C block
#define VM_OBJECT_WIRED_PAGE_UPDATE_START(object)                       \
	MACRO_BEGIN                                                     \
	{                                                               \
	        int64_t __wireddelta = 0; vm_tag_t __waswired = (object)->wire_tag;

#define VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag)                    \
	if (__wireddelta) {                                             \
	        boolean_t __overflow __assert_only =                    \
	            os_add_overflow((object)->wired_page_count, __wireddelta, \
	            &(object)->wired_page_count);                       \
	        assert(!__overflow);                                    \
	        if (!(object)->pageout && !(object)->no_tag_update) {   \
	                if (__wireddelta > 0) {                         \
	                        assert(VM_KERN_MEMORY_NONE != (tag));   \
	                        if (VM_KERN_MEMORY_NONE == __waswired) { \
	                                VM_OBJECT_WIRED((object), (tag)); \
	                        }                                       \
	                        vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
	                } else if (VM_KERN_MEMORY_NONE != __waswired) { \
	                        assert(VM_KERN_MEMORY_NONE != (object)->wire_tag); \
	                        vm_tag_update_size((object)->wire_tag, ptoa_64(__wireddelta), (object)); \
	                        if (!(object)->wired_page_count) {      \
	                                VM_OBJECT_UNWIRED((object));    \
	                        }                                       \
	                }                                               \
	        }                                                       \
	}                                                               \
	MACRO_END

#define VM_OBJECT_WIRED_PAGE_COUNT(object, delta)                       \
	__wireddelta += delta;

#define VM_OBJECT_WIRED_PAGE_ADD(object, m)                             \
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta++;

#define VM_OBJECT_WIRED_PAGE_REMOVE(object, m)                          \
	if (!(m)->vmp_private && !(m)->vmp_fictitious) __wireddelta--;
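
/*
 * Usage sketch for the block-style macros above: START opens a block
 * and zeroes a local delta, ADD/REMOVE accumulate it per page, and END
 * applies the accumulated delta once to "wired_page_count" and to the
 * wire tag accounting:
 *
 *	VM_OBJECT_WIRED_PAGE_UPDATE_START(object);
 *	VM_OBJECT_WIRED_PAGE_ADD(object, mem);
 *	VM_OBJECT_WIRED_PAGE_UPDATE_END(object, tag);
 */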


#define OBJECT_LOCK_SHARED      0
#define OBJECT_LOCK_EXCLUSIVE   1

extern lck_grp_t        vm_object_lck_grp;
extern lck_attr_t       vm_object_lck_attr;
extern lck_attr_t       kernel_object_lck_attr;
extern lck_attr_t       compressor_object_lck_attr;

extern vm_object_t      vm_pageout_scan_wants_object;

extern void             vm_object_lock(vm_object_t);
extern bool             vm_object_lock_check_contended(vm_object_t);
extern boolean_t        vm_object_lock_try(vm_object_t);
extern boolean_t        _vm_object_lock_try(vm_object_t);
extern boolean_t        vm_object_lock_avoid(vm_object_t);
extern void             vm_object_lock_shared(vm_object_t);
extern boolean_t        vm_object_lock_yield_shared(vm_object_t);
extern boolean_t        vm_object_lock_try_shared(vm_object_t);
extern void             vm_object_unlock(vm_object_t);
extern boolean_t        vm_object_lock_upgrade(vm_object_t);

/*
 *	Object locking macros
 */

#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	    (is_kernel_object(object) ?                                 \
	    &kernel_object_lck_attr :                                   \
	    (((object) == compressor_object) ?                          \
	    &compressor_object_lck_attr :                               \
	    &vm_object_lck_attr)))
#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#define vm_object_lock_assert_held(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
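
/*
 * Typical locking pattern (sketch): read under a shared lock, upgrade
 * for mutation.  vm_object_lock_upgrade() can fail, in which case (per
 * lck_rw semantics) the lock has been dropped and must be retaken
 * exclusive, with any previously observed state revalidated:
 *
 *	vm_object_lock_shared(object);
 *	...
 *	if (!vm_object_lock_upgrade(object)) {
 *		vm_object_lock(object);
 *		... revalidate ...
 *	}
 */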

/*
 *	Declare procedures that operate on VM objects.
 */

__private_extern__ void         vm_object_bootstrap(void);

__private_extern__ void         vm_object_reaper_init(void);

__private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size);

__private_extern__ void         _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

__private_extern__ void         vm_object_set_size(
	vm_object_t             object,
	vm_object_size_t        outer_size,
	vm_object_size_t        inner_size);

#define vm_object_reference_locked(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_exclusive(object);        \
	assert((RLObject)->ref_count > 0);              \
	(RLObject)->ref_count++;                        \
	assert((RLObject)->ref_count > 1);              \
	MACRO_END


#define vm_object_reference_shared(object)              \
	MACRO_BEGIN                                     \
	vm_object_t RLObject = (object);                \
	vm_object_lock_assert_shared(object);           \
	assert((RLObject)->ref_count > 0);              \
	OSAddAtomic(1, &(RLObject)->ref_count);         \
	assert((RLObject)->ref_count > 0);              \
	MACRO_END


__private_extern__ void         vm_object_reference(
	vm_object_t     object);

#if !MACH_ASSERT

#define vm_object_reference(object)                     \
	MACRO_BEGIN                                     \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
	MACRO_END

#endif  /* MACH_ASSERT */

__private_extern__ void         vm_object_deallocate(
	vm_object_t     object);

__private_extern__ void         vm_object_pmap_protect(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot);

__private_extern__ void         vm_object_pmap_protect_options(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	pmap_t                  pmap,
	vm_map_size_t           pmap_page_size,
	vm_map_offset_t         pmap_start,
	vm_prot_t               prot,
	int                     options);

__private_extern__ void         vm_object_page_remove(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end);

__private_extern__ void         vm_object_deactivate_pages(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               kill_page,
	boolean_t               reusable_page,
	boolean_t               reusable_no_write,
	struct pmap             *pmap,
	/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t         pmap_offset);

__private_extern__ void         vm_object_reuse_pages(
	vm_object_t             object,
	vm_object_offset_t      start_offset,
	vm_object_offset_t      end_offset,
	boolean_t               allow_partial_reuse);

__private_extern__ uint64_t     vm_object_purge(
	vm_object_t             object,
	int                     flags);

__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t     object,
	vm_purgable_t   control,
	int             *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	unsigned int            *resident_page_count,
	unsigned int            *dirty_page_count);

__private_extern__ boolean_t    vm_object_coalesce(
	vm_object_t             prev_object,
	vm_object_t             next_object,
	vm_object_offset_t      prev_offset,
	vm_object_offset_t      next_offset,
	vm_object_size_t        prev_size,
	vm_object_size_t        next_size);

__private_extern__ boolean_t    vm_object_shadow(
	vm_object_t             *object,
	vm_object_offset_t      *offset,
	vm_object_size_t        length,
	boolean_t               always_shadow);

__private_extern__ void         vm_object_collapse(
	vm_object_t             object,
	vm_object_offset_t      offset,
	boolean_t               can_bypass);

__private_extern__ boolean_t    vm_object_copy_quickly(
	vm_object_t             object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               *_src_needs_copy,
	boolean_t               *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	bool                    forking,
	vm_object_t             *dst_object,
	vm_object_offset_t      *dst_offset,
	boolean_t               *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               interruptible,
	vm_object_t             *_result_object);

__private_extern__ vm_object_t  vm_object_copy_delayed(
	vm_object_t             src_object,
	vm_object_offset_t      src_offset,
	vm_object_size_t        size,
	boolean_t               src_object_shared);


__private_extern__ kern_return_t vm_object_destroy(
	vm_object_t     object,
	kern_return_t   reason);

__private_extern__ void         vm_object_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_compressor_pager_create(
	vm_object_t     object);

__private_extern__ void         vm_object_page_map(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      (*map_fn)
	(void *, vm_object_offset_t),
	void                    *map_fn_data);

__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	upl_size_t              size,
	upl_t                   *upl,
	upl_page_info_t         *page_info,
	unsigned int            *count,
	upl_control_flags_t     flags,
	vm_tag_t                tag);

__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t             object1,
	vm_object_t             object2,
	vm_object_size_t        transpose_size);

__private_extern__ boolean_t    vm_object_sync(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	boolean_t               should_flush,
	boolean_t               should_return,
	boolean_t               should_iosync);

__private_extern__ kern_return_t vm_object_update(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	vm_object_offset_t      *error_offset,
	int                     *io_errno,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t             object,
	vm_object_offset_t      offset,
	vm_object_size_t        size,
	memory_object_return_t  should_return,
	int                     flags,
	vm_prot_t               prot);


__private_extern__ vm_object_t  vm_object_memory_object_associate(
	memory_object_t         pager,
	vm_object_t             object,
	vm_object_size_t        size,
	boolean_t               check_named);


__private_extern__ void         vm_object_cluster_size(
	vm_object_t             object,
	vm_object_offset_t      *start,
	vm_size_t               *length,
	vm_object_fault_info_t  fault_info,
	uint32_t                *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t             object,
	vm_object_offset_t      offset,
	ppnum_t                 phys_page,
	vm_size_t               size);

__private_extern__ void         vm_object_change_wimg_mode(
	vm_object_t             object,
	unsigned int            wimg_mode);

extern kern_return_t    adjust_vm_object_cache(
	vm_size_t       oval,
	vm_size_t       nval);

extern kern_return_t    vm_object_page_op(
	vm_object_t             object,
	vm_object_offset_t      offset,
	int                     ops,
	ppnum_t                 *phys_entry,
	int                     *flags);

extern kern_return_t    vm_object_range_op(
	vm_object_t             object,
	vm_object_offset_t      offset_beg,
	vm_object_offset_t      offset_end,
	int                     ops,
	uint32_t                *range);


__private_extern__ void         vm_object_reap_pages(
	vm_object_t     object,
	int             reap_type);
#define REAP_REAP       0
#define REAP_TERMINATE  1
#define REAP_PURGEABLE  2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
	vm_object_t object);

#if CONFIG_IOSCHED
struct io_reprioritize_req {
	uint64_t        blkno;
	uint32_t        len;
	int             priority;
	struct vnode    *devvp;
	queue_chain_t   io_reprioritize_list;
};
typedef struct io_reprioritize_req *io_reprioritize_req_t;

extern void vm_io_reprioritize_init(void);
#endif

/*
 *	Event waiting handling
 */

#define VM_OBJECT_EVENT_INITIALIZED             0
#define VM_OBJECT_EVENT_PAGER_READY             1
#define VM_OBJECT_EVENT_PAGING_IN_PROGRESS      2
#define VM_OBJECT_EVENT_MAPPING_IN_PROGRESS     3
#define VM_OBJECT_EVENT_UNBLOCKED               4
#define VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS 5

#define VM_OBJECT_EVENT_MAX 5 /* 6 bits in "all_wanted", so 0->5 */

static __inline__ wait_result_t
vm_object_assert_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = assert_wait((event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_wait(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_assert_wait(object, event, interruptible);
	vm_object_unlock(object);
	wr = thread_block(THREAD_CONTINUE_NULL);
	return wr;
}

static __inline__ wait_result_t
thread_sleep_vm_object(
	vm_object_t             object,
	event_t                 event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	wr = lck_rw_sleep(&object->Lock,
	    LCK_SLEEP_PROMOTED_PRI,
	    event,
	    interruptible);
	return wr;
}

static __inline__ wait_result_t
vm_object_sleep(
	vm_object_t             object,
	int                     event,
	wait_interrupt_t        interruptible)
{
	wait_result_t wr;

	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= 1 << event;
	wr = thread_sleep_vm_object(object,
	    (event_t)((vm_offset_t)object + (vm_offset_t)event),
	    interruptible);
	return wr;
}

static __inline__ void
vm_object_wakeup(
	vm_object_t     object,
	int             event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	if (object->all_wanted & (1 << event)) {
		thread_wakeup((event_t)((vm_offset_t)object + (vm_offset_t)event));
	}
	object->all_wanted &= ~(1 << event);
}

static __inline__ void
vm_object_set_wanted(
	vm_object_t     object,
	int             event)
{
	vm_object_lock_assert_exclusive(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << event);
}

static __inline__ int
vm_object_wanted(
	vm_object_t     object,
	int             event)
{
	vm_object_lock_assert_held(object);
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
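
/*
 * Sketch of the event protocol implemented by the inlines above: a
 * waiter records interest in "all_wanted" and sleeps on the (object +
 * event) address; vm_object_wakeup() only posts a wakeup if interest
 * was recorded, then clears the bit.  A typical wait loop:
 *
 *	vm_object_lock(object);
 *	while (!object->pager_ready) {
 *		vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *		    THREAD_UNINT);
 *	}
 *	vm_object_unlock(object);
 */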

/*
 *	Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	    (object)->activity_in_progress);                            \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	            VM_PIP_DEBUG_STACK_FRAMES);                         \
	}                                                               \
	MACRO_END
#else   /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif  /* VM_PIP_DEBUG */

#define vm_object_activity_begin(object)                                \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->activity_in_progress++;                               \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_begin(%p): overflow\n", (object)); \
	}                                                               \
	MACRO_END

#define vm_object_activity_end(object)                                  \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->activity_in_progress == 0) {                      \
	        panic("vm_object_activity_end(%p): underflow\n", (object)); \
	}                                                               \
	(object)->activity_in_progress--;                               \
	if ((object)->paging_in_progress == 0 &&                        \
	    (object)->activity_in_progress == 0)                        \
	        vm_object_wakeup((object),                              \
	            VM_OBJECT_EVENT_PAGING_IN_PROGRESS);                \
	MACRO_END

#define vm_object_paging_begin(object)                                  \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	VM_PIP_DEBUG_BEGIN((object));                                   \
	(object)->paging_in_progress++;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_begin(%p): overflow\n", (object)); \
	}                                                               \
	MACRO_END

#define vm_object_paging_end(object)                                    \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	if ((object)->paging_in_progress == 0) {                        \
	        panic("vm_object_paging_end(%p): underflow\n", (object)); \
	}                                                               \
	(object)->paging_in_progress--;                                 \
	if ((object)->paging_in_progress == 0) {                        \
	        vm_object_wakeup((object),                              \
	            VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);           \
	        if ((object)->activity_in_progress == 0)                \
	                vm_object_wakeup((object),                      \
	                    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);        \
	}                                                               \
	MACRO_END
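
/*
 * Typical use (sketch): bracket pager I/O so the object cannot be
 * collapsed, destroyed or terminated while the operation is in flight;
 * the object lock need not be held across the I/O itself:
 *
 *	vm_object_lock(object);
 *	vm_object_paging_begin(object);
 *	vm_object_unlock(object);
 *	... issue request to the pager ...
 *	vm_object_lock(object);
 *	vm_object_paging_end(object);
 *	vm_object_unlock(object);
 */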

#define vm_object_paging_wait(object, interruptible)                    \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0 ||                     \
	    (object)->activity_in_progress != 0) {                      \
	        wait_result_t _wr;                                      \
	                                                                \
	        _wr = vm_object_sleep((object),                         \
	            VM_OBJECT_EVENT_PAGING_IN_PROGRESS,                 \
	            (interruptible));                                   \
	                                                                \
	        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	        /*XXX break; */                                         \
	}                                                               \
	MACRO_END

#define vm_object_paging_only_wait(object, interruptible)               \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->paging_in_progress != 0) {                     \
	        wait_result_t _wr;                                      \
	                                                                \
	        _wr = vm_object_sleep((object),                         \
	            VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,            \
	            (interruptible));                                   \
	                                                                \
	        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	        /*XXX break; */                                         \
	}                                                               \
	MACRO_END


#define vm_object_mapping_begin(object)                                 \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert(!(object)->mapping_in_progress);                         \
	(object)->mapping_in_progress = TRUE;                           \
	MACRO_END

#define vm_object_mapping_end(object)                                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	assert((object)->mapping_in_progress);                          \
	(object)->mapping_in_progress = FALSE;                          \
	vm_object_wakeup((object),                                      \
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);                       \
	MACRO_END

#define vm_object_mapping_wait(object, interruptible)                   \
	MACRO_BEGIN                                                     \
	vm_object_lock_assert_exclusive((object));                      \
	while ((object)->mapping_in_progress) {                         \
	        wait_result_t _wr;                                      \
	                                                                \
	        _wr = vm_object_sleep((object),                         \
	            VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,                \
	            (interruptible));                                   \
	        /*XXX if ((interruptible) && (_wr != THREAD_AWAKENED))*/ \
	        /*XXX break; */                                         \
	}                                                               \
	assert(!(object)->mapping_in_progress);                         \
	MACRO_END


#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
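
/*
 * Example, assuming 16K pages (PAGE_MASK == 0x3FFF):
 *
 *	vm_object_trunc_page(0x4123) == 0x4000
 *	vm_object_round_page(0x4123) == 0x8000
 */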

extern void     vm_object_cache_add(vm_object_t);
extern void     vm_object_cache_remove(vm_object_t);
extern int      vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED        ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED       ((task_t) -2)
#define VM_OBJECT_OWNER(object)                                         \
	((object == VM_OBJECT_NULL ||                                   \
	  ((object)->purgable == VM_PURGABLE_DENY &&                    \
	   (object)->vo_ledger_tag == 0) ||                             \
	  (object)->vo_owner == TASK_NULL)                              \
	 ? TASK_NULL    /* not owned */                                 \
	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
	    ? kernel_task /* disowned -> kernel */                      \
	    : (object)->vo_owner)) /* explicit owner */

extern void     vm_object_ledger_tag_ledgers(
	vm_object_t     object,
	int             *ledger_idx_volatile,
	int             *ledger_idx_nonvolatile,
	int             *ledger_idx_volatile_compressed,
	int             *ledger_idx_nonvolatile_compressed,
	boolean_t       *do_footprint);
extern kern_return_t vm_object_ownership_change(
	vm_object_t     object,
	int             new_ledger_tag,
	task_t          new_owner,
	int             new_ledger_flags,
	boolean_t       task_objq_locked);

// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRPERM((o)))

static inline void
VM_OBJECT_COPY_SET(
	vm_object_t     object,
	vm_object_t     copy)
{
	vm_object_lock_assert_exclusive(object);
	object->vo_copy = copy;
	if (copy != VM_OBJECT_NULL) {
		object->vo_copy_version++;
	}
}

#endif  /* _VM_VM_OBJECT_H_ */