/*
 * Copyright (c) 2023 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#ifndef _VM_VM_OBJECT_INTERNAL_H_
#define _VM_VM_OBJECT_INTERNAL_H_

#ifdef XNU_KERNEL_PRIVATE
#include <vm/vm_object_xnu.h>

#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
#define VM_OBJECT_TRACKING_NUM_RECORDS 50000
#define VM_OBJECT_TRACKING_OP_CREATED 1
#define VM_OBJECT_TRACKING_OP_MODIFIED 2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */
#if VM_OBJECT_ACCESS_TRACKING
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */

extern uint16_t vm_object_pagein_throttle;

/*
 * Object locking macros
 */

#define vm_object_lock_init(object) \
    lck_rw_init(&(object)->Lock, &vm_object_lck_grp, \
    (is_kernel_object(object) ? \
    &kernel_object_lck_attr : \
    (((object) == compressor_object) ? \
    &compressor_object_lck_attr : \
    &vm_object_lck_attr)))
#define vm_object_lock_destroy(object) lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#define vm_object_lock_assert_held(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
    LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
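
/*
 * Illustrative sketch (not part of this interface): a reader that only
 * inspects object state can document its locking contract with the
 * assertions above, e.g.
 *
 *     vm_object_lock_shared(object);
 *     vm_object_lock_assert_held(object);
 *     ... read-only inspection of the object ...
 *     vm_object_unlock(object);
 *
 * Per the CAUTION above, assert_held() only proves that *some* thread
 * holds the lock, not that the asserting thread is the holder.
 */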
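
/*
 * Locked setters for vm_object bitfields. Each setter requires the object
 * lock to be held exclusively. For the multi-bit fields (purgable,
 * no_pager_reason), the assert3u() verifies that the assigned value was
 * not truncated by the bitfield.
 */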
static inline void
VM_OBJECT_SET_PAGER_CREATED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->pager_created = value;
}
static inline void
VM_OBJECT_SET_PAGER_INITIALIZED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->pager_initialized = value;
}
static inline void
VM_OBJECT_SET_PAGER_READY(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->pager_ready = value;
}
static inline void
VM_OBJECT_SET_PAGER_TRUSTED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->pager_trusted = value;
}
static inline void
VM_OBJECT_SET_CAN_PERSIST(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->can_persist = value;
}
static inline void
VM_OBJECT_SET_INTERNAL(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->internal = value;
}
static inline void
VM_OBJECT_SET_PRIVATE(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->private = value;
}
static inline void
VM_OBJECT_SET_PAGEOUT(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->pageout = value;
}
static inline void
VM_OBJECT_SET_ALIVE(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->alive = value;
}
static inline void
VM_OBJECT_SET_PURGABLE(
    vm_object_t object,
    unsigned int value)
{
    vm_object_lock_assert_exclusive(object);
    object->purgable = value;
    assert3u(object->purgable, ==, value);
}
static inline void
VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->purgeable_only_by_kernel = value;
}
static inline void
VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->purgeable_when_ripe = value;
}
static inline void
VM_OBJECT_SET_SHADOWED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->shadowed = value;
}
static inline void
VM_OBJECT_SET_TRUE_SHARE(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->true_share = value;
}
static inline void
VM_OBJECT_SET_TERMINATING(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->terminating = value;
}
static inline void
VM_OBJECT_SET_NAMED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->named = value;
}
static inline void
VM_OBJECT_SET_SHADOW_SEVERED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->shadow_severed = value;
}
static inline void
VM_OBJECT_SET_PHYS_CONTIGUOUS(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->phys_contiguous = value;
}
static inline void
VM_OBJECT_SET_NOPHYSCACHE(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->nophyscache = value;
}
static inline void
VM_OBJECT_SET_FOR_REALTIME(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->for_realtime = value;
}
static inline void
VM_OBJECT_SET_NO_PAGER_REASON(
    vm_object_t object,
    unsigned int value)
{
    vm_object_lock_assert_exclusive(object);
    object->no_pager_reason = value;
    assert3u(object->no_pager_reason, ==, value);
}
#if FBDP_DEBUG_OBJECT_NO_PAGER
static inline void
VM_OBJECT_SET_FBDP_TRACKED(
    vm_object_t object,
    bool value)
{
    vm_object_lock_assert_exclusive(object);
    object->fbdp_tracked = value;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */

/*
 * Declare procedures that operate on VM objects.
 */

__private_extern__ void vm_object_bootstrap(void);

__private_extern__ void vm_object_reaper_init(void);

__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size);

__private_extern__ void _vm_object_allocate(vm_object_size_t size,
    vm_object_t object);

__private_extern__ void vm_object_set_size(
    vm_object_t object,
    vm_object_size_t outer_size,
    vm_object_size_t inner_size);

#define vm_object_reference_locked(object) \
    MACRO_BEGIN \
    vm_object_t RLObject = (object); \
    vm_object_lock_assert_exclusive(object); \
    assert((RLObject)->ref_count > 0); \
    (RLObject)->ref_count++; \
    assert((RLObject)->ref_count > 1); \
    MACRO_END


#define vm_object_reference_shared(object) \
    MACRO_BEGIN \
    vm_object_t RLObject = (object); \
    vm_object_lock_assert_shared(object); \
    assert((RLObject)->ref_count > 0); \
    OSAddAtomic(1, &(RLObject)->ref_count); \
    assert((RLObject)->ref_count > 0); \
    MACRO_END
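
/*
 * Note on the two flavors above: vm_object_reference_locked() requires the
 * lock held exclusively and can therefore bump ref_count non-atomically,
 * while vm_object_reference_shared() only requires a shared hold and uses
 * OSAddAtomic() so concurrent shared holders do not race on the increment.
 */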


__private_extern__ void vm_object_reference(
    vm_object_t object);

#if !MACH_ASSERT

#define vm_object_reference(object) \
    MACRO_BEGIN \
    vm_object_t RObject = (object); \
    if (RObject) { \
        vm_object_lock_shared(RObject); \
        vm_object_reference_shared(RObject); \
        vm_object_unlock(RObject); \
    } \
    MACRO_END

#endif /* MACH_ASSERT */

__private_extern__ void vm_object_deallocate(
    vm_object_t object);

__private_extern__ void vm_object_pmap_protect(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    pmap_t pmap,
    vm_map_size_t pmap_page_size,
    vm_map_offset_t pmap_start,
    vm_prot_t prot);

__private_extern__ void vm_object_pmap_protect_options(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    pmap_t pmap,
    vm_map_size_t pmap_page_size,
    vm_map_offset_t pmap_start,
    vm_prot_t prot,
    int options);

__private_extern__ void vm_object_page_remove(
    vm_object_t object,
    vm_object_offset_t start,
    vm_object_offset_t end);

__private_extern__ void vm_object_deactivate_pages(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    boolean_t kill_page,
    boolean_t reusable_page,
    boolean_t reusable_no_write,
    struct pmap *pmap,
    /* XXX TODO4K: need pmap_page_size here too? */
    vm_map_offset_t pmap_offset);

__private_extern__ void vm_object_reuse_pages(
    vm_object_t object,
    vm_object_offset_t start_offset,
    vm_object_offset_t end_offset,
    boolean_t allow_partial_reuse);

__private_extern__ kern_return_t vm_object_zero(
    vm_object_t object,
    vm_object_offset_t cur_offset,
    vm_object_offset_t end_offset);

__private_extern__ uint64_t vm_object_purge(
    vm_object_t object,
    int flags);

__private_extern__ kern_return_t vm_object_purgable_control(
    vm_object_t object,
    vm_purgable_t control,
    int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    unsigned int *resident_page_count,
    unsigned int *dirty_page_count);

__private_extern__ boolean_t vm_object_coalesce(
    vm_object_t prev_object,
    vm_object_t next_object,
    vm_object_offset_t prev_offset,
    vm_object_offset_t next_offset,
    vm_object_size_t prev_size,
    vm_object_size_t next_size);

__private_extern__ boolean_t vm_object_shadow(
    vm_object_t *object,
    vm_object_offset_t *offset,
    vm_object_size_t length,
    boolean_t always_shadow);

__private_extern__ void vm_object_collapse(
    vm_object_t object,
    vm_object_offset_t offset,
    boolean_t can_bypass);

__private_extern__ boolean_t vm_object_copy_quickly(
    vm_object_t object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    boolean_t *_src_needs_copy,
    boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
    vm_object_t src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    bool forking,
    vm_object_t *dst_object,
    vm_object_offset_t *dst_offset,
    boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
    vm_object_t src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    boolean_t interruptible,
    vm_object_t *_result_object);

__private_extern__ vm_object_t vm_object_copy_delayed(
    vm_object_t src_object,
    vm_object_offset_t src_offset,
    vm_object_size_t size,
    boolean_t src_object_shared);

__private_extern__ kern_return_t vm_object_destroy(
    vm_object_t object,
    vm_object_destroy_reason_t reason);

__private_extern__ void vm_object_compressor_pager_create(
    vm_object_t object);

/*
 * Query whether the given object,offset resides in the compressor. The
 * caller must hold the object lock and ensure that the object,offset under
 * inspection is not in the process of being paged in/out (i.e. no busy
 * backing page).
 */
__private_extern__ vm_external_state_t vm_object_compressor_pager_state_get(
    vm_object_t object,
    vm_object_offset_t offset);

/*
 * Clear the compressor slot corresponding to an object,offset. The caller
 * must hold the object lock (exclusive) and ensure that the object,offset
 * under inspection is not in the process of being paged in/out (i.e. no busy
 * backing page).
 */
__private_extern__ void vm_object_compressor_pager_state_clr(
    vm_object_t object,
    vm_object_offset_t offset);
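
/*
 * Illustrative sketch (hypothetical caller): deciding whether a page's
 * contents must come from the compressor before faulting it in:
 *
 *     vm_object_lock(object);
 *     if (vm_object_compressor_pager_state_get(object, offset) ==
 *         VM_EXTERNAL_STATE_EXISTS) {
 *         ... take the decompression path ...
 *     }
 *     vm_object_unlock(object);
 *
 * Per the contracts above, the caller must also ensure there is no busy
 * backing page for (object, offset), and state_clr() additionally requires
 * the lock to be held exclusively.
 */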

__private_extern__ kern_return_t vm_object_upl_request(
    vm_object_t object,
    vm_object_offset_t offset,
    upl_size_t size,
    upl_t *upl,
    upl_page_info_t *page_info,
    unsigned int *count,
    upl_control_flags_t flags,
    vm_tag_t tag);

__private_extern__ kern_return_t vm_object_transpose(
    vm_object_t object1,
    vm_object_t object2,
    vm_object_size_t transpose_size);

__private_extern__ boolean_t vm_object_sync(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    boolean_t should_flush,
    boolean_t should_return,
    boolean_t should_iosync);

__private_extern__ kern_return_t vm_object_update(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    vm_object_offset_t *error_offset,
    int *io_errno,
    memory_object_return_t should_return,
    int flags,
    vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
    vm_object_t object,
    vm_object_offset_t offset,
    vm_object_size_t size,
    memory_object_return_t should_return,
    int flags,
    vm_prot_t prot);


__private_extern__ vm_object_t vm_object_memory_object_associate(
    memory_object_t pager,
    vm_object_t object,
    vm_object_size_t size,
    boolean_t check_named);


__private_extern__ void vm_object_cluster_size(
    vm_object_t object,
    vm_object_offset_t *start,
    vm_size_t *length,
    vm_object_fault_info_t fault_info,
    uint32_t *io_streaming);

__private_extern__ kern_return_t vm_object_populate_with_private(
    vm_object_t object,
    vm_object_offset_t offset,
    ppnum_t phys_page,
    vm_size_t size);

__private_extern__ void vm_object_change_wimg_mode(
    vm_object_t object,
    unsigned int wimg_mode);

extern kern_return_t vm_object_page_op(
    vm_object_t object,
    vm_object_offset_t offset,
    int ops,
    ppnum_t *phys_entry,
    int *flags);

extern kern_return_t vm_object_range_op(
    vm_object_t object,
    vm_object_offset_t offset_beg,
    vm_object_offset_t offset_end,
    int ops,
    uint32_t *range);


__private_extern__ void vm_object_reap_pages(
    vm_object_t object,
    int reap_type);
#define REAP_REAP 0
#define REAP_TERMINATE 1
#define REAP_PURGEABLE 2
#define REAP_DATA_FLUSH 3

#if CONFIG_FREEZE

__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
    vm_object_t object, uint32_t dirty_budget);

__private_extern__ void
vm_object_compressed_freezer_done(
    void);

#endif /* CONFIG_FREEZE */

__private_extern__ void
vm_object_pageout(
    vm_object_t object);

/*
 * Event waiting handling
 */
__enum_closed_decl(vm_object_wait_reason_t, uint8_t, {
    VM_OBJECT_EVENT_PAGER_INIT = 0,
    VM_OBJECT_EVENT_PAGER_READY = 1,
    VM_OBJECT_EVENT_PAGING_IN_PROGRESS = 2,
    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS = 3,
    VM_OBJECT_EVENT_UNBLOCKED = 4,
    VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS = 5,
    VM_OBJECT_EVENT_PAGEIN_THROTTLE = 6,
});
#define VM_OBJECT_EVENT_MAX VM_OBJECT_EVENT_PAGEIN_THROTTLE
/* 7 bits in "all_wanted" */
_Static_assert(VM_OBJECT_EVENT_MAX < 7,
    "vm_object_wait_reason_t must fit in all_wanted");
/*
 * @c vm_object_sleep uses (object + wait_reason) as the wait event; ensure
 * this does not collide with the object lock.
 */
_Static_assert(VM_OBJECT_EVENT_MAX < offsetof(struct vm_object, Lock),
    "Wait reason collides with vm_object->Lock");

extern wait_result_t vm_object_sleep(
    vm_object_t object,
    vm_object_wait_reason_t reason,
    wait_interrupt_t interruptible,
    lck_sleep_action_t action);


static inline void
vm_object_set_wanted(
    vm_object_t object,
    vm_object_wait_reason_t reason)
{
    vm_object_lock_assert_exclusive(object);
    assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);

    object->all_wanted |= (1 << reason);
}

static inline bool
vm_object_wanted(
    vm_object_t object,
    vm_object_wait_reason_t event)
{
    vm_object_lock_assert_held(object);
    assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

    return object->all_wanted & (1 << event);
}

extern void vm_object_wakeup(
    vm_object_t object,
    vm_object_wait_reason_t reason);
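
/*
 * Sketch of the intended wait/wakeup pattern (assumes the object lock is
 * held exclusively; sleep/relock details follow vm_object_sleep()):
 *
 *     while (!object->pager_ready) {
 *         vm_object_sleep(object, VM_OBJECT_EVENT_PAGER_READY,
 *             THREAD_UNINT, LCK_SLEEP_DEFAULT);
 *     }
 *
 * and, on the thread that makes the condition true:
 *
 *     VM_OBJECT_SET_PAGER_READY(object, TRUE);
 *     vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
 */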

/*
 * Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
#define VM_PIP_DEBUG_BEGIN(object) \
    MACRO_BEGIN \
    int pip = ((object)->paging_in_progress + \
        (object)->activity_in_progress); \
    if (pip < VM_PIP_DEBUG_MAX_REFS) { \
        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
            VM_PIP_DEBUG_STACK_FRAMES); \
    } \
    MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */

static inline void
vm_object_activity_begin(vm_object_t object)
{
    vm_object_lock_assert_exclusive(object);
    VM_PIP_DEBUG_BEGIN(object);
    if (os_inc_overflow(&object->activity_in_progress)) {
        panic("vm_object_activity_begin(%p): overflow\n", object);
    }
}

static inline void
vm_object_activity_end(vm_object_t object)
{
    vm_object_lock_assert_exclusive(object);
    if (os_dec_overflow(&object->activity_in_progress)) {
        panic("vm_object_activity_end(%p): underflow\n", object);
    }
    if (object->paging_in_progress == 0 &&
        object->activity_in_progress == 0) {
        vm_object_wakeup((object),
            VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
    }
}

static inline void
vm_object_paging_begin(vm_object_t object)
{
    vm_object_lock_assert_exclusive(object);
    VM_PIP_DEBUG_BEGIN((object));
    if (os_inc_overflow(&object->paging_in_progress)) {
        panic("vm_object_paging_begin(%p): overflow\n", object);
    }
}

static inline void
vm_object_paging_end(vm_object_t object)
{
    vm_object_lock_assert_exclusive(object);
    if (os_dec_overflow(&object->paging_in_progress)) {
        panic("vm_object_paging_end(%p): underflow\n", object);
    }
    /*
     * NB: This broadcast can be noisy, especially because all threads
     * receiving the wakeup are given a priority floor. In the future, it
     * would be great to utilize a primitive which can arbitrate
     * the priority of all waiters and only issue as many wakeups as can be
     * serviced.
     */
    if (object->paging_in_progress == vm_object_pagein_throttle - 1) {
        vm_object_wakeup(object, VM_OBJECT_EVENT_PAGEIN_THROTTLE);
    }
    if (object->paging_in_progress == 0) {
        vm_object_wakeup(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);
        if (object->activity_in_progress == 0) {
            vm_object_wakeup((object),
                VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
        }
    }
}

/* Wait for *all* paging and activities on this object to complete */
extern wait_result_t vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible);
/* Wait for *all* paging on this object to complete */
extern wait_result_t vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible);
/* Wait for the number of page-ins on this object to fall below the throttle limit */
extern wait_result_t vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible);
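
/*
 * Sketch of the usual paging bracket (assuming the caller manages the
 * object lock as shown): a non-zero paging_in_progress keeps the object
 * from being terminated while the lock is dropped for I/O.
 *
 *     vm_object_lock(object);
 *     vm_object_paging_begin(object);
 *     vm_object_unlock(object);
 *     ... issue pager/compressor I/O ...
 *     vm_object_lock(object);
 *     vm_object_paging_end(object);    (may wake throttle/PIP waiters)
 *     vm_object_unlock(object);
 */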

static inline void
vm_object_mapping_begin(vm_object_t object)
{
    vm_object_lock_assert_exclusive(object);
    assert(!object->mapping_in_progress);
    object->mapping_in_progress = TRUE;
}

static inline void
vm_object_mapping_end(vm_object_t object)
{
    vm_object_lock_assert_exclusive(object);
    assert(object->mapping_in_progress);
    object->mapping_in_progress = FALSE;
    vm_object_wakeup(object,
        VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);
}

extern wait_result_t vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible);
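
/*
 * mapping_in_progress is a single-owner flag rather than a counter, so the
 * bracket must not be nested. A sketch of the expected shape (assuming the
 * object lock is held exclusively around each step):
 *
 *     while (object->mapping_in_progress) {
 *         vm_object_mapping_wait(object, THREAD_UNINT);
 *     }
 *     vm_object_mapping_begin(object);
 *     ... set up or tear down mappings ...
 *     vm_object_mapping_end(object);
 */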

#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
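
/*
 * Example with 4 KB pages (PAGE_MASK == 0xFFF):
 *     vm_object_trunc_page(0x1234) == 0x1000
 *     vm_object_round_page(0x1234) == 0x2000
 *     vm_object_round_page(0x1000) == 0x1000 (already page-aligned)
 */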

extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int vm_object_cache_evict(int, int);

#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
#define VM_OBJECT_OWNER(object) \
    ((object == VM_OBJECT_NULL || \
    ((object)->purgable == VM_PURGABLE_DENY && \
    (object)->vo_ledger_tag == 0) || \
    (object)->vo_owner == TASK_NULL) \
    ? TASK_NULL /* not owned */ \
    : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED) \
    ? kernel_task /* disowned -> kernel */ \
    : (object)->vo_owner)) /* explicit owner */
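
/*
 * Summary of VM_OBJECT_OWNER() resolution (derived from the macro above):
 * unowned or non-accounted objects yield TASK_NULL, disowned objects are
 * charged to kernel_task, and anything else resolves to the explicit
 * vo_owner.
 */
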
extern void vm_object_ledger_tag_ledgers(
    vm_object_t object,
    int *ledger_idx_volatile,
    int *ledger_idx_nonvolatile,
    int *ledger_idx_volatile_compressed,
    int *ledger_idx_nonvolatile_compressed,
    int *ledger_idx_composite,
    int *ledger_idx_external_wired,
    boolean_t *do_footprint);

extern kern_return_t vm_object_ownership_change(
    vm_object_t object,
    int new_ledger_tag,
    task_t new_owner,
    int new_ledger_flags,
    boolean_t task_objq_locked);


// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((o)))

static inline void
VM_OBJECT_COPY_SET(
    vm_object_t object,
    vm_object_t copy)
{
    vm_object_lock_assert_exclusive(object);
    object->vo_copy = copy;
    if (copy != VM_OBJECT_NULL) {
        object->vo_copy_version++;
    }
}
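
/*
 * Note (intent as suggested by the increment above): vo_copy_version is
 * bumped whenever a new non-NULL copy object is installed, which lets code
 * that drops and re-takes the object lock detect that the copy
 * relationship changed in the interim.
 */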

#endif /* XNU_KERNEL_PRIVATE */

#endif /* _VM_VM_OBJECT_INTERNAL_H_ */