1 /*
2 * Copyright (c) 2023 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 #ifndef _VM_VM_OBJECT_INTERNAL_H_
30 #define _VM_VM_OBJECT_INTERNAL_H_
31
32 #ifdef XNU_KERNEL_PRIVATE
33 #include <vm/vm_object_xnu.h>
34
#if VM_OBJECT_TRACKING
#include <libkern/OSDebug.h>
#include <kern/btlog.h>
extern void vm_object_tracking_init(void);
extern btlog_t vm_object_tracking_btlog;
/* Capacity of the object-tracking backtrace log. */
#define VM_OBJECT_TRACKING_NUM_RECORDS 50000
/* Operation codes recorded against an object in the tracking log. */
#define VM_OBJECT_TRACKING_OP_CREATED 1
#define VM_OBJECT_TRACKING_OP_MODIFIED 2
#define VM_OBJECT_TRACKING_OP_TRUESHARE 3
#endif /* VM_OBJECT_TRACKING */
45
46 #if VM_OBJECT_ACCESS_TRACKING
47 extern uint64_t vm_object_access_tracking_reads;
48 extern uint64_t vm_object_access_tracking_writes;
49 extern void vm_object_access_tracking(vm_object_t object,
50 int *access_tracking,
51 uint32_t *access_tracking_reads,
52 uint32_t *acess_tracking_writes);
53 #endif /* VM_OBJECT_ACCESS_TRACKING */
54
55 extern uint16_t vm_object_pagein_throttle;
56
/*
 * Object locking macros
 */

/*
 * Initialize the object's rw-lock.  The lock attribute is selected per
 * object: the kernel object and the compressor object each get a
 * dedicated attribute, every other object uses the generic one.
 */
#define vm_object_lock_init(object)                                     \
	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
	    (is_kernel_object(object) ?                                 \
	    &kernel_object_lck_attr :                                   \
	    (((object) == compressor_object) ?                          \
	    &compressor_object_lck_attr :                               \
	    &vm_object_lck_attr)))
#define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)

/* Non-blocking lock attempt (returns without waiting on contention). */
#define vm_object_lock_try_scan(object) _vm_object_lock_try(object)

/*
 * CAUTION: the following vm_object_lock_assert_held*() macros merely
 * check if anyone is holding the lock, but the holder may not necessarily
 * be the caller...
 */
#define vm_object_lock_assert_held(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
#define vm_object_lock_assert_shared(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
#define vm_object_lock_assert_exclusive(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
#define vm_object_lock_assert_notheld(object) \
	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
85
86
87 static inline void
VM_OBJECT_SET_PAGER_CREATED(vm_object_t object,bool value)88 VM_OBJECT_SET_PAGER_CREATED(
89 vm_object_t object,
90 bool value)
91 {
92 vm_object_lock_assert_exclusive(object);
93 object->pager_created = value;
94 }
95 static inline void
VM_OBJECT_SET_PAGER_INITIALIZED(vm_object_t object,bool value)96 VM_OBJECT_SET_PAGER_INITIALIZED(
97 vm_object_t object,
98 bool value)
99 {
100 vm_object_lock_assert_exclusive(object);
101 object->pager_initialized = value;
102 }
103 static inline void
VM_OBJECT_SET_PAGER_READY(vm_object_t object,bool value)104 VM_OBJECT_SET_PAGER_READY(
105 vm_object_t object,
106 bool value)
107 {
108 vm_object_lock_assert_exclusive(object);
109 object->pager_ready = value;
110 }
111 static inline void
VM_OBJECT_SET_PAGER_TRUSTED(vm_object_t object,bool value)112 VM_OBJECT_SET_PAGER_TRUSTED(
113 vm_object_t object,
114 bool value)
115 {
116 vm_object_lock_assert_exclusive(object);
117 object->pager_trusted = value;
118 }
119 static inline void
VM_OBJECT_SET_CAN_PERSIST(vm_object_t object,bool value)120 VM_OBJECT_SET_CAN_PERSIST(
121 vm_object_t object,
122 bool value)
123 {
124 vm_object_lock_assert_exclusive(object);
125 object->can_persist = value;
126 }
127 static inline void
VM_OBJECT_SET_INTERNAL(vm_object_t object,bool value)128 VM_OBJECT_SET_INTERNAL(
129 vm_object_t object,
130 bool value)
131 {
132 vm_object_lock_assert_exclusive(object);
133 object->internal = value;
134 }
135 static inline void
VM_OBJECT_SET_PRIVATE(vm_object_t object,bool value)136 VM_OBJECT_SET_PRIVATE(
137 vm_object_t object,
138 bool value)
139 {
140 vm_object_lock_assert_exclusive(object);
141 object->private = value;
142 }
143 static inline void
VM_OBJECT_SET_PAGEOUT(vm_object_t object,bool value)144 VM_OBJECT_SET_PAGEOUT(
145 vm_object_t object,
146 bool value)
147 {
148 vm_object_lock_assert_exclusive(object);
149 object->pageout = value;
150 }
151 static inline void
VM_OBJECT_SET_ALIVE(vm_object_t object,bool value)152 VM_OBJECT_SET_ALIVE(
153 vm_object_t object,
154 bool value)
155 {
156 vm_object_lock_assert_exclusive(object);
157 object->alive = value;
158 }
159 static inline void
VM_OBJECT_SET_PURGABLE(vm_object_t object,unsigned int value)160 VM_OBJECT_SET_PURGABLE(
161 vm_object_t object,
162 unsigned int value)
163 {
164 vm_object_lock_assert_exclusive(object);
165 object->purgable = value;
166 assert3u(object->purgable, ==, value);
167 }
168 static inline void
VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(vm_object_t object,bool value)169 VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(
170 vm_object_t object,
171 bool value)
172 {
173 vm_object_lock_assert_exclusive(object);
174 object->purgeable_only_by_kernel = value;
175 }
176 static inline void
VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(vm_object_t object,bool value)177 VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(
178 vm_object_t object,
179 bool value)
180 {
181 vm_object_lock_assert_exclusive(object);
182 object->purgeable_when_ripe = value;
183 }
184 static inline void
VM_OBJECT_SET_SHADOWED(vm_object_t object,bool value)185 VM_OBJECT_SET_SHADOWED(
186 vm_object_t object,
187 bool value)
188 {
189 vm_object_lock_assert_exclusive(object);
190 object->shadowed = value;
191 }
192 static inline void
VM_OBJECT_SET_TRUE_SHARE(vm_object_t object,bool value)193 VM_OBJECT_SET_TRUE_SHARE(
194 vm_object_t object,
195 bool value)
196 {
197 vm_object_lock_assert_exclusive(object);
198 object->true_share = value;
199 }
200 static inline void
VM_OBJECT_SET_TERMINATING(vm_object_t object,bool value)201 VM_OBJECT_SET_TERMINATING(
202 vm_object_t object,
203 bool value)
204 {
205 vm_object_lock_assert_exclusive(object);
206 object->terminating = value;
207 }
208 static inline void
VM_OBJECT_SET_NAMED(vm_object_t object,bool value)209 VM_OBJECT_SET_NAMED(
210 vm_object_t object,
211 bool value)
212 {
213 vm_object_lock_assert_exclusive(object);
214 object->named = value;
215 }
216 static inline void
VM_OBJECT_SET_SHADOW_SEVERED(vm_object_t object,bool value)217 VM_OBJECT_SET_SHADOW_SEVERED(
218 vm_object_t object,
219 bool value)
220 {
221 vm_object_lock_assert_exclusive(object);
222 object->shadow_severed = value;
223 }
224 static inline void
VM_OBJECT_SET_PHYS_CONTIGUOUS(vm_object_t object,bool value)225 VM_OBJECT_SET_PHYS_CONTIGUOUS(
226 vm_object_t object,
227 bool value)
228 {
229 vm_object_lock_assert_exclusive(object);
230 object->phys_contiguous = value;
231 }
232 static inline void
VM_OBJECT_SET_NOPHYSCACHE(vm_object_t object,bool value)233 VM_OBJECT_SET_NOPHYSCACHE(
234 vm_object_t object,
235 bool value)
236 {
237 vm_object_lock_assert_exclusive(object);
238 object->nophyscache = value;
239 }
240 static inline void
VM_OBJECT_SET_FOR_REALTIME(vm_object_t object,bool value)241 VM_OBJECT_SET_FOR_REALTIME(
242 vm_object_t object,
243 bool value)
244 {
245 vm_object_lock_assert_exclusive(object);
246 object->for_realtime = value;
247 }
248 static inline void
VM_OBJECT_SET_NO_PAGER_REASON(vm_object_t object,unsigned int value)249 VM_OBJECT_SET_NO_PAGER_REASON(
250 vm_object_t object,
251 unsigned int value)
252 {
253 vm_object_lock_assert_exclusive(object);
254 object->no_pager_reason = value;
255 assert3u(object->no_pager_reason, ==, value);
256 }
257 #if FBDP_DEBUG_OBJECT_NO_PAGER
258 static inline void
VM_OBJECT_SET_FBDP_TRACKED(vm_object_t object,bool value)259 VM_OBJECT_SET_FBDP_TRACKED(
260 vm_object_t object,
261 bool value)
262 {
263 vm_object_lock_assert_exclusive(object);
264 object->fbdp_tracked = value;
265 }
266 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
267
/*
 * Declare procedures that operate on VM objects.
 */

/* One-time initialization of the VM object subsystem. */
__private_extern__ void vm_object_bootstrap(void);

/* Start the thread/state that reaps terminated objects. */
__private_extern__ void vm_object_reaper_init(void);

/* Allocate and initialize a new VM object of the given size. */
__private_extern__ vm_object_t vm_object_allocate(vm_object_size_t size,
    vm_map_serial_t provenance);

/* Initialize a caller-provided object (no allocation performed). */
__private_extern__ void _vm_object_allocate(vm_object_size_t size,
    vm_object_t object, vm_map_serial_t provenance);

__private_extern__ void vm_object_set_size(
	vm_object_t object,
	vm_object_size_t outer_size,
	vm_object_size_t inner_size);
286
/*
 * Take an additional reference on an object whose lock the caller holds
 * exclusively (uses the "locked" retain variant of os_refcnt).
 */
static inline void
vm_object_reference_locked(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	os_ref_retain_locked_raw(&object->ref_count, &vm_object_refgrp);
}

/*
 * Take an additional reference on an object whose lock the caller holds
 * at least shared (uses the regular retain variant of os_refcnt).
 */
static inline void
vm_object_reference_shared(vm_object_t object)
{
	vm_object_lock_assert_shared(object);
	os_ref_retain_raw(&object->ref_count, &vm_object_refgrp);
}
300
/* Take a reference on an object; tolerates VM_OBJECT_NULL. */
__private_extern__ void vm_object_reference(
	vm_object_t object);

#if !MACH_ASSERT

/*
 * With assertions disabled, vm_object_reference() is shadowed by this
 * macro, which briefly takes the object lock shared to perform the
 * retain.  NULL objects are silently ignored.
 */
#define vm_object_reference(object)                     \
	MACRO_BEGIN                                     \
	vm_object_t RObject = (object);                 \
	if (RObject) {                                  \
	        vm_object_lock_shared(RObject);         \
	        vm_object_reference_shared(RObject);    \
	        vm_object_unlock(RObject);              \
	}                                               \
	MACRO_END

#endif /* MACH_ASSERT */
317
/* Release one reference on the object (counterpart of vm_object_reference). */
__private_extern__ void vm_object_deallocate(
	vm_object_t object);

/*
 * Restrict the protection of the object's pages in the given pmap.
 * vm_object_pmap_protect_options() additionally accepts PMAP_OPTIONS_*
 * flags.
 */
__private_extern__ void vm_object_pmap_protect(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	pmap_t pmap,
	vm_map_size_t pmap_page_size,
	vm_map_offset_t pmap_start,
	vm_prot_t prot);

__private_extern__ void vm_object_pmap_protect_options(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	pmap_t pmap,
	vm_map_size_t pmap_page_size,
	vm_map_offset_t pmap_start,
	vm_prot_t prot,
	int options);

/* Remove the pages backing [start, end) from the object. */
__private_extern__ void vm_object_page_remove(
	vm_object_t object,
	vm_object_offset_t start,
	vm_object_offset_t end);

__private_extern__ void vm_object_deactivate_pages(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t kill_page,
	boolean_t reusable_page,
	boolean_t kill_no_write,
	struct pmap *pmap,
	/* XXX TODO4K: need pmap_page_size here too? */
	vm_map_offset_t pmap_offset);

__private_extern__ void vm_object_reuse_pages(
	vm_object_t object,
	vm_object_offset_t start_offset,
	vm_object_offset_t end_offset,
	boolean_t allow_partial_reuse);

/* Zero-fill the object's pages in [cur_offset, end_offset). */
__private_extern__ kern_return_t vm_object_zero(
	vm_object_t object,
	vm_object_offset_t cur_offset,
	vm_object_offset_t end_offset);

/* Purge a purgeable object; returns an amount reclaimed (see vm_object.c). */
__private_extern__ uint64_t vm_object_purge(
	vm_object_t object,
	int flags);

/* Get/set the purgeable state of an object (VM_PURGABLE_* controls). */
__private_extern__ kern_return_t vm_object_purgable_control(
	vm_object_t object,
	vm_purgable_t control,
	int *state);

__private_extern__ kern_return_t vm_object_get_page_counts(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	unsigned int *resident_page_count,
	unsigned int *dirty_page_count);

/* Try to extend prev_object to cover next_object's range; TRUE on success. */
__private_extern__ boolean_t vm_object_coalesce(
	vm_object_t prev_object,
	vm_object_t next_object,
	vm_object_offset_t prev_offset,
	vm_object_offset_t next_offset,
	vm_object_size_t prev_size,
	vm_object_size_t next_size);

/*
 * Create a shadow (copy-on-write) object in front of *object, updating
 * *object/*offset in place.
 */
__private_extern__ boolean_t vm_object_shadow(
	vm_object_t *object,
	vm_object_offset_t *offset,
	vm_object_size_t length,
	boolean_t always_shadow);

__private_extern__ void vm_object_collapse(
	vm_object_t object,
	vm_object_offset_t offset,
	boolean_t can_bypass);

/*
 * Object copy strategies: "quickly" (symmetric copy-on-write),
 * "strategically" (per the object's copy strategy), "slowly" (eager page
 * copy), and "delayed" (asymmetric copy-on-write).
 */
__private_extern__ boolean_t vm_object_copy_quickly(
	vm_object_t object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t *_src_needs_copy,
	boolean_t *_dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_strategically(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	bool forking,
	vm_object_t *dst_object,
	vm_object_offset_t *dst_offset,
	boolean_t *dst_needs_copy);

__private_extern__ kern_return_t vm_object_copy_slowly(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t interruptible,
	vm_object_t *_result_object);

__private_extern__ vm_object_t vm_object_copy_delayed(
	vm_object_t src_object,
	vm_object_offset_t src_offset,
	vm_object_size_t size,
	boolean_t src_object_shared);

__private_extern__ kern_return_t vm_object_destroy(
	vm_object_t object,
	vm_object_destroy_reason_t reason);

/* Create the compressor pager backing this object, if not yet present. */
__private_extern__ void vm_object_compressor_pager_create(
	vm_object_t object);
437
/*
 * Query whether the provided object,offset reside in the compressor. The
 * caller must hold the object lock and ensure that the object,offset under
 * inspection is not in the process of being paged in/out (i.e. no busy
 * backing page)
 */
__private_extern__ vm_external_state_t vm_object_compressor_pager_state_get(
	vm_object_t object,
	vm_object_offset_t offset);

/*
 * Clear the compressor slot corresponding to an object,offset. The caller
 * must hold the object lock (exclusive) and ensure that the object,offset
 * under inspection is not in the process of being paged in/out (i.e. no busy
 * backing page)
 */
__private_extern__ void vm_object_compressor_pager_state_clr(
	vm_object_t object,
	vm_object_offset_t offset);

/*
 * Build a UPL (universal page list) describing the given range of the
 * object.  NOTE(review): exact in/out semantics of "page_info"/"count"
 * are defined by the UPL machinery — see vm_pageout.c.
 */
__private_extern__ kern_return_t vm_object_upl_request(
	vm_object_t object,
	vm_object_offset_t offset,
	upl_size_t size,
	upl_t *upl,
	upl_page_info_t *page_info,
	unsigned int *count,
	upl_control_flags_t flags,
	vm_tag_t tag);
467
/* Swap the contents of two objects over transpose_size bytes. */
__private_extern__ kern_return_t vm_object_transpose(
	vm_object_t object1,
	vm_object_t object2,
	vm_object_size_t transpose_size);

__private_extern__ boolean_t vm_object_sync(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	boolean_t should_flush,
	boolean_t should_return,
	boolean_t should_iosync);

/*
 * Apply a memory_object_return_t policy over a range of the object,
 * optionally reporting the offset/errno of a failed I/O.
 */
__private_extern__ kern_return_t vm_object_update(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	vm_object_offset_t *error_offset,
	int *io_errno,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot);

__private_extern__ kern_return_t vm_object_lock_request(
	vm_object_t object,
	vm_object_offset_t offset,
	vm_object_size_t size,
	memory_object_return_t should_return,
	int flags,
	vm_prot_t prot);



/* Associate a memory-object pager with a VM object. */
__private_extern__ vm_object_t vm_object_memory_object_associate(
	memory_object_t pager,
	vm_object_t object,
	vm_object_size_t size,
	boolean_t check_named);


/* Compute a clustered page-in range around a fault (out: start/length). */
__private_extern__ void vm_object_cluster_size(
	vm_object_t object,
	vm_object_offset_t *start,
	vm_size_t *length,
	vm_object_fault_info_t fault_info,
	uint32_t *io_streaming);

/* Install a caller-provided physical page range into the object. */
__private_extern__ kern_return_t vm_object_populate_with_private(
	vm_object_t object,
	vm_object_offset_t offset,
	ppnum_t phys_page,
	vm_size_t size);

/* Change the cacheability (WIMG) mode for the object's pages. */
__private_extern__ void vm_object_change_wimg_mode(
	vm_object_t object,
	unsigned int wimg_mode);

/* Perform a UPL_POP_* operation on a single page of the object. */
extern kern_return_t vm_object_page_op(
	vm_object_t object,
	vm_object_offset_t offset,
	int ops,
	ppnum_t *phys_entry,
	int *flags);

/* Perform a UPL_ROP_* operation over a range of the object. */
extern kern_return_t vm_object_range_op(
	vm_object_t object,
	vm_object_offset_t offset_beg,
	vm_object_offset_t offset_end,
	int ops,
	uint32_t *range);


/* Free the object's pages; behavior selected by one of the REAP_* codes. */
__private_extern__ void vm_object_reap_pages(
	vm_object_t object,
	int reap_type);
#define REAP_REAP 0
#define REAP_TERMINATE 1
#define REAP_PURGEABLE 2
#define REAP_DATA_FLUSH 3
547
#if CONFIG_FREEZE

/*
 * Push the object's dirty pages to the compressor on behalf of the
 * freezer, bounded by dirty_budget; returns a page count.
 */
__private_extern__ uint32_t
vm_object_compressed_freezer_pageout(
	vm_object_t object, uint32_t dirty_budget);

/* Signal the end of a freezer pageout pass. */
__private_extern__ void
vm_object_compressed_freezer_done(
	void);

#endif /* CONFIG_FREEZE */

/* Push the object's pages out to its pager. */
__private_extern__ void
vm_object_pageout(
	vm_object_t object);
563
/*
 * Event waiting handling
 *
 * Each wait reason is a bit index into the object's "all_wanted" mask;
 * see vm_object_set_wanted() / vm_object_wanted() below.
 */
__enum_closed_decl(vm_object_wait_reason_t, uint8_t, {
	VM_OBJECT_EVENT_PAGER_INIT = 0,
	VM_OBJECT_EVENT_PAGER_READY = 1,
	VM_OBJECT_EVENT_PAGING_IN_PROGRESS = 2,
	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS = 3,
	VM_OBJECT_EVENT_UNBLOCKED = 4,
	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS = 5,
	VM_OBJECT_EVENT_PAGEIN_THROTTLE = 6,
});
#define VM_OBJECT_EVENT_MAX VM_OBJECT_EVENT_PAGEIN_THROTTLE
/* 7 bits in "all_wanted" */
_Static_assert(VM_OBJECT_EVENT_MAX < 7,
    "vm_object_wait_reason_t must fit in all_wanted");
/*
 * @c vm_object_sleep uses (object + wait_reason) as the wait event, ensure
 * this does not collide with the object lock.
 */
_Static_assert(VM_OBJECT_EVENT_MAX < offsetof(struct vm_object, Lock),
    "Wait reason collides with vm_object->Lock");

/* Block until "reason" is signalled on the object (or interrupted). */
extern wait_result_t vm_object_sleep(
	vm_object_t object,
	vm_object_wait_reason_t reason,
	wait_interrupt_t interruptible,
	lck_sleep_action_t action);
593
594 static inline void
vm_object_set_wanted(vm_object_t object,vm_object_wait_reason_t reason)595 vm_object_set_wanted(
596 vm_object_t object,
597 vm_object_wait_reason_t reason)
598 {
599 vm_object_lock_assert_exclusive(object);
600 assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);
601
602 object->all_wanted |= (1 << reason);
603 }
604
605 static inline bool
vm_object_wanted(vm_object_t object,vm_object_wait_reason_t event)606 vm_object_wanted(
607 vm_object_t object,
608 vm_object_wait_reason_t event)
609 {
610 vm_object_lock_assert_held(object);
611 assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
612
613 return object->all_wanted & (1 << event);
614 }
615
/* Wake all threads sleeping on (object, reason); see vm_object_sleep. */
extern void vm_object_wakeup(
	vm_object_t object,
	vm_object_wait_reason_t reason);

/*
 * Routines implemented as macros
 */
#ifdef VM_PIP_DEBUG
#include <libkern/OSDebug.h>
/*
 * Debug aid: record the caller's backtrace in the object's pip_holders
 * slot indexed by the current paging+activity count, so each outstanding
 * paging/activity reference can be traced to its origin.
 */
#define VM_PIP_DEBUG_BEGIN(object)                                      \
	MACRO_BEGIN                                                     \
	int pip = ((object)->paging_in_progress +                       \
	    (object)->activity_in_progress);                            \
	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
	            VM_PIP_DEBUG_STACK_FRAMES);                         \
	}                                                               \
	MACRO_END
#else /* VM_PIP_DEBUG */
#define VM_PIP_DEBUG_BEGIN(object)
#endif /* VM_PIP_DEBUG */
637
/*
 * Take an "activity in progress" reference on the object, panicking on
 * counter overflow.  Caller must hold the object lock exclusively.
 */
static inline void
vm_object_activity_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN(object);
	if (os_inc_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_begin(%p): overflow\n", object);
	}
}

/*
 * Drop an "activity in progress" reference.  Once both the paging and
 * activity counts reach zero, wake threads waiting for all paging
 * activity on this object to drain.
 */
static inline void
vm_object_activity_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_end(%p): underflow\n", object);
	}
	if (object->paging_in_progress == 0 &&
	    object->activity_in_progress == 0) {
		vm_object_wakeup((object),
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
	}
}
661
/*
 * Take a "paging in progress" reference on the object, panicking on
 * counter overflow.  Caller must hold the object lock exclusively.
 */
static inline void
vm_object_paging_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN((object));
	if (os_inc_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_begin(%p): overflow\n", object);
	}
}

/*
 * Drop a "paging in progress" reference and issue the wakeups implied by
 * the new count: throttle waiters when the count falls just below
 * vm_object_pagein_throttle, and paging/activity waiters when the
 * relevant counts hit zero.
 */
static inline void
vm_object_paging_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_end(%p): underflow\n", object);
	}
	/*
	 * NB: This broadcast can be noisy, especially because all threads
	 * receiving the wakeup are given a priority floor. In the future, it
	 * would be great to utilize a primitive which can arbitrate
	 * the priority of all waiters and only issue as many wakeups as can be
	 * serviced.
	 */
	if (object->paging_in_progress == vm_object_pagein_throttle - 1) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGEIN_THROTTLE);
	}
	if (object->paging_in_progress == 0) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);
		if (object->activity_in_progress == 0) {
			vm_object_wakeup((object),
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
		}
	}
}
697
/* Wait for *all* paging and activities on this object to complete */
extern wait_result_t vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible);
/* Wait for *all* paging on this object to complete */
extern wait_result_t vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible);
/* Wait for the number of page-ins on this object to fall below the throttle limit */
extern wait_result_t vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible);

/*
 * Mark the start of a mapping operation on the object.  The assertion
 * enforces that no other mapping operation is already in progress;
 * caller must hold the object lock exclusively.
 */
static inline void
vm_object_mapping_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(!object->mapping_in_progress);
	object->mapping_in_progress = TRUE;
}

/*
 * Mark the end of a mapping operation and wake any thread waiting for
 * it to finish.  Caller must hold the object lock exclusively.
 */
static inline void
vm_object_mapping_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(object->mapping_in_progress);
	object->mapping_in_progress = FALSE;
	vm_object_wakeup(object,
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);
}

/* Wait for a concurrent mapping operation on the object to finish. */
extern wait_result_t vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible);
724
/* Round up / truncate an object offset to a page boundary. */
#define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
#define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))

extern void vm_object_cache_add(vm_object_t);
extern void vm_object_cache_remove(vm_object_t);
extern int vm_object_cache_evict(int, int);

/* Sentinel values for vo_owner (not real task pointers). */
#define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
#define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
/*
 * Resolve an object's owning task:
 *  - TASK_NULL when there is no object, no explicit owner, or the object
 *    is neither purgeable nor ledger-tagged;
 *  - kernel_task when the owner disowned the object;
 *  - the explicit vo_owner otherwise.
 */
#define VM_OBJECT_OWNER(object)                                         \
	((object == VM_OBJECT_NULL ||                                   \
	((object)->purgable == VM_PURGABLE_DENY &&                      \
	 (object)->vo_ledger_tag == 0) ||                               \
	(object)->vo_owner == TASK_NULL)                                \
	? TASK_NULL     /* not owned */                                 \
	: (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)             \
	? kernel_task   /* disowned -> kernel */                        \
	: (object)->vo_owner)) /* explicit owner */                     \

743
744
/*
 * Map the object's ledger tag to the set of task-ledger indices that its
 * pages should be accounted against (out parameters).
 */
extern void vm_object_ledger_tag_ledgers(
	vm_object_t object,
	int *ledger_idx_volatile,
	int *ledger_idx_nonvolatile,
	int *ledger_idx_volatile_compressed,
	int *ledger_idx_nonvolatile_compressed,
	int *ledger_idx_composite,
	int *ledger_idx_external_wired,
	boolean_t *do_footprint);

/* Transfer ownership/ledger accounting of the object to a new task. */
extern kern_return_t vm_object_ownership_change(
	vm_object_t object,
	int new_ledger_tag,
	task_t new_owner,
	int new_ledger_flags,
	boolean_t task_objq_locked);


// LP64todo: all the current tools are 32bit, obviously never worked for 64b
// so probably should be a real 32b ID vs. ptr.
// Current users just check for equality
#define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((o)))
767
768 static inline void
VM_OBJECT_COPY_SET(vm_object_t object,vm_object_t copy)769 VM_OBJECT_COPY_SET(
770 vm_object_t object,
771 vm_object_t copy)
772 {
773 vm_object_lock_assert_exclusive(object);
774 object->vo_copy = copy;
775 if (copy != VM_OBJECT_NULL) {
776 object->vo_copy_version++;
777 }
778 }
779
780 #endif /* XNU_KERNEL_PRIVATE */
781
782 #endif /* _VM_VM_OBJECT_INTERNAL_H_ */
783