xref: /xnu-12377.81.4/osfmk/vm/vm_object_internal.h (revision 043036a2b3718f7f0be807e2870f8f47d3fa0796)
1 /*
2  * Copyright (c) 2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_OBJECT_INTERNAL_H_
30 #define _VM_VM_OBJECT_INTERNAL_H_
31 
32 #ifdef XNU_KERNEL_PRIVATE
33 #include <vm/vm_object_xnu.h>
34 
35 #if VM_OBJECT_TRACKING
36 #include <libkern/OSDebug.h>
37 #include <kern/btlog.h>
38 extern void vm_object_tracking_init(void);
39 extern btlog_t vm_object_tracking_btlog;
40 #define VM_OBJECT_TRACKING_NUM_RECORDS  50000
41 #define VM_OBJECT_TRACKING_OP_CREATED   1
42 #define VM_OBJECT_TRACKING_OP_MODIFIED  2
43 #define VM_OBJECT_TRACKING_OP_TRUESHARE 3
44 #endif /* VM_OBJECT_TRACKING */
45 
46 #if VM_OBJECT_ACCESS_TRACKING
47 extern uint64_t vm_object_access_tracking_reads;
48 extern uint64_t vm_object_access_tracking_writes;
49 extern void vm_object_access_tracking(vm_object_t object,
50     int *access_tracking,
51     uint32_t *access_tracking_reads,
52     uint32_t *acess_tracking_writes);
53 #endif /* VM_OBJECT_ACCESS_TRACKING */
54 
55 extern uint16_t vm_object_pagein_throttle;
56 
57 /*
58  *	Object locking macros
59  */
60 
61 #define vm_object_lock_init(object)                                     \
62 	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
63 	            (is_kernel_object(object) ?                         \
64 	             &kernel_object_lck_attr :                          \
65 	             (((object) == compressor_object) ?                 \
66 	             &compressor_object_lck_attr :                      \
67 	              &vm_object_lck_attr)))
68 #define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
69 
70 /*
71  * This is used whenever we try to acquire the VM object lock
72  * without mutex_pause. The mutex_pause is intended to let
73  * pageout_scan try getting the object lock if it's trying to
74  * reclaim pages from that object (see vm_pageout_scan_wants_object).
75  */
76 /*
77  * This is also used by the fill thread when reclaiming tag storage
78  * pages because we don't want to block while holding the page queues lock.
79  */
80 #define vm_object_lock_try_scan(object) _vm_object_lock_try(object)
81 
82 /*
83  * CAUTION: the following vm_object_lock_assert_held*() macros merely
84  * check if anyone is holding the lock, but the holder may not necessarily
85  * be the caller...
86  */
87 #define vm_object_lock_assert_held(object) \
88 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
89 #define vm_object_lock_assert_shared(object) \
90 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
91 #define vm_object_lock_assert_exclusive(object) \
92 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
93 #define vm_object_lock_assert_notheld(object) \
94 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
95 
96 
/*
 * Setters for individual vm_object flag bits.
 *
 * Each setter asserts that the object lock is held exclusively before
 * writing the flag.
 * NOTE(review): these appear to be bitfield members sharing storage
 * (other setters in this family use assert3u to detect truncation on
 * assignment) -- confirm against the struct vm_object definition; the
 * exclusive lock would then be what prevents lost updates to adjacent
 * bits during the read-modify-write.
 */
static inline void
VM_OBJECT_SET_PAGER_CREATED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_created = value;
}
static inline void
VM_OBJECT_SET_PAGER_INITIALIZED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_initialized = value;
}
static inline void
VM_OBJECT_SET_PAGER_READY(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_ready = value;
}
static inline void
VM_OBJECT_SET_PAGER_TRUSTED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_trusted = value;
}
static inline void
VM_OBJECT_SET_CAN_PERSIST(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->can_persist = value;
}
static inline void
VM_OBJECT_SET_INTERNAL(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->internal = value;
}
static inline void
VM_OBJECT_SET_PRIVATE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->private = value;
}
static inline void
VM_OBJECT_SET_PAGEOUT(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pageout = value;
}
/*
 * More vm_object flag setters; the object lock must be held exclusively
 * (asserted by each setter).
 */
static inline void
VM_OBJECT_SET_ALIVE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->alive = value;
}
static inline void
VM_OBJECT_SET_PURGABLE(
	vm_object_t object,
	unsigned int value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgable = value;
	/* re-read to verify "value" was not truncated by the field width */
	assert3u(object->purgable, ==, value);
}
static inline void
VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgeable_only_by_kernel = value;
}
static inline void
VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgeable_when_ripe = value;
}
static inline void
VM_OBJECT_SET_SHADOWED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->shadowed = value;
}
static inline void
VM_OBJECT_SET_TRUE_SHARE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->true_share = value;
}
static inline void
VM_OBJECT_SET_TERMINATING(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->terminating = value;
}
static inline void
VM_OBJECT_SET_NAMED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->named = value;
}
static inline void
VM_OBJECT_SET_SHADOW_SEVERED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->shadow_severed = value;
}
/*
 * More vm_object flag setters; the object lock must be held exclusively
 * (asserted by each setter).
 */
static inline void
VM_OBJECT_SET_PHYS_CONTIGUOUS(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->phys_contiguous = value;
}
static inline void
VM_OBJECT_SET_NOPHYSCACHE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->nophyscache = value;
}
static inline void
VM_OBJECT_SET_FOR_REALTIME(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->for_realtime = value;
}
static inline void
VM_OBJECT_SET_NO_PAGER_REASON(
	vm_object_t object,
	unsigned int value)
{
	vm_object_lock_assert_exclusive(object);
	object->no_pager_reason = value;
	/* re-read to verify "value" was not truncated by the field width */
	assert3u(object->no_pager_reason, ==, value);
}
267 #if FBDP_DEBUG_OBJECT_NO_PAGER
/*
 * Debug-only setter (FBDP_DEBUG_OBJECT_NO_PAGER builds): mark/unmark
 * this object as tracked.  Object lock held exclusively (asserted).
 */
static inline void
VM_OBJECT_SET_FBDP_TRACKED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->fbdp_tracked = value;
}
276 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
277 
278 /*
279  *	Declare procedures that operate on VM objects.
280  */
281 
282 __private_extern__ void         vm_object_bootstrap(void);
283 
284 __private_extern__ void         vm_object_reaper_init(void);
285 
286 __private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size,
287     vm_map_serial_t provenance);
288 
289 __private_extern__ void    _vm_object_allocate(vm_object_size_t size,
290     vm_object_t object, vm_map_serial_t provenance);
291 
292 __private_extern__ void vm_object_set_size(
293 	vm_object_t             object,
294 	vm_object_size_t        outer_size,
295 	vm_object_size_t        inner_size);
296 
/*
 * Take an additional reference on "object".  The caller must hold the
 * object lock exclusively (asserted), which is why the "locked" variant
 * of the os_ref retain primitive may be used.
 */
static inline void
vm_object_reference_locked(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	os_ref_retain_locked_raw(&object->ref_count, &vm_object_refgrp);
}
303 
/*
 * Take an additional reference on "object" while holding the object
 * lock only shared (asserted); uses the atomic os_ref retain since
 * other holders of the shared lock may be retaining concurrently.
 */
static inline void
vm_object_reference_shared(vm_object_t object)
{
	vm_object_lock_assert_shared(object);
	os_ref_retain_raw(&object->ref_count, &vm_object_refgrp);
}
310 
311 __private_extern__ void         vm_object_reference(
312 	vm_object_t     object);
313 
314 #if     !MACH_ASSERT
315 
316 #define vm_object_reference(object)                     \
317 MACRO_BEGIN                                             \
318 	vm_object_t RObject = (object);                 \
319 	if (RObject) {                                  \
320 	        vm_object_lock_shared(RObject);         \
321 	        vm_object_reference_shared(RObject);    \
322 	        vm_object_unlock(RObject);              \
323 	}                                               \
324 MACRO_END
325 
326 #endif  /* MACH_ASSERT */
327 
328 __private_extern__ void         vm_object_deallocate(
329 	vm_object_t     object);
330 
331 __private_extern__ void         vm_object_pmap_protect(
332 	vm_object_t             object,
333 	vm_object_offset_t      offset,
334 	vm_object_size_t        size,
335 	pmap_t                  pmap,
336 	vm_map_size_t           pmap_page_size,
337 	vm_map_offset_t         pmap_start,
338 	vm_prot_t               prot);
339 
340 __private_extern__ void         vm_object_pmap_protect_options(
341 	vm_object_t             object,
342 	vm_object_offset_t      offset,
343 	vm_object_size_t        size,
344 	pmap_t                  pmap,
345 	vm_map_size_t           pmap_page_size,
346 	vm_map_offset_t         pmap_start,
347 	vm_prot_t               prot,
348 	int                     options);
349 
350 __private_extern__ void         vm_object_page_remove(
351 	vm_object_t             object,
352 	vm_object_offset_t      start,
353 	vm_object_offset_t      end);
354 
355 __private_extern__ void         vm_object_deactivate_pages(
356 	vm_object_t             object,
357 	vm_object_offset_t      offset,
358 	vm_object_size_t        size,
359 	boolean_t               kill_page,
360 	boolean_t               reusable_page,
361 	boolean_t               kill_no_write,
362 	struct pmap             *pmap,
363 /* XXX TODO4K: need pmap_page_size here too? */
364 	vm_map_offset_t         pmap_offset);
365 
366 __private_extern__ void vm_object_reuse_pages(
367 	vm_object_t             object,
368 	vm_object_offset_t      start_offset,
369 	vm_object_offset_t      end_offset,
370 	boolean_t               allow_partial_reuse);
371 
372 __private_extern__ kern_return_t vm_object_zero(
373 	vm_object_t             object,
374 	vm_object_offset_t      *cur_offset_p,
375 	vm_object_offset_t      end_offset);
376 
377 __private_extern__ uint64_t     vm_object_purge(
378 	vm_object_t              object,
379 	int                      flags);
380 
381 __private_extern__ kern_return_t vm_object_purgable_control(
382 	vm_object_t     object,
383 	vm_purgable_t   control,
384 	int             *state);
385 
386 __private_extern__ kern_return_t vm_object_get_page_counts(
387 	vm_object_t             object,
388 	vm_object_offset_t      offset,
389 	vm_object_size_t        size,
390 	uint64_t                *resident_page_count,
391 	uint64_t                *dirty_page_count,
392 	uint64_t                *swapped_page_count);
393 
394 __private_extern__ boolean_t    vm_object_coalesce(
395 	vm_object_t             prev_object,
396 	vm_object_t             next_object,
397 	vm_object_offset_t      prev_offset,
398 	vm_object_offset_t      next_offset,
399 	vm_object_size_t        prev_size,
400 	vm_object_size_t        next_size);
401 
402 __private_extern__ boolean_t    vm_object_shadow(
403 	vm_object_t             *object,
404 	vm_object_offset_t      *offset,
405 	vm_object_size_t        length,
406 	boolean_t               always_shadow);
407 
408 __private_extern__ void         vm_object_collapse(
409 	vm_object_t             object,
410 	vm_object_offset_t      offset,
411 	boolean_t               can_bypass);
412 
413 __private_extern__ boolean_t    vm_object_copy_quickly(
414 	vm_object_t             object,
415 	vm_object_offset_t      src_offset,
416 	vm_object_size_t        size,
417 	boolean_t               *_src_needs_copy,
418 	boolean_t               *_dst_needs_copy);
419 
420 __private_extern__ kern_return_t        vm_object_copy_strategically(
421 	vm_object_t             src_object,
422 	vm_object_offset_t      src_offset,
423 	vm_object_size_t        size,
424 	bool                    forking,
425 	vm_object_t             *dst_object,
426 	vm_object_offset_t      *dst_offset,
427 	boolean_t               *dst_needs_copy);
428 
429 __private_extern__ kern_return_t        vm_object_copy_slowly(
430 	vm_object_t             src_object,
431 	vm_object_offset_t      src_offset,
432 	vm_object_size_t        size,
433 	boolean_t               interruptible,
434 #if HAS_MTE
435 	bool                    create_mte_object,
436 #endif /* HAS_MTE */
437 	vm_object_t             *_result_object);
438 
439 __private_extern__ vm_object_t  vm_object_copy_delayed(
440 	vm_object_t             src_object,
441 	vm_object_offset_t      src_offset,
442 	vm_object_size_t        size,
443 	boolean_t               src_object_shared);
444 
445 __private_extern__ kern_return_t        vm_object_destroy(
446 	vm_object_t                                     object,
447 	vm_object_destroy_reason_t   reason);
448 
449 __private_extern__ void         vm_object_compressor_pager_create(
450 	vm_object_t     object);
451 
452 /*
453  * Query whether the provided object,offset reside in the compressor. The
454  * caller must hold the object lock and ensure that the object,offset under
455  * inspection is not in the process of being paged in/out (i.e. no busy
456  * backing page)
457  */
458 __private_extern__ vm_external_state_t vm_object_compressor_pager_state_get(
459 	vm_object_t        object,
460 	vm_object_offset_t offset);
461 
462 /*
463  * Clear the compressor slot corresponding to an object,offset. The caller
464  * must hold the object lock (exclusive) and ensure that the object,offset
465  * under inspection is not in the process of being paged in/out (i.e. no busy
466  * backing page)
467  */
468 __private_extern__ void vm_object_compressor_pager_state_clr(
469 	vm_object_t        object,
470 	vm_object_offset_t offset);
471 
472 __private_extern__ kern_return_t vm_object_upl_request(
473 	vm_object_t             object,
474 	vm_object_offset_t      offset,
475 	upl_size_t              size,
476 	upl_t                   *upl,
477 	upl_page_info_t         *page_info,
478 	unsigned int            *count,
479 	upl_control_flags_t     flags,
480 	vm_tag_t            tag);
481 
482 __private_extern__ kern_return_t vm_object_transpose(
483 	vm_object_t             object1,
484 	vm_object_t             object2,
485 	vm_object_size_t        transpose_size);
486 
487 __private_extern__ boolean_t vm_object_sync(
488 	vm_object_t             object,
489 	vm_object_offset_t      offset,
490 	vm_object_size_t        size,
491 	boolean_t               should_flush,
492 	boolean_t               should_return,
493 	boolean_t               should_iosync);
494 
495 __private_extern__ kern_return_t vm_object_update(
496 	vm_object_t             object,
497 	vm_object_offset_t      offset,
498 	vm_object_size_t        size,
499 	vm_object_offset_t      *error_offset,
500 	int                     *io_errno,
501 	memory_object_return_t  should_return,
502 	int                     flags,
503 	vm_prot_t               prot);
504 
505 __private_extern__ kern_return_t vm_object_lock_request(
506 	vm_object_t             object,
507 	vm_object_offset_t      offset,
508 	vm_object_size_t        size,
509 	memory_object_return_t  should_return,
510 	int                     flags,
511 	vm_prot_t               prot);
512 
513 
514 
515 __private_extern__ vm_object_t  vm_object_memory_object_associate(
516 	memory_object_t         pager,
517 	vm_object_t             object,
518 	vm_object_size_t        size,
519 	boolean_t               check_named);
520 
521 
522 __private_extern__ void vm_object_cluster_size(
523 	vm_object_t             object,
524 	vm_object_offset_t      *start,
525 	vm_size_t               *length,
526 	vm_object_fault_info_t  fault_info,
527 	uint32_t                *io_streaming);
528 
529 __private_extern__ kern_return_t vm_object_populate_with_private(
530 	vm_object_t             object,
531 	vm_object_offset_t      offset,
532 	ppnum_t                 phys_page,
533 	vm_size_t               size);
534 
535 __private_extern__ void vm_object_change_wimg_mode(
536 	vm_object_t             object,
537 	unsigned int            wimg_mode);
538 
539 extern kern_return_t vm_object_page_op(
540 	vm_object_t             object,
541 	vm_object_offset_t      offset,
542 	int                     ops,
543 	ppnum_t                 *phys_entry,
544 	int                     *flags);
545 
546 extern kern_return_t vm_object_range_op(
547 	vm_object_t             object,
548 	vm_object_offset_t      offset_beg,
549 	vm_object_offset_t      offset_end,
550 	int                     ops,
551 	uint32_t                *range);
552 
553 __private_extern__ void vm_object_set_chead_hint(
554 	vm_object_t     object);
555 
556 
557 __private_extern__ void         vm_object_reap_pages(
558 	vm_object_t object,
559 	int     reap_type);
560 #define REAP_REAP               0
561 #define REAP_TERMINATE          1
562 #define REAP_PURGEABLE          2
563 #define REAP_DATA_FLUSH         3
564 #define REAP_DATA_FLUSH_CLEAN   4
565 
566 #if CONFIG_FREEZE
567 
568 __private_extern__ uint32_t
569 vm_object_compressed_freezer_pageout(
570 	vm_object_t     object, uint32_t dirty_budget);
571 
572 __private_extern__ void
573 vm_object_compressed_freezer_done(
574 	void);
575 
576 #endif /* CONFIG_FREEZE */
577 
578 __private_extern__ void
579 vm_object_pageout(
580 	vm_object_t     object);
581 
582 /*
583  *	Event waiting handling
584  */
585 __enum_closed_decl(vm_object_wait_reason_t, uint8_t, {
586 	VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS = 0,
587 	VM_OBJECT_EVENT_PAGER_READY = 1,
588 	VM_OBJECT_EVENT_PAGING_IN_PROGRESS = 2,
589 	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS = 3,
590 	VM_OBJECT_EVENT_UNBLOCKED = 4,
591 	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS = 5,
592 	VM_OBJECT_EVENT_PAGEIN_THROTTLE = 6,
593 });
594 #define VM_OBJECT_EVENT_MAX VM_OBJECT_EVENT_PAGEIN_THROTTLE
595 /* 7 bits in "all_wanted" */
596 _Static_assert(VM_OBJECT_EVENT_MAX < 7,
597     "vm_object_wait_reason_t must fit in all_wanted");
598 /*
599  * @c vm_object_sleep uses (object + wait_reason) as the wait event, ensure
600  * this does not colide with the object lock.
601  */
602 _Static_assert(VM_OBJECT_EVENT_MAX < offsetof(struct vm_object, Lock),
603     "Wait reason collides with vm_object->Lock");
604 
605 extern wait_result_t vm_object_sleep(
606 	vm_object_t             object,
607 	vm_object_wait_reason_t reason,
608 	wait_interrupt_t        interruptible,
609 	lck_sleep_action_t      action);
610 
611 
/*
 * Record that some thread wants to be woken for "reason" on this object
 * by setting the corresponding bit in object->all_wanted.  The object
 * lock must be held exclusively (asserted).
 */
static inline void
vm_object_set_wanted(
	vm_object_t             object,
	vm_object_wait_reason_t reason)
{
	vm_object_lock_assert_exclusive(object);
	assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << reason);
}
622 
623 static inline bool
vm_object_wanted(vm_object_t object,vm_object_wait_reason_t event)624 vm_object_wanted(
625 	vm_object_t             object,
626 	vm_object_wait_reason_t event)
627 {
628 	vm_object_lock_assert_held(object);
629 	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);
630 
631 	return object->all_wanted & (1 << event);
632 }
633 
634 extern void vm_object_wakeup(
635 	vm_object_t             object,
636 	vm_object_wait_reason_t reason);
637 
638 /*
639  *	Routines implemented as macros
640  */
641 #ifdef VM_PIP_DEBUG
642 #include <libkern/OSDebug.h>
643 #define VM_PIP_DEBUG_BEGIN(object)                                      \
644 	MACRO_BEGIN                                                     \
645 	int pip = ((object)->paging_in_progress +                       \
646 	           (object)->activity_in_progress);                     \
647 	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
648 	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
649 	                           VM_PIP_DEBUG_STACK_FRAMES);          \
650 	}                                                               \
651 	MACRO_END
652 #else   /* VM_PIP_DEBUG */
653 #define VM_PIP_DEBUG_BEGIN(object)
654 #endif  /* VM_PIP_DEBUG */
655 
/*
 * Mark the start of a page-list request on "object" by bumping
 * vmo_pl_req_in_progress.  The object lock must be held exclusively
 * (asserted).  Panics rather than letting the counter wrap.
 */
static inline void
vm_object_pl_req_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	/*
	 * NOTE(review): VM_PIP_DEBUG_BEGIN is disabled here, unlike
	 * vm_object_activity_begin()/vm_object_paging_begin() --
	 * presumably intentional; confirm before re-enabling.
	 */
//	VM_PIP_DEBUG_BEGIN(object);
	if (os_inc_overflow(&object->vmo_pl_req_in_progress)) {
		panic("vm_object_pl_req_begin(%p): overflow\n", object);
	}
}
665 
/*
 * Mark the end of a page-list request on "object" (object locked
 * exclusive, asserted).  Panics on counter underflow.  When the last
 * in-flight request completes, wakes threads waiting on
 * VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS (see vm_object_pl_req_wait()).
 */
static inline void
vm_object_pl_req_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->vmo_pl_req_in_progress)) {
		panic("vm_object_pl_req_end(%p): underflow\n", object);
	}
	if (object->vmo_pl_req_in_progress == 0) {
		vm_object_wakeup((object),
		    VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS);
	}
}
678 
/*
 * Mark the start of an activity on "object" by bumping
 * activity_in_progress.  The object lock must be held exclusively
 * (asserted).  Records a backtrace when VM_PIP_DEBUG is enabled;
 * panics rather than letting the counter wrap.
 */
static inline void
vm_object_activity_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN(object);
	if (os_inc_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_begin(%p): overflow\n", object);
	}
}
688 
/*
 * Mark the end of an activity on "object" (object locked exclusive,
 * asserted).  Panics on counter underflow.  When both the paging and
 * activity counters reach zero, wakes threads waiting on
 * VM_OBJECT_EVENT_PAGING_IN_PROGRESS (see vm_object_paging_wait()).
 */
static inline void
vm_object_activity_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_end(%p): underflow\n", object);
	}
	if (object->paging_in_progress == 0 &&
	    object->activity_in_progress == 0) {
		vm_object_wakeup((object),
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
	}
}
702 
/*
 * Mark the start of a paging operation on "object" by bumping
 * paging_in_progress.  The object lock must be held exclusively
 * (asserted).  Records a backtrace when VM_PIP_DEBUG is enabled;
 * panics rather than letting the counter wrap.
 */
static inline void
vm_object_paging_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN((object));
	if (os_inc_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_begin(%p): overflow\n", object);
	}
}
712 
/*
 * Mark the end of a paging operation on "object" (object locked
 * exclusive, asserted).  Panics on counter underflow, then issues
 * wakeups for any waiters whose condition may now hold:
 *   - PAGEIN_THROTTLE waiters, exactly when the count drops below the
 *     vm_object_pagein_throttle limit;
 *   - PAGING_ONLY_IN_PROGRESS waiters, when the count reaches 0;
 *   - PAGING_IN_PROGRESS waiters, when both the paging and activity
 *     counters reach 0.
 */
static inline void
vm_object_paging_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_end(%p): underflow\n", object);
	}
	/*
	 * NB: This broadcast can be noisy, especially because all threads
	 * receiving the wakeup are given a priority floor. In the future, it
	 * would be great to utilize a primitive which can arbitrate
	 * the priority of all waiters and only issue as many wakeups as can be
	 * serviced.
	 */
	if (object->paging_in_progress == vm_object_pagein_throttle - 1) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGEIN_THROTTLE);
	}
	if (object->paging_in_progress == 0) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);
		if (object->activity_in_progress == 0) {
			vm_object_wakeup((object),
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
		}
	}
}
738 
739 extern wait_result_t vm_object_pl_req_wait(vm_object_t object, wait_interrupt_t interruptible);
740 /* Wait for *all* paging and activities on this object to complete */
741 extern wait_result_t vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible);
742 /* Wait for *all* paging on this object to complete */
743 extern wait_result_t vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible);
744 /* Wait for the number of page-ins on this object to fall below the throttle limit */
745 extern wait_result_t vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible);
746 
/*
 * Mark that a mapping operation is in progress on "object".  Unlike the
 * paging/activity counters this is a single flag, so only one mapping
 * operation may be in flight at a time (asserted).  The object lock
 * must be held exclusively (asserted).
 */
static inline void
vm_object_mapping_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(!object->mapping_in_progress);
	object->mapping_in_progress = TRUE;
}
754 
/*
 * Clear the mapping-in-progress flag on "object" (object locked
 * exclusive, flag currently set -- both asserted) and wake threads
 * waiting on VM_OBJECT_EVENT_MAPPING_IN_PROGRESS (see
 * vm_object_mapping_wait()).
 */
static inline void
vm_object_mapping_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(object->mapping_in_progress);
	object->mapping_in_progress = FALSE;
	vm_object_wakeup(object,
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);
}
764 
765 extern wait_result_t vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible);
766 
767 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
768 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
769 
770 extern void     vm_object_cache_add(vm_object_t);
771 extern void     vm_object_cache_remove(vm_object_t);
772 extern int      vm_object_cache_evict(int, int);
773 
774 #define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
775 #define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
776 #define VM_OBJECT_OWNER(object)                                         \
777 	((object == VM_OBJECT_NULL ||                                   \
778 	  ((object)->purgable == VM_PURGABLE_DENY &&                    \
779 	   (object)->vo_ledger_tag == 0) ||                             \
780 	  (object)->vo_owner == TASK_NULL)                              \
781 	 ? TASK_NULL    /* not owned */                                 \
782 	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
783 	    ? kernel_task /* disowned -> kernel */                      \
784 	    : (object)->vo_owner)) /* explicit owner */                 \
785 
786 
787 extern void     vm_object_ledger_tag_ledgers(
788 	vm_object_t object,
789 	int *ledger_idx_volatile,
790 	int *ledger_idx_nonvolatile,
791 	int *ledger_idx_volatile_compressed,
792 	int *ledger_idx_nonvolatile_compressed,
793 	int *ledger_idx_composite,
794 	int *ledger_idx_external_wired,
795 	boolean_t *do_footprint);
796 
797 extern kern_return_t vm_object_ownership_change(
798 	vm_object_t object,
799 	int new_ledger_tag,
800 	task_t new_owner,
801 	int new_ledger_flags,
802 	boolean_t task_objq_locked);
803 
804 
805 // LP64todo: all the current tools are 32bit, obviously never worked for 64b
806 // so probably should be a real 32b ID vs. ptr.
807 // Current users just check for equality
808 #define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((o)))
809 
/*
 * Install "copy" as the copy object of "object" (object locked
 * exclusive, asserted).  vo_copy_version is bumped only when a non-NULL
 * copy is installed, not when the link is cleared -- presumably so
 * readers can detect that a (new) copy object appeared while they had
 * the object unlocked; confirm against the vo_copy_version consumers.
 */
static inline void
VM_OBJECT_COPY_SET(
	vm_object_t object,
	vm_object_t copy)
{
	vm_object_lock_assert_exclusive(object);
	object->vo_copy = copy;
	if (copy != VM_OBJECT_NULL) {
		object->vo_copy_version++;
	}
}
821 
822 #endif /* XNU_KERNEL_PRIVATE */
823 
824 #endif  /* _VM_VM_OBJECT_INTERNAL_H_ */
825