xref: /xnu-12377.1.9/osfmk/vm/vm_object_internal.h (revision f6217f891ac0bb64f3d375211650a4c1ff8ca1ea)
1 /*
2  * Copyright (c) 2023 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 
29 #ifndef _VM_VM_OBJECT_INTERNAL_H_
30 #define _VM_VM_OBJECT_INTERNAL_H_
31 
32 #ifdef XNU_KERNEL_PRIVATE
33 #include <vm/vm_object_xnu.h>
34 
35 #if VM_OBJECT_TRACKING
36 #include <libkern/OSDebug.h>
37 #include <kern/btlog.h>
38 extern void vm_object_tracking_init(void);
39 extern btlog_t vm_object_tracking_btlog;
40 #define VM_OBJECT_TRACKING_NUM_RECORDS  50000
41 #define VM_OBJECT_TRACKING_OP_CREATED   1
42 #define VM_OBJECT_TRACKING_OP_MODIFIED  2
43 #define VM_OBJECT_TRACKING_OP_TRUESHARE 3
44 #endif /* VM_OBJECT_TRACKING */
45 
#if VM_OBJECT_ACCESS_TRACKING
/* Global counters of tracked read/write faults across all objects. */
extern uint64_t vm_object_access_tracking_reads;
extern uint64_t vm_object_access_tracking_writes;
/*
 * Query (and reset) the access-tracking state of "object".
 * Any of the out-parameters may be NULL if the caller is not interested.
 * (Fixed typo: "acess_tracking_writes" -> "access_tracking_writes";
 * parameter names in a prototype are documentation only, so this is ABI-safe.)
 */
extern void vm_object_access_tracking(vm_object_t object,
    int *access_tracking,
    uint32_t *access_tracking_reads,
    uint32_t *access_tracking_writes);
#endif /* VM_OBJECT_ACCESS_TRACKING */
54 
55 extern uint16_t vm_object_pagein_throttle;
56 
57 /*
58  *	Object locking macros
59  */
60 
61 #define vm_object_lock_init(object)                                     \
62 	lck_rw_init(&(object)->Lock, &vm_object_lck_grp,                \
63 	            (is_kernel_object(object) ?                         \
64 	             &kernel_object_lck_attr :                          \
65 	             (((object) == compressor_object) ?                 \
66 	             &compressor_object_lck_attr :                      \
67 	              &vm_object_lck_attr)))
68 #define vm_object_lock_destroy(object)  lck_rw_destroy(&(object)->Lock, &vm_object_lck_grp)
69 
70 /*
71  * This is used whenever we try to acquire the VM object lock
72  * without mutex_pause. The mutex_pause is intended to let
73  * pageout_scan try getting the object lock if it's trying to
74  * reclaim pages from that object (see vm_pageout_scan_wants_object).
75  */
76 #define vm_object_lock_try_scan(object) _vm_object_lock_try(object)
77 
78 /*
79  * CAUTION: the following vm_object_lock_assert_held*() macros merely
80  * check if anyone is holding the lock, but the holder may not necessarily
81  * be the caller...
82  */
83 #define vm_object_lock_assert_held(object) \
84 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_HELD)
85 #define vm_object_lock_assert_shared(object) \
86 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_SHARED)
87 #define vm_object_lock_assert_exclusive(object) \
88 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_EXCLUSIVE)
89 #define vm_object_lock_assert_notheld(object) \
90 	LCK_RW_ASSERT(&(object)->Lock, LCK_RW_ASSERT_NOTHELD)
91 
92 
/*
 * Setters for vm_object flag/state fields.
 *
 * These fields are bitfields packed into the vm_object structure, so a
 * store to any one of them is a read-modify-write of the containing word.
 * Every setter therefore asserts that the caller holds the object lock
 * exclusively, which prevents lost updates to neighboring bits.
 */
static inline void
VM_OBJECT_SET_PAGER_CREATED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_created = value;
}
static inline void
VM_OBJECT_SET_PAGER_INITIALIZED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_initialized = value;
}
static inline void
VM_OBJECT_SET_PAGER_READY(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_ready = value;
}
static inline void
VM_OBJECT_SET_PAGER_TRUSTED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pager_trusted = value;
}
static inline void
VM_OBJECT_SET_CAN_PERSIST(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->can_persist = value;
}
static inline void
VM_OBJECT_SET_INTERNAL(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->internal = value;
}
static inline void
VM_OBJECT_SET_PRIVATE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->private = value;
}
static inline void
VM_OBJECT_SET_PAGEOUT(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->pageout = value;
}
static inline void
VM_OBJECT_SET_ALIVE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->alive = value;
}
static inline void
VM_OBJECT_SET_PURGABLE(
	vm_object_t object,
	unsigned int value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgable = value;
	/* "purgable" is a narrow bitfield: verify the store did not truncate */
	assert3u(object->purgable, ==, value);
}
static inline void
VM_OBJECT_SET_PURGEABLE_ONLY_BY_KERNEL(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgeable_only_by_kernel = value;
}
static inline void
VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->purgeable_when_ripe = value;
}
static inline void
VM_OBJECT_SET_SHADOWED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->shadowed = value;
}
static inline void
VM_OBJECT_SET_TRUE_SHARE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->true_share = value;
}
static inline void
VM_OBJECT_SET_TERMINATING(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->terminating = value;
}
static inline void
VM_OBJECT_SET_NAMED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->named = value;
}
static inline void
VM_OBJECT_SET_SHADOW_SEVERED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->shadow_severed = value;
}
static inline void
VM_OBJECT_SET_PHYS_CONTIGUOUS(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->phys_contiguous = value;
}
static inline void
VM_OBJECT_SET_NOPHYSCACHE(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->nophyscache = value;
}
static inline void
VM_OBJECT_SET_FOR_REALTIME(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->for_realtime = value;
}
static inline void
VM_OBJECT_SET_NO_PAGER_REASON(
	vm_object_t object,
	unsigned int value)
{
	vm_object_lock_assert_exclusive(object);
	object->no_pager_reason = value;
	/* "no_pager_reason" is a narrow bitfield: verify no truncation */
	assert3u(object->no_pager_reason, ==, value);
}
#if FBDP_DEBUG_OBJECT_NO_PAGER
/* Debug-only flag; meaningful only to FBDP_DEBUG_OBJECT_NO_PAGER call sites. */
static inline void
VM_OBJECT_SET_FBDP_TRACKED(
	vm_object_t object,
	bool value)
{
	vm_object_lock_assert_exclusive(object);
	object->fbdp_tracked = value;
}
#endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
273 
274 /*
275  *	Declare procedures that operate on VM objects.
276  */
277 
278 __private_extern__ void         vm_object_bootstrap(void);
279 
280 __private_extern__ void         vm_object_reaper_init(void);
281 
282 __private_extern__ vm_object_t  vm_object_allocate(vm_object_size_t size,
283     vm_map_serial_t provenance);
284 
285 __private_extern__ void    _vm_object_allocate(vm_object_size_t size,
286     vm_object_t object, vm_map_serial_t provenance);
287 
288 __private_extern__ void vm_object_set_size(
289 	vm_object_t             object,
290 	vm_object_size_t        outer_size,
291 	vm_object_size_t        inner_size);
292 
/*
 * Take an additional reference on "object".  The caller must already
 * hold the object lock exclusively, which is why the cheaper
 * "locked" os_ref retain variant can be used here.
 */
static inline void
vm_object_reference_locked(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	os_ref_retain_locked_raw(&object->ref_count, &vm_object_refgrp);
}
299 
/*
 * Take an additional reference on "object" while holding the object
 * lock only shared; uses the atomic os_ref retain since other shared
 * holders may be retaining concurrently.
 */
static inline void
vm_object_reference_shared(vm_object_t object)
{
	vm_object_lock_assert_shared(object);
	os_ref_retain_raw(&object->ref_count, &vm_object_refgrp);
}
306 
307 __private_extern__ void         vm_object_reference(
308 	vm_object_t     object);
309 
310 #if     !MACH_ASSERT
311 
312 #define vm_object_reference(object)                     \
313 MACRO_BEGIN                                             \
314 	vm_object_t RObject = (object);                 \
315 	if (RObject) {                                  \
316 	        vm_object_lock_shared(RObject);         \
317 	        vm_object_reference_shared(RObject);    \
318 	        vm_object_unlock(RObject);              \
319 	}                                               \
320 MACRO_END
321 
322 #endif  /* MACH_ASSERT */
323 
324 __private_extern__ void         vm_object_deallocate(
325 	vm_object_t     object);
326 
327 __private_extern__ void         vm_object_pmap_protect(
328 	vm_object_t             object,
329 	vm_object_offset_t      offset,
330 	vm_object_size_t        size,
331 	pmap_t                  pmap,
332 	vm_map_size_t           pmap_page_size,
333 	vm_map_offset_t         pmap_start,
334 	vm_prot_t               prot);
335 
336 __private_extern__ void         vm_object_pmap_protect_options(
337 	vm_object_t             object,
338 	vm_object_offset_t      offset,
339 	vm_object_size_t        size,
340 	pmap_t                  pmap,
341 	vm_map_size_t           pmap_page_size,
342 	vm_map_offset_t         pmap_start,
343 	vm_prot_t               prot,
344 	int                     options);
345 
346 __private_extern__ void         vm_object_page_remove(
347 	vm_object_t             object,
348 	vm_object_offset_t      start,
349 	vm_object_offset_t      end);
350 
351 __private_extern__ void         vm_object_deactivate_pages(
352 	vm_object_t             object,
353 	vm_object_offset_t      offset,
354 	vm_object_size_t        size,
355 	boolean_t               kill_page,
356 	boolean_t               reusable_page,
357 	boolean_t               kill_no_write,
358 	struct pmap             *pmap,
359 /* XXX TODO4K: need pmap_page_size here too? */
360 	vm_map_offset_t         pmap_offset);
361 
362 __private_extern__ void vm_object_reuse_pages(
363 	vm_object_t             object,
364 	vm_object_offset_t      start_offset,
365 	vm_object_offset_t      end_offset,
366 	boolean_t               allow_partial_reuse);
367 
368 __private_extern__ kern_return_t vm_object_zero(
369 	vm_object_t             object,
370 	vm_object_offset_t      *cur_offset_p,
371 	vm_object_offset_t      end_offset);
372 
373 __private_extern__ uint64_t     vm_object_purge(
374 	vm_object_t              object,
375 	int                      flags);
376 
377 __private_extern__ kern_return_t vm_object_purgable_control(
378 	vm_object_t     object,
379 	vm_purgable_t   control,
380 	int             *state);
381 
382 __private_extern__ kern_return_t vm_object_get_page_counts(
383 	vm_object_t             object,
384 	vm_object_offset_t      offset,
385 	vm_object_size_t        size,
386 	uint64_t                *resident_page_count,
387 	uint64_t                *dirty_page_count,
388 	uint64_t                *swapped_page_count);
389 
390 __private_extern__ boolean_t    vm_object_coalesce(
391 	vm_object_t             prev_object,
392 	vm_object_t             next_object,
393 	vm_object_offset_t      prev_offset,
394 	vm_object_offset_t      next_offset,
395 	vm_object_size_t        prev_size,
396 	vm_object_size_t        next_size);
397 
398 __private_extern__ boolean_t    vm_object_shadow(
399 	vm_object_t             *object,
400 	vm_object_offset_t      *offset,
401 	vm_object_size_t        length,
402 	boolean_t               always_shadow);
403 
404 __private_extern__ void         vm_object_collapse(
405 	vm_object_t             object,
406 	vm_object_offset_t      offset,
407 	boolean_t               can_bypass);
408 
409 __private_extern__ boolean_t    vm_object_copy_quickly(
410 	vm_object_t             object,
411 	vm_object_offset_t      src_offset,
412 	vm_object_size_t        size,
413 	boolean_t               *_src_needs_copy,
414 	boolean_t               *_dst_needs_copy);
415 
416 __private_extern__ kern_return_t        vm_object_copy_strategically(
417 	vm_object_t             src_object,
418 	vm_object_offset_t      src_offset,
419 	vm_object_size_t        size,
420 	bool                    forking,
421 	vm_object_t             *dst_object,
422 	vm_object_offset_t      *dst_offset,
423 	boolean_t               *dst_needs_copy);
424 
425 __private_extern__ kern_return_t        vm_object_copy_slowly(
426 	vm_object_t             src_object,
427 	vm_object_offset_t      src_offset,
428 	vm_object_size_t        size,
429 	boolean_t               interruptible,
430 	vm_object_t             *_result_object);
431 
432 __private_extern__ vm_object_t  vm_object_copy_delayed(
433 	vm_object_t             src_object,
434 	vm_object_offset_t      src_offset,
435 	vm_object_size_t        size,
436 	boolean_t               src_object_shared);
437 
438 __private_extern__ kern_return_t        vm_object_destroy(
439 	vm_object_t                                     object,
440 	vm_object_destroy_reason_t   reason);
441 
442 __private_extern__ void         vm_object_compressor_pager_create(
443 	vm_object_t     object);
444 
445 /*
446  * Query whether the provided object,offset reside in the compressor. The
447  * caller must hold the object lock and ensure that the object,offset under
448  * inspection is not in the process of being paged in/out (i.e. no busy
449  * backing page)
450  */
451 __private_extern__ vm_external_state_t vm_object_compressor_pager_state_get(
452 	vm_object_t        object,
453 	vm_object_offset_t offset);
454 
455 /*
456  * Clear the compressor slot corresponding to an object,offset. The caller
457  * must hold the object lock (exclusive) and ensure that the object,offset
458  * under inspection is not in the process of being paged in/out (i.e. no busy
459  * backing page)
460  */
461 __private_extern__ void vm_object_compressor_pager_state_clr(
462 	vm_object_t        object,
463 	vm_object_offset_t offset);
464 
465 __private_extern__ kern_return_t vm_object_upl_request(
466 	vm_object_t             object,
467 	vm_object_offset_t      offset,
468 	upl_size_t              size,
469 	upl_t                   *upl,
470 	upl_page_info_t         *page_info,
471 	unsigned int            *count,
472 	upl_control_flags_t     flags,
473 	vm_tag_t            tag);
474 
475 __private_extern__ kern_return_t vm_object_transpose(
476 	vm_object_t             object1,
477 	vm_object_t             object2,
478 	vm_object_size_t        transpose_size);
479 
480 __private_extern__ boolean_t vm_object_sync(
481 	vm_object_t             object,
482 	vm_object_offset_t      offset,
483 	vm_object_size_t        size,
484 	boolean_t               should_flush,
485 	boolean_t               should_return,
486 	boolean_t               should_iosync);
487 
488 __private_extern__ kern_return_t vm_object_update(
489 	vm_object_t             object,
490 	vm_object_offset_t      offset,
491 	vm_object_size_t        size,
492 	vm_object_offset_t      *error_offset,
493 	int                     *io_errno,
494 	memory_object_return_t  should_return,
495 	int                     flags,
496 	vm_prot_t               prot);
497 
498 __private_extern__ kern_return_t vm_object_lock_request(
499 	vm_object_t             object,
500 	vm_object_offset_t      offset,
501 	vm_object_size_t        size,
502 	memory_object_return_t  should_return,
503 	int                     flags,
504 	vm_prot_t               prot);
505 
506 
507 
508 __private_extern__ vm_object_t  vm_object_memory_object_associate(
509 	memory_object_t         pager,
510 	vm_object_t             object,
511 	vm_object_size_t        size,
512 	boolean_t               check_named);
513 
514 
515 __private_extern__ void vm_object_cluster_size(
516 	vm_object_t             object,
517 	vm_object_offset_t      *start,
518 	vm_size_t               *length,
519 	vm_object_fault_info_t  fault_info,
520 	uint32_t                *io_streaming);
521 
522 __private_extern__ kern_return_t vm_object_populate_with_private(
523 	vm_object_t             object,
524 	vm_object_offset_t      offset,
525 	ppnum_t                 phys_page,
526 	vm_size_t               size);
527 
528 __private_extern__ void vm_object_change_wimg_mode(
529 	vm_object_t             object,
530 	unsigned int            wimg_mode);
531 
532 extern kern_return_t vm_object_page_op(
533 	vm_object_t             object,
534 	vm_object_offset_t      offset,
535 	int                     ops,
536 	ppnum_t                 *phys_entry,
537 	int                     *flags);
538 
539 extern kern_return_t vm_object_range_op(
540 	vm_object_t             object,
541 	vm_object_offset_t      offset_beg,
542 	vm_object_offset_t      offset_end,
543 	int                     ops,
544 	uint32_t                *range);
545 
546 __private_extern__ void vm_object_set_chead_hint(
547 	vm_object_t     object);
548 
549 
550 __private_extern__ void         vm_object_reap_pages(
551 	vm_object_t object,
552 	int     reap_type);
553 #define REAP_REAP       0
554 #define REAP_TERMINATE  1
555 #define REAP_PURGEABLE  2
556 #define REAP_DATA_FLUSH 3
557 
558 #if CONFIG_FREEZE
559 
560 __private_extern__ uint32_t
561 vm_object_compressed_freezer_pageout(
562 	vm_object_t     object, uint32_t dirty_budget);
563 
564 __private_extern__ void
565 vm_object_compressed_freezer_done(
566 	void);
567 
568 #endif /* CONFIG_FREEZE */
569 
570 __private_extern__ void
571 vm_object_pageout(
572 	vm_object_t     object);
573 
574 /*
575  *	Event waiting handling
576  */
577 __enum_closed_decl(vm_object_wait_reason_t, uint8_t, {
578 	VM_OBJECT_EVENT_PAGER_INIT = 0,
579 	VM_OBJECT_EVENT_PAGER_READY = 1,
580 	VM_OBJECT_EVENT_PAGING_IN_PROGRESS = 2,
581 	VM_OBJECT_EVENT_MAPPING_IN_PROGRESS = 3,
582 	VM_OBJECT_EVENT_UNBLOCKED = 4,
583 	VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS = 5,
584 	VM_OBJECT_EVENT_PAGEIN_THROTTLE = 6,
585 });
586 #define VM_OBJECT_EVENT_MAX VM_OBJECT_EVENT_PAGEIN_THROTTLE
587 /* 7 bits in "all_wanted" */
588 _Static_assert(VM_OBJECT_EVENT_MAX < 7,
589     "vm_object_wait_reason_t must fit in all_wanted");
590 /*
591  * @c vm_object_sleep uses (object + wait_reason) as the wait event, ensure
592  * this does not collide with the object lock.
593  */
594 _Static_assert(VM_OBJECT_EVENT_MAX < offsetof(struct vm_object, Lock),
595     "Wait reason collides with vm_object->Lock");
596 
597 extern wait_result_t vm_object_sleep(
598 	vm_object_t             object,
599 	vm_object_wait_reason_t reason,
600 	wait_interrupt_t        interruptible,
601 	lck_sleep_action_t      action);
602 
603 
/*
 * Record that someone intends to wait for "reason" on this object.
 * The bit is tested later via vm_object_wanted() before issuing a
 * wakeup for that reason.
 */
static inline void
vm_object_set_wanted(
	vm_object_t             object,
	vm_object_wait_reason_t reason)
{
	vm_object_lock_assert_exclusive(object);
	/* NOTE(review): "reason >= 0" is trivially true for this unsigned enum type */
	assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);

	object->all_wanted |= (1 << reason);
}
614 
/*
 * Return whether a waiter has registered interest in "event" on this
 * object.  Reading all_wanted only requires the lock held (shared or
 * exclusive).
 */
static inline bool
vm_object_wanted(
	vm_object_t             object,
	vm_object_wait_reason_t event)
{
	vm_object_lock_assert_held(object);
	/* NOTE(review): "event >= 0" is trivially true for this unsigned enum type */
	assert(event >= 0 && event <= VM_OBJECT_EVENT_MAX);

	return object->all_wanted & (1 << event);
}
625 
626 extern void vm_object_wakeup(
627 	vm_object_t             object,
628 	vm_object_wait_reason_t reason);
629 
630 /*
631  *	Routines implemented as macros
632  */
633 #ifdef VM_PIP_DEBUG
634 #include <libkern/OSDebug.h>
635 #define VM_PIP_DEBUG_BEGIN(object)                                      \
636 	MACRO_BEGIN                                                     \
637 	int pip = ((object)->paging_in_progress +                       \
638 	           (object)->activity_in_progress);                     \
639 	if (pip < VM_PIP_DEBUG_MAX_REFS) {                              \
640 	        (void) OSBacktrace(&(object)->pip_holders[pip].pip_retaddr[0], \
641 	                           VM_PIP_DEBUG_STACK_FRAMES);          \
642 	}                                                               \
643 	MACRO_END
644 #else   /* VM_PIP_DEBUG */
645 #define VM_PIP_DEBUG_BEGIN(object)
646 #endif  /* VM_PIP_DEBUG */
647 
/*
 * Mark the start of one unit of (non-paging) activity on "object".
 * Panics rather than wrapping if the counter would overflow.
 * Caller must hold the object lock exclusively.
 */
static inline void
vm_object_activity_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN(object);
	if (os_inc_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_begin(%p): overflow\n", object);
	}
}
657 
/*
 * Mark the end of one unit of activity on "object" (panic on
 * underflow).  When both paging_in_progress and activity_in_progress
 * have drained to zero, wake threads waiting for the object to become
 * fully quiescent (VM_OBJECT_EVENT_PAGING_IN_PROGRESS).
 */
static inline void
vm_object_activity_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->activity_in_progress)) {
		panic("vm_object_activity_end(%p): underflow\n", object);
	}
	if (object->paging_in_progress == 0 &&
	    object->activity_in_progress == 0) {
		vm_object_wakeup((object),
		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
	}
}
671 
/*
 * Mark the start of one unit of paging activity on "object".
 * Panics rather than wrapping if the counter would overflow.
 * Caller must hold the object lock exclusively.
 */
static inline void
vm_object_paging_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	VM_PIP_DEBUG_BEGIN((object));
	if (os_inc_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_begin(%p): overflow\n", object);
	}
}
681 
/*
 * Mark the end of one unit of paging activity on "object" (panic on
 * underflow), then wake any waiters whose condition may now hold:
 *  - page-in throttle waiters, once the count drops just below the
 *    vm_object_pagein_throttle limit;
 *  - "paging only" waiters, once paging_in_progress reaches zero;
 *  - full-quiescence waiters, once activity_in_progress is also zero.
 */
static inline void
vm_object_paging_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	if (os_dec_overflow(&object->paging_in_progress)) {
		panic("vm_object_paging_end(%p): underflow\n", object);
	}
	/*
	 * NB: This broadcast can be noisy, especially because all threads
	 * receiving the wakeup are given a priority floor. In the future, it
	 * would be great to utilize a primitive which can arbitrate
	 * the priority of all waiters and only issue as many wakeups as can be
	 * serviced.
	 */
	if (object->paging_in_progress == vm_object_pagein_throttle - 1) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGEIN_THROTTLE);
	}
	if (object->paging_in_progress == 0) {
		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS);
		if (object->activity_in_progress == 0) {
			vm_object_wakeup((object),
			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS);
		}
	}
}
707 
708 /* Wait for *all* paging and activities on this object to complete */
709 extern wait_result_t vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible);
710 /* Wait for *all* paging on this object to complete */
711 extern wait_result_t vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible);
712 /* Wait for the number of page-ins on this object to fall below the throttle limit */
713 extern wait_result_t vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible);
714 
/*
 * Mark the start of a mapping operation on "object".  Only one mapping
 * operation may be in progress at a time (hence the assert); others
 * must wait via vm_object_mapping_wait().
 */
static inline void
vm_object_mapping_begin(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(!object->mapping_in_progress);
	object->mapping_in_progress = TRUE;
}
722 
/*
 * Mark the end of a mapping operation on "object" and wake any threads
 * blocked in vm_object_mapping_wait().
 */
static inline void
vm_object_mapping_end(vm_object_t object)
{
	vm_object_lock_assert_exclusive(object);
	assert(object->mapping_in_progress);
	object->mapping_in_progress = FALSE;
	vm_object_wakeup(object,
	    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS);
}
732 
733 extern wait_result_t vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible);
734 
735 #define vm_object_round_page(x) (((vm_object_offset_t)(x) + PAGE_MASK) & ~((signed)PAGE_MASK))
736 #define vm_object_trunc_page(x) ((vm_object_offset_t)(x) & ~((signed)PAGE_MASK))
737 
738 extern void     vm_object_cache_add(vm_object_t);
739 extern void     vm_object_cache_remove(vm_object_t);
740 extern int      vm_object_cache_evict(int, int);
741 
742 #define VM_OBJECT_OWNER_DISOWNED ((task_t) -1)
743 #define VM_OBJECT_OWNER_UNCHANGED ((task_t) -2)
744 #define VM_OBJECT_OWNER(object)                                         \
745 	((object == VM_OBJECT_NULL ||                                   \
746 	  ((object)->purgable == VM_PURGABLE_DENY &&                    \
747 	   (object)->vo_ledger_tag == 0) ||                             \
748 	  (object)->vo_owner == TASK_NULL)                              \
749 	 ? TASK_NULL    /* not owned */                                 \
750 	 : (((object)->vo_owner == VM_OBJECT_OWNER_DISOWNED)            \
751 	    ? kernel_task /* disowned -> kernel */                      \
752 	    : (object)->vo_owner)) /* explicit owner */                 \
753 
754 
755 extern void     vm_object_ledger_tag_ledgers(
756 	vm_object_t object,
757 	int *ledger_idx_volatile,
758 	int *ledger_idx_nonvolatile,
759 	int *ledger_idx_volatile_compressed,
760 	int *ledger_idx_nonvolatile_compressed,
761 	int *ledger_idx_composite,
762 	int *ledger_idx_external_wired,
763 	boolean_t *do_footprint);
764 
765 extern kern_return_t vm_object_ownership_change(
766 	vm_object_t object,
767 	int new_ledger_tag,
768 	task_t new_owner,
769 	int new_ledger_flags,
770 	boolean_t task_objq_locked);
771 
772 
773 // LP64todo: all the current tools are 32bit, obviously never worked for 64b
774 // so probably should be a real 32b ID vs. ptr.
775 // Current users just check for equality
776 #define VM_OBJECT_ID(o) ((uint32_t)(uintptr_t)VM_KERNEL_ADDRHASH((o)))
777 
/*
 * Point "object" at its copy object.  vo_copy_version is bumped on
 * every non-NULL transition — presumably so readers who drop and
 * reacquire the lock can detect that the copy relationship changed
 * underneath them (NOTE(review): confirm against vo_copy_version
 * consumers).
 */
static inline void
VM_OBJECT_COPY_SET(
	vm_object_t object,
	vm_object_t copy)
{
	vm_object_lock_assert_exclusive(object);
	object->vo_copy = copy;
	if (copy != VM_OBJECT_NULL) {
		object->vo_copy_version++;
	}
}
789 
790 #endif /* XNU_KERNEL_PRIVATE */
791 
792 #endif  /* _VM_VM_OBJECT_INTERNAL_H_ */
793