xref: /xnu-12377.61.12/osfmk/vm/vm_object.c (revision 4d495c6e23c53686cf65f45067f79024cf5dcee8)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_object.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Virtual memory object module.
63  */
64 
65 #include <debug.h>
66 
67 #include <mach/mach_types.h>
68 #include <mach/memory_object.h>
69 #include <mach/vm_param.h>
70 
71 #include <mach/sdt.h>
72 
73 #include <ipc/ipc_types.h>
74 #include <ipc/ipc_port.h>
75 
76 #include <kern/kern_types.h>
77 #include <kern/assert.h>
78 #include <kern/queue.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc.h>
81 #include <kern/host.h>
82 #include <kern/host_statistics.h>
83 #include <kern/processor.h>
84 #include <kern/misc_protos.h>
85 #include <kern/policy_internal.h>
86 #include <kern/coalition.h>
87 
88 #include <sys/kdebug.h>
89 #include <sys/kdebug_triage.h>
90 
91 #include <vm/memory_object_internal.h>
92 #include <vm/vm_compressor_pager_internal.h>
93 #include <vm/vm_fault_internal.h>
94 #include <vm/vm_map.h>
95 #include <vm/vm_object_internal.h>
96 #include <vm/vm_page_internal.h>
97 #include <vm/vm_pageout_internal.h>
98 #include <vm/vm_protos_internal.h>
99 #include <vm/vm_purgeable_internal.h>
100 #include <vm/vm_ubc.h>
101 #include <vm/vm_compressor_xnu.h>
102 #include <os/hash.h>
103 
104 #if CONFIG_PHANTOM_CACHE
105 #include <vm/vm_phantom_cache_internal.h>
106 #endif
107 
108 #if VM_OBJECT_ACCESS_TRACKING
109 uint64_t vm_object_access_tracking_reads = 0;
110 uint64_t vm_object_access_tracking_writes = 0;
111 #endif /* VM_OBJECT_ACCESS_TRACKING */
112 
113 boolean_t vm_object_collapse_compressor_allowed = TRUE;
114 
115 struct vm_counters vm_counters;
116 
117 os_refgrp_decl(, vm_object_refgrp, "vm_object", NULL);
118 
119 #if DEVELOPMENT || DEBUG
120 extern struct memory_object_pager_ops shared_region_pager_ops;
121 extern unsigned int shared_region_pagers_resident_count;
122 extern unsigned int shared_region_pagers_resident_peak;
123 #endif /* DEVELOPMENT || DEBUG */
124 
125 #if VM_OBJECT_TRACKING
126 btlog_t vm_object_tracking_btlog;
127 
128 void
129 vm_object_tracking_init(void)
130 {
131 	int vm_object_tracking;
132 
133 	vm_object_tracking = 1;
134 	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
135 	    sizeof(vm_object_tracking));
136 
137 	if (vm_object_tracking) {
138 		vm_object_tracking_btlog = btlog_create(BTLOG_HASH,
139 		    VM_OBJECT_TRACKING_NUM_RECORDS);
140 		assert(vm_object_tracking_btlog);
141 	}
142 }
143 #endif /* VM_OBJECT_TRACKING */
144 
145 /*
146  *	Virtual memory objects maintain the actual data
147  *	associated with allocated virtual memory.  A given
148  *	page of memory exists within exactly one object.
149  *
150  *	An object is only deallocated when all "references"
151  *	are given up.
152  *
153  *	Associated with each object is a list of all resident
154  *	memory pages belonging to that object; this list is
155  *	maintained by the "vm_page" module, but locked by the object's
156  *	lock.
157  *
158  *	Each object also records the memory object reference
159  *	that is used by the kernel to request and write
160  *	back data (the memory object, field "pager"), etc...
161  *
162  *	Virtual memory objects are allocated to provide
163  *	zero-filled memory (vm_allocate) or map a user-defined
164  *	memory object into a virtual address space (vm_map).
165  *
166  *	Virtual memory objects that refer to a user-defined
167  *	memory object are called "permanent", because all changes
168  *	made in virtual memory are reflected back to the
169  * memory manager, which may then store them permanently.
170  *	Other virtual memory objects are called "temporary",
171  *	meaning that changes need be written back only when
172  *	necessary to reclaim pages, and that storage associated
173  *	with the object can be discarded once it is no longer
174  *	mapped.
175  *
176  *	A permanent memory object may be mapped into more
177  *	than one virtual address space.  Moreover, two threads
178  *	may attempt to make the first mapping of a memory
179  *	object concurrently.  Only one thread is allowed to
180  *	complete this mapping; all others wait for the
181  *	"pager_initialized" field is asserted, indicating
182  *	that the first thread has initialized all of the
183  *	necessary fields in the virtual memory object structure.
184  *
185  *	The kernel relies on a *default memory manager* to
186  *	provide backing storage for the zero-filled virtual
187  *	memory objects.  The pager memory objects associated
188  *	with these temporary virtual memory objects are only
189  *	requested from the default memory manager when it
190  *	becomes necessary.  Virtual memory objects
191  *	that depend on the default memory manager are called
192  *	"internal".  The "pager_created" field is provided to
193  *	indicate whether these ports have ever been allocated.
194  *
195  *	The kernel may also create virtual memory objects to
196  *	hold changed pages after a copy-on-write operation.
197  *	In this case, the virtual memory object (and its
198  *	backing storage -- its memory object) only contain
199  *	those pages that have been changed.  The "shadow"
200  *	field refers to the virtual memory object that contains
201  *	the remainder of the contents.  The "shadow_offset"
202  *	field indicates where in the "shadow" these contents begin.
203  *	The "copy" field refers to a virtual memory object
204  *	to which changed pages must be copied before changing
205  *	this object, in order to implement another form
206  *	of copy-on-write optimization.
207  *
208  *	The virtual memory object structure also records
209  *	the attributes associated with its memory object.
210  *	The "pager_ready", "can_persist" and "copy_strategy"
211  *	fields represent those attributes.  The "cached_list"
212  *	field is used in the implementation of the persistence
213  *	attribute.
214  *
215  * ZZZ Continue this comment.
216  */
217 
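/*
 * Illustrative sketch only (compiled out, not part of this file's build):
 * how the "shadow" and "vo_shadow_offset" fields described above are
 * conceptually walked when looking up a page after a copy-on-write.  The
 * helper name is hypothetical and locking is omitted; the real traversal
 * lives in the fault path (vm_fault).
 */
#if 0
static vm_page_t
example_shadow_chain_lookup(vm_object_t object, vm_object_offset_t offset)
{
	vm_page_t m;

	while (object != VM_OBJECT_NULL) {
		/* pages changed since the copy reside in the top object... */
		m = vm_page_lookup(object, offset);
		if (m != VM_PAGE_NULL) {
			return m;
		}
		/* ...unchanged contents are found further down the shadow chain */
		offset += object->vo_shadow_offset;
		object = object->shadow;
	}
	return VM_PAGE_NULL;
}
#endif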
218 /* Forward declarations for internal functions. */
219 static kern_return_t    vm_object_terminate(
220 	vm_object_t     object);
221 
222 static void             vm_object_do_collapse(
223 	vm_object_t     object,
224 	vm_object_t     backing_object);
225 
226 static void             vm_object_do_bypass(
227 	vm_object_t     object,
228 	vm_object_t     backing_object);
229 
230 static void             vm_object_release_pager(
231 	memory_object_t pager);
232 
233 SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
234 
235 /*
236  * Wired-down kernel memory belongs to this memory object (kernel_object)
237  * by default to avoid wasting data structures.
238  */
239 static struct vm_object                 kernel_object_store VM_PAGE_PACKED_ALIGNED;
240 const vm_object_t                       kernel_object_default = &kernel_object_store;
241 
242 static struct vm_object                 compressor_object_store VM_PAGE_PACKED_ALIGNED;
243 const vm_object_t                       compressor_object = &compressor_object_store;
244 
245 /*
246  * This object holds all pages that have been retired due to errors like ECC.
247  * The system should never use the page or look at its contents. The offset
248  * in this object is the same as the page's physical address.
249  */
250 static struct vm_object                 retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
251 const vm_object_t                       retired_pages_object = &retired_pages_object_store;
252 
253 #if HAS_MTE
254 /*
255  * This object holds all pages that are currently being used to hold MTE tags.
256  * The pages are wired and may have no pmap mappings of any kind.
257  * The object offset will be the same as physical address.
258  */
259 static struct vm_object                 mte_tags_object_store VM_PAGE_PACKED_ALIGNED;
260 const vm_object_t                       mte_tags_object = &mte_tags_object_store;
261 
262 /*
263  * This object is for pages that would have been on kernel_object_default, except
264  * that they are using MTE tags.
265  */
266 static struct vm_object                 kernel_object_tagged_store VM_PAGE_PACKED_ALIGNED;
267 const vm_object_t                       kernel_object_tagged = &kernel_object_tagged_store;
268 #endif /* HAS_MTE */
269 
270 static struct vm_object                 exclaves_object_store VM_PAGE_PACKED_ALIGNED;
271 const vm_object_t                       exclaves_object = &exclaves_object_store;
272 #if HAS_MTE
273 static struct vm_object                 exclaves_object_tagged_store VM_PAGE_PACKED_ALIGNED;
274 const vm_object_t                       exclaves_object_tagged = &exclaves_object_tagged_store;
275 #endif /* HAS_MTE */
276 
277 
278 /*
279  *	Virtual memory objects are initialized from
280  *	a template (see vm_object_allocate).
281  *
282  *	When adding a new field to the virtual memory
283  *	object structure, be sure to add initialization
284  *	(see _vm_object_allocate()).
285  */
286 static const struct vm_object vm_object_template = {
287 	.memq.prev = 0,
288 	.memq.next = 0,
289 	/*
290 	 * The lock will be initialized for each allocated object in
291 	 * _vm_object_allocate(), so we don't need to initialize it in
292 	 * the vm_object_template.
293 	 */
294 	.vo_size = 0,
295 	.memq_hint = VM_PAGE_NULL,
296 	/*
297 	 * The ref count will be initialized for each allocated object in
298 	 * _vm_object_allocate(), so we don't need to initialize it in the
299 	 * vm_object_template.
300 	 */
301 	.resident_page_count = 0,
302 	.wired_page_count = 0,
303 	.reusable_page_count = 0,
304 	.vo_copy = VM_OBJECT_NULL,
305 	.vo_copy_version = 0,
306 	.vo_inherit_copy_none = false,
307 	.shadow = VM_OBJECT_NULL,
308 	.vo_shadow_offset = (vm_object_offset_t) 0,
309 	.pager = MEMORY_OBJECT_NULL,
310 	.paging_offset = 0,
311 	.pager_control = MEMORY_OBJECT_CONTROL_NULL,
312 	.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
313 	.paging_in_progress = 0,
314 	.vo_size_delta = 0,
315 	.activity_in_progress = 0,
316 
317 	/* Begin bitfields */
318 	.all_wanted = 0, /* all bits FALSE */
319 	.pager_created = FALSE,
320 	.pager_initialized = FALSE,
321 	.pager_ready = FALSE,
322 	.pager_trusted = FALSE,
323 	.can_persist = FALSE,
324 	.internal = TRUE,
325 	.private = FALSE,
326 	.pageout = FALSE,
327 	.alive = TRUE,
328 	.purgable = VM_PURGABLE_DENY,
329 	.purgeable_when_ripe = FALSE,
330 	.purgeable_only_by_kernel = FALSE,
331 	.shadowed = FALSE,
332 	.true_share = FALSE,
333 	.terminating = FALSE,
334 	.named = FALSE,
335 	.shadow_severed = FALSE,
336 	.phys_contiguous = FALSE,
337 	.nophyscache = FALSE,
338 	/* End bitfields */
339 
340 	.cached_list.prev = NULL,
341 	.cached_list.next = NULL,
342 
343 	.last_alloc = (vm_object_offset_t) 0,
344 	.sequential = (vm_object_offset_t) 0,
345 	.pages_created = 0,
346 	.pages_used = 0,
347 	.scan_collisions = 0,
348 #if COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1
349 	.vo_chead_hint = 0,
350 #endif /* COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1 */
351 #if CONFIG_PHANTOM_CACHE
352 	.phantom_object_id = 0,
353 #endif
354 	.cow_hint = ~(vm_offset_t)0,
355 
356 	/* cache bitfields */
357 	.wimg_bits = VM_WIMG_USE_DEFAULT,
358 	.set_cache_attr = FALSE,
359 	.object_is_shared_cache = FALSE,
360 	.code_signed = FALSE,
361 	.transposed = FALSE,
362 	.mapping_in_progress = FALSE,
363 	.phantom_isssd = FALSE,
364 	.volatile_empty = FALSE,
365 	.volatile_fault = FALSE,
366 	.all_reusable = FALSE,
367 	.blocked_access = FALSE,
368 	.vo_ledger_tag = VM_LEDGER_TAG_NONE,
369 	.vo_no_footprint = FALSE,
370 #if CONFIG_IOSCHED || UPL_DEBUG
371 	.uplq.prev = NULL,
372 	.uplq.next = NULL,
373 #endif /* CONFIG_IOSCHED || UPL_DEBUG */
374 #ifdef VM_PIP_DEBUG
375 	.pip_holders = {0},
376 #endif /* VM_PIP_DEBUG */
377 
378 	.objq.next = NULL,
379 	.objq.prev = NULL,
380 	.task_objq.next = NULL,
381 	.task_objq.prev = NULL,
382 
383 	.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
384 	.purgeable_queue_group = 0,
385 
386 	.wire_tag = VM_KERN_MEMORY_NONE,
387 #if !VM_TAG_ACTIVE_UPDATE
388 	.wired_objq.next = NULL,
389 	.wired_objq.prev = NULL,
390 #endif /* ! VM_TAG_ACTIVE_UPDATE */
391 
392 	.io_tracking = FALSE,
393 
394 #if CONFIG_SECLUDED_MEMORY
395 	.eligible_for_secluded = FALSE,
396 	.can_grab_secluded = FALSE,
397 #else /* CONFIG_SECLUDED_MEMORY */
398 	.__object3_unused_bits = 0,
399 #endif /* CONFIG_SECLUDED_MEMORY */
400 
401 	.for_realtime = false,
402 	.no_pager_reason = VM_OBJECT_DESTROY_UNKNOWN_REASON,
403 
404 #if VM_OBJECT_ACCESS_TRACKING
405 	.access_tracking = FALSE,
406 	.access_tracking_reads = 0,
407 	.access_tracking_writes = 0,
408 #endif /* VM_OBJECT_ACCESS_TRACKING */
409 
410 #if DEBUG
411 	.purgeable_owner_bt = {0},
412 	.vo_purgeable_volatilizer = NULL,
413 	.purgeable_volatilizer_bt = {0},
414 #endif /* DEBUG */
415 	.vmo_provenance = VM_MAP_SERIAL_NONE,
416 	.vmo_pl_req_in_progress = 0,
417 };
418 
419 LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
420 LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
421 LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
422 LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
423 LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
424 
425 unsigned int vm_page_purged_wired = 0;
426 unsigned int vm_page_purged_busy = 0;
427 unsigned int vm_page_purged_others = 0;
428 
429 static queue_head_t     vm_object_cached_list;
430 static uint32_t         vm_object_cache_pages_freed = 0;
431 static uint32_t         vm_object_cache_pages_moved = 0;
432 static uint32_t         vm_object_cache_pages_skipped = 0;
433 static uint32_t         vm_object_cache_adds = 0;
434 static uint32_t         vm_object_cached_count = 0;
435 static LCK_MTX_DECLARE_ATTR(vm_object_cached_lock_data,
436     &vm_object_cache_lck_grp, &vm_object_lck_attr);
437 
438 static uint32_t         vm_object_page_grab_failed = 0;
439 static uint32_t         vm_object_page_grab_skipped = 0;
440 static uint32_t         vm_object_page_grab_returned = 0;
441 static uint32_t         vm_object_page_grab_pmapped = 0;
442 static uint32_t         vm_object_page_grab_reactivations = 0;
443 
444 #define vm_object_cache_lock_spin()             \
445 	        lck_mtx_lock_spin(&vm_object_cached_lock_data)
446 #define vm_object_cache_unlock()        \
447 	        lck_mtx_unlock(&vm_object_cached_lock_data)
448 
449 static void     vm_object_cache_remove_locked(vm_object_t);
450 
451 
452 static void vm_object_reap(vm_object_t object);
453 static void vm_object_reap_async(vm_object_t object);
454 static void vm_object_reaper_thread(void);
455 
456 static LCK_MTX_DECLARE_ATTR(vm_object_reaper_lock_data,
457     &vm_object_lck_grp, &vm_object_lck_attr);
458 
459 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
460 unsigned int vm_object_reap_count = 0;
461 unsigned int vm_object_reap_count_async = 0;
462 
463 #if HAS_MTE
464 unsigned int vm_object_no_compressor_pager_for_mte_count = 0;
465 TUNABLE(bool, vm_object_allow_compressor_pager_for_mte, "compress_mte", true);
466 #endif
467 
468 #define vm_object_reaper_lock()         \
469 	        lck_mtx_lock(&vm_object_reaper_lock_data)
470 #define vm_object_reaper_lock_spin()            \
471 	        lck_mtx_lock_spin(&vm_object_reaper_lock_data)
472 #define vm_object_reaper_unlock()       \
473 	        lck_mtx_unlock(&vm_object_reaper_lock_data)
474 
475 #if CONFIG_IOSCHED
476 /* I/O Re-prioritization request list */
477 struct mpsc_daemon_queue io_reprioritize_q;
478 
479 ZONE_DEFINE_TYPE(io_reprioritize_req_zone, "io_reprioritize_req",
480     struct io_reprioritize_req, ZC_NONE);
481 
482 /* I/O re-prioritization MPSC callback */
483 static void io_reprioritize(mpsc_queue_chain_t elm, mpsc_daemon_queue_t dq);
484 
485 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
486 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
487 void vm_decmp_upl_reprioritize(upl_t, int);
488 #endif
489 
490 void
491 vm_object_set_size(
492 	vm_object_t             object,
493 	vm_object_size_t        outer_size,
494 	vm_object_size_t        inner_size)
495 {
496 	object->vo_size = vm_object_round_page(outer_size);
497 #if KASAN
498 	assert(object->vo_size - inner_size <= USHRT_MAX);
499 	object->vo_size_delta = (unsigned short)(object->vo_size - inner_size);
500 #else
501 	(void)inner_size;
502 #endif
503 }
504 
505 
506 /*
507  *	vm_object_allocate:
508  *
509  *	Returns a new object with the given size.
510  */
511 
512 __private_extern__ void
513 _vm_object_allocate(
514 	vm_object_size_t        size,
515 	vm_object_t             object,
516 	vm_map_serial_t                 provenance)
517 {
518 	*object = vm_object_template;
519 	object->vmo_provenance = provenance;
520 
521 	vm_page_queue_init(&object->memq);
522 #if UPL_DEBUG || CONFIG_IOSCHED
523 	queue_init(&object->uplq);
524 #endif
525 	vm_object_lock_init(object);
526 	vm_object_set_size(object, size, size);
527 
528 	os_ref_init_raw(&object->ref_count, &vm_object_refgrp);
529 
530 #if VM_OBJECT_TRACKING_OP_CREATED
531 	if (vm_object_tracking_btlog) {
532 		btlog_record(vm_object_tracking_btlog, object,
533 		    VM_OBJECT_TRACKING_OP_CREATED,
534 		    btref_get(__builtin_frame_address(0), 0));
535 	}
536 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
537 }
538 
539 __private_extern__ vm_object_t
540 vm_object_allocate(
541 	vm_object_size_t        size, vm_map_serial_t provenance)
542 {
543 	vm_object_t object;
544 
545 	object = zalloc_flags(vm_object_zone, Z_WAITOK | Z_NOFAIL);
546 	_vm_object_allocate(size, object, provenance);
547 
548 	return object;
549 }
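/*
 * Minimal usage sketch (compiled out, illustrative only): a caller obtains
 * one reference from vm_object_allocate() and drops it again with
 * vm_object_deallocate() further below.  The size and VM_MAP_SERIAL_NONE
 * provenance are arbitrary example values.
 */
#if 0
static void
example_object_lifecycle(void)
{
	vm_object_t object;

	/* returns a new internal object holding a single reference */
	object = vm_object_allocate(PAGE_SIZE, VM_MAP_SERIAL_NONE);

	/* ... enter it in a map, fault pages into it, etc ... */

	/* releasing the last reference allows the object to be terminated */
	vm_object_deallocate(object);
}
#endif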
550 
551 TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
552 
553 /*
554  *	vm_object_bootstrap:
555  *
556  *	Initialize the VM objects module.
557  */
558 __startup_func
559 void
560 vm_object_bootstrap(void)
561 {
562 	vm_size_t       vm_object_size;
563 
564 	assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
565 
566 	vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
567 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
568 
569 	vm_object_zone = zone_create("vm objects", vm_object_size,
570 	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED | ZC_VM);
571 
572 	queue_init(&vm_object_cached_list);
573 
574 	queue_init(&vm_object_reaper_queue);
575 
576 	/*
577 	 *	Initialize the "kernel object"
578 	 */
579 
580 	/*
581 	 * Note that in the following size specifications, we need to add 1 because
582 	 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
583 	 */
584 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_default, VM_MAP_SERIAL_SPECIAL);
585 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object, VM_MAP_SERIAL_SPECIAL);
586 	kernel_object_default->copy_strategy = MEMORY_OBJECT_COPY_NONE;
587 	compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
588 	kernel_object_default->no_tag_update = TRUE;
589 
590 	/*
591 	 * The object to hold retired VM pages.
592 	 */
593 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object, VM_MAP_SERIAL_SPECIAL);
594 	retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
595 
596 #if HAS_MTE
597 	/*
598 	 * The object to hold MTE tag pages.
599 	 */
600 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, mte_tags_object, VM_MAP_SERIAL_SPECIAL);
601 	mte_tags_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
602 
603 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_tagged, VM_MAP_SERIAL_SPECIAL);
604 	kernel_object_tagged->copy_strategy = MEMORY_OBJECT_COPY_NONE;
605 	kernel_object_tagged->no_tag_update = TRUE;
606 	kernel_object_tagged->wimg_bits = VM_WIMG_MTE;
607 #endif /* HAS_MTE */
608 
609 	/**
610 	 * The object to hold pages owned by exclaves.
611 	 */
612 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, exclaves_object, VM_MAP_SERIAL_SPECIAL);
613 	exclaves_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
614 #if HAS_MTE
615 	/**
616 	 * The object to hold MTE tag pages owned by exclaves.
617 	 */
618 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, exclaves_object_tagged, VM_MAP_SERIAL_SPECIAL);
619 	exclaves_object_tagged->copy_strategy = MEMORY_OBJECT_COPY_NONE;
620 	exclaves_object_tagged->no_tag_update = TRUE;
621 	exclaves_object_tagged->wimg_bits = VM_WIMG_MTE;
622 #endif /* HAS_MTE */
623 }
624 
625 #if CONFIG_IOSCHED
626 void
627 vm_io_reprioritize_init(void)
628 {
629 	kern_return_t   result;
630 
631 	result = mpsc_daemon_queue_init_with_thread(&io_reprioritize_q, io_reprioritize, BASEPRI_KERNEL,
632 	    "VM_io_reprioritize_thread", MPSC_DAEMON_INIT_NONE);
633 	if (result != KERN_SUCCESS) {
634 		panic("Unable to start I/O reprioritization thread (%d)", result);
635 	}
636 }
637 #endif
638 
639 void
640 vm_object_reaper_init(void)
641 {
642 	kern_return_t   kr;
643 	thread_t        thread;
644 
645 	kr = kernel_thread_start_priority(
646 		(thread_continue_t) vm_object_reaper_thread,
647 		NULL,
648 		BASEPRI_VM,
649 		&thread);
650 	if (kr != KERN_SUCCESS) {
651 		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
652 	}
653 	thread_set_thread_name(thread, "VM_object_reaper_thread");
654 	thread_deallocate(thread);
655 }
656 
657 
658 /*
659  *	vm_object_deallocate:
660  *
661  *	Release a reference to the specified object,
662  *	gained either through a vm_object_allocate
663  *	or a vm_object_reference call.  When all references
664  *	are gone, storage associated with this object
665  *	may be relinquished.
666  *
667  *	No object may be locked.
668  */
669 unsigned long vm_object_deallocate_shared_successes = 0;
670 unsigned long vm_object_deallocate_shared_failures = 0;
671 unsigned long vm_object_deallocate_shared_swap_failures = 0;
672 
673 __private_extern__ void
674 vm_object_deallocate(
675 	vm_object_t     object)
676 {
677 	vm_object_t     shadow = VM_OBJECT_NULL;
678 
679 //	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
680 //	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */
681 
682 	if (object == VM_OBJECT_NULL) {
683 		return;
684 	}
685 
686 	if (is_kernel_object(object) || object == compressor_object || object == retired_pages_object) {
687 		vm_object_lock_shared(object);
688 
689 		if (os_ref_get_count_raw(&object->ref_count) == 1) {
690 			if (is_kernel_object(object)) {
691 				panic("vm_object_deallocate: losing a kernel_object");
692 			} else if (object == retired_pages_object) {
693 				panic("vm_object_deallocate: losing retired_pages_object");
694 			} else {
695 				panic("vm_object_deallocate: losing compressor_object");
696 			}
697 		}
698 
699 		os_ref_release_live_raw(&object->ref_count, &vm_object_refgrp);
700 
701 		vm_object_unlock(object);
702 		return;
703 	}
704 
705 	if (os_ref_get_count_raw(&object->ref_count) == 2 &&
706 	    object->named) {
707 		/*
708 		 * This "named" object's reference count is about to
709 		 * drop from 2 to 1:
710 		 * we'll need to call memory_object_last_unmap().
711 		 */
712 	} else if (os_ref_get_count_raw(&object->ref_count) == 2 &&
713 	    object->internal &&
714 	    object->shadow != VM_OBJECT_NULL) {
715 		/*
716 		 * This internal object's reference count is about to
717 		 * drop from 2 to 1 and it has a shadow object:
718 		 * we'll want to try and collapse this object with its
719 		 * shadow.
720 		 */
721 	} else if (os_ref_get_count_raw(&object->ref_count) >= 2) {
722 		UInt32          original_ref_count;
723 		volatile UInt32 *ref_count_p;
724 		Boolean         atomic_swap;
725 
726 		/*
727 		 * The object currently looks like it is not being
728 		 * kept alive solely by the reference we're about to release.
729 		 * Let's try and release our reference without taking
730 		 * all the locks we would need if we had to terminate the
731 		 * object (cache lock + exclusive object lock).
732 		 * Lock the object "shared" to make sure we don't race with
733 		 * anyone holding it "exclusive".
734 		 */
735 		vm_object_lock_shared(object);
736 		ref_count_p = (volatile UInt32 *) &object->ref_count;
737 		original_ref_count = os_ref_get_count_raw(&object->ref_count);
738 		/*
739 		 * Test again as "ref_count" could have changed.
740 		 * "named" shouldn't change.
741 		 */
742 		if (original_ref_count == 2 &&
743 		    object->named) {
744 			/* need to take slow path for m_o_last_unmap() */
745 			atomic_swap = FALSE;
746 		} else if (original_ref_count == 2 &&
747 		    object->internal &&
748 		    object->shadow != VM_OBJECT_NULL) {
749 			/* need to take slow path for vm_object_collapse() */
750 			atomic_swap = FALSE;
751 		} else if (original_ref_count < 2) {
752 			/* need to take slow path for vm_object_terminate() */
753 			atomic_swap = FALSE;
754 		} else {
755 			/* try an atomic update with the shared lock */
756 			atomic_swap = OSCompareAndSwap(
757 				original_ref_count,
758 				original_ref_count - 1,
759 				(UInt32 *) &object->ref_count);
760 			if (atomic_swap == FALSE) {
761 				vm_object_deallocate_shared_swap_failures++;
762 				/* fall back to the slow path... */
763 			}
764 		}
765 
766 		vm_object_unlock(object);
767 
768 		if (atomic_swap) {
769 			/*
770 			 * ref_count was updated atomically !
771 			 */
772 			vm_object_deallocate_shared_successes++;
773 			return;
774 		}
775 
776 		/*
777 		 * Someone else updated the ref_count at the same
778 		 * time and we lost the race.  Fall back to the usual
779 		 * slow but safe path...
780 		 */
781 		vm_object_deallocate_shared_failures++;
782 	}
783 
784 	while (object != VM_OBJECT_NULL) {
785 		vm_object_lock(object);
786 
787 		assert(os_ref_get_count_raw(&object->ref_count) > 0);
788 
789 		/*
790 		 *	If the object has a named reference, and only
791 		 *	that reference would remain, inform the pager
792 		 *	about the last "mapping" reference going away.
793 		 */
794 		if ((os_ref_get_count_raw(&object->ref_count) == 2) && (object->named)) {
795 			memory_object_t pager = object->pager;
796 
797 			/* Notify the Pager that there are no */
798 			/* more mappers for this object */
799 
800 			if (pager != MEMORY_OBJECT_NULL) {
801 				vm_object_mapping_wait(object, THREAD_UNINT);
802 				/* object might have lost its pager while waiting */
803 				pager = object->pager;
804 				if (object->ref_count == 2 &&
805 				    object->named &&
806 				    pager != MEMORY_OBJECT_NULL) {
807 					vm_object_mapping_begin(object);
808 					assert(pager->mo_last_unmap_ctid == 0);
809 					/*
810 					 * Signal that we're the thread that triggered
811 					 * the memory_object_last_unmap(), so that we
812 					 * don't deadlock in vm_object_destroy() if this
813 					 * was the last reference and we're releasing
814 					 * the pager there.
815 					 */
816 					pager->mo_last_unmap_ctid = thread_get_ctid(current_thread());
817 					vm_object_unlock(object);
818 
819 					memory_object_last_unmap(pager);
820 					/* pager might no longer be valid now */
821 					pager = MEMORY_OBJECT_NULL;
822 
823 					vm_object_lock(object);
824 
825 					vm_object_mapping_end(object);
826 					pager = object->pager;
827 					if (pager != MEMORY_OBJECT_NULL) {
828 						/*
829 						 * The pager is still there, so reset its
830 						 * "mo_last_unmap_ctid" now that we're done.
831 						 */
832 						assert3u(pager->mo_last_unmap_ctid, ==, thread_get_ctid(current_thread()));
833 						pager->mo_last_unmap_ctid = 0;
834 					}
835 				}
836 			}
837 			assert(os_ref_get_count_raw(&object->ref_count) > 0);
838 		}
839 
840 		/*
841 		 *	Lose the reference. If other references
842 		 *	remain, then we are done, unless we need
843 		 *	to retry a cache trim.
844 		 *	If it is the last reference, then keep it
845 		 *	until any pending initialization is completed.
846 		 */
847 
848 		/* if the object is terminating, it cannot go into */
849 		/* the cache and we obviously should not call      */
850 		/* terminate again.  */
851 
852 		if ((os_ref_get_count_raw(&object->ref_count) > 1) ||
853 		    object->terminating) {
854 			vm_object_lock_assert_exclusive(object);
855 			os_ref_release_live_locked_raw(&object->ref_count,
856 			    &vm_object_refgrp);
857 
858 			if (os_ref_get_count_raw(&object->ref_count) == 1 &&
859 			    object->shadow != VM_OBJECT_NULL) {
860 				/*
861 				 * There's only one reference left on this
862 				 * VM object.  We can't tell if it's a valid
863 				 * one (from a mapping for example) or if this
864 				 * object is just part of a possibly stale and
865 				 * useless shadow chain.
866 				 * We would like to try and collapse it into
867 				 * its parent, but we don't have any pointers
868 				 * back to this parent object.
869 				 * But we can try and collapse this object with
870 				 * its own shadows, in case these are useless
871 				 * too...
872 				 * We can't bypass this object though, since we
873 				 * don't know if this last reference on it is
874 				 * meaningful or not.
875 				 */
876 				vm_object_collapse(object, 0, FALSE);
877 			}
878 			vm_object_unlock(object);
879 			return;
880 		}
881 
882 		/*
883 		 *	We have to wait for initialization
884 		 *	before destroying or caching the object.
885 		 */
886 
887 		if (object->pager_created && !object->pager_ready) {
888 			assert(!object->can_persist);
889 			vm_object_sleep(object,
890 			    VM_OBJECT_EVENT_PAGER_READY,
891 			    THREAD_UNINT,
892 			    LCK_SLEEP_UNLOCK);
893 			continue;
894 		}
895 
896 		/*
897 		 *	Terminate this object. If it had a shadow,
898 		 *	then deallocate it; otherwise, if we need
899 		 *	to retry a cache trim, do so now; otherwise,
900 		 *	we are done. "pageout" objects have a shadow,
901 		 *	but maintain a "paging reference" rather than
902 		 *	a normal reference.
903 		 */
904 		shadow = object->pageout ? VM_OBJECT_NULL : object->shadow;
905 
906 		if (vm_object_terminate(object) != KERN_SUCCESS) {
907 			return;
908 		}
909 		if (shadow != VM_OBJECT_NULL) {
910 			object = shadow;
911 			continue;
912 		}
913 		return;
914 	}
915 }
916 
917 
918 
919 vm_page_t
920 vm_object_page_grab(
921 	vm_object_t     object)
922 {
923 	vm_page_t       p, next_p;
924 	int             p_limit = 0;
925 	int             p_skipped = 0;
926 
927 	vm_object_lock_assert_exclusive(object);
928 
929 	next_p = (vm_page_t)vm_page_queue_first(&object->memq);
930 	p_limit = MIN(50, object->resident_page_count);
931 
932 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
933 		p = next_p;
934 		next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
935 
936 		if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning ||
937 		    p->vmp_laundry || vm_page_is_fictitious(p)) {
938 			goto move_page_in_obj;
939 		}
940 
941 		if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
942 			vm_page_lockspin_queues();
943 
944 			if (p->vmp_pmapped) {
945 				int refmod_state;
946 
947 				vm_object_page_grab_pmapped++;
948 
949 				if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
950 					refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
951 
952 					if (refmod_state & VM_MEM_REFERENCED) {
953 						p->vmp_reference = TRUE;
954 					}
955 					if (refmod_state & VM_MEM_MODIFIED) {
956 						SET_PAGE_DIRTY(p, FALSE);
957 					}
958 				}
959 				if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
960 					vm_page_lockconvert_queues();
961 					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
962 
963 					if (refmod_state & VM_MEM_REFERENCED) {
964 						p->vmp_reference = TRUE;
965 					}
966 					if (refmod_state & VM_MEM_MODIFIED) {
967 						SET_PAGE_DIRTY(p, FALSE);
968 					}
969 
970 					if (p->vmp_dirty == FALSE) {
971 						goto take_page;
972 					}
973 				}
974 			}
975 			if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
976 				vm_page_activate(p);
977 
978 				counter_inc(&vm_statistics_reactivations);
979 				vm_object_page_grab_reactivations++;
980 			}
981 			vm_page_unlock_queues();
982 move_page_in_obj:
983 			vm_page_queue_remove(&object->memq, p, vmp_listq);
984 			vm_page_queue_enter(&object->memq, p, vmp_listq);
985 
986 			p_skipped++;
987 			continue;
988 		}
989 		vm_page_lockspin_queues();
990 take_page:
991 		vm_page_free_prepare_queues(p);
992 		vm_object_page_grab_returned++;
993 		vm_object_page_grab_skipped += p_skipped;
994 
995 		vm_page_unlock_queues();
996 
997 		vm_page_free_prepare_object(p, TRUE);
998 
999 		return p;
1000 	}
1001 	vm_object_page_grab_skipped += p_skipped;
1002 	vm_object_page_grab_failed++;
1003 
1004 	return NULL;
1005 }
1006 
1007 #if COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1
1008 
1009 /* This is the actual number of filling cheads that will be used.
1010  * It must satisfy 1 <= vm_cheads <= COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT. */
1011 TUNABLE_WRITEABLE(uint32_t, vm_cheads, "vm_cheads", 8);
1012 /* This determines which criterion is used for selecting the chead:
1013  * either the PID of the grabber task or its coalition */
1014 TUNABLE_WRITEABLE(vm_chead_select_t, vm_chead_select, "vm_chead_select", CSEL_BY_PID);
1015 /* This determines if the grabber-id is set on every page-fault insert or just the first insert */
1016 TUNABLE_WRITEABLE(boolean_t, vm_chead_rehint, "vm_chead_rehint", false);
1017 
1018 /*
1019  * This function is called from vm_page_insert_internal(). When it's called from the context
1020  * of a vm_fault where a task has just requested a new page or paged in an existing page,
1021  * this function records some bits of information about the task. These bits are then
1022  * going to be used when the page is sent to the compressor to select the compressor-head
1023  * that will be used.
1024  * The goal of this is to make pages that come from the same task/coalition be compressed to the
1025  * same compressor segment, which helps the locality of swap-in and decompression.
1026  * This optimization relies on the heuristic assumption that the vm_object is only ever mapped
1027  * in a single task/coalition. vm_objects that violate this would not benefit from this optimization.
1028  * See also vm_pageout_select_filling_chead()
1029  */
1030 void
1031 vm_object_set_chead_hint(
1032 	vm_object_t     object)
1033 {
1034 	if (!object->internal) {
1035 		/* not relevant for pages that are not going to get to the compressor */
1036 		return;
1037 	}
1038 
1039 	if (object->vo_chead_hint != 0 && !vm_chead_rehint) {
1040 		/* there's already a value there and we don't want to set it again */
1041 		return;
1042 	}
1043 	task_t cur_task = current_task_early();
1044 	if (cur_task == TASK_NULL || cur_task == kernel_task || vm_cheads <= 1) {
1045 		/* avoid doing extra work for the kernel map case */
1046 		object->vo_chead_hint = 0;
1047 		return;
1048 	}
1049 	int value = 0;
1050 	if (vm_chead_select == CSEL_BY_PID) {
1051 		value = task_pid(cur_task);
1052 	} else if (vm_chead_select == CSEL_BY_COALITION) {
1053 		/* The choice of coalition type is not very significant here since both
1054 		 * types seem to have a similar task division. */
1055 		coalition_t coalition = task_get_coalition(cur_task, COALITION_TYPE_JETSAM);
1056 		if (coalition != COALITION_NULL) {
1057 			value = coalition_id(coalition);
1058 		}
1059 	}
1060 	uint32_t mod_by = MIN(vm_cheads, COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT);
1061 	object->vo_chead_hint = (uint8_t)value % mod_by;
1062 }
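/*
 * Worked example of the computation above (illustrative numbers, assuming
 * COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT >= 8): with vm_chead_select ==
 * CSEL_BY_PID, vm_cheads == 8 and task_pid() == 1234, the value is first
 * truncated to uint8_t (1234 & 0xff == 210) and then reduced modulo 8,
 * so vo_chead_hint becomes 210 % 8 == 2.
 */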
1063 
1064 #endif /* COMPRESSOR_PAGEOUT_CHEADS_MAX_COUNT > 1 */
1065 
1066 #define EVICT_PREPARE_LIMIT     64
1067 #define EVICT_AGE               10
1068 
1069 static  clock_sec_t     vm_object_cache_aging_ts = 0;
1070 
1071 static void
1072 vm_object_cache_remove_locked(
1073 	vm_object_t     object)
1074 {
1075 	assert(object->purgable == VM_PURGABLE_DENY);
1076 
1077 	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
1078 	object->cached_list.next = NULL;
1079 	object->cached_list.prev = NULL;
1080 
1081 	vm_object_cached_count--;
1082 }
1083 
1084 void
1085 vm_object_cache_remove(
1086 	vm_object_t     object)
1087 {
1088 	vm_object_cache_lock_spin();
1089 
1090 	if (object->cached_list.next &&
1091 	    object->cached_list.prev) {
1092 		vm_object_cache_remove_locked(object);
1093 	}
1094 
1095 	vm_object_cache_unlock();
1096 }
1097 
1098 void
1099 vm_object_cache_add(
1100 	vm_object_t     object)
1101 {
1102 	clock_sec_t sec;
1103 	clock_nsec_t nsec;
1104 
1105 	assert(object->purgable == VM_PURGABLE_DENY);
1106 
1107 	if (object->resident_page_count == 0) {
1108 		return;
1109 	}
1110 	if (object->vo_ledger_tag) {
1111 		/*
1112 		 * We can't add an "owned" object to the cache because
1113 		 * the "vo_owner" and "vo_cache_ts" fields are part of the
1114 		 * same "union" and can't be used at the same time.
1115 		 */
1116 		return;
1117 	}
1118 	clock_get_system_nanotime(&sec, &nsec);
1119 
1120 	vm_object_cache_lock_spin();
1121 
1122 	if (object->cached_list.next == NULL &&
1123 	    object->cached_list.prev == NULL) {
1124 		queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
1125 		object->vo_cache_ts = sec + EVICT_AGE;
1126 		object->vo_cache_pages_to_scan = object->resident_page_count;
1127 
1128 		vm_object_cached_count++;
1129 		vm_object_cache_adds++;
1130 	}
1131 	vm_object_cache_unlock();
1132 }
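/*
 * Example of the aging scheme above (illustrative numbers): an object added
 * when the system clock reads sec == 1000 gets vo_cache_ts == 1000 +
 * EVICT_AGE == 1010, so vm_object_cache_evict() below will skip it (and stop
 * scanning the cached list) until the clock reaches 1010.
 */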
1133 
1134 int
1135 vm_object_cache_evict(
1136 	int     num_to_evict,
1137 	int     max_objects_to_examine)
1138 {
1139 	vm_object_t     object = VM_OBJECT_NULL;
1140 	vm_object_t     next_obj = VM_OBJECT_NULL;
1141 	vm_page_t       local_free_q = VM_PAGE_NULL;
1142 	vm_page_t       p;
1143 	vm_page_t       next_p;
1144 	int             object_cnt = 0;
1145 	vm_page_t       ep_array[EVICT_PREPARE_LIMIT];
1146 	int             ep_count;
1147 	int             ep_limit;
1148 	int             ep_index;
1149 	int             ep_freed = 0;
1150 	int             ep_moved = 0;
1151 	uint32_t        ep_skipped = 0;
1152 	clock_sec_t     sec;
1153 	clock_nsec_t    nsec;
1154 
1155 	KDBG_DEBUG(0x13001ec | DBG_FUNC_START);
1156 	/*
1157 	 * do a couple of quick checks to see if it's
1158 	 * worthwhile grabbing the lock
1159 	 */
1160 	if (queue_empty(&vm_object_cached_list)) {
1161 		KDBG_DEBUG(0x13001ec | DBG_FUNC_END);
1162 		return 0;
1163 	}
1164 	clock_get_system_nanotime(&sec, &nsec);
1165 	if (max_objects_to_examine == INT_MAX) {
1166 		/* evict all pages from all cached objects now */
1167 		sec = (clock_sec_t)-1;
1168 	}
1169 
1170 	/*
1171 	 * the object on the head of the queue has not
1172 	 * yet sufficiently aged
1173 	 */
1174 	if (sec < vm_object_cache_aging_ts) {
1175 		KDBG_DEBUG(0x13001ec | DBG_FUNC_END);
1176 		return 0;
1177 	}
1178 	/*
1179 	 * don't need the queue lock to find
1180 	 * and lock an object on the cached list
1181 	 */
1182 	vm_page_unlock_queues();
1183 
1184 	vm_object_cache_lock_spin();
1185 
1186 	for (;;) {  /* loop for as long as we have objects to process */
1187 		next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1188 
1189 		/* loop to find the next target in the cache_list */
1190 		while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1191 			object = next_obj;
1192 			next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
1193 
1194 			assert(object->purgable == VM_PURGABLE_DENY);
1195 
1196 			if (sec < object->vo_cache_ts) { // reached the point in the queue beyond the time we started
1197 				KDBG_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec);
1198 
1199 				vm_object_cache_aging_ts = object->vo_cache_ts;
1200 				object = VM_OBJECT_NULL; /* this will cause to break away from the outer loop */
1201 				break;
1202 			}
1203 			if (!vm_object_lock_try_scan(object)) {
1204 				/*
1205 				 * just skip over this guy for now... if we find
1206 				 * an object to steal pages from, we'll revisit in a bit...
1207 				 * hopefully, the lock will have cleared
1208 				 */
1209 				KDBG_DEBUG(0x13001f8, object, object->resident_page_count);
1210 
1211 				object = VM_OBJECT_NULL;
1212 				continue;
1213 			}
1214 			if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1215 				/*
1216 				 * this case really shouldn't happen, but it's not fatal
1217 				 * so deal with it... if we don't remove the object from
1218 				 * the list, we'll never move past it.
1219 				 */
1220 				KDBG_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved);
1221 
1222 				vm_object_cache_remove_locked(object);
1223 				vm_object_unlock(object);
1224 				object = VM_OBJECT_NULL;
1225 				continue;
1226 			}
1227 			/*
1228 			 * we have a locked object with pages...
1229 			 * time to start harvesting
1230 			 */
1231 			break;
1232 		}
1233 		vm_object_cache_unlock();
1234 
1235 		if (object == VM_OBJECT_NULL) {
1236 			break;
1237 		}
1238 
1239 		/*
1240 		 * object is locked at this point and
1241 		 * has resident pages
1242 		 */
1243 		next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1244 
1245 		/*
1246 		 * break the page scan into 2 pieces to minimize the time spent
1247 		 * behind the page queue lock...
1248 		 * the list of pages on these unused objects is likely to be cold
1249 		 * w/r to the cpu cache which increases the time to scan the list
1250 		 * tenfold...  and we may have a 'run' of pages we can't utilize that
1251 		 * needs to be skipped over...
1252 		 */
1253 		if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
1254 			ep_limit = EVICT_PREPARE_LIMIT;
1255 		}
1256 		ep_count = 0;
1257 
1258 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1259 			p = next_p;
1260 			next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
1261 
1262 			object->vo_cache_pages_to_scan--;
1263 
1264 			if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
1265 				vm_page_queue_remove(&object->memq, p, vmp_listq);
1266 				vm_page_queue_enter(&object->memq, p, vmp_listq);
1267 
1268 				ep_skipped++;
1269 				continue;
1270 			}
1271 			if (!object->internal &&
1272 			    object->pager_created &&
1273 			    object->pager == NULL) {
1274 				/*
1275 				 * This object has lost its pager, most likely
1276 				 * due to a force-unmount or ungraft.  The pager
1277 				 * will never come back, so there's no point in
1278 				 * keeping these pages, even if modified.
1279 				 * The object could still be mapped, so we need
1280 				 * to clear any PTE that might still be pointing
1281 				 * at this physical page before we can reclaim
1282 				 * it.
1283 				 */
1284 				if (p->vmp_pmapped) {
1285 					int refmod;
1286 					refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
1287 					if (refmod & VM_MEM_MODIFIED) {
1288 						assert(p->vmp_wpmapped);
1289 						p->vmp_dirty = TRUE;
1290 					}
1291 				}
1292 //				printf("FBDP %s:%d object %p reason %d page %p offset 0x%llx pmapped %d wpmapped %d xpmapped %d dirty %d precious %d\n", __FUNCTION__, __LINE__, object, object->no_pager_reason, p, p->vmp_offset, p->vmp_pmapped, p->vmp_wpmapped, p->vmp_xpmapped, p->vmp_dirty, p->vmp_precious);
1293 				/* clear any reason to skip this page below */
1294 				p->vmp_dirty = FALSE;
1295 				p->vmp_precious = FALSE;
1296 				p->vmp_wpmapped = FALSE;
1297 			}
1298 			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1299 				vm_page_queue_remove(&object->memq, p, vmp_listq);
1300 				vm_page_queue_enter(&object->memq, p, vmp_listq);
1301 
1302 				pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1303 			}
1304 			ep_array[ep_count++] = p;
1305 		}
1306 		KDBG_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved);
1307 
1308 		vm_page_lockspin_queues();
1309 
1310 		for (ep_index = 0; ep_index < ep_count; ep_index++) {
1311 			p = ep_array[ep_index];
1312 
1313 			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1314 				p->vmp_reference = FALSE;
1315 				p->vmp_no_cache = FALSE;
1316 
1317 				/*
1318 				 * we've already filtered out pages that are in the laundry
1319 				 * so if we get here, this page can't be on the pageout queue
1320 				 */
1321 				vm_page_queues_remove(p, FALSE);
1322 				vm_page_enqueue_inactive(p, TRUE);
1323 
1324 				ep_moved++;
1325 			} else {
1326 #if CONFIG_PHANTOM_CACHE
1327 				vm_phantom_cache_add_ghost(p);
1328 #endif
1329 				vm_page_free_prepare_queues(p);
1330 
1331 				assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1332 				/*
1333 				 * Add this page to our list of reclaimed pages,
1334 				 * to be freed later.
1335 				 */
1336 				p->vmp_snext = local_free_q;
1337 				local_free_q = p;
1338 
1339 				ep_freed++;
1340 			}
1341 		}
1342 		vm_page_unlock_queues();
1343 
1344 		KDBG_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved);
1345 
1346 		if (local_free_q) {
1347 			vm_page_free_list(local_free_q, TRUE);
1348 			local_free_q = VM_PAGE_NULL;
1349 		}
1350 		if (object->vo_cache_pages_to_scan == 0) {
1351 			KDBG_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved);
1352 
1353 			vm_object_cache_remove(object);
1354 
1355 			KDBG_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved);
1356 		}
1357 		/*
1358 		 * done with this object
1359 		 */
1360 		vm_object_unlock(object);
1361 		object = VM_OBJECT_NULL;
1362 
1363 		/*
1364 		 * at this point, we are not holding any locks
1365 		 */
1366 		if ((ep_freed + ep_moved) >= num_to_evict) {
1367 			/*
1368 			 * we've reached our target for the
1369 			 * number of pages to evict
1370 			 */
1371 			break;
1372 		}
1373 		vm_object_cache_lock_spin();
1374 	}
1375 	/*
1376 	 * put the page queues lock back to the caller's
1377 	 * idea of it
1378 	 */
1379 	vm_page_lock_queues();
1380 
1381 	vm_object_cache_pages_freed += ep_freed;
1382 	vm_object_cache_pages_moved += ep_moved;
1383 	vm_object_cache_pages_skipped += ep_skipped;
1384 
1385 	KDBG_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed);
1386 //	printf("FBDP %s(0x%x,0x%x) freed %d moved %d skipped %u\n", __func__, num_to_evict, max_objects_to_examine, ep_freed, ep_moved, ep_skipped);
1387 	return ep_freed;
1388 }
1389 
1390 int vm_object_cache_evict_all(void);
1391 int
vm_object_cache_evict_all(void)1392 vm_object_cache_evict_all(void)
1393 {
1394 	int freed;
1395 
1396 	vm_page_lock_queues();
1397 	freed = vm_object_cache_evict(INT_MAX, INT_MAX);
1398 	vm_page_unlock_queues();
1399 	printf("%s: freed %d\n", __func__, freed);
1400 	return freed;
1401 }
1402 
1403 /*
1404  *	Routine:	vm_object_terminate
1405  *	Purpose:
1406  *		Free all resources associated with a vm_object.
1407  *	In/out conditions:
1408  *		Upon entry, the object must be locked,
1409  *		and the object must have exactly one reference.
1410  *
1411  *		The shadow object reference is left alone.
1412  *
1413  *		The object must be unlocked if it's found that pages
1414  *		must be flushed to a backing object.  If someone
1415  *		manages to map the object while it is being flushed
1416  *		the object is returned unlocked and unchanged.  Otherwise,
1417  *		upon exit, the cache will be unlocked, and the
1418  *		object will cease to exist.
1419  */
1420 static kern_return_t
1421 vm_object_terminate(
1422 	vm_object_t     object)
1423 {
1424 	vm_object_t     shadow_object;
1425 
1426 	vm_object_lock_assert_exclusive(object);
1427 
1428 	if (!object->pageout && (!object->internal && object->can_persist) &&
1429 	    (object->pager != NULL || object->shadow_severed)) {
1430 		/*
1431 		 * Clear pager_trusted bit so that the pages get yanked
1432 		 * out of the object instead of cleaned in place.  This
1433 		 * prevents a deadlock in XMM and makes more sense anyway.
1434 		 */
1435 		VM_OBJECT_SET_PAGER_TRUSTED(object, FALSE);
1436 
1437 		vm_object_reap_pages(object, REAP_TERMINATE);
1438 	}
1439 	/*
1440 	 *	Make sure the object isn't already being terminated
1441 	 */
1442 	if (object->terminating) {
1443 		vm_object_lock_assert_exclusive(object);
1444 		os_ref_release_live_locked_raw(&object->ref_count, &vm_object_refgrp);
1445 		vm_object_unlock(object);
1446 		return KERN_FAILURE;
1447 	}
1448 
1449 	/*
1450 	 * Did somebody get a reference to the object while we were
1451 	 * cleaning it?
1452 	 */
1453 	if (os_ref_get_count_raw(&object->ref_count) != 1) {
1454 		vm_object_lock_assert_exclusive(object);
1455 		os_ref_release_live_locked_raw(&object->ref_count, &vm_object_refgrp);
1456 		vm_object_unlock(object);
1457 		return KERN_FAILURE;
1458 	}
1459 
1460 	/*
1461 	 *	Make sure no one can look us up now.
1462 	 */
1463 
1464 	VM_OBJECT_SET_TERMINATING(object, TRUE);
1465 	VM_OBJECT_SET_ALIVE(object, FALSE);
1466 
1467 	if (!object->internal &&
1468 	    object->cached_list.next &&
1469 	    object->cached_list.prev) {
1470 		vm_object_cache_remove(object);
1471 	}
1472 
1473 	/*
1474 	 *	Detach the object from its shadow if we are the shadow's
1475 	 *	copy. The reference we hold on the shadow must be dropped
1476 	 *	by our caller.
1477 	 */
1478 	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1479 	    !(object->pageout)) {
1480 		vm_object_lock(shadow_object);
1481 		if (shadow_object->vo_copy == object) {
1482 			VM_OBJECT_COPY_SET(shadow_object, VM_OBJECT_NULL);
1483 		}
1484 		vm_object_unlock(shadow_object);
1485 	}
1486 
1487 	if (object->paging_in_progress != 0 ||
1488 	    object->activity_in_progress != 0) {
1489 		/*
1490 		 * There are still some paging_in_progress references
1491 		 * on this object, meaning that there are some paging
1492 		 * or other I/O operations in progress for this VM object.
1493 		 * Such operations take some paging_in_progress references
1494 		 * up front to ensure that the object doesn't go away, but
1495 		 * they may also need to acquire a reference on the VM object,
1496 		 * to map it in kernel space, for example.  That means that
1497 		 * they may end up releasing the last reference on the VM
1498 		 * object, triggering its termination, while still holding
1499 		 * paging_in_progress references.  Waiting for these
1500 		 * pending paging_in_progress references to go away here would
1501 		 * deadlock.
1502 		 *
1503 		 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1504 		 * complete the VM object termination if it still holds
1505 		 * paging_in_progress references at this point.
1506 		 *
1507 		 * No new paging_in_progress should appear now that the
1508 		 * VM object is "terminating" and not "alive".
1509 		 */
1510 		vm_object_reap_async(object);
1511 		vm_object_unlock(object);
1512 		/*
1513 		 * Return KERN_FAILURE to let the caller know that we
1514 		 * haven't completed the termination and it can't drop this
1515 		 * object's reference on its shadow object yet.
1516 		 * The reaper thread will take care of that once it has
1517 		 * completed this object's termination.
1518 		 */
1519 		return KERN_FAILURE;
1520 	}
1521 	/*
1522 	 * complete the VM object termination
1523 	 */
1524 	vm_object_reap(object);
1525 	object = VM_OBJECT_NULL;
1526 
1527 	/*
1528 	 * the object lock was released by vm_object_reap()
1529 	 *
1530 	 * KERN_SUCCESS means that this object has been terminated
1531 	 * and no longer needs its shadow object but still holds a
1532 	 * reference on it.
1533 	 * The caller is responsible for dropping that reference.
1534 	 * We can't call vm_object_deallocate() here because that
1535 	 * would create a recursion.
1536 	 */
1537 	return KERN_SUCCESS;
1538 }
1539 
1540 
1541 /*
1542  * vm_object_reap():
1543  *
1544  * Complete the termination of a VM object after it's been marked
1545  * as "terminating" and "!alive" by vm_object_terminate().
1546  *
1547  * The VM object must be locked by caller.
1548  * The lock will be released on return and the VM object is no longer valid.
1549  */
1550 
1551 void
1552 vm_object_reap(
1553 	vm_object_t object)
1554 {
1555 	memory_object_t         pager;
1556 	os_ref_count_t          ref_count;
1557 
1558 	vm_object_lock_assert_exclusive(object);
1559 	assert(object->paging_in_progress == 0);
1560 	assert(object->activity_in_progress == 0);
1561 
1562 	vm_object_reap_count++;
1563 
1564 	/*
1565 	 * Disown this purgeable object to cleanup its owner's purgeable
1566 	 * ledgers.  We need to do this before disconnecting the object
1567 	 * from its pager, to properly account for compressed pages.
1568 	 */
1569 	if (/* object->internal && */
1570 		(object->purgable != VM_PURGABLE_DENY ||
1571 		object->vo_ledger_tag)) {
1572 		int ledger_flags;
1573 		kern_return_t kr;
1574 
1575 		ledger_flags = 0;
1576 		assert(!object->alive);
1577 		assert(object->terminating);
1578 		kr = vm_object_ownership_change(object,
1579 		    VM_LEDGER_TAG_NONE,
1580 		    NULL,                    /* no owner */
1581 		    ledger_flags,
1582 		    FALSE);                  /* task_objq not locked */
1583 		assert(kr == KERN_SUCCESS);
1584 		assert(object->vo_owner == NULL);
1585 	}
1586 
1587 #if DEVELOPMENT || DEBUG
1588 	if (object->object_is_shared_cache &&
1589 	    object->pager != NULL &&
1590 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1591 		OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1592 	}
1593 #endif /* DEVELOPMENT || DEBUG */
1594 
1595 	pager = object->pager;
1596 	object->pager = MEMORY_OBJECT_NULL;
1597 
1598 	if (pager != MEMORY_OBJECT_NULL) {
1599 		memory_object_control_disable(&object->pager_control);
1600 	}
1601 
1602 	ref_count = os_ref_release_locked_raw(&object->ref_count,
1603 	    &vm_object_refgrp);
1604 	if (__improbable(ref_count != 0)) {
1605 		panic("Attempting to deallocate vm_object with outstanding refs: %u",
1606 		    ref_count);
1607 	}
1608 
1609 	/*
1610 	 * remove from purgeable queue if it's on
1611 	 */
1612 	if (object->internal) {
1613 		assert(VM_OBJECT_OWNER(object) == TASK_NULL);
1614 
1615 		VM_OBJECT_UNWIRED(object);
1616 
1617 		if (object->purgable == VM_PURGABLE_DENY) {
1618 			/* not purgeable: nothing to do */
1619 		} else if (object->purgable == VM_PURGABLE_VOLATILE) {
1620 			purgeable_q_t queue;
1621 
1622 			queue = vm_purgeable_object_remove(object);
1623 			assert(queue);
1624 
1625 			if (object->purgeable_when_ripe) {
1626 				/*
1627 				 * Must take page lock for this -
1628 				 * using it to protect token queue
1629 				 */
1630 				vm_page_lock_queues();
1631 				vm_purgeable_token_delete_first(queue);
1632 
1633 				assert(queue->debug_count_objects >= 0);
1634 				vm_page_unlock_queues();
1635 			}
1636 
1637 			/*
1638 			 * Update "vm_page_purgeable_count" in bulk and mark
1639 			 * object as VM_PURGABLE_EMPTY to avoid updating
1640 			 * "vm_page_purgeable_count" again in vm_page_remove()
1641 			 * when reaping the pages.
1642 			 */
1643 			unsigned int delta;
1644 			assert(object->resident_page_count >=
1645 			    object->wired_page_count);
1646 			delta = (object->resident_page_count -
1647 			    object->wired_page_count);
1648 			if (delta != 0) {
1649 				assert(vm_page_purgeable_count >= delta);
1650 				OSAddAtomic(-delta,
1651 				    (SInt32 *)&vm_page_purgeable_count);
1652 			}
1653 			if (object->wired_page_count != 0) {
1654 				assert(vm_page_purgeable_wired_count >=
1655 				    object->wired_page_count);
1656 				OSAddAtomic(-object->wired_page_count,
1657 				    (SInt32 *)&vm_page_purgeable_wired_count);
1658 			}
1659 			VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY);
1660 		} else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1661 		    object->purgable == VM_PURGABLE_EMPTY) {
1662 			/* remove from nonvolatile queue */
1663 			vm_purgeable_nonvolatile_dequeue(object);
1664 		} else {
1665 			panic("object %p in unexpected purgeable state 0x%x",
1666 			    object, object->purgable);
1667 		}
1668 		if (object->transposed &&
1669 		    object->cached_list.next != NULL &&
1670 		    object->cached_list.prev == NULL) {
1671 			/*
1672 			 * object->cached_list.next "points" to the
1673 			 * object that was transposed with this object.
1674 			 */
1675 		} else {
1676 			assert(object->cached_list.next == NULL);
1677 		}
1678 		assert(object->cached_list.prev == NULL);
1679 	}
1680 
1681 	if (object->pageout) {
1682 		/*
1683 		 * free all remaining pages tabled on
1684 		 * this object
1685 		 * clean up its shadow
1686 		 */
1687 		assert(object->shadow != VM_OBJECT_NULL);
1688 
1689 		vm_pageout_object_terminate(object);
1690 	} else if (object->resident_page_count) {
1691 		/*
1692 		 * free all remaining pages tabled on
1693 		 * this object
1694 		 */
1695 		vm_object_reap_pages(object, REAP_REAP);
1696 	}
1697 	assert(vm_page_queue_empty(&object->memq));
1698 	assert(object->paging_in_progress == 0);
1699 	assert(object->activity_in_progress == 0);
1700 	assert(os_ref_get_count_raw(&object->ref_count) == 0);
1701 
1702 	/*
1703 	 * If the pager has not already been released by
1704 	 * vm_object_destroy, we need to terminate it and
1705 	 * release our reference to it here.
1706 	 */
1707 	if (pager != MEMORY_OBJECT_NULL) {
1708 		vm_object_unlock(object);
1709 		vm_object_release_pager(pager);
1710 		vm_object_lock(object);
1711 	}
1712 
1713 	/* kick off anyone waiting on terminating */
1714 	VM_OBJECT_SET_TERMINATING(object, FALSE);
1715 	vm_object_paging_begin(object);
1716 	vm_object_paging_end(object);
1717 	vm_object_unlock(object);
1718 
1719 	object->shadow = VM_OBJECT_NULL;
1720 
1721 #if VM_OBJECT_TRACKING
1722 	if (vm_object_tracking_btlog) {
1723 		btlog_erase(vm_object_tracking_btlog, object);
1724 	}
1725 #endif /* VM_OBJECT_TRACKING */
1726 
1727 	vm_object_lock_destroy(object);
1728 	/*
1729 	 *	Free the space for the object.
1730 	 */
1731 	zfree(vm_object_zone, object);
1732 	object = VM_OBJECT_NULL;
1733 }
1734 
1735 
1736 unsigned int vm_max_batch = 256;
1737 
1738 #define V_O_R_MAX_BATCH 128
1739 
1740 #define BATCH_LIMIT(max)        (vm_max_batch >= max ? max : vm_max_batch)
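
/*
 * A minimal sketch (disabled, example name is hypothetical) of how the
 * clamp above behaves with the defaults in this file: vm_max_batch is 256
 * and V_O_R_MAX_BATCH is 128, so vm_object_reap_pages() below works on at
 * most 128 pages between drops of the page queues lock; tuning
 * vm_max_batch below 128 would shrink that batch.
 */
#if 0
static unsigned int
vm_object_reap_batch_example(void)
{
	/* MIN(vm_max_batch, V_O_R_MAX_BATCH) == MIN(256, 128) == 128 */
	assert(BATCH_LIMIT(V_O_R_MAX_BATCH) == 128);
	return BATCH_LIMIT(V_O_R_MAX_BATCH);
}
#endif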
1741 
1742 static inline vm_page_t
1743 vm_object_reap_freelist(vm_page_t local_free_q, bool do_disconnect, bool set_cache_attr)
1744 {
1745 	vm_page_t page;
1746 	if (local_free_q) {
1747 		if (do_disconnect) {
1748 			_vm_page_list_foreach(page, local_free_q) {
1749 				if (page->vmp_pmapped) {
1750 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(page));
1751 				}
1752 			}
1753 		}
1754 
1755 		if (set_cache_attr) {
1756 #if HAS_MTE
1757 			assert(!local_free_q->vmp_using_mte);
1758 #endif /* HAS_MTE */
1759 			const unified_page_list_t pmap_batch_list = {
1760 				.page_slist = local_free_q,
1761 				.type = UNIFIED_PAGE_LIST_TYPE_VM_PAGE_LIST,
1762 			};
1763 			pmap_batch_set_cache_attributes(&pmap_batch_list, 0);
1764 		}
1765 		vm_page_free_list(local_free_q, TRUE);
1766 	}
1767 	return VM_PAGE_NULL;
1768 }
1769 
1770 void
1771 vm_object_reap_pages(
1772 	vm_object_t     object,
1773 	int             reap_type)
1774 {
1775 	vm_page_t       p;
1776 	vm_page_t       next;
1777 	vm_page_t       local_free_q = VM_PAGE_NULL;
1778 	int             loop_count;
1779 	bool            disconnect_on_release;
1780 	bool            set_cache_attr_needed;
1781 	pmap_flush_context      pmap_flush_context_storage;
1782 
1783 	if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_DATA_FLUSH_CLEAN) {
1784 		/*
1785 		 * We need to disconnect pages from all pmaps before
1786 		 * releasing them to the free list
1787 		 */
1788 		disconnect_on_release = true;
1789 	} else {
1790 		/*
1791 		 * Either the caller has already disconnected the pages
1792 		 * from all pmaps, or we disconnect them here as we add
1793 		 * them to our local list of pages to be released.
1794 		 * No need to re-disconnect them when we release the pages
1795 		 * to the free list.
1796 		 */
1797 		disconnect_on_release = false;
1798 	}
1799 
1800 restart_after_sleep:
1801 	set_cache_attr_needed = false;
1802 	if (object->set_cache_attr) {
1803 		/**
1804 		 * If the cache attributes need to be reset for the pages to
1805 		 * be freed, we clear object->set_cache_attr here so that
1806 		 * our call to vm_page_free_list (which will ultimately call
1807 		 * vm_page_remove() on each page) won't try to reset the
1808 		 * cache attributes on each page individually.  Depending on
1809 		 * the architecture, it may be much faster for us to call
1810 		 * pmap_batch_set_cache_attributes() instead.  Note that
1811 		 * this function must restore object->set_cache_attr in any
1812 		 * case where it is required to drop the object lock, e.g.
1813 		 * to wait for a busy page.
1814 		 */
1815 		object->set_cache_attr = FALSE;
1816 		set_cache_attr_needed = true;
1817 	}
1818 
1819 	if (vm_page_queue_empty(&object->memq)) {
1820 		return;
1821 	}
1822 	loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1823 
1824 	if (reap_type == REAP_PURGEABLE) {
1825 		pmap_flush_context_init(&pmap_flush_context_storage);
1826 	}
1827 
1828 	vm_page_lock_queues();
1829 
1830 	next = (vm_page_t)vm_page_queue_first(&object->memq);
1831 
1832 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
1833 		p = next;
1834 		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
1835 
1836 		if (--loop_count == 0) {
1837 			vm_page_unlock_queues();
1838 
1839 			if (local_free_q) {
1840 				if (reap_type == REAP_PURGEABLE) {
1841 					pmap_flush(&pmap_flush_context_storage);
1842 					pmap_flush_context_init(&pmap_flush_context_storage);
1843 				}
1844 				/*
1845 				 * Free the pages we reclaimed so far
1846 				 * and take a little break to avoid
1847 				 * hogging the page queue lock too long
1848 				 */
1849 				local_free_q = vm_object_reap_freelist(local_free_q,
1850 				    disconnect_on_release, set_cache_attr_needed);
1851 			} else {
1852 				mutex_pause(0);
1853 			}
1854 
1855 			loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1856 
1857 			vm_page_lock_queues();
1858 		}
1859 		if (reap_type == REAP_DATA_FLUSH ||
1860 		    reap_type == REAP_DATA_FLUSH_CLEAN ||
1861 		    reap_type == REAP_TERMINATE) {
1862 			if (p->vmp_busy || p->vmp_cleaning) {
1863 				vm_page_unlock_queues();
1864 				/*
1865 				 * free the pages reclaimed so far
1866 				 */
1867 				local_free_q = vm_object_reap_freelist(local_free_q,
1868 				    disconnect_on_release, set_cache_attr_needed);
1869 
1870 				if (set_cache_attr_needed) {
1871 					object->set_cache_attr = TRUE;
1872 				}
1873 				vm_page_sleep(object, p, THREAD_UNINT, LCK_SLEEP_DEFAULT);
1874 
1875 				goto restart_after_sleep;
1876 			}
1877 			if (p->vmp_laundry && reap_type != REAP_DATA_FLUSH_CLEAN) {
1878 				vm_pageout_steal_laundry(p, TRUE);
1879 			}
1880 		}
1881 		switch (reap_type) {
1882 		case REAP_DATA_FLUSH_CLEAN:
1883 			if (!p->vmp_dirty &&
1884 			    p->vmp_wpmapped &&
1885 			    pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p))) {
1886 				SET_PAGE_DIRTY(p, FALSE);
1887 			}
1888 			if (p->vmp_dirty) {
1889 				/* only flush clean pages */
1890 				continue;
1891 			}
1892 			OS_FALLTHROUGH;
1893 
1894 		case REAP_DATA_FLUSH:
1895 			if (VM_PAGE_WIRED(p)) {
1896 				/*
1897 				 * this is an odd case... perhaps we should
1898 				 * zero-fill this page since we're conceptually
1899 				 * tossing its data at this point, but leaving
1900 				 * it on the object to honor the 'wire' contract
1901 				 */
1902 				continue;
1903 			}
1904 			break;
1905 
1906 		case REAP_PURGEABLE:
1907 			if (VM_PAGE_WIRED(p)) {
1908 				/*
1909 				 * can't purge a wired page
1910 				 */
1911 				vm_page_purged_wired++;
1912 				continue;
1913 			}
1914 			if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
1915 				vm_pageout_steal_laundry(p, TRUE);
1916 			}
1917 
1918 			if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
1919 				/*
1920 				 * page is being acted upon,
1921 				 * so don't mess with it
1922 				 */
1923 				vm_page_purged_others++;
1924 				continue;
1925 			}
1926 			if (p->vmp_busy) {
1927 				/*
1928 				 * deactivate it (if it's pageable) to make
1929 				 * sure that it gets considered by
1930 				 * vm_pageout_scan() later.
1931 				 * vm_pageout_scan() later.
1932 				 */
1933 				if (VM_PAGE_PAGEABLE(p)) {
1934 					vm_page_deactivate(p);
1935 				}
1936 				vm_page_purged_busy++;
1937 				continue;
1938 			}
1939 
1940 			assert(!is_kernel_object(VM_PAGE_OBJECT(p)));
1941 
1942 			/*
1943 			 * we can discard this page...
1944 			 */
1945 			if (p->vmp_pmapped == TRUE) {
1946 				/*
1947 				 * unmap the page
1948 				 */
1949 				pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1950 			}
1951 			vm_page_purged_count++;
1952 
1953 			break;
1954 
1955 		case REAP_TERMINATE:
1956 			if (p->vmp_absent || vm_page_is_private(p)) {
1957 				/*
1958 				 *	For private pages, VM_PAGE_FREE just
1959 				 *	leaves the page structure around for
1960 				 *	its owner to clean up.  For absent
1961 				 *	pages, the structure is returned to
1962 				 *	the appropriate pool.
1963 				 */
1964 				break;
1965 			}
1966 			if (vm_page_is_fictitious(p)) {
1967 				assert(vm_page_is_guard(p));
1968 				break;
1969 			}
1970 			if (!p->vmp_dirty && p->vmp_wpmapped) {
1971 				p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1972 			}
1973 
1974 			if ((p->vmp_dirty || p->vmp_precious) && !VMP_ERROR_GET(p) && object->alive) {
1975 				assert(!object->internal);
1976 
1977 				p->vmp_free_when_done = TRUE;
1978 
1979 				if (!p->vmp_laundry) {
1980 					vm_page_queues_remove(p, TRUE);
1981 					/*
1982 					 * flush page... page will be freed
1983 					 * upon completion of I/O
1984 					 */
1985 					vm_pageout_cluster(p);
1986 				}
1987 				vm_page_unlock_queues();
1988 				/*
1989 				 * free the pages reclaimed so far
1990 				 */
1991 				local_free_q = vm_object_reap_freelist(local_free_q,
1992 				    disconnect_on_release, set_cache_attr_needed);
1993 
1994 				if (set_cache_attr_needed) {
1995 					object->set_cache_attr = TRUE;
1996 				}
1997 				vm_object_paging_wait(object, THREAD_UNINT);
1998 
1999 				goto restart_after_sleep;
2000 			}
2001 			break;
2002 
2003 		case REAP_REAP:
2004 			break;
2005 		}
2006 		vm_page_free_prepare_queues(p);
2007 		assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
2008 		/*
2009 		 * Add this page to our list of reclaimed pages,
2010 		 * to be freed later.
2011 		 */
2012 		p->vmp_snext = local_free_q;
2013 		local_free_q = p;
2014 	}
2015 	vm_page_unlock_queues();
2016 
2017 	/*
2018 	 * Free the remaining reclaimed pages
2019 	 */
2020 	if (reap_type == REAP_PURGEABLE) {
2021 		pmap_flush(&pmap_flush_context_storage);
2022 	}
2023 
2024 	vm_object_reap_freelist(local_free_q,
2025 	    disconnect_on_release, set_cache_attr_needed);
2026 	if (set_cache_attr_needed) {
2027 		object->set_cache_attr = TRUE;
2028 	}
2029 }
2030 
2031 
2032 void
2033 vm_object_reap_async(
2034 	vm_object_t     object)
2035 {
2036 	vm_object_lock_assert_exclusive(object);
2037 
2038 	vm_object_reaper_lock_spin();
2039 
2040 	vm_object_reap_count_async++;
2041 
2042 	/* enqueue the VM object... */
2043 	queue_enter(&vm_object_reaper_queue, object,
2044 	    vm_object_t, cached_list);
2045 
2046 	vm_object_reaper_unlock();
2047 
2048 	/* ... and wake up the reaper thread */
2049 	thread_wakeup((event_t) &vm_object_reaper_queue);
2050 }
2051 
2052 
2053 void
2054 vm_object_reaper_thread(void)
2055 {
2056 	vm_object_t     object, shadow_object;
2057 
2058 	vm_object_reaper_lock_spin();
2059 
2060 	while (!queue_empty(&vm_object_reaper_queue)) {
2061 		queue_remove_first(&vm_object_reaper_queue,
2062 		    object,
2063 		    vm_object_t,
2064 		    cached_list);
2065 
2066 		vm_object_reaper_unlock();
2067 		vm_object_lock(object);
2068 
2069 		assert(object->terminating);
2070 		assert(!object->alive);
2071 
2072 		/*
2073 		 * The pageout daemon might be playing with our pages.
2074 		 * Now that the object is dead, it won't touch any more
2075 		 * pages, but some pages might already be on their way out.
2076 		 * Hence, we wait until the active paging activities have
2077 		 * ceased before we break the association with the pager
2078 		 * itself.
2079 		 */
2080 		vm_object_paging_wait(object, THREAD_UNINT);
2081 
2082 		shadow_object =
2083 		    object->pageout ? VM_OBJECT_NULL : object->shadow;
2084 
2085 		vm_object_reap(object);
2086 		/* cache is unlocked and object is no longer valid */
2087 		object = VM_OBJECT_NULL;
2088 
2089 		if (shadow_object != VM_OBJECT_NULL) {
2090 			/*
2091 			 * Drop the reference "object" was holding on
2092 			 * its shadow object.
2093 			 */
2094 			vm_object_deallocate(shadow_object);
2095 			shadow_object = VM_OBJECT_NULL;
2096 		}
2097 		vm_object_reaper_lock_spin();
2098 	}
2099 
2100 	/* wait for more work... */
2101 	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
2102 
2103 	vm_object_reaper_unlock();
2104 
2105 	thread_block((thread_continue_t) vm_object_reaper_thread);
2106 	/*NOTREACHED*/
2107 }
2108 
2109 /*
2110  *	Routine:	vm_object_release_pager
2111  *	Purpose:	Terminate the pager and, upon completion,
2112  *			release our last reference to it.
2113  */
2114 static void
2115 vm_object_release_pager(
2116 	memory_object_t pager)
2117 {
2118 	/*
2119 	 *	Terminate the pager.
2120 	 */
2121 
2122 	(void) memory_object_terminate(pager);
2123 
2124 	/*
2125 	 *	Release reference to pager.
2126 	 */
2127 	memory_object_deallocate(pager);
2128 }
2129 
2130 /*
2131  *	Routine:	vm_object_destroy
2132  *	Purpose:
2133  *		Shut down a VM object, despite the
2134  *		presence of address map (or other) references
2135  *		to the vm_object.
2136  */
2137 #if FBDP_DEBUG_OBJECT_NO_PAGER
2138 extern uint32_t system_inshutdown;
2139 int fbdp_no_panic = 1;
2140 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
2141 kern_return_t
2142 vm_object_destroy(
2143 	vm_object_t                                     object,
2144 	vm_object_destroy_reason_t   reason)
2145 {
2146 	memory_object_t         old_pager;
2147 
2148 	if (object == VM_OBJECT_NULL) {
2149 		return KERN_SUCCESS;
2150 	}
2151 
2152 	/*
2153 	 *	Remove the pager association immediately.
2154 	 *
2155 	 *	This will prevent the memory manager from further
2156 	 *	meddling.  [If it wanted to flush data or make
2157 	 *	other changes, it should have done so before performing
2158 	 *	the destroy call.]
2159 	 */
2160 
2161 	vm_object_lock(object);
2162 
2163 #if FBDP_DEBUG_OBJECT_NO_PAGER
2164 	static bool fbdp_no_panic_retrieved = false;
2165 	if (!fbdp_no_panic_retrieved) {
2166 		PE_parse_boot_argn("fbdp_no_panic4", &fbdp_no_panic, sizeof(fbdp_no_panic));
2167 		fbdp_no_panic_retrieved = true;
2168 	}
2169 
2170 	bool forced_unmount = false;
2171 	if (object->named &&
2172 	    os_ref_get_count_raw(&object->ref_count) > 2 &&
2173 	    object->pager != NULL &&
2174 	    vnode_pager_get_forced_unmount(object->pager, &forced_unmount) == KERN_SUCCESS &&
2175 	    forced_unmount == false) {
2176 		if (!fbdp_no_panic) {
2177 			panic("FBDP rdar://99829401 object %p refs %d pager %p (no forced unmount)\n", object, os_ref_get_count_raw(&object->ref_count), object->pager);
2178 		}
2179 		DTRACE_VM3(vm_object_destroy_no_forced_unmount,
2180 		    vm_object_t, object,
2181 		    int, os_ref_get_count_raw(&object->ref_count),
2182 		    memory_object_t, object->pager);
2183 	}
2184 
2185 	if (object->fbdp_tracked) {
2186 		if (os_ref_get_count_raw(&object->ref_count) > 2 && !system_inshutdown) {
2187 			if (!fbdp_no_panic) {
2188 				panic("FBDP/4 rdar://99829401 object %p refs %d pager %p (tracked)\n", object, os_ref_get_count_raw(&object->ref_count), object->pager);
2189 			}
2190 		}
2191 		VM_OBJECT_SET_FBDP_TRACKED(object, false);
2192 	}
2193 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
2194 
2195 	VM_OBJECT_SET_NO_PAGER_REASON(object, reason);
2196 
2197 	VM_OBJECT_SET_CAN_PERSIST(object, FALSE);
2198 	VM_OBJECT_SET_NAMED(object, FALSE);
2199 #if 00
2200 	VM_OBJECT_SET_ALIVE(object, FALSE);
2201 #endif /* 00 */
2202 
2203 #if DEVELOPMENT || DEBUG
2204 	if (object->object_is_shared_cache &&
2205 	    object->pager != NULL &&
2206 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
2207 		OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
2208 	}
2209 #endif /* DEVELOPMENT || DEBUG */
2210 
2211 	old_pager = object->pager;
2212 	object->pager = MEMORY_OBJECT_NULL;
2213 	if (old_pager != MEMORY_OBJECT_NULL) {
2214 		memory_object_control_disable(&object->pager_control);
2215 	}
2216 
2217 	/*
2218 	 * Wait for the existing paging activity (that got
2219 	 * through before we nulled out the pager) to subside.
2220 	 */
2221 	vm_object_paging_wait(object, THREAD_UNINT);
2222 	vm_object_pl_req_wait(object, THREAD_UNINT);
2223 
2224 	/*
2225 	 * Memory objects usually stay alive while their
2226 	 * VM object is still mapped but vnodes can get
2227 	 * reclaimed by forced unmounts while still mapped,
2228 	 * for example, so we could be racing with a
2229 	 * memory_object_map() or memory_object_last_unmap()
2230 	 * here.
2231 	 * We should wait for any memory_object_map/last_unmap()
2232 	 * to complete, except if we're the thread calling
2233 	 * memory_object_last_unmap() on this memory object.
2234 	 */
2235 	if (old_pager != MEMORY_OBJECT_NULL &&
2236 	    old_pager->mo_last_unmap_ctid == thread_get_ctid(current_thread())) {
2237 		old_pager->mo_last_unmap_ctid = 0;
2238 	} else {
2239 		vm_object_mapping_wait(object, THREAD_UNINT);
2240 	}
2241 
2242 	vm_object_unlock(object);
2243 
2244 	/*
2245 	 *	Terminate the object now.
2246 	 */
2247 	if (old_pager != MEMORY_OBJECT_NULL) {
2248 		vm_object_release_pager(old_pager);
2249 
2250 		/*
2251 		 * JMM - Release the caller's reference.  This assumes the
2252 		 * caller had a reference to release, which is a big (but
2253 		 * currently valid) assumption if this is driven from the
2254 		 * vnode pager (it is holding a named reference when making
2255 		 * this call).
2256 		 */
2257 		vm_object_deallocate(object);
2258 	}
2259 	return KERN_SUCCESS;
2260 }
2261 
2262 /*
2263  * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
2264  * exist because of the need to handle shadow chains.  When deactivating pages, we only
2265  * want to deactivate the ones at the topmost level in the object chain.  In order to do
2266  * this efficiently, the specified address range is divided up into "chunks" and we use
2267  * a bit map to keep track of which pages have already been processed as we descend down
2268  * the shadow chain.  These chunk macros hide the details of the bit map implementation
2269  * as much as we can.
2270  *
2271  * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2272  * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
2273  * order bit represents page 0 in the current range and highest order bit represents
2274  * page 63.
2275  *
2276  * For further convenience, we also use negative logic for the page state in the bit map.
2277  * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2278  * been processed.  This way we can simply test the 64-bit long word to see if it's zero
2279  * to easily tell if the whole range has been processed.  Therefore, the bit map starts
2280  * out with all the bits set.  The macros below hide all these details from the caller.
2281  */
2282 
2283 #define PAGES_IN_A_CHUNK        64      /* The number of pages in the chunk must */
2284                                         /* be the same as the number of bits in  */
2285                                         /* the chunk_state_t type. We use 64     */
2286                                         /* just for convenience.		 */
2287 
2288 #define CHUNK_SIZE      (PAGES_IN_A_CHUNK * PAGE_SIZE_64)       /* Size of a chunk in bytes */
2289 
2290 typedef uint64_t        chunk_state_t;
2291 
2292 /*
2293  * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2294  * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
2295  * then we mark pages beyond the len as having been "processed" so that we don't waste time
2296  * looking at pages in that range.  This can save us from unnecessarily chasing down the
2297  * shadow chain.
2298  */
2299 
2300 #define CHUNK_INIT(c, len)                                              \
2301 	MACRO_BEGIN                                                     \
2302 	uint64_t p;                                                     \
2303                                                                         \
2304 	(c) = 0xffffffffffffffffLL;                                     \
2305                                                                         \
2306 	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)       \
2307 	        MARK_PAGE_HANDLED(c, p);                                \
2308 	MACRO_END
2309 
2310 
2311 /*
2312  * Return true if all pages in the chunk have not yet been processed.
2313  */
2314 
2315 #define CHUNK_NOT_COMPLETE(c)   ((c) != 0)
2316 
2317 /*
2318  * Return true if the page at offset 'p' in the bit map has already been handled
2319  * while processing a higher level object in the shadow chain.
2320  */
2321 
2322 #define PAGE_ALREADY_HANDLED(c, p)      (((c) & (1ULL << (p))) == 0)
2323 
2324 /*
2325  * Mark the page at offset 'p' in the bit map as having been processed.
2326  */
2327 
2328 #define MARK_PAGE_HANDLED(c, p) \
2329 MACRO_BEGIN \
2330 	(c) = (c) & ~(1ULL << (p)); \
2331 MACRO_END
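
/*
 * A minimal sketch (disabled, example name and the 3-page length are
 * hypothetical) showing how the chunk bit map macros above fit together;
 * chunk_state_t, PAGE_SIZE_64 and the macros are the ones defined in this
 * file.
 */
#if 0
static void
chunk_state_example(void)
{
	chunk_state_t   cs;
	unsigned int    i;

	/*
	 * A 3-page range: CHUNK_INIT leaves bits 0..2 set ("not yet seen",
	 * negative logic) and pre-marks bits 3..63 as handled so pages
	 * beyond the range are never chased down the shadow chain.
	 */
	CHUNK_INIT(cs, 3 * PAGE_SIZE_64);

	assert(CHUNK_NOT_COMPLETE(cs));
	assert(!PAGE_ALREADY_HANDLED(cs, 0));
	assert(PAGE_ALREADY_HANDLED(cs, 63));

	/* Processing a page clears its bit... */
	for (i = 0; i < 3; i++) {
		MARK_PAGE_HANDLED(cs, i);
	}
	/* ...so once all three are handled the whole word is zero. */
	assert(!CHUNK_NOT_COMPLETE(cs));
}
#endif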
2332 
2333 
2334 /*
2335  * Return true if the page at the given offset has been paged out.  Object is
2336  * locked upon entry and returned locked.
2337  *
2338  * NB: It is the caller's responsibility to ensure that the offset in question
2339  * is not in the process of being paged in/out (i.e. not busy or no backing
2340  * page)
2341  */
2342 static bool
2343 page_is_paged_out(
2344 	vm_object_t             object,
2345 	vm_object_offset_t      offset)
2346 {
2347 	if (object->internal &&
2348 	    object->alive &&
2349 	    !object->terminating &&
2350 	    object->pager_ready) {
2351 		if (vm_object_compressor_pager_state_get(object, offset)
2352 		    == VM_EXTERNAL_STATE_EXISTS) {
2353 			return true;
2354 		}
2355 	}
2356 	return false;
2357 }
2358 
2359 
2360 
2361 /*
2362  * madvise_free_debug
2363  *
2364  * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2365  * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2366  * simulate the loss of the page's contents as if the page had been
2367  * reclaimed and then re-faulted.
2368  */
2369 #if DEVELOPMENT || DEBUG
2370 int madvise_free_debug = 0;
2371 int madvise_free_debug_sometimes = 1;
2372 #else /* DEBUG */
2373 int madvise_free_debug = 0;
2374 int madvise_free_debug_sometimes = 0;
2375 #endif /* DEBUG */
2376 int madvise_free_counter = 0;
2377 
2378 __options_decl(deactivate_flags_t, uint32_t, {
2379 	DEACTIVATE_KILL         = 0x1,
2380 	DEACTIVATE_REUSABLE     = 0x2,
2381 	DEACTIVATE_ALL_REUSABLE = 0x4,
2382 	DEACTIVATE_CLEAR_REFMOD = 0x8,
2383 	DEACTIVATE_KILL_NO_WRITE = 0x10
2384 });
2385 
2386 /*
2387  * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
2388  * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
2389  * a size that is less than or equal to the CHUNK_SIZE.
2390  */
2391 
2392 static void
2393 deactivate_pages_in_object(
2394 	vm_object_t             object,
2395 	vm_object_offset_t      offset,
2396 	vm_object_size_t        size,
2397 	deactivate_flags_t      flags,
2398 	chunk_state_t           *chunk_state,
2399 	pmap_flush_context      *pfc,
2400 	struct pmap             *pmap,
2401 	vm_map_offset_t         pmap_offset)
2402 {
2403 	vm_page_t       m;
2404 	int             p;
2405 	struct  vm_page_delayed_work    dw_array;
2406 	struct  vm_page_delayed_work    *dwp, *dwp_start;
2407 	bool            dwp_finish_ctx = TRUE;
2408 	int             dw_count;
2409 	int             dw_limit;
2410 	unsigned int    reusable = 0;
2411 
2412 	/*
2413 	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
2414 	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
2415 	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
2416 	 * all the pages in the chunk.
2417 	 */
2418 
2419 	dwp_start = dwp = NULL;
2420 	dw_count = 0;
2421 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2422 	dwp_start = vm_page_delayed_work_get_ctx();
2423 	if (dwp_start == NULL) {
2424 		dwp_start = &dw_array;
2425 		dw_limit = 1;
2426 		dwp_finish_ctx = FALSE;
2427 	}
2428 
2429 	dwp = dwp_start;
2430 
2431 	for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2432 		/*
2433 		 * If this offset has already been found and handled in a higher level object, then don't
2434 		 * do anything with it in the current shadow object.
2435 		 */
2436 
2437 		if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
2438 			continue;
2439 		}
2440 
2441 		/*
2442 		 * See if the page at this offset is around.  First check to see if the page is resident,
2443 		 * then if not, check the existence map or with the pager.
2444 		 */
2445 
2446 		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2447 			/*
2448 			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
2449 			 * so that we won't bother looking for a page at this offset again if there are more
2450 			 * shadow objects.  Then deactivate the page.
2451 			 */
2452 
2453 			MARK_PAGE_HANDLED(*chunk_state, p);
2454 
2455 			if ((!VM_PAGE_WIRED(m)) && (!vm_page_is_private(m)) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
2456 			    (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
2457 				int     clear_refmod_mask;
2458 				int     pmap_options;
2459 				dwp->dw_mask = 0;
2460 
2461 				pmap_options = 0;
2462 				clear_refmod_mask = VM_MEM_REFERENCED;
2463 				dwp->dw_mask |= DW_clear_reference;
2464 
2465 				if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2466 					if (!(flags & DEACTIVATE_KILL_NO_WRITE) &&
2467 					    (madvise_free_debug ||
2468 					    (madvise_free_debug_sometimes &&
2469 					    madvise_free_counter++ & 0x1))) {
2470 						/*
2471 						 * zero-fill the page (or every
2472 						 * other page) now to simulate
2473 						 * it being reclaimed and
2474 						 * re-faulted.
2475 						 */
2476 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2477 						if (!m->vmp_unmodified_ro) {
2478 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2479 						if (true) {
2480 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2481 							pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2482 						}
2483 					}
2484 					m->vmp_precious = FALSE;
2485 					m->vmp_dirty = FALSE;
2486 
2487 					clear_refmod_mask |= VM_MEM_MODIFIED;
2488 					if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2489 						/*
2490 						 * This page is now clean and
2491 						 * reclaimable.  Move it out
2492 						 * of the throttled queue, so
2493 						 * that vm_pageout_scan() can
2494 						 * find it.
2495 						 */
2496 						dwp->dw_mask |= DW_move_page;
2497 					}
2498 
2499 #if 0
2500 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2501 					/*
2502 					 * COMMENT BLOCK ON WHY THIS SHOULDN'T BE DONE.
2503 					 *
2504 					 * Since we are about to do a vm_object_compressor_pager_state_clr
2505 					 * below for this page, which drops any existing compressor
2506 					 * storage of this page (eg side-effect of a CoW operation or
2507 					 * a collapse operation), it is tempting to think that we should
2508 					 * treat this page as if it was just decompressed (during which
2509 					 * we also drop existing compressor storage) and so start its life
2510 					 * out with vmp_unmodified_ro set to FALSE.
2511 					 *
2512 					 * However, we can't do that here because we could swing around
2513 					 * and re-access this page in a read-only fault.
2514 					 * Clearing this bit means we'll try to zero it up above
2515 					 * and fail.
2516 					 *
2517 					 * Note that clearing the bit is unnecessary regardless because
2518 					 * dirty state has been cleared. During the next soft fault, the
2519 					 * right state will be restored and things will progress just fine.
2520 					 */
2521 					if (m->vmp_unmodified_ro == true) {
2522 						/* Need object and pageq locks for bit manipulation*/
2523 						m->vmp_unmodified_ro = false;
2524 						os_atomic_dec(&compressor_ro_uncompressed);
2525 					}
2526 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2527 #endif /* 0 */
2528 					vm_object_compressor_pager_state_clr(object, offset);
2529 
2530 					if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) {
2531 						assert(!(flags & DEACTIVATE_ALL_REUSABLE));
2532 						assert(!object->all_reusable);
2533 						m->vmp_reusable = TRUE;
2534 						object->reusable_page_count++;
2535 						assert(object->resident_page_count >= object->reusable_page_count);
2536 						reusable++;
2537 						/*
2538 						 * Tell pmap this page is now
2539 						 * "reusable" (to update pmap
2540 						 * stats for all mappings).
2541 						 */
2542 						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2543 					}
2544 				}
2545 				if (flags & DEACTIVATE_CLEAR_REFMOD) {
2546 					/*
2547 					 * The caller didn't clear the refmod bits in advance.
2548 					 * Clear them for this page now.
2549 					 */
2550 					pmap_options |= PMAP_OPTIONS_NOFLUSH;
2551 					pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2552 					    clear_refmod_mask,
2553 					    pmap_options,
2554 					    (void *)pfc);
2555 				}
2556 
2557 				if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
2558 				    !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) {
2559 					dwp->dw_mask |= DW_move_page;
2560 				}
2561 
2562 				if (dwp->dw_mask) {
2563 					VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2564 					    dw_count);
2565 				}
2566 
2567 				if (dw_count >= dw_limit) {
2568 					if (reusable) {
2569 						OSAddAtomic(reusable,
2570 						    &vm_page_stats_reusable.reusable_count);
2571 						vm_page_stats_reusable.reusable += reusable;
2572 						reusable = 0;
2573 					}
2574 					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2575 
2576 					dwp = dwp_start;
2577 					dw_count = 0;
2578 				}
2579 			}
2580 		} else {
2581 			/*
2582 			 * The page at this offset isn't memory resident, check to see if it's
2583 			 * been paged out.  If so, mark it as handled so we don't bother looking
2584 			 * for it in the shadow chain.
2585 			 */
2586 
2587 			if (page_is_paged_out(object, offset)) {
2588 				MARK_PAGE_HANDLED(*chunk_state, p);
2589 
2590 				/*
2591 				 * If we're killing a non-resident page, then clear the page in the existence
2592 				 * map so we don't bother paging it back in if it's touched again in the future.
2593 				 */
2594 
2595 				if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2596 					vm_object_compressor_pager_state_clr(object, offset);
2597 
2598 					if (pmap != PMAP_NULL) {
2599 						/*
2600 						 * Tell pmap that this page
2601 						 * is no longer mapped, to
2602 						 * adjust the footprint ledger
2603 						 * because this page is no
2604 						 * longer compressed.
2605 						 */
2606 						pmap_remove_options(
2607 							pmap,
2608 							pmap_offset,
2609 							(pmap_offset +
2610 							PAGE_SIZE),
2611 							PMAP_OPTIONS_REMOVE);
2612 					}
2613 				}
2614 			}
2615 		}
2616 	}
2617 
2618 	if (reusable) {
2619 		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2620 		vm_page_stats_reusable.reusable += reusable;
2621 		reusable = 0;
2622 	}
2623 
2624 	if (dw_count) {
2625 		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2626 		dwp = dwp_start;
2627 		dw_count = 0;
2628 	}
2629 
2630 	if (dwp_start && dwp_finish_ctx) {
2631 		vm_page_delayed_work_finish_ctx(dwp_start);
2632 		dwp_start = dwp = NULL;
2633 	}
2634 }
2635 
2636 
2637 /*
2638  * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
2639  * will always be less than or equal to the given size.  The total range is divided up
2640  * into chunks for efficiency and performance related to the locks and handling the shadow
2641  * chain.  This routine returns how much of the given "size" it actually processed.  It's
2642  * up to the caller to loop and keep calling this routine until the entire range they want
2643  * to process has been done.
2644  * Iff clear_refmod is true, pmap_clear_refmod_options is called for each physical page in this range.
2645  */
2646 
2647 static vm_object_size_t
2648 deactivate_a_chunk(
2649 	vm_object_t             orig_object,
2650 	vm_object_offset_t      offset,
2651 	vm_object_size_t        size,
2652 	deactivate_flags_t      flags,
2653 	pmap_flush_context      *pfc,
2654 	struct pmap             *pmap,
2655 	vm_map_offset_t         pmap_offset)
2656 {
2657 	vm_object_t             object;
2658 	vm_object_t             tmp_object;
2659 	vm_object_size_t        length;
2660 	chunk_state_t           chunk_state;
2661 
2662 
2663 	/*
2664 	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
2665 	 * remaining size the caller asked for.
2666 	 */
2667 
2668 	length = MIN(size, CHUNK_SIZE);
2669 
2670 	/*
2671 	 * The chunk_state keeps track of which pages we've already processed if there's
2672 	 * a shadow chain on this object.  At this point, we haven't done anything with this
2673 	 * range of pages yet, so initialize the state to indicate no pages processed yet.
2674 	 */
2675 
2676 	CHUNK_INIT(chunk_state, length);
2677 	object = orig_object;
2678 
2679 	/*
2680 	 * Start at the top level object and iterate around the loop once for each object
2681 	 * in the shadow chain.  We stop processing early if we've already found all the pages
2682 	 * in the range.  Otherwise we stop when we run out of shadow objects.
2683 	 */
2684 
2685 	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2686 		vm_object_paging_begin(object);
2687 
2688 		deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2689 
2690 		vm_object_paging_end(object);
2691 
2692 		/*
2693 		 * We've finished with this object, see if there's a shadow object.  If
2694 		 * there is, update the offset and lock the new object.  We also turn off
2695 		 * kill_page at this point since we only kill pages in the topmost object.
2696 		 */
2697 
2698 		tmp_object = object->shadow;
2699 
2700 		if (tmp_object) {
2701 			assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
2702 			flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
2703 			offset += object->vo_shadow_offset;
2704 			vm_object_lock(tmp_object);
2705 		}
2706 
2707 		if (object != orig_object) {
2708 			vm_object_unlock(object);
2709 		}
2710 
2711 		object = tmp_object;
2712 	}
2713 
2714 	if (object && object != orig_object) {
2715 		vm_object_unlock(object);
2716 	}
2717 
2718 	return length;
2719 }
2720 
2721 
2722 
2723 /*
2724  * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
2725  * we also clear the modified status of the page and "forget" any changes that have been made
2726  * to the page.
2727  */
2728 
2729 __private_extern__ void
2730 vm_object_deactivate_pages(
2731 	vm_object_t             object,
2732 	vm_object_offset_t      offset,
2733 	vm_object_size_t        size,
2734 	boolean_t               kill_page,
2735 	boolean_t               reusable_page,
2736 	boolean_t               kill_no_write,
2737 	struct pmap             *pmap,
2738 	vm_map_offset_t         pmap_offset)
2739 {
2740 	vm_object_size_t        length;
2741 	boolean_t               all_reusable;
2742 	pmap_flush_context      pmap_flush_context_storage;
2743 	unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED;
2744 	unsigned int pmap_clear_refmod_options = 0;
2745 	deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD;
2746 	bool refmod_cleared = false;
2747 	if (kill_page) {
2748 		flags |= DEACTIVATE_KILL;
2749 	}
2750 	if (reusable_page) {
2751 		flags |= DEACTIVATE_REUSABLE;
2752 	}
2753 	if (kill_no_write) {
2754 		flags |= DEACTIVATE_KILL_NO_WRITE;
2755 	}
2756 
2757 	/*
2758 	 * We break the range up into chunks and do one chunk at a time.  This is for
2759 	 * efficiency and performance while handling the shadow chains and the locks.
2760 	 * The deactivate_a_chunk() function returns how much of the range it processed.
2761 	 * We keep calling this routine until the given size is exhausted.
2762 	 */
2763 
2764 
2765 	all_reusable = FALSE;
2766 #if 11
2767 	/*
2768 	 * For the sake of accurate "reusable" pmap stats, we need
2769 	 * to tell pmap about each page that is no longer "reusable",
2770 	 * so we can't do the "all_reusable" optimization.
2771 	 *
2772 	 * If we do go with the all_reusable optimization, we can't
2773 	 * return if size is 0 since we could have "all_reusable == TRUE"
2774 	 * In this case, we save the overhead of doing the pmap_flush_context
2775 	 * work.
2776 	 */
2777 	if (size == 0) {
2778 		return;
2779 	}
2780 #else
2781 	if (reusable_page &&
2782 	    object->internal &&
2783 	    object->vo_size != 0 &&
2784 	    object->vo_size == size &&
2785 	    object->reusable_page_count == 0) {
2786 		all_reusable = TRUE;
2787 		reusable_page = FALSE;
2788 		flags |= DEACTIVATE_ALL_REUSABLE;
2789 	}
2790 #endif
2791 
2792 	if ((reusable_page || all_reusable) && object->all_reusable) {
2793 		/* This means MADV_FREE_REUSABLE has been called twice, which
2794 		 * is probably illegal. */
2795 		return;
2796 	}
2797 
2798 
2799 	pmap_flush_context_init(&pmap_flush_context_storage);
2800 
2801 	/*
2802 	 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
2803 	 * We can't do this if we're killing pages and there's a shadow chain as
2804 	 * we don't yet know which pages are in the top object (pages in shadow copies aren't
2805 	 * safe to kill).
2806 	 * And we can only do this on hardware that supports it.
2807 	 */
2808 	if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
2809 		if (kill_page && object->internal) {
2810 			pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
2811 		}
2812 		if (reusable_page) {
2813 			pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
2814 		}
2815 
2816 		refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2817 		if (refmod_cleared) {
2818 			// We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
2819 			flags &= ~DEACTIVATE_CLEAR_REFMOD;
2820 		}
2821 	}
2822 
2823 	while (size) {
2824 		length = deactivate_a_chunk(object, offset, size, flags,
2825 		    &pmap_flush_context_storage, pmap, pmap_offset);
2826 
2827 		size -= length;
2828 		offset += length;
2829 		pmap_offset += length;
2830 	}
2831 	pmap_flush(&pmap_flush_context_storage);
2832 
2833 	if (all_reusable) {
2834 		if (!object->all_reusable) {
2835 			unsigned int reusable;
2836 
2837 			object->all_reusable = TRUE;
2838 			assert(object->reusable_page_count == 0);
2839 			/* update global stats */
2840 			reusable = object->resident_page_count;
2841 			OSAddAtomic(reusable,
2842 			    &vm_page_stats_reusable.reusable_count);
2843 			vm_page_stats_reusable.reusable += reusable;
2844 			vm_page_stats_reusable.all_reusable_calls++;
2845 		}
2846 	} else if (reusable_page) {
2847 		vm_page_stats_reusable.partial_reusable_calls++;
2848 	}
2849 }
2850 
2851 void
2852 vm_object_reuse_pages(
2853 	vm_object_t             object,
2854 	vm_object_offset_t      start_offset,
2855 	vm_object_offset_t      end_offset,
2856 	boolean_t               allow_partial_reuse)
2857 {
2858 	vm_object_offset_t      cur_offset;
2859 	vm_page_t               m;
2860 	unsigned int            reused, reusable;
2861 
2862 #define VM_OBJECT_REUSE_PAGE(object, m, reused)                         \
2863 	MACRO_BEGIN                                                     \
2864 	        if ((m) != VM_PAGE_NULL &&                              \
2865 	            (m)->vmp_reusable) {                                \
2866 	                assert((object)->reusable_page_count <=         \
2867 	                       (object)->resident_page_count);          \
2868 	                assert((object)->reusable_page_count > 0);      \
2869 	                (object)->reusable_page_count--;                \
2870 	                (m)->vmp_reusable = FALSE;                      \
2871 	                (reused)++;                                     \
2872 	/* \
2873 	 * Tell pmap that this page is no longer \
2874 	 * "reusable", to update the "reusable" stats \
2875 	 * for all the pmaps that have mapped this \
2876 	 * page. \
2877 	 */                                                             \
2878 	                pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
2879 	                                          0, /* refmod */       \
2880 	                                          (PMAP_OPTIONS_CLEAR_REUSABLE \
2881 	                                           | PMAP_OPTIONS_NOFLUSH), \
2882 	                                          NULL);                \
2883 	        }                                                       \
2884 	MACRO_END
2885 
2886 	reused = 0;
2887 	reusable = 0;
2888 
2889 	vm_object_lock_assert_exclusive(object);
2890 
2891 	if (object->all_reusable) {
2892 		panic("object %p all_reusable: can't update pmap stats",
2893 		    object);
2894 		assert(object->reusable_page_count == 0);
2895 		object->all_reusable = FALSE;
2896 		if (end_offset - start_offset == object->vo_size ||
2897 		    !allow_partial_reuse) {
2898 			vm_page_stats_reusable.all_reuse_calls++;
2899 			reused = object->resident_page_count;
2900 		} else {
2901 			vm_page_stats_reusable.partial_reuse_calls++;
2902 			vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2903 				if (m->vmp_offset < start_offset ||
2904 				    m->vmp_offset >= end_offset) {
2905 					m->vmp_reusable = TRUE;
2906 					object->reusable_page_count++;
2907 					assert(object->resident_page_count >= object->reusable_page_count);
2908 					continue;
2909 				} else {
2910 					assert(!m->vmp_reusable);
2911 					reused++;
2912 				}
2913 			}
2914 		}
2915 	} else if (object->resident_page_count >
2916 	    ((end_offset - start_offset) >> PAGE_SHIFT)) {
2917 		vm_page_stats_reusable.partial_reuse_calls++;
2918 		for (cur_offset = start_offset;
2919 		    cur_offset < end_offset;
2920 		    cur_offset += PAGE_SIZE_64) {
2921 			if (object->reusable_page_count == 0) {
2922 				break;
2923 			}
2924 			m = vm_page_lookup(object, cur_offset);
2925 			VM_OBJECT_REUSE_PAGE(object, m, reused);
2926 		}
2927 	} else {
2928 		vm_page_stats_reusable.partial_reuse_calls++;
2929 		vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2930 			if (object->reusable_page_count == 0) {
2931 				break;
2932 			}
2933 			if (m->vmp_offset < start_offset ||
2934 			    m->vmp_offset >= end_offset) {
2935 				continue;
2936 			}
2937 			VM_OBJECT_REUSE_PAGE(object, m, reused);
2938 		}
2939 	}
2940 
2941 	/* update global stats */
2942 	OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
2943 	vm_page_stats_reusable.reused += reused;
2944 	vm_page_stats_reusable.reusable += reusable;
2945 }
2946 
2947 /*
2948  * This function determines if the zero operation can be run on the
2949  * object. The checks on the entry have already been performed by
2950  * vm_map_zero_entry_preflight.
2951  */
2952 static kern_return_t
2953 vm_object_zero_preflight(
2954 	vm_object_t                     object,
2955 	vm_object_offset_t              start,
2956 	vm_object_offset_t              end)
2957 {
2958 	/*
2959 	 * Zeroing is further restricted to anonymous memory.
2960 	 */
2961 	if (!object->internal) {
2962 		return KERN_PROTECTION_FAILURE;
2963 	}
2964 
2965 	/*
2966 	 * Zeroing for copy on write isn't yet supported
2967 	 */
2968 	if (object->shadow != NULL ||
2969 	    object->vo_copy != NULL) {
2970 		return KERN_NO_ACCESS;
2971 	}
2972 
2973 	/*
2974 	 * Ensure that the bounds make sense wrt the object
2975 	 */
2976 	if (end - start > object->vo_size) {
2977 		return KERN_INVALID_ADDRESS;
2978 	}
2979 
2980 	if (object->terminating || !object->alive) {
2981 		return KERN_ABORTED;
2982 	}
2983 
2984 	return KERN_SUCCESS;
2985 }
2986 
2987 static void
2988 vm_object_zero_page(vm_page_t m)
2989 {
2990 	if (m != VM_PAGE_NULL) {
2991 		ppnum_t phy_page_num = VM_PAGE_GET_PHYS_PAGE(m);
2992 
2993 		/*
2994 		 * Skip fictitious guard pages
2995 		 */
2996 		if (vm_page_is_fictitious(m)) {
2997 			assert(vm_page_is_guard(m));
2998 			return;
2999 		}
3000 		pmap_zero_page(phy_page_num);
3001 	}
3002 }
3003 
3004 /*
3005  * This function iterates the range of pages specified in the object and
3006  * discards the ones that are compressed and zeroes the ones that are wired.
3007  * This function may drop the object lock while waiting for a page that is
3008  * busy and will restart the operation for the specific offset.
3009  */
3010 kern_return_t
3011 vm_object_zero(
3012 	vm_object_t                     object,
3013 	vm_object_offset_t              *cur_offset_p,
3014 	vm_object_offset_t              end_offset)
3015 {
3016 	kern_return_t ret;
3017 
3018 	vm_object_lock_assert_exclusive(object);
3019 	ret = vm_object_zero_preflight(object, *cur_offset_p, end_offset);
3020 	if (ret != KERN_SUCCESS) {
3021 		return ret;
3022 	}
3023 
3024 	while (*cur_offset_p < end_offset) {
3025 		vm_page_t m = vm_page_lookup(object, *cur_offset_p);
3026 
3027 		if (m != VM_PAGE_NULL && m->vmp_busy) {
3028 			vm_page_sleep(object, m, THREAD_UNINT, LCK_SLEEP_DEFAULT);
3029 			/* Object lock was dropped -- reverify validity */
3030 			ret = vm_object_zero_preflight(object, *cur_offset_p, end_offset);
3031 			if (ret != KERN_SUCCESS) {
3032 				return ret;
3033 			}
3034 			if (object->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC) {
3035 				/*
3036 				 * Our mapping could have been made "needs_copy" while
3037 				 * the map and object were unlocked.
3038 				 * We need to do the mapping preflight again...
3039 				 */
3040 				return KERN_SUCCESS;
3041 			}
3042 			continue;
3043 		}
3044 
3045 		/*
3046 		 * If the compressor has the page, just discard it instead of
3047 		 * faulting it in and zeroing it; otherwise zero the page if it exists.
3048 		 * If we dropped the object lock during the lookup, retry the lookup
3049 		 * for the current cur_offset.
3050 		 */
3051 		if (page_is_paged_out(object, *cur_offset_p)) {
3052 			vm_object_compressor_pager_state_clr(object, *cur_offset_p);
3053 		} else {
3054 			vm_object_zero_page(m);
3055 		}
3056 		*cur_offset_p += PAGE_SIZE_64;
3057 		/*
3058 		 * TODO: May need a vm_object_lock_yield_shared in this loop if it takes
3059 		 * too long, as holding the object lock for too long can stall pageout
3060 		 * scan (or other users of the object)
3061 		 */
3062 	}
3063 
3064 	return KERN_SUCCESS;
3065 }
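
/*
 * A minimal usage sketch (disabled, hypothetical caller): vm_object_zero()
 * takes the cursor by reference and advances it as pages are discarded or
 * zeroed, so a caller holding the object lock exclusively can re-drive the
 * call with the same cursor; the real callers live in the vm_map layer
 * (see the reference to vm_map_zero_entry_preflight above) and re-check
 * the mapping before retrying.
 */
#if 0
static kern_return_t
vm_object_zero_range_example(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	vm_object_offset_t      cursor = start;
	kern_return_t           kr;

	vm_object_lock(object);
	kr = vm_object_zero(object, &cursor, end);
	vm_object_unlock(object);

	/*
	 * On failure, "cursor" records how far the zeroing got before a
	 * preflight check failed.
	 */
	return kr;
}
#endif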
3066 
3067 /*
3068  *	Routine:	vm_object_pmap_protect
3069  *
3070  *	Purpose:
3071  *		Reduces the permission for all physical
3072  *		pages in the specified object range.
3073  *
3074  *		If removing write permission only, it is
3075  *		sufficient to protect only the pages in
3076  *		the top-level object; only those pages may
3077  *		have write permission.
3078  *
3079  *		If removing all access, we must follow the
3080  *		shadow chain from the top-level object to
3081  *		remove access to all pages in shadowed objects.
3082  *
3083  *		The object must *not* be locked.  The object must
3084  *		be internal.
3085  *
3086  *              If pmap is not NULL, this routine assumes that
3087  *              the only mappings for the pages are in that
3088  *              pmap.
3089  */
3090 
3091 __private_extern__ void
3092 vm_object_pmap_protect(
3093 	vm_object_t                     object,
3094 	vm_object_offset_t              offset,
3095 	vm_object_size_t                size,
3096 	pmap_t                          pmap,
3097 	vm_map_size_t                   pmap_page_size,
3098 	vm_map_offset_t                 pmap_start,
3099 	vm_prot_t                       prot)
3100 {
3101 	vm_object_pmap_protect_options(object, offset, size, pmap,
3102 	    pmap_page_size,
3103 	    pmap_start, prot, 0);
3104 }
3105 
3106 __private_extern__ void
3107 vm_object_pmap_protect_options(
3108 	vm_object_t                     object,
3109 	vm_object_offset_t              offset,
3110 	vm_object_size_t                size,
3111 	pmap_t                          pmap,
3112 	vm_map_size_t                   pmap_page_size,
3113 	vm_map_offset_t                 pmap_start,
3114 	vm_prot_t                       prot,
3115 	int                             options)
3116 {
3117 	pmap_flush_context      pmap_flush_context_storage;
3118 	boolean_t               delayed_pmap_flush = FALSE;
3119 	vm_object_offset_t      offset_in_object;
3120 	vm_object_size_t        size_in_object;
3121 
3122 	if (object == VM_OBJECT_NULL) {
3123 		return;
3124 	}
3125 	if (pmap_page_size > PAGE_SIZE) {
3126 		/* for 16K map on 4K device... */
3127 		pmap_page_size = PAGE_SIZE;
3128 	}
3129 	/*
3130 	 * If we decide to work on the object itself, extend the range to
3131 	 * cover a full number of native pages.
3132 	 */
3133 	size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
3134 	offset_in_object = vm_object_trunc_page(offset);
3135 	/*
3136 	 * If we decide to work on the pmap, use the exact range specified,
3137 	 * so no rounding/truncating offset and size.  They should already
3138 	 * be aligned to pmap_page_size.
3139 	 */
3140 	assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
3141 	    "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
3142 	    offset, size, (uint64_t)pmap_page_size);
3143 
3144 	vm_object_lock(object);
3145 
3146 	if (object->phys_contiguous) {
3147 		if (pmap != NULL) {
3148 			vm_object_unlock(object);
3149 			pmap_protect_options(pmap,
3150 			    pmap_start,
3151 			    pmap_start + size,
3152 			    prot,
3153 			    options & ~PMAP_OPTIONS_NOFLUSH,
3154 			    NULL);
3155 		} else {
3156 			vm_object_offset_t phys_start, phys_end, phys_addr;
3157 
3158 			phys_start = object->vo_shadow_offset + offset_in_object;
3159 			phys_end = phys_start + size_in_object;
3160 			assert(phys_start <= phys_end);
3161 			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
3162 			vm_object_unlock(object);
3163 
3164 			pmap_flush_context_init(&pmap_flush_context_storage);
3165 			delayed_pmap_flush = FALSE;
3166 
3167 			for (phys_addr = phys_start;
3168 			    phys_addr < phys_end;
3169 			    phys_addr += PAGE_SIZE_64) {
3170 				pmap_page_protect_options(
3171 					(ppnum_t) (phys_addr >> PAGE_SHIFT),
3172 					prot,
3173 					options | PMAP_OPTIONS_NOFLUSH,
3174 					(void *)&pmap_flush_context_storage);
3175 				delayed_pmap_flush = TRUE;
3176 			}
3177 			if (delayed_pmap_flush == TRUE) {
3178 				pmap_flush(&pmap_flush_context_storage);
3179 			}
3180 		}
3181 		return;
3182 	}
3183 
3184 	assert(object->internal);
3185 
3186 	while (TRUE) {
3187 		if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
3188 			vm_object_unlock(object);
3189 			if (pmap_page_size < PAGE_SIZE) {
3190 				DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
3191 			}
3192 			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
3193 			    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
3194 			return;
3195 		}
3196 
3197 		if (pmap_page_size < PAGE_SIZE) {
3198 			DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
3199 		}
3200 
3201 		pmap_flush_context_init(&pmap_flush_context_storage);
3202 		delayed_pmap_flush = FALSE;
3203 
3204 		/*
3205 		 * if we are doing large ranges with respect to resident
3206 		 * page count then we should iterate over pages; otherwise
3207 		 * inverse page look-up will be faster
3208 		 */
3209 		if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
3210 			vm_page_t               p;
3211 			vm_object_offset_t      end;
3212 
3213 			end = offset_in_object + size_in_object;
3214 
3215 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
3216 				if (!vm_page_is_fictitious(p) &&
3217 				    (offset_in_object <= p->vmp_offset) &&
3218 				    (p->vmp_offset < end)) {
3219 					vm_map_offset_t start;
3220 
3221 					/*
3222 					 * XXX FBDP 4K: intentionally using "offset" here instead
3223 					 * of "offset_in_object", since "start" is a pmap address.
3224 					 */
3225 					start = pmap_start + p->vmp_offset - offset;
3226 
3227 					if (pmap != PMAP_NULL) {
3228 						vm_map_offset_t curr;
3229 						for (curr = start;
3230 						    curr < start + PAGE_SIZE_64;
3231 						    curr += pmap_page_size) {
3232 							if (curr < pmap_start) {
3233 								continue;
3234 							}
3235 							if (curr >= pmap_start + size) {
3236 								break;
3237 							}
3238 							pmap_protect_options(
3239 								pmap,
3240 								curr,
3241 								curr + pmap_page_size,
3242 								prot,
3243 								options | PMAP_OPTIONS_NOFLUSH,
3244 								&pmap_flush_context_storage);
3245 						}
3246 					} else {
3247 						pmap_page_protect_options(
3248 							VM_PAGE_GET_PHYS_PAGE(p),
3249 							prot,
3250 							options | PMAP_OPTIONS_NOFLUSH,
3251 							&pmap_flush_context_storage);
3252 					}
3253 					delayed_pmap_flush = TRUE;
3254 				}
3255 			}
3256 		} else {
3257 			vm_page_t               p;
3258 			vm_object_offset_t      end;
3259 			vm_object_offset_t      target_off;
3260 
3261 			end = offset_in_object + size_in_object;
3262 
3263 			for (target_off = offset_in_object;
3264 			    target_off < end; target_off += PAGE_SIZE) {
3265 				p = vm_page_lookup(object, target_off);
3266 
3267 				if (p != VM_PAGE_NULL) {
3268 					vm_object_offset_t start;
3269 
3270 					/*
3271 					 * XXX FBDP 4K: intentionally using "offset" here instead
3272 					 * of "offset_in_object", since "start" is a pmap address.
3273 					 */
3274 					start = pmap_start + (p->vmp_offset - offset);
3275 
3276 					if (pmap != PMAP_NULL) {
3277 						vm_map_offset_t curr;
3278 						for (curr = start;
3279 						    curr < start + PAGE_SIZE;
3280 						    curr += pmap_page_size) {
3281 							if (curr < pmap_start) {
3282 								continue;
3283 							}
3284 							if (curr >= pmap_start + size) {
3285 								break;
3286 							}
3287 							pmap_protect_options(
3288 								pmap,
3289 								curr,
3290 								curr + pmap_page_size,
3291 								prot,
3292 								options | PMAP_OPTIONS_NOFLUSH,
3293 								&pmap_flush_context_storage);
3294 						}
3295 					} else {
3296 						pmap_page_protect_options(
3297 							VM_PAGE_GET_PHYS_PAGE(p),
3298 							prot,
3299 							options | PMAP_OPTIONS_NOFLUSH,
3300 							&pmap_flush_context_storage);
3301 					}
3302 					delayed_pmap_flush = TRUE;
3303 				}
3304 			}
3305 		}
3306 		if (delayed_pmap_flush == TRUE) {
3307 			pmap_flush(&pmap_flush_context_storage);
3308 		}
3309 
3310 		if (prot == VM_PROT_NONE) {
3311 			/*
3312 			 * Must follow shadow chain to remove access
3313 			 * to pages in shadowed objects.
3314 			 */
3315 			vm_object_t     next_object;
3316 
3317 			next_object = object->shadow;
3318 			if (next_object != VM_OBJECT_NULL) {
3319 				offset_in_object += object->vo_shadow_offset;
3320 				offset += object->vo_shadow_offset;
3321 				vm_object_lock(next_object);
3322 				vm_object_unlock(object);
3323 				object = next_object;
3324 			} else {
3325 				/*
3326 				 * End of chain - we are done.
3327 				 */
3328 				break;
3329 			}
3330 		} else {
3331 			/*
3332 			 * Pages in shadowed objects may never have
3333 			 * write permission - we may stop here.
3334 			 */
3335 			break;
3336 		}
3337 	}
3338 
3339 	vm_object_unlock(object);
3340 }
3341 
3342 uint32_t vm_page_busy_absent_skipped = 0;
3343 
3344 /*
3345  *	Routine:	vm_object_copy_slowly
3346  *
3347  *	Description:
3348  *		Copy the specified range of the source
3349  *		virtual memory object without using
3350  *		protection-based optimizations (such
3351  *		as copy-on-write).  The pages in the
3352  *		region are actually copied.
3353  *
3354  *	In/out conditions:
3355  *		The caller must hold a reference and a lock
3356  *		for the source virtual memory object.  The source
3357  *		object will be returned *unlocked*.
3358  *
3359  *	Results:
3360  *		If the copy is completed successfully, KERN_SUCCESS is
3361  *		returned.  If the caller asserted the interruptible
3362  *		argument, and an interruption occurred while waiting
3363  *		for a user-generated event, MACH_SEND_INTERRUPTED is
3364  *		returned.  Other values may be returned to indicate
3365  *		hard errors during the copy operation.
3366  *
3367  *		A new virtual memory object is returned in a
3368  *		parameter (_result_object).  The contents of this
3369  *		new object, starting at a zero offset, are a copy
3370  *		of the source memory region.  In the event of
3371  *		an error, this parameter will contain the value
3372  *		VM_OBJECT_NULL.
3373  */
3374 __exported_hidden kern_return_t
3375 vm_object_copy_slowly(
3376 	vm_object_t             src_object,
3377 	vm_object_offset_t      src_offset,
3378 	vm_object_size_t        size,
3379 	boolean_t               interruptible,
3380 #if HAS_MTE
3381 	bool                    create_mte_object,
3382 #endif /* HAS_MTE */
3383 	vm_object_t             *_result_object)        /* OUT */
3384 {
3385 	vm_object_t             new_object;
3386 	vm_object_offset_t      new_offset;
3387 
3388 	struct vm_object_fault_info fault_info = {};
3389 
3390 	if (size == 0) {
3391 		vm_object_unlock(src_object);
3392 		*_result_object = VM_OBJECT_NULL;
3393 		return KERN_INVALID_ARGUMENT;
3394 	}
3395 
3396 	/*
3397 	 *	Prevent destruction of the source object while we copy.
3398 	 */
3399 
3400 	vm_object_reference_locked(src_object);
3401 	vm_object_unlock(src_object);
3402 
3403 	/*
3404 	 *	Create a new object to hold the copied pages.
3405 	 *	A few notes:
3406 	 *		We fill the new object starting at offset 0,
3407 	 *		 regardless of the input offset.
3408 	 *		We don't bother to lock the new object within
3409 	 *		 this routine, since we have the only reference.
3410 	 */
3411 
3412 	size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
3413 	src_offset = vm_object_trunc_page(src_offset);
3414 
3415 #if HAS_MTE
3416 	/*
3417 	 * Retain the original provenance despite the fact we're creating a byte-for-byte copy.
3418 	 * As far as I can tell, this doesn't have a consequence either way:
3419 	 * the only path on which we copy MTE-enabled objects slowly is the fork path,
3420 	 * during which the two maps will hold the same ID anyway.
3421 	 * For objects that'll never be MTE-mapped, the provenance has no consequence anyway.
3422 	 * I'm carrying over the ID here just because it seems tidier than dropping it.
3423 	 */
3424 #endif /* HAS_MTE */
3425 	new_object = vm_object_allocate(size, src_object->vmo_provenance);
3426 	new_offset = 0;
3427 	if (src_object->copy_strategy == MEMORY_OBJECT_COPY_NONE &&
3428 	    src_object->vo_inherit_copy_none) {
3429 		new_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
3430 		new_object->vo_inherit_copy_none = true;
3431 	}
3432 
3433 #if HAS_MTE
3434 	/*
3435 	 * The new object should hold MTE-enabled pages. This is a byproduct
3436 	 * of our current forking strategy.
3437 	 */
3438 	if (create_mte_object) {
3439 		vm_object_mte_set(new_object);
3440 
3441 		assert(src_object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
3442 		new_object->copy_strategy = src_object->copy_strategy;
3443 	}
3444 #endif /* HAS_MTE */
3445 
3446 	assert(size == trunc_page_64(size));    /* Will the loop terminate? */
3447 
3448 	fault_info.interruptible = interruptible;
3449 	fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
3450 	fault_info.lo_offset = src_offset;
3451 	fault_info.hi_offset = src_offset + size;
3452 	fault_info.stealth = TRUE;
3453 
3454 	for (;
3455 	    size != 0;
3456 	    src_offset += PAGE_SIZE_64,
3457 	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
3458 	    ) {
3459 		vm_page_t       new_page;
3460 		vm_fault_return_t result;
3461 		vm_grab_options_t options;
3462 
3463 		options = vm_page_grab_options_for_object(new_object);
3464 
3465 		while ((new_page = vm_page_grab_options(options)) == VM_PAGE_NULL) {
3466 			if (!vm_page_wait(interruptible)) {
3467 				vm_object_deallocate(new_object);
3468 				vm_object_deallocate(src_object);
3469 				*_result_object = VM_OBJECT_NULL;
3470 				return MACH_SEND_INTERRUPTED;
3471 			}
3472 		}
3473 
3474 		vm_object_lock(new_object);
3475 		vm_page_insert(new_page, new_object, new_offset);
3476 		vm_object_unlock(new_object);
3477 
3478 		do {
3479 			vm_prot_t       prot = VM_PROT_READ;
3480 			vm_page_t       _result_page;
3481 			vm_page_t       top_page;
3482 			vm_page_t       result_page;
3483 			kern_return_t   error_code;
3484 			vm_object_t     result_page_object;
3485 
3486 
3487 			vm_object_lock(src_object);
3488 
3489 			if (src_object->internal &&
3490 			    src_object->shadow == VM_OBJECT_NULL &&
3491 			    (src_object->pager == NULL ||
3492 			    (vm_object_compressor_pager_state_get(src_object,
3493 			    src_offset) ==
3494 			    VM_EXTERNAL_STATE_ABSENT))) {
3495 				boolean_t can_skip_page;
3496 
3497 				_result_page = vm_page_lookup(src_object,
3498 				    src_offset);
3499 				if (_result_page == VM_PAGE_NULL) {
3500 					/*
3501 					 * This page is neither resident nor
3502 					 * compressed and there's no shadow
3503 					 * object below "src_object", so this
3504 					 * page is really missing.
3505 					 * There's no need to zero-fill it just
3506 					 * to copy it:  let's leave it missing
3507 					 * in "new_object" and get zero-filled
3508 					 * on demand.
3509 					 */
3510 					can_skip_page = TRUE;
3511 				} else if (workaround_41447923 &&
3512 				    src_object->pager == NULL &&
3513 				    _result_page != VM_PAGE_NULL &&
3514 				    _result_page->vmp_busy &&
3515 				    _result_page->vmp_absent &&
3516 				    src_object->purgable == VM_PURGABLE_DENY &&
3517 				    !src_object->blocked_access) {
3518 					/*
3519 					 * This page is "busy" and "absent"
3520 					 * but not because we're waiting for
3521 					 * it to be decompressed.  It must
3522 					 * be because it's a "no zero fill"
3523 					 * page that is currently not
3524 					 * accessible until it gets overwritten
3525 					 * by a device driver.
3526 					 * Since its initial state would have
3527 					 * been "zero-filled", let's leave the
3528 					 * copy page missing and get zero-filled
3529 					 * on demand.
3530 					 */
3531 					assert(src_object->internal);
3532 					assert(src_object->shadow == NULL);
3533 					assert(src_object->pager == NULL);
3534 					can_skip_page = TRUE;
3535 					vm_page_busy_absent_skipped++;
3536 				} else {
3537 					can_skip_page = FALSE;
3538 				}
3539 				if (can_skip_page) {
3540 					vm_object_unlock(src_object);
3541 					/* free the unused "new_page"... */
3542 					vm_object_lock(new_object);
3543 					VM_PAGE_FREE(new_page);
3544 					new_page = VM_PAGE_NULL;
3545 					vm_object_unlock(new_object);
3546 					/* ...and go to next page in "src_object" */
3547 					result = VM_FAULT_SUCCESS;
3548 					break;
3549 				}
3550 			}
3551 
3552 			vm_object_paging_begin(src_object);
3553 
3554 			/* cap size at maximum UPL size */
3555 			upl_size_t cluster_size;
3556 			if (os_convert_overflow(size, &cluster_size)) {
3557 				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
3558 			}
3559 			fault_info.cluster_size = cluster_size;
3560 
3561 			_result_page = VM_PAGE_NULL;
3562 			result = vm_fault_page(src_object, src_offset,
3563 			    VM_PROT_READ, FALSE,
3564 			    FALSE,     /* page not looked up */
3565 			    &prot, &_result_page, &top_page,
3566 			    (int *)0,
3567 			    &error_code, FALSE, &fault_info);
3568 
3569 			switch (result) {
3570 			case VM_FAULT_SUCCESS:
3571 				result_page = _result_page;
3572 				result_page_object = VM_PAGE_OBJECT(result_page);
3573 
3574 				/*
3575 				 *	Copy the page to the new object.
3576 				 *
3577 				 *	POLICY DECISION:
3578 				 *		If result_page is clean,
3579 				 *		we could steal it instead
3580 				 *		of copying.
3581 				 */
3582 				vm_page_copy(result_page, new_page);
3583 
3584 				vm_object_unlock(result_page_object);
3585 
3586 				/*
3587 				 *	Let go of both pages (make them
3588 				 *	not busy, perform wakeup, activate).
3589 				 */
3590 				vm_object_lock(new_object);
3591 				SET_PAGE_DIRTY(new_page, FALSE);
3592 				vm_page_wakeup_done(new_object, new_page);
3593 				vm_object_unlock(new_object);
3594 
3595 				vm_object_lock(result_page_object);
3596 				vm_page_wakeup_done(result_page_object, result_page);
3597 
3598 				vm_page_lockspin_queues();
3599 				if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3600 				    (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
3601 					vm_page_activate(result_page);
3602 				}
3603 				vm_page_activate(new_page);
3604 				vm_page_unlock_queues();
3605 
3606 				/*
3607 				 *	Release paging references and
3608 				 *	top-level placeholder page, if any.
3609 				 */
3610 
3611 				vm_fault_cleanup(result_page_object,
3612 				    top_page);
3613 
3614 				break;
3615 
3616 			case VM_FAULT_RETRY:
3617 				break;
3618 
3619 			case VM_FAULT_MEMORY_SHORTAGE:
3620 				if (vm_page_wait(interruptible)) {
3621 					break;
3622 				}
3623 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJCOPYSLOWLY_MEMORY_SHORTAGE), 0 /* arg */);
3624 				OS_FALLTHROUGH;
3625 
3626 			case VM_FAULT_INTERRUPTED:
3627 				vm_object_lock(new_object);
3628 				VM_PAGE_FREE(new_page);
3629 				vm_object_unlock(new_object);
3630 
3631 				vm_object_deallocate(new_object);
3632 				vm_object_deallocate(src_object);
3633 				*_result_object = VM_OBJECT_NULL;
3634 				return MACH_SEND_INTERRUPTED;
3635 
3636 			case VM_FAULT_SUCCESS_NO_VM_PAGE:
3637 				/* success but no VM page: fail */
3638 				vm_object_paging_end(src_object);
3639 				vm_object_unlock(src_object);
3640 				OS_FALLTHROUGH;
3641 			case VM_FAULT_MEMORY_ERROR:
3642 				/*
3643 				 * A policy choice:
3644 				 *	(a) ignore pages that we can't
3645 				 *	    copy
3646 				 *	(b) return the null object if
3647 				 *	    any page fails [chosen]
3648 				 */
3649 
3650 				vm_object_lock(new_object);
3651 				VM_PAGE_FREE(new_page);
3652 				vm_object_unlock(new_object);
3653 
3654 				vm_object_deallocate(new_object);
3655 				vm_object_deallocate(src_object);
3656 				*_result_object = VM_OBJECT_NULL;
3657 				return error_code ? error_code:
3658 				       KERN_MEMORY_ERROR;
3659 
3660 			default:
3661 				panic("vm_object_copy_slowly: unexpected error"
3662 				    " 0x%x from vm_fault_page()\n", result);
3663 			}
3664 		} while (result != VM_FAULT_SUCCESS);
3665 	}
3666 
3667 	/*
3668 	 *	Lose the extra reference, and return our object.
3669 	 */
3670 	vm_object_deallocate(src_object);
3671 	*_result_object = new_object;
3672 	return KERN_SUCCESS;
3673 }
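/*
 * Hypothetical caller sketch for vm_object_copy_slowly() (not an actual call
 * site; the variable names are invented and the HAS_MTE argument is elided).
 * It simply restates the in/out conditions documented above: the caller holds
 * a reference and the lock on "src_object", which comes back unlocked.
 *
 *	vm_object_t	copy_object;
 *	kern_return_t	kr;
 *
 *	vm_object_lock(src_object);
 *	kr = vm_object_copy_slowly(src_object, src_offset, len,
 *	    TRUE, &copy_object);			interruptible copy
 *	if (kr != KERN_SUCCESS) {
 *		assert(copy_object == VM_OBJECT_NULL);
 *	}
 */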
3674 
3675 /*
3676  *	Routine:	vm_object_copy_quickly
3677  *
3678  *	Purpose:
3679  *		Copy the specified range of the source virtual
3680  *		memory object, if it can be done without waiting
3681  *		for user-generated events.
3682  *
3683  *	Results:
3684  *		If the copy is successful, the copy is returned in
3685  *		the arguments; otherwise, the arguments are not
3686  *		affected.
3687  *
3688  *	In/out conditions:
3689  *		The object should be unlocked on entry and exit.
3690  */
3691 
3692 /*ARGSUSED*/
3693 __private_extern__ boolean_t
3694 vm_object_copy_quickly(
3695 	vm_object_t             object,               /* IN */
3696 	__unused vm_object_offset_t     offset, /* IN */
3697 	__unused vm_object_size_t       size,   /* IN */
3698 	boolean_t               *_src_needs_copy,       /* OUT */
3699 	boolean_t               *_dst_needs_copy)       /* OUT */
3700 {
3701 	memory_object_copy_strategy_t copy_strategy;
3702 
3703 	if (object == VM_OBJECT_NULL) {
3704 		*_src_needs_copy = FALSE;
3705 		*_dst_needs_copy = FALSE;
3706 		return TRUE;
3707 	}
3708 
3709 	vm_object_lock(object);
3710 
3711 	copy_strategy = object->copy_strategy;
3712 
3713 	switch (copy_strategy) {
3714 	case MEMORY_OBJECT_COPY_SYMMETRIC:
3715 
3716 		/*
3717 		 *	Symmetric copy strategy.
3718 		 *	Make another reference to the object.
3719 		 *	Leave object/offset unchanged.
3720 		 */
3721 
3722 		vm_object_reference_locked(object);
3723 		VM_OBJECT_SET_SHADOWED(object, TRUE);
3724 		vm_object_unlock(object);
3725 
3726 		/*
3727 		 *	Both source and destination must make
3728 		 *	shadows, and the source must be made
3729 		 *	read-only if not already.
3730 		 */
3731 
3732 		*_src_needs_copy = TRUE;
3733 		*_dst_needs_copy = TRUE;
3734 
3735 		break;
3736 
3737 	case MEMORY_OBJECT_COPY_DELAY:
3738 		vm_object_unlock(object);
3739 		return FALSE;
3740 
3741 	default:
3742 		vm_object_unlock(object);
3743 		return FALSE;
3744 	}
3745 	return TRUE;
3746 }
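/*
 * Hypothetical caller sketch for vm_object_copy_quickly() (names invented,
 * not an actual call site), following the conditions documented above:
 * "object" is passed in unlocked and is returned unlocked.
 *
 *	boolean_t src_needs_copy, dst_needs_copy;
 *
 *	if (vm_object_copy_quickly(object, offset, size,
 *	    &src_needs_copy, &dst_needs_copy)) {
 *		symmetric strategy (or no object): the same object/offset
 *		are reused for the copy; the caller must honor
 *		src_needs_copy / dst_needs_copy, i.e. arrange for the
 *		source and destination to shadow the object as described
 *		in the switch above.
 *	} else {
 *		COPY_DELAY or another strategy: the caller has to fall
 *		back to a different copy path (delayed or slow copy).
 *	}
 */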
3747 
3748 static uint32_t copy_delayed_lock_collisions;
3749 static uint32_t copy_delayed_max_collisions;
3750 static uint32_t copy_delayed_lock_contention;
3751 static uint32_t copy_delayed_protect_iterate;
3752 
3753 #if XNU_TARGET_OS_OSX
3754 unsigned int vm_object_copy_delayed_paging_wait_disable = 0;
3755 #else /* XNU_TARGET_OS_OSX */
3756 unsigned int vm_object_copy_delayed_paging_wait_disable = 1;
3757 #endif /* XNU_TARGET_OS_OSX */
3758 
3759 /*
3760  *	Routine:	vm_object_copy_delayed [internal]
3761  *
3762  *	Description:
3763  *		Copy the specified virtual memory object, using
3764  *		the asymmetric copy-on-write algorithm.
3765  *
3766  *	In/out conditions:
3767  *		The src_object must be locked on entry.  It will be unlocked
3768  *		on exit - so the caller must also hold a reference to it.
3769  *
3770  *		This routine will not block waiting for user-generated
3771  *		events.  It is not interruptible.
3772  */
3773 __private_extern__ vm_object_t
3774 vm_object_copy_delayed(
3775 	vm_object_t             src_object,
3776 	vm_object_offset_t      src_offset,
3777 	vm_object_size_t        size,
3778 	boolean_t               src_object_shared)
3779 {
3780 	vm_object_t             new_copy = VM_OBJECT_NULL;
3781 	vm_object_t             old_copy;
3782 	vm_page_t               p;
3783 	vm_object_size_t        copy_size = src_offset + size;
3784 	pmap_flush_context      pmap_flush_context_storage;
3785 	boolean_t               delayed_pmap_flush = FALSE;
3786 
3787 
3788 	uint32_t collisions = 0;
3789 	/*
3790 	 *	The user-level memory manager wants to see all of the changes
3791 	 *	to this object, but it has promised not to make any changes on
3792 	 *	its own.
3793 	 *
3794 	 *	Perform an asymmetric copy-on-write, as follows:
3795 	 *		Create a new object, called a "copy object" to hold
3796 	 *		 pages modified by the new mapping  (i.e., the copy,
3797 	 *		 not the original mapping).
3798 	 *		Record the original object as the backing object for
3799 	 *		 the copy object.  If the original mapping does not
3800 	 *		 change a page, it may be used read-only by the copy.
3801 	 *		Record the copy object in the original object.
3802 	 *		 When the original mapping causes a page to be modified,
3803 	 *		 it must be copied to a new page that is "pushed" to
3804 	 *		 the copy object.
3805 	 *		Mark the new mapping (the copy object) copy-on-write.
3806 	 *		 This makes the copy object itself read-only, allowing
3807 	 *		 it to be reused if the original mapping makes no
3808 	 *		 changes, and simplifying the synchronization required
3809 	 *		 in the "push" operation described above.
3810 	 *
3811 	 *	The copy-on-write is said to be asymmetric because the original
3812 	 *	object is *not* marked copy-on-write. A copied page is pushed
3813 	 *	to the copy object, regardless of which party attempted to modify
3814 	 *	the page.
3815 	 *
3816 	 *	Repeated asymmetric copy operations may be done. If the
3817 	 *	original object has not been changed since the last copy, its
3818 	 *	copy object can be reused. Otherwise, a new copy object can be
3819 	 *	inserted between the original object and its previous copy
3820 	 *	object.  Since any copy object is read-only, this cannot
3821 	 *	affect the contents of the previous copy object.
3822 	 *
3823 	 *	Note that a copy object is higher in the object tree than the
3824 	 *	original object; therefore, use of the copy object recorded in
3825 	 *	the original object must be done carefully, to avoid deadlock.
3826 	 */
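	/*
	 * Illustrative sketch (informal, not additional specification) of the
	 * object chain this routine builds.  A mapping M1 of src_object stays
	 * where it is; the new mapping M2 uses the copy object returned below:
	 *
	 *	M1 ---> src_object <--- shadow --- new_copy <--- M2
	 *	            |                          ^
	 *	            +-------- vo_copy ---------+
	 *
	 * Any later modification of a page of src_object first pushes the
	 * original contents into new_copy, so M2 keeps seeing the data as it
	 * was at copy time.  If src_object is copied again before being
	 * modified, the existing copy object is simply reused (the "old_copy"
	 * path below).
	 */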
3827 
3828 	copy_size = vm_object_round_page(copy_size);
3829 Retry:
3830 	if (!vm_object_copy_delayed_paging_wait_disable) {
3831 		/*
3832 		 * Wait for paging in progress.
3833 		 */
3834 		if (!src_object->true_share &&
3835 		    (src_object->paging_in_progress != 0 ||
3836 		    src_object->activity_in_progress != 0)) {
3837 			if (src_object_shared == TRUE) {
3838 				vm_object_unlock(src_object);
3839 				vm_object_lock(src_object);
3840 				src_object_shared = FALSE;
3841 				goto Retry;
3842 			}
3843 			vm_object_paging_wait(src_object, THREAD_UNINT);
3844 		}
3845 	}
3846 	if (src_object->vmo_pl_req_in_progress) {
3847 		if (src_object_shared) {
3848 			vm_object_unlock(src_object);
3849 			vm_object_lock(src_object);
3850 			src_object_shared = false;
3851 			goto Retry;
3852 		}
3853 		vm_object_pl_req_wait(src_object, THREAD_UNINT);
3854 	}
3855 
3856 	/*
3857 	 *	See whether we can reuse the result of a previous
3858 	 *	copy operation.
3859 	 */
3860 
3861 	old_copy = src_object->vo_copy;
3862 	if (old_copy != VM_OBJECT_NULL) {
3863 		int lock_granted;
3864 
3865 		/*
3866 		 *	Try to get the locks (out of order)
3867 		 */
3868 		if (src_object_shared == TRUE) {
3869 			lock_granted = vm_object_lock_try_shared(old_copy);
3870 		} else {
3871 			lock_granted = vm_object_lock_try(old_copy);
3872 		}
3873 
3874 		if (!lock_granted) {
3875 			vm_object_unlock(src_object);
3876 
3877 			if (collisions++ == 0) {
3878 				copy_delayed_lock_contention++;
3879 			}
3880 			mutex_pause(collisions);
3881 
3882 			/* Heisenberg Rules */
3883 			copy_delayed_lock_collisions++;
3884 
3885 			if (collisions > copy_delayed_max_collisions) {
3886 				copy_delayed_max_collisions = collisions;
3887 			}
3888 
3889 			if (src_object_shared == TRUE) {
3890 				vm_object_lock_shared(src_object);
3891 			} else {
3892 				vm_object_lock(src_object);
3893 			}
3894 
3895 			goto Retry;
3896 		}
3897 
3898 		/*
3899 		 *	Determine whether the old copy object has
3900 		 *	been modified.
3901 		 */
3902 
3903 		if (old_copy->resident_page_count == 0 &&
3904 		    !old_copy->pager_created) {
3905 			/*
3906 			 *	It has not been modified.
3907 			 *
3908 			 *	Return another reference to
3909 			 *	the existing copy-object if
3910 			 *	we can safely grow it (if
3911 			 *	needed).
3912 			 */
3913 
3914 			if (old_copy->vo_size < copy_size) {
3915 				if (src_object_shared == TRUE) {
3916 					vm_object_unlock(old_copy);
3917 					vm_object_unlock(src_object);
3918 
3919 					vm_object_lock(src_object);
3920 					src_object_shared = FALSE;
3921 					goto Retry;
3922 				}
3923 				/*
3924 				 * We can't perform a delayed copy if any of the
3925 				 * pages in the extended range are wired (because
3926 				 * we can't safely take write permission away from
3927 				 * wired pages).  If the pages aren't wired, then
3928 				 * go ahead and protect them.
3929 				 */
3930 				copy_delayed_protect_iterate++;
3931 
3932 				pmap_flush_context_init(&pmap_flush_context_storage);
3933 				delayed_pmap_flush = FALSE;
3934 
3935 				vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3936 					if (!vm_page_is_fictitious(p) &&
3937 					    p->vmp_offset >= old_copy->vo_size &&
3938 					    p->vmp_offset < copy_size) {
3939 						if (p->vmp_busy && p->vmp_absent) {
3940 							/*
3941 							 * A busy/absent page is still
3942 							 * waiting for its contents.
3943 							 * It should not be mapped in user
3944 							 * space (because it has no valid
3945 							 * contents) so no need to
3946 							 * write-protect it for copy-on-write.
3947 							 * It could have been mapped in the
3948 							 * kernel by the content provider
3949 							 * (a network filesystem, for example)
3950 							 * and we do not want to write-protect
3951 							 * that mapping, so we skip this page.
3952 							 */
3953 							continue;
3954 						}
3955 						if (VM_PAGE_WIRED(p)) {
3956 							vm_object_unlock(old_copy);
3957 							vm_object_unlock(src_object);
3958 
3959 							if (new_copy != VM_OBJECT_NULL) {
3960 								vm_object_unlock(new_copy);
3961 								vm_object_deallocate(new_copy);
3962 							}
3963 							if (delayed_pmap_flush == TRUE) {
3964 								pmap_flush(&pmap_flush_context_storage);
3965 							}
3966 
3967 							return VM_OBJECT_NULL;
3968 						} else {
3969 							pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
3970 							    (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
3971 							    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3972 							delayed_pmap_flush = TRUE;
3973 						}
3974 					}
3975 				}
3976 				if (delayed_pmap_flush == TRUE) {
3977 					pmap_flush(&pmap_flush_context_storage);
3978 				}
3979 
3980 				assertf(page_aligned(copy_size),
3981 				    "object %p size 0x%llx",
3982 				    old_copy, (uint64_t)copy_size);
3983 				old_copy->vo_size = copy_size;
3984 
3985 				/*
3986 				 * src_object's "vo_copy" object now covers
3987 				 * a larger portion of src_object.
3988 				 * Increment src_object's "vo_copy_version"
3989 				 * to make any racing vm_fault() on
3990 				 * "src_object" re-check if it needs to honor
3991 				 * any new copy-on-write obligation.
3992 				 */
3993 				src_object->vo_copy_version++;
3994 			}
3995 			if (src_object_shared == TRUE) {
3996 				vm_object_reference_shared(old_copy);
3997 			} else {
3998 				vm_object_reference_locked(old_copy);
3999 			}
4000 			assert3u(old_copy->copy_strategy, ==, MEMORY_OBJECT_COPY_SYMMETRIC);
4001 			vm_object_unlock(old_copy);
4002 			vm_object_unlock(src_object);
4003 
4004 			if (new_copy != VM_OBJECT_NULL) {
4005 				vm_object_unlock(new_copy);
4006 				vm_object_deallocate(new_copy);
4007 			}
4008 			return old_copy;
4009 		}
4010 
4011 
4012 
4013 		/*
4014 		 * Adjust the size argument so that the newly-created
4015 		 * copy object will be large enough to back either the
4016 		 * old copy object or the new mapping.
4017 		 */
4018 		if (old_copy->vo_size > copy_size) {
4019 			copy_size = old_copy->vo_size;
4020 		}
4021 
4022 		if (new_copy == VM_OBJECT_NULL) {
4023 			vm_object_unlock(old_copy);
4024 			vm_object_unlock(src_object);
4025 			/* Carry over the provenance from the object that's backing us */
4026 			new_copy = vm_object_allocate(copy_size, src_object->vmo_provenance);
4027 			vm_object_lock(src_object);
4028 			vm_object_lock(new_copy);
4029 
4030 			src_object_shared = FALSE;
4031 			goto Retry;
4032 		}
4033 		assertf(page_aligned(copy_size),
4034 		    "object %p size 0x%llx",
4035 		    new_copy, (uint64_t)copy_size);
4036 		new_copy->vo_size = copy_size;
4037 
4038 		/*
4039 		 *	The copy-object is always made large enough to
4040 		 *	completely shadow the original object, since
4041 		 *	it may have several users who want to shadow
4042 		 *	the original object at different points.
4043 		 */
4044 
4045 		assert((old_copy->shadow == src_object) &&
4046 		    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
4047 	} else if (new_copy == VM_OBJECT_NULL) {
4048 		vm_object_unlock(src_object);
4049 		/* Carry over the provenance from the object that's backing us */
4050 		new_copy = vm_object_allocate(copy_size, src_object->vmo_provenance);
4051 		vm_object_lock(src_object);
4052 		vm_object_lock(new_copy);
4053 
4054 		src_object_shared = FALSE;
4055 		goto Retry;
4056 	}
4057 
4058 	/*
4059 	 * We now have the src object locked, and the new copy object
4060 	 * allocated and locked (and potentially the old copy locked).
4061 	 * Before we go any further, make sure we can still perform
4062 	 * a delayed copy, as the situation may have changed.
4063 	 *
4064 	 * Specifically, we can't perform a delayed copy if any of the
4065 	 * pages in the range are wired (because we can't safely take
4066 	 * write permission away from wired pages).  If the pages aren't
4067 	 * wired, then go ahead and protect them.
4068 	 */
4069 	copy_delayed_protect_iterate++;
4070 
4071 	pmap_flush_context_init(&pmap_flush_context_storage);
4072 	delayed_pmap_flush = FALSE;
4073 
4074 	vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
4075 		if (!vm_page_is_fictitious(p) && p->vmp_offset < copy_size) {
4076 			if (VM_PAGE_WIRED(p)) {
4077 				if (old_copy) {
4078 					vm_object_unlock(old_copy);
4079 				}
4080 				vm_object_unlock(src_object);
4081 				vm_object_unlock(new_copy);
4082 				vm_object_deallocate(new_copy);
4083 
4084 				if (delayed_pmap_flush == TRUE) {
4085 					pmap_flush(&pmap_flush_context_storage);
4086 				}
4087 
4088 				return VM_OBJECT_NULL;
4089 			} else {
4090 				pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
4091 				    (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
4092 				    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
4093 				delayed_pmap_flush = TRUE;
4094 			}
4095 		}
4096 	}
4097 	if (delayed_pmap_flush == TRUE) {
4098 		pmap_flush(&pmap_flush_context_storage);
4099 	}
4100 
4101 	if (old_copy != VM_OBJECT_NULL) {
4102 		/*
4103 		 *	Make the old copy-object shadow the new one.
4104 		 *	It will receive no more pages from the original
4105 		 *	object.
4106 		 */
4107 
4108 		/* remove ref. from old_copy */
4109 		vm_object_lock_assert_exclusive(src_object);
4110 		os_ref_release_live_locked_raw(&src_object->ref_count,
4111 		    &vm_object_refgrp);
4112 		vm_object_lock_assert_exclusive(old_copy);
4113 		old_copy->shadow = new_copy;
4114 		vm_object_lock_assert_exclusive(new_copy);
4115 		assert(os_ref_get_count_raw(&new_copy->ref_count) > 0);
4116 		/* for old_copy->shadow ref. */
4117 		os_ref_retain_locked_raw(&new_copy->ref_count, &vm_object_refgrp);
4118 
4119 		vm_object_unlock(old_copy);     /* done with old_copy */
4120 	}
4121 
4122 	/*
4123 	 *	Point the new copy at the existing object.
4124 	 */
4125 	vm_object_lock_assert_exclusive(new_copy);
4126 	new_copy->shadow = src_object;
4127 	new_copy->vo_shadow_offset = 0;
4128 	VM_OBJECT_SET_SHADOWED(new_copy, TRUE);      /* caller must set needs_copy */
4129 
4130 	vm_object_lock_assert_exclusive(src_object);
4131 	vm_object_reference_locked(src_object);
4132 	VM_OBJECT_COPY_SET(src_object, new_copy);
4133 	vm_object_unlock(src_object);
4134 	assert3u(new_copy->copy_strategy, ==, MEMORY_OBJECT_COPY_SYMMETRIC);
4135 	vm_object_unlock(new_copy);
4136 
4137 	return new_copy;
4138 }
4139 
4140 /*
4141  *	Routine:	vm_object_copy_strategically
4142  *
4143  *	Purpose:
4144  *		Perform a copy according to the source object's
4145  *		declared strategy.  This operation may block,
4146  *		and may be interrupted.
4147  */
4148 __private_extern__ kern_return_t
4149 vm_object_copy_strategically(
4150 	vm_object_t             src_object,
4151 	vm_object_offset_t      src_offset,
4152 	vm_object_size_t        size,
4153 	bool                    forking,
4154 	vm_object_t             *dst_object,    /* OUT */
4155 	vm_object_offset_t      *dst_offset,    /* OUT */
4156 	boolean_t               *dst_needs_copy) /* OUT */
4157 {
4158 	boolean_t       result;
4159 	boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
4160 	boolean_t       object_lock_shared = FALSE;
4161 	memory_object_copy_strategy_t copy_strategy;
4162 
4163 	assert(src_object != VM_OBJECT_NULL);
4164 
4165 	copy_strategy = src_object->copy_strategy;
4166 
4167 	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
4168 		vm_object_lock_shared(src_object);
4169 		object_lock_shared = TRUE;
4170 	} else {
4171 		vm_object_lock(src_object);
4172 	}
4173 
4174 	/*
4175 	 *	The copy strategy is only valid if the memory manager
4176 	 *	is "ready". Internal objects are always ready.
4177 	 */
4178 
4179 	while (!src_object->internal && !src_object->pager_ready) {
4180 		wait_result_t wait_result;
4181 
4182 		if (object_lock_shared == TRUE) {
4183 			vm_object_unlock(src_object);
4184 			vm_object_lock(src_object);
4185 			object_lock_shared = FALSE;
4186 			continue;
4187 		}
4188 		wait_result = vm_object_sleep(  src_object,
4189 		    VM_OBJECT_EVENT_PAGER_READY,
4190 		    interruptible, LCK_SLEEP_EXCLUSIVE);
4191 		if (wait_result != THREAD_AWAKENED) {
4192 			vm_object_unlock(src_object);
4193 			*dst_object = VM_OBJECT_NULL;
4194 			*dst_offset = 0;
4195 			*dst_needs_copy = FALSE;
4196 			return MACH_SEND_INTERRUPTED;
4197 		}
4198 	}
4199 
4200 	/*
4201 	 *	Use the appropriate copy strategy.
4202 	 */
4203 
4204 	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) {
4205 		if (forking) {
4206 			copy_strategy = MEMORY_OBJECT_COPY_DELAY;
4207 		} else {
4208 			copy_strategy = MEMORY_OBJECT_COPY_NONE;
4209 			if (object_lock_shared) {
4210 				vm_object_unlock(src_object);
4211 				vm_object_lock(src_object);
4212 				object_lock_shared = FALSE;
4213 			}
4214 		}
4215 	}
4216 
4217 	switch (copy_strategy) {
4218 	case MEMORY_OBJECT_COPY_DELAY:
4219 		*dst_object = vm_object_copy_delayed(src_object,
4220 		    src_offset, size, object_lock_shared);
4221 		if (*dst_object != VM_OBJECT_NULL) {
4222 			*dst_offset = src_offset;
4223 			*dst_needs_copy = TRUE;
4224 			result = KERN_SUCCESS;
4225 			break;
4226 		}
4227 		vm_object_lock(src_object);
4228 		OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */
4229 
4230 	case MEMORY_OBJECT_COPY_NONE:
4231 		result = vm_object_copy_slowly(src_object,
4232 		    src_offset, size,
4233 		    interruptible,
4234 #if HAS_MTE
4235 		    forking && vm_object_is_mte_mappable(src_object), /* create_mte_object */
4236 #endif /* HAS_MTE */
4237 		    dst_object);
4238 		if (result == KERN_SUCCESS) {
4239 			*dst_offset = src_offset - vm_object_trunc_page(src_offset);
4240 			*dst_needs_copy = FALSE;
4241 		}
4242 		break;
4243 
4244 	case MEMORY_OBJECT_COPY_SYMMETRIC:
4245 		vm_object_unlock(src_object);
4246 		result = KERN_MEMORY_RESTART_COPY;
4247 		break;
4248 
4249 	default:
4250 		panic("copy_strategically: bad strategy %d for object %p",
4251 		    copy_strategy, src_object);
4252 		result = KERN_INVALID_ARGUMENT;
4253 	}
4254 	return result;
4255 }
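/*
 * Quick reference (informal summary of the dispatch above, not additional
 * behavior):
 *
 *	COPY_DELAY      -> vm_object_copy_delayed(); *dst_needs_copy = TRUE;
 *	                   falls through to the COPY_NONE path if the delayed
 *	                   copy is refused (e.g. wired pages in the range);
 *	COPY_NONE       -> vm_object_copy_slowly(); pages are copied eagerly
 *	                   and *dst_needs_copy = FALSE;
 *	COPY_DELAY_FORK -> treated as COPY_DELAY when forking, as COPY_NONE
 *	                   otherwise;
 *	COPY_SYMMETRIC  -> not handled here; KERN_MEMORY_RESTART_COPY tells
 *	                   the caller to restart the copy by other means.
 */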
4256 
4257 /*
4258  *	vm_object_shadow:
4259  *
4260  *	Create a new object which is backed by the
4261  *	specified existing object range.  The source
4262  *	object reference is deallocated.
4263  *
4264  *	The new object and offset into that object
4265  *	are returned in the source parameters.
4266  */
4267 boolean_t vm_object_shadow_check = TRUE;
4268 uint64_t vm_object_shadow_forced = 0;
4269 uint64_t vm_object_shadow_skipped = 0;
4270 
4271 __private_extern__ boolean_t
4272 vm_object_shadow(
4273 	vm_object_t             *object,        /* IN/OUT */
4274 	vm_object_offset_t      *offset,        /* IN/OUT */
4275 	vm_object_size_t        length,
4276 	boolean_t               always_shadow)
4277 {
4278 	vm_object_t     source;
4279 	vm_object_t     result;
4280 
4281 	source = *object;
4282 	assert(source != VM_OBJECT_NULL);
4283 	if (source == VM_OBJECT_NULL) {
4284 		return FALSE;
4285 	}
4286 
4287 	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
4288 
4289 	/*
4290 	 *	Determine if we really need a shadow.
4291 	 *
4292 	 *	If the source object is larger than what we are trying
4293 	 *	to create, then force the shadow creation even if the
4294 	 *	ref count is 1.  This will allow us to [potentially]
4295 	 *	collapse the underlying object away in the future
4296 	 *	(freeing up the extra data it might contain and that
4297 	 *	we don't need).
4298 	 */
4299 
4300 	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
4301 
4302 	/*
4303 	 * The following optimization does not work in the context of submaps
4304 	 * (the shared region, in particular).
4305 	 * This object might have only 1 reference (in the submap) but that
4306 	 * submap can itself be mapped multiple times, so the object is
4307 	 * actually indirectly referenced more than once...
4308 	 * The caller can specify "always_shadow" to bypass the optimization.
4309 	 */
4310 	if (vm_object_shadow_check &&
4311 	    source->vo_size == length &&
4312 	    os_ref_get_count_raw(&source->ref_count) == 1) {
4313 		if (always_shadow) {
4314 			vm_object_shadow_forced++;
4315 		} else {
4316 			/*
4317 			 * Lock the object and check again.
4318 			 * We also check to see if there's
4319 			 * a shadow or copy object involved.
4320 			 * We can't do that earlier because
4321 			 * without the object locked, there
4322 			 * could be a collapse and the chain
4323 			 * gets modified leaving us with an
4324 			 * invalid pointer.
4325 			 */
4326 			vm_object_lock(source);
4327 			if (source->vo_size == length &&
4328 			    os_ref_get_count_raw(&source->ref_count) == 1 &&
4329 			    (source->shadow == VM_OBJECT_NULL ||
4330 			    source->shadow->vo_copy == VM_OBJECT_NULL)) {
4331 				VM_OBJECT_SET_SHADOWED(source, FALSE);
4332 				vm_object_unlock(source);
4333 				vm_object_shadow_skipped++;
4334 				return FALSE;
4335 			}
4336 			/* things changed while we were locking "source"... */
4337 			vm_object_unlock(source);
4338 		}
4339 	}
4340 
4341 	/*
4342 	 * *offset is the map entry's offset into the VM object and
4343 	 * is aligned to the map's page size.
4344 	 * VM objects need to be aligned to the system's page size.
4345 	 * Record the necessary adjustment and re-align the offset so
4346 	 * that result->vo_shadow_offset is properly page-aligned.
4347 	 */
4348 	vm_object_offset_t offset_adjustment;
4349 	offset_adjustment = *offset - vm_object_trunc_page(*offset);
4350 	length = vm_object_round_page(length + offset_adjustment);
4351 	*offset = vm_object_trunc_page(*offset);
4352 
4353 	/*
4354 	 *	Allocate a new object with the given length
4355 	 */
4356 
4357 	if ((result = vm_object_allocate(length, source->vmo_provenance)) == VM_OBJECT_NULL) {
4358 		panic("vm_object_shadow: no object for shadowing");
4359 	}
4360 
4361 	/*
4362 	 *	The new object shadows the source object, adding
4363 	 *	a reference to it.  Our caller changes his reference
4364 	 *	to point to the new object, removing a reference to
4365 	 *	the source object.  Net result: no change of reference
4366 	 *	count.
4367 	 */
4368 	result->shadow = source;
4369 
4370 	/*
4371 	 *	Store the offset into the source object,
4372 	 *	and fix up the offset into the new object.
4373 	 */
4374 
4375 	result->vo_shadow_offset = *offset;
4376 	assertf(page_aligned(result->vo_shadow_offset),
4377 	    "result %p shadow offset 0x%llx",
4378 	    result, result->vo_shadow_offset);
4379 
4380 	/*
4381 	 *	Return the new things
4382 	 */
4383 
4384 	*offset = 0;
4385 	if (offset_adjustment) {
4386 		/*
4387 		 * Make the map entry point to the equivalent offset
4388 		 * in the new object.
4389 		 */
4390 		DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
4391 		*offset += offset_adjustment;
4392 	}
4393 	*object = result;
4394 	return TRUE;
4395 }
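/*
 * Hypothetical usage sketch for vm_object_shadow() (names invented, not an
 * actual call site).  "object" and "offset" are in/out: on a TRUE return they
 * are updated to designate the new shadow object, and the caller's reference
 * is, in effect, transferred into the shadow chain (net reference count
 * unchanged), per the comments above.
 *
 *	vm_object_t		obj = entry_object;	caller's reference
 *	vm_object_offset_t	off = entry_offset;
 *
 *	if (vm_object_shadow(&obj, &off, entry_size, FALSE)) {
 *		"obj" is now the shadow object backed by entry_object;
 *		"off" is the (re-aligned) offset into that shadow.
 *	} else {
 *		shadowing was deemed unnecessary; "obj" and "off" are
 *		unchanged.
 *	}
 */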
4396 
4397 /*
4398  *	The relationship between vm_object structures and
4399  *	the memory_object requires careful synchronization.
4400  *
4401  *	All associations are created by memory_object_create_named
4402  *  for external pagers and vm_object_compressor_pager_create for internal
4403  *  objects as follows:
4404  *
4405  *		pager:	the memory_object itself, supplied by
4406  *			the user requesting a mapping (or the kernel,
4407  *			when initializing internal objects); the
4408  *			kernel simulates holding send rights by keeping
4409  *			a port reference;
4410  *
4411  *		pager_request:
4412  *			the memory object control port,
4413  *			created by the kernel; the kernel holds
4414  *			receive (and ownership) rights to this
4415  *			port, but no other references.
4416  *
4417  *	When initialization is complete, the "initialized" field
4418  *	is asserted.  Other mappings using a particular memory object,
4419  *	and any references to the vm_object gained through the
4420  *	port association must wait for this initialization to occur.
4421  *
4422  *	In order to allow the memory manager to set attributes before
4423  *	requests (notably virtual copy operations, but also data or
4424  *	unlock requests) are made, a "ready" attribute is made available.
4425  *	Only the memory manager may affect the value of this attribute.
4426  *	Its value does not affect critical kernel functions, such as
4427  *	internal object initialization or destruction.  [Furthermore,
4428  *	memory objects created by the kernel are assumed to be ready
4429  *	immediately; the default memory manager need not explicitly
4430  *	set the "ready" attribute.]
4431  *
4432  *	[Both the "initialized" and "ready" attribute wait conditions
4433  *	use the "pager" field as the wait event.]
4434  *
4435  *	The port associations can be broken down by any of the
4436  *	following routines:
4437  *		vm_object_terminate:
4438  *			No references to the vm_object remain, and
4439  *			the object cannot (or will not) be cached.
4440  *			This is the normal case, and is done even
4441  *			though one of the other cases has already been
4442  *			done.
4443  *		memory_object_destroy:
4444  *			The memory manager has requested that the
4445  *			kernel relinquish references to the memory
4446  *			object. [The memory manager may not want to
4447  *			destroy the memory object, but may wish to
4448  *			refuse or tear down existing memory mappings.]
4449  *
4450  *	Each routine that breaks an association must break all of
4451  *	them at once.  At some later time, that routine must clear
4452  *	the pager field and release the memory object references.
4453  *	[Furthermore, each routine must cope with the simultaneous
4454  *	or previous operations of the others.]
4455  *
4456  *	Because the pager field may be cleared spontaneously, it
4457  *	cannot be used to determine whether a memory object has
4458  *	ever been associated with a particular vm_object.  [This
4459  *	knowledge is important to the shadow object mechanism.]
4460  *	For this reason, an additional "created" attribute is
4461  *	provided.
4462  *
4463  *	During various paging operations, the pager reference found in the
4464  *	vm_object must be valid.  To prevent this from being released,
4465  *	(other than being removed, i.e., made null), routines may use
4466  *	the vm_object_paging_begin/end routines [actually, macros].
4467  *	The implementation uses the "paging_in_progress" and "wanted" fields.
4468  *	[Operations that alter the validity of the pager values include the
4469  *	termination routines and vm_object_collapse.]
4470  */
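/*
 * Condensed view of the lifecycle described above (informal summary only):
 *
 *	1. association:     "pager" and "pager_control" are set and the
 *	                    "created" attribute is asserted;
 *	2. initialization:  "initialized" is asserted once the memory object
 *	                    has been initialized; waiters for both the
 *	                    "initialized" and "ready" attributes use the
 *	                    "pager" field as the wait event;
 *	3. readiness:       "ready" is set by the memory manager (implicitly
 *	                    and immediately for kernel-created objects);
 *	4. teardown:        vm_object_terminate() or memory_object_destroy()
 *	                    breaks all of the associations at once, and later
 *	                    clears the pager field and releases the memory
 *	                    object references.
 */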
4471 
4472 
4473 /*
4474  *	Routine:	vm_object_memory_object_associate
4475  *	Purpose:
4476  *		Associate a VM object to the given pager.
4477  *		If a VM object is not provided, create one.
4478  *		Initialize the pager.
4479  */
4480 vm_object_t
4481 vm_object_memory_object_associate(
4482 	memory_object_t         pager,
4483 	vm_object_t             object,
4484 	vm_object_size_t        size,
4485 	boolean_t               named)
4486 {
4487 	memory_object_control_t control;
4488 
4489 	assert(pager != MEMORY_OBJECT_NULL);
4490 
4491 	if (object != VM_OBJECT_NULL) {
4492 		vm_object_lock(object);
4493 		assert(object->internal);
4494 		assert(object->pager_created);
4495 		assert(!object->pager_initialized);
4496 		assert(!object->pager_ready);
4497 		assert(object->pager_trusted);
4498 	} else {
4499 		/* No provenance yet */
4500 		object = vm_object_allocate(size, VM_MAP_SERIAL_NONE);
4501 		assert(object != VM_OBJECT_NULL);
4502 		vm_object_lock(object);
4503 		VM_OBJECT_SET_INTERNAL(object, FALSE);
4504 		VM_OBJECT_SET_PAGER_TRUSTED(object, FALSE);
4505 		/* copy strategy invalid until set by memory manager */
4506 		object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4507 	}
4508 
4509 	/*
4510 	 *	Allocate request port.
4511 	 */
4512 
4513 	control = memory_object_control_allocate(object);
4514 	assert(control != MEMORY_OBJECT_CONTROL_NULL);
4515 
4516 	assert(!object->pager_ready);
4517 	assert(!object->pager_initialized);
4518 	assert(object->pager == NULL);
4519 	assert(object->pager_control == NULL);
4520 
4521 	/*
4522 	 *	Copy the reference we were given.
4523 	 */
4524 
4525 	memory_object_reference(pager);
4526 	VM_OBJECT_SET_PAGER_CREATED(object, TRUE);
4527 	object->pager = pager;
4528 	object->pager_control = control;
4529 	VM_OBJECT_SET_PAGER_READY(object, FALSE);
4530 
4531 	vm_object_unlock(object);
4532 
4533 	/*
4534 	 *	Let the pager know we're using it.
4535 	 */
4536 
4537 	(void) memory_object_init(pager,
4538 	    object->pager_control,
4539 	    PAGE_SIZE);
4540 
4541 	vm_object_lock(object);
4542 	if (named) {
4543 		VM_OBJECT_SET_NAMED(object, TRUE);
4544 	}
4545 	if (object->internal) {
4546 		VM_OBJECT_SET_PAGER_READY(object, TRUE);
4547 		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4548 	}
4549 
4550 	VM_OBJECT_SET_PAGER_INITIALIZED(object, TRUE);
4551 	// vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_INIT);
4552 
4553 	vm_object_unlock(object);
4554 
4555 	return object;
4556 }
4557 
4558 /*
4559  *	Routine:	vm_object_compressor_pager_create
4560  *	Purpose:
4561  *		Create a memory object for an internal object.
4562  *	In/out conditions:
4563  *		The object is locked on entry and exit;
4564  *		it may be unlocked within this call.
4565  *	Limitations:
4566  *		Only one thread may be performing a
4567  *		vm_object_compressor_pager_create on an object at
4568  *		a time.  Presumably, only the pageout
4569  *		daemon will be using this routine.
4570  */
4571 
4572 void
4573 vm_object_compressor_pager_create(
4574 	vm_object_t     object)
4575 {
4576 	memory_object_t         pager;
4577 	vm_object_t             pager_object = VM_OBJECT_NULL;
4578 
4579 	assert(!is_kernel_object(object));
4580 
4581 	/*
4582 	 *	Prevent collapse or termination by holding a paging reference
4583 	 */
4584 
4585 	vm_object_paging_begin(object);
4586 	if (object->pager_created) {
4587 		/*
4588 		 *	Someone else got to it first...
4589 		 *	wait for them to finish initializing the ports
4590 		 */
4591 		while (!object->pager_ready) {
4592 			vm_object_sleep(object,
4593 			    VM_OBJECT_EVENT_PAGER_READY,
4594 			    THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
4595 		}
4596 		vm_object_paging_end(object);
4597 		return;
4598 	}
4599 
4600 	if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
4601 	    (object->vo_size / PAGE_SIZE)) {
4602 #if DEVELOPMENT || DEBUG
4603 		printf("vm_object_compressor_pager_create(%p): "
4604 		    "object size 0x%llx >= 0x%llx\n",
4605 		    object,
4606 		    (uint64_t) object->vo_size,
4607 		    0x0FFFFFFFFULL * PAGE_SIZE);
4608 #endif /* DEVELOPMENT || DEBUG */
4609 		vm_object_paging_end(object);
4610 		return;
4611 	}
4612 
4613 #if HAS_MTE /* TODO: remove this when MTE support in the compressor is finalized */
4614 	if (!vm_object_allow_compressor_pager_for_mte && vm_object_is_mte_mappable(object)) {
4615 		vm_object_no_compressor_pager_for_mte_count++;
4616 		vm_object_paging_end(object);
4617 		return;
4618 	}
4619 #endif
4620 
4621 	/*
4622 	 *	Indicate that a memory object has been assigned
4623 	 *	before dropping the lock, to prevent a race.
4624 	 */
4625 
4626 	VM_OBJECT_SET_PAGER_CREATED(object, TRUE);
4627 	VM_OBJECT_SET_PAGER_TRUSTED(object, TRUE);
4628 	object->paging_offset = 0;
4629 
4630 	vm_object_unlock(object);
4631 
4632 	/*
4633 	 *	Create the [internal] pager, and associate it with this object.
4634 	 *
4635 	 *	We make the association here so that vm_object_enter()
4636 	 *      can look up the object to complete initializing it.  No
4637 	 *	user will ever map this object.
4638 	 */
4639 	{
4640 		/* create our new memory object */
4641 		assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
4642 		    (object->vo_size / PAGE_SIZE));
4643 		(void) compressor_memory_object_create(
4644 			(memory_object_size_t) object->vo_size,
4645 			&pager);
4646 		if (pager == NULL) {
4647 			panic("vm_object_compressor_pager_create(): "
4648 			    "no pager for object %p size 0x%llx\n",
4649 			    object, (uint64_t) object->vo_size);
4650 		}
4651 	}
4652 
4653 	/*
4654 	 *	A reference was returned by
4655 	 *	memory_object_create(), and it is
4656 	 *	copied by vm_object_memory_object_associate().
4657 	 */
4658 
4659 	pager_object = vm_object_memory_object_associate(pager,
4660 	    object,
4661 	    object->vo_size,
4662 	    FALSE);
4663 	if (pager_object != object) {
4664 		panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)", pager, pager_object, object, (uint64_t) object->vo_size);
4665 	}
4666 
4667 	/*
4668 	 *	Drop the reference we were passed.
4669 	 */
4670 	memory_object_deallocate(pager);
4671 
4672 	vm_object_lock(object);
4673 
4674 	/*
4675 	 *	Release the paging reference
4676 	 */
4677 	vm_object_paging_end(object);
4678 }
4679 
4680 vm_external_state_t
4681 vm_object_compressor_pager_state_get(
4682 	vm_object_t        object,
4683 	vm_object_offset_t offset)
4684 {
4685 	if (__probable(not_in_kdp)) {
4686 		vm_object_lock_assert_held(object);
4687 	}
4688 	if (object->internal &&
4689 	    object->pager != NULL &&
4690 	    !object->terminating &&
4691 	    object->alive) {
4692 		return vm_compressor_pager_state_get(object->pager,
4693 		           offset + object->paging_offset);
4694 	} else {
4695 		return VM_EXTERNAL_STATE_UNKNOWN;
4696 	}
4697 }
4698 
4699 void
4700 vm_object_compressor_pager_state_clr(
4701 	vm_object_t        object,
4702 	vm_object_offset_t offset)
4703 {
4704 	unsigned int num_pages_cleared;
4705 	vm_object_lock_assert_exclusive(object);
4706 	if (object->internal &&
4707 	    object->pager != NULL &&
4708 	    !object->terminating &&
4709 	    object->alive) {
4710 		num_pages_cleared = vm_compressor_pager_state_clr(object->pager,
4711 		    offset + object->paging_offset);
4712 		if (num_pages_cleared) {
4713 			vm_compressor_pager_count(object->pager,
4714 			    -num_pages_cleared,
4715 			    FALSE, /* shared */
4716 			    object);
4717 		}
4718 		if (num_pages_cleared &&
4719 		    (object->purgable != VM_PURGABLE_DENY || object->vo_ledger_tag)) {
4720 			/* less compressed purgeable/tagged pages */
4721 			assert3u(num_pages_cleared, ==, 1);
4722 			vm_object_owner_compressed_update(object, -num_pages_cleared);
4723 		}
4724 	}
4725 }
4726 
4727 /*
4728  *	Global variables for vm_object_collapse():
4729  *
4730  *		Counts for normal collapses and bypasses.
4731  *		Debugging variables, to watch or disable collapse.
4732  */
4733 static long     object_collapses = 0;
4734 static long     object_bypasses  = 0;
4735 
4736 static boolean_t        vm_object_collapse_allowed = TRUE;
4737 static boolean_t        vm_object_bypass_allowed = TRUE;
4738 
4739 void vm_object_do_collapse_compressor(vm_object_t object,
4740     vm_object_t backing_object);
4741 void
4742 vm_object_do_collapse_compressor(
4743 	vm_object_t object,
4744 	vm_object_t backing_object)
4745 {
4746 	vm_object_offset_t new_offset, backing_offset;
4747 	vm_object_size_t size;
4748 
4749 	vm_counters.do_collapse_compressor++;
4750 
4751 	vm_object_lock_assert_exclusive(object);
4752 	vm_object_lock_assert_exclusive(backing_object);
4753 
4754 	size = object->vo_size;
4755 
4756 	/*
4757 	 *	Move all compressed pages from backing_object
4758 	 *	to the parent.
4759 	 */
4760 
4761 	for (backing_offset = object->vo_shadow_offset;
4762 	    backing_offset < object->vo_shadow_offset + object->vo_size;
4763 	    backing_offset += PAGE_SIZE) {
4764 		memory_object_offset_t backing_pager_offset;
4765 
4766 		/* find the next compressed page at or after this offset */
4767 		backing_pager_offset = (backing_offset +
4768 		    backing_object->paging_offset);
4769 		backing_pager_offset = vm_compressor_pager_next_compressed(
4770 			backing_object->pager,
4771 			backing_pager_offset);
4772 		if (backing_pager_offset == (memory_object_offset_t) -1) {
4773 			/* no more compressed pages */
4774 			break;
4775 		}
4776 		backing_offset = (backing_pager_offset -
4777 		    backing_object->paging_offset);
4778 
4779 		new_offset = backing_offset - object->vo_shadow_offset;
4780 
4781 		if (new_offset >= object->vo_size) {
4782 			/* we're out of the scope of "object": done */
4783 			break;
4784 		}
4785 
4786 		if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4787 		    (vm_compressor_pager_state_get(object->pager,
4788 		    (new_offset +
4789 		    object->paging_offset)) ==
4790 		    VM_EXTERNAL_STATE_EXISTS)) {
4791 			/*
4792 			 * This page already exists in object, resident or
4793 			 * compressed.
4794 			 * We don't need this compressed page in backing_object
4795 			 * and it will be reclaimed when we release
4796 			 * backing_object.
4797 			 */
4798 			continue;
4799 		}
4800 
4801 		/*
4802 		 * backing_object has this page in the VM compressor and
4803 		 * we need to transfer it to object.
4804 		 */
4805 		vm_counters.do_collapse_compressor_pages++;
4806 		vm_compressor_pager_transfer(
4807 			/* destination: */
4808 			object->pager,
4809 			(new_offset + object->paging_offset),
4810 			/* source: */
4811 			backing_object->pager,
4812 			(backing_offset + backing_object->paging_offset));
4813 	}
4814 }
4815 
4816 /*
4817  *	Routine:	vm_object_do_collapse
4818  *	Purpose:
4819  *		Collapse an object with the object backing it.
4820  *		Pages in the backing object are moved into the
4821  *		parent, and the backing object is deallocated.
4822  *	Conditions:
4823  *		Both objects and the cache are locked; the page
4824  *		queues are unlocked.
4825  *
4826  */
4827 static void
4828 vm_object_do_collapse(
4829 	vm_object_t object,
4830 	vm_object_t backing_object)
4831 {
4832 	vm_page_t p, pp;
4833 	vm_object_offset_t new_offset, backing_offset;
4834 	vm_object_size_t size;
4835 
4836 	vm_object_lock_assert_exclusive(object);
4837 	vm_object_lock_assert_exclusive(backing_object);
4838 
4839 	assert(object->purgable == VM_PURGABLE_DENY);
4840 	assert(backing_object->purgable == VM_PURGABLE_DENY);
4841 
4842 	backing_offset = object->vo_shadow_offset;
4843 	size = object->vo_size;
4844 
4845 	/*
4846 	 *	Move all in-memory pages from backing_object
4847 	 *	to the parent.  Pages that have been paged out
4848 	 *	will be overwritten by any of the parent's
4849 	 *	pages that shadow them.
4850 	 */
4851 
4852 	while (!vm_page_queue_empty(&backing_object->memq)) {
4853 		p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4854 
4855 		new_offset = (p->vmp_offset - backing_offset);
4856 
4857 		assert(!p->vmp_busy || p->vmp_absent);
4858 
4859 		/*
4860 		 *	If the parent has a page here, or if
4861 		 *	this page falls outside the parent,
4862 		 *	dispose of it.
4863 		 *
4864 		 *	Otherwise, move it as planned.
4865 		 */
4866 
4867 		if (p->vmp_offset < backing_offset || new_offset >= size) {
4868 			VM_PAGE_FREE(p);
4869 		} else {
4870 			pp = vm_page_lookup(object, new_offset);
4871 			if (pp == VM_PAGE_NULL) {
4872 				if (vm_object_compressor_pager_state_get(object,
4873 				    new_offset)
4874 				    == VM_EXTERNAL_STATE_EXISTS) {
4875 					/*
4876 					 * Parent object has this page
4877 					 * in the VM compressor.
4878 					 * Throw away the backing
4879 					 * object's page.
4880 					 */
4881 					VM_PAGE_FREE(p);
4882 				} else {
4883 					/*
4884 					 *	Parent now has no page.
4885 					 *	Move the backing object's page
4886 					 *      up.
4887 					 */
4888 					vm_page_rename(p, object, new_offset);
4889 				}
4890 			} else {
4891 				assert(!pp->vmp_absent);
4892 
4893 				/*
4894 				 *	Parent object has a real page.
4895 				 *	Throw away the backing object's
4896 				 *	page.
4897 				 */
4898 				VM_PAGE_FREE(p);
4899 			}
4900 		}
4901 	}
4902 
4903 	if (vm_object_collapse_compressor_allowed &&
4904 	    object->pager != MEMORY_OBJECT_NULL &&
4905 	    backing_object->pager != MEMORY_OBJECT_NULL) {
4906 		/* move compressed pages from backing_object to object */
4907 		vm_object_do_collapse_compressor(object, backing_object);
4908 	} else if (backing_object->pager != MEMORY_OBJECT_NULL) {
4909 		assert((!object->pager_created &&
4910 		    (object->pager == MEMORY_OBJECT_NULL)) ||
4911 		    (!backing_object->pager_created &&
4912 		    (backing_object->pager == MEMORY_OBJECT_NULL)));
4913 		/*
4914 		 *	Move the pager from backing_object to object.
4915 		 *
4916 		 *	XXX We're only using part of the paging space
4917 		 *	for keeps now... we ought to discard the
4918 		 *	unused portion.
4919 		 */
4920 
4921 		assert(!object->paging_in_progress);
4922 		assert(!object->activity_in_progress);
4923 		assert(!object->pager_created);
4924 		assert(object->pager == NULL);
4925 		object->pager = backing_object->pager;
4926 
4927 		VM_OBJECT_SET_PAGER_CREATED(object, backing_object->pager_created);
4928 		object->pager_control = backing_object->pager_control;
4929 		VM_OBJECT_SET_PAGER_READY(object, backing_object->pager_ready);
4930 		VM_OBJECT_SET_PAGER_INITIALIZED(object, backing_object->pager_initialized);
4931 		object->paging_offset =
4932 		    backing_object->paging_offset + backing_offset;
4933 		if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4934 			memory_object_control_collapse(&object->pager_control,
4935 			    object);
4936 		}
4937 		/* the backing_object has lost its pager: reset all fields */
4938 		VM_OBJECT_SET_PAGER_CREATED(backing_object, FALSE);
4939 		backing_object->pager_control = NULL;
4940 		VM_OBJECT_SET_PAGER_READY(backing_object, FALSE);
4941 		backing_object->paging_offset = 0;
4942 		backing_object->pager = NULL;
4943 	}
4944 	/*
4945 	 *	Object now shadows whatever backing_object did.
4946 	 *	Note that the reference to backing_object->shadow
4947 	 *	moves from within backing_object to within object.
4948 	 */
4949 
4950 	assert(!object->phys_contiguous);
4951 	assert(!backing_object->phys_contiguous);
4952 	object->shadow = backing_object->shadow;
4953 	if (object->shadow) {
4954 		assertf(page_aligned(object->vo_shadow_offset),
4955 		    "object %p shadow_offset 0x%llx",
4956 		    object, object->vo_shadow_offset);
4957 		assertf(page_aligned(backing_object->vo_shadow_offset),
4958 		    "backing_object %p shadow_offset 0x%llx",
4959 		    backing_object, backing_object->vo_shadow_offset);
4960 		object->vo_shadow_offset += backing_object->vo_shadow_offset;
4961 		/* "backing_object" gave its shadow to "object" */
4962 		backing_object->shadow = VM_OBJECT_NULL;
4963 		backing_object->vo_shadow_offset = 0;
4964 	} else {
4965 		/* no shadow, therefore no shadow offset... */
4966 		object->vo_shadow_offset = 0;
4967 	}
4968 	assert((object->shadow == VM_OBJECT_NULL) ||
4969 	    (object->shadow->vo_copy != backing_object));
4970 
4971 	/*
4972 	 *	Discard backing_object.
4973 	 *
4974 	 *	Since the backing object has no pages, no
4975 	 *	pager left, and no object references within it,
4976 	 *	all that is necessary is to dispose of it.
4977 	 */
4978 	object_collapses++;
4979 
4980 	assert(os_ref_get_count_raw(&backing_object->ref_count) == 1);
4981 	assert(backing_object->resident_page_count == 0);
4982 	assert(backing_object->paging_in_progress == 0);
4983 	assert(backing_object->activity_in_progress == 0);
4984 	assert(backing_object->shadow == VM_OBJECT_NULL);
4985 	assert(backing_object->vo_shadow_offset == 0);
4986 
4987 	if (backing_object->pager != MEMORY_OBJECT_NULL) {
4988 		/* ... unless it has a pager; need to terminate pager too */
4989 		vm_counters.do_collapse_terminate++;
4990 		if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4991 			vm_counters.do_collapse_terminate_failure++;
4992 		}
4993 		return;
4994 	}
4995 
4996 	assert(backing_object->pager == NULL);
4997 
4998 	VM_OBJECT_SET_ALIVE(backing_object, FALSE);
4999 	vm_object_unlock(backing_object);
5000 
5001 #if VM_OBJECT_TRACKING
5002 	if (vm_object_tracking_btlog) {
5003 		btlog_erase(vm_object_tracking_btlog, backing_object);
5004 	}
5005 #endif /* VM_OBJECT_TRACKING */
5006 
5007 	vm_object_lock_destroy(backing_object);
5008 
5009 	zfree(vm_object_zone, backing_object);
5010 }
5011 
5012 static void
5013 vm_object_do_bypass(
5014 	vm_object_t object,
5015 	vm_object_t backing_object)
5016 {
5017 	/*
5018 	 *	Make the parent shadow the next object
5019 	 *	in the chain.
5020 	 */
5021 
5022 	vm_object_lock_assert_exclusive(object);
5023 	vm_object_lock_assert_exclusive(backing_object);
5024 
5025 	vm_object_reference(backing_object->shadow);
5026 
5027 	assert(!object->phys_contiguous);
5028 	assert(!backing_object->phys_contiguous);
5029 	object->shadow = backing_object->shadow;
5030 	if (object->shadow) {
5031 		assertf(page_aligned(object->vo_shadow_offset),
5032 		    "object %p shadow_offset 0x%llx",
5033 		    object, object->vo_shadow_offset);
5034 		assertf(page_aligned(backing_object->vo_shadow_offset),
5035 		    "backing_object %p shadow_offset 0x%llx",
5036 		    backing_object, backing_object->vo_shadow_offset);
5037 		object->vo_shadow_offset += backing_object->vo_shadow_offset;
5038 	} else {
5039 		/* no shadow, therefore no shadow offset... */
5040 		object->vo_shadow_offset = 0;
5041 	}
5042 
5043 	/*
5044 	 *	Backing object might have had a copy pointer
5045 	 *	to us.  If it did, clear it.
5046 	 */
5047 	if (backing_object->vo_copy == object) {
5048 		VM_OBJECT_COPY_SET(backing_object, VM_OBJECT_NULL);
5049 	}
5050 
5051 	/*
5052 	 *	Drop the reference count on backing_object.
5053 	 *
5054 	 *	With TASK_SWAPPER configured: since its ref_count was at
5055 	 *	least 2, it will not vanish, so we don't need to call
5056 	 *	vm_object_deallocate [with a caveat for "named" objects].
5057 	 *	The res_count on the backing object is conditionally
5058 	 *	decremented.  It's possible (via vm_pageout_scan) to get
5059 	 *	here with a "swapped" object, which has a 0 res_count,
5060 	 *	in which case the backing object's res_count is already
5061 	 *	down by one.
5062 	 *
5063 	 *	Without TASK_SWAPPER: don't call vm_object_deallocate
5064 	 *	unless ref_count drops to zero.  The ref_count can drop
5065 	 *	to zero here if the backing object could be bypassed but
5066 	 *	not collapsed, such as when the backing object is
5067 	 *	temporary and cachable.
5068 	 */
5075 	if (os_ref_get_count_raw(&backing_object->ref_count) > 2 ||
5076 	    (!backing_object->named &&
5077 	    os_ref_get_count_raw(&backing_object->ref_count) > 1)) {
5078 		vm_object_lock_assert_exclusive(backing_object);
5079 		os_ref_release_live_locked_raw(&backing_object->ref_count,
5080 		    &vm_object_refgrp);
5081 		vm_object_unlock(backing_object);
5082 	} else {
5083 		/*
5084 		 *	Drop locks so that we can deallocate
5085 		 *	the backing object.
5086 		 */
5087 
5088 		/*
5089 		 * vm_object_collapse (the caller of this function) is
5090 		 * now called from contexts that may not guarantee that a
5091 		 * valid reference is held on the object... w/o a valid
5092 		 * reference, it is unsafe and unwise (you will definitely
5093 		 * regret it) to unlock the object and then retake the lock
5094 		 * since the object may be terminated and recycled in between.
5095 		 * The "activity_in_progress" reference will keep the object
5096 		 * 'stable'.
5097 		 */
5098 		vm_object_activity_begin(object);
5099 		vm_object_unlock(object);
5100 
5101 		vm_object_unlock(backing_object);
5102 		vm_object_deallocate(backing_object);
5103 
5104 		/*
5105 		 *	Relock object. We don't have to reverify
5106 		 *	its state since vm_object_collapse will
5107 		 *	do that for us as it starts at the
5108 		 *	top of its loop.
5109 		 */
5110 
5111 		vm_object_lock(object);
5112 		vm_object_activity_end(object);
5113 	}
5114 
5115 	object_bypasses++;
5116 }
5117 
5118 
5119 /*
5120  *	vm_object_collapse:
5121  *
5122  *	Perform an object collapse or an object bypass if appropriate.
5123  *	The real work of collapsing and bypassing is performed in
5124  *	the routines vm_object_do_collapse and vm_object_do_bypass.
5125  *
5126  *	Requires that the object be locked and the page queues be unlocked.
5127  *
5128  */
5129 static unsigned long vm_object_collapse_calls = 0;
5130 static unsigned long vm_object_collapse_objects = 0;
5131 static unsigned long vm_object_collapse_do_collapse = 0;
5132 static unsigned long vm_object_collapse_do_bypass = 0;
5133 
5134 __private_extern__ void
5135 vm_object_collapse(
5136 	vm_object_t                             object,
5137 	vm_object_offset_t                      hint_offset,
5138 	boolean_t                               can_bypass)
5139 {
5140 	vm_object_t                             backing_object;
5141 	vm_object_size_t                        object_vcount, object_rcount;
5142 	vm_object_t                             original_object;
5143 	int                                     object_lock_type;
5144 	int                                     backing_object_lock_type;
5145 
5146 	vm_object_collapse_calls++;
5147 
5148 	assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
5149 
5150 	if (!vm_object_collapse_allowed &&
5151 	    !(can_bypass && vm_object_bypass_allowed)) {
5152 		return;
5153 	}
5154 
5155 	if (object == VM_OBJECT_NULL) {
5156 		return;
5157 	}
5158 
5159 	original_object = object;
5160 
5161 	/*
5162 	 * The top object was locked "exclusive" by the caller.
5163 	 * In the first pass, to determine if we can collapse the shadow chain,
5164 	 * take a "shared" lock on the shadow objects.  If we can collapse,
5165 	 * we'll have to go down the chain again with exclusive locks.
5166 	 */
5167 	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5168 	backing_object_lock_type = OBJECT_LOCK_SHARED;
5169 
5170 retry:
5171 	object = original_object;
5172 	vm_object_lock_assert_exclusive(object);
5173 
5174 	while (TRUE) {
5175 		vm_object_collapse_objects++;
5176 		/*
5177 		 *	Verify that the conditions are right for either
5178 		 *	collapse or bypass:
5179 		 */
5180 
5181 		/*
5182 		 *	There is a backing object, and
5183 		 */
5184 
5185 		backing_object = object->shadow;
5186 		if (backing_object == VM_OBJECT_NULL) {
5187 			if (object != original_object) {
5188 				vm_object_unlock(object);
5189 			}
5190 			return;
5191 		}
5192 		if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
5193 			vm_object_lock_shared(backing_object);
5194 		} else {
5195 			vm_object_lock(backing_object);
5196 		}
5197 
5198 		/*
5199 		 *	No pages in the object are currently
5200 		 *	being paged out, and
5201 		 */
5202 		if (object->paging_in_progress != 0 ||
5203 		    object->activity_in_progress != 0) {
5204 			/* try and collapse the rest of the shadow chain */
5205 			if (object != original_object) {
5206 				vm_object_unlock(object);
5207 			}
5208 			object = backing_object;
5209 			object_lock_type = backing_object_lock_type;
5210 			continue;
5211 		}
5212 
5213 		/*
5214 		 *	...
5215 		 *		The backing object is not read_only,
5216 		 *		and no pages in the backing object are
5217 		 *		currently being paged out.
5218 		 *		The backing object is internal.
5219 		 *
5220 		 */
5221 
5222 		if (!backing_object->internal ||
5223 		    backing_object->paging_in_progress != 0 ||
5224 		    backing_object->activity_in_progress != 0) {
5225 			/* try and collapse the rest of the shadow chain */
5226 			if (object != original_object) {
5227 				vm_object_unlock(object);
5228 			}
5229 			object = backing_object;
5230 			object_lock_type = backing_object_lock_type;
5231 			continue;
5232 		}
5233 
5234 		/*
5235 		 * Purgeable objects are not supposed to engage in
5236 		 * copy-on-write activities, so should not have
5237 		 * any shadow objects or be a shadow object to another
5238 		 * object.
5239 		 * Collapsing a purgeable object would require some
5240 		 * updates to the purgeable compressed ledgers.
5241 		 */
5242 		if (object->purgable != VM_PURGABLE_DENY ||
5243 		    backing_object->purgable != VM_PURGABLE_DENY) {
5244 			panic("vm_object_collapse() attempting to collapse "
5245 			    "purgeable object: %p(%d) %p(%d)\n",
5246 			    object, object->purgable,
5247 			    backing_object, backing_object->purgable);
5248 			/* try and collapse the rest of the shadow chain */
5249 			if (object != original_object) {
5250 				vm_object_unlock(object);
5251 			}
5252 			object = backing_object;
5253 			object_lock_type = backing_object_lock_type;
5254 			continue;
5255 		}
5256 
5257 		/*
5258 		 *	The backing object can't be a copy-object:
5259 		 *	the shadow_offset for the copy-object must stay
5260 		 *	as 0.  Furthermore (for the 'we have all the
5261 		 *	pages' case), if we bypass backing_object and
5262 		 *	just shadow the next object in the chain, old
5263 		 *	pages from that object would then have to be copied
5264 		 *	BOTH into the (former) backing_object and into the
5265 		 *	parent object.
5266 		 */
5267 		if (backing_object->shadow != VM_OBJECT_NULL &&
5268 		    backing_object->shadow->vo_copy == backing_object) {
5269 			/* try and collapse the rest of the shadow chain */
5270 			if (object != original_object) {
5271 				vm_object_unlock(object);
5272 			}
5273 			object = backing_object;
5274 			object_lock_type = backing_object_lock_type;
5275 			continue;
5276 		}
5277 
5278 		/*
5279 		 *	We can now try to either collapse the backing
5280 		 *	object (if the parent is the only reference to
5281 		 *	it) or (perhaps) remove the parent's reference
5282 		 *	to it.
5283 		 *
5284 		 *	If there is exactly one reference to the backing
5285 		 *	object, we may be able to collapse it into the
5286 		 *	parent.
5287 		 *
5288 		 *	As long as one of the objects is still not known
5289 		 *	to the pager, we can collapse them.
5290 		 */
5291 		if (os_ref_get_count_raw(&backing_object->ref_count) == 1 &&
5292 		    (vm_object_collapse_compressor_allowed ||
5293 		    !object->pager_created
5294 		    || (!backing_object->pager_created)
5295 		    ) && vm_object_collapse_allowed) {
5296 			/*
5297 			 * We need the exclusive lock on the VM objects.
5298 			 */
5299 			if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5300 				/*
5301 				 * We have an object and its shadow locked
5302 				 * "shared".  We can't just upgrade the locks
5303 				 * to "exclusive", as some other thread might
5304 				 * also have these objects locked "shared" and
5305 				 * attempt to upgrade one or the other to
5306 				 * "exclusive".  The upgrades would block
5307 				 * forever waiting for the other "shared" locks
5308 				 * to get released.
5309 				 * So we have to release the locks and go
5310 				 * down the shadow chain again (since it could
5311 				 * have changed) with "exclusive" locking.
5312 				 */
5313 				vm_object_unlock(backing_object);
5314 				if (object != original_object) {
5315 					vm_object_unlock(object);
5316 				}
5317 				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5318 				backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5319 				goto retry;
5320 			}
5321 
5322 			/*
5323 			 *	Collapse the object with its backing
5324 			 *	object, and try again with the object's
5325 			 *	new backing object.
5326 			 */
5327 
5328 			vm_object_do_collapse(object, backing_object);
5329 			vm_object_collapse_do_collapse++;
5330 			continue;
5331 		}
5332 
5333 		/*
5334 		 *	Collapsing the backing object was not possible
5335 		 *	or permitted, so let's try bypassing it.
5336 		 */
5337 
5338 		if (!(can_bypass && vm_object_bypass_allowed)) {
5339 			/* try and collapse the rest of the shadow chain */
5340 			if (object != original_object) {
5341 				vm_object_unlock(object);
5342 			}
5343 			object = backing_object;
5344 			object_lock_type = backing_object_lock_type;
5345 			continue;
5346 		}
5347 
5348 
5349 		/*
5350 		 *	If the object doesn't have all its pages present,
5351 		 *	we have to make sure no pages in the backing object
5352 		 *	"show through" before bypassing it.
5353 		 */
5354 		object_vcount = object->vo_size >> PAGE_SHIFT;
5355 		object_rcount = (vm_object_size_t)object->resident_page_count;
5356 
5357 		if (object_rcount != object_vcount) {
5358 			vm_object_offset_t      offset;
5359 			vm_object_offset_t      backing_offset;
5360 			vm_object_size_t        backing_rcount, backing_vcount;
5361 
5362 			/*
5363 			 *	If the backing object has a pager but no pagemap,
5364 			 *	then we cannot bypass it, because we don't know
5365 			 *	what pages it has.
5366 			 */
5367 			if (backing_object->pager_created) {
5368 				/* try and collapse the rest of the shadow chain */
5369 				if (object != original_object) {
5370 					vm_object_unlock(object);
5371 				}
5372 				object = backing_object;
5373 				object_lock_type = backing_object_lock_type;
5374 				continue;
5375 			}
5376 
5377 			/*
5378 			 *	If the object has a pager but no pagemap,
5379 			 *	then we cannot bypass it, because we don't know
5380 			 *	what pages it has.
5381 			 */
5382 			if (object->pager_created) {
5383 				/* try and collapse the rest of the shadow chain */
5384 				if (object != original_object) {
5385 					vm_object_unlock(object);
5386 				}
5387 				object = backing_object;
5388 				object_lock_type = backing_object_lock_type;
5389 				continue;
5390 			}
5391 
5392 			backing_offset = object->vo_shadow_offset;
5393 			backing_vcount = backing_object->vo_size >> PAGE_SHIFT;
5394 			backing_rcount = (vm_object_size_t)backing_object->resident_page_count;
5395 			assert(backing_vcount >= object_vcount);
5396 
5397 			if (backing_rcount > (backing_vcount - object_vcount) &&
5398 			    backing_rcount - (backing_vcount - object_vcount) > object_rcount) {
5399 				/*
5400 				 * we have enough pages in the backing object to guarantee that
5401 				 * at least 1 of them must be 'uncovered' by a resident page
5402 				 * in the object we're evaluating, so move on and
5403 				 * try to collapse the rest of the shadow chain
5404 				 */
5405 				if (object != original_object) {
5406 					vm_object_unlock(object);
5407 				}
5408 				object = backing_object;
5409 				object_lock_type = backing_object_lock_type;
5410 				continue;
5411 			}
5412 
5413 			/*
5414 			 *	If all of the pages in the backing object are
5415 			 *	shadowed by the parent object, the parent
5416 			 *	object no longer has to shadow the backing
5417 			 *	object; it can shadow the next one in the
5418 			 *	chain.
5419 			 *
5420 			 *	If the backing object has existence info,
5421 			 *	we must check examine its existence info
5422 			 *	we must examine its existence info
5423 			 *
5424 			 */
5425 
5426 #define EXISTS_IN_OBJECT(obj, off, rc)                  \
5427 	((vm_object_compressor_pager_state_get((obj), (off))   \
5428 	  == VM_EXTERNAL_STATE_EXISTS) ||               \
5429 	 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
5430 
5431 			/*
5432 			 * Check the hint location first
5433 			 * (since it is often the quickest way out of here).
5434 			 */
5435 			if (object->cow_hint != ~(vm_offset_t)0) {
5436 				hint_offset = (vm_object_offset_t)object->cow_hint;
5437 			} else {
5438 				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
5439 				    (hint_offset - 8 * PAGE_SIZE_64) : 0;
5440 			}
5441 
5442 			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
5443 			    backing_offset, backing_rcount) &&
5444 			    !EXISTS_IN_OBJECT(object, hint_offset, object_rcount)) {
5445 				/* dependency right at the hint */
5446 				object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
5447 				/* try and collapse the rest of the shadow chain */
5448 				if (object != original_object) {
5449 					vm_object_unlock(object);
5450 				}
5451 				object = backing_object;
5452 				object_lock_type = backing_object_lock_type;
5453 				continue;
5454 			}
5455 
5456 			/*
5457 			 * If the object's window onto the backing_object
5458 			 * is large compared to the number of resident
5459 			 * pages in the backing object, it makes sense to
5460 			 * walk the backing_object's resident pages first.
5461 			 *
5462 			 * NOTE: Pages may be in both the existence map and/or
5463 			 * NOTE: Pages may be in the existence map, resident, or
5464 			 * both, so if we don't find a dependency while walking
5465 			 * the backing object's resident page list directly, and
5466 			 * there is an existence map, we'll have to run the
5467 			 * offset based 2nd pass.  Because we may have to run
5468 			 * both passes, we need to be careful not to decrement
5469 			 * 'rcount' in the 1st pass.
5470 			if (backing_rcount && backing_rcount < (object_vcount / 8)) {
5471 				vm_object_size_t rc = object_rcount;
5472 				vm_page_t p;
5473 
5474 				backing_rcount = backing_object->resident_page_count;
5475 				p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
5476 				do {
5477 					offset = (p->vmp_offset - backing_offset);
5478 
5479 					if (offset < object->vo_size &&
5480 					    offset != hint_offset &&
5481 					    !EXISTS_IN_OBJECT(object, offset, rc)) {
5482 						/* found a dependency */
5483 						object->cow_hint = (vm_offset_t) offset; /* atomic */
5484 
5485 						break;
5486 					}
5487 					p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5488 				} while (--backing_rcount);
5489 				if (backing_rcount != 0) {
5490 					/* try and collapse the rest of the shadow chain */
5491 					if (object != original_object) {
5492 						vm_object_unlock(object);
5493 					}
5494 					object = backing_object;
5495 					object_lock_type = backing_object_lock_type;
5496 					continue;
5497 				}
5498 			}
5499 
5500 			/*
5501 			 * Walk through the offsets looking for pages in the
5502 			 * backing object that show through to the object.
5503 			 */
5504 			if (backing_rcount) {
5505 				offset = hint_offset;
5506 
5507 				while ((offset =
5508 				    (offset + PAGE_SIZE_64 < object->vo_size) ?
5509 				    (offset + PAGE_SIZE_64) : 0) != hint_offset) {
5510 					if (EXISTS_IN_OBJECT(backing_object, offset +
5511 					    backing_offset, backing_rcount) &&
5512 					    !EXISTS_IN_OBJECT(object, offset, object_rcount)) {
5513 						/* found a dependency */
5514 						object->cow_hint = (vm_offset_t) offset; /* atomic */
5515 						break;
5516 					}
5517 				}
5518 				if (offset != hint_offset) {
5519 					/* try and collapse the rest of the shadow chain */
5520 					if (object != original_object) {
5521 						vm_object_unlock(object);
5522 					}
5523 					object = backing_object;
5524 					object_lock_type = backing_object_lock_type;
5525 					continue;
5526 				}
5527 			}
5528 		}
5529 
5530 		/*
5531 		 * We need "exclusive" locks on the 2 VM objects.
5532 		 */
5533 		if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5534 			vm_object_unlock(backing_object);
5535 			if (object != original_object) {
5536 				vm_object_unlock(object);
5537 			}
5538 			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5539 			backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5540 			goto retry;
5541 		}
5542 
5543 		/* reset the offset hint for any objects deeper in the chain */
5544 		object->cow_hint = (vm_offset_t)0;
5545 
5546 		/*
5547 		 *	All interesting pages in the backing object
5548 		 *	already live in the parent or its pager.
5549 		 *	Thus we can bypass the backing object.
5550 		 */
5551 
5552 		vm_object_do_bypass(object, backing_object);
5553 		vm_object_collapse_do_bypass++;
5554 
5555 		/*
5556 		 *	Try again with this object's new backing object.
5557 		 */
5558 
5559 		continue;
5560 	}
5561 
5562 	/* NOT REACHED */
5563 	/*
5564 	 *  if (object != original_object) {
5565 	 *       vm_object_unlock(object);
5566 	 *  }
5567 	 */
5568 }
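
/*
 * Illustrative sketch (not part of the build) of how a caller would drive
 * the collapse/bypass logic above: the object must be locked exclusive and
 * the hint offset must be page aligned.  "object" here is a hypothetical
 * placeholder for the caller's top object.
 */
#if 0
	vm_object_lock(object);
	vm_object_collapse(object, (vm_object_offset_t) 0,
	    TRUE /* can_bypass */);
	vm_object_unlock(object);
#endif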
5569 
5570 /*
5571  *	Routine:	vm_object_page_remove: [internal]
5572  *	Purpose:
5573  *		Removes all physical pages in the specified
5574  *		object range from the object's list of pages.
5575  *
5576  *	In/out conditions:
5577  *		The object must be locked.
5578  *		The object must not have paging_in_progress, usually
5579  *		guaranteed by not having a pager.
5580  */
5581 unsigned int vm_object_page_remove_lookup = 0;
5582 unsigned int vm_object_page_remove_iterate = 0;
5583 
5584 __private_extern__ void
5585 vm_object_page_remove(
5586 	vm_object_t             object,
5587 	vm_object_offset_t      start,
5588 	vm_object_offset_t      end)
5589 {
5590 	vm_page_t       p, next;
5591 
5592 	/*
5593 	 *	One and two page removals are most popular.
5594 	 *	The factor of 16 here is somewhat arbitrary.
5595 	 *	It balances vm_page_lookup vs iteration.
5596 	 */
5597 
5598 	if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
5599 		vm_object_page_remove_lookup++;
5600 
5601 		for (; start < end; start += PAGE_SIZE_64) {
5602 			p = vm_page_lookup(object, start);
5603 			if (p != VM_PAGE_NULL) {
5604 				assert(!p->vmp_cleaning && !p->vmp_laundry);
5605 				if (!vm_page_is_fictitious(p) && p->vmp_pmapped) {
5606 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5607 				}
5608 				VM_PAGE_FREE(p);
5609 			}
5610 		}
5611 	} else {
5612 		vm_object_page_remove_iterate++;
5613 
5614 		p = (vm_page_t) vm_page_queue_first(&object->memq);
5615 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5616 			next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5617 			if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
5618 				assert(!p->vmp_cleaning && !p->vmp_laundry);
5619 				if (!vm_page_is_fictitious(p) && p->vmp_pmapped) {
5620 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5621 				}
5622 				VM_PAGE_FREE(p);
5623 			}
5624 			p = next;
5625 		}
5626 	}
5627 }
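
/*
 * Illustrative sketch of the lookup-vs-iterate heuristic above (not part of
 * the build; the helper name and the page counts are hypothetical).  With
 * 256 resident pages, a removal spanning fewer than 256 / 16 = 16 pages
 * takes the per-offset vm_page_lookup() path; a larger range walks the
 * object's memq once instead.
 */
#if 0
static boolean_t
vm_object_page_remove_prefers_lookup(
	vm_object_t             object,
	vm_object_offset_t      start,
	vm_object_offset_t      end)
{
	/* mirrors the test at the top of vm_object_page_remove() */
	return atop_64(end - start) <
	       (unsigned)object->resident_page_count / 16;
}
#endif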
5628 
5629 
5630 /*
5631  *	Routine:	vm_object_coalesce
5632  *	Function:	Coalesces two objects backing up adjoining
5633  *			regions of memory into a single object.
5634  *
5635  *	returns TRUE if objects were combined.
5636  *
5637  *	NOTE:	Only works at the moment if the second object is NULL -
5638  *		if it's not, which object do we lock first?
5639  *
5640  *	Parameters:
5641  *		prev_object	First object to coalesce
5642  *		prev_offset	Offset into prev_object
5643  *		next_object	Second object to coalesce
5644  *		next_offset	Offset into next_object
5645  *
5646  *		prev_size	Size of reference to prev_object
5647  *		next_size	Size of reference to next_object
5648  *
5649  *	Conditions:
5650  *	The object(s) must *not* be locked. The map must be locked
5651  *	to preserve the reference to the object(s).
5652  */
5653 static int vm_object_coalesce_count = 0;
5654 
5655 __private_extern__ boolean_t
5656 vm_object_coalesce(
5657 	vm_object_t                     prev_object,
5658 	vm_object_t                     next_object,
5659 	vm_object_offset_t              prev_offset,
5660 	__unused vm_object_offset_t next_offset,
5661 	vm_object_size_t                prev_size,
5662 	vm_object_size_t                next_size)
5663 {
5664 	vm_object_size_t        newsize;
5665 
5666 #ifdef  lint
5667 	next_offset++;
5668 #endif  /* lint */
5669 
5670 	if (next_object != VM_OBJECT_NULL) {
5671 		return FALSE;
5672 	}
5673 
5674 	if (prev_object == VM_OBJECT_NULL) {
5675 		return TRUE;
5676 	}
5677 
5678 	vm_object_lock(prev_object);
5679 
5680 	/*
5681 	 *	Try to collapse the object first
5682 	 */
5683 	vm_object_collapse(prev_object, prev_offset, TRUE);
5684 
5685 	/*
5686 	 *	Can't coalesce if pages not mapped to
5687 	 *	prev_entry may be in use in any way:
5688 	 *	. more than one reference
5689 	 *	. paged out
5690 	 *	. shadows another object
5691 	 *	. has a copy elsewhere
5692 	 *	. is purgeable
5693 	 *	. paging references (pages might be in page-list)
5694 	 */
5695 
5696 	if ((os_ref_get_count_raw(&prev_object->ref_count) > 1) ||
5697 	    prev_object->pager_created ||
5698 	    prev_object->phys_contiguous ||
5699 	    (prev_object->shadow != VM_OBJECT_NULL) ||
5700 	    (prev_object->vo_copy != VM_OBJECT_NULL) ||
5701 	    (prev_object->true_share != FALSE) ||
5702 	    (prev_object->purgable != VM_PURGABLE_DENY) ||
5703 	    (prev_object->paging_in_progress != 0) ||
5704 	    (prev_object->activity_in_progress != 0)) {
5705 		vm_object_unlock(prev_object);
5706 		return FALSE;
5707 	}
5708 	/* newsize = prev_offset + prev_size + next_size; */
5709 	if (__improbable(os_add3_overflow(prev_offset, prev_size, next_size,
5710 	    &newsize))) {
5711 		vm_object_unlock(prev_object);
5712 		return FALSE;
5713 	}
5714 
5715 	vm_object_coalesce_count++;
5716 
5717 	/*
5718 	 *	Remove any pages that may still be in the object from
5719 	 *	a previous deallocation.
5720 	 */
5721 	vm_object_page_remove(prev_object,
5722 	    prev_offset + prev_size,
5723 	    prev_offset + prev_size + next_size);
5724 
5725 	/*
5726 	 *	Extend the object if necessary.
5727 	 */
5728 	if (newsize > prev_object->vo_size) {
5729 		assertf(page_aligned(newsize),
5730 		    "object %p size 0x%llx",
5731 		    prev_object, (uint64_t)newsize);
5732 		prev_object->vo_size = newsize;
5733 	}
5734 
5735 	vm_object_unlock(prev_object);
5736 	return TRUE;
5737 }
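
/*
 * Hedged usage sketch (not part of the build): how a map-layer caller could
 * try to grow an existing entry's object in place.  Only vm_object_coalesce()
 * itself is real here; "prev_object", "prev_offset", "prev_size" and
 * "next_size" are hypothetical placeholders for the caller's state.
 */
#if 0
	if (vm_object_coalesce(prev_object, VM_OBJECT_NULL,
	    prev_offset, (vm_object_offset_t) 0,
	    prev_size, next_size)) {
		/*
		 * Success: prev_object now covers
		 * [prev_offset, prev_offset + prev_size + next_size).
		 */
	}
#endif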
5738 
5739 kern_return_t
5740 vm_object_populate_with_private(
5741 	vm_object_t             object,
5742 	vm_object_offset_t      offset,
5743 	ppnum_t                 phys_page,
5744 	vm_size_t               size)
5745 {
5746 	ppnum_t                 base_page;
5747 	vm_object_offset_t      base_offset;
5748 
5749 
5750 	if (!object->private) {
5751 		return KERN_FAILURE;
5752 	}
5753 
5754 	base_page = phys_page;
5755 
5756 	vm_object_lock(object);
5757 
5758 	if (!object->phys_contiguous) {
5759 		vm_page_t       m;
5760 
5761 		if ((base_offset = trunc_page_64(offset)) != offset) {
5762 			vm_object_unlock(object);
5763 			return KERN_FAILURE;
5764 		}
5765 		base_offset += object->paging_offset;
5766 
5767 		while (size) {
5768 			m = vm_page_lookup(object, base_offset);
5769 
5770 			if (m != VM_PAGE_NULL) {
5771 				ppnum_t m_phys_page = VM_PAGE_GET_PHYS_PAGE(m);
5772 
5773 				if (m_phys_page == vm_page_guard_addr) {
5774 					/* nothing to do */
5775 				} else if (m_phys_page == vm_page_fictitious_addr) {
5776 					vm_page_lockspin_queues();
5777 					vm_page_make_private(m, base_page);
5778 					vm_page_unlock_queues();
5779 				} else if (m_phys_page != base_page) {
5780 					if (!vm_page_is_private(m)) {
5781 						/*
5782 						 * we'd leak a real page... that can't be right
5783 						 */
5784 						panic("vm_object_populate_with_private - %p not private", m);
5785 					}
5786 					if (m->vmp_pmapped) {
5787 						/*
5788 						 * pmap call to clear old mapping
5789 						 */
5790 						pmap_disconnect(m_phys_page);
5791 					}
5792 					VM_PAGE_SET_PHYS_PAGE(m, base_page);
5793 				}
5794 			} else {
5795 				m = vm_page_create_private(base_page);
5796 
5797 				m->vmp_unusual = TRUE;
5798 				m->vmp_busy = FALSE;
5799 
5800 				vm_page_insert(m, object, base_offset);
5801 			}
5802 			base_page++;                                                                    /* Go to the next physical page */
5803 			base_offset += PAGE_SIZE;
5804 			size -= PAGE_SIZE;
5805 		}
5806 	} else {
5807 		/* NOTE: we should check the original settings here */
5808 		/* if we have a size > zero a pmap call should be made */
5809 		/* to disable the range */
5810 
5811 		/* pmap_? */
5812 
5813 		/* shadows on contiguous memory are not allowed */
5814 		/* we therefore can use the offset field */
5815 		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5816 		assertf(page_aligned(size),
5817 		    "object %p size 0x%llx",
5818 		    object, (uint64_t)size);
5819 		object->vo_size = size;
5820 	}
5821 	vm_object_unlock(object);
5822 
5823 	return KERN_SUCCESS;
5824 }
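
/*
 * Hedged usage sketch (not part of the build): backing one page of a
 * "private" object with a specific physical page.  "object", "offset" and
 * "phys_page" are hypothetical placeholders; per the checks above, the
 * object must have been marked private and the offset must be page aligned.
 */
#if 0
	kern_return_t kr;

	kr = vm_object_populate_with_private(object,
	    offset,                 /* page aligned */
	    phys_page,              /* ppnum_t of the frame to expose */
	    PAGE_SIZE);
	assert(kr == KERN_SUCCESS);
#endif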
5825 
5826 
5827 kern_return_t
5828 memory_object_create_named(
5829 	memory_object_t pager,
5830 	memory_object_offset_t  size,
5831 	memory_object_control_t         *control)
5832 {
5833 	vm_object_t             object;
5834 
5835 	*control = MEMORY_OBJECT_CONTROL_NULL;
5836 	if (pager == MEMORY_OBJECT_NULL) {
5837 		return KERN_INVALID_ARGUMENT;
5838 	}
5839 
5840 	object = vm_object_memory_object_associate(pager,
5841 	    VM_OBJECT_NULL,
5842 	    size,
5843 	    TRUE);
5844 	if (object == VM_OBJECT_NULL) {
5845 		return KERN_INVALID_OBJECT;
5846 	}
5847 
5848 	/* wait for object (if any) to be ready */
5849 	if (object != VM_OBJECT_NULL) {
5850 		vm_object_lock(object);
5851 		VM_OBJECT_SET_NAMED(object, TRUE);
5852 		while (!object->pager_ready) {
5853 			vm_object_sleep(object,
5854 			    VM_OBJECT_EVENT_PAGER_READY,
5855 			    THREAD_UNINT, LCK_SLEEP_EXCLUSIVE);
5856 		}
5857 		*control = object->pager_control;
5858 		vm_object_unlock(object);
5859 	}
5860 	return KERN_SUCCESS;
5861 }
5862 
5863 
5864 __private_extern__ kern_return_t
5865 vm_object_lock_request(
5866 	vm_object_t                     object,
5867 	vm_object_offset_t              offset,
5868 	vm_object_size_t                size,
5869 	memory_object_return_t          should_return,
5870 	int                             flags,
5871 	vm_prot_t                       prot)
5872 {
5873 	__unused boolean_t      should_flush;
5874 
5875 	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5876 
5877 	/*
5878 	 *	Check for bogus arguments.
5879 	 */
5880 	if (object == VM_OBJECT_NULL) {
5881 		return KERN_INVALID_ARGUMENT;
5882 	}
5883 
5884 	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
5885 		return KERN_INVALID_ARGUMENT;
5886 	}
5887 
5888 	/*
5889 	 * XXX TODO4K
5890 	 * extend range for conservative operations (copy-on-write, sync, ...)
5891 	 * truncate range for destructive operations (purge, ...)
5892 	 */
5893 	size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
5894 	offset = vm_object_trunc_page(offset);
5895 
5896 	/*
5897 	 *	Lock the object, and acquire a paging reference to
5898 	 *	prevent the memory_object reference from being released.
5899 	 */
5900 	vm_object_lock(object);
5901 	vm_object_paging_begin(object);
5902 
5903 	(void)vm_object_update(object,
5904 	    offset, size, NULL, NULL, should_return, flags, prot);
5905 
5906 	vm_object_paging_end(object);
5907 	vm_object_unlock(object);
5908 
5909 	return KERN_SUCCESS;
5910 }
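
/*
 * Hedged usage sketch (not part of the build): pushing the dirty pages of
 * one page-sized range back to the pager and discarding them.  "object" and
 * "offset" are hypothetical placeholders; the constants are the public
 * memory-object ones already referenced in this routine.
 */
#if 0
	kern_return_t kr;

	kr = vm_object_lock_request(object,
	    offset,                             /* rounded to page bounds internally */
	    PAGE_SIZE_64,
	    MEMORY_OBJECT_RETURN_DIRTY,         /* write back dirty pages */
	    MEMORY_OBJECT_DATA_FLUSH,           /* then free the resident pages */
	    VM_PROT_NO_CHANGE);
	assert(kr == KERN_SUCCESS);
#endif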
5911 
5912 /*
5913  * Empty a purgeable object by grabbing the physical pages assigned to it and
5914  * putting them on the free queue without writing them to backing store, etc.
5915  * When the pages are next touched they will be demand zero-fill pages.  We
5916  * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
5917  * skip referenced/dirty pages, pages on the active queue, etc.  We're more
5918  * than happy to grab these since this is a purgeable object.  We mark the
5919  * object as "empty" after reaping its pages.
5920  *
5921  * On entry the object must be locked and it must be
5922  * purgeable with no delayed copies pending.
5923  */
5924 uint64_t
5925 vm_object_purge(vm_object_t object, int flags)
5926 {
5927 	unsigned int    object_page_count = 0, pgcount = 0;
5928 	uint64_t        total_purged_pgcount = 0;
5929 	boolean_t       skipped_object = FALSE;
5930 
5931 	vm_object_lock_assert_exclusive(object);
5932 
5933 	if (object->purgable == VM_PURGABLE_DENY) {
5934 		return 0;
5935 	}
5936 
5937 	assert(object->vo_copy == VM_OBJECT_NULL);
5938 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5939 
5940 	/*
5941 	 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
5942 	 * reaping its pages.  We update vm_page_purgeable_count in bulk
5943 	 * and we don't want vm_page_remove() to update it again for each
5944 	 * page we reap later.
5945 	 *
5946 	 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
5947 	 * are all accounted for in the "volatile" ledgers, so this does not
5948 	 * make any difference.
5949 	 * If we transitioned directly from NONVOLATILE to EMPTY,
5950 	 * vm_page_purgeable_count must have been updated when the object
5951 	 * was dequeued from its volatile queue and the purgeable ledgers
5952 	 * must have also been updated accordingly at that time (in
5953 	 * vm_object_purgable_control()).
5954 	 */
5955 	if (object->purgable == VM_PURGABLE_VOLATILE) {
5956 		unsigned int delta;
5957 		assert(object->resident_page_count >=
5958 		    object->wired_page_count);
5959 		delta = (object->resident_page_count -
5960 		    object->wired_page_count);
5961 		if (delta != 0) {
5962 			assert(vm_page_purgeable_count >=
5963 			    delta);
5964 			OSAddAtomic(-delta,
5965 			    (SInt32 *)&vm_page_purgeable_count);
5966 		}
5967 		if (object->wired_page_count != 0) {
5968 			assert(vm_page_purgeable_wired_count >=
5969 			    object->wired_page_count);
5970 			OSAddAtomic(-object->wired_page_count,
5971 			    (SInt32 *)&vm_page_purgeable_wired_count);
5972 		}
5973 		VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY);
5974 	}
5975 	assert(object->purgable == VM_PURGABLE_EMPTY);
5976 
5977 	object_page_count = object->resident_page_count;
5978 
5979 	vm_object_reap_pages(object, REAP_PURGEABLE);
5980 
5981 	if (object->resident_page_count >= object_page_count) {
5982 		total_purged_pgcount = 0;
5983 	} else {
5984 		total_purged_pgcount = object_page_count - object->resident_page_count;
5985 	}
5986 
5987 	if (object->pager != NULL) {
5988 		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5989 
5990 		if (object->activity_in_progress == 0 &&
5991 		    object->paging_in_progress == 0) {
5992 			/*
5993 			 * Also reap any memory coming from this object
5994 			 * in the VM compressor.
5995 			 *
5996 			 * There are no operations in progress on the VM object
5997 			 * and no operation can start while we're holding the
5998 			 * VM object lock, so it's safe to reap the compressed
5999 			 * pages and update the page counts.
6000 			 */
6001 			pgcount = vm_compressor_pager_get_count(object->pager);
6002 			if (pgcount) {
6003 				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
6004 				vm_compressor_pager_count(object->pager,
6005 				    -pgcount,
6006 				    FALSE,                       /* shared */
6007 				    object);
6008 				vm_object_owner_compressed_update(object,
6009 				    -pgcount);
6010 			}
6011 			if (!(flags & C_DONT_BLOCK)) {
6012 				assert(vm_compressor_pager_get_count(object->pager)
6013 				    == 0);
6014 			}
6015 		} else {
6016 			/*
6017 			 * There's some kind of paging activity in progress
6018 			 * for this object, which could result in a page
6019 			 * being compressed or decompressed, possibly while
6020 			 * the VM object is not locked, so it could race
6021 			 * with us.
6022 			 *
6023 			 * We can't really synchronize this without possibly
6024 			 * causing a deadlock when the compressor needs to
6025 			 * allocate or free memory while compressing or
6026 			 * decompressing a page from a purgeable object
6027 			 * mapped in the kernel_map...
6028 			 *
6029 			 * So let's not attempt to purge the compressor
6030 			 * pager if there's any kind of operation in
6031 			 * progress on the VM object.
6032 			 */
6033 			skipped_object = TRUE;
6034 		}
6035 	}
6036 
6037 	vm_object_lock_assert_exclusive(object);
6038 
6039 	total_purged_pgcount += pgcount;
6040 
6041 	KDBG_RELEASE(VMDBG_CODE(DBG_VM_PURGEABLE_OBJECT_PURGE_ONE) | DBG_FUNC_NONE,
6042 	    VM_KERNEL_UNSLIDE_OR_PERM(object),                   /* purged object */
6043 	    object_page_count,
6044 	    total_purged_pgcount,
6045 	    skipped_object);
6046 
6047 	return total_purged_pgcount;
6048 }
6049 
6050 
6051 /*
6052  * vm_object_purgeable_control() allows the caller to control and investigate the
6053  * state of a purgeable object.  A purgeable object is created via a call to
6054  * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
6055  * never be coalesced with any other object -- even other purgeable objects --
6056  * and will thus always remain a distinct object.  A purgeable object has
6057  * special semantics when its reference count is exactly 1.  If its reference
6058  * count is greater than 1, then a purgeable object will behave like a normal
6059  * object and attempts to use this interface will result in an error return
6060  * of KERN_INVALID_ARGUMENT.
6061  *
6062  * A purgeable object may be put into a "volatile" state which will make the
6063  * object's pages eligible for being reclaimed without paging to backing
6064  * store if the system runs low on memory.  If the pages in a volatile
6065  * purgeable object are reclaimed, the purgeable object is said to have been
6066  * "emptied."  When a purgeable object is emptied the system will reclaim as
6067  * many pages from the object as it can in a convenient manner (pages already
6068  * en route to backing store or busy for other reasons are left as is).  When
6069  * a purgeable object is made volatile, its pages will generally be reclaimed
6070  * before other pages in the application's working set.  This semantic is
6071  * generally used by applications which can recreate the data in the object
6072  * faster than it can be paged in.  One such example might be media assets
6073  * which can be reread from a much faster RAID volume.
6074  *
6075  * A purgeable object may be designated as "non-volatile" which means it will
6076  * behave like all other objects in the system with pages being written to and
6077  * read from backing store as needed to satisfy system memory needs.  If the
6078  * object was emptied before the object was made non-volatile, that fact will
6079  * be returned as the old state of the purgeable object (see
6080  * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
6081  * were reclaimed as part of emptying the object will be refaulted in as
6082  * zero-fill on demand.  It is up to the application to note that an object
6083  * was emptied and recreate the object's contents if necessary.  When a
6084  * purgeable object is made non-volatile, its pages will generally not be paged
6085  * out to backing store in the immediate future.  A purgeable object may also
6086  * be manually emptied.
6087  *
6088  * Finally, the current state (non-volatile, volatile, volatile & empty) of a
6089  * volatile purgeable object may be queried at any time.  This information may
6090  * be used as a control input to let the application know when the system is
6091  * experiencing memory pressure and is reclaiming memory.
6092  *
6093  * The specified address may be any address within the purgeable object.  If
6094  * the specified address does not represent any object in the target task's
6095  * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
6096  * object containing the specified address is not a purgeable object, then
6097  * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
6098  * returned.
6099  *
6100  * The control parameter may be any one of VM_PURGABLE_SET_STATE or
6101  * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
6102  * state is used to set the new state of the purgeable object and return its
6103  * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
6104  * object is returned in the parameter state.
6105  *
6106  * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
6107  * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
6108  * the non-volatile, volatile and volatile/empty states described above.
6109  * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
6110  * immediately reclaim as many pages in the object as can be conveniently
6111  * collected (some may have already been written to backing store or be
6112  * otherwise busy).
6113  *
6114  * The process of making a purgeable object non-volatile and determining its
6115  * previous state is atomic.  Thus, if a purgeable object is made
6116  * VM_PURGABLE_NONVOLATILE and the old state is returned as
6117  * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
6118  * completely intact and will remain so until the object is made volatile
6119  * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
6120  * was reclaimed while it was in a volatile state and its previous contents
6121  * have been lost.
6122  */
6123 /*
6124  * The object must be locked.
6125  */
6126 kern_return_t
6127 vm_object_purgable_control(
6128 	vm_object_t     object,
6129 	vm_purgable_t   control,
6130 	int             *state)
6131 {
6132 	int             old_state;
6133 	int             new_state;
6134 
6135 	if (object == VM_OBJECT_NULL) {
6136 		/*
6137 		 * Object must already be present or it can't be purgeable.
6138 		 */
6139 		return KERN_INVALID_ARGUMENT;
6140 	}
6141 
6142 	vm_object_lock_assert_exclusive(object);
6143 
6144 	/*
6145 	 * Get current state of the purgeable object.
6146 	 */
6147 	old_state = object->purgable;
6148 	if (old_state == VM_PURGABLE_DENY) {
6149 		return KERN_INVALID_ARGUMENT;
6150 	}
6151 
6152 	/* purgeable can't have delayed copies - now or in the future */
6153 	assert(object->vo_copy == VM_OBJECT_NULL);
6154 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
6155 
6156 	/*
6157 	 * Execute the desired operation.
6158 	 */
6159 	if (control == VM_PURGABLE_GET_STATE) {
6160 		*state = old_state;
6161 		return KERN_SUCCESS;
6162 	}
6163 
6164 	if (control == VM_PURGABLE_SET_STATE &&
6165 	    object->purgeable_only_by_kernel) {
6166 		return KERN_PROTECTION_FAILURE;
6167 	}
6168 
6169 	if (control != VM_PURGABLE_SET_STATE &&
6170 	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
6171 		return KERN_INVALID_ARGUMENT;
6172 	}
6173 
6174 	if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
6175 		object->volatile_empty = TRUE;
6176 	}
6177 	if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
6178 		object->volatile_fault = TRUE;
6179 	}
6180 
6181 	new_state = *state & VM_PURGABLE_STATE_MASK;
6182 	if (new_state == VM_PURGABLE_VOLATILE) {
6183 		if (old_state == VM_PURGABLE_EMPTY) {
6184 			/* what's been emptied must stay empty */
6185 			new_state = VM_PURGABLE_EMPTY;
6186 		}
6187 		if (object->volatile_empty) {
6188 			/* debugging mode: go straight to empty */
6189 			new_state = VM_PURGABLE_EMPTY;
6190 		}
6191 	}
6192 
6193 	switch (new_state) {
6194 	case VM_PURGABLE_DENY:
6195 		/*
6196 		 * Attempting to convert purgeable memory to non-purgeable:
6197 		 * not allowed.
6198 		 */
6199 		return KERN_INVALID_ARGUMENT;
6200 	case VM_PURGABLE_NONVOLATILE:
6201 		VM_OBJECT_SET_PURGABLE(object, new_state);
6202 
6203 		if (old_state == VM_PURGABLE_VOLATILE) {
6204 			unsigned int delta;
6205 
6206 			assert(object->resident_page_count >=
6207 			    object->wired_page_count);
6208 			delta = (object->resident_page_count -
6209 			    object->wired_page_count);
6210 
6211 			assert(vm_page_purgeable_count >= delta);
6212 
6213 			if (delta != 0) {
6214 				OSAddAtomic(-delta,
6215 				    (SInt32 *)&vm_page_purgeable_count);
6216 			}
6217 			if (object->wired_page_count != 0) {
6218 				assert(vm_page_purgeable_wired_count >=
6219 				    object->wired_page_count);
6220 				OSAddAtomic(-object->wired_page_count,
6221 				    (SInt32 *)&vm_page_purgeable_wired_count);
6222 			}
6223 
6224 			vm_page_lock_queues();
6225 
6226 			/* object should be on a queue */
6227 			assert(object->objq.next != NULL &&
6228 			    object->objq.prev != NULL);
6229 			purgeable_q_t queue;
6230 
6231 			/*
6232 			 * Move object from its volatile queue to the
6233 			 * non-volatile queue...
6234 			 */
6235 			queue = vm_purgeable_object_remove(object);
6236 			assert(queue);
6237 
6238 			if (object->purgeable_when_ripe) {
6239 				vm_purgeable_token_delete_last(queue);
6240 			}
6241 			assert(queue->debug_count_objects >= 0);
6242 
6243 			vm_page_unlock_queues();
6244 		}
6245 		if (old_state == VM_PURGABLE_VOLATILE ||
6246 		    old_state == VM_PURGABLE_EMPTY) {
6247 			/*
6248 			 * Transfer the object's pages from the volatile to
6249 			 * non-volatile ledgers.
6250 			 */
6251 			vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
6252 		}
6253 
6254 		break;
6255 
6256 	case VM_PURGABLE_VOLATILE:
6257 		if (object->volatile_fault) {
6258 			vm_page_t       p;
6259 			int             refmod;
6260 
6261 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6262 				if (p->vmp_busy ||
6263 				    VM_PAGE_WIRED(p) ||
6264 				    vm_page_is_fictitious(p)) {
6265 					continue;
6266 				}
6267 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6268 				if ((refmod & VM_MEM_MODIFIED) &&
6269 				    !p->vmp_dirty) {
6270 					SET_PAGE_DIRTY(p, FALSE);
6271 				}
6272 			}
6273 		}
6274 
6275 		assert(old_state != VM_PURGABLE_EMPTY);
6276 
6277 		purgeable_q_t queue;
6278 
6279 		/* find the correct queue */
6280 		if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
6281 			queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
6282 		} else {
6283 			if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
6284 				queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
6285 			} else {
6286 				queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
6287 			}
6288 		}
6289 
6290 		if (old_state == VM_PURGABLE_NONVOLATILE ||
6291 		    old_state == VM_PURGABLE_EMPTY) {
6292 			unsigned int delta;
6293 
6294 			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6295 			    VM_PURGABLE_NO_AGING) {
6296 				VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, FALSE);
6297 			} else {
6298 				VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, TRUE);
6299 			}
6300 
6301 			if (object->purgeable_when_ripe) {
6302 				kern_return_t result;
6303 
6304 				/* try to add token... this can fail */
6305 				vm_page_lock_queues();
6306 
6307 				result = vm_purgeable_token_add(queue);
6308 				if (result != KERN_SUCCESS) {
6309 					vm_page_unlock_queues();
6310 					return result;
6311 				}
6312 				vm_page_unlock_queues();
6313 			}
6314 
6315 			assert(object->resident_page_count >=
6316 			    object->wired_page_count);
6317 			delta = (object->resident_page_count -
6318 			    object->wired_page_count);
6319 
6320 			if (delta != 0) {
6321 				OSAddAtomic(delta,
6322 				    &vm_page_purgeable_count);
6323 			}
6324 			if (object->wired_page_count != 0) {
6325 				OSAddAtomic(object->wired_page_count,
6326 				    &vm_page_purgeable_wired_count);
6327 			}
6328 
6329 			VM_OBJECT_SET_PURGABLE(object, new_state);
6330 
6331 			/* object should be on "non-volatile" queue */
6332 			assert(object->objq.next != NULL);
6333 			assert(object->objq.prev != NULL);
6334 		} else if (old_state == VM_PURGABLE_VOLATILE) {
6335 			purgeable_q_t   old_queue;
6336 			boolean_t       purgeable_when_ripe;
6337 
6338 			/*
6339 			 * If we're only reassigning priorities / purgeable groups, we don't
6340 			 * change the token queue, so moving priorities will not make pages stay
6341 			 * around longer.  The reasoning: the algorithm gives most priority to the
6342 			 * most important object, and adding a new token boosts that object's
6343 			 * priority.  That already biases the system toward purgeable queues that
6344 			 * move a lot, so no further biasing seems necessary here, where no new object is added.
6345 			 */
6346 			assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
6347 
6348 			old_queue = vm_purgeable_object_remove(object);
6349 			assert(old_queue);
6350 
6351 			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
6352 			    VM_PURGABLE_NO_AGING) {
6353 				purgeable_when_ripe = FALSE;
6354 			} else {
6355 				purgeable_when_ripe = TRUE;
6356 			}
6357 
6358 			if (old_queue != queue ||
6359 			    (purgeable_when_ripe !=
6360 			    object->purgeable_when_ripe)) {
6361 				kern_return_t result;
6362 
6363 				/* Changing queue. Have to move token. */
6364 				vm_page_lock_queues();
6365 				if (object->purgeable_when_ripe) {
6366 					vm_purgeable_token_delete_last(old_queue);
6367 				}
6368 				VM_OBJECT_SET_PURGEABLE_WHEN_RIPE(object, purgeable_when_ripe);
6369 				if (object->purgeable_when_ripe) {
6370 					result = vm_purgeable_token_add(queue);
6371 					assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
6372 				}
6373 				vm_page_unlock_queues();
6374 			}
6375 		}
6377 		vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT );
6378 		if (old_state == VM_PURGABLE_NONVOLATILE) {
6379 			vm_purgeable_accounting(object,
6380 			    VM_PURGABLE_NONVOLATILE);
6381 		}
6382 
6383 		assert(queue->debug_count_objects >= 0);
6384 
6385 		break;
6386 
6387 
6388 	case VM_PURGABLE_EMPTY:
6389 		if (object->volatile_fault) {
6390 			vm_page_t       p;
6391 			int             refmod;
6392 
6393 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6394 				if (p->vmp_busy ||
6395 				    VM_PAGE_WIRED(p) ||
6396 				    vm_page_is_fictitious(p)) {
6397 					continue;
6398 				}
6399 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
6400 				if ((refmod & VM_MEM_MODIFIED) &&
6401 				    !p->vmp_dirty) {
6402 					SET_PAGE_DIRTY(p, FALSE);
6403 				}
6404 			}
6405 		}
6406 
6407 		if (old_state == VM_PURGABLE_VOLATILE) {
6408 			purgeable_q_t old_queue;
6409 
6410 			/* object should be on a queue */
6411 			assert(object->objq.next != NULL &&
6412 			    object->objq.prev != NULL);
6413 
6414 			old_queue = vm_purgeable_object_remove(object);
6415 			assert(old_queue);
6416 			if (object->purgeable_when_ripe) {
6417 				vm_page_lock_queues();
6418 				vm_purgeable_token_delete_first(old_queue);
6419 				vm_page_unlock_queues();
6420 			}
6421 		}
6422 
6423 		if (old_state == VM_PURGABLE_NONVOLATILE) {
6424 			/*
6425 			 * This object's pages were previously accounted as
6426 			 * "non-volatile" and now need to be accounted as
6427 			 * "volatile".
6428 			 */
6429 			vm_purgeable_accounting(object,
6430 			    VM_PURGABLE_NONVOLATILE);
6431 			/*
6432 			 * Set to VM_PURGABLE_EMPTY because the pages are no
6433 			 * longer accounted in the "non-volatile" ledger
6434 			 * and are also not accounted for in
6435 			 * "vm_page_purgeable_count".
6436 			 */
6437 			VM_OBJECT_SET_PURGABLE(object, VM_PURGABLE_EMPTY);
6438 		}
6439 
6440 		(void) vm_object_purge(object, 0);
6441 		assert(object->purgable == VM_PURGABLE_EMPTY);
6442 
6443 		break;
6444 	}
6445 
6446 	*state = old_state;
6447 
6448 	vm_object_lock_assert_exclusive(object);
6449 
6450 	return KERN_SUCCESS;
6451 }
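
/*
 * Hedged user-level sketch of the purgeable life cycle documented above (not
 * part of the kernel build).  It uses the public Mach calls that funnel into
 * vm_object_purgable_control(); "len" is a hypothetical size and error
 * handling is omitted.
 */
#if 0
	vm_address_t addr = 0;
	vm_size_t len = 4 * PAGE_SIZE;  /* hypothetical size */
	int state;

	/* create a purgeable region, as described above */
	vm_allocate(mach_task_self(), &addr, len,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);

	/* ... fill it with data that can be recreated ... */

	/* allow the system to reclaim it under memory pressure */
	state = VM_PURGABLE_VOLATILE;
	vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);

	/* before reusing the data, make it non-volatile and check the old state */
	state = VM_PURGABLE_NONVOLATILE;
	vm_purgable_control(mach_task_self(), addr,
	    VM_PURGABLE_SET_STATE, &state);
	if (state == VM_PURGABLE_EMPTY) {
		/* contents were reclaimed while volatile; recreate them */
	}
#endif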
6452 
6453 kern_return_t
6454 vm_object_get_page_counts(
6455 	vm_object_t             object,
6456 	vm_object_offset_t      offset,
6457 	vm_object_size_t        size,
6458 	uint64_t               *resident_page_count,
6459 	uint64_t               *dirty_page_count,
6460 	uint64_t               *swapped_page_count)
6461 {
6462 	vm_page_t               p = VM_PAGE_NULL;
6463 	unsigned int            local_resident_count = 0;
6464 	unsigned int            local_dirty_count = 0;
6465 	unsigned int            local_swapped_count = 0;
6466 	vm_object_offset_t      cur_offset = 0;
6467 	vm_object_offset_t      end_offset = 0;
6468 
6469 	if (object == VM_OBJECT_NULL) {
6470 		return KERN_INVALID_ARGUMENT;
6471 	}
6472 
6473 	cur_offset = offset;
6474 	end_offset = offset + size;
6475 
6476 	vm_object_lock_assert_exclusive(object);
6477 
6478 	if (resident_page_count != NULL &&
6479 	    dirty_page_count == NULL &&
6480 	    offset == 0 &&
6481 	    object->vo_size == size) {
6482 		/*
6483 		 * Fast path when:
6484 		 * - we only want the resident page count, and,
6485 		 * - the entire object is exactly covered by the request.
6486 		 */
6487 		local_resident_count = object->resident_page_count;
6488 		if (object->internal && object->pager != NULL) {
6489 			local_swapped_count = vm_compressor_pager_get_count(object->pager);
6490 		}
6491 		goto out;
6492 	}
6493 
6494 	if (object->resident_page_count <= (size >> PAGE_SHIFT) &&
6495 	    swapped_page_count == NULL) {
6496 		/*
6497 		 * Faster path when we don't care about non-resident pages and the object has
6498 		 * fewer resident pages than the requested range.
6499 		 */
6500 		vm_page_queue_iterate(&object->memq, p, vmp_listq) {
6501 			if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
6502 				local_resident_count++;
6503 				if (p->vmp_dirty ||
6504 				    (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6505 					local_dirty_count++;
6506 				}
6507 			}
6508 		}
6509 		goto out;
6510 	}
6511 
6512 	for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
6513 		p = vm_page_lookup(object, cur_offset);
6514 
6515 		if (p != VM_PAGE_NULL) {
6516 			local_resident_count++;
6517 			if (p->vmp_dirty ||
6518 			    (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
6519 				local_dirty_count++;
6520 			}
6521 		} else if (page_is_paged_out(object, cur_offset)) {
6522 			local_swapped_count++;
6523 		}
6524 	}
6525 
6526 out:
6527 	if (resident_page_count != NULL) {
6528 		*resident_page_count = local_resident_count;
6529 	}
6530 
6531 	if (dirty_page_count != NULL) {
6532 		*dirty_page_count = local_dirty_count;
6533 	}
6534 
6535 	if (swapped_page_count != NULL) {
6536 		*swapped_page_count = local_swapped_count;
6537 	}
6538 
6539 	return KERN_SUCCESS;
6540 }
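
/*
 * Hedged usage sketch (not part of the build): sampling the resident and
 * swapped page counts for an entire object.  "object" is a hypothetical
 * placeholder; the exclusive object lock is required, per the assertion
 * above.  Passing a NULL dirty count with offset 0 and the full size takes
 * the fast path that reads the cached counters.
 */
#if 0
	uint64_t resident = 0, swapped = 0;

	vm_object_lock(object);
	vm_object_get_page_counts(object, 0, object->vo_size,
	    &resident, NULL, &swapped);
	vm_object_unlock(object);
#endif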
6541 
6542 
6543 /*
6544  *	vm_object_reference:
6545  *
6546  *	Gets another reference to the given object.
6547  */
6548 #ifdef vm_object_reference
6549 #undef vm_object_reference
6550 #endif
6551 __private_extern__ void
6552 vm_object_reference(
6553 	vm_object_t     object)
6554 {
6555 	if (object == VM_OBJECT_NULL) {
6556 		return;
6557 	}
6558 
6559 	vm_object_lock(object);
6560 	vm_object_reference_locked(object);
6561 	vm_object_unlock(object);
6562 }
6563 
6564 /*
6565  * vm_object_transpose
6566  *
6567  * This routine takes two VM objects of the same size and exchanges
6568  * their backing store.
6569  * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6570  * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6571  *
6572  * The VM objects must not be locked by caller.
6573  */
6574 unsigned int vm_object_transpose_count = 0;
6575 kern_return_t
6576 vm_object_transpose(
6577 	vm_object_t             object1,
6578 	vm_object_t             object2,
6579 	vm_object_size_t        transpose_size)
6580 {
6581 	vm_object_t             tmp_object;
6582 	kern_return_t           retval;
6583 	boolean_t               object1_locked, object2_locked;
6584 	vm_page_t               page;
6585 	vm_object_offset_t      page_offset;
6586 
6587 	tmp_object = VM_OBJECT_NULL;
6588 	object1_locked = FALSE; object2_locked = FALSE;
6589 
6590 	if (object1 == object2 ||
6591 	    object1 == VM_OBJECT_NULL ||
6592 	    object2 == VM_OBJECT_NULL) {
6593 		/*
6594 		 * If the 2 VM objects are the same (or either is null),
6595 		 * there's no point in exchanging their backing store.
6596 		 */
6597 		retval = KERN_INVALID_VALUE;
6598 		goto done;
6599 	}
6600 
6601 	/*
6602 	 * Since we need to lock both objects at the same time,
6603 	 * make sure we always lock them in the same order to
6604 	 * avoid deadlocks.
6605 	 */
6606 	if (object1 > object2) {
6607 		tmp_object = object1;
6608 		object1 = object2;
6609 		object2 = tmp_object;
6610 	}
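	/*
	 * At this point object1 holds the lower of the two pointers, so any
	 * two threads transposing the same pair of objects will take the
	 * locks below in the same (object1, then object2) order.
	 */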
6611 
6612 	/*
6613 	 * Allocate a temporary VM object to hold object1's contents
6614 	 * while we copy object2 to object1.
6615 	 */
6616 	tmp_object = vm_object_allocate(transpose_size, object1->vmo_provenance);
6617 	vm_object_lock(tmp_object);
6618 	VM_OBJECT_SET_CAN_PERSIST(tmp_object, FALSE);
6619 
6620 
6621 	/*
6622 	 * Grab control of the 1st VM object.
6623 	 */
6624 	vm_object_lock(object1);
6625 	object1_locked = TRUE;
6626 	if (!object1->alive || object1->terminating ||
6627 	    object1->vo_copy || object1->shadow || object1->shadowed ||
6628 	    object1->purgable != VM_PURGABLE_DENY) {
6629 		/*
6630 		 * We don't deal with copy or shadow objects (yet).
6631 		 */
6632 		retval = KERN_INVALID_VALUE;
6633 		goto done;
6634 	}
6635 	/*
6636 	 * We're about to mess with the object's backing store and
6637 	 * taking a "paging_in_progress" reference wouldn't be enough
6638 	 * to prevent any paging activity on this object, so the caller should
6639 	 * have "quiesced" the objects beforehand, via a UPL operation with
6640 	 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6641 	 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6642 	 *
6643 	 * Wait for any paging operation to complete (but only paging, not
6644 	 * other kinds of activity not linked to the pager).  After we're
6645 	 * satisfied that there's no more paging in progress, we keep the
6646 	 * object locked, to guarantee that no one tries to access its pager.
6647 	 */
6648 	vm_object_paging_only_wait(object1, THREAD_UNINT);
6649 
6650 	/*
6651 	 * Same as above for the 2nd object...
6652 	 */
6653 	vm_object_lock(object2);
6654 	object2_locked = TRUE;
6655 	if (!object2->alive || object2->terminating ||
6656 	    object2->vo_copy || object2->shadow || object2->shadowed ||
6657 	    object2->purgable != VM_PURGABLE_DENY) {
6658 		retval = KERN_INVALID_VALUE;
6659 		goto done;
6660 	}
6661 	vm_object_paging_only_wait(object2, THREAD_UNINT);
6662 
6663 
6664 	if (object1->vo_size != object2->vo_size ||
6665 	    object1->vo_size != transpose_size) {
6666 		/*
6667 		 * If the 2 objects don't have the same size, we can't
6668 		 * exchange their backing stores or one would overflow.
6669 		 * If their size doesn't match the caller's
6670 		 * "transpose_size", we can't do it either because the
6671 		 * transpose operation will affect the entire span of
6672 		 * the objects.
6673 		 */
6674 		retval = KERN_INVALID_VALUE;
6675 		goto done;
6676 	}
6677 
6678 
6679 	/*
6680 	 * Transpose the lists of resident pages.
6681 	 * This also updates the resident_page_count and the memq_hint.
6682 	 */
6683 	if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
6684 		/*
6685 		 * No pages in object1, just transfer pages
6686 		 * from object2 to object1.  No need to go through
6687 		 * an intermediate object.
6688 		 */
6689 		while (!vm_page_queue_empty(&object2->memq)) {
6690 			page = (vm_page_t) vm_page_queue_first(&object2->memq);
6691 			vm_page_rename(page, object1, page->vmp_offset);
6692 		}
6693 		assert(vm_page_queue_empty(&object2->memq));
6694 	} else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
6695 		/*
6696 		 * No pages in object2, just transfer pages
6697 		 * from object1 to object2.  No need to go through
6698 		 * an intermediate object.
6699 		 */
6700 		while (!vm_page_queue_empty(&object1->memq)) {
6701 			page = (vm_page_t) vm_page_queue_first(&object1->memq);
6702 			vm_page_rename(page, object2, page->vmp_offset);
6703 		}
6704 		assert(vm_page_queue_empty(&object1->memq));
6705 	} else {
6706 		/* transfer object1's pages to tmp_object */
6707 		while (!vm_page_queue_empty(&object1->memq)) {
6708 			page = (vm_page_t) vm_page_queue_first(&object1->memq);
6709 			page_offset = page->vmp_offset;
6710 			vm_page_remove(page, TRUE);
6711 			page->vmp_offset = page_offset;
6712 			vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
6713 		}
6714 		assert(vm_page_queue_empty(&object1->memq));
6715 		/* transfer object2's pages to object1 */
6716 		while (!vm_page_queue_empty(&object2->memq)) {
6717 			page = (vm_page_t) vm_page_queue_first(&object2->memq);
6718 			vm_page_rename(page, object1, page->vmp_offset);
6719 		}
6720 		assert(vm_page_queue_empty(&object2->memq));
6721 		/* transfer tmp_object's pages to object2 */
6722 		while (!vm_page_queue_empty(&tmp_object->memq)) {
6723 			page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
6724 			vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
6725 			vm_page_insert(page, object2, page->vmp_offset);
6726 		}
6727 		assert(vm_page_queue_empty(&tmp_object->memq));
6728 	}
6729 
6730 #define __TRANSPOSE_FIELD(field)                                \
6731 MACRO_BEGIN                                                     \
6732 	tmp_object->field = object1->field;                     \
6733 	object1->field = object2->field;                        \
6734 	object2->field = tmp_object->field;                     \
6735 MACRO_END
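/*
 * For illustration, __TRANSPOSE_FIELD(pager) expands (modulo the
 * MACRO_BEGIN/MACRO_END wrapper) to a plain three-way swap that uses
 * tmp_object as scratch space:
 *
 *	tmp_object->pager = object1->pager;
 *	object1->pager = object2->pager;
 *	object2->pager = tmp_object->pager;
 *
 * tmp_object is re-initialized in the "done:" path below precisely so that
 * the stale pager and other fields left behind by these swaps are not
 * deallocated as if they were real.
 */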
6736 
6737 	/* "Lock" refers to the object not its contents */
6738 	/* "size" should be identical */
6739 	assert(object1->vo_size == object2->vo_size);
6740 	/* "memq_hint" was updated above when transposing pages */
6741 	/* "ref_count" refers to the object not its contents */
6742 	assert(os_ref_get_count_raw(&object1->ref_count) >= 1);
6743 	assert(os_ref_get_count_raw(&object2->ref_count) >= 1);
6744 	/* "resident_page_count" was updated above when transposing pages */
6745 	/* "wired_page_count" was updated above when transposing pages */
6746 #if !VM_TAG_ACTIVE_UPDATE
6747 	/* "wired_objq" was dealt with along with "wired_page_count" */
6748 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6749 	/* "reusable_page_count" was updated above when transposing pages */
6750 	/* there should be no "copy" */
6751 	assert(!object1->vo_copy);
6752 	assert(!object2->vo_copy);
6753 	/* there should be no "shadow" */
6754 	assert(!object1->shadow);
6755 	assert(!object2->shadow);
6756 	__TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
6757 	__TRANSPOSE_FIELD(pager);
6758 	__TRANSPOSE_FIELD(paging_offset);
6759 	__TRANSPOSE_FIELD(pager_control);
6760 	/* update the memory_objects' pointers back to the VM objects */
6761 	if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6762 		memory_object_control_collapse(&object1->pager_control,
6763 		    object1);
6764 	}
6765 	if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6766 		memory_object_control_collapse(&object2->pager_control,
6767 		    object2);
6768 	}
6769 	__TRANSPOSE_FIELD(copy_strategy);
6770 	/* "paging_in_progress" refers to the object not its contents */
6771 	assert(!object1->paging_in_progress);
6772 	assert(!object2->paging_in_progress);
6773 	assert(object1->activity_in_progress);
6774 	assert(object2->activity_in_progress);
6775 	/* "all_wanted" refers to the object not its contents */
6776 	__TRANSPOSE_FIELD(pager_created);
6777 	__TRANSPOSE_FIELD(pager_initialized);
6778 	__TRANSPOSE_FIELD(pager_ready);
6779 	__TRANSPOSE_FIELD(pager_trusted);
6780 	__TRANSPOSE_FIELD(can_persist);
6781 	__TRANSPOSE_FIELD(internal);
6782 	__TRANSPOSE_FIELD(private);
6783 	__TRANSPOSE_FIELD(pageout);
6784 	/* "alive" should be set */
6785 	assert(object1->alive);
6786 	assert(object2->alive);
6787 	/* "purgable" should be non-purgeable */
6788 	assert(object1->purgable == VM_PURGABLE_DENY);
6789 	assert(object2->purgable == VM_PURGABLE_DENY);
6790 	/* "shadowed" refers to the object not its contents */
6791 	__TRANSPOSE_FIELD(purgeable_when_ripe);
6792 	__TRANSPOSE_FIELD(true_share);
6793 	/* "terminating" should not be set */
6794 	assert(!object1->terminating);
6795 	assert(!object2->terminating);
6796 	/* transfer "named" reference if needed */
6797 	if (object1->named && !object2->named) {
6798 		os_ref_release_live_locked_raw(&object1->ref_count, &vm_object_refgrp);
6799 		os_ref_retain_locked_raw(&object2->ref_count, &vm_object_refgrp);
6800 	} else if (!object1->named && object2->named) {
6801 		os_ref_retain_locked_raw(&object1->ref_count, &vm_object_refgrp);
6802 		os_ref_release_live_locked_raw(&object2->ref_count, &vm_object_refgrp);
6803 	}
6804 	__TRANSPOSE_FIELD(named);
6805 	/* "shadow_severed" refers to the object not its contents */
6806 	__TRANSPOSE_FIELD(phys_contiguous);
6807 	__TRANSPOSE_FIELD(nophyscache);
6808 	__TRANSPOSE_FIELD(no_pager_reason);
6809 	/* "cached_list.next" points to transposed object */
6810 	object1->cached_list.next = (queue_entry_t) object2;
6811 	object2->cached_list.next = (queue_entry_t) object1;
6812 	/* "cached_list.prev" should be NULL */
6813 	assert(object1->cached_list.prev == NULL);
6814 	assert(object2->cached_list.prev == NULL);
6815 	__TRANSPOSE_FIELD(last_alloc);
6816 	__TRANSPOSE_FIELD(sequential);
6817 	__TRANSPOSE_FIELD(pages_created);
6818 	__TRANSPOSE_FIELD(pages_used);
6819 	__TRANSPOSE_FIELD(scan_collisions);
6820 	__TRANSPOSE_FIELD(cow_hint);
6821 	__TRANSPOSE_FIELD(wimg_bits);
6822 	__TRANSPOSE_FIELD(set_cache_attr);
6823 	__TRANSPOSE_FIELD(code_signed);
6824 	object1->transposed = TRUE;
6825 	object2->transposed = TRUE;
6826 	__TRANSPOSE_FIELD(mapping_in_progress);
6827 	__TRANSPOSE_FIELD(volatile_empty);
6828 	__TRANSPOSE_FIELD(volatile_fault);
6829 	__TRANSPOSE_FIELD(all_reusable);
6830 	assert(object1->blocked_access);
6831 	assert(object2->blocked_access);
6832 	__TRANSPOSE_FIELD(set_cache_attr);
6833 	assert(!object1->object_is_shared_cache);
6834 	assert(!object2->object_is_shared_cache);
6835 	/* ignore purgeable_queue_type and purgeable_queue_group */
6836 	assert(!object1->io_tracking);
6837 	assert(!object2->io_tracking);
6838 #if VM_OBJECT_ACCESS_TRACKING
6839 	assert(!object1->access_tracking);
6840 	assert(!object2->access_tracking);
6841 #endif /* VM_OBJECT_ACCESS_TRACKING */
6842 	__TRANSPOSE_FIELD(no_tag_update);
6843 #if CONFIG_SECLUDED_MEMORY
6844 	assert(!object1->eligible_for_secluded);
6845 	assert(!object2->eligible_for_secluded);
6846 	assert(!object1->can_grab_secluded);
6847 	assert(!object2->can_grab_secluded);
6848 #else /* CONFIG_SECLUDED_MEMORY */
6849 	assert(object1->__object3_unused_bits == 0);
6850 	assert(object2->__object3_unused_bits == 0);
6851 #endif /* CONFIG_SECLUDED_MEMORY */
6852 #if UPL_DEBUG
6853 	/* "uplq" refers to the object not its contents (see upl_transpose()) */
6854 #endif
6855 	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
6856 	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
6857 	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
6858 	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
6859 	__TRANSPOSE_FIELD(vmo_provenance);
6860 
6861 #undef __TRANSPOSE_FIELD
6862 
6863 	retval = KERN_SUCCESS;
6864 
6865 done:
6866 	/*
6867 	 * Cleanup.
6868 	 */
6869 	if (tmp_object != VM_OBJECT_NULL) {
6870 		vm_object_unlock(tmp_object);
6871 		/*
6872 		 * Re-initialize the temporary object to avoid
6873 		 * deallocating a real pager.
6874 		 */
6875 		_vm_object_allocate(
6876 			transpose_size,
6877 			tmp_object,
6878 			/*
6879 			 * Since we're reallocating purely to deallocate,
6880 			 * don't bother trying to set a sensible provenance.
6881 			 */
6882 			VM_MAP_SERIAL_NONE
6883 			);
6884 		vm_object_deallocate(tmp_object);
6885 		tmp_object = VM_OBJECT_NULL;
6886 	}
6887 
6888 	if (object1_locked) {
6889 		vm_object_unlock(object1);
6890 		object1_locked = FALSE;
6891 	}
6892 	if (object2_locked) {
6893 		vm_object_unlock(object2);
6894 		object2_locked = FALSE;
6895 	}
6896 
6897 	vm_object_transpose_count++;
6898 
6899 	return retval;
6900 }
6901 
6902 
6903 /*
6904  *      vm_object_cluster_size
6905  *
6906  *      Determine how big a cluster we should issue an I/O for...
6907  *
6908  *	Inputs:   *start == offset of page needed
6909  *		  *length == maximum cluster pager can handle
6910  *	Outputs:  *start == beginning offset of cluster
6911  *		  *length == length of cluster to try
6912  *
6913  *	The original *start will be encompassed by the cluster
6914  *
6915  */
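/*
 * For instance (hypothetical numbers, best case where the whole span
 * qualifies): a random-access fault at *start = 0x20000 that ends up with a
 * 0x20000-byte cluster could come back as *start = 0x10000 and
 * *length = 0x20000, i.e. with the faulting page roughly centered; how much
 * is actually clustered depends on the access pattern, free memory and the
 * pager, as computed below.
 */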
6916 extern int speculative_reads_disabled;
6917 
6918 /*
6919  * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
6920  * to derive min_ph_size and max_ph_size (IMP: bytes, not # of pages) and expect those values to
6921  * always be page-aligned. The derivation could involve operations (e.g. division)
6922  * that could give us non-page-size aligned values if we start out with values that
6923  * are odd multiples of PAGE_SIZE.
6924  */
6925 #if !XNU_TARGET_OS_OSX
6926 unsigned int preheat_max_bytes = (1024 * 512);
6927 #else /* !XNU_TARGET_OS_OSX */
6928 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
6929 #endif /* !XNU_TARGET_OS_OSX */
6930 unsigned int preheat_min_bytes = (1024 * 32);
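/*
 * Illustrative arithmetic: the non-macOS default of 512KB is 128 pages with
 * 4K pages or 32 pages with 16K pages, and the 32KB minimum is 8 or 2 pages
 * respectively, so both defaults start out as even multiples of PAGE_SIZE as
 * the comment above requires.
 */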
6931 
6932 
6933 __private_extern__ void
6934 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6935     vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6936 {
6937 	vm_size_t               pre_heat_size;
6938 	vm_size_t               tail_size;
6939 	vm_size_t               head_size;
6940 	vm_size_t               max_length;
6941 	vm_size_t               cluster_size;
6942 	vm_object_offset_t      object_size;
6943 	vm_object_offset_t      orig_start;
6944 	vm_object_offset_t      target_start;
6945 	vm_object_offset_t      offset;
6946 	vm_behavior_t           behavior;
6947 	boolean_t               look_behind = TRUE;
6948 	boolean_t               look_ahead  = TRUE;
6949 	boolean_t               isSSD = FALSE;
6950 	uint32_t                throttle_limit;
6951 	int                     sequential_run;
6952 	int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6953 	vm_size_t               max_ph_size;
6954 	vm_size_t               min_ph_size;
6955 
6956 	assert( !(*length & PAGE_MASK));
6957 	assert( !(*start & PAGE_MASK_64));
6958 
6959 	/*
6960 	 * remember the maximum length of the run requested
6961 	 */
6962 	max_length = *length;
6963 	/*
6964 	 * we'll always return a cluster size of at least
6965 	 * 1 page, since the original fault must always
6966 	 * be processed
6967 	 */
6968 	*length = PAGE_SIZE;
6969 	*io_streaming = 0;
6970 
6971 	if (speculative_reads_disabled || fault_info == NULL) {
6972 		/*
6973 		 * no cluster... just fault the page in
6974 		 */
6975 		return;
6976 	}
6977 	orig_start = *start;
6978 	target_start = orig_start;
6979 	cluster_size = round_page(fault_info->cluster_size);
6980 	behavior = fault_info->behavior;
6981 
6982 	vm_object_lock(object);
6983 
6984 	if (object->pager == MEMORY_OBJECT_NULL) {
6985 		goto out;       /* pager is gone for this object, nothing more to do */
6986 	}
6987 	vnode_pager_get_isSSD(object->pager, &isSSD);
6988 
6989 	min_ph_size = round_page(preheat_min_bytes);
6990 	max_ph_size = round_page(preheat_max_bytes);
6991 
6992 #if XNU_TARGET_OS_OSX
6993 	/*
6994 	 * If we're paging from an SSD, we cut the minimum cluster size in half
6995 	 * and reduce the maximum size by a factor of 8. We do this because the
6996 	 * latency to issue an I/O is a couple of orders of magnitude smaller than
6997 	 * on spinning media, so being overly aggressive on the cluster size (to
6998 	 * try and reduce cumulative seek penalties) isn't a good trade off over
6999 	 * the increased memory pressure caused by the larger speculative I/Os.
7000 	 * However, the latency isn't 0, so a small amount of clustering is still
7001 	 * a win.
7002 	 *
7003 	 * If an explicit cluster size has already been provided, then we're
7004 	 * receiving a strong hint that the entire range will be needed (e.g.
7005 	 * wiring, willneed). In these cases, we want to maximize the I/O size
7006 	 * to minimize the number of I/Os issued.
7007 	 */
7008 	if (isSSD && cluster_size <= PAGE_SIZE) {
7009 		min_ph_size /= 2;
7010 		max_ph_size /= 8;
7011 
7012 		if (min_ph_size & PAGE_MASK_64) {
7013 			min_ph_size = trunc_page(min_ph_size);
7014 		}
7015 
7016 		if (max_ph_size & PAGE_MASK_64) {
7017 			max_ph_size = trunc_page(max_ph_size);
7018 		}
7019 	}
7020 #endif /* XNU_TARGET_OS_OSX */
7021 
7022 	if (min_ph_size < PAGE_SIZE) {
7023 		min_ph_size = PAGE_SIZE;
7024 	}
7025 
7026 	if (max_ph_size < PAGE_SIZE) {
7027 		max_ph_size = PAGE_SIZE;
7028 	} else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
7029 		max_ph_size = MAX_UPL_TRANSFER_BYTES;
7030 	}
7031 
7032 	if (max_length > max_ph_size) {
7033 		max_length = max_ph_size;
7034 	}
7035 
7036 	if (max_length <= PAGE_SIZE) {
7037 		goto out;
7038 	}
7039 
7040 	if (object->internal) {
7041 		object_size = object->vo_size;
7042 	} else {
7043 		vnode_pager_get_object_size(object->pager, &object_size);
7044 	}
7045 
7046 	object_size = round_page_64(object_size);
7047 
7048 	if (orig_start >= object_size) {
7049 		/*
7050 		 * fault occurred beyond the EOF...
7051 		 * we need to punt w/o changing the
7052 		 * starting offset
7053 		 */
7054 		goto out;
7055 	}
7056 	if (object->pages_used > object->pages_created) {
7057 		/*
7058 		 * must have wrapped our 32 bit counters
7059 		 * so reset
7060 		 */
7061 		object->pages_used = object->pages_created = 0;
7062 	}
7063 	if ((sequential_run = object->sequential)) {
7064 		if (sequential_run < 0) {
7065 			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
7066 			sequential_run = 0 - sequential_run;
7067 		} else {
7068 			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
7069 		}
7070 	}
7071 	switch (behavior) {
7072 	default:
7073 		behavior = VM_BEHAVIOR_DEFAULT;
7074 		OS_FALLTHROUGH;
7075 
7076 	case VM_BEHAVIOR_DEFAULT:
7077 		if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
7078 			goto out;
7079 		}
7080 
7081 		if (sequential_run >= (3 * PAGE_SIZE)) {
7082 			pre_heat_size = sequential_run + PAGE_SIZE;
7083 
7084 			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
7085 				look_behind = FALSE;
7086 			} else {
7087 				look_ahead = FALSE;
7088 			}
7089 
7090 			*io_streaming = 1;
7091 		} else {
7092 			if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
7093 				/*
7094 				 * prime the pump
7095 				 */
7096 				pre_heat_size = min_ph_size;
7097 			} else {
7098 				/*
7099 				 * Linear growth in PH size: The maximum size is max_length...
7100 				 * this cacluation will result in a size that is neither a
7101 				 * this calculation will result in a size that is neither a
7102 				 * it up to the nearest PAGE_SIZE boundary
7103 				 */
7104 				pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
7105 
7106 				if (pre_heat_size < min_ph_size) {
7107 					pre_heat_size = min_ph_size;
7108 				} else {
7109 					pre_heat_size = round_page(pre_heat_size);
7110 				}
7111 			}
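			/*
			 * Worked example (hypothetical numbers): with max_length
			 * of 256KB, pages_used = 100 and pages_created = 400, the
			 * calculation above yields 256KB * 100 / 400 = 64KB, which
			 * is then rounded up to a page boundary (or raised to
			 * min_ph_size if it had come out smaller).
			 */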
7112 		}
7113 		break;
7114 
7115 	case VM_BEHAVIOR_RANDOM:
7116 		if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
7117 			goto out;
7118 		}
7119 		break;
7120 
7121 	case VM_BEHAVIOR_SEQUENTIAL:
7122 		if ((pre_heat_size = cluster_size) == 0) {
7123 			pre_heat_size = sequential_run + PAGE_SIZE;
7124 		}
7125 		look_behind = FALSE;
7126 		*io_streaming = 1;
7127 
7128 		break;
7129 
7130 	case VM_BEHAVIOR_RSEQNTL:
7131 		if ((pre_heat_size = cluster_size) == 0) {
7132 			pre_heat_size = sequential_run + PAGE_SIZE;
7133 		}
7134 		look_ahead = FALSE;
7135 		*io_streaming = 1;
7136 
7137 		break;
7138 	}
7139 	throttle_limit = (uint32_t) max_length;
7140 	assert(throttle_limit == max_length);
7141 
7142 	if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
7143 		if (max_length > throttle_limit) {
7144 			max_length = throttle_limit;
7145 		}
7146 	}
7147 	if (pre_heat_size > max_length) {
7148 		pre_heat_size = max_length;
7149 	}
7150 
7151 	if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
7152 		unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
7153 
7154 		if (consider_free < vm_page_throttle_limit) {
7155 			pre_heat_size = trunc_page(pre_heat_size / 16);
7156 		} else if (consider_free < vm_page_free_target) {
7157 			pre_heat_size = trunc_page(pre_heat_size / 4);
7158 		}
7159 
7160 		if (pre_heat_size < min_ph_size) {
7161 			pre_heat_size = min_ph_size;
7162 		}
7163 	}
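	/*
	 * Illustrative numbers for the clipping above: a 256KB pre_heat_size
	 * becomes trunc_page(256KB / 16) = 16KB when free + cleaned pages are
	 * below vm_page_throttle_limit, or trunc_page(256KB / 4) = 64KB when
	 * below vm_page_free_target, and is then raised back to min_ph_size if
	 * it fell below it.
	 */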
7164 	if (look_ahead == TRUE) {
7165 		if (look_behind == TRUE) {
7166 			/*
7167 			 * if we get here it's due to a random access...
7168 			 * so we want to center the original fault address
7169 			 * within the cluster we will issue... make sure
7170 			 * to calculate 'head_size' as a multiple of PAGE_SIZE...
7171 			 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
7172 			 * necessarily an even number of pages so we need to truncate
7173 			 * the result to a PAGE_SIZE boundary
7174 			 */
7175 			head_size = trunc_page(pre_heat_size / 2);
7176 
7177 			if (target_start > head_size) {
7178 				target_start -= head_size;
7179 			} else {
7180 				target_start = 0;
7181 			}
7182 
7183 			/*
7184 			 * 'target_start' at this point represents the beginning offset
7185 			 * of the cluster we are considering... 'orig_start' will be in
7186 			 * the center of this cluster if we didn't have to clip the start
7187 			 * due to running into the start of the file
7188 			 */
7189 		}
7190 		if ((target_start + pre_heat_size) > object_size) {
7191 			pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
7192 		}
7193 		/*
7194 		 * at this point calculate the number of pages beyond the original fault
7195 		 * address that we want to consider... this is guaranteed not to extend beyond
7196 		 * the current EOF...
7197 		 */
7198 		assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
7199 		tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
7200 	} else {
7201 		if (pre_heat_size > target_start) {
7202 			/*
7203 			 * since pre_heat_size is always smaller than 2^32,
7204 			 * if it is larger than target_start (a 64 bit value)
7205 			 * it is safe to clip target_start to 32 bits
7206 			 */
7207 			pre_heat_size = (vm_size_t) target_start;
7208 		}
7209 		tail_size = 0;
7210 	}
7211 	assert( !(target_start & PAGE_MASK_64));
7212 	assert( !(pre_heat_size & PAGE_MASK_64));
7213 
7214 	if (pre_heat_size <= PAGE_SIZE) {
7215 		goto out;
7216 	}
7217 
7218 	if (look_behind == TRUE) {
7219 		/*
7220 		 * take a look at the pages before the original
7221 		 * faulting offset... recalculate this in case
7222 		 * we had to clip 'pre_heat_size' above to keep
7223 		 * from running past the EOF.
7224 		 */
7225 		head_size = pre_heat_size - tail_size - PAGE_SIZE;
7226 
7227 		for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
7228 			/*
7229 			 * don't poke below the lowest offset
7230 			 */
7231 			if (offset < fault_info->lo_offset) {
7232 				break;
7233 			}
7234 			/*
7235 			 * for external objects or internal objects w/o a pager,
7236 			 * vm_object_compressor_pager_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7237 			 */
7238 			if (vm_object_compressor_pager_state_get(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7239 				break;
7240 			}
7241 			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7242 				/*
7243 				 * don't bridge resident pages
7244 				 */
7245 				break;
7246 			}
7247 			*start = offset;
7248 			*length += PAGE_SIZE;
7249 		}
7250 	}
7251 	if (look_ahead == TRUE) {
7252 		for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
7253 			/*
7254 			 * don't poke above the highest offset
7255 			 */
7256 			if (offset >= fault_info->hi_offset) {
7257 				break;
7258 			}
7259 			assert(offset < object_size);
7260 
7261 			/*
7262 			 * for external objects or internal objects w/o a pager,
7263 			 * vm_object_compressor_pager_state_get will return VM_EXTERNAL_STATE_UNKNOWN
7264 			 */
7265 			if (vm_object_compressor_pager_state_get(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
7266 				break;
7267 			}
7268 			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
7269 				/*
7270 				 * don't bridge resident pages
7271 				 */
7272 				break;
7273 			}
7274 			*length += PAGE_SIZE;
7275 		}
7276 	}
7277 out:
7278 	if (*length > max_length) {
7279 		*length = max_length;
7280 	}
7281 
7282 	vm_object_unlock(object);
7283 
7284 	DTRACE_VM1(clustersize, vm_size_t, *length);
7285 }
7286 
7287 
7288 /*
7289  * Allow manipulation of individual page state.  This is actually part of
7290  * the UPL regimen but takes place on the VM object rather than on a UPL
7291  */
7292 
7293 kern_return_t
7294 vm_object_page_op(
7295 	vm_object_t             object,
7296 	vm_object_offset_t      offset,
7297 	int                     ops,
7298 	ppnum_t                 *phys_entry,
7299 	int                     *flags)
7300 {
7301 	vm_page_t               dst_page;
7302 
7303 	vm_object_lock(object);
7304 
7305 	if (ops & UPL_POP_PHYSICAL) {
7306 		if (object->phys_contiguous) {
7307 			if (phys_entry) {
7308 				*phys_entry = (ppnum_t)
7309 				    (object->vo_shadow_offset >> PAGE_SHIFT);
7310 			}
7311 			vm_object_unlock(object);
7312 			return KERN_SUCCESS;
7313 		} else {
7314 			vm_object_unlock(object);
7315 			return KERN_INVALID_OBJECT;
7316 		}
7317 	}
7318 	if (object->phys_contiguous) {
7319 		vm_object_unlock(object);
7320 		return KERN_INVALID_OBJECT;
7321 	}
7322 
7323 	while (TRUE) {
7324 		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
7325 			vm_object_unlock(object);
7326 			return KERN_FAILURE;
7327 		}
7328 
7329 		/* Sync up on getting the busy bit */
7330 		if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
7331 		    (((ops & UPL_POP_SET) &&
7332 		    (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
7333 			/* someone else is playing with the page, we will */
7334 			/* have to wait */
7335 			vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_DEFAULT);
7336 			continue;
7337 		}
7338 
7339 		if (ops & UPL_POP_DUMP) {
7340 			if (dst_page->vmp_pmapped == TRUE) {
7341 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7342 			}
7343 
7344 			VM_PAGE_FREE(dst_page);
7345 			break;
7346 		}
7347 
7348 		if (flags) {
7349 			*flags = 0;
7350 
7351 			/* Get the condition of flags before requested ops */
7352 			/* are undertaken */
7353 
7354 			if (dst_page->vmp_dirty) {
7355 				*flags |= UPL_POP_DIRTY;
7356 			}
7357 			if (dst_page->vmp_free_when_done) {
7358 				*flags |= UPL_POP_PAGEOUT;
7359 			}
7360 			if (dst_page->vmp_precious) {
7361 				*flags |= UPL_POP_PRECIOUS;
7362 			}
7363 			if (dst_page->vmp_absent) {
7364 				*flags |= UPL_POP_ABSENT;
7365 			}
7366 			if (dst_page->vmp_busy) {
7367 				*flags |= UPL_POP_BUSY;
7368 			}
7369 		}
7370 
7371 		/* The caller should have set UPL_POP_BUSY, either as part of */
7372 		/* this call or in a prior one */
7373 		if (ops & UPL_POP_SET) {
7374 			/* The protection granted with this assert will */
7375 			/* not be complete.  If the caller violates the */
7376 			/* convention and attempts to change page state */
7377 			/* without first setting busy we may not see it */
7378 			/* because the page may already be busy.  However */
7379 			/* if such violations occur we will assert sooner */
7380 			/* or later. */
7381 			assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
7382 			if (ops & UPL_POP_DIRTY) {
7383 				SET_PAGE_DIRTY(dst_page, FALSE);
7384 			}
7385 			if (ops & UPL_POP_PAGEOUT) {
7386 				dst_page->vmp_free_when_done = TRUE;
7387 			}
7388 			if (ops & UPL_POP_PRECIOUS) {
7389 				dst_page->vmp_precious = TRUE;
7390 			}
7391 			if (ops & UPL_POP_ABSENT) {
7392 				dst_page->vmp_absent = TRUE;
7393 			}
7394 			if (ops & UPL_POP_BUSY) {
7395 				dst_page->vmp_busy = TRUE;
7396 			}
7397 		}
7398 
7399 		if (ops & UPL_POP_CLR) {
7400 			assert(dst_page->vmp_busy);
7401 			if (ops & UPL_POP_DIRTY) {
7402 				dst_page->vmp_dirty = FALSE;
7403 			}
7404 			if (ops & UPL_POP_PAGEOUT) {
7405 				dst_page->vmp_free_when_done = FALSE;
7406 			}
7407 			if (ops & UPL_POP_PRECIOUS) {
7408 				dst_page->vmp_precious = FALSE;
7409 			}
7410 			if (ops & UPL_POP_ABSENT) {
7411 				dst_page->vmp_absent = FALSE;
7412 			}
7413 			if (ops & UPL_POP_BUSY) {
7414 				dst_page->vmp_busy = FALSE;
7415 				vm_page_wakeup(object, dst_page);
7416 			}
7417 		}
7418 		if (phys_entry) {
7419 			/*
7420 			 * The physical page number will remain valid
7421 			 * only if the page is kept busy.
7422 			 */
7423 			assert(dst_page->vmp_busy);
7424 			*phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
7425 		}
7426 
7427 		break;
7428 	}
7429 
7430 	vm_object_unlock(object);
7431 	return KERN_SUCCESS;
7432 }
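/*
 * Typical usage (a hypothetical caller, for illustration only): to obtain a
 * physical page number that stays valid, a caller sets the page busy in the
 * same call and clears it when done, with the object unlocked:
 *
 *	ppnum_t	phys;
 *	int	flags;
 *
 *	if (vm_object_page_op(object, offset,
 *	    UPL_POP_SET | UPL_POP_BUSY, &phys, &flags) == KERN_SUCCESS) {
 *		... use "phys" while the page remains busy ...
 *		vm_object_page_op(object, offset,
 *		    UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
 *	}
 *
 * The call returns KERN_FAILURE if the page is not resident, and the
 * physical page number is only guaranteed to stay valid while the page is
 * kept busy, per the assertion above.
 */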
7433 
7434 /*
7435  * vm_object_range_op offers performance enhancement over
7436  * vm_object_page_op for page_op functions which do not require page
7437  * level state to be returned from the call.  Page_op was created to provide
7438  * a low-cost alternative to page manipulation via UPLs when only a single
7439  * page was involved.  The range_op call establishes the ability in the _op
7440  * family of functions to work on multiple pages where the lack of page level
7441  * state handling allows the caller to avoid the overhead of the upl structures.
7442  */
7443 
7444 kern_return_t
7445 vm_object_range_op(
7446 	vm_object_t             object,
7447 	vm_object_offset_t      offset_beg,
7448 	vm_object_offset_t      offset_end,
7449 	int                     ops,
7450 	uint32_t                *range)
7451 {
7452 	vm_object_offset_t      offset;
7453 	vm_page_t               dst_page;
7454 
7455 	if (object->resident_page_count == 0) {
7456 		if (range) {
7457 			if (ops & UPL_ROP_PRESENT) {
7458 				*range = 0;
7459 			} else {
7460 				*range = (uint32_t) (offset_end - offset_beg);
7461 				assert(*range == (offset_end - offset_beg));
7462 			}
7463 		}
7464 		return KERN_SUCCESS;
7465 	}
7466 	vm_object_lock(object);
7467 
7468 	if (object->phys_contiguous) {
7469 		vm_object_unlock(object);
7470 		return KERN_INVALID_OBJECT;
7471 	}
7472 
7473 	offset = offset_beg & ~PAGE_MASK_64;
7474 
7475 	while (offset < offset_end) {
7476 		dst_page = vm_page_lookup(object, offset);
7477 		if (dst_page != VM_PAGE_NULL) {
7478 			if (ops & UPL_ROP_DUMP) {
7479 				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
7480 					/*
7481 					 * someone else is playing with the
7482 					 * page, we will have to wait
7483 					 */
7484 					vm_page_sleep(object, dst_page, THREAD_UNINT, LCK_SLEEP_DEFAULT);
7485 					/*
7486 					 * need to look the page up again since its
7487 					 * state may have changed while we slept;
7488 					 * it might even belong to a different object
7489 					 * at this point
7490 					 */
7491 					continue;
7492 				}
7493 				if (dst_page->vmp_laundry) {
7494 					vm_pageout_steal_laundry(dst_page, FALSE);
7495 				}
7496 
7497 				if (dst_page->vmp_pmapped == TRUE) {
7498 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
7499 				}
7500 
7501 				VM_PAGE_FREE(dst_page);
7502 			} else if ((ops & UPL_ROP_ABSENT)
7503 			    && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
7504 				break;
7505 			}
7506 		} else if (ops & UPL_ROP_PRESENT) {
7507 			break;
7508 		}
7509 
7510 		offset += PAGE_SIZE;
7511 	}
7512 	vm_object_unlock(object);
7513 
7514 	if (range) {
7515 		if (offset > offset_end) {
7516 			offset = offset_end;
7517 		}
7518 		if (offset > offset_beg) {
7519 			*range = (uint32_t) (offset - offset_beg);
7520 			assert(*range == (offset - offset_beg));
7521 		} else {
7522 			*range = 0;
7523 		}
7524 	}
7525 	return KERN_SUCCESS;
7526 }
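/*
 * Illustrative note on the "range" out-parameter (assuming a page-aligned
 * offset_beg): with UPL_ROP_PRESENT the scan above stops at the first offset
 * with no resident page, so *range reports the size in bytes of the leading
 * run of resident pages starting at offset_beg; with UPL_ROP_DUMP alone the
 * loop runs to offset_end and *range covers the whole requested span.
 */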
7527 
7528 /*
7529  * Used to point a pager directly to a range of memory (when the pager may be associated
7530  *   with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
7531  *   expect that the virtual address will denote the start of a range that is physically contiguous.
7532  */
7533 kern_return_t
7534 pager_map_to_phys_contiguous(
7535 	memory_object_control_t object,
7536 	memory_object_offset_t  offset,
7537 	addr64_t                base_vaddr,
7538 	vm_size_t               size)
7539 {
7540 	ppnum_t page_num;
7541 	boolean_t clobbered_private;
7542 	kern_return_t retval;
7543 	vm_object_t pager_object;
7544 
7545 	page_num = pmap_find_phys(kernel_pmap, base_vaddr);
7546 
7547 	if (!page_num) {
7548 		retval = KERN_FAILURE;
7549 		goto out;
7550 	}
7551 
7552 	pager_object = memory_object_control_to_vm_object(object);
7553 
7554 	if (!pager_object) {
7555 		retval = KERN_FAILURE;
7556 		goto out;
7557 	}
7558 
7559 	clobbered_private = pager_object->private;
7560 	if (pager_object->private != TRUE) {
7561 		vm_object_lock(pager_object);
7562 		VM_OBJECT_SET_PRIVATE(pager_object, TRUE);
7563 		vm_object_unlock(pager_object);
7564 	}
7565 	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
7566 
7567 	if (retval != KERN_SUCCESS) {
7568 		if (pager_object->private != clobbered_private) {
7569 			vm_object_lock(pager_object);
7570 			VM_OBJECT_SET_PRIVATE(pager_object, clobbered_private);
7571 			vm_object_unlock(pager_object);
7572 		}
7573 	}
7574 
7575 out:
7576 	return retval;
7577 }
7578 
7579 uint32_t scan_object_collision = 0;
7580 
7581 void
7582 vm_object_lock(vm_object_t object)
7583 {
7584 	if (object == vm_pageout_scan_wants_object) {
7585 		scan_object_collision++;
7586 		mutex_pause(2);
7587 	}
7588 	DTRACE_VM(vm_object_lock_w);
7589 	lck_rw_lock_exclusive(&object->Lock);
7590 }
7591 
7592 boolean_t
7593 vm_object_lock_avoid(vm_object_t object)
7594 {
7595 	if (object == vm_pageout_scan_wants_object) {
7596 		scan_object_collision++;
7597 		return TRUE;
7598 	}
7599 	return FALSE;
7600 }
7601 
7602 boolean_t
7603 _vm_object_lock_try(vm_object_t object)
7604 {
7605 	boolean_t       retval;
7606 
7607 	retval = lck_rw_try_lock_exclusive(&object->Lock);
7608 #if DEVELOPMENT || DEBUG
7609 	if (retval == TRUE) {
7610 		DTRACE_VM(vm_object_lock_w);
7611 	}
7612 #endif
7613 	return retval;
7614 }
7615 
7616 boolean_t
7617 vm_object_lock_try(vm_object_t object)
7618 {
7619 	/*
7620 	 * Called from hibernate path so check before blocking.
7621 	 */
7622 	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
7623 		mutex_pause(2);
7624 	}
7625 	return _vm_object_lock_try(object);
7626 }
7627 
7628 /*
7629  * Lock the object exclusive.
7630  *
7631  * Returns true iff the thread had to spin or block before
7632  * acquiring the lock.
7633  */
7634 bool
7635 vm_object_lock_check_contended(vm_object_t object)
7636 {
7637 	if (object == vm_pageout_scan_wants_object) {
7638 		scan_object_collision++;
7639 		mutex_pause(2);
7640 	}
7641 	DTRACE_VM(vm_object_lock_w);
7642 	return lck_rw_lock_exclusive_check_contended(&object->Lock);
7643 }
7644 
7645 void
7646 vm_object_lock_shared(vm_object_t object)
7647 {
7648 	if (vm_object_lock_avoid(object)) {
7649 		mutex_pause(2);
7650 	}
7651 	DTRACE_VM(vm_object_lock_r);
7652 	lck_rw_lock_shared(&object->Lock);
7653 }
7654 
7655 boolean_t
7656 vm_object_lock_yield_shared(vm_object_t object)
7657 {
7658 	boolean_t retval = FALSE, force_yield = FALSE;
7659 
7660 	vm_object_lock_assert_shared(object);
7661 
7662 	force_yield = vm_object_lock_avoid(object);
7663 
7664 	retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
7665 	if (retval) {
7666 		DTRACE_VM(vm_object_lock_yield);
7667 	}
7668 
7669 	return retval;
7670 }
7671 
7672 boolean_t
7673 vm_object_lock_try_shared(vm_object_t object)
7674 {
7675 	boolean_t retval;
7676 
7677 	if (vm_object_lock_avoid(object)) {
7678 		mutex_pause(2);
7679 	}
7680 	retval = lck_rw_try_lock_shared(&object->Lock);
7681 	if (retval) {
7682 		DTRACE_VM(vm_object_lock_r);
7683 	}
7684 	return retval;
7685 }
7686 
7687 boolean_t
7688 vm_object_lock_upgrade(vm_object_t object)
7689 {
7690 	boolean_t       retval;
7691 
7692 	retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
7693 #if DEVELOPMENT || DEBUG
7694 	if (retval == TRUE) {
7695 		DTRACE_VM(vm_object_lock_w);
7696 	}
7697 #endif
7698 	return retval;
7699 }
7700 
7701 void
7702 vm_object_unlock(vm_object_t object)
7703 {
7704 #if DEVELOPMENT || DEBUG
7705 	DTRACE_VM(vm_object_unlock);
7706 #endif
7707 	lck_rw_done(&object->Lock);
7708 }
7709 
7710 
7711 unsigned int vm_object_change_wimg_mode_count = 0;
7712 
7713 /*
7714  * The object must be locked
7715  */
7716 void
7717 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
7718 {
7719 	vm_object_lock_assert_exclusive(object);
7720 
7721 	vm_object_paging_only_wait(object, THREAD_UNINT);
7722 
7723 #if HAS_MTE
7724 	if (vm_object_is_mte_mappable(object)) {
7725 		panic("Changing WIMG mode on tagged VM object: %d", wimg_mode);
7726 	} else if (wimg_mode == VM_WIMG_MTE) {
7727 		panic("Changing untagged VM object to VM_WIMG_MTE: %d", object->wimg_bits);
7728 	}
7729 #endif /* HAS_MTE */
7730 
7731 	const unified_page_list_t pmap_batch_list = {
7732 		.pageq = &object->memq,
7733 		.type = UNIFIED_PAGE_LIST_TYPE_VM_PAGE_OBJ_Q,
7734 	};
7735 	pmap_batch_set_cache_attributes(&pmap_batch_list, wimg_mode);
7736 	object->set_cache_attr = !HAS_DEFAULT_CACHEABILITY(wimg_mode);
7737 
7738 	object->wimg_bits = wimg_mode;
7739 
7740 	vm_object_change_wimg_mode_count++;
7741 }
7742 
7743 #if CONFIG_FREEZE
7744 
7745 extern struct freezer_context   freezer_context_global;
7746 
7747 /*
7748  * This routine does the "relocation" of previously
7749  * compressed pages belonging to this object that are
7750  * residing in a number of compressed segments into
7751  * a set of compressed segments dedicated to hold
7752  * compressed pages belonging to this object.
7753  */
7754 
7755 extern AbsoluteTime c_freezer_last_yield_ts;
7756 
7757 #define MAX_FREE_BATCH  32
7758 #define FREEZER_DUTY_CYCLE_ON_MS        5
7759 #define FREEZER_DUTY_CYCLE_OFF_MS       5
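/*
 * Freed pages are accumulated on a local list and released in batches of
 * MAX_FREE_BATCH, so that vm_page_free_list() and the compression counter
 * update are amortized across many pages rather than done one page at a time
 * (see the local_freeq handling in vm_object_compressed_freezer_pageout()
 * below).
 */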
7760 
7761 static int c_freezer_should_yield(void);
7762 
7763 
7764 static int
7765 c_freezer_should_yield()
7766 {
7767 	AbsoluteTime    cur_time;
7768 	uint64_t        nsecs;
7769 
7770 	assert(c_freezer_last_yield_ts);
7771 	clock_get_uptime(&cur_time);
7772 
7773 	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
7774 	absolutetime_to_nanoseconds(cur_time, &nsecs);
7775 
7776 	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
7777 		return 1;
7778 	}
7779 	return 0;
7780 }
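/*
 * The comparison above is just the duty cycle expressed in nanoseconds:
 * 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS = 5,000,000 ns, i.e. yield once we
 * have been running for more than 5ms since the last yield; callers then
 * sleep for FREEZER_DUTY_CYCLE_OFF_MS before resuming.
 */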
7781 
7782 
7783 void
7784 vm_object_compressed_freezer_done()
7785 {
7786 	vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
7787 }
7788 
7789 
7790 uint32_t
7791 vm_object_compressed_freezer_pageout(
7792 	vm_object_t object, uint32_t dirty_budget)
7793 {
7794 	vm_page_t                       p;
7795 	vm_page_t                       local_freeq = NULL;
7796 	int                             local_freed = 0;
7797 	kern_return_t                   retval = KERN_SUCCESS;
7798 	int                             obj_resident_page_count_snapshot = 0;
7799 	uint32_t                        paged_out_count = 0;
7800 
7801 	assert(object != VM_OBJECT_NULL);
7802 	assert(object->internal);
7803 
7804 	vm_object_lock(object);
7805 
7806 	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7807 		if (!object->pager_initialized) {
7808 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7809 
7810 			if (!object->pager_initialized) {
7811 				vm_object_compressor_pager_create(object);
7812 			}
7813 		}
7814 
7815 		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7816 			vm_object_unlock(object);
7817 			return paged_out_count;
7818 		}
7819 	}
7820 
7821 	/*
7822 	 * We could be freezing a shared internal object that might
7823 	 * be part of some other thread's current VM operations.
7824 	 * We skip it if there's a paging-in-progress or activity-in-progress
7825 	 * because we could be here a long time with the map lock held.
7826 	 *
7827 	 * Note: We are holding the map locked while we wait.
7828 	 * This is fine in the freezer path because the task
7829 	 * is suspended and so this latency is acceptable.
7830 	 */
7831 	if (object->paging_in_progress || object->activity_in_progress) {
7832 		vm_object_unlock(object);
7833 		return paged_out_count;
7834 	}
7835 
7836 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
7837 		vm_object_offset_t      curr_offset = 0;
7838 
7839 		/*
7840 		 * Go through the object and make sure that any
7841 		 * previously compressed pages are relocated into
7842 		 * a compressed segment associated with our "freezer_chead".
7843 		 */
7844 		while (curr_offset < object->vo_size) {
7845 			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
7846 
7847 			if (curr_offset == (vm_object_offset_t) -1) {
7848 				break;
7849 			}
7850 
7851 			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));
7852 
7853 			if (retval != KERN_SUCCESS) {
7854 				break;
7855 			}
7856 
7857 			curr_offset += PAGE_SIZE_64;
7858 		}
7859 	}
7860 
7861 	/*
7862 	 * We can't hold the object lock while heading down into the compressed pager
7863 	 * layer because we might need the kernel map lock down there to allocate new
7864 	 * compressor data structures. And if this same object is mapped in the kernel
7865 	 * and there's a fault on it, then that thread will want the object lock while
7866 	 * holding the kernel map lock.
7867 	 *
7868 	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
7869 	 * we won't be stuck in an infinite loop if the same page(s) keep getting
7870 	 * decompressed. So we grab a snapshot of the number of pages in the object and
7871 	 * we won't process any more than that number of pages.
7872 	 */
7873 
7874 	obj_resident_page_count_snapshot = object->resident_page_count;
7875 
7876 	vm_object_activity_begin(object);
7877 
7878 	while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
7879 		p = (vm_page_t)vm_page_queue_first(&object->memq);
7880 
7881 		KDBG_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed);
7882 
7883 		vm_page_lockspin_queues();
7884 
7885 		if (p->vmp_cleaning || vm_page_is_fictitious(p) ||
7886 		    p->vmp_busy || p->vmp_absent || p->vmp_unusual ||
7887 		    VMP_ERROR_GET(p) || VM_PAGE_WIRED(p)) {
7888 			vm_page_unlock_queues();
7889 
7890 			KDBG_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1);
7891 
7892 			vm_page_queue_remove(&object->memq, p, vmp_listq);
7893 			vm_page_queue_enter(&object->memq, p, vmp_listq);
7894 
7895 			continue;
7896 		}
7897 
7898 		if (p->vmp_pmapped == TRUE) {
7899 			int refmod_state, pmap_flags;
7900 
7901 			if (p->vmp_dirty || p->vmp_precious) {
7902 				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
7903 			} else {
7904 				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7905 			}
7906 
7907 			vm_page_lockconvert_queues();
7908 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
7909 			if (refmod_state & VM_MEM_MODIFIED) {
7910 				SET_PAGE_DIRTY(p, FALSE);
7911 			}
7912 		}
7913 
7914 		if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
7915 			/*
7916 			 * Clean and non-precious page.
7917 			 */
7918 			vm_page_unlock_queues();
7919 			VM_PAGE_FREE(p);
7920 
7921 			KDBG_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2);
7922 			continue;
7923 		}
7924 
7925 		if (p->vmp_laundry) {
7926 			vm_pageout_steal_laundry(p, TRUE);
7927 		}
7928 
7929 		vm_page_queues_remove(p, TRUE);
7930 
7931 		vm_page_unlock_queues();
7932 
7933 
7934 		/*
7935 		 * In case the compressor fails to compress this page, we need it at
7936 		 * the back of the object memq so that we don't keep trying to process it.
7937 		 * Make the move here while we have the object lock held.
7938 		 */
7939 
7940 		vm_page_queue_remove(&object->memq, p, vmp_listq);
7941 		vm_page_queue_enter(&object->memq, p, vmp_listq);
7942 
7943 		/*
7944 		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
7945 		 *
7946 		 * Mark the page busy so no one messes with it while we have the object lock dropped.
7947 		 */
7948 		p->vmp_busy = TRUE;
7949 
7950 		vm_object_activity_begin(object);
7951 
7952 		vm_object_unlock(object);
7953 
7954 		if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
7955 		    (freezer_context_global.freezer_ctx_compressor_scratch_buf),
7956 		    p) == KERN_SUCCESS) {
7957 			/*
7958 			 * page has already been un-tabled from the object via 'vm_page_remove'
7959 			 */
7960 			p->vmp_snext = local_freeq;
7961 			local_freeq = p;
7962 			local_freed++;
7963 			paged_out_count++;
7964 
7965 			if (local_freed >= MAX_FREE_BATCH) {
7966 				OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7967 
7968 				vm_page_free_list(local_freeq, TRUE);
7969 
7970 				local_freeq = NULL;
7971 				local_freed = 0;
7972 			}
7973 			freezer_context_global.freezer_ctx_uncompressed_pages++;
7974 		}
7975 		KDBG_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed);
7976 
7977 		if (local_freed == 0 && c_freezer_should_yield()) {
7978 			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7979 			clock_get_uptime(&c_freezer_last_yield_ts);
7980 		}
7981 
7982 		vm_object_lock(object);
7983 	}
7984 
7985 	if (local_freeq) {
7986 		OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7987 
7988 		vm_page_free_list(local_freeq, TRUE);
7989 
7990 		local_freeq = NULL;
7991 		local_freed = 0;
7992 	}
7993 
7994 	vm_object_activity_end(object);
7995 
7996 	vm_object_unlock(object);
7997 
7998 	if (c_freezer_should_yield()) {
7999 		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
8000 		clock_get_uptime(&c_freezer_last_yield_ts);
8001 	}
8002 	return paged_out_count;
8003 }
8004 
8005 #endif /* CONFIG_FREEZE */
8006 
8007 
8008 uint64_t vm_object_pageout_not_on_queue = 0;
8009 uint64_t vm_object_pageout_not_pageable = 0;
8010 uint64_t vm_object_pageout_pageable = 0;
8011 uint64_t vm_object_pageout_active_local = 0;
8012 void
8013 vm_object_pageout(
8014 	vm_object_t object)
8015 {
8016 	vm_page_t                       p, next;
8017 	struct  vm_pageout_queue        *iq;
8018 
8019 	if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
8020 		return;
8021 	}
8022 
8023 	iq = &vm_pageout_queue_internal;
8024 
8025 	assert(object != VM_OBJECT_NULL );
8026 
8027 	vm_object_lock(object);
8028 
8029 	if (!object->internal ||
8030 	    object->terminating ||
8031 	    !object->alive) {
8032 		vm_object_unlock(object);
8033 		return;
8034 	}
8035 
8036 	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8037 		if (!object->pager_initialized) {
8038 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
8039 
8040 			if (!object->pager_initialized) {
8041 				vm_object_compressor_pager_create(object);
8042 			}
8043 		}
8044 
8045 		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
8046 			vm_object_unlock(object);
8047 			return;
8048 		}
8049 	}
8050 
8051 ReScan:
8052 	next = (vm_page_t)vm_page_queue_first(&object->memq);
8053 
8054 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
8055 		p = next;
8056 		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
8057 
8058 		vm_page_lockspin_queues();
8059 
8060 		assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
8061 		assert(p->vmp_q_state != VM_PAGE_USED_BY_COMPRESSOR);
8062 
8063 		if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
8064 		    p->vmp_cleaning ||
8065 		    p->vmp_laundry ||
8066 		    p->vmp_busy ||
8067 		    p->vmp_absent ||
8068 		    VMP_ERROR_GET(p) ||
8069 		    vm_page_is_fictitious(p) ||
8070 		    VM_PAGE_WIRED(p)) {
8071 			/*
8072 			 * Page is already being cleaned or can't be cleaned.
8073 			 */
8074 			vm_page_unlock_queues();
8075 			continue;
8076 		}
8077 		if (p->vmp_q_state == VM_PAGE_NOT_ON_Q) {
8078 //			printf("FBDP %s:%d page %p object %p offset 0x%llx state %d not on queue\n", __FUNCTION__, __LINE__, p, VM_PAGE_OBJECT(p), p->vmp_offset, p->vmp_q_state);
8079 			vm_object_pageout_not_on_queue++;
8080 			vm_page_unlock_queues();
8081 			continue;
8082 		}
8083 		if (!VM_PAGE_PAGEABLE(p)) {
8084 			if (p->vmp_q_state == VM_PAGE_ON_ACTIVE_LOCAL_Q) {
8085 				vm_object_pageout_active_local++;
8086 			} else {
8087 				vm_object_pageout_not_pageable++;
8088 				vm_page_unlock_queues();
8089 				continue;
8090 			}
8091 		} else {
8092 			vm_object_pageout_pageable++;
8093 		}
8094 
8095 		if (vm_compressor_low_on_space()) {
8096 			vm_page_unlock_queues();
8097 			break;
8098 		}
8099 
8100 		/* Throw to the pageout queue */
8101 
8102 		if (VM_PAGE_Q_THROTTLED(iq)) {
8103 			iq->pgo_draining = TRUE;
8104 
8105 			assert_wait((event_t) (&iq->pgo_laundry + 1),
8106 			    THREAD_INTERRUPTIBLE);
8107 			vm_page_unlock_queues();
8108 			vm_object_unlock(object);
8109 
8110 			thread_block(THREAD_CONTINUE_NULL);
8111 
8112 			vm_object_lock(object);
8113 			goto ReScan;
8114 		}
8115 
8116 		assert(!vm_page_is_fictitious(p));
8117 		assert(!p->vmp_busy);
8118 		assert(!p->vmp_absent);
8119 		assert(!p->vmp_unusual);
8120 		assert(!VMP_ERROR_GET(p));      /* XXX there's a window here where we could have an ECC error! */
8121 		assert(!VM_PAGE_WIRED(p));
8122 		assert(!p->vmp_cleaning);
8123 
8124 		if (p->vmp_pmapped == TRUE) {
8125 			int refmod_state;
8126 			int pmap_options;
8127 
8128 			/*
8129 			 * Tell pmap the page should be accounted
8130 			 * for as "compressed" if it's been modified.
8131 			 */
8132 			pmap_options =
8133 			    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
8134 			if (p->vmp_dirty || p->vmp_precious) {
8135 				/*
8136 				 * We already know it's been modified,
8137 				 * so tell pmap to account for it
8138 				 * as "compressed".
8139 				 */
8140 				pmap_options = PMAP_OPTIONS_COMPRESSOR;
8141 			}
8142 			vm_page_lockconvert_queues();
8143 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
8144 			    pmap_options,
8145 			    NULL);
8146 			if (refmod_state & VM_MEM_MODIFIED) {
8147 				SET_PAGE_DIRTY(p, FALSE);
8148 			}
8149 		}
8150 
8151 		if (!p->vmp_dirty && !p->vmp_precious) {
8152 			vm_page_unlock_queues();
8153 			VM_PAGE_FREE(p);
8154 			continue;
8155 		}
8156 		vm_page_queues_remove(p, TRUE);
8157 
8158 		vm_pageout_cluster(p);
8159 
8160 		vm_page_unlock_queues();
8161 	}
8162 	vm_object_unlock(object);
8163 }
8164 
8165 
8166 #if CONFIG_IOSCHED
8167 
8168 void
8169 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
8170 {
8171 	io_reprioritize_req_t   req;
8172 	struct vnode            *devvp = NULL;
8173 
8174 	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8175 		return;
8176 	}
8177 
8178 	/*
8179 	 * Create the request for I/O reprioritization.
8180 	 * We use the noblock variant of zalloc because we're holding the object
8181 	 * lock here and we could cause a deadlock in low memory conditions.
8182 	 */
8183 	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
8184 	if (req == NULL) {
8185 		return;
8186 	}
8187 	req->blkno = blkno;
8188 	req->len = len;
8189 	req->priority = prio;
8190 	req->devvp = devvp;
8191 
8192 	/* Insert request into the reprioritization list */
8193 	mpsc_daemon_enqueue(&io_reprioritize_q, &req->iorr_elm, MPSC_QUEUE_DISABLE_PREEMPTION);
8194 
8195 	return;
8196 }
8197 
8198 void
8199 vm_decmp_upl_reprioritize(upl_t upl, int prio)
8200 {
8201 	int offset;
8202 	vm_object_t object;
8203 	io_reprioritize_req_t   req;
8204 	struct vnode            *devvp = NULL;
8205 	uint64_t                blkno;
8206 	uint32_t                len;
8207 	upl_t                   io_upl;
8208 	uint64_t                *io_upl_reprio_info;
8209 	int                     io_upl_size;
8210 
8211 	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
8212 		return;
8213 	}
8214 
8215 	/*
8216 	 * We don't want to perform any allocations with the upl lock held since that might
8217 	 * result in a deadlock. If the system is low on memory, the pageout thread would
8218 	 * try to page out stuff and might wait on this lock. If we are waiting for the memory to
8219 	 * be freed up by the pageout thread, it would be a deadlock.
8220 	 */
8221 
8222 
8223 	/* First step is just to get the size of the upl to find out how big the reprio info is */
8224 	if (!upl_try_lock(upl)) {
8225 		return;
8226 	}
8227 
8228 	if (upl->decmp_io_upl == NULL) {
8229 		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8230 		upl_unlock(upl);
8231 		return;
8232 	}
8233 
8234 	io_upl = upl->decmp_io_upl;
8235 	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
8236 	assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
8237 	    "upl %p offset 0x%llx size 0x%x\n",
8238 	    io_upl, io_upl->u_offset, io_upl->u_size);
8239 	io_upl_size = io_upl->u_size;
8240 	upl_unlock(upl);
8241 
8242 	/* Now perform the allocation */
8243 	io_upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
8244 	if (io_upl_reprio_info == NULL) {
8245 		return;
8246 	}
8247 
8248 	/* Now again take the lock, recheck the state and grab out the required info */
8249 	if (!upl_try_lock(upl)) {
8250 		goto out;
8251 	}
8252 
8253 	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
8254 		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
8255 		upl_unlock(upl);
8256 		goto out;
8257 	}
8258 	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
8259 	    sizeof(uint64_t) * atop(io_upl_size));
8260 
8261 	/* Get the VM object for this UPL */
8262 	if (io_upl->flags & UPL_SHADOWED) {
8263 		object = io_upl->map_object->shadow;
8264 	} else {
8265 		object = io_upl->map_object;
8266 	}
8267 
8268 	/* Get the dev vnode ptr for this object */
8269 	if (!object || !object->pager ||
8270 	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
8271 		upl_unlock(upl);
8272 		goto out;
8273 	}
8274 
8275 	upl_unlock(upl);
8276 
8277 	/* Now we have all the information needed to do the expedite */
8278 
8279 	offset = 0;
8280 	while (offset < io_upl_size) {
8281 		blkno   = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
8282 		len     = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
8283 
8284 		/*
8285 		 * This implementation may cause some spurious expedites due to the
8286 		 * fact that we don't clean up the blkno & len from the upl_reprio_info
8287 		 * even after the I/O is complete.
8288 		 */
8289 
8290 		if (blkno != 0 && len != 0) {
8291 			/* Create the request for I/O reprioritization */
8292 			req = zalloc_flags(io_reprioritize_req_zone,
8293 			    Z_WAITOK | Z_NOFAIL);
8294 			req->blkno = blkno;
8295 			req->len = len;
8296 			req->priority = prio;
8297 			req->devvp = devvp;
8298 
8299 			/* Insert request into the reprioritization list */
8300 			mpsc_daemon_enqueue(&io_reprioritize_q, &req->iorr_elm, MPSC_QUEUE_DISABLE_PREEMPTION);
8301 
8302 			offset += len;
8303 		} else {
8304 			offset += PAGE_SIZE;
8305 		}
8306 	}
8307 
8308 out:
8309 	kfree_data(io_upl_reprio_info, sizeof(uint64_t) * atop(io_upl_size));
8310 }
8311 
8312 void
8313 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
8314 {
8315 	upl_t upl;
8316 	upl_page_info_t *pl;
8317 	unsigned int i, num_pages;
8318 	int cur_tier;
8319 
8320 	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
8321 
8322 	/*
8323 	 *  Scan through all UPLs associated with the object to find the
8324 	 *  UPL containing the contended page.
8325 	 */
8326 	queue_iterate(&o->uplq, upl, upl_t, uplq) {
8327 		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
8328 			continue;
8329 		}
8330 		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
8331 		assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
8332 		    "upl %p offset 0x%llx size 0x%x\n",
8333 		    upl, upl->u_offset, upl->u_size);
8334 		num_pages = (upl->u_size / PAGE_SIZE);
8335 
8336 		/*
8337 		 *  For each page in the UPL page list, see if it matches the contended
8338 		 *  page and was issued as a low prio I/O.
8339 		 */
8340 		for (i = 0; i < num_pages; i++) {
8341 			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
8342 				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
8343 					KDBG((VMDBG_CODE(DBG_VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8344 					    VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority);
8345 					vm_decmp_upl_reprioritize(upl, cur_tier);
8346 					break;
8347 				}
8348 				KDBG((VMDBG_CODE(DBG_VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
8349 				    upl->upl_reprio_info[i], upl->upl_priority);
8350 				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
8351 					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
8352 				}
8353 				break;
8354 			}
8355 		}
8356 		/* If we found the page in this UPL, stop scanning */
8357 		if (i != num_pages) {
8358 			break;
8359 		}
8360 	}
8361 
8362 	return;
8363 }
8364 
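/*
 * kdp_vm_object_sleep_find_owner:
 * Stackshot/kdp helper: a vm_object_sleep() wait event encodes the object
 * address plus the wait reason, so recover the object here and report it as
 * the wait context.  We can't identify an owning thread, so report none.
 */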
8365 void
8366 kdp_vm_object_sleep_find_owner(
8367 	event64_t          wait_event,
8368 	block_hint_t       wait_type,
8369 	thread_waitinfo_t *waitinfo)
8370 {
8371 	assert(wait_type >= kThreadWaitPagerInit && wait_type <= kThreadWaitPageInThrottle);
8372 	vm_object_wait_reason_t wait_reason = wait_type - kThreadWaitPagerInit;
8373 	vm_object_t object = (vm_object_t)((uintptr_t)wait_event - wait_reason);
8374 	waitinfo->context = VM_KERNEL_ADDRPERM(object);
8375 	/*
8376 	 * There is currently no straightforward way to ascertain the thread(s)
8377 	 * operating on this object.
8378 	 */
8379 	waitinfo->owner = 0;
8380 }
8381 
8382 
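/*
 * vm_object_sleep:
 * Mark "reason" as wanted on the object, publish the matching block hint for
 * stackshot, and sleep on the (object + reason) event, handling the object
 * lock according to "action".  Returns the wait result from lck_rw_sleep().
 */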
8383 wait_result_t
8384 vm_object_sleep(
8385 	vm_object_t             object,
8386 	vm_object_wait_reason_t reason,
8387 	wait_interrupt_t        interruptible,
8388 	lck_sleep_action_t      action)
8389 {
8390 	wait_result_t wr;
8391 	block_hint_t block_hint;
8392 	event_t wait_event;
8393 
8394 	vm_object_lock_assert_exclusive(object);
8395 	assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);
8396 	switch (reason) {
8397 	case VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS:
8398 		block_hint = kThreadWaitPagerInit; /* XXX change that */
8399 		break;
8400 	case VM_OBJECT_EVENT_PAGER_READY:
8401 		block_hint = kThreadWaitPagerReady;
8402 		break;
8403 	case VM_OBJECT_EVENT_PAGING_IN_PROGRESS:
8404 		block_hint = kThreadWaitPagingActivity;
8405 		break;
8406 	case VM_OBJECT_EVENT_MAPPING_IN_PROGRESS:
8407 		block_hint = kThreadWaitMappingInProgress;
8408 		break;
8409 	case VM_OBJECT_EVENT_UNBLOCKED:
8410 		block_hint = kThreadWaitMemoryBlocked;
8411 		break;
8412 	case VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS:
8413 		block_hint = kThreadWaitPagingInProgress;
8414 		break;
8415 	case VM_OBJECT_EVENT_PAGEIN_THROTTLE:
8416 		block_hint = kThreadWaitPageInThrottle;
8417 		break;
8418 	default:
8419 		panic("Unexpected wait reason %u", reason);
8420 	}
8421 	thread_set_pending_block_hint(current_thread(), block_hint);
8422 
8423 	KDBG_FILTERED(VMDBG_CODE(DBG_VM_OBJECT_SLEEP) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(object), reason);
8424 
8425 	vm_object_set_wanted(object, reason);
8426 	wait_event = (event_t)((uintptr_t)object + (uintptr_t)reason);
8427 	wr = lck_rw_sleep(&object->Lock, LCK_SLEEP_PROMOTED_PRI | action, wait_event, interruptible);
8428 
8429 	KDBG_FILTERED(VMDBG_CODE(DBG_VM_OBJECT_SLEEP) | DBG_FUNC_END, VM_KERNEL_ADDRHIDE(object), reason, wr);
8430 	return wr;
8431 }
8432 
8433 wait_result_t
8434 vm_object_pl_req_wait(vm_object_t object, wait_interrupt_t interruptible)
8435 {
8436 	wait_result_t wr = THREAD_NOT_WAITING;
8437 	vm_object_lock_assert_exclusive(object);
8438 	while (object->vmo_pl_req_in_progress != 0) {
8439 		wr = vm_object_sleep(object,
8440 		    VM_OBJECT_EVENT_PL_REQ_IN_PROGRESS,
8441 		    interruptible,
8442 		    LCK_SLEEP_EXCLUSIVE);
8443 		if (wr != THREAD_AWAKENED) {
8444 			break;
8445 		}
8446 	}
8447 	return wr;
8448 }
8449 
8450 wait_result_t
8451 vm_object_paging_wait(vm_object_t object, wait_interrupt_t interruptible)
8452 {
8453 	wait_result_t wr = THREAD_NOT_WAITING;
8454 	vm_object_lock_assert_exclusive(object);
8455 	while (object->paging_in_progress != 0 ||
8456 	    object->activity_in_progress != 0) {
8457 		wr = vm_object_sleep(object,
8458 		    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
8459 		    interruptible,
8460 		    LCK_SLEEP_EXCLUSIVE);
8461 		if (wr != THREAD_AWAKENED) {
8462 			break;
8463 		}
8464 	}
8465 	return wr;
8466 }
8467 
8468 wait_result_t
8469 vm_object_paging_only_wait(vm_object_t object, wait_interrupt_t interruptible)
8470 {
8471 	wait_result_t wr = THREAD_NOT_WAITING;
8472 	vm_object_lock_assert_exclusive(object);
8473 	while (object->paging_in_progress != 0) {
8474 		wr = vm_object_sleep(object,
8475 		    VM_OBJECT_EVENT_PAGING_ONLY_IN_PROGRESS,
8476 		    interruptible,
8477 		    LCK_SLEEP_EXCLUSIVE);
8478 		if (wr != THREAD_AWAKENED) {
8479 			break;
8480 		}
8481 	}
8482 	return wr;
8483 }
8484 
8485 wait_result_t
8486 vm_object_paging_throttle_wait(vm_object_t object, wait_interrupt_t interruptible)
8487 {
8488 	wait_result_t wr = THREAD_NOT_WAITING;
8489 	vm_object_lock_assert_exclusive(object);
8490 	/*
8491 	 * TODO: consider raising the throttle limit specifically for
8492 	 * shared-cache objects, which are expected to be highly contended.
8493 	 * (rdar://127899888)
8494 	 */
8495 	while (object->paging_in_progress >= vm_object_pagein_throttle) {
8496 		wr = vm_object_sleep(object,
8497 		    VM_OBJECT_EVENT_PAGEIN_THROTTLE,
8498 		    interruptible,
8499 		    LCK_SLEEP_EXCLUSIVE);
8500 		if (wr != THREAD_AWAKENED) {
8501 			break;
8502 		}
8503 	}
8504 	return wr;
8505 }
8506 
8507 wait_result_t
8508 vm_object_mapping_wait(vm_object_t object, wait_interrupt_t interruptible)
8509 {
8510 	wait_result_t wr = THREAD_NOT_WAITING;
8511 	vm_object_lock_assert_exclusive(object);
8512 	while (object->mapping_in_progress) {
8513 		wr = vm_object_sleep(object,
8514 		    VM_OBJECT_EVENT_MAPPING_IN_PROGRESS,
8515 		    interruptible,
8516 		    LCK_SLEEP_EXCLUSIVE);
8517 		if (wr != THREAD_AWAKENED) {
8518 			break;
8519 		}
8520 	}
8521 	return wr;
8522 }
8523 
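/*
 * vm_object_wakeup:
 * Wake up any threads sleeping on (object, reason) and clear the
 * corresponding "wanted" bit.
 */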
8524 void
8525 vm_object_wakeup(
8526 	vm_object_t             object,
8527 	vm_object_wait_reason_t reason)
8528 {
8529 	vm_object_lock_assert_exclusive(object);
8530 	assert(reason >= 0 && reason <= VM_OBJECT_EVENT_MAX);
8531 
8532 	if (vm_object_wanted(object, reason)) {
8533 		thread_wakeup((event_t)((uintptr_t)object + (uintptr_t)reason));
8534 	}
8535 	object->all_wanted &= ~(1 << reason);
8536 }
8537 
8538 
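/*
 * kdp_vm_page_sleep_find_owner:
 * Stackshot/kdp helper: the vm_page_sleep() wait event is the page pointer
 * itself, so report the page as the wait context.
 */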
8539 void
8540 kdp_vm_page_sleep_find_owner(event64_t wait_event, thread_waitinfo_t *waitinfo)
8541 {
8542 	vm_page_t m = (vm_page_t)wait_event;
8543 	waitinfo->context = VM_KERNEL_ADDRPERM(m);
8544 	/*
8545 	 * There is currently no straightforward way to identify the thread
8546 	 * holding the page busy.
8547 	 */
8548 	waitinfo->owner = 0;
8549 }
8550 
8551 #if PAGE_SLEEP_WITH_INHERITOR
8552 static wait_result_t vm_page_sleep_with_inheritor(lck_rw_t *lck, lck_sleep_action_t lck_sleep_action, event_t event, wait_interrupt_t interruptible);
8553 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8554 
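/*
 * vm_page_sleep:
 * Wait for a busy page, with the object lock held.  If the page is busy
 * because of an outstanding I/O, first try to get that I/O expedited
 * (CONFIG_IOSCHED); then mark the page wanted and sleep on it, using the
 * inheritor-aware sleep when PAGE_SLEEP_WITH_INHERITOR is configured.
 */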
8555 wait_result_t
8556 vm_page_sleep(vm_object_t object, vm_page_t m, wait_interrupt_t interruptible, lck_sleep_action_t action)
8557 {
8558 	wait_result_t ret;
8559 
8560 	KDBG_FILTERED((VMDBG_CODE(DBG_VM_PAGE_SLEEP)) | DBG_FUNC_START, VM_KERNEL_ADDRHIDE(object), m->vmp_offset, VM_KERNEL_ADDRHIDE(m));
8561 #if CONFIG_IOSCHED
8562 	if (object->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
8563 		/*
8564 		 *  Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
8565 		 */
8566 		vm_page_handle_prio_inversion(object, m);
8567 	}
8568 #endif /* CONFIG_IOSCHED */
8569 	m->vmp_wanted = TRUE;
8570 	thread_set_pending_block_hint(current_thread(), kThreadWaitPageBusy);
8571 #if PAGE_SLEEP_WITH_INHERITOR
8572 	ret = vm_page_sleep_with_inheritor(&object->Lock, action, (event_t)m, interruptible);
8573 #else
8574 	ret = lck_rw_sleep(&object->Lock, LCK_SLEEP_PROMOTED_PRI | action, (event_t)m, interruptible);
8575 #endif
8576 	KDBG_FILTERED((VMDBG_CODE(DBG_VM_PAGE_SLEEP)) | DBG_FUNC_END, VM_KERNEL_ADDRHIDE(object), m->vmp_offset, VM_KERNEL_ADDRHIDE(m));
8577 	return ret;
8578 }
8579 
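/*
 * vm_page_wakeup:
 * Wake up any threads waiting for this page, if it was marked wanted.
 */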
8580 void
8581 vm_page_wakeup(vm_object_t object, vm_page_t m)
8582 {
8583 	assert(m);
8584 	/*
8585 	 * The page may have been freed from its object before this wakeup is issued
8586 	 */
8587 	if (object != VM_OBJECT_NULL) {
8588 		vm_object_lock_assert_exclusive(object);
8589 	}
8590 
8591 	if (m->vmp_wanted) {
8592 		KDBG(VMDBG_CODE(DBG_VM_PAGE_WAKEUP) | DBG_FUNC_NONE,
8593 		    VM_KERNEL_ADDRHIDE(object), m->vmp_offset,
8594 		    VM_KERNEL_ADDRHIDE(m));
8595 		m->vmp_wanted = false;
8596 		thread_wakeup((event_t)m);
8597 	}
8598 }
8599 
8600 void
8601 vm_page_wakeup_done(__assert_only vm_object_t object, vm_page_t m)
8602 {
8603 	assert(object);
8604 	assert(m->vmp_busy);
8605 	vm_object_lock_assert_exclusive(object);
8606 
8607 	KDBG(VMDBG_CODE(DBG_VM_PAGE_WAKEUP_DONE) | DBG_FUNC_NONE,
8608 	    VM_KERNEL_ADDRHIDE(object), m->vmp_offset,
8609 	    VM_KERNEL_ADDRHIDE(m), m->vmp_wanted);
8610 	m->vmp_busy = false;
8611 	vm_page_wakeup(object, m);
8612 }
8613 
8614 #if PAGE_SLEEP_WITH_INHERITOR
8615 static bool page_worker_unregister_worker(event_t event, thread_t expect_th, page_worker_token_t *token);
8616 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8617 
8618 /* This function does everything vm_page_wakeup_done() does, and additionally handles being called
8619  * from vm_fault_page() on a page whose worker thread may have been priority-boosted as an inheritor. */
8620 void
8621 vm_page_wakeup_done_with_inheritor(vm_object_t object __unused, vm_page_t m, page_worker_token_t *token __unused)
8622 {
8623 #if PAGE_SLEEP_WITH_INHERITOR
8624 	assert(object);
8625 	assert(m->vmp_busy);
8626 	vm_object_lock_assert_exclusive(object);
8627 
8628 	bool had_inheritor = page_worker_unregister_worker((event_t)m, current_thread(), token);
8629 
8630 	KDBG(VMDBG_CODE(DBG_VM_PAGE_WAKEUP_DONE) | DBG_FUNC_NONE,
8631 	    VM_KERNEL_ADDRHIDE(object), VM_KERNEL_ADDRHIDE(m),
8632 	    m->vmp_wanted, had_inheritor);
8633 	m->vmp_busy = FALSE;
8634 
8635 	if (m->vmp_wanted) {
8636 		m->vmp_wanted = FALSE;
8637 		if (had_inheritor) {
8638 			wakeup_all_with_inheritor((event_t)m, THREAD_AWAKENED);
8639 		} else {
8640 			thread_wakeup((event_t)m);
8641 		}
8642 	}
8643 #else /* PAGE_SLEEP_WITH_INHERITOR */
8644 	vm_page_wakeup_done(object, m);
8645 #endif /* PAGE_SLEEP_WITH_INHERITOR */
8646 }
8647 
8648 #if PAGE_SLEEP_WITH_INHERITOR
8649 
8650 /*
8651  * vm_page_sleep_with_inheritor:
8652  * The goal of this functionality is to prevent the priority inversion that can occur when a low-priority
8653  * thread is stuck in the compressor and a higher-priority thread waits for the same page.
8654  * Just before vm_fault_page() calls into the compressor, it calls page_worker_register_worker(),
8655  * which registers the calling thread as the "page worker" for this page.
8656  * When another thread then calls vm_page_sleep() on that page (to wait for it to become un-busy), the worker
8657  * is looked up and, instead of a plain thread_block() (via lck_rw_sleep()), we use lck_rw_sleep_with_inheritor()
8658  * with the registered worker thread as the inheritor of the priority boost.
8659  * The worker thread might have started its work at a low priority; once a waiter is added, it gets boosted.
8660  * When the worker is done bringing in the page, it calls vm_page_wakeup_done_with_inheritor() instead of
8661  * vm_page_wakeup_done(), which unregisters the thread, clears the page's busy bit (so that other threads can
8662  * now use the page), and wakes up any waiters waiting for that page with wakeup_all_with_inheritor(), which
8663  * removes the priority boost.
8664  *
8665  * The worker registration is kept in a simple hash table with a single entry per bucket. A hash collision may
8666  * occur if two faulting pages hash to the same entry. In that case, the registration of the second one fails,
8667  * and the only repercussion is that its worker would not get the possible boost if anyone waits on that page.
8668  * This implementation was selected over a full hash table to keep it simple and fast.
8669  */
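/*
 * Typical flow, as an illustrative sketch (the actual call sites are in
 * vm_fault_page() and vm_page_sleep()):
 *
 *	page_worker_token_t token;
 *	page_worker_register_worker((event_t)m, &token);
 *	... decompress / page in while the page stays busy ...
 *	vm_page_wakeup_done_with_inheritor(object, m, &token);
 *
 * Waiters simply call vm_page_sleep(); if a worker is registered for the
 * page, the sleep goes through lck_rw_sleep_with_inheritor() so the worker
 * inherits the waiter's priority until it wakes the waiters up.
 */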
8670 
8671 struct page_worker {
8672 	lck_ticket_t pw_entry_lock;
8673 	event_t pw_owner_event;
8674 	thread_t pw_current_worker;
8675 };
8676 
8677 SECURITY_READ_ONLY_LATE(uint32_t) page_worker_table_size = 0;
8678 SECURITY_READ_ONLY_LATE(static struct page_worker *)page_worker_table = NULL;
8679 SCALABLE_COUNTER_DEFINE(page_worker_hash_collisions);
8680 SCALABLE_COUNTER_DEFINE(page_worker_inheritor_sleeps);
8681 
8682 LCK_GRP_DECLARE(page_worker_table_lock_grp, "page_worker_table_locks");
8683 
8684 #define page_worker_entry_unlock(entry) \
8685 	lck_ticket_unlock(&(entry)->pw_entry_lock)
8686 
8687 #define PAGE_WORKER_TABLE_BUCKETS (256)
8688 
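/*
 * page_worker_init:
 * Allocate the page-worker hash table (one worker entry per bucket) and
 * initialize each bucket's ticket lock.  The table size must be a power
 * of two so the hash can be reduced with a simple mask.
 */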
8689 void
8690 page_worker_init(void)
8691 {
8692 	page_worker_table_size = PAGE_WORKER_TABLE_BUCKETS;
8693 #if DEVELOPMENT || DEBUG
8694 	PE_parse_boot_argn("page_worker_table_size", &page_worker_table_size, sizeof(page_worker_table_size));
8695 #endif /* DEVELOPMENT || DEBUG */
8696 	/* This checks that the size is a positive power of 2, needed for the hash function */
8697 	assert(page_worker_table_size > 0 && !(page_worker_table_size & (page_worker_table_size - 1)));
8698 
8699 	page_worker_table = zalloc_permanent(page_worker_table_size * sizeof(struct page_worker), ZALIGN_PTR);
8700 	if (page_worker_table == NULL) {
8701 		panic("Page events hash table memory allocation failed!");
8702 	}
8703 	for (uint32_t i = 0; i < page_worker_table_size; ++i) {
8704 		struct page_worker* we = &(page_worker_table[i]);
8705 		lck_ticket_init(&we->pw_entry_lock, &page_worker_table_lock_grp);
8706 	}
8707 }
8708 
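/*
 * Hash "event" into the table and return its bucket, locked.
 * Returns NULL if the table has not been initialized yet.
 */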
8709 static struct page_worker *
8710 page_worker_lock_table_entry(event_t event)
8711 {
8712 	if (page_worker_table == NULL) {
8713 		return NULL;
8714 	}
8715 	uint32_t hash = os_hash_kernel_pointer((void *)event);
8716 	uint32_t index = hash & (page_worker_table_size - 1);
8717 
8718 	struct page_worker *entry = &page_worker_table[index];
8719 
8720 	lck_ticket_lock(&entry->pw_entry_lock, &page_worker_table_lock_grp);
8721 	return entry;
8722 }
8723 
8724 /* returns a locked entry if found or added, otherwise returns NULL */
8725 static struct page_worker *
8726 page_worker_lookup(event_t event, bool try_add_missing)
8727 {
8728 	assert(event != NULL);
8729 	struct page_worker *entry = page_worker_lock_table_entry(event);
8730 	if (entry == NULL) {
8731 		/* table not initialized */
8732 		return NULL;
8733 	}
8734 	if (entry->pw_owner_event == event) {
8735 		/* found existing entry and it belongs to this event */
8736 		return entry;
8737 	}
8738 
8739 	if (try_add_missing) {
8740 		if (entry->pw_owner_event == NULL) {
8741 			/* found empty entry, take over it */
8742 			entry->pw_owner_event = event;
8743 			return entry;
8744 		}
8745 		/* didn't find the event, need to add it, but can't because it's occupied */
8746 		counter_inc(&page_worker_hash_collisions);
8747 	}
8748 	page_worker_entry_unlock(entry);
8749 	return NULL;
8750 }
8751 
8752 /* Register current_thread() as the worker for "event"; on a hash collision we fall back to a priority floor. The outcome is recorded in *out_token. */
8753 void
8754 page_worker_register_worker(event_t event __unused, page_worker_token_t *out_token)
8755 {
8756 	out_token->pwt_did_register_inheritor = false;
8757 	out_token->pwt_floor_token.thread = THREAD_NULL;
8758 
8759 	struct page_worker* entry = page_worker_lookup(event, TRUE);
8760 	if (entry == NULL) {
8761 		/* failed registration due to a hash collision */
8762 		out_token->pwt_floor_token = thread_priority_floor_start();
8763 		return;
8764 	}
8765 	entry->pw_current_worker = current_thread();
8766 	/* no need to take a thread reference because this gets cleared within the same vm_fault_page() call */
8767 	page_worker_entry_unlock(entry);
8768 	out_token->pwt_did_register_inheritor = true;
8769 }
8770 
8771 static bool
8772 page_worker_unregister_worker(event_t event, thread_t expect_th __unused, page_worker_token_t *token)
8773 {
8774 	struct page_worker *entry = page_worker_lookup(event, FALSE);
8775 	if (entry == NULL) {
8776 		assert(!token->pwt_did_register_inheritor);
8777 		/* did we do thread_priority_floor_start() ? */
8778 		if (token->pwt_floor_token.thread != THREAD_NULL) {
8779 			thread_priority_floor_end(&token->pwt_floor_token);
8780 		}
8781 		return false;
8782 	}
8783 	assert(token->pwt_did_register_inheritor);
8784 	assert(token->pwt_floor_token.thread == THREAD_NULL); /* we shouldn't have done thread_priority_floor_start() */
8785 	assert(entry->pw_owner_event != 0);
8786 	assert(entry->pw_current_worker == expect_th);
8787 	entry->pw_owner_event = 0;
8788 	entry->pw_current_worker = THREAD_NULL;
8789 	page_worker_entry_unlock(entry); /* was locked in page_worker_lookup() */
8790 	return true;
8791 }
8792 
8793 static wait_result_t
8794 vm_page_sleep_with_inheritor(lck_rw_t *lck, lck_sleep_action_t action, event_t event, wait_interrupt_t interruptible)
8795 {
8796 	struct page_worker *entry = page_worker_lookup(event, FALSE);
8797 	thread_t inheritor = THREAD_NULL;
8798 	if (entry != NULL) {
8799 		inheritor = entry->pw_current_worker;
8800 		page_worker_entry_unlock(entry);
8801 	}
8802 
8803 	wait_result_t ret;
8804 	if (inheritor == THREAD_NULL) {
8805 		/* no worker was found */
8806 		ret = lck_rw_sleep(lck, LCK_SLEEP_PROMOTED_PRI | action, event, interruptible);
8807 	} else {
8808 		counter_inc(&page_worker_inheritor_sleeps);
8809 		ret = lck_rw_sleep_with_inheritor(lck, action, event, inheritor, interruptible, TIMEOUT_WAIT_FOREVER);
8810 	}
8811 
8812 	return ret;
8813 }
8814 #endif  /* PAGE_SLEEP_WITH_INHERITOR */
8815 
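/*
 * io_reprioritize:
 * MPSC daemon callback draining io_reprioritize_q: issue the actual
 * reprioritization request to the device vnode and free the request.
 */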
8816 static void
8817 io_reprioritize(mpsc_queue_chain_t elm, __assert_only mpsc_daemon_queue_t dq)
8818 {
8819 	assert3p(dq, ==, &io_reprioritize_q);
8820 	io_reprioritize_req_t req = mpsc_queue_element(elm, struct io_reprioritize_req, iorr_elm);
8821 	vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
8822 	zfree(io_reprioritize_req_zone, req);
8823 }
8824 
8825 #endif /* CONFIG_IOSCHED */
8826 
8827 #if VM_OBJECT_ACCESS_TRACKING
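/*
 * vm_object_access_tracking:
 * Swap the object's access-tracking state: return the previous state and
 * read/write counters through the in/out parameters, install the new state,
 * and reset the counters.  When tracking is (re)enabled, also pmap-protect
 * the object's pages to VM_PROT_NONE so that future accesses fault and can
 * be counted.
 */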
8828 void
8829 vm_object_access_tracking(
8830 	vm_object_t     object,
8831 	int             *access_tracking_p,
8832 	uint32_t        *access_tracking_reads_p,
8833 	uint32_t        *access_tracking_writes_p)
8834 {
8835 	int     access_tracking;
8836 
8837 	access_tracking = !!*access_tracking_p;
8838 
8839 	vm_object_lock(object);
8840 	*access_tracking_p = object->access_tracking;
8841 	if (access_tracking_reads_p) {
8842 		*access_tracking_reads_p = object->access_tracking_reads;
8843 	}
8844 	if (access_tracking_writes_p) {
8845 		*access_tracking_writes_p = object->access_tracking_writes;
8846 	}
8847 	object->access_tracking = access_tracking;
8848 	object->access_tracking_reads = 0;
8849 	object->access_tracking_writes = 0;
8850 	vm_object_unlock(object);
8851 
8852 	if (access_tracking) {
8853 		vm_object_pmap_protect_options(object,
8854 		    0,
8855 		    object->vo_size,
8856 		    PMAP_NULL,
8857 		    PAGE_SIZE,
8858 		    0,
8859 		    VM_PROT_NONE,
8860 		    0);
8861 	}
8862 }
8863 #endif /* VM_OBJECT_ACCESS_TRACKING */
8864 
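/*
 * vm_object_ledger_tag_ledgers:
 * Translate the object's ledger tag and "no footprint" attribute into the
 * set of task ledger indices its pages should be charged to.  Indices that
 * do not apply are returned as -1; *do_footprint reports whether the pages
 * also count towards the owner's phys_footprint.
 */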
8865 void
8866 vm_object_ledger_tag_ledgers(
8867 	vm_object_t     object,
8868 	int             *ledger_idx_volatile,
8869 	int             *ledger_idx_nonvolatile,
8870 	int             *ledger_idx_volatile_compressed,
8871 	int             *ledger_idx_nonvolatile_compressed,
8872 	int             *ledger_idx_composite,
8873 	int             *ledger_idx_external_wired,
8874 	boolean_t       *do_footprint)
8875 {
8876 	assert(object->shadow == VM_OBJECT_NULL);
8877 
8878 	*ledger_idx_volatile = -1;
8879 	*ledger_idx_nonvolatile = -1;
8880 	*ledger_idx_volatile_compressed = -1;
8881 	*ledger_idx_nonvolatile_compressed = -1;
8882 	*ledger_idx_composite = -1;
8883 	*ledger_idx_external_wired = -1;
8884 	*do_footprint = !object->vo_no_footprint;
8885 
8886 	if (!object->internal) {
8887 		switch (object->vo_ledger_tag) {
8888 		case VM_LEDGER_TAG_DEFAULT:
8889 			if (*do_footprint) {
8890 				*ledger_idx_external_wired = task_ledgers.tagged_footprint;
8891 			} else {
8892 				*ledger_idx_external_wired = task_ledgers.tagged_nofootprint;
8893 			}
8894 			break;
8895 		case VM_LEDGER_TAG_NETWORK:
8896 			*do_footprint = FALSE;
8897 			*ledger_idx_external_wired = task_ledgers.network_nonvolatile;
8898 			break;
8899 		case VM_LEDGER_TAG_MEDIA:
8900 			if (*do_footprint) {
8901 				*ledger_idx_external_wired = task_ledgers.media_footprint;
8902 			} else {
8903 				*ledger_idx_external_wired = task_ledgers.media_nofootprint;
8904 			}
8905 			break;
8906 		case VM_LEDGER_TAG_GRAPHICS:
8907 			if (*do_footprint) {
8908 				*ledger_idx_external_wired = task_ledgers.graphics_footprint;
8909 			} else {
8910 				*ledger_idx_external_wired = task_ledgers.graphics_nofootprint;
8911 			}
8912 			break;
8913 		case VM_LEDGER_TAG_NEURAL:
8914 			*ledger_idx_composite = task_ledgers.neural_nofootprint_total;
8915 			if (*do_footprint) {
8916 				*ledger_idx_external_wired = task_ledgers.neural_footprint;
8917 			} else {
8918 				*ledger_idx_external_wired = task_ledgers.neural_nofootprint;
8919 			}
8920 			break;
8921 		case VM_LEDGER_TAG_NONE:
8922 		default:
8923 			panic("%s: external object %p has unsupported ledger_tag %d",
8924 			    __FUNCTION__, object, object->vo_ledger_tag);
8925 		}
8926 		return;
8927 	}
8928 
8929 	assert(object->internal);
8930 	switch (object->vo_ledger_tag) {
8931 	case VM_LEDGER_TAG_NONE:
8932 		/*
8933 		 * Regular purgeable memory:
8934 		 * counts in footprint only when nonvolatile.
8935 		 */
8936 		*do_footprint = TRUE;
8937 		assert(object->purgable != VM_PURGABLE_DENY);
8938 		*ledger_idx_volatile = task_ledgers.purgeable_volatile;
8939 		*ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
8940 		*ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
8941 		*ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
8942 		break;
8943 	case VM_LEDGER_TAG_DEFAULT:
8944 		/*
8945 		 * "default" tagged memory:
8946 		 * counts in footprint only when nonvolatile and not marked
8947 		 * as "no_footprint".
8948 		 */
8949 		*ledger_idx_volatile = task_ledgers.tagged_nofootprint;
8950 		*ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
8951 		if (*do_footprint) {
8952 			*ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
8953 			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
8954 		} else {
8955 			*ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
8956 			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
8957 		}
8958 		break;
8959 	case VM_LEDGER_TAG_NETWORK:
8960 		/*
8961 		 * "network" tagged memory:
8962 		 * never counts in footprint.
8963 		 */
8964 		*do_footprint = FALSE;
8965 		*ledger_idx_volatile = task_ledgers.network_volatile;
8966 		*ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
8967 		*ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
8968 		*ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
8969 		break;
8970 	case VM_LEDGER_TAG_MEDIA:
8971 		/*
8972 		 * "media" tagged memory:
8973 		 * counts in footprint only when nonvolatile and not marked
8974 		 * as "no footprint".
8975 		 */
8976 		*ledger_idx_volatile = task_ledgers.media_nofootprint;
8977 		*ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
8978 		if (*do_footprint) {
8979 			*ledger_idx_nonvolatile = task_ledgers.media_footprint;
8980 			*ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
8981 		} else {
8982 			*ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
8983 			*ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
8984 		}
8985 		break;
8986 	case VM_LEDGER_TAG_GRAPHICS:
8987 		/*
8988 		 * "graphics" tagged memory:
8989 		 * counts in footprint only when nonvolatile and not marked
8990 		 * as "no footprint".
8991 		 */
8992 		*ledger_idx_volatile = task_ledgers.graphics_nofootprint;
8993 		*ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
8994 		if (*do_footprint) {
8995 			*ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
8996 			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
8997 		} else {
8998 			*ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
8999 			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
9000 		}
9001 		break;
9002 	case VM_LEDGER_TAG_NEURAL:
9003 		/*
9004 		 * "neural" tagged memory:
9005 		 * counts in footprint only when nonvolatile and not marked
9006 		 * as "no footprint".
9007 		 */
9008 		*ledger_idx_composite = task_ledgers.neural_nofootprint_total;
9009 		*ledger_idx_volatile = task_ledgers.neural_nofootprint;
9010 		*ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
9011 		if (*do_footprint) {
9012 			*ledger_idx_nonvolatile = task_ledgers.neural_footprint;
9013 			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
9014 		} else {
9015 			*ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
9016 			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
9017 		}
9018 		break;
9019 	default:
9020 		panic("%s: object %p has unsupported ledger_tag %d",
9021 		    __FUNCTION__, object, object->vo_ledger_tag);
9022 	}
9023 }
9024 
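/*
 * vm_object_ownership_change:
 * Change the object's owning task and/or ledger tag, after validating the
 * requested combination.  Moves the accounting for the object's resident,
 * wired and compressed pages from the old owner's ledgers to the new
 * owner's, and moves the object between the tasks' task_objq lists.
 * The object must be locked exclusively.
 */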
9025 kern_return_t
9026 vm_object_ownership_change(
9027 	vm_object_t     object,
9028 	int             new_ledger_tag,
9029 	task_t          new_owner,
9030 	int             new_ledger_flags,
9031 	boolean_t       old_task_objq_locked)
9032 {
9033 	int             old_ledger_tag;
9034 	task_t          old_owner;
9035 	int             resident_count, wired_count;
9036 	unsigned int    compressed_count;
9037 	int             ledger_idx_volatile;
9038 	int             ledger_idx_nonvolatile;
9039 	int             ledger_idx_volatile_compressed;
9040 	int             ledger_idx_nonvolatile_compressed;
9041 	int             ledger_idx;
9042 	int             ledger_idx_compressed;
9043 	int             ledger_idx_composite;
9044 	int             ledger_idx_external_wired;
9045 	boolean_t       do_footprint, old_no_footprint, new_no_footprint;
9046 	boolean_t       new_task_objq_locked;
9047 
9048 	vm_object_lock_assert_exclusive(object);
9049 
9050 	if (new_owner != VM_OBJECT_OWNER_DISOWNED &&
9051 	    new_owner != TASK_NULL) {
9052 		if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
9053 		    object->purgable == VM_PURGABLE_DENY) {
9054 			/* non-purgeable memory must have a valid non-zero ledger tag */
9055 			return KERN_INVALID_ARGUMENT;
9056 		}
9057 		if (!object->internal
9058 		    && !memory_object_is_vnode_pager(object->pager)) {
9059 			/* non-file-backed "external" objects can't be owned */
9060 			return KERN_INVALID_ARGUMENT;
9061 		}
9062 	}
9063 	if (new_owner == VM_OBJECT_OWNER_UNCHANGED) {
9064 		/* leave owner unchanged */
9065 		new_owner = VM_OBJECT_OWNER(object);
9066 	}
9067 	if (new_ledger_tag == VM_LEDGER_TAG_UNCHANGED) {
9068 		/* leave ledger_tag unchanged */
9069 		new_ledger_tag = object->vo_ledger_tag;
9070 	}
9071 	if (new_ledger_tag < 0 ||
9072 	    new_ledger_tag > VM_LEDGER_TAG_MAX) {
9073 		return KERN_INVALID_ARGUMENT;
9074 	}
9075 	if (new_ledger_flags & ~VM_LEDGER_FLAGS_ALL) {
9076 		return KERN_INVALID_ARGUMENT;
9077 	}
9078 	if (object->internal &&
9079 	    object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
9080 	    object->purgable == VM_PURGABLE_DENY) {
9081 		/*
9082 		 * This VM object is neither ledger-tagged nor purgeable.
9083 		 * We can convert it to "ledger tag" ownership iff it
9084 		 * has not been used at all yet (no resident pages and
9085 		 * no pager) and it's going to be assigned to a valid task.
9086 		 */
9087 		if (object->resident_page_count != 0 ||
9088 		    object->pager != NULL ||
9089 		    object->pager_created ||
9090 		    os_ref_get_count_raw(&object->ref_count) != 1 ||
9091 		    object->vo_owner != TASK_NULL ||
9092 		    object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
9093 		    new_owner == TASK_NULL) {
9094 			return KERN_FAILURE;
9095 		}
9096 	}
9097 
9098 	if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
9099 		new_no_footprint = TRUE;
9100 	} else {
9101 		new_no_footprint = FALSE;
9102 	}
9103 #if __arm64__
9104 	if (!new_no_footprint &&
9105 	    object->purgable != VM_PURGABLE_DENY &&
9106 	    new_owner != TASK_NULL &&
9107 	    new_owner != VM_OBJECT_OWNER_DISOWNED &&
9108 	    new_owner->task_legacy_footprint) {
9109 		/*
9110 		 * This task has been granted "legacy footprint" and should
9111 		 * not be charged for its IOKit purgeable memory.  Since we
9112 		 * might now change the accounting of such memory to the
9113 		 * "graphics" ledger, for example, give it the "no footprint"
9114 		 * option.
9115 		 */
9116 		new_no_footprint = TRUE;
9117 	}
9118 #endif /* __arm64__ */
9119 	assert(object->copy_strategy != MEMORY_OBJECT_COPY_SYMMETRIC);
9120 	assert(object->shadow == VM_OBJECT_NULL);
9121 	if (object->internal) {
9122 		assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
9123 		assert(object->vo_copy == VM_OBJECT_NULL);
9124 	}
9125 
9126 	old_ledger_tag = object->vo_ledger_tag;
9127 	old_no_footprint = object->vo_no_footprint;
9128 	old_owner = VM_OBJECT_OWNER(object);
9129 
9130 	if (__improbable(vm_debug_events)) {
9131 		DTRACE_VM8(object_ownership_change,
9132 		    vm_object_t, object,
9133 		    task_t, old_owner,
9134 		    int, old_ledger_tag,
9135 		    int, old_no_footprint,
9136 		    task_t, new_owner,
9137 		    int, new_ledger_tag,
9138 		    int, new_no_footprint,
9139 		    int, VM_OBJECT_ID(object));
9140 	}
9141 
9142 	resident_count = object->resident_page_count - object->wired_page_count;
9143 	wired_count = object->wired_page_count;
9144 	if (object->internal) {
9145 		compressed_count = vm_compressor_pager_get_count(object->pager);
9146 	} else {
9147 		compressed_count = 0;
9148 	}
9149 
9150 	/*
9151 	 * Deal with the old owner and/or ledger tag, if needed.
9152 	 */
9153 	if (old_owner != TASK_NULL &&
9154 	    ((old_owner != new_owner)           /* new owner ... */
9155 	    ||                                  /* ... or ... */
9156 	    (old_no_footprint != new_no_footprint) /* new "no_footprint" */
9157 	    ||                                  /* ... or ... */
9158 	    old_ledger_tag != new_ledger_tag)) { /* ... new ledger */
9159 		/*
9160 		 * Take this object off of the old owner's ledgers.
9161 		 */
9162 		vm_object_ledger_tag_ledgers(object,
9163 		    &ledger_idx_volatile,
9164 		    &ledger_idx_nonvolatile,
9165 		    &ledger_idx_volatile_compressed,
9166 		    &ledger_idx_nonvolatile_compressed,
9167 		    &ledger_idx_composite,
9168 		    &ledger_idx_external_wired,
9169 		    &do_footprint);
9170 		if (object->internal) {
9171 			if (object->purgable == VM_PURGABLE_VOLATILE ||
9172 			    object->purgable == VM_PURGABLE_EMPTY) {
9173 				ledger_idx = ledger_idx_volatile;
9174 				ledger_idx_compressed = ledger_idx_volatile_compressed;
9175 			} else {
9176 				ledger_idx = ledger_idx_nonvolatile;
9177 				ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
9178 			}
9179 			if (resident_count) {
9180 				/*
9181 				 * Adjust the appropriate old owner's ledgers by the
9182 				 * number of resident pages.
9183 				 */
9184 				ledger_debit(old_owner->ledger,
9185 				    ledger_idx,
9186 				    ptoa_64(resident_count));
9187 				/* adjust old owner's footprint */
9188 				if (object->purgable != VM_PURGABLE_VOLATILE &&
9189 				    object->purgable != VM_PURGABLE_EMPTY) {
9190 					if (do_footprint) {
9191 						ledger_debit(old_owner->ledger,
9192 						    task_ledgers.phys_footprint,
9193 						    ptoa_64(resident_count));
9194 					} else if (ledger_idx_composite != -1) {
9195 						ledger_debit(old_owner->ledger,
9196 						    ledger_idx_composite,
9197 						    ptoa_64(resident_count));
9198 					}
9199 				}
9200 			}
9201 			if (wired_count) {
9202 				/* wired pages are always nonvolatile */
9203 				ledger_debit(old_owner->ledger,
9204 				    ledger_idx_nonvolatile,
9205 				    ptoa_64(wired_count));
9206 				if (do_footprint) {
9207 					ledger_debit(old_owner->ledger,
9208 					    task_ledgers.phys_footprint,
9209 					    ptoa_64(wired_count));
9210 				} else if (ledger_idx_composite != -1) {
9211 					ledger_debit(old_owner->ledger,
9212 					    ledger_idx_composite,
9213 					    ptoa_64(wired_count));
9214 				}
9215 			}
9216 			if (compressed_count) {
9217 				/*
9218 				 * Adjust the appropriate old owner's ledgers
9219 				 * by the number of compressed pages.
9220 				 */
9221 				ledger_debit(old_owner->ledger,
9222 				    ledger_idx_compressed,
9223 				    ptoa_64(compressed_count));
9224 				if (object->purgable != VM_PURGABLE_VOLATILE &&
9225 				    object->purgable != VM_PURGABLE_EMPTY) {
9226 					if (do_footprint) {
9227 						ledger_debit(old_owner->ledger,
9228 						    task_ledgers.phys_footprint,
9229 						    ptoa_64(compressed_count));
9230 					} else if (ledger_idx_composite != -1) {
9231 						ledger_debit(old_owner->ledger,
9232 						    ledger_idx_composite,
9233 						    ptoa_64(compressed_count));
9234 					}
9235 				}
9236 			}
9237 		} else {
9238 			/* external but owned object: count wired pages */
9239 			if (wired_count) {
9240 				ledger_debit(old_owner->ledger,
9241 				    ledger_idx_external_wired,
9242 				    ptoa_64(wired_count));
9243 				if (do_footprint) {
9244 					ledger_debit(old_owner->ledger,
9245 					    task_ledgers.phys_footprint,
9246 					    ptoa_64(wired_count));
9247 				} else if (ledger_idx_composite != -1) {
9248 					ledger_debit(old_owner->ledger,
9249 					    ledger_idx_composite,
9250 					    ptoa_64(wired_count));
9251 				}
9252 			}
9253 		}
9254 		if (old_owner != new_owner) {
9255 			/* remove object from old_owner's list of owned objects */
9256 			DTRACE_VM2(object_owner_remove,
9257 			    vm_object_t, object,
9258 			    task_t, old_owner);
9259 			if (!old_task_objq_locked) {
9260 				task_objq_lock(old_owner);
9261 			}
9262 			old_owner->task_owned_objects--;
9263 			queue_remove(&old_owner->task_objq, object,
9264 			    vm_object_t, task_objq);
9265 			switch (object->purgable) {
9266 			case VM_PURGABLE_NONVOLATILE:
9267 			case VM_PURGABLE_EMPTY:
9268 				vm_purgeable_nonvolatile_owner_update(old_owner,
9269 				    -1);
9270 				break;
9271 			case VM_PURGABLE_VOLATILE:
9272 				vm_purgeable_volatile_owner_update(old_owner,
9273 				    -1);
9274 				break;
9275 			default:
9276 				break;
9277 			}
9278 			if (!old_task_objq_locked) {
9279 				task_objq_unlock(old_owner);
9280 			}
9281 		}
9282 	}
9283 
9284 	/*
9285 	 * Switch to new ledger tag and/or owner.
9286 	 */
9287 
9288 	new_task_objq_locked = FALSE;
9289 	if (new_owner != old_owner &&
9290 	    new_owner != TASK_NULL &&
9291 	    new_owner != VM_OBJECT_OWNER_DISOWNED) {
9292 		/*
9293 		 * If the new owner is not accepting new objects ("disowning"),
9294 		 * the object becomes "disowned" and will be added to
9295 		 * the kernel's task_objq.
9296 		 *
9297 		 * Check first without locking, to avoid blocking while the
9298 		 * task is disowning its objects.
9299 		 */
9300 		if (new_owner->task_objects_disowning) {
9301 			new_owner = VM_OBJECT_OWNER_DISOWNED;
9302 		} else {
9303 			task_objq_lock(new_owner);
9304 			/* check again now that we have the lock */
9305 			if (new_owner->task_objects_disowning) {
9306 				new_owner = VM_OBJECT_OWNER_DISOWNED;
9307 				task_objq_unlock(new_owner);
9308 			} else {
9309 				new_task_objq_locked = TRUE;
9310 			}
9311 		}
9312 	}
9313 
9314 	object->vo_ledger_tag = new_ledger_tag;
9315 	object->vo_owner = new_owner;
9316 	object->vo_no_footprint = new_no_footprint;
9317 
9318 	if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
9319 		/*
9320 		 * Disowned objects are added to the kernel's task_objq but
9321 		 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
9322 		 * differentiate them from objects intentionally owned by
9323 		 * the kernel.
9324 		 */
9325 		assert(old_owner != kernel_task);
9326 		new_owner = kernel_task;
9327 		assert(!new_task_objq_locked);
9328 		task_objq_lock(new_owner);
9329 		new_task_objq_locked = TRUE;
9330 	}
9331 
9332 	/*
9333 	 * Deal with the new owner and/or ledger tag, if needed.
9334 	 */
9335 	if (new_owner != TASK_NULL &&
9336 	    ((new_owner != old_owner)           /* new owner ... */
9337 	    ||                                  /* ... or ... */
9338 	    (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
9339 	    ||                                  /* ... or ... */
9340 	    new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
9341 		/*
9342 		 * Add this object to the new owner's ledgers.
9343 		 */
9344 		vm_object_ledger_tag_ledgers(object,
9345 		    &ledger_idx_volatile,
9346 		    &ledger_idx_nonvolatile,
9347 		    &ledger_idx_volatile_compressed,
9348 		    &ledger_idx_nonvolatile_compressed,
9349 		    &ledger_idx_composite,
9350 		    &ledger_idx_external_wired,
9351 		    &do_footprint);
9352 		if (object->internal) {
9353 			if (object->purgable == VM_PURGABLE_VOLATILE ||
9354 			    object->purgable == VM_PURGABLE_EMPTY) {
9355 				ledger_idx = ledger_idx_volatile;
9356 				ledger_idx_compressed = ledger_idx_volatile_compressed;
9357 			} else {
9358 				ledger_idx = ledger_idx_nonvolatile;
9359 				ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
9360 			}
9361 			if (resident_count) {
9362 				/*
9363 				 * Adjust the appropriate new owner's ledgers by the
9364 				 * number of resident pages.
9365 				 */
9366 				ledger_credit(new_owner->ledger,
9367 				    ledger_idx,
9368 				    ptoa_64(resident_count));
9369 				/* adjust new owner's footprint */
9370 				if (object->purgable != VM_PURGABLE_VOLATILE &&
9371 				    object->purgable != VM_PURGABLE_EMPTY) {
9372 					if (do_footprint) {
9373 						ledger_credit(new_owner->ledger,
9374 						    task_ledgers.phys_footprint,
9375 						    ptoa_64(resident_count));
9376 					} else if (ledger_idx_composite != -1) {
9377 						ledger_credit(new_owner->ledger,
9378 						    ledger_idx_composite,
9379 						    ptoa_64(resident_count));
9380 					}
9381 				}
9382 			}
9383 			if (wired_count) {
9384 				/* wired pages are always nonvolatile */
9385 				ledger_credit(new_owner->ledger,
9386 				    ledger_idx_nonvolatile,
9387 				    ptoa_64(wired_count));
9388 				if (do_footprint) {
9389 					ledger_credit(new_owner->ledger,
9390 					    task_ledgers.phys_footprint,
9391 					    ptoa_64(wired_count));
9392 				} else if (ledger_idx_composite != -1) {
9393 					ledger_credit(new_owner->ledger,
9394 					    ledger_idx_composite,
9395 					    ptoa_64(wired_count));
9396 				}
9397 			}
9398 			if (compressed_count) {
9399 				/*
9400 				 * Adjust the new owner's ledgers by the number of
9401 				 * compressed pages.
9402 				 */
9403 				ledger_credit(new_owner->ledger,
9404 				    ledger_idx_compressed,
9405 				    ptoa_64(compressed_count));
9406 				if (object->purgable != VM_PURGABLE_VOLATILE &&
9407 				    object->purgable != VM_PURGABLE_EMPTY) {
9408 					if (do_footprint) {
9409 						ledger_credit(new_owner->ledger,
9410 						    task_ledgers.phys_footprint,
9411 						    ptoa_64(compressed_count));
9412 					} else if (ledger_idx_composite != -1) {
9413 						ledger_credit(new_owner->ledger,
9414 						    ledger_idx_composite,
9415 						    ptoa_64(compressed_count));
9416 					}
9417 				}
9418 			}
9419 		} else {
9420 			/* external but owned object: count wired pages */
9421 			if (wired_count) {
9422 				ledger_credit(new_owner->ledger,
9423 				    ledger_idx_external_wired,
9424 				    ptoa_64(wired_count));
9425 				if (do_footprint) {
9426 					ledger_credit(new_owner->ledger,
9427 					    task_ledgers.phys_footprint,
9428 					    ptoa_64(wired_count));
9429 				} else if (ledger_idx_composite != -1) {
9430 					ledger_credit(new_owner->ledger,
9431 					    ledger_idx_composite,
9432 					    ptoa_64(wired_count));
9433 				}
9434 			}
9435 		}
9436 		if (new_owner != old_owner) {
9437 			/* add object to new_owner's list of owned objects */
9438 			DTRACE_VM2(object_owner_add,
9439 			    vm_object_t, object,
9440 			    task_t, new_owner);
9441 			assert(new_task_objq_locked);
9442 			new_owner->task_owned_objects++;
9443 			queue_enter(&new_owner->task_objq, object,
9444 			    vm_object_t, task_objq);
9445 			switch (object->purgable) {
9446 			case VM_PURGABLE_NONVOLATILE:
9447 			case VM_PURGABLE_EMPTY:
9448 				vm_purgeable_nonvolatile_owner_update(new_owner,
9449 				    +1);
9450 				break;
9451 			case VM_PURGABLE_VOLATILE:
9452 				vm_purgeable_volatile_owner_update(new_owner,
9453 				    +1);
9454 				break;
9455 			default:
9456 				break;
9457 			}
9458 		}
9459 	}
9460 
9461 	if (new_task_objq_locked) {
9462 		task_objq_unlock(new_owner);
9463 	}
9464 
9465 	return KERN_SUCCESS;
9466 }
9467 
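/*
 * vm_owned_objects_disown:
 * Transfer ownership of every object still owned by "task" to the kernel,
 * marked as VM_OBJECT_OWNER_DISOWNED, so the task is no longer charged for
 * them, and mark the task as having disowned its objects.
 */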
9468 void
9469 vm_owned_objects_disown(
9470 	task_t  task)
9471 {
9472 	vm_object_t     next_object;
9473 	vm_object_t     object;
9474 	int             collisions;
9475 	kern_return_t   kr;
9476 
9477 	if (task == NULL) {
9478 		return;
9479 	}
9480 
9481 	collisions = 0;
9482 
9483 again:
9484 	if (task->task_objects_disowned) {
9485 		/* task has already disowned its owned objects */
9486 		assert(task->task_volatile_objects == 0);
9487 		assert(task->task_nonvolatile_objects == 0);
9488 		assert(task->task_owned_objects == 0);
9489 		return;
9490 	}
9491 
9492 	task_objq_lock(task);
9493 
9494 	task->task_objects_disowning = TRUE;
9495 
9496 	for (object = (vm_object_t) queue_first(&task->task_objq);
9497 	    !queue_end(&task->task_objq, (queue_entry_t) object);
9498 	    object = next_object) {
9499 		if (task->task_nonvolatile_objects == 0 &&
9500 		    task->task_volatile_objects == 0 &&
9501 		    task->task_owned_objects == 0) {
9502 			/* no more objects owned by "task" */
9503 			break;
9504 		}
9505 
9506 		next_object = (vm_object_t) queue_next(&object->task_objq);
9507 
9508 #if DEBUG
9509 		assert(object->vo_purgeable_volatilizer == NULL);
9510 #endif /* DEBUG */
9511 		assert(object->vo_owner == task);
9512 		if (!vm_object_lock_try(object)) {
9513 			task_objq_unlock(task);
9514 			mutex_pause(collisions++);
9515 			goto again;
9516 		}
9517 		/* transfer ownership to the kernel */
9518 		assert(VM_OBJECT_OWNER(object) != kernel_task);
9519 		kr = vm_object_ownership_change(
9520 			object,
9521 			object->vo_ledger_tag, /* unchanged */
9522 			VM_OBJECT_OWNER_DISOWNED, /* new owner */
9523 			0, /* new_ledger_flags */
9524 			TRUE);  /* old_owner->task_objq locked */
9525 		assert(kr == KERN_SUCCESS);
9526 		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
9527 		vm_object_unlock(object);
9528 	}
9529 
9530 	if (__improbable(task->task_owned_objects != 0)) {
9531 		panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
9532 		    __FUNCTION__,
9533 		    task,
9534 		    task->task_volatile_objects,
9535 		    task->task_nonvolatile_objects,
9536 		    task->task_owned_objects,
9537 		    &task->task_objq,
9538 		    queue_first(&task->task_objq),
9539 		    queue_last(&task->task_objq));
9540 	}
9541 
9542 	/* there shouldn't be any objects owned by task now */
9543 	assert(task->task_volatile_objects == 0);
9544 	assert(task->task_nonvolatile_objects == 0);
9545 	assert(task->task_owned_objects == 0);
9546 	assert(task->task_objects_disowning);
9547 
9548 	/* and we don't need to try and disown again */
9549 	task->task_objects_disowned = TRUE;
9550 
9551 	task_objq_unlock(task);
9552 }
9553 
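/*
 * vm_object_wired_page_update_ledgers:
 * Adjust the owner's "external wired" (and, where applicable, footprint or
 * composite) ledgers when the number of wired pages in an owned external
 * object changes by "wired_delta" pages.
 */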
9554 void
9555 vm_object_wired_page_update_ledgers(
9556 	vm_object_t object,
9557 	int64_t wired_delta)
9558 {
9559 	task_t owner;
9560 
9561 	vm_object_lock_assert_exclusive(object);
9562 	if (wired_delta == 0) {
9563 		/* no change in number of wired pages */
9564 		return;
9565 	}
9566 	if (object->internal) {
9567 		/* no extra accounting needed for internal objects */
9568 		return;
9569 	}
9570 	if (!object->vo_ledger_tag) {
9571 		/* external object but not owned: no extra accounting */
9572 		return;
9573 	}
9574 
9575 	/*
9576 	 * For an explicitly-owned external VM object, account for
9577 	 * wired pages in one of the owner's ledgers.
9578 	 */
9579 	owner = VM_OBJECT_OWNER(object);
9580 	if (owner) {
9581 		int ledger_idx_volatile;
9582 		int ledger_idx_nonvolatile;
9583 		int ledger_idx_volatile_compressed;
9584 		int ledger_idx_nonvolatile_compressed;
9585 		int ledger_idx_composite;
9586 		int ledger_idx_external_wired;
9587 		boolean_t do_footprint;
9588 
9589 		/* ask which ledgers need an update */
9590 		vm_object_ledger_tag_ledgers(object,
9591 		    &ledger_idx_volatile,
9592 		    &ledger_idx_nonvolatile,
9593 		    &ledger_idx_volatile_compressed,
9594 		    &ledger_idx_nonvolatile_compressed,
9595 		    &ledger_idx_composite,
9596 		    &ledger_idx_external_wired,
9597 		    &do_footprint);
9598 		if (wired_delta > 0) {
9599 			/* more external wired bytes */
9600 			ledger_credit(owner->ledger,
9601 			    ledger_idx_external_wired,
9602 			    ptoa(wired_delta));
9603 			if (do_footprint) {
9604 				/* more footprint */
9605 				ledger_credit(owner->ledger,
9606 				    task_ledgers.phys_footprint,
9607 				    ptoa(wired_delta));
9608 			} else if (ledger_idx_composite != -1) {
9609 				ledger_credit(owner->ledger,
9610 				    ledger_idx_composite,
9611 				    ptoa(wired_delta));
9612 			}
9613 		} else {
9614 			/* less external wired bytes */
9615 			ledger_debit(owner->ledger,
9616 			    ledger_idx_external_wired,
9617 			    ptoa(-wired_delta));
9618 			if (do_footprint) {
9619 				/* more footprint */
9620 				ledger_debit(owner->ledger,
9621 				    task_ledgers.phys_footprint,
9622 				    ptoa(-wired_delta));
9623 			} else if (ledger_idx_composite != -1) {
9624 				ledger_debit(owner->ledger,
9625 				    ledger_idx_composite,
9626 				    ptoa(-wired_delta));
9627 			}
9628 		}
9629 	}
9630 }
9631