xref: /xnu-10002.1.13/osfmk/vm/vm_object.c (revision 1031c584a5e37aff177559b9f69dbd3c8c3fd30a)
1 /*
2  * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3  *
4  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5  *
6  * This file contains Original Code and/or Modifications of Original Code
7  * as defined in and that are subject to the Apple Public Source License
8  * Version 2.0 (the 'License'). You may not use this file except in
9  * compliance with the License. The rights granted to you under the License
10  * may not be used to create, or enable the creation or redistribution of,
11  * unlawful or unlicensed copies of an Apple operating system, or to
12  * circumvent, violate, or enable the circumvention or violation of, any
13  * terms of an Apple operating system software license agreement.
14  *
15  * Please obtain a copy of the License at
16  * http://www.opensource.apple.com/apsl/ and read it before using this file.
17  *
18  * The Original Code and all software distributed under the License are
19  * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22  * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23  * Please see the License for the specific language governing rights and
24  * limitations under the License.
25  *
26  * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27  */
28 /*
29  * @OSF_COPYRIGHT@
30  */
31 /*
32  * Mach Operating System
33  * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34  * All Rights Reserved.
35  *
36  * Permission to use, copy, modify and distribute this software and its
37  * documentation is hereby granted, provided that both the copyright
38  * notice and this permission notice appear in all copies of the
39  * software, derivative works or modified versions, and any portions
40  * thereof, and that both notices appear in supporting documentation.
41  *
42  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44  * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45  *
46  * Carnegie Mellon requests users of this software to return to
47  *
48  *  Software Distribution Coordinator  or  [email protected]
49  *  School of Computer Science
50  *  Carnegie Mellon University
51  *  Pittsburgh PA 15213-3890
52  *
53  * any improvements or extensions that they make and grant Carnegie Mellon
54  * the rights to redistribute these changes.
55  */
56 /*
57  */
58 /*
59  *	File:	vm/vm_object.c
60  *	Author:	Avadis Tevanian, Jr., Michael Wayne Young
61  *
62  *	Virtual memory object module.
63  */
64 
65 #include <debug.h>
66 
67 #include <mach/mach_types.h>
68 #include <mach/memory_object.h>
69 #include <mach/vm_param.h>
70 
71 #include <mach/sdt.h>
72 
73 #include <ipc/ipc_types.h>
74 #include <ipc/ipc_port.h>
75 
76 #include <kern/kern_types.h>
77 #include <kern/assert.h>
78 #include <kern/queue.h>
79 #include <kern/kalloc.h>
80 #include <kern/zalloc.h>
81 #include <kern/host.h>
82 #include <kern/host_statistics.h>
83 #include <kern/processor.h>
84 #include <kern/misc_protos.h>
85 #include <kern/policy_internal.h>
86 
87 #include <sys/kdebug_triage.h>
88 
89 #include <vm/memory_object.h>
90 #include <vm/vm_compressor_pager.h>
91 #include <vm/vm_fault.h>
92 #include <vm/vm_map.h>
93 #include <vm/vm_object.h>
94 #include <vm/vm_page.h>
95 #include <vm/vm_pageout.h>
96 #include <vm/vm_protos.h>
97 #include <vm/vm_purgeable_internal.h>
98 
99 #include <vm/vm_compressor.h>
100 
101 #if CONFIG_PHANTOM_CACHE
102 #include <vm/vm_phantom_cache.h>
103 #endif
104 
105 #if VM_OBJECT_ACCESS_TRACKING
106 uint64_t vm_object_access_tracking_reads = 0;
107 uint64_t vm_object_access_tracking_writes = 0;
108 #endif /* VM_OBJECT_ACCESS_TRACKING */
109 
110 boolean_t vm_object_collapse_compressor_allowed = TRUE;
111 
112 struct vm_counters vm_counters;
113 
114 #if DEVELOPMENT || DEBUG
115 extern struct memory_object_pager_ops shared_region_pager_ops;
116 extern unsigned int shared_region_pagers_resident_count;
117 extern unsigned int shared_region_pagers_resident_peak;
118 #endif /* DEVELOPMENT || DEBUG */
119 
120 #if VM_OBJECT_TRACKING
121 btlog_t vm_object_tracking_btlog;
122 
123 void
124 vm_object_tracking_init(void)
125 {
126 	int vm_object_tracking;
127 
128 	vm_object_tracking = 1;
129 	PE_parse_boot_argn("vm_object_tracking", &vm_object_tracking,
130 	    sizeof(vm_object_tracking));
131 
132 	if (vm_object_tracking) {
133 		vm_object_tracking_btlog = btlog_create(BTLOG_HASH,
134 		    VM_OBJECT_TRACKING_NUM_RECORDS);
135 		assert(vm_object_tracking_btlog);
136 	}
137 }
138 #endif /* VM_OBJECT_TRACKING */
139 
140 /*
141  *	Virtual memory objects maintain the actual data
142  *	associated with allocated virtual memory.  A given
143  *	page of memory exists within exactly one object.
144  *
145  *	An object is only deallocated when all "references"
146  *	are given up.
147  *
148  *	Associated with each object is a list of all resident
149  *	memory pages belonging to that object; this list is
150  *	maintained by the "vm_page" module, but locked by the object's
151  *	lock.
152  *
153  *	Each object also records the memory object reference
154  *	that is used by the kernel to request and write
155  *	back data (the memory object, field "pager"), etc...
156  *
157  *	Virtual memory objects are allocated to provide
158  *	zero-filled memory (vm_allocate) or map a user-defined
159  *	memory object into a virtual address space (vm_map).
160  *
161  *	Virtual memory objects that refer to a user-defined
162  *	memory object are called "permanent", because all changes
163  *	made in virtual memory are reflected back to the
164  *	memory manager, which may then store them permanently.
165  *	Other virtual memory objects are called "temporary",
166  *	meaning that changes need be written back only when
167  *	necessary to reclaim pages, and that storage associated
168  *	with the object can be discarded once it is no longer
169  *	mapped.
170  *
171  *	A permanent memory object may be mapped into more
172  *	than one virtual address space.  Moreover, two threads
173  *	may attempt to make the first mapping of a memory
174  *	object concurrently.  Only one thread is allowed to
175  *	complete this mapping; all others wait until the
176  *	"pager_initialized" field is asserted, indicating
177  *	that the first thread has initialized all of the
178  *	necessary fields in the virtual memory object structure.
179  *
180  *	The kernel relies on a *default memory manager* to
181  *	provide backing storage for the zero-filled virtual
182  *	memory objects.  The pager memory objects associated
183  *	with these temporary virtual memory objects are only
184  *	requested from the default memory manager when it
185  *	becomes necessary.  Virtual memory objects
186  *	that depend on the default memory manager are called
187  *	"internal".  The "pager_created" field is provided to
188  *	indicate whether these ports have ever been allocated.
189  *
190  *	The kernel may also create virtual memory objects to
191  *	hold changed pages after a copy-on-write operation.
192  *	In this case, the virtual memory object (and its
193  *	backing storage -- its memory object) only contain
194  *	those pages that have been changed.  The "shadow"
195  *	field refers to the virtual memory object that contains
196  *	the remainder of the contents.  The "shadow_offset"
197  *	field indicates where in the "shadow" these contents begin.
198  *	The "copy" field refers to a virtual memory object
199  *	to which changed pages must be copied before changing
200  *	this object, in order to implement another form
201  *	of copy-on-write optimization.
202  *
203  *	The virtual memory object structure also records
204  *	the attributes associated with its memory object.
205  *	The "pager_ready", "can_persist" and "copy_strategy"
206  *	fields represent those attributes.  The "cached_list"
207  *	field is used in the implementation of the persistence
208  *	attribute.
209  *
210  * ZZZ Continue this comment.
211  */
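/*
 * [Editor's illustrative sketch -- not part of vm_object.c]
 * The "shadow"/"vo_shadow_offset" description above amounts to a chain walk:
 * look for the page in this object and, if it is not resident, translate the
 * offset and fall through to the backing object.  The types and list layout
 * below are simplified stand-ins, not the real vm_object/vm_page structures.
 */
#if 0 /* illustration only, never compiled */
struct demo_page {
	unsigned long long      offset;         /* offset within its owning object */
	struct demo_page        *next;          /* next resident page of that object */
};

struct demo_object {
	struct demo_page        *resident;      /* resident pages of this object */
	struct demo_object      *shadow;        /* backing object, or NULL */
	unsigned long long      shadow_offset;  /* where this object begins in the shadow */
};

static struct demo_page *
demo_shadow_chain_lookup(struct demo_object *object, unsigned long long offset)
{
	while (object != NULL) {
		for (struct demo_page *p = object->resident; p != NULL; p = p->next) {
			if (p->offset == offset) {
				return p;       /* page is resident in this object */
			}
		}
		offset += object->shadow_offset;        /* translate into the shadow */
		object = object->shadow;                /* fall through to the backing object */
	}
	return NULL;    /* the real fault path would zero-fill or ask the pager */
}
#endif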
212 
213 /* Forward declarations for internal functions. */
214 static kern_return_t    vm_object_terminate(
215 	vm_object_t     object);
216 
217 static void             vm_object_do_collapse(
218 	vm_object_t     object,
219 	vm_object_t     backing_object);
220 
221 static void             vm_object_do_bypass(
222 	vm_object_t     object,
223 	vm_object_t     backing_object);
224 
225 static void             vm_object_release_pager(
226 	memory_object_t pager);
227 
228 SECURITY_READ_ONLY_LATE(zone_t) vm_object_zone; /* vm backing store zone */
229 
230 /*
231  *	All wired-down kernel memory belongs to this memory
232  *	object (kernel_object) by default to avoid wasting data structures.
233  */
234 static struct vm_object                 kernel_object_store VM_PAGE_PACKED_ALIGNED;
235 const vm_object_t                       kernel_object_default = &kernel_object_store;
236 
237 static struct vm_object                 compressor_object_store VM_PAGE_PACKED_ALIGNED;
238 const vm_object_t                       compressor_object = &compressor_object_store;
239 
240 /*
241  * This object holds all pages that have been retired due to errors like ECC.
242  * The system should never use the page or look at its contents. The offset
243  * in this object is the same as the page's physical address.
244  */
245 static struct vm_object                 retired_pages_object_store VM_PAGE_PACKED_ALIGNED;
246 const vm_object_t                       retired_pages_object = &retired_pages_object_store;
247 
248 
249 /*
250  *	Virtual memory objects are initialized from
251  *	a template (see vm_object_allocate).
252  *
253  *	When adding a new field to the virtual memory
254  *	object structure, be sure to add initialization
255  *	(see _vm_object_allocate()).
256  */
257 static const struct vm_object vm_object_template = {
258 	.memq.prev = 0,
259 	.memq.next = 0,
260 	/*
261 	 * The lock will be initialized for each allocated object in
262 	 * _vm_object_allocate(), so we don't need to initialize it in
263 	 * the vm_object_template.
264 	 */
265 	.vo_size = 0,
266 	.memq_hint = VM_PAGE_NULL,
267 	.ref_count = 1,
268 	.resident_page_count = 0,
269 	.wired_page_count = 0,
270 	.reusable_page_count = 0,
271 	.vo_copy = VM_OBJECT_NULL,
272 	.shadow = VM_OBJECT_NULL,
273 	.vo_shadow_offset = (vm_object_offset_t) 0,
274 	.pager = MEMORY_OBJECT_NULL,
275 	.paging_offset = 0,
276 	.pager_control = MEMORY_OBJECT_CONTROL_NULL,
277 	.copy_strategy = MEMORY_OBJECT_COPY_SYMMETRIC,
278 	.paging_in_progress = 0,
279 	.vo_size_delta = 0,
280 	.activity_in_progress = 0,
281 
282 	/* Begin bitfields */
283 	.all_wanted = 0, /* all bits FALSE */
284 	.pager_created = FALSE,
285 	.pager_initialized = FALSE,
286 	.pager_ready = FALSE,
287 	.pager_trusted = FALSE,
288 	.can_persist = FALSE,
289 	.internal = TRUE,
290 	.private = FALSE,
291 	.pageout = FALSE,
292 	.alive = TRUE,
293 	.purgable = VM_PURGABLE_DENY,
294 	.purgeable_when_ripe = FALSE,
295 	.purgeable_only_by_kernel = FALSE,
296 	.shadowed = FALSE,
297 	.true_share = FALSE,
298 	.terminating = FALSE,
299 	.named = FALSE,
300 	.shadow_severed = FALSE,
301 	.phys_contiguous = FALSE,
302 	.nophyscache = FALSE,
303 	/* End bitfields */
304 
305 	.cached_list.prev = NULL,
306 	.cached_list.next = NULL,
307 
308 	.last_alloc = (vm_object_offset_t) 0,
309 	.sequential = (vm_object_offset_t) 0,
310 	.pages_created = 0,
311 	.pages_used = 0,
312 	.scan_collisions = 0,
313 #if CONFIG_PHANTOM_CACHE
314 	.phantom_object_id = 0,
315 #endif
316 	.cow_hint = ~(vm_offset_t)0,
317 
318 	/* cache bitfields */
319 	.wimg_bits = VM_WIMG_USE_DEFAULT,
320 	.set_cache_attr = FALSE,
321 	.object_is_shared_cache = FALSE,
322 	.code_signed = FALSE,
323 	.transposed = FALSE,
324 	.mapping_in_progress = FALSE,
325 	.phantom_isssd = FALSE,
326 	.volatile_empty = FALSE,
327 	.volatile_fault = FALSE,
328 	.all_reusable = FALSE,
329 	.blocked_access = FALSE,
330 	.vo_ledger_tag = VM_LEDGER_TAG_NONE,
331 	.vo_no_footprint = FALSE,
332 #if CONFIG_IOSCHED || UPL_DEBUG
333 	.uplq.prev = NULL,
334 	.uplq.next = NULL,
335 #endif /* CONFIG_IOSCHED || UPL_DEBUG */
336 #ifdef VM_PIP_DEBUG
337 	.pip_holders = {0},
338 #endif /* VM_PIP_DEBUG */
339 
340 	.objq.next = NULL,
341 	.objq.prev = NULL,
342 	.task_objq.next = NULL,
343 	.task_objq.prev = NULL,
344 
345 	.purgeable_queue_type = PURGEABLE_Q_TYPE_MAX,
346 	.purgeable_queue_group = 0,
347 
348 	.wire_tag = VM_KERN_MEMORY_NONE,
349 #if !VM_TAG_ACTIVE_UPDATE
350 	.wired_objq.next = NULL,
351 	.wired_objq.prev = NULL,
352 #endif /* ! VM_TAG_ACTIVE_UPDATE */
353 
354 	.io_tracking = FALSE,
355 
356 #if CONFIG_SECLUDED_MEMORY
357 	.eligible_for_secluded = FALSE,
358 	.can_grab_secluded = FALSE,
359 #else /* CONFIG_SECLUDED_MEMORY */
360 	.__object3_unused_bits = 0,
361 #endif /* CONFIG_SECLUDED_MEMORY */
362 
363 	.for_realtime = false,
364 
365 #if VM_OBJECT_ACCESS_TRACKING
366 	.access_tracking = FALSE,
367 	.access_tracking_reads = 0,
368 	.access_tracking_writes = 0,
369 #endif /* VM_OBJECT_ACCESS_TRACKING */
370 
371 #if DEBUG
372 	.purgeable_owner_bt = {0},
373 	.vo_purgeable_volatilizer = NULL,
374 	.purgeable_volatilizer_bt = {0},
375 #endif /* DEBUG */
376 };
377 
378 LCK_GRP_DECLARE(vm_object_lck_grp, "vm_object");
379 LCK_GRP_DECLARE(vm_object_cache_lck_grp, "vm_object_cache");
380 LCK_ATTR_DECLARE(vm_object_lck_attr, 0, 0);
381 LCK_ATTR_DECLARE(kernel_object_lck_attr, 0, LCK_ATTR_DEBUG);
382 LCK_ATTR_DECLARE(compressor_object_lck_attr, 0, LCK_ATTR_DEBUG);
383 
384 unsigned int vm_page_purged_wired = 0;
385 unsigned int vm_page_purged_busy = 0;
386 unsigned int vm_page_purged_others = 0;
387 
388 static queue_head_t     vm_object_cached_list;
389 static uint32_t         vm_object_cache_pages_freed = 0;
390 static uint32_t         vm_object_cache_pages_moved = 0;
391 static uint32_t         vm_object_cache_pages_skipped = 0;
392 static uint32_t         vm_object_cache_adds = 0;
393 static uint32_t         vm_object_cached_count = 0;
394 static LCK_MTX_DECLARE_ATTR(vm_object_cached_lock_data,
395     &vm_object_cache_lck_grp, &vm_object_lck_attr);
396 
397 static uint32_t         vm_object_page_grab_failed = 0;
398 static uint32_t         vm_object_page_grab_skipped = 0;
399 static uint32_t         vm_object_page_grab_returned = 0;
400 static uint32_t         vm_object_page_grab_pmapped = 0;
401 static uint32_t         vm_object_page_grab_reactivations = 0;
402 
403 #define vm_object_cache_lock_spin()             \
404 	        lck_mtx_lock_spin(&vm_object_cached_lock_data)
405 #define vm_object_cache_unlock()        \
406 	        lck_mtx_unlock(&vm_object_cached_lock_data)
407 
408 static void     vm_object_cache_remove_locked(vm_object_t);
409 
410 
411 static void vm_object_reap(vm_object_t object);
412 static void vm_object_reap_async(vm_object_t object);
413 static void vm_object_reaper_thread(void);
414 
415 static LCK_MTX_DECLARE_ATTR(vm_object_reaper_lock_data,
416     &vm_object_lck_grp, &vm_object_lck_attr);
417 
418 static queue_head_t vm_object_reaper_queue; /* protected by vm_object_reaper_lock() */
419 unsigned int vm_object_reap_count = 0;
420 unsigned int vm_object_reap_count_async = 0;
421 
422 #define vm_object_reaper_lock()         \
423 	        lck_mtx_lock(&vm_object_reaper_lock_data)
424 #define vm_object_reaper_lock_spin()            \
425 	        lck_mtx_lock_spin(&vm_object_reaper_lock_data)
426 #define vm_object_reaper_unlock()       \
427 	        lck_mtx_unlock(&vm_object_reaper_lock_data)
428 
429 #if CONFIG_IOSCHED
430 /* I/O Re-prioritization request list */
431 queue_head_t io_reprioritize_list = QUEUE_HEAD_INITIALIZER(io_reprioritize_list);
432 
433 LCK_SPIN_DECLARE_ATTR(io_reprioritize_list_lock,
434     &vm_object_lck_grp, &vm_object_lck_attr);
435 
436 #define IO_REPRIORITIZE_LIST_LOCK()     \
437 	        lck_spin_lock_grp(&io_reprioritize_list_lock, &vm_object_lck_grp)
438 #define IO_REPRIORITIZE_LIST_UNLOCK()   \
439 	        lck_spin_unlock(&io_reprioritize_list_lock)
440 
441 ZONE_DEFINE_TYPE(io_reprioritize_req_zone, "io_reprioritize_req",
442     struct io_reprioritize_req, ZC_NONE);
443 
444 /* I/O Re-prioritization thread */
445 int io_reprioritize_wakeup = 0;
446 static void io_reprioritize_thread(void *param __unused, wait_result_t wr __unused);
447 
448 #define IO_REPRIO_THREAD_WAKEUP()       thread_wakeup((event_t)&io_reprioritize_wakeup)
449 #define IO_REPRIO_THREAD_CONTINUATION()                                 \
450 {                                                               \
451 	assert_wait(&io_reprioritize_wakeup, THREAD_UNINT);     \
452 	thread_block(io_reprioritize_thread);                   \
453 }
454 
455 void vm_page_request_reprioritize(vm_object_t, uint64_t, uint32_t, int);
456 void vm_page_handle_prio_inversion(vm_object_t, vm_page_t);
457 void vm_decmp_upl_reprioritize(upl_t, int);
458 #endif
459 
460 #if 0
461 #undef KERNEL_DEBUG
462 #define KERNEL_DEBUG KERNEL_DEBUG_CONSTANT
463 #endif
464 
465 
466 void
467 vm_object_set_size(
468 	vm_object_t             object,
469 	vm_object_size_t        outer_size,
470 	vm_object_size_t        inner_size)
471 {
472 	object->vo_size = vm_object_round_page(outer_size);
473 #if KASAN
474 	assert(object->vo_size - inner_size <= USHRT_MAX);
475 	object->vo_size_delta = (unsigned short)(object->vo_size - inner_size);
476 #else
477 	(void)inner_size;
478 #endif
479 }
480 
481 
482 /*
483  *	vm_object_allocate:
484  *
485  *	Returns a new object with the given size.
486  */
487 
488 __private_extern__ void
489 _vm_object_allocate(
490 	vm_object_size_t        size,
491 	vm_object_t             object)
492 {
493 	*object = vm_object_template;
494 	vm_page_queue_init(&object->memq);
495 #if UPL_DEBUG || CONFIG_IOSCHED
496 	queue_init(&object->uplq);
497 #endif
498 	vm_object_lock_init(object);
499 	vm_object_set_size(object, size, size);
500 
501 #if VM_OBJECT_TRACKING_OP_CREATED
502 	if (vm_object_tracking_btlog) {
503 		btlog_record(vm_object_tracking_btlog, object,
504 		    VM_OBJECT_TRACKING_OP_CREATED,
505 		    btref_get(__builtin_frame_address(0), 0));
506 	}
507 #endif /* VM_OBJECT_TRACKING_OP_CREATED */
508 }
509 
510 __private_extern__ vm_object_t
511 vm_object_allocate(
512 	vm_object_size_t        size)
513 {
514 	vm_object_t object;
515 
516 	object = zalloc_flags(vm_object_zone, Z_WAITOK | Z_NOFAIL);
517 	_vm_object_allocate(size, object);
518 
519 	return object;
520 }
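/*
 * [Editor's illustrative sketch -- not part of vm_object.c]
 * Minimal lifetime pairing for the allocation routines above: a new object
 * comes back from vm_object_allocate() holding one reference, and giving
 * that reference up with vm_object_deallocate() (below) allows the object
 * to be terminated and reaped.  The demo function is hypothetical.
 */
#if 0 /* illustration only, never compiled */
static void
demo_object_lifetime(void)
{
	vm_object_t     object;

	object = vm_object_allocate(PAGE_SIZE);         /* ref_count == 1 */
	/* ... fault in pages, map the object, etc ... */
	vm_object_deallocate(object);                   /* drop the last reference */
}
#endif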
521 
522 TUNABLE(bool, workaround_41447923, "workaround_41447923", false);
523 
524 /*
525  *	vm_object_bootstrap:
526  *
527  *	Initialize the VM objects module.
528  */
529 __startup_func
530 void
531 vm_object_bootstrap(void)
532 {
533 	vm_size_t       vm_object_size;
534 
535 	assert(sizeof(mo_ipc_object_bits_t) == sizeof(ipc_object_bits_t));
536 
537 	vm_object_size = (sizeof(struct vm_object) + (VM_PAGE_PACKED_PTR_ALIGNMENT - 1)) &
538 	    ~(VM_PAGE_PACKED_PTR_ALIGNMENT - 1);
539 
540 	vm_object_zone = zone_create("vm objects", vm_object_size,
541 	    ZC_NOENCRYPT | ZC_ALIGNMENT_REQUIRED | ZC_VM | ZC_NOTBITAG);
542 
543 	queue_init(&vm_object_cached_list);
544 
545 	queue_init(&vm_object_reaper_queue);
546 
547 	/*
548 	 *	Initialize the "kernel object"
549 	 */
550 
551 	/*
552 	 * Note that in the following size specifications, we need to add 1 because
553 	 * VM_MAX_KERNEL_ADDRESS (vm_last_addr) is a maximum address, not a size.
554 	 */
555 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, kernel_object_default);
556 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, compressor_object);
557 	kernel_object_default->copy_strategy = MEMORY_OBJECT_COPY_NONE;
558 	compressor_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
559 	kernel_object_default->no_tag_update = TRUE;
560 
561 	/*
562 	 * The object to hold retired VM pages.
563 	 */
564 	_vm_object_allocate(VM_MAX_KERNEL_ADDRESS + 1, retired_pages_object);
565 	retired_pages_object->copy_strategy = MEMORY_OBJECT_COPY_NONE;
566 }
567 
568 #if CONFIG_IOSCHED
569 void
570 vm_io_reprioritize_init(void)
571 {
572 	kern_return_t   result;
573 	thread_t        thread = THREAD_NULL;
574 
575 	result = kernel_thread_start_priority(io_reprioritize_thread, NULL, 95 /* MAXPRI_KERNEL */, &thread);
576 	if (result == KERN_SUCCESS) {
577 		thread_set_thread_name(thread, "VM_io_reprioritize_thread");
578 		thread_deallocate(thread);
579 	} else {
580 		panic("Could not create io_reprioritize_thread");
581 	}
582 }
583 #endif
584 
585 void
586 vm_object_reaper_init(void)
587 {
588 	kern_return_t   kr;
589 	thread_t        thread;
590 
591 	kr = kernel_thread_start_priority(
592 		(thread_continue_t) vm_object_reaper_thread,
593 		NULL,
594 		BASEPRI_VM,
595 		&thread);
596 	if (kr != KERN_SUCCESS) {
597 		panic("failed to launch vm_object_reaper_thread kr=0x%x", kr);
598 	}
599 	thread_set_thread_name(thread, "VM_object_reaper_thread");
600 	thread_deallocate(thread);
601 }
602 
603 
604 /*
605  *	vm_object_deallocate:
606  *
607  *	Release a reference to the specified object,
608  *	gained either through a vm_object_allocate
609  *	or a vm_object_reference call.  When all references
610  *	are gone, storage associated with this object
611  *	may be relinquished.
612  *
613  *	No object may be locked.
614  */
615 unsigned long vm_object_deallocate_shared_successes = 0;
616 unsigned long vm_object_deallocate_shared_failures = 0;
617 unsigned long vm_object_deallocate_shared_swap_failures = 0;
618 
619 __private_extern__ void
620 vm_object_deallocate(
621 	vm_object_t     object)
622 {
623 	vm_object_t     shadow = VM_OBJECT_NULL;
624 
625 //	if(object)dbgLog(object, object->ref_count, object->can_persist, 3);	/* (TEST/DEBUG) */
626 //	else dbgLog(object, 0, 0, 3);	/* (TEST/DEBUG) */
627 
628 	if (object == VM_OBJECT_NULL) {
629 		return;
630 	}
631 
632 	if (is_kernel_object(object) || object == compressor_object || object == retired_pages_object) {
633 		vm_object_lock_shared(object);
634 
635 		OSAddAtomic(-1, &object->ref_count);
636 
637 		if (object->ref_count == 0) {
638 			if (is_kernel_object(object)) {
639 				panic("vm_object_deallocate: losing a kernel_object");
640 			} else if (object == retired_pages_object) {
641 				panic("vm_object_deallocate: losing retired_pages_object");
642 			} else {
643 				panic("vm_object_deallocate: losing compressor_object");
644 			}
645 		}
646 		vm_object_unlock(object);
647 		return;
648 	}
649 
650 	if (object->ref_count == 2 &&
651 	    object->named) {
652 		/*
653 		 * This "named" object's reference count is about to
654 		 * drop from 2 to 1:
655 		 * we'll need to call memory_object_last_unmap().
656 		 */
657 	} else if (object->ref_count == 2 &&
658 	    object->internal &&
659 	    object->shadow != VM_OBJECT_NULL) {
660 		/*
661 		 * This internal object's reference count is about to
662 		 * drop from 2 to 1 and it has a shadow object:
663 		 * we'll want to try and collapse this object with its
664 		 * shadow.
665 		 */
666 	} else if (object->ref_count >= 2) {
667 		UInt32          original_ref_count;
668 		volatile UInt32 *ref_count_p;
669 		Boolean         atomic_swap;
670 
671 		/*
672 		 * The object currently looks like it is not being
673 		 * kept alive solely by the reference we're about to release.
674 		 * Let's try and release our reference without taking
675 		 * all the locks we would need if we had to terminate the
676 		 * object (cache lock + exclusive object lock).
677 		 * Lock the object "shared" to make sure we don't race with
678 		 * anyone holding it "exclusive".
679 		 */
680 		vm_object_lock_shared(object);
681 		ref_count_p = (volatile UInt32 *) &object->ref_count;
682 		original_ref_count = object->ref_count;
683 		/*
684 		 * Test again as "ref_count" could have changed.
685 		 * "named" shouldn't change.
686 		 */
687 		if (original_ref_count == 2 &&
688 		    object->named) {
689 			/* need to take slow path for m_o_last_unmap() */
690 			atomic_swap = FALSE;
691 		} else if (original_ref_count == 2 &&
692 		    object->internal &&
693 		    object->shadow != VM_OBJECT_NULL) {
694 			/* need to take slow path for vm_object_collapse() */
695 			atomic_swap = FALSE;
696 		} else if (original_ref_count < 2) {
697 			/* need to take slow path for vm_object_terminate() */
698 			atomic_swap = FALSE;
699 		} else {
700 			/* try an atomic update with the shared lock */
701 			atomic_swap = OSCompareAndSwap(
702 				original_ref_count,
703 				original_ref_count - 1,
704 				(UInt32 *) &object->ref_count);
705 			if (atomic_swap == FALSE) {
706 				vm_object_deallocate_shared_swap_failures++;
707 				/* fall back to the slow path... */
708 			}
709 		}
710 
711 		vm_object_unlock(object);
712 
713 		if (atomic_swap) {
714 			/*
715 			 * ref_count was updated atomically !
716 			 */
717 			vm_object_deallocate_shared_successes++;
718 			return;
719 		}
720 
721 		/*
722 		 * Someone else updated the ref_count at the same
723 		 * time and we lost the race.  Fall back to the usual
724 		 * slow but safe path...
725 		 */
726 		vm_object_deallocate_shared_failures++;
727 	}
728 
729 	while (object != VM_OBJECT_NULL) {
730 		vm_object_lock(object);
731 
732 		assert(object->ref_count > 0);
733 
734 		/*
735 		 *	If the object has a named reference, and only
736 		 *	that reference would remain, inform the pager
737 		 *	about the last "mapping" reference going away.
738 		 */
739 		if ((object->ref_count == 2) && (object->named)) {
740 			memory_object_t pager = object->pager;
741 
742 			/* Notify the Pager that there are no */
743 			/* more mappers for this object */
744 
745 			if (pager != MEMORY_OBJECT_NULL) {
746 				vm_object_mapping_wait(object, THREAD_UNINT);
747 				vm_object_mapping_begin(object);
748 				vm_object_unlock(object);
749 
750 				memory_object_last_unmap(pager);
751 
752 				vm_object_lock(object);
753 				vm_object_mapping_end(object);
754 			}
755 			assert(object->ref_count > 0);
756 		}
757 
758 		/*
759 		 *	Lose the reference. If other references
760 		 *	remain, then we are done, unless we need
761 		 *	to retry a cache trim.
762 		 *	If it is the last reference, then keep it
763 		 *	until any pending initialization is completed.
764 		 */
765 
766 		/* if the object is terminating, it cannot go into */
767 		/* the cache and we obviously should not call      */
768 		/* terminate again.  */
769 
770 		if ((object->ref_count > 1) || object->terminating) {
771 			vm_object_lock_assert_exclusive(object);
772 			object->ref_count--;
773 
774 			if (object->ref_count == 1 &&
775 			    object->shadow != VM_OBJECT_NULL) {
776 				/*
777 				 * There's only one reference left on this
778 				 * VM object.  We can't tell if it's a valid
779 				 * one (from a mapping for example) or if this
780 				 * object is just part of a possibly stale and
781 				 * useless shadow chain.
782 				 * We would like to try and collapse it into
783 				 * its parent, but we don't have any pointers
784 				 * back to this parent object.
785 				 * But we can try and collapse this object with
786 				 * its own shadows, in case these are useless
787 				 * too...
788 				 * We can't bypass this object though, since we
789 				 * don't know if this last reference on it is
790 				 * meaningful or not.
791 				 */
792 				vm_object_collapse(object, 0, FALSE);
793 			}
794 			vm_object_unlock(object);
795 			return;
796 		}
797 
798 		/*
799 		 *	We have to wait for initialization
800 		 *	before destroying or caching the object.
801 		 */
802 
803 		if (object->pager_created && !object->pager_initialized) {
804 			assert(!object->can_persist);
805 			vm_object_assert_wait(object,
806 			    VM_OBJECT_EVENT_INITIALIZED,
807 			    THREAD_UNINT);
808 			vm_object_unlock(object);
809 
810 			thread_block(THREAD_CONTINUE_NULL);
811 			continue;
812 		}
813 
814 		/*
815 		 *	Terminate this object. If it had a shadow,
816 		 *	then deallocate it; otherwise, if we need
817 		 *	to retry a cache trim, do so now; otherwise,
818 		 *	we are done. "pageout" objects have a shadow,
819 		 *	but maintain a "paging reference" rather than
820 		 *	a normal reference.
821 		 */
822 		shadow = object->pageout?VM_OBJECT_NULL:object->shadow;
823 
824 		if (vm_object_terminate(object) != KERN_SUCCESS) {
825 			return;
826 		}
827 		if (shadow != VM_OBJECT_NULL) {
828 			object = shadow;
829 			continue;
830 		}
831 		return;
832 	}
833 }
834 
835 
836 
837 vm_page_t
838 vm_object_page_grab(
839 	vm_object_t     object)
840 {
841 	vm_page_t       p, next_p;
842 	int             p_limit = 0;
843 	int             p_skipped = 0;
844 
845 	vm_object_lock_assert_exclusive(object);
846 
847 	next_p = (vm_page_t)vm_page_queue_first(&object->memq);
848 	p_limit = MIN(50, object->resident_page_count);
849 
850 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && --p_limit > 0) {
851 		p = next_p;
852 		next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
853 
854 		if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry || p->vmp_fictitious) {
855 			goto move_page_in_obj;
856 		}
857 
858 		if (p->vmp_pmapped || p->vmp_dirty || p->vmp_precious) {
859 			vm_page_lockspin_queues();
860 
861 			if (p->vmp_pmapped) {
862 				int refmod_state;
863 
864 				vm_object_page_grab_pmapped++;
865 
866 				if (p->vmp_reference == FALSE || p->vmp_dirty == FALSE) {
867 					refmod_state = pmap_get_refmod(VM_PAGE_GET_PHYS_PAGE(p));
868 
869 					if (refmod_state & VM_MEM_REFERENCED) {
870 						p->vmp_reference = TRUE;
871 					}
872 					if (refmod_state & VM_MEM_MODIFIED) {
873 						SET_PAGE_DIRTY(p, FALSE);
874 					}
875 				}
876 				if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
877 					refmod_state = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
878 
879 					if (refmod_state & VM_MEM_REFERENCED) {
880 						p->vmp_reference = TRUE;
881 					}
882 					if (refmod_state & VM_MEM_MODIFIED) {
883 						SET_PAGE_DIRTY(p, FALSE);
884 					}
885 
886 					if (p->vmp_dirty == FALSE) {
887 						goto take_page;
888 					}
889 				}
890 			}
891 			if ((p->vmp_q_state != VM_PAGE_ON_ACTIVE_Q) && p->vmp_reference == TRUE) {
892 				vm_page_activate(p);
893 
894 				counter_inc(&vm_statistics_reactivations);
895 				vm_object_page_grab_reactivations++;
896 			}
897 			vm_page_unlock_queues();
898 move_page_in_obj:
899 			vm_page_queue_remove(&object->memq, p, vmp_listq);
900 			vm_page_queue_enter(&object->memq, p, vmp_listq);
901 
902 			p_skipped++;
903 			continue;
904 		}
905 		vm_page_lockspin_queues();
906 take_page:
907 		vm_page_free_prepare_queues(p);
908 		vm_object_page_grab_returned++;
909 		vm_object_page_grab_skipped += p_skipped;
910 
911 		vm_page_unlock_queues();
912 
913 		vm_page_free_prepare_object(p, TRUE);
914 
915 		return p;
916 	}
917 	vm_object_page_grab_skipped += p_skipped;
918 	vm_object_page_grab_failed++;
919 
920 	return NULL;
921 }
922 
923 
924 
925 #define EVICT_PREPARE_LIMIT     64
926 #define EVICT_AGE               10
927 
928 static  clock_sec_t     vm_object_cache_aging_ts = 0;
929 
930 static void
931 vm_object_cache_remove_locked(
932 	vm_object_t     object)
933 {
934 	assert(object->purgable == VM_PURGABLE_DENY);
935 
936 	queue_remove(&vm_object_cached_list, object, vm_object_t, cached_list);
937 	object->cached_list.next = NULL;
938 	object->cached_list.prev = NULL;
939 
940 	vm_object_cached_count--;
941 }
942 
943 void
944 vm_object_cache_remove(
945 	vm_object_t     object)
946 {
947 	vm_object_cache_lock_spin();
948 
949 	if (object->cached_list.next &&
950 	    object->cached_list.prev) {
951 		vm_object_cache_remove_locked(object);
952 	}
953 
954 	vm_object_cache_unlock();
955 }
956 
957 void
958 vm_object_cache_add(
959 	vm_object_t     object)
960 {
961 	clock_sec_t sec;
962 	clock_nsec_t nsec;
963 
964 	assert(object->purgable == VM_PURGABLE_DENY);
965 
966 	if (object->resident_page_count == 0) {
967 		return;
968 	}
969 	clock_get_system_nanotime(&sec, &nsec);
970 
971 	vm_object_cache_lock_spin();
972 
973 	if (object->cached_list.next == NULL &&
974 	    object->cached_list.prev == NULL) {
975 		queue_enter(&vm_object_cached_list, object, vm_object_t, cached_list);
976 		object->vo_cache_ts = sec + EVICT_AGE;
977 		object->vo_cache_pages_to_scan = object->resident_page_count;
978 
979 		vm_object_cached_count++;
980 		vm_object_cache_adds++;
981 	}
982 	vm_object_cache_unlock();
983 }
984 
985 int
986 vm_object_cache_evict(
987 	int     num_to_evict,
988 	int     max_objects_to_examine)
989 {
990 	vm_object_t     object = VM_OBJECT_NULL;
991 	vm_object_t     next_obj = VM_OBJECT_NULL;
992 	vm_page_t       local_free_q = VM_PAGE_NULL;
993 	vm_page_t       p;
994 	vm_page_t       next_p;
995 	int             object_cnt = 0;
996 	vm_page_t       ep_array[EVICT_PREPARE_LIMIT];
997 	int             ep_count;
998 	int             ep_limit;
999 	int             ep_index;
1000 	int             ep_freed = 0;
1001 	int             ep_moved = 0;
1002 	uint32_t        ep_skipped = 0;
1003 	clock_sec_t     sec;
1004 	clock_nsec_t    nsec;
1005 
1006 	KERNEL_DEBUG(0x13001ec | DBG_FUNC_START, 0, 0, 0, 0, 0);
1007 	/*
1008 	 * do a couple of quick checks to see if it's
1009 	 * worthwhile grabbing the lock
1010 	 */
1011 	if (queue_empty(&vm_object_cached_list)) {
1012 		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1013 		return 0;
1014 	}
1015 	clock_get_system_nanotime(&sec, &nsec);
1016 
1017 	/*
1018 	 * the object on the head of the queue has not
1019 	 * yet sufficiently aged
1020 	 */
1021 	if (sec < vm_object_cache_aging_ts) {
1022 		KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, 0, 0, 0, 0, 0);
1023 		return 0;
1024 	}
1025 	/*
1026 	 * don't need the queue lock to find
1027 	 * and lock an object on the cached list
1028 	 */
1029 	vm_page_unlock_queues();
1030 
1031 	vm_object_cache_lock_spin();
1032 
1033 	for (;;) {
1034 		next_obj = (vm_object_t)queue_first(&vm_object_cached_list);
1035 
1036 		while (!queue_end(&vm_object_cached_list, (queue_entry_t)next_obj) && object_cnt++ < max_objects_to_examine) {
1037 			object = next_obj;
1038 			next_obj = (vm_object_t)queue_next(&next_obj->cached_list);
1039 
1040 			assert(object->purgable == VM_PURGABLE_DENY);
1041 
1042 			if (sec < object->vo_cache_ts) {
1043 				KERNEL_DEBUG(0x130020c, object, object->resident_page_count, object->vo_cache_ts, sec, 0);
1044 
1045 				vm_object_cache_aging_ts = object->vo_cache_ts;
1046 				object = VM_OBJECT_NULL;
1047 				break;
1048 			}
1049 			if (!vm_object_lock_try_scan(object)) {
1050 				/*
1051 				 * just skip over this guy for now... if we find
1052 				 * an object to steal pages from, we'll revisit in a bit...
1053 				 * hopefully, the lock will have cleared
1054 				 */
1055 				KERNEL_DEBUG(0x13001f8, object, object->resident_page_count, 0, 0, 0);
1056 
1057 				object = VM_OBJECT_NULL;
1058 				continue;
1059 			}
1060 			if (vm_page_queue_empty(&object->memq) || object->vo_cache_pages_to_scan == 0) {
1061 				/*
1062 				 * this case really shouldn't happen, but it's not fatal
1063 				 * so deal with it... if we don't remove the object from
1064 				 * the list, we'll never move past it.
1065 				 */
1066 				KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1067 
1068 				vm_object_cache_remove_locked(object);
1069 				vm_object_unlock(object);
1070 				object = VM_OBJECT_NULL;
1071 				continue;
1072 			}
1073 			/*
1074 			 * we have a locked object with pages...
1075 			 * time to start harvesting
1076 			 */
1077 			break;
1078 		}
1079 		vm_object_cache_unlock();
1080 
1081 		if (object == VM_OBJECT_NULL) {
1082 			break;
1083 		}
1084 
1085 		/*
1086 		 * object is locked at this point and
1087 		 * has resident pages
1088 		 */
1089 		next_p = (vm_page_t)vm_page_queue_first(&object->memq);
1090 
1091 		/*
1092 		 * break the page scan into 2 pieces to minimize the time spent
1093 		 * behind the page queue lock...
1094 		 * the list of pages on these unused objects is likely to be cold
1095 		 * w/r to the cpu cache which increases the time to scan the list
1096 		 * tenfold...  and we may have a 'run' of pages we can't utilize that
1097 		 * needs to be skipped over...
1098 		 */
1099 		if ((ep_limit = num_to_evict - (ep_freed + ep_moved)) > EVICT_PREPARE_LIMIT) {
1100 			ep_limit = EVICT_PREPARE_LIMIT;
1101 		}
1102 		ep_count = 0;
1103 
1104 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next_p) && object->vo_cache_pages_to_scan && ep_count < ep_limit) {
1105 			p = next_p;
1106 			next_p = (vm_page_t)vm_page_queue_next(&next_p->vmp_listq);
1107 
1108 			object->vo_cache_pages_to_scan--;
1109 
1110 			if (VM_PAGE_WIRED(p) || p->vmp_busy || p->vmp_cleaning || p->vmp_laundry) {
1111 				vm_page_queue_remove(&object->memq, p, vmp_listq);
1112 				vm_page_queue_enter(&object->memq, p, vmp_listq);
1113 
1114 				ep_skipped++;
1115 				continue;
1116 			}
1117 			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1118 				vm_page_queue_remove(&object->memq, p, vmp_listq);
1119 				vm_page_queue_enter(&object->memq, p, vmp_listq);
1120 
1121 				pmap_clear_reference(VM_PAGE_GET_PHYS_PAGE(p));
1122 			}
1123 			ep_array[ep_count++] = p;
1124 		}
1125 		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_START, object, object->resident_page_count, ep_freed, ep_moved, 0);
1126 
1127 		vm_page_lockspin_queues();
1128 
1129 		for (ep_index = 0; ep_index < ep_count; ep_index++) {
1130 			p = ep_array[ep_index];
1131 
1132 			if (p->vmp_wpmapped || p->vmp_dirty || p->vmp_precious) {
1133 				p->vmp_reference = FALSE;
1134 				p->vmp_no_cache = FALSE;
1135 
1136 				/*
1137 				 * we've already filtered out pages that are in the laundry
1138 				 * so if we get here, this page can't be on the pageout queue
1139 				 */
1140 				vm_page_queues_remove(p, FALSE);
1141 				vm_page_enqueue_inactive(p, TRUE);
1142 
1143 				ep_moved++;
1144 			} else {
1145 #if CONFIG_PHANTOM_CACHE
1146 				vm_phantom_cache_add_ghost(p);
1147 #endif
1148 				vm_page_free_prepare_queues(p);
1149 
1150 				assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1151 				/*
1152 				 * Add this page to our list of reclaimed pages,
1153 				 * to be freed later.
1154 				 */
1155 				p->vmp_snext = local_free_q;
1156 				local_free_q = p;
1157 
1158 				ep_freed++;
1159 			}
1160 		}
1161 		vm_page_unlock_queues();
1162 
1163 		KERNEL_DEBUG(0x13001f4 | DBG_FUNC_END, object, object->resident_page_count, ep_freed, ep_moved, 0);
1164 
1165 		if (local_free_q) {
1166 			vm_page_free_list(local_free_q, TRUE);
1167 			local_free_q = VM_PAGE_NULL;
1168 		}
1169 		if (object->vo_cache_pages_to_scan == 0) {
1170 			KERNEL_DEBUG(0x1300208, object, object->resident_page_count, ep_freed, ep_moved, 0);
1171 
1172 			vm_object_cache_remove(object);
1173 
1174 			KERNEL_DEBUG(0x13001fc, object, object->resident_page_count, ep_freed, ep_moved, 0);
1175 		}
1176 		/*
1177 		 * done with this object
1178 		 */
1179 		vm_object_unlock(object);
1180 		object = VM_OBJECT_NULL;
1181 
1182 		/*
1183 		 * at this point, we are not holding any locks
1184 		 */
1185 		if ((ep_freed + ep_moved) >= num_to_evict) {
1186 			/*
1187 			 * we've reached our target for the
1188 			 * number of pages to evict
1189 			 */
1190 			break;
1191 		}
1192 		vm_object_cache_lock_spin();
1193 	}
1194 	/*
1195 	 * put the page queues lock back to the caller's
1196 	 * idea of it
1197 	 */
1198 	vm_page_lock_queues();
1199 
1200 	vm_object_cache_pages_freed += ep_freed;
1201 	vm_object_cache_pages_moved += ep_moved;
1202 	vm_object_cache_pages_skipped += ep_skipped;
1203 
1204 	KERNEL_DEBUG(0x13001ec | DBG_FUNC_END, ep_freed, 0, 0, 0, 0);
1205 	return ep_freed;
1206 }
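/*
 * [Editor's note -- illustration only]
 * vm_object_cache_evict() expects to be entered with the page queues lock
 * held; it drops and retakes that lock internally and hands it back to the
 * caller on return.  A caller such as the pageout scan would ask for a batch
 * along these lines (the numbers are only an example):
 *
 *	pages_freed = vm_object_cache_evict(100, 10);
 *
 * i.e. "try to evict up to 100 pages, examining at most 10 cached objects".
 */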
1207 
1208 /*
1209  *	Routine:	vm_object_terminate
1210  *	Purpose:
1211  *		Free all resources associated with a vm_object.
1212  *	In/out conditions:
1213  *		Upon entry, the object must be locked,
1214  *		and the object must have exactly one reference.
1215  *
1216  *		The shadow object reference is left alone.
1217  *
1218  *		The object must be unlocked if it is found that pages
1219  *		must be flushed to a backing object.  If someone
1220  *		manages to map the object while it is being flushed,
1221  *		the object is returned unlocked and unchanged.  Otherwise,
1222  *		upon exit, the cache will be unlocked, and the
1223  *		object will cease to exist.
1224  */
1225 static kern_return_t
1226 vm_object_terminate(
1227 	vm_object_t     object)
1228 {
1229 	vm_object_t     shadow_object;
1230 
1231 	vm_object_lock_assert_exclusive(object);
1232 
1233 	if (!object->pageout && (!object->internal && object->can_persist) &&
1234 	    (object->pager != NULL || object->shadow_severed)) {
1235 		/*
1236 		 * Clear pager_trusted bit so that the pages get yanked
1237 		 * out of the object instead of cleaned in place.  This
1238 		 * prevents a deadlock in XMM and makes more sense anyway.
1239 		 */
1240 		object->pager_trusted = FALSE;
1241 
1242 		vm_object_reap_pages(object, REAP_TERMINATE);
1243 	}
1244 	/*
1245 	 *	Make sure the object isn't already being terminated
1246 	 */
1247 	if (object->terminating) {
1248 		vm_object_lock_assert_exclusive(object);
1249 		object->ref_count--;
1250 		assert(object->ref_count > 0);
1251 		vm_object_unlock(object);
1252 		return KERN_FAILURE;
1253 	}
1254 
1255 	/*
1256 	 * Did somebody get a reference to the object while we were
1257 	 * cleaning it?
1258 	 */
1259 	if (object->ref_count != 1) {
1260 		vm_object_lock_assert_exclusive(object);
1261 		object->ref_count--;
1262 		assert(object->ref_count > 0);
1263 		vm_object_unlock(object);
1264 		return KERN_FAILURE;
1265 	}
1266 
1267 	/*
1268 	 *	Make sure no one can look us up now.
1269 	 */
1270 
1271 	object->terminating = TRUE;
1272 	object->alive = FALSE;
1273 
1274 	if (!object->internal &&
1275 	    object->cached_list.next &&
1276 	    object->cached_list.prev) {
1277 		vm_object_cache_remove(object);
1278 	}
1279 
1280 	/*
1281 	 *	Detach the object from its shadow if we are the shadow's
1282 	 *	copy. The reference we hold on the shadow must be dropped
1283 	 *	by our caller.
1284 	 */
1285 	if (((shadow_object = object->shadow) != VM_OBJECT_NULL) &&
1286 	    !(object->pageout)) {
1287 		vm_object_lock(shadow_object);
1288 		if (shadow_object->vo_copy == object) {
1289 			shadow_object->vo_copy = VM_OBJECT_NULL;
1290 		}
1291 		vm_object_unlock(shadow_object);
1292 	}
1293 
1294 	if (object->paging_in_progress != 0 ||
1295 	    object->activity_in_progress != 0) {
1296 		/*
1297 		 * There are still some paging_in_progress references
1298 		 * on this object, meaning that there are some paging
1299 		 * or other I/O operations in progress for this VM object.
1300 		 * Such operations take some paging_in_progress references
1301 		 * up front to ensure that the object doesn't go away, but
1302 		 * they may also need to acquire a reference on the VM object,
1303 		 * to map it in kernel space, for example.  That means that
1304 		 * they may end up releasing the last reference on the VM
1305 		 * object, triggering its termination, while still holding
1306 		 * paging_in_progress references.  Waiting for these
1307 		 * pending paging_in_progress references to go away here would
1308 		 * deadlock.
1309 		 *
1310 		 * To avoid deadlocking, we'll let the vm_object_reaper_thread
1311 		 * complete the VM object termination if it still holds
1312 		 * paging_in_progress references at this point.
1313 		 *
1314 		 * No new paging_in_progress should appear now that the
1315 		 * VM object is "terminating" and not "alive".
1316 		 */
1317 		vm_object_reap_async(object);
1318 		vm_object_unlock(object);
1319 		/*
1320 		 * Return KERN_FAILURE to let the caller know that we
1321 		 * haven't completed the termination and it can't drop this
1322 		 * object's reference on its shadow object yet.
1323 		 * The reaper thread will take care of that once it has
1324 		 * completed this object's termination.
1325 		 */
1326 		return KERN_FAILURE;
1327 	}
1328 	/*
1329 	 * complete the VM object termination
1330 	 */
1331 	vm_object_reap(object);
1332 	object = VM_OBJECT_NULL;
1333 
1334 	/*
1335 	 * the object lock was released by vm_object_reap()
1336 	 *
1337 	 * KERN_SUCCESS means that this object has been terminated
1338 	 * and no longer needs its shadow object but still holds a
1339 	 * reference on it.
1340 	 * The caller is responsible for dropping that reference.
1341 	 * We can't call vm_object_deallocate() here because that
1342 	 * would create a recursion.
1343 	 */
1344 	return KERN_SUCCESS;
1345 }
1346 
1347 
1348 /*
1349  * vm_object_reap():
1350  *
1351  * Complete the termination of a VM object after it's been marked
1352  * as "terminating" and "!alive" by vm_object_terminate().
1353  *
1354  * The VM object must be locked by caller.
1355  * The lock will be released on return and the VM object is no longer valid.
1356  */
1357 
1358 void
1359 vm_object_reap(
1360 	vm_object_t object)
1361 {
1362 	memory_object_t         pager;
1363 
1364 	vm_object_lock_assert_exclusive(object);
1365 	assert(object->paging_in_progress == 0);
1366 	assert(object->activity_in_progress == 0);
1367 
1368 	vm_object_reap_count++;
1369 
1370 	/*
1371 	 * Disown this purgeable object to cleanup its owner's purgeable
1372 	 * ledgers.  We need to do this before disconnecting the object
1373 	 * from its pager, to properly account for compressed pages.
1374 	 */
1375 	if (object->internal &&
1376 	    (object->purgable != VM_PURGABLE_DENY ||
1377 	    object->vo_ledger_tag)) {
1378 		int ledger_flags;
1379 		kern_return_t kr;
1380 
1381 		ledger_flags = 0;
1382 		if (object->vo_no_footprint) {
1383 			ledger_flags |= VM_LEDGER_FLAG_NO_FOOTPRINT;
1384 		}
1385 		assert(!object->alive);
1386 		assert(object->terminating);
1387 		kr = vm_object_ownership_change(object,
1388 		    object->vo_ledger_tag,   /* unchanged */
1389 		    NULL,                    /* no owner */
1390 		    ledger_flags,
1391 		    FALSE);                  /* task_objq not locked */
1392 		assert(kr == KERN_SUCCESS);
1393 		assert(object->vo_owner == NULL);
1394 	}
1395 
1396 #if DEVELOPMENT || DEBUG
1397 	if (object->object_is_shared_cache &&
1398 	    object->pager != NULL &&
1399 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1400 		OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1401 	}
1402 #endif /* DEVELOPMENT || DEBUG */
1403 
1404 	pager = object->pager;
1405 	object->pager = MEMORY_OBJECT_NULL;
1406 
1407 	if (pager != MEMORY_OBJECT_NULL) {
1408 		memory_object_control_disable(&object->pager_control);
1409 	}
1410 
1411 	object->ref_count--;
1412 	assert(object->ref_count == 0);
1413 
1414 	/*
1415 	 * remove from purgeable queue if it's on
1416 	 */
1417 	if (object->internal) {
1418 		assert(VM_OBJECT_OWNER(object) == TASK_NULL);
1419 
1420 		VM_OBJECT_UNWIRED(object);
1421 
1422 		if (object->purgable == VM_PURGABLE_DENY) {
1423 			/* not purgeable: nothing to do */
1424 		} else if (object->purgable == VM_PURGABLE_VOLATILE) {
1425 			purgeable_q_t queue;
1426 
1427 			queue = vm_purgeable_object_remove(object);
1428 			assert(queue);
1429 
1430 			if (object->purgeable_when_ripe) {
1431 				/*
1432 				 * Must take page lock for this -
1433 				 * using it to protect token queue
1434 				 */
1435 				vm_page_lock_queues();
1436 				vm_purgeable_token_delete_first(queue);
1437 
1438 				assert(queue->debug_count_objects >= 0);
1439 				vm_page_unlock_queues();
1440 			}
1441 
1442 			/*
1443 			 * Update "vm_page_purgeable_count" in bulk and mark
1444 			 * object as VM_PURGABLE_EMPTY to avoid updating
1445 			 * "vm_page_purgeable_count" again in vm_page_remove()
1446 			 * when reaping the pages.
1447 			 */
1448 			unsigned int delta;
1449 			assert(object->resident_page_count >=
1450 			    object->wired_page_count);
1451 			delta = (object->resident_page_count -
1452 			    object->wired_page_count);
1453 			if (delta != 0) {
1454 				assert(vm_page_purgeable_count >= delta);
1455 				OSAddAtomic(-delta,
1456 				    (SInt32 *)&vm_page_purgeable_count);
1457 			}
1458 			if (object->wired_page_count != 0) {
1459 				assert(vm_page_purgeable_wired_count >=
1460 				    object->wired_page_count);
1461 				OSAddAtomic(-object->wired_page_count,
1462 				    (SInt32 *)&vm_page_purgeable_wired_count);
1463 			}
1464 			object->purgable = VM_PURGABLE_EMPTY;
1465 		} else if (object->purgable == VM_PURGABLE_NONVOLATILE ||
1466 		    object->purgable == VM_PURGABLE_EMPTY) {
1467 			/* remove from nonvolatile queue */
1468 			vm_purgeable_nonvolatile_dequeue(object);
1469 		} else {
1470 			panic("object %p in unexpected purgeable state 0x%x",
1471 			    object, object->purgable);
1472 		}
1473 		if (object->transposed &&
1474 		    object->cached_list.next != NULL &&
1475 		    object->cached_list.prev == NULL) {
1476 			/*
1477 			 * object->cached_list.next "points" to the
1478 			 * object that was transposed with this object.
1479 			 */
1480 		} else {
1481 			assert(object->cached_list.next == NULL);
1482 		}
1483 		assert(object->cached_list.prev == NULL);
1484 	}
1485 
1486 	if (object->pageout) {
1487 		/*
1488 		 * free all remaining pages tabled on
1489 		 * this object
1490 	 * clean up its shadow
1491 		 */
1492 		assert(object->shadow != VM_OBJECT_NULL);
1493 
1494 		vm_pageout_object_terminate(object);
1495 	} else if (object->resident_page_count) {
1496 		/*
1497 		 * free all remaining pages tabled on
1498 		 * this object
1499 		 */
1500 		vm_object_reap_pages(object, REAP_REAP);
1501 	}
1502 	assert(vm_page_queue_empty(&object->memq));
1503 	assert(object->paging_in_progress == 0);
1504 	assert(object->activity_in_progress == 0);
1505 	assert(object->ref_count == 0);
1506 
1507 	/*
1508 	 * If the pager has not already been released by
1509 	 * vm_object_destroy, we need to terminate it and
1510 	 * release our reference to it here.
1511 	 */
1512 	if (pager != MEMORY_OBJECT_NULL) {
1513 		vm_object_unlock(object);
1514 		vm_object_release_pager(pager);
1515 		vm_object_lock(object);
1516 	}
1517 
1518 	/* kick off anyone waiting on terminating */
1519 	object->terminating = FALSE;
1520 	vm_object_paging_begin(object);
1521 	vm_object_paging_end(object);
1522 	vm_object_unlock(object);
1523 
1524 	object->shadow = VM_OBJECT_NULL;
1525 
1526 #if VM_OBJECT_TRACKING
1527 	if (vm_object_tracking_btlog) {
1528 		btlog_erase(vm_object_tracking_btlog, object);
1529 	}
1530 #endif /* VM_OBJECT_TRACKING */
1531 
1532 	vm_object_lock_destroy(object);
1533 	/*
1534 	 *	Free the space for the object.
1535 	 */
1536 	zfree(vm_object_zone, object);
1537 	object = VM_OBJECT_NULL;
1538 }
1539 
1540 
1541 unsigned int vm_max_batch = 256;
1542 
1543 #define V_O_R_MAX_BATCH 128
1544 
1545 #define BATCH_LIMIT(max)        (vm_max_batch >= max ? max : vm_max_batch)
1546 
1547 
1548 #define VM_OBJ_REAP_FREELIST(_local_free_q, do_disconnect)              \
1549 	MACRO_BEGIN                                                     \
1550 	if (_local_free_q) {                                            \
1551 	        if (do_disconnect) {                                    \
1552 	                vm_page_t m;                                    \
1553 	                for (m = _local_free_q;                         \
1554 	                     m != VM_PAGE_NULL;                         \
1555 	                     m = m->vmp_snext) {                        \
1556 	                        if (m->vmp_pmapped) {                   \
1557 	                                pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m)); \
1558 	                        }                                       \
1559 	                }                                               \
1560 	        }                                                       \
1561 	        vm_page_free_list(_local_free_q, TRUE);                 \
1562 	        _local_free_q = VM_PAGE_NULL;                           \
1563 	}                                                               \
1564 	MACRO_END
1565 
1566 
1567 void
1568 vm_object_reap_pages(
1569 	vm_object_t     object,
1570 	int             reap_type)
1571 {
1572 	vm_page_t       p;
1573 	vm_page_t       next;
1574 	vm_page_t       local_free_q = VM_PAGE_NULL;
1575 	int             loop_count;
1576 	boolean_t       disconnect_on_release;
1577 	pmap_flush_context      pmap_flush_context_storage;
1578 
1579 	if (reap_type == REAP_DATA_FLUSH) {
1580 		/*
1581 		 * We need to disconnect pages from all pmaps before
1582 		 * releasing them to the free list
1583 		 */
1584 		disconnect_on_release = TRUE;
1585 	} else {
1586 		/*
1587 		 * Either the caller has already disconnected the pages
1588 		 * from all pmaps, or we disconnect them here as we add
1589 	 * them to our local list of pages to be released.
1590 		 * No need to re-disconnect them when we release the pages
1591 		 * to the free list.
1592 		 */
1593 		disconnect_on_release = FALSE;
1594 	}
1595 
1596 restart_after_sleep:
1597 	if (vm_page_queue_empty(&object->memq)) {
1598 		return;
1599 	}
1600 	loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1601 
1602 	if (reap_type == REAP_PURGEABLE) {
1603 		pmap_flush_context_init(&pmap_flush_context_storage);
1604 	}
1605 
1606 	vm_page_lock_queues();
1607 
1608 	next = (vm_page_t)vm_page_queue_first(&object->memq);
1609 
1610 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
1611 		p = next;
1612 		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
1613 
1614 		if (--loop_count == 0) {
1615 			vm_page_unlock_queues();
1616 
1617 			if (local_free_q) {
1618 				if (reap_type == REAP_PURGEABLE) {
1619 					pmap_flush(&pmap_flush_context_storage);
1620 					pmap_flush_context_init(&pmap_flush_context_storage);
1621 				}
1622 				/*
1623 				 * Free the pages we reclaimed so far
1624 				 * and take a little break to avoid
1625 				 * hogging the page queue lock too long
1626 				 */
1627 				VM_OBJ_REAP_FREELIST(local_free_q,
1628 				    disconnect_on_release);
1629 			} else {
1630 				mutex_pause(0);
1631 			}
1632 
1633 			loop_count = BATCH_LIMIT(V_O_R_MAX_BATCH);
1634 
1635 			vm_page_lock_queues();
1636 		}
1637 		if (reap_type == REAP_DATA_FLUSH || reap_type == REAP_TERMINATE) {
1638 			if (p->vmp_busy || p->vmp_cleaning) {
1639 				vm_page_unlock_queues();
1640 				/*
1641 				 * free the pages reclaimed so far
1642 				 */
1643 				VM_OBJ_REAP_FREELIST(local_free_q,
1644 				    disconnect_on_release);
1645 
1646 				PAGE_SLEEP(object, p, THREAD_UNINT);
1647 
1648 				goto restart_after_sleep;
1649 			}
1650 			if (p->vmp_laundry) {
1651 				vm_pageout_steal_laundry(p, TRUE);
1652 			}
1653 		}
1654 		switch (reap_type) {
1655 		case REAP_DATA_FLUSH:
1656 			if (VM_PAGE_WIRED(p)) {
1657 				/*
1658 				 * this is an odd case... perhaps we should
1659 				 * zero-fill this page since we're conceptually
1660 				 * tossing its data at this point, but leaving
1661 				 * it on the object to honor the 'wire' contract
1662 				 */
1663 				continue;
1664 			}
1665 			break;
1666 
1667 		case REAP_PURGEABLE:
1668 			if (VM_PAGE_WIRED(p)) {
1669 				/*
1670 				 * can't purge a wired page
1671 				 */
1672 				vm_page_purged_wired++;
1673 				continue;
1674 			}
1675 			if (p->vmp_laundry && !p->vmp_busy && !p->vmp_cleaning) {
1676 				vm_pageout_steal_laundry(p, TRUE);
1677 			}
1678 
1679 			if (p->vmp_cleaning || p->vmp_laundry || p->vmp_absent) {
1680 				/*
1681 				 * page is being acted upon,
1682 				 * so don't mess with it
1683 				 */
1684 				vm_page_purged_others++;
1685 				continue;
1686 			}
1687 			if (p->vmp_busy) {
1688 				/*
1689 				 * We can't reclaim a busy page but we can
1690 				 * make it more likely to be paged (it's not wired) to make
1691 				 * sure that it gets considered by
1692 				 * vm_pageout_scan() later.
1693 				 */
1694 				if (VM_PAGE_PAGEABLE(p)) {
1695 					vm_page_deactivate(p);
1696 				}
1697 				vm_page_purged_busy++;
1698 				continue;
1699 			}
1700 
1701 			assert(!is_kernel_object(VM_PAGE_OBJECT(p)));
1702 
1703 			/*
1704 			 * we can discard this page...
1705 			 */
1706 			if (p->vmp_pmapped == TRUE) {
1707 				/*
1708 				 * unmap the page
1709 				 */
1710 				pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), PMAP_OPTIONS_NOFLUSH | PMAP_OPTIONS_NOREFMOD, (void *)&pmap_flush_context_storage);
1711 			}
1712 			vm_page_purged_count++;
1713 
1714 			break;
1715 
1716 		case REAP_TERMINATE:
1717 			if (p->vmp_absent || p->vmp_private) {
1718 				/*
1719 				 *	For private pages, VM_PAGE_FREE just
1720 				 *	leaves the page structure around for
1721 				 *	its owner to clean up.  For absent
1722 				 *	pages, the structure is returned to
1723 				 *	the appropriate pool.
1724 				 */
1725 				break;
1726 			}
1727 			if (p->vmp_fictitious) {
1728 				assert(VM_PAGE_GET_PHYS_PAGE(p) == vm_page_guard_addr);
1729 				break;
1730 			}
1731 			if (!p->vmp_dirty && p->vmp_wpmapped) {
1732 				p->vmp_dirty = pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p));
1733 			}
1734 
1735 			if ((p->vmp_dirty || p->vmp_precious) && !VMP_ERROR_GET(p) && object->alive) {
1736 				assert(!object->internal);
1737 
1738 				p->vmp_free_when_done = TRUE;
1739 
1740 				if (!p->vmp_laundry) {
1741 					vm_page_queues_remove(p, TRUE);
1742 					/*
1743 					 * flush page... page will be freed
1744 					 * upon completion of I/O
1745 					 */
1746 					vm_pageout_cluster(p);
1747 				}
1748 				vm_page_unlock_queues();
1749 				/*
1750 				 * free the pages reclaimed so far
1751 				 */
1752 				VM_OBJ_REAP_FREELIST(local_free_q,
1753 				    disconnect_on_release);
1754 
1755 				vm_object_paging_wait(object, THREAD_UNINT);
1756 
1757 				goto restart_after_sleep;
1758 			}
1759 			break;
1760 
1761 		case REAP_REAP:
1762 			break;
1763 		}
1764 		vm_page_free_prepare_queues(p);
1765 		assert(p->vmp_pageq.next == 0 && p->vmp_pageq.prev == 0);
1766 		/*
1767 		 * Add this page to our list of reclaimed pages,
1768 		 * to be freed later.
1769 		 */
1770 		p->vmp_snext = local_free_q;
1771 		local_free_q = p;
1772 	}
1773 	vm_page_unlock_queues();
1774 
1775 	/*
1776 	 * Free the remaining reclaimed pages
1777 	 */
1778 	if (reap_type == REAP_PURGEABLE) {
1779 		pmap_flush(&pmap_flush_context_storage);
1780 	}
1781 
1782 	VM_OBJ_REAP_FREELIST(local_free_q,
1783 	    disconnect_on_release);
1784 }
1785 
1786 
1787 void
1788 vm_object_reap_async(
1789 	vm_object_t     object)
1790 {
1791 	vm_object_lock_assert_exclusive(object);
1792 
1793 	vm_object_reaper_lock_spin();
1794 
1795 	vm_object_reap_count_async++;
1796 
1797 	/* enqueue the VM object... */
1798 	queue_enter(&vm_object_reaper_queue, object,
1799 	    vm_object_t, cached_list);
1800 
1801 	vm_object_reaper_unlock();
1802 
1803 	/* ... and wake up the reaper thread */
1804 	thread_wakeup((event_t) &vm_object_reaper_queue);
1805 }
1806 
1807 
1808 void
1809 vm_object_reaper_thread(void)
1810 {
1811 	vm_object_t     object, shadow_object;
1812 
1813 	vm_object_reaper_lock_spin();
1814 
1815 	while (!queue_empty(&vm_object_reaper_queue)) {
1816 		queue_remove_first(&vm_object_reaper_queue,
1817 		    object,
1818 		    vm_object_t,
1819 		    cached_list);
1820 
1821 		vm_object_reaper_unlock();
1822 		vm_object_lock(object);
1823 
1824 		assert(object->terminating);
1825 		assert(!object->alive);
1826 
1827 		/*
1828 		 * The pageout daemon might be playing with our pages.
1829 		 * Now that the object is dead, it won't touch any more
1830 		 * pages, but some pages might already be on their way out.
1831 		 * Hence, we wait until the active paging activities have
1832 		 * ceased before we break the association with the pager
1833 		 * itself.
1834 		 */
1835 		while (object->paging_in_progress != 0 ||
1836 		    object->activity_in_progress != 0) {
1837 			vm_object_wait(object,
1838 			    VM_OBJECT_EVENT_PAGING_IN_PROGRESS,
1839 			    THREAD_UNINT);
1840 			vm_object_lock(object);
1841 		}
1842 
1843 		shadow_object =
1844 		    object->pageout ? VM_OBJECT_NULL : object->shadow;
1845 
1846 		vm_object_reap(object);
1847 		/* cache is unlocked and object is no longer valid */
1848 		object = VM_OBJECT_NULL;
1849 
1850 		if (shadow_object != VM_OBJECT_NULL) {
1851 			/*
1852 			 * Drop the reference "object" was holding on
1853 			 * its shadow object.
1854 			 */
1855 			vm_object_deallocate(shadow_object);
1856 			shadow_object = VM_OBJECT_NULL;
1857 		}
1858 		vm_object_reaper_lock_spin();
1859 	}
1860 
1861 	/* wait for more work... */
1862 	assert_wait((event_t) &vm_object_reaper_queue, THREAD_UNINT);
1863 
1864 	vm_object_reaper_unlock();
1865 
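	/*
	 * Block with this function as the continuation: when
	 * vm_object_reap_async() does its thread_wakeup(), the reaper is
	 * restarted from the top of vm_object_reaper_thread() rather than
	 * resuming here.
	 */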
1866 	thread_block((thread_continue_t) vm_object_reaper_thread);
1867 	/*NOTREACHED*/
1868 }
1869 
1870 /*
1871  *	Routine:	vm_object_release_pager
1872  *	Purpose:	Terminate the pager and, upon completion,
1873  *			release our last reference to it.
1874  */
1875 static void
1876 vm_object_release_pager(
1877 	memory_object_t pager)
1878 {
1879 	/*
1880 	 *	Terminate the pager.
1881 	 */
1882 
1883 	(void) memory_object_terminate(pager);
1884 
1885 	/*
1886 	 *	Release reference to pager.
1887 	 */
1888 	memory_object_deallocate(pager);
1889 }
1890 
1891 /*
1892  *	Routine:	vm_object_destroy
1893  *	Purpose:
1894  *		Shut down a VM object, despite the
1895  *		presence of address map (or other) references
1896  *		to the vm_object.
1897  */
1898 #if MACH_ASSERT
1899 extern uint32_t system_inshutdown;
1900 int fbdp_no_panic = 1;
1901 #endif /* MACH_ASSERT */
1902 kern_return_t
1903 vm_object_destroy(
1904 	vm_object_t             object,
1905 	__unused kern_return_t          reason)
1906 {
1907 	memory_object_t         old_pager;
1908 
1909 	if (object == VM_OBJECT_NULL) {
1910 		return KERN_SUCCESS;
1911 	}
1912 
1913 	/*
1914 	 *	Remove the pager association immediately.
1915 	 *
1916 	 *	This will prevent the memory manager from further
1917 	 *	meddling.  [If it wanted to flush data or make
1918 	 *	other changes, it should have done so before performing
1919 	 *	the destroy call.]
1920 	 */
1921 
1922 	vm_object_lock(object);
1923 
1924 #if FBDP_DEBUG_OBJECT_NO_PAGER
1925 	static bool fbdp_no_panic_retrieved = false;
1926 	if (!fbdp_no_panic_retrieved) {
1927 		PE_parse_boot_argn("fbdp_no_panic4", &fbdp_no_panic, sizeof(fbdp_no_panic));
1928 		fbdp_no_panic_retrieved = true;
1929 	}
1930 
1931 	bool forced_unmount = false;
1932 	if (object->named &&
1933 	    object->ref_count > 2 &&
1934 	    object->pager != NULL &&
1935 	    vnode_pager_get_forced_unmount(object->pager, &forced_unmount) == KERN_SUCCESS &&
1936 	    forced_unmount == false) {
1937 		if (!fbdp_no_panic) {
1938 			panic("FBDP rdar://99829401 object %p refs %d pager %p (no forced unmount)\n", object, object->ref_count, object->pager);
1939 		}
1940 		DTRACE_VM3(vm_object_destroy_no_forced_unmount,
1941 		    vm_object_t, object,
1942 		    int, object->ref_count,
1943 		    memory_object_t, object->pager);
1944 	}
1945 
1946 	if (object->fbdp_tracked) {
1947 		if (object->ref_count > 2 && !system_inshutdown) {
1948 			if (!fbdp_no_panic) {
1949 				panic("FBDP/4 rdar://99829401 object %p refs %d pager %p (tracked)\n", object, object->ref_count, object->pager);
1950 			}
1951 		}
1952 		object->fbdp_tracked = false;
1953 	}
1954 #endif /* FBDP_DEBUG_OBJECT_NO_PAGER */
1955 
1956 	object->can_persist = FALSE;
1957 	object->named = FALSE;
1958 #if 00
1959 	object->alive = FALSE;
1960 #endif /* 00 */
1961 
1962 #if DEVELOPMENT || DEBUG
1963 	if (object->object_is_shared_cache &&
1964 	    object->pager != NULL &&
1965 	    object->pager->mo_pager_ops == &shared_region_pager_ops) {
1966 		OSAddAtomic(-object->resident_page_count, &shared_region_pagers_resident_count);
1967 	}
1968 #endif /* DEVELOPMENT || DEBUG */
1969 
1970 	old_pager = object->pager;
1971 	object->pager = MEMORY_OBJECT_NULL;
1972 	if (old_pager != MEMORY_OBJECT_NULL) {
1973 		memory_object_control_disable(&object->pager_control);
1974 	}
1975 
1976 	/*
1977 	 * Wait for the existing paging activity (that got
1978 	 * through before we nulled out the pager) to subside.
1979 	 */
1980 
1981 	vm_object_paging_wait(object, THREAD_UNINT);
1982 	vm_object_unlock(object);
1983 
1984 	/*
1985 	 *	Terminate the object now.
1986 	 */
1987 	if (old_pager != MEMORY_OBJECT_NULL) {
1988 		vm_object_release_pager(old_pager);
1989 
1990 		/*
1991 		 * JMM - Release the caller's reference.  This assumes the
1992 		 * caller had a reference to release, which is a big (but
1993 		 * currently valid) assumption if this is driven from the
1994 		 * vnode pager (it is holding a named reference when making
1995 		 * this call).
1996 		 */
1997 		vm_object_deallocate(object);
1998 	}
1999 	return KERN_SUCCESS;
2000 }
2001 
2002 /*
2003  * The "chunk" macros are used by routines below when looking for pages to deactivate.  These
2004  * exist because of the need to handle shadow chains.  When deactivating pages, we only
2005  * want to deactivate the ones at the topmost level in the object chain.  In order to do
2006  * this efficiently, the specified address range is divided up into "chunks" and we use
2007  * a bit map to keep track of which pages have already been processed as we descend down
2008  * the shadow chain.  These chunk macros hide the details of the bit map implementation
2009  * as much as we can.
2010  *
2011  * For convenience, we use a 64-bit data type as the bit map, and therefore a chunk is
2012  * set to 64 pages.  The bit map is indexed from the low-order end, so that the lowest
2013  * order bit represents page 0 in the current range and highest order bit represents
2014  * page 63.
2015  *
2016  * For further convenience, we also use negative logic for the page state in the bit map.
2017  * The bit is set to 1 to indicate it has not yet been seen, and to 0 to indicate it has
2018  * been processed.  This way we can simply test the 64-bit long word to see if it's zero
2019  * to easily tell if the whole range has been processed.  Therefore, the bit map starts
2020  * out with all the bits set.  The macros below hide all these details from the caller.
2021  */
2022 
2023 #define PAGES_IN_A_CHUNK        64      /* The number of pages in the chunk must */
2024                                         /* be the same as the number of bits in  */
2025                                         /* the chunk_state_t type. We use 64     */
2026                                         /* just for convenience.		 */
2027 
2028 #define CHUNK_SIZE      (PAGES_IN_A_CHUNK * PAGE_SIZE_64)       /* Size of a chunk in bytes */
2029 
2030 typedef uint64_t        chunk_state_t;
2031 
2032 /*
2033  * The bit map uses negative logic, so we start out with all 64 bits set to indicate
2034  * that no pages have been processed yet.  Also, if len is less than the full CHUNK_SIZE,
2035  * then we mark pages beyond the len as having been "processed" so that we don't waste time
2036  * looking at pages in that range.  This can save us from unnecessarily chasing down the
2037  * shadow chain.
2038  */
2039 
2040 #define CHUNK_INIT(c, len)                                              \
2041 	MACRO_BEGIN                                                     \
2042 	uint64_t p;                                                     \
2043                                                                         \
2044 	(c) = 0xffffffffffffffffLL;                                     \
2045                                                                         \
2046 	for (p = (len) / PAGE_SIZE_64; p < PAGES_IN_A_CHUNK; p++)       \
2047 	        MARK_PAGE_HANDLED(c, p);                                \
2048 	MACRO_END
2049 
2050 
2051 /*
2052  * Return true if all pages in the chunk have not yet been processed.
2053  */
2054 
2055 #define CHUNK_NOT_COMPLETE(c)   ((c) != 0)
2056 
2057 /*
2058  * Return true if the page at offset 'p' in the bit map has already been handled
2059  * while processing a higher level object in the shadow chain.
2060  */
2061 
2062 #define PAGE_ALREADY_HANDLED(c, p)      (((c) & (1ULL << (p))) == 0)
2063 
2064 /*
2065  * Mark the page at offset 'p' in the bit map as having been processed.
2066  */
2067 
2068 #define MARK_PAGE_HANDLED(c, p) \
2069 MACRO_BEGIN \
2070 	(c) = (c) & ~(1ULL << (p)); \
2071 MACRO_END
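
/*
 * For example, with len == 3 * PAGE_SIZE_64, CHUNK_INIT() leaves the chunk
 * state at 0x7 (pages 0-2 still pending, pages 3-63 pre-marked as handled).
 * MARK_PAGE_HANDLED(c, 1) then clears bit 1, leaving 0x5, after which
 * PAGE_ALREADY_HANDLED(c, 1) is true.  CHUNK_NOT_COMPLETE(c) keeps returning
 * true until bits 0 and 2 have been cleared as well.
 */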
2072 
2073 
2074 /*
2075  * Return true if the page at the given offset has been paged out.  Object is
2076  * locked upon entry and returned locked.
2077  */
2078 
2079 static boolean_t
2080 page_is_paged_out(
2081 	vm_object_t             object,
2082 	vm_object_offset_t      offset)
2083 {
2084 	if (object->internal &&
2085 	    object->alive &&
2086 	    !object->terminating &&
2087 	    object->pager_ready) {
2088 		if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset)
2089 		    == VM_EXTERNAL_STATE_EXISTS) {
2090 			return TRUE;
2091 		}
2092 	}
2093 	return FALSE;
2094 }
2095 
2096 
2097 
2098 /*
2099  * madvise_free_debug
2100  *
2101  * To help debug madvise(MADV_FREE*) mis-usage, this triggers a
2102  * zero-fill as soon as a page is affected by a madvise(MADV_FREE*), to
2103  * simulate the loss of the page's contents as if the page had been
2104  * reclaimed and then re-faulted.
2105  */
2106 #if DEVELOPMENT || DEBUG
2107 int madvise_free_debug = 0;
2108 int madvise_free_debug_sometimes = 1;
2109 #else /* DEVELOPMENT || DEBUG */
2110 int madvise_free_debug = 0;
2111 int madvise_free_debug_sometimes = 0;
2112 #endif /* DEVELOPMENT || DEBUG */
2113 int madvise_free_counter = 0;
2114 
2115 __options_decl(deactivate_flags_t, uint32_t, {
2116 	DEACTIVATE_KILL         = 0x1,
2117 	DEACTIVATE_REUSABLE     = 0x2,
2118 	DEACTIVATE_ALL_REUSABLE = 0x4,
2119 	DEACTIVATE_CLEAR_REFMOD = 0x8,
2120 	DEACTIVATE_REUSABLE_NO_WRITE = 0x10
2121 });
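
/*
 * These flags are built by vm_object_deactivate_pages() from its kill_page,
 * reusable_page and reusable_no_write arguments and passed down through
 * deactivate_a_chunk() to deactivate_pages_in_object().
 */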
2122 
2123 /*
2124  * Deactivate the pages in the specified object and range.  If kill_page is set, also discard any
2125  * page modified state from the pmap.  Update the chunk_state as we go along.  The caller must specify
2126  * a size that is less than or equal to the CHUNK_SIZE.
2127  */
2128 
2129 static void
2130 deactivate_pages_in_object(
2131 	vm_object_t             object,
2132 	vm_object_offset_t      offset,
2133 	vm_object_size_t        size,
2134 	deactivate_flags_t      flags,
2135 	chunk_state_t           *chunk_state,
2136 	pmap_flush_context      *pfc,
2137 	struct pmap             *pmap,
2138 	vm_map_offset_t         pmap_offset)
2139 {
2140 	vm_page_t       m;
2141 	int             p;
2142 	struct  vm_page_delayed_work    dw_array;
2143 	struct  vm_page_delayed_work    *dwp, *dwp_start;
2144 	bool            dwp_finish_ctx = TRUE;
2145 	int             dw_count;
2146 	int             dw_limit;
2147 	unsigned int    reusable = 0;
2148 
2149 	/*
2150 	 * Examine each page in the chunk.  The variable 'p' is the page number relative to the start of the
2151 	 * chunk.  Since this routine is called once for each level in the shadow chain, the chunk_state may
2152 	 * have pages marked as having been processed already.  We stop the loop early if we find we've handled
2153 	 * all the pages in the chunk.
2154 	 */
2155 
2156 	dwp_start = dwp = NULL;
2157 	dw_count = 0;
2158 	dw_limit = DELAYED_WORK_LIMIT(DEFAULT_DELAYED_WORK_LIMIT);
2159 	dwp_start = vm_page_delayed_work_get_ctx();
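	/*
	 * If we can't get a delayed-work context, fall back to the single
	 * on-stack entry below; with dw_limit forced to 1, the delayed work
	 * is flushed after every page instead of in batches.
	 */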
2160 	if (dwp_start == NULL) {
2161 		dwp_start = &dw_array;
2162 		dw_limit = 1;
2163 		dwp_finish_ctx = FALSE;
2164 	}
2165 
2166 	dwp = dwp_start;
2167 
2168 	for (p = 0; size && CHUNK_NOT_COMPLETE(*chunk_state); p++, size -= PAGE_SIZE_64, offset += PAGE_SIZE_64, pmap_offset += PAGE_SIZE_64) {
2169 		/*
2170 		 * If this offset has already been found and handled in a higher level object, then don't
2171 		 * do anything with it in the current shadow object.
2172 		 */
2173 
2174 		if (PAGE_ALREADY_HANDLED(*chunk_state, p)) {
2175 			continue;
2176 		}
2177 
2178 		/*
2179 		 * See if the page at this offset is around.  First check to see if the page is resident,
2180 		 * then if not, check the existence map or with the pager.
2181 		 */
2182 
2183 		if ((m = vm_page_lookup(object, offset)) != VM_PAGE_NULL) {
2184 			/*
2185 			 * We found a page we were looking for.  Mark it as "handled" now in the chunk_state
2186 			 * so that we won't bother looking for a page at this offset again if there are more
2187 			 * shadow objects.  Then deactivate the page.
2188 			 */
2189 
2190 			MARK_PAGE_HANDLED(*chunk_state, p);
2191 
2192 			if ((!VM_PAGE_WIRED(m)) && (!m->vmp_private) && (!m->vmp_gobbled) && (!m->vmp_busy) &&
2193 			    (!m->vmp_laundry) && (!m->vmp_cleaning) && !(m->vmp_free_when_done)) {
2194 				int     clear_refmod_mask;
2195 				int     pmap_options;
2196 				dwp->dw_mask = 0;
2197 
2198 				pmap_options = 0;
2199 				clear_refmod_mask = VM_MEM_REFERENCED;
2200 				dwp->dw_mask |= DW_clear_reference;
2201 
2202 				if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2203 					if (!(flags & DEACTIVATE_REUSABLE_NO_WRITE) &&
2204 					    (madvise_free_debug ||
2205 					    (madvise_free_debug_sometimes &&
2206 					    madvise_free_counter++ & 0x1))) {
2207 						/*
2208 						 * zero-fill the page (or every
2209 						 * other page) now to simulate
2210 						 * it being reclaimed and
2211 						 * re-faulted.
2212 						 */
2213 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2214 						if (!m->vmp_unmodified_ro) {
2215 #else /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2216 						if (true) {
2217 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2218 							pmap_zero_page(VM_PAGE_GET_PHYS_PAGE(m));
2219 						}
2220 					}
2221 					m->vmp_precious = FALSE;
2222 					m->vmp_dirty = FALSE;
2223 
2224 					clear_refmod_mask |= VM_MEM_MODIFIED;
2225 					if (m->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) {
2226 						/*
2227 						 * This page is now clean and
2228 						 * reclaimable.  Move it out
2229 						 * of the throttled queue, so
2230 						 * that vm_pageout_scan() can
2231 						 * find it.
2232 						 */
2233 						dwp->dw_mask |= DW_move_page;
2234 					}
2235 
2236 #if 0
2237 #if CONFIG_TRACK_UNMODIFIED_ANON_PAGES
2238 					/*
2239 					 * COMMENT BLOCK ON WHY THIS SHOULDN'T BE DONE.
2240 					 *
2241 					 * Since we are about to do a VM_COMPRESSOR_PAGER_STATE_CLR
2242 					 * below for this page, which drops any existing compressor
2243 					 * storage of this page (eg side-effect of a CoW operation or
2244 					 * a collapse operation), it is tempting to think that we should
2245 					 * treat this page as if it was just decompressed (during which
2246 					 * we also drop existing compressor storage) and so start its life
2247 					 * out with vmp_unmodified_ro set to FALSE.
2248 					 *
2249 					 * However, we can't do that here because we could swing around
2250 					 * and re-access this page in a read-only fault.
2251 					 * Clearing this bit means we'll try to zero it up above
2252 					 * and fail.
2253 					 *
2254 					 * Note that clearing the bit is unnecessary regardless because
2255 					 * dirty state has been cleared. During the next soft fault, the
2256 					 * right state will be restored and things will progress just fine.
2257 					 */
2258 					if (m->vmp_unmodified_ro == true) {
2259 						/* Need object and pageq locks for bit manipulation*/
2260 						m->vmp_unmodified_ro = false;
2261 						os_atomic_dec(&compressor_ro_uncompressed);
2262 					}
2263 #endif /* CONFIG_TRACK_UNMODIFIED_ANON_PAGES */
2264 #endif /* 0 */
2265 					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2266 
2267 					if ((flags & DEACTIVATE_REUSABLE) && !m->vmp_reusable) {
2268 						assert(!(flags & DEACTIVATE_ALL_REUSABLE));
2269 						assert(!object->all_reusable);
2270 						m->vmp_reusable = TRUE;
2271 						object->reusable_page_count++;
2272 						assert(object->resident_page_count >= object->reusable_page_count);
2273 						reusable++;
2274 						/*
2275 						 * Tell pmap this page is now
2276 						 * "reusable" (to update pmap
2277 						 * stats for all mappings).
2278 						 */
2279 						pmap_options |= PMAP_OPTIONS_SET_REUSABLE;
2280 					}
2281 				}
2282 				if (flags & DEACTIVATE_CLEAR_REFMOD) {
2283 					/*
2284 					 * The caller didn't clear the refmod bits in advance.
2285 					 * Clear them for this page now.
2286 					 */
2287 					pmap_options |= PMAP_OPTIONS_NOFLUSH;
2288 					pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE(m),
2289 					    clear_refmod_mask,
2290 					    pmap_options,
2291 					    (void *)pfc);
2292 				}
2293 
2294 				if ((m->vmp_q_state != VM_PAGE_ON_THROTTLED_Q) &&
2295 				    !(flags & (DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE))) {
2296 					dwp->dw_mask |= DW_move_page;
2297 				}
2298 
2299 				if (dwp->dw_mask) {
2300 					VM_PAGE_ADD_DELAYED_WORK(dwp, m,
2301 					    dw_count);
2302 				}
2303 
2304 				if (dw_count >= dw_limit) {
2305 					if (reusable) {
2306 						OSAddAtomic(reusable,
2307 						    &vm_page_stats_reusable.reusable_count);
2308 						vm_page_stats_reusable.reusable += reusable;
2309 						reusable = 0;
2310 					}
2311 					vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2312 
2313 					dwp = dwp_start;
2314 					dw_count = 0;
2315 				}
2316 			}
2317 		} else {
2318 			/*
2319 			 * The page at this offset isn't memory resident, check to see if it's
2320 			 * been paged out.  If so, mark it as handled so we don't bother looking
2321 			 * for it in the shadow chain.
2322 			 */
2323 
2324 			if (page_is_paged_out(object, offset)) {
2325 				MARK_PAGE_HANDLED(*chunk_state, p);
2326 
2327 				/*
2328 				 * If we're killing a non-resident page, then clear the page in the existence
2329 				 * map so we don't bother paging it back in if it's touched again in the future.
2330 				 */
2331 
2332 				if ((flags & DEACTIVATE_KILL) && (object->internal)) {
2333 					VM_COMPRESSOR_PAGER_STATE_CLR(object, offset);
2334 
2335 					if (pmap != PMAP_NULL) {
2336 						/*
2337 						 * Tell pmap that this page
2338 						 * is no longer mapped, to
2339 						 * adjust the footprint ledger
2340 						 * because this page is no
2341 						 * longer compressed.
2342 						 */
2343 						pmap_remove_options(
2344 							pmap,
2345 							pmap_offset,
2346 							(pmap_offset +
2347 							PAGE_SIZE),
2348 							PMAP_OPTIONS_REMOVE);
2349 					}
2350 				}
2351 			}
2352 		}
2353 	}
2354 
2355 	if (reusable) {
2356 		OSAddAtomic(reusable, &vm_page_stats_reusable.reusable_count);
2357 		vm_page_stats_reusable.reusable += reusable;
2358 		reusable = 0;
2359 	}
2360 
2361 	if (dw_count) {
2362 		vm_page_do_delayed_work(object, VM_KERN_MEMORY_NONE, dwp_start, dw_count);
2363 		dwp = dwp_start;
2364 		dw_count = 0;
2365 	}
2366 
2367 	if (dwp_start && dwp_finish_ctx) {
2368 		vm_page_delayed_work_finish_ctx(dwp_start);
2369 		dwp_start = dwp = NULL;
2370 	}
2371 }
2372 
2373 
2374 /*
2375  * Deactivate a "chunk" of the given range of the object starting at offset.  A "chunk"
2376  * will always be less than or equal to the given size.  The total range is divided up
2377  * into chunks for efficiency and performance related to the locks and handling the shadow
2378  * chain.  This routine returns how much of the given "size" it actually processed.  It's
2379  * up to the caller to loop and keep calling this routine until the entire range they want
2380  * to process has been done.
2381  * Iff the DEACTIVATE_CLEAR_REFMOD flag is set, pmap_clear_refmod_options() is called for each physical page in this range.
2382  */
2383 
2384 static vm_object_size_t
2385 deactivate_a_chunk(
2386 	vm_object_t             orig_object,
2387 	vm_object_offset_t      offset,
2388 	vm_object_size_t        size,
2389 	deactivate_flags_t      flags,
2390 	pmap_flush_context      *pfc,
2391 	struct pmap             *pmap,
2392 	vm_map_offset_t         pmap_offset)
2393 {
2394 	vm_object_t             object;
2395 	vm_object_t             tmp_object;
2396 	vm_object_size_t        length;
2397 	chunk_state_t           chunk_state;
2398 
2399 
2400 	/*
2401 	 * Get set to do a chunk.  We'll do up to CHUNK_SIZE, but no more than the
2402 	 * remaining size the caller asked for.
2403 	 */
2404 
2405 	length = MIN(size, CHUNK_SIZE);
2406 
2407 	/*
2408 	 * The chunk_state keeps track of which pages we've already processed if there's
2409 	 * a shadow chain on this object.  At this point, we haven't done anything with this
2410 	 * range of pages yet, so initialize the state to indicate no pages processed yet.
2411 	 */
2412 
2413 	CHUNK_INIT(chunk_state, length);
2414 	object = orig_object;
2415 
2416 	/*
2417 	 * Start at the top level object and iterate around the loop once for each object
2418 	 * in the shadow chain.  We stop processing early if we've already found all the pages
2419 	 * in the range.  Otherwise we stop when we run out of shadow objects.
2420 	 */
2421 
2422 	while (object && CHUNK_NOT_COMPLETE(chunk_state)) {
2423 		vm_object_paging_begin(object);
2424 
2425 		deactivate_pages_in_object(object, offset, length, flags, &chunk_state, pfc, pmap, pmap_offset);
2426 
2427 		vm_object_paging_end(object);
2428 
2429 		/*
2430 		 * We've finished with this object, see if there's a shadow object.  If
2431 		 * there is, update the offset and lock the new object.  We also turn off
2432 		 * kill_page at this point since we only kill pages in the top most object.
2433 		 */
2434 
2435 		tmp_object = object->shadow;
2436 
2437 		if (tmp_object) {
2438 			assert(!(flags & DEACTIVATE_KILL) || (flags & DEACTIVATE_CLEAR_REFMOD));
2439 			flags &= ~(DEACTIVATE_KILL | DEACTIVATE_REUSABLE | DEACTIVATE_ALL_REUSABLE);
2440 			offset += object->vo_shadow_offset;
2441 			vm_object_lock(tmp_object);
2442 		}
2443 
2444 		if (object != orig_object) {
2445 			vm_object_unlock(object);
2446 		}
2447 
2448 		object = tmp_object;
2449 	}
2450 
2451 	if (object && object != orig_object) {
2452 		vm_object_unlock(object);
2453 	}
2454 
2455 	return length;
2456 }
2457 
2458 
2459 
2460 /*
2461  * Move any resident pages in the specified range to the inactive queue.  If kill_page is set,
2462  * we also clear the modified status of the page and "forget" any changes that have been made
2463  * to the page.
2464  */
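/*
 * kill_page is what the madvise(MADV_FREE*) handling described above relies
 * on to discard the pages' contents; reusable_page / reusable_no_write
 * additionally mark the range "reusable" so the pmap-level reusable
 * accounting is updated in deactivate_pages_in_object().
 */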
2465 
2466 __private_extern__ void
2467 vm_object_deactivate_pages(
2468 	vm_object_t             object,
2469 	vm_object_offset_t      offset,
2470 	vm_object_size_t        size,
2471 	boolean_t               kill_page,
2472 	boolean_t               reusable_page,
2473 	boolean_t               reusable_no_write,
2474 	struct pmap             *pmap,
2475 	vm_map_offset_t         pmap_offset)
2476 {
2477 	vm_object_size_t        length;
2478 	boolean_t               all_reusable;
2479 	pmap_flush_context      pmap_flush_context_storage;
2480 	unsigned int pmap_clear_refmod_mask = VM_MEM_REFERENCED;
2481 	unsigned int pmap_clear_refmod_options = 0;
2482 	deactivate_flags_t flags = DEACTIVATE_CLEAR_REFMOD;
2483 	bool refmod_cleared = false;
2484 	if (kill_page) {
2485 		flags |= DEACTIVATE_KILL;
2486 	}
2487 	if (reusable_page) {
2488 		flags |= DEACTIVATE_REUSABLE;
2489 	}
2490 	if (reusable_no_write) {
2491 		flags |= DEACTIVATE_REUSABLE_NO_WRITE;
2492 	}
2493 
2494 	/*
2495 	 * We break the range up into chunks and do one chunk at a time.  This is for
2496 	 * efficiency and performance while handling the shadow chains and the locks.
2497 	 * The deactivate_a_chunk() function returns how much of the range it processed.
2498 	 * We keep calling this routine until the given size is exhausted.
2499 	 */
2500 
2501 
2502 	all_reusable = FALSE;
2503 #if 11
2504 	/*
2505 	 * For the sake of accurate "reusable" pmap stats, we need
2506 	 * to tell pmap about each page that is no longer "reusable",
2507 	 * so we can't do the "all_reusable" optimization.
2508 	 *
2509 	 * If we do go with the all_reusable optimization, we can't
2510 	 * return if size is 0 since we could have "all_reusable == TRUE".
2511 	 * In this case, we save the overhead of doing the pmap_flush_context
2512 	 * work.
2513 	 */
2514 	if (size == 0) {
2515 		return;
2516 	}
2517 #else
2518 	if (reusable_page &&
2519 	    object->internal &&
2520 	    object->vo_size != 0 &&
2521 	    object->vo_size == size &&
2522 	    object->reusable_page_count == 0) {
2523 		all_reusable = TRUE;
2524 		reusable_page = FALSE;
2525 		flags |= DEACTIVATE_ALL_REUSABLE;
2526 	}
2527 #endif
2528 
2529 	if ((reusable_page || all_reusable) && object->all_reusable) {
2530 		/* This means MADV_FREE_REUSABLE has been called twice, which
2531 		 * is probably illegal. */
2532 		return;
2533 	}
2534 
2535 
2536 	pmap_flush_context_init(&pmap_flush_context_storage);
2537 
2538 	/*
2539 	 * If we're deactivating multiple pages, try to perform one bulk pmap operation.
2540 	 * We can't do this if we're killing pages and there's a shadow chain as
2541 	 * we don't yet know which pages are in the top object (pages in shadow copies aren't
2542 	 * safe to kill).
2543 	 * And we can only do this on hardware that supports it.
2544 	 */
2545 	if (size > PAGE_SIZE && (!kill_page || !object->shadow)) {
2546 		if (kill_page && object->internal) {
2547 			pmap_clear_refmod_mask |= VM_MEM_MODIFIED;
2548 		}
2549 		if (reusable_page) {
2550 			pmap_clear_refmod_options |= PMAP_OPTIONS_SET_REUSABLE;
2551 		}
2552 
2553 		refmod_cleared = pmap_clear_refmod_range_options(pmap, pmap_offset, pmap_offset + size, pmap_clear_refmod_mask, pmap_clear_refmod_options);
2554 		if (refmod_cleared) {
2555 			// We were able to clear all the refmod bits. So deactivate_a_chunk doesn't need to do it.
2556 			flags &= ~DEACTIVATE_CLEAR_REFMOD;
2557 		}
2558 	}
2559 
2560 	while (size) {
2561 		length = deactivate_a_chunk(object, offset, size, flags,
2562 		    &pmap_flush_context_storage, pmap, pmap_offset);
2563 
2564 		size -= length;
2565 		offset += length;
2566 		pmap_offset += length;
2567 	}
2568 	pmap_flush(&pmap_flush_context_storage);
2569 
2570 	if (all_reusable) {
2571 		if (!object->all_reusable) {
2572 			unsigned int reusable;
2573 
2574 			object->all_reusable = TRUE;
2575 			assert(object->reusable_page_count == 0);
2576 			/* update global stats */
2577 			reusable = object->resident_page_count;
2578 			OSAddAtomic(reusable,
2579 			    &vm_page_stats_reusable.reusable_count);
2580 			vm_page_stats_reusable.reusable += reusable;
2581 			vm_page_stats_reusable.all_reusable_calls++;
2582 		}
2583 	} else if (reusable_page) {
2584 		vm_page_stats_reusable.partial_reusable_calls++;
2585 	}
2586 }
2587 
2588 void
2589 vm_object_reuse_pages(
2590 	vm_object_t             object,
2591 	vm_object_offset_t      start_offset,
2592 	vm_object_offset_t      end_offset,
2593 	boolean_t               allow_partial_reuse)
2594 {
2595 	vm_object_offset_t      cur_offset;
2596 	vm_page_t               m;
2597 	unsigned int            reused, reusable;
2598 
2599 #define VM_OBJECT_REUSE_PAGE(object, m, reused)                         \
2600 	MACRO_BEGIN                                                     \
2601 	        if ((m) != VM_PAGE_NULL &&                              \
2602 	            (m)->vmp_reusable) {                                \
2603 	                assert((object)->reusable_page_count <=         \
2604 	                       (object)->resident_page_count);          \
2605 	                assert((object)->reusable_page_count > 0);      \
2606 	                (object)->reusable_page_count--;                \
2607 	                (m)->vmp_reusable = FALSE;                      \
2608 	                (reused)++;                                     \
2609 	/* \
2610 	 * Tell pmap that this page is no longer \
2611 	 * "reusable", to update the "reusable" stats \
2612 	 * for all the pmaps that have mapped this \
2613 	 * page. \
2614 	 */                                                             \
2615 	                pmap_clear_refmod_options(VM_PAGE_GET_PHYS_PAGE((m)), \
2616 	                                          0, /* refmod */       \
2617 	                                          (PMAP_OPTIONS_CLEAR_REUSABLE \
2618 	                                           | PMAP_OPTIONS_NOFLUSH), \
2619 	                                          NULL);                \
2620 	        }                                                       \
2621 	MACRO_END
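
	/*
	 * VM_OBJECT_REUSE_PAGE() clears the "reusable" state on one page and
	 * notifies the pmap layer so each mapping's reusable accounting gets
	 * updated.  The loops below apply it either by offset lookup or by
	 * walking the object's memq, whichever is expected to visit fewer
	 * entries.
	 */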
2622 
2623 	reused = 0;
2624 	reusable = 0;
2625 
2626 	vm_object_lock_assert_exclusive(object);
2627 
2628 	if (object->all_reusable) {
2629 		panic("object %p all_reusable: can't update pmap stats",
2630 		    object);
2631 		assert(object->reusable_page_count == 0);
2632 		object->all_reusable = FALSE;
2633 		if (end_offset - start_offset == object->vo_size ||
2634 		    !allow_partial_reuse) {
2635 			vm_page_stats_reusable.all_reuse_calls++;
2636 			reused = object->resident_page_count;
2637 		} else {
2638 			vm_page_stats_reusable.partial_reuse_calls++;
2639 			vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2640 				if (m->vmp_offset < start_offset ||
2641 				    m->vmp_offset >= end_offset) {
2642 					m->vmp_reusable = TRUE;
2643 					object->reusable_page_count++;
2644 					assert(object->resident_page_count >= object->reusable_page_count);
2645 					continue;
2646 				} else {
2647 					assert(!m->vmp_reusable);
2648 					reused++;
2649 				}
2650 			}
2651 		}
2652 	} else if (object->resident_page_count >
2653 	    ((end_offset - start_offset) >> PAGE_SHIFT)) {
2654 		vm_page_stats_reusable.partial_reuse_calls++;
2655 		for (cur_offset = start_offset;
2656 		    cur_offset < end_offset;
2657 		    cur_offset += PAGE_SIZE_64) {
2658 			if (object->reusable_page_count == 0) {
2659 				break;
2660 			}
2661 			m = vm_page_lookup(object, cur_offset);
2662 			VM_OBJECT_REUSE_PAGE(object, m, reused);
2663 		}
2664 	} else {
2665 		vm_page_stats_reusable.partial_reuse_calls++;
2666 		vm_page_queue_iterate(&object->memq, m, vmp_listq) {
2667 			if (object->reusable_page_count == 0) {
2668 				break;
2669 			}
2670 			if (m->vmp_offset < start_offset ||
2671 			    m->vmp_offset >= end_offset) {
2672 				continue;
2673 			}
2674 			VM_OBJECT_REUSE_PAGE(object, m, reused);
2675 		}
2676 	}
2677 
2678 	/* update global stats */
2679 	OSAddAtomic(reusable - reused, &vm_page_stats_reusable.reusable_count);
2680 	vm_page_stats_reusable.reused += reused;
2681 	vm_page_stats_reusable.reusable += reusable;
2682 }
2683 
2684 /*
2685  *	Routine:	vm_object_pmap_protect
2686  *
2687  *	Purpose:
2688  *		Reduces the permission for all physical
2689  *		pages in the specified object range.
2690  *
2691  *		If removing write permission only, it is
2692  *		sufficient to protect only the pages in
2693  *		the top-level object; only those pages may
2694  *		have write permission.
2695  *
2696  *		If removing all access, we must follow the
2697  *		shadow chain from the top-level object to
2698  *		remove access to all pages in shadowed objects.
2699  *
2700  *		The object must *not* be locked.  The object must
2701  *		be internal.
2702  *
2703  *              If pmap is not NULL, this routine assumes that
2704  *              the only mappings for the pages are in that
2705  *              pmap.
2706  */
2707 
2708 __private_extern__ void
2709 vm_object_pmap_protect(
2710 	vm_object_t                     object,
2711 	vm_object_offset_t              offset,
2712 	vm_object_size_t                size,
2713 	pmap_t                          pmap,
2714 	vm_map_size_t                   pmap_page_size,
2715 	vm_map_offset_t                 pmap_start,
2716 	vm_prot_t                       prot)
2717 {
2718 	vm_object_pmap_protect_options(object, offset, size, pmap,
2719 	    pmap_page_size,
2720 	    pmap_start, prot, 0);
2721 }
2722 
2723 __private_extern__ void
2724 vm_object_pmap_protect_options(
2725 	vm_object_t                     object,
2726 	vm_object_offset_t              offset,
2727 	vm_object_size_t                size,
2728 	pmap_t                          pmap,
2729 	vm_map_size_t                   pmap_page_size,
2730 	vm_map_offset_t                 pmap_start,
2731 	vm_prot_t                       prot,
2732 	int                             options)
2733 {
2734 	pmap_flush_context      pmap_flush_context_storage;
2735 	boolean_t               delayed_pmap_flush = FALSE;
2736 	vm_object_offset_t      offset_in_object;
2737 	vm_object_size_t        size_in_object;
2738 
2739 	if (object == VM_OBJECT_NULL) {
2740 		return;
2741 	}
2742 	if (pmap_page_size > PAGE_SIZE) {
2743 		/* for 16K map on 4K device... */
2744 		pmap_page_size = PAGE_SIZE;
2745 	}
2746 	/*
2747 	 * If we decide to work on the object itself, extend the range to
2748 	 * cover a full number of native pages.
2749 	 */
2750 	size_in_object = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
2751 	offset_in_object = vm_object_trunc_page(offset);
2752 	/*
2753 	 * If we decide to work on the pmap, use the exact range specified,
2754 	 * so no rounding/truncating offset and size.  They should already
2755 	 * be aligned to pmap_page_size.
2756 	 */
2757 	assertf(!(offset & (pmap_page_size - 1)) && !(size & (pmap_page_size - 1)),
2758 	    "offset 0x%llx size 0x%llx pmap_page_size 0x%llx",
2759 	    offset, size, (uint64_t)pmap_page_size);
2760 
2761 	vm_object_lock(object);
2762 
2763 	if (object->phys_contiguous) {
2764 		if (pmap != NULL) {
2765 			vm_object_unlock(object);
2766 			pmap_protect_options(pmap,
2767 			    pmap_start,
2768 			    pmap_start + size,
2769 			    prot,
2770 			    options & ~PMAP_OPTIONS_NOFLUSH,
2771 			    NULL);
2772 		} else {
2773 			vm_object_offset_t phys_start, phys_end, phys_addr;
2774 
2775 			phys_start = object->vo_shadow_offset + offset_in_object;
2776 			phys_end = phys_start + size_in_object;
2777 			assert(phys_start <= phys_end);
2778 			assert(phys_end <= object->vo_shadow_offset + object->vo_size);
2779 			vm_object_unlock(object);
2780 
2781 			pmap_flush_context_init(&pmap_flush_context_storage);
2782 			delayed_pmap_flush = FALSE;
2783 
2784 			for (phys_addr = phys_start;
2785 			    phys_addr < phys_end;
2786 			    phys_addr += PAGE_SIZE_64) {
2787 				pmap_page_protect_options(
2788 					(ppnum_t) (phys_addr >> PAGE_SHIFT),
2789 					prot,
2790 					options | PMAP_OPTIONS_NOFLUSH,
2791 					(void *)&pmap_flush_context_storage);
2792 				delayed_pmap_flush = TRUE;
2793 			}
2794 			if (delayed_pmap_flush == TRUE) {
2795 				pmap_flush(&pmap_flush_context_storage);
2796 			}
2797 		}
2798 		return;
2799 	}
2800 
2801 	assert(object->internal);
2802 
2803 	while (TRUE) {
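		/*
		 * If the object's resident footprint is large relative to the
		 * range being protected (and we have a pmap), one
		 * pmap_protect_options() call over the whole pmap range is
		 * cheaper than visiting the resident pages individually.
		 */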
2804 		if (ptoa_64(object->resident_page_count) > size_in_object / 2 && pmap != PMAP_NULL) {
2805 			vm_object_unlock(object);
2806 			if (pmap_page_size < PAGE_SIZE) {
2807 				DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: pmap_protect()\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot);
2808 			}
2809 			pmap_protect_options(pmap, pmap_start, pmap_start + size, prot,
2810 			    options & ~PMAP_OPTIONS_NOFLUSH, NULL);
2811 			return;
2812 		}
2813 
2814 		if (pmap_page_size < PAGE_SIZE) {
2815 			DEBUG4K_PMAP("pmap %p start 0x%llx end 0x%llx prot 0x%x: offset 0x%llx size 0x%llx object %p offset 0x%llx size 0x%llx\n", pmap, (uint64_t)pmap_start, pmap_start + size, prot, offset, size, object, offset_in_object, size_in_object);
2816 		}
2817 
2818 		pmap_flush_context_init(&pmap_flush_context_storage);
2819 		delayed_pmap_flush = FALSE;
2820 
2821 		/*
2822 		 * if we are doing large ranges with respect to the resident
2823 		 * page count, then we should iterate over the pages; otherwise
2824 		 * an inverse page look-up will be faster
2825 		 */
2826 		if (ptoa_64(object->resident_page_count / 4) < size_in_object) {
2827 			vm_page_t               p;
2828 			vm_object_offset_t      end;
2829 
2830 			end = offset_in_object + size_in_object;
2831 
2832 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
2833 				if (!p->vmp_fictitious && (offset_in_object <= p->vmp_offset) && (p->vmp_offset < end)) {
2834 					vm_map_offset_t start;
2835 
2836 					/*
2837 					 * XXX FBDP 4K: intentionally using "offset" here instead
2838 					 * of "offset_in_object", since "start" is a pmap address.
2839 					 */
2840 					start = pmap_start + p->vmp_offset - offset;
2841 
2842 					if (pmap != PMAP_NULL) {
2843 						vm_map_offset_t curr;
2844 						for (curr = start;
2845 						    curr < start + PAGE_SIZE_64;
2846 						    curr += pmap_page_size) {
2847 							if (curr < pmap_start) {
2848 								continue;
2849 							}
2850 							if (curr >= pmap_start + size) {
2851 								break;
2852 							}
2853 							pmap_protect_options(
2854 								pmap,
2855 								curr,
2856 								curr + pmap_page_size,
2857 								prot,
2858 								options | PMAP_OPTIONS_NOFLUSH,
2859 								&pmap_flush_context_storage);
2860 						}
2861 					} else {
2862 						pmap_page_protect_options(
2863 							VM_PAGE_GET_PHYS_PAGE(p),
2864 							prot,
2865 							options | PMAP_OPTIONS_NOFLUSH,
2866 							&pmap_flush_context_storage);
2867 					}
2868 					delayed_pmap_flush = TRUE;
2869 				}
2870 			}
2871 		} else {
2872 			vm_page_t               p;
2873 			vm_object_offset_t      end;
2874 			vm_object_offset_t      target_off;
2875 
2876 			end = offset_in_object + size_in_object;
2877 
2878 			for (target_off = offset_in_object;
2879 			    target_off < end; target_off += PAGE_SIZE) {
2880 				p = vm_page_lookup(object, target_off);
2881 
2882 				if (p != VM_PAGE_NULL) {
2883 					vm_object_offset_t start;
2884 
2885 					/*
2886 					 * XXX FBDP 4K: intentionally using "offset" here instead
2887 					 * of "offset_in_object", since "start" is a pmap address.
2888 					 */
2889 					start = pmap_start + (p->vmp_offset - offset);
2890 
2891 					if (pmap != PMAP_NULL) {
2892 						vm_map_offset_t curr;
2893 						for (curr = start;
2894 						    curr < start + PAGE_SIZE;
2895 						    curr += pmap_page_size) {
2896 							if (curr < pmap_start) {
2897 								continue;
2898 							}
2899 							if (curr >= pmap_start + size) {
2900 								break;
2901 							}
2902 							pmap_protect_options(
2903 								pmap,
2904 								curr,
2905 								curr + pmap_page_size,
2906 								prot,
2907 								options | PMAP_OPTIONS_NOFLUSH,
2908 								&pmap_flush_context_storage);
2909 						}
2910 					} else {
2911 						pmap_page_protect_options(
2912 							VM_PAGE_GET_PHYS_PAGE(p),
2913 							prot,
2914 							options | PMAP_OPTIONS_NOFLUSH,
2915 							&pmap_flush_context_storage);
2916 					}
2917 					delayed_pmap_flush = TRUE;
2918 				}
2919 			}
2920 		}
2921 		if (delayed_pmap_flush == TRUE) {
2922 			pmap_flush(&pmap_flush_context_storage);
2923 		}
2924 
2925 		if (prot == VM_PROT_NONE) {
2926 			/*
2927 			 * Must follow shadow chain to remove access
2928 			 * to pages in shadowed objects.
2929 			 */
2930 			vm_object_t     next_object;
2931 
2932 			next_object = object->shadow;
2933 			if (next_object != VM_OBJECT_NULL) {
2934 				offset_in_object += object->vo_shadow_offset;
2935 				offset += object->vo_shadow_offset;
2936 				vm_object_lock(next_object);
2937 				vm_object_unlock(object);
2938 				object = next_object;
2939 			} else {
2940 				/*
2941 				 * End of chain - we are done.
2942 				 */
2943 				break;
2944 			}
2945 		} else {
2946 			/*
2947 			 * Pages in shadowed objects may never have
2948 			 * write permission - we may stop here.
2949 			 */
2950 			break;
2951 		}
2952 	}
2953 
2954 	vm_object_unlock(object);
2955 }
2956 
2957 uint32_t vm_page_busy_absent_skipped = 0;
2958 
2959 /*
2960  *	Routine:	vm_object_copy_slowly
2961  *
2962  *	Description:
2963  *		Copy the specified range of the source
2964  *		virtual memory object without using
2965  *		protection-based optimizations (such
2966  *		as copy-on-write).  The pages in the
2967  *		region are actually copied.
2968  *
2969  *	In/out conditions:
2970  *		The caller must hold a reference and a lock
2971  *		for the source virtual memory object.  The source
2972  *		object will be returned *unlocked*.
2973  *
2974  *	Results:
2975  *		If the copy is completed successfully, KERN_SUCCESS is
2976  *		returned.  If the caller asserted the interruptible
2977  *		argument, and an interruption occurred while waiting
2978  *		for a user-generated event, MACH_SEND_INTERRUPTED is
2979  *		returned.  Other values may be returned to indicate
2980  *		hard errors during the copy operation.
2981  *
2982  *		A new virtual memory object is returned in a
2983  *		parameter (_result_object).  The contents of this
2984  *		new object, starting at a zero offset, are a copy
2985  *		of the source memory region.  In the event of
2986  *		an error, this parameter will contain the value
2987  *		VM_OBJECT_NULL.
2988  */
2989 __private_extern__ kern_return_t
2990 vm_object_copy_slowly(
2991 	vm_object_t             src_object,
2992 	vm_object_offset_t      src_offset,
2993 	vm_object_size_t        size,
2994 	boolean_t               interruptible,
2995 	vm_object_t             *_result_object)        /* OUT */
2996 {
2997 	vm_object_t             new_object;
2998 	vm_object_offset_t      new_offset;
2999 
3000 	struct vm_object_fault_info fault_info = {};
3001 
3002 	if (size == 0) {
3003 		vm_object_unlock(src_object);
3004 		*_result_object = VM_OBJECT_NULL;
3005 		return KERN_INVALID_ARGUMENT;
3006 	}
3007 
3008 	/*
3009 	 *	Prevent destruction of the source object while we copy.
3010 	 */
3011 
3012 	vm_object_reference_locked(src_object);
3013 	vm_object_unlock(src_object);
3014 
3015 	/*
3016 	 *	Create a new object to hold the copied pages.
3017 	 *	A few notes:
3018 	 *		We fill the new object starting at offset 0,
3019 	 *		 regardless of the input offset.
3020 	 *		We don't bother to lock the new object within
3021 	 *		 this routine, since we have the only reference.
3022 	 */
3023 
3024 	size = vm_object_round_page(src_offset + size) - vm_object_trunc_page(src_offset);
3025 	src_offset = vm_object_trunc_page(src_offset);
3026 	new_object = vm_object_allocate(size);
3027 	new_offset = 0;
3028 
3029 	assert(size == trunc_page_64(size));    /* Will the loop terminate? */
3030 
3031 	fault_info.interruptible = interruptible;
3032 	fault_info.behavior  = VM_BEHAVIOR_SEQUENTIAL;
3033 	fault_info.lo_offset = src_offset;
3034 	fault_info.hi_offset = src_offset + size;
3035 	fault_info.stealth = TRUE;
3036 
3037 	for (;
3038 	    size != 0;
3039 	    src_offset += PAGE_SIZE_64,
3040 	    new_offset += PAGE_SIZE_64, size -= PAGE_SIZE_64
3041 	    ) {
3042 		vm_page_t       new_page;
3043 		vm_fault_return_t result;
3044 
3045 		vm_object_lock(new_object);
3046 
3047 		while ((new_page = vm_page_alloc(new_object, new_offset))
3048 		    == VM_PAGE_NULL) {
3049 			vm_object_unlock(new_object);
3050 
3051 			if (!vm_page_wait(interruptible)) {
3052 				vm_object_deallocate(new_object);
3053 				vm_object_deallocate(src_object);
3054 				*_result_object = VM_OBJECT_NULL;
3055 				return MACH_SEND_INTERRUPTED;
3056 			}
3057 			vm_object_lock(new_object);
3058 		}
3059 		vm_object_unlock(new_object);
3060 
3061 		do {
3062 			vm_prot_t       prot = VM_PROT_READ;
3063 			vm_page_t       _result_page;
3064 			vm_page_t       top_page;
3065 			vm_page_t       result_page;
3066 			kern_return_t   error_code;
3067 			vm_object_t     result_page_object;
3068 
3069 
3070 			vm_object_lock(src_object);
3071 
3072 			if (src_object->internal &&
3073 			    src_object->shadow == VM_OBJECT_NULL &&
3074 			    (src_object->pager == NULL ||
3075 			    (VM_COMPRESSOR_PAGER_STATE_GET(src_object,
3076 			    src_offset) ==
3077 			    VM_EXTERNAL_STATE_ABSENT))) {
3078 				boolean_t can_skip_page;
3079 
3080 				_result_page = vm_page_lookup(src_object,
3081 				    src_offset);
3082 				if (_result_page == VM_PAGE_NULL) {
3083 					/*
3084 					 * This page is neither resident nor
3085 					 * compressed and there's no shadow
3086 					 * object below "src_object", so this
3087 					 * page is really missing.
3088 					 * There's no need to zero-fill it just
3089 					 * to copy it:  let's leave it missing
3090 					 * in "new_object" and get zero-filled
3091 					 * on demand.
3092 					 */
3093 					can_skip_page = TRUE;
3094 				} else if (workaround_41447923 &&
3095 				    src_object->pager == NULL &&
3096 				    _result_page != VM_PAGE_NULL &&
3097 				    _result_page->vmp_busy &&
3098 				    _result_page->vmp_absent &&
3099 				    src_object->purgable == VM_PURGABLE_DENY &&
3100 				    !src_object->blocked_access) {
3101 					/*
3102 					 * This page is "busy" and "absent"
3103 					 * but not because we're waiting for
3104 					 * it to be decompressed.  It must
3105 					 * be because it's a "no zero fill"
3106 					 * page that is currently not
3107 					 * accessible until it gets overwritten
3108 					 * by a device driver.
3109 					 * Since its initial state would have
3110 					 * been "zero-filled", let's leave the
3111 					 * copy page missing and get zero-filled
3112 					 * on demand.
3113 					 */
3114 					assert(src_object->internal);
3115 					assert(src_object->shadow == NULL);
3116 					assert(src_object->pager == NULL);
3117 					can_skip_page = TRUE;
3118 					vm_page_busy_absent_skipped++;
3119 				} else {
3120 					can_skip_page = FALSE;
3121 				}
3122 				if (can_skip_page) {
3123 					vm_object_unlock(src_object);
3124 					/* free the unused "new_page"... */
3125 					vm_object_lock(new_object);
3126 					VM_PAGE_FREE(new_page);
3127 					new_page = VM_PAGE_NULL;
3128 					vm_object_unlock(new_object);
3129 					/* ...and go to next page in "src_object" */
3130 					result = VM_FAULT_SUCCESS;
3131 					break;
3132 				}
3133 			}
3134 
3135 			vm_object_paging_begin(src_object);
3136 
3137 			/* cap size at maximum UPL size */
3138 			upl_size_t cluster_size;
3139 			if (os_convert_overflow(size, &cluster_size)) {
3140 				cluster_size = 0 - (upl_size_t)PAGE_SIZE;
3141 			}
3142 			fault_info.cluster_size = cluster_size;
3143 
3144 			_result_page = VM_PAGE_NULL;
3145 			result = vm_fault_page(src_object, src_offset,
3146 			    VM_PROT_READ, FALSE,
3147 			    FALSE,     /* page not looked up */
3148 			    &prot, &_result_page, &top_page,
3149 			    (int *)0,
3150 			    &error_code, FALSE, &fault_info);
3151 
3152 			switch (result) {
3153 			case VM_FAULT_SUCCESS:
3154 				result_page = _result_page;
3155 				result_page_object = VM_PAGE_OBJECT(result_page);
3156 
3157 				/*
3158 				 *	Copy the page to the new object.
3159 				 *
3160 				 *	POLICY DECISION:
3161 				 *		If result_page is clean,
3162 				 *		we could steal it instead
3163 				 *		of copying.
3164 				 */
3165 
3166 				vm_page_copy(result_page, new_page);
3167 				vm_object_unlock(result_page_object);
3168 
3169 				/*
3170 				 *	Let go of both pages (make them
3171 				 *	not busy, perform wakeup, activate).
3172 				 */
3173 				vm_object_lock(new_object);
3174 				SET_PAGE_DIRTY(new_page, FALSE);
3175 				PAGE_WAKEUP_DONE(new_page);
3176 				vm_object_unlock(new_object);
3177 
3178 				vm_object_lock(result_page_object);
3179 				PAGE_WAKEUP_DONE(result_page);
3180 
3181 				vm_page_lockspin_queues();
3182 				if ((result_page->vmp_q_state == VM_PAGE_ON_SPECULATIVE_Q) ||
3183 				    (result_page->vmp_q_state == VM_PAGE_NOT_ON_Q)) {
3184 					vm_page_activate(result_page);
3185 				}
3186 				vm_page_activate(new_page);
3187 				vm_page_unlock_queues();
3188 
3189 				/*
3190 				 *	Release paging references and
3191 				 *	top-level placeholder page, if any.
3192 				 */
3193 
3194 				vm_fault_cleanup(result_page_object,
3195 				    top_page);
3196 
3197 				break;
3198 
3199 			case VM_FAULT_RETRY:
3200 				break;
3201 
3202 			case VM_FAULT_MEMORY_SHORTAGE:
3203 				if (vm_page_wait(interruptible)) {
3204 					break;
3205 				}
3206 				ktriage_record(thread_tid(current_thread()), KDBG_TRIAGE_EVENTID(KDBG_TRIAGE_SUBSYS_VM, KDBG_TRIAGE_RESERVED, KDBG_TRIAGE_VM_FAULT_OBJCOPYSLOWLY_MEMORY_SHORTAGE), 0 /* arg */);
3207 				OS_FALLTHROUGH;
3208 
3209 			case VM_FAULT_INTERRUPTED:
3210 				vm_object_lock(new_object);
3211 				VM_PAGE_FREE(new_page);
3212 				vm_object_unlock(new_object);
3213 
3214 				vm_object_deallocate(new_object);
3215 				vm_object_deallocate(src_object);
3216 				*_result_object = VM_OBJECT_NULL;
3217 				return MACH_SEND_INTERRUPTED;
3218 
3219 			case VM_FAULT_SUCCESS_NO_VM_PAGE:
3220 				/* success but no VM page: fail */
3221 				vm_object_paging_end(src_object);
3222 				vm_object_unlock(src_object);
3223 				OS_FALLTHROUGH;
3224 			case VM_FAULT_MEMORY_ERROR:
3225 				/*
3226 				 * A policy choice:
3227 				 *	(a) ignore pages that we can't
3228 				 *	    copy
3229 				 *	(b) return the null object if
3230 				 *	    any page fails [chosen]
3231 				 */
3232 
3233 				vm_object_lock(new_object);
3234 				VM_PAGE_FREE(new_page);
3235 				vm_object_unlock(new_object);
3236 
3237 				vm_object_deallocate(new_object);
3238 				vm_object_deallocate(src_object);
3239 				*_result_object = VM_OBJECT_NULL;
3240 				return error_code ? error_code:
3241 				       KERN_MEMORY_ERROR;
3242 
3243 			default:
3244 				panic("vm_object_copy_slowly: unexpected error"
3245 				    " 0x%x from vm_fault_page()\n", result);
3246 			}
3247 		} while (result != VM_FAULT_SUCCESS);
3248 	}
3249 
3250 	/*
3251 	 *	Lose the extra reference, and return our object.
3252 	 */
3253 	vm_object_deallocate(src_object);
3254 	*_result_object = new_object;
3255 	return KERN_SUCCESS;
3256 }
3257 
3258 /*
3259  *	Routine:	vm_object_copy_quickly
3260  *
3261  *	Purpose:
3262  *		Copy the specified range of the source virtual
3263  *		memory object, if it can be done without waiting
3264  *		for user-generated events.
3265  *
3266  *	Results:
3267  *		If the copy is successful, the copy is returned in
3268  *		the arguments; otherwise, the arguments are not
3269  *		affected.
3270  *
3271  *	In/out conditions:
3272  *		The object should be unlocked on entry and exit.
3273  */
3274 
3275 /*ARGSUSED*/
3276 __private_extern__ boolean_t
3277 vm_object_copy_quickly(
3278 	vm_object_t             object,               /* IN */
3279 	__unused vm_object_offset_t     offset, /* IN */
3280 	__unused vm_object_size_t       size,   /* IN */
3281 	boolean_t               *_src_needs_copy,       /* OUT */
3282 	boolean_t               *_dst_needs_copy)       /* OUT */
3283 {
3284 	memory_object_copy_strategy_t copy_strategy;
3285 
3286 	if (object == VM_OBJECT_NULL) {
3287 		*_src_needs_copy = FALSE;
3288 		*_dst_needs_copy = FALSE;
3289 		return TRUE;
3290 	}
3291 
3292 	vm_object_lock(object);
3293 
3294 	copy_strategy = object->copy_strategy;
3295 
3296 	switch (copy_strategy) {
3297 	case MEMORY_OBJECT_COPY_SYMMETRIC:
3298 
3299 		/*
3300 		 *	Symmetric copy strategy.
3301 		 *	Make another reference to the object.
3302 		 *	Leave object/offset unchanged.
3303 		 */
3304 
3305 		vm_object_reference_locked(object);
3306 		object->shadowed = TRUE;
3307 		vm_object_unlock(object);
3308 
3309 		/*
3310 		 *	Both source and destination must make
3311 		 *	shadows, and the source must be made
3312 		 *	read-only if not already.
3313 		 */
3314 
3315 		*_src_needs_copy = TRUE;
3316 		*_dst_needs_copy = TRUE;
3317 
3318 		break;
3319 
3320 	case MEMORY_OBJECT_COPY_DELAY:
3321 		vm_object_unlock(object);
3322 		return FALSE;
3323 
3324 	default:
3325 		vm_object_unlock(object);
3326 		return FALSE;
3327 	}
3328 	return TRUE;
3329 }
3330 
3331 static uint32_t copy_delayed_lock_collisions;
3332 static uint32_t copy_delayed_max_collisions;
3333 static uint32_t copy_delayed_lock_contention;
3334 static uint32_t copy_delayed_protect_iterate;
3335 
3336 /*
3337  *	Routine:	vm_object_copy_delayed [internal]
3338  *
3339  *	Description:
3340  *		Copy the specified virtual memory object, using
3341  *		the asymmetric copy-on-write algorithm.
3342  *
3343  *	In/out conditions:
3344  *		The src_object must be locked on entry.  It will be unlocked
3345  *		on exit - so the caller must also hold a reference to it.
3346  *
3347  *		This routine will not block waiting for user-generated
3348  *		events.  It is not interruptible.
3349  */
3350 __private_extern__ vm_object_t
3351 vm_object_copy_delayed(
3352 	vm_object_t             src_object,
3353 	vm_object_offset_t      src_offset,
3354 	vm_object_size_t        size,
3355 	boolean_t               src_object_shared)
3356 {
3357 	vm_object_t             new_copy = VM_OBJECT_NULL;
3358 	vm_object_t             old_copy;
3359 	vm_page_t               p;
3360 	vm_object_size_t        copy_size = src_offset + size;
3361 	pmap_flush_context      pmap_flush_context_storage;
3362 	boolean_t               delayed_pmap_flush = FALSE;
3363 
3364 
3365 	uint32_t collisions = 0;
3366 	/*
3367 	 *	The user-level memory manager wants to see all of the changes
3368 	 *	to this object, but it has promised not to make any changes on
3369 	 *	its own.
3370 	 *
3371 	 *	Perform an asymmetric copy-on-write, as follows:
3372 	 *		Create a new object, called a "copy object" to hold
3373 	 *		 pages modified by the new mapping  (i.e., the copy,
3374 	 *		 not the original mapping).
3375 	 *		Record the original object as the backing object for
3376 	 *		 the copy object.  If the original mapping does not
3377 	 *		 change a page, it may be used read-only by the copy.
3378 	 *		Record the copy object in the original object.
3379 	 *		 When the original mapping causes a page to be modified,
3380 	 *		 it must be copied to a new page that is "pushed" to
3381 	 *		 the copy object.
3382 	 *		Mark the new mapping (the copy object) copy-on-write.
3383 	 *		 This makes the copy object itself read-only, allowing
3384 	 *		 it to be reused if the original mapping makes no
3385 	 *		 changes, and simplifying the synchronization required
3386 	 *		 in the "push" operation described above.
3387 	 *
3388 	 *	The copy-on-write is said to be asymmetric because the original
3389 	 *	object is *not* marked copy-on-write. A copied page is pushed
3390 	 *	to the copy object, regardless of which party attempted to modify
3391 	 *	the page.
3392 	 *
3393 	 *	Repeated asymmetric copy operations may be done. If the
3394 	 *	original object has not been changed since the last copy, its
3395 	 *	copy object can be reused. Otherwise, a new copy object can be
3396 	 *	inserted between the original object and its previous copy
3397 	 *	object.  Since any copy object is read-only, this cannot affect
3398 	 *	the contents of the previous copy object.
3399 	 *
3400 	 *	Note that a copy object is higher in the object tree than the
3401 	 *	original object; therefore, use of the copy object recorded in
3402 	 *	the original object must be done carefully, to avoid deadlock.
3403 	 */
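	/*
	 *	In outline, the pointer manipulation below performs this
	 *	transformation of the copy/shadow chain (old_copy may be
	 *	absent on the first copy of src_object):
	 *
	 *	    before:  old_copy --shadow--> src_object
	 *	             src_object --vo_copy--> old_copy
	 *
	 *	    after:   old_copy --shadow--> new_copy --shadow--> src_object
	 *	             src_object --vo_copy--> new_copy
	 *
	 *	so the new copy object always sits directly above src_object,
	 *	and any older copy object shadows the new one instead.
	 */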
3404 
3405 	copy_size = vm_object_round_page(copy_size);
3406 Retry:
3407 
3408 	/*
3409 	 * Wait for paging in progress.
3410 	 */
3411 	if (!src_object->true_share &&
3412 	    (src_object->paging_in_progress != 0 ||
3413 	    src_object->activity_in_progress != 0)) {
3414 		if (src_object_shared == TRUE) {
3415 			vm_object_unlock(src_object);
3416 			vm_object_lock(src_object);
3417 			src_object_shared = FALSE;
3418 			goto Retry;
3419 		}
3420 		vm_object_paging_wait(src_object, THREAD_UNINT);
3421 	}
3422 	/*
3423 	 *	See whether we can reuse the result of a previous
3424 	 *	copy operation.
3425 	 */
3426 
3427 	old_copy = src_object->vo_copy;
3428 	if (old_copy != VM_OBJECT_NULL) {
3429 		int lock_granted;
3430 
3431 		/*
3432 		 *	Try to get the locks (out of order)
3433 		 */
3434 		if (src_object_shared == TRUE) {
3435 			lock_granted = vm_object_lock_try_shared(old_copy);
3436 		} else {
3437 			lock_granted = vm_object_lock_try(old_copy);
3438 		}
3439 
3440 		if (!lock_granted) {
3441 			vm_object_unlock(src_object);
3442 
3443 			if (collisions++ == 0) {
3444 				copy_delayed_lock_contention++;
3445 			}
3446 			mutex_pause(collisions);
3447 
3448 			/* Heisenberg Rules */
3449 			copy_delayed_lock_collisions++;
3450 
3451 			if (collisions > copy_delayed_max_collisions) {
3452 				copy_delayed_max_collisions = collisions;
3453 			}
3454 
3455 			if (src_object_shared == TRUE) {
3456 				vm_object_lock_shared(src_object);
3457 			} else {
3458 				vm_object_lock(src_object);
3459 			}
3460 
3461 			goto Retry;
3462 		}
3463 
3464 		/*
3465 		 *	Determine whether the old copy object has
3466 		 *	been modified.
3467 		 */
3468 
3469 		if (old_copy->resident_page_count == 0 &&
3470 		    !old_copy->pager_created) {
3471 			/*
3472 			 *	It has not been modified.
3473 			 *
3474 			 *	Return another reference to
3475 			 *	the existing copy-object if
3476 			 *	we can safely grow it (if
3477 			 *	needed).
3478 			 */
3479 
3480 			if (old_copy->vo_size < copy_size) {
3481 				if (src_object_shared == TRUE) {
3482 					vm_object_unlock(old_copy);
3483 					vm_object_unlock(src_object);
3484 
3485 					vm_object_lock(src_object);
3486 					src_object_shared = FALSE;
3487 					goto Retry;
3488 				}
3489 				/*
3490 				 * We can't perform a delayed copy if any of the
3491 				 * pages in the extended range are wired (because
3492 				 * we can't safely take write permission away from
3493 				 * wired pages).  If the pages aren't wired, then
3494 				 * go ahead and protect them.
3495 				 */
3496 				copy_delayed_protect_iterate++;
3497 
3498 				pmap_flush_context_init(&pmap_flush_context_storage);
3499 				delayed_pmap_flush = FALSE;
3500 
3501 				vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3502 					if (!p->vmp_fictitious &&
3503 					    p->vmp_offset >= old_copy->vo_size &&
3504 					    p->vmp_offset < copy_size) {
3505 						if (VM_PAGE_WIRED(p)) {
3506 							vm_object_unlock(old_copy);
3507 							vm_object_unlock(src_object);
3508 
3509 							if (new_copy != VM_OBJECT_NULL) {
3510 								vm_object_unlock(new_copy);
3511 								vm_object_deallocate(new_copy);
3512 							}
3513 							if (delayed_pmap_flush == TRUE) {
3514 								pmap_flush(&pmap_flush_context_storage);
3515 							}
3516 
3517 							return VM_OBJECT_NULL;
3518 						} else {
3519 							pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
3520 							    (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
3521 							    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3522 							delayed_pmap_flush = TRUE;
3523 						}
3524 					}
3525 				}
3526 				if (delayed_pmap_flush == TRUE) {
3527 					pmap_flush(&pmap_flush_context_storage);
3528 				}
3529 
3530 				assertf(page_aligned(copy_size),
3531 				    "object %p size 0x%llx",
3532 				    old_copy, (uint64_t)copy_size);
3533 				old_copy->vo_size = copy_size;
3534 			}
3535 			if (src_object_shared == TRUE) {
3536 				vm_object_reference_shared(old_copy);
3537 			} else {
3538 				vm_object_reference_locked(old_copy);
3539 			}
3540 			vm_object_unlock(old_copy);
3541 			vm_object_unlock(src_object);
3542 
3543 			if (new_copy != VM_OBJECT_NULL) {
3544 				vm_object_unlock(new_copy);
3545 				vm_object_deallocate(new_copy);
3546 			}
3547 			return old_copy;
3548 		}
3549 
3550 
3551 
3552 		/*
3553 		 * Adjust the size argument so that the newly-created
3554 		 * copy object will be large enough to back either the
3555 		 * old copy object or the new mapping.
3556 		 */
3557 		if (old_copy->vo_size > copy_size) {
3558 			copy_size = old_copy->vo_size;
3559 		}
3560 
3561 		if (new_copy == VM_OBJECT_NULL) {
3562 			vm_object_unlock(old_copy);
3563 			vm_object_unlock(src_object);
3564 			new_copy = vm_object_allocate(copy_size);
3565 			vm_object_lock(src_object);
3566 			vm_object_lock(new_copy);
3567 
3568 			src_object_shared = FALSE;
3569 			goto Retry;
3570 		}
3571 		assertf(page_aligned(copy_size),
3572 		    "object %p size 0x%llx",
3573 		    new_copy, (uint64_t)copy_size);
3574 		new_copy->vo_size = copy_size;
3575 
3576 		/*
3577 		 *	The copy-object is always made large enough to
3578 		 *	completely shadow the original object, since
3579 		 *	it may have several users who want to shadow
3580 		 *	the original object at different points.
3581 		 */
3582 
3583 		assert((old_copy->shadow == src_object) &&
3584 		    (old_copy->vo_shadow_offset == (vm_object_offset_t) 0));
3585 	} else if (new_copy == VM_OBJECT_NULL) {
3586 		vm_object_unlock(src_object);
3587 		new_copy = vm_object_allocate(copy_size);
3588 		vm_object_lock(src_object);
3589 		vm_object_lock(new_copy);
3590 
3591 		src_object_shared = FALSE;
3592 		goto Retry;
3593 	}
3594 
3595 	/*
3596 	 * We now have the src object locked, and the new copy object
3597 	 * allocated and locked (and potentially the old copy locked).
3598 	 * Before we go any further, make sure we can still perform
3599 	 * a delayed copy, as the situation may have changed.
3600 	 *
3601 	 * Specifically, we can't perform a delayed copy if any of the
3602 	 * pages in the range are wired (because we can't safely take
3603 	 * write permission away from wired pages).  If the pages aren't
3604 	 * wired, then go ahead and protect them.
3605 	 */
3606 	copy_delayed_protect_iterate++;
3607 
3608 	pmap_flush_context_init(&pmap_flush_context_storage);
3609 	delayed_pmap_flush = FALSE;
3610 
3611 	vm_page_queue_iterate(&src_object->memq, p, vmp_listq) {
3612 		if (!p->vmp_fictitious && p->vmp_offset < copy_size) {
3613 			if (VM_PAGE_WIRED(p)) {
3614 				if (old_copy) {
3615 					vm_object_unlock(old_copy);
3616 				}
3617 				vm_object_unlock(src_object);
3618 				vm_object_unlock(new_copy);
3619 				vm_object_deallocate(new_copy);
3620 
3621 				if (delayed_pmap_flush == TRUE) {
3622 					pmap_flush(&pmap_flush_context_storage);
3623 				}
3624 
3625 				return VM_OBJECT_NULL;
3626 			} else {
3627 				pmap_page_protect_options(VM_PAGE_GET_PHYS_PAGE(p),
3628 				    (p->vmp_xpmapped ? (VM_PROT_READ | VM_PROT_EXECUTE) : VM_PROT_READ),
3629 				    PMAP_OPTIONS_NOFLUSH, (void *)&pmap_flush_context_storage);
3630 				delayed_pmap_flush = TRUE;
3631 			}
3632 		}
3633 	}
3634 	if (delayed_pmap_flush == TRUE) {
3635 		pmap_flush(&pmap_flush_context_storage);
3636 	}
3637 
3638 	if (old_copy != VM_OBJECT_NULL) {
3639 		/*
3640 		 *	Make the old copy-object shadow the new one.
3641 		 *	It will receive no more pages from the original
3642 		 *	object.
3643 		 */
3644 
3645 		/* remove ref. from old_copy */
3646 		vm_object_lock_assert_exclusive(src_object);
3647 		src_object->ref_count--;
3648 		assert(src_object->ref_count > 0);
3649 		vm_object_lock_assert_exclusive(old_copy);
3650 		old_copy->shadow = new_copy;
3651 		vm_object_lock_assert_exclusive(new_copy);
3652 		assert(new_copy->ref_count > 0);
3653 		new_copy->ref_count++;          /* for old_copy->shadow ref. */
3654 
3655 		vm_object_unlock(old_copy);     /* done with old_copy */
3656 	}
3657 
3658 	/*
3659 	 *	Point the new copy at the existing object.
3660 	 */
3661 	vm_object_lock_assert_exclusive(new_copy);
3662 	new_copy->shadow = src_object;
3663 	new_copy->vo_shadow_offset = 0;
3664 	new_copy->shadowed = TRUE;      /* caller must set needs_copy */
3665 
3666 	vm_object_lock_assert_exclusive(src_object);
3667 	vm_object_reference_locked(src_object);
3668 	src_object->vo_copy = new_copy;
3669 	vm_object_unlock(src_object);
3670 	vm_object_unlock(new_copy);
3671 
3672 	return new_copy;
3673 }
3674 
3675 /*
3676  *	Routine:	vm_object_copy_strategically
3677  *
3678  *	Purpose:
3679  *		Perform a copy according to the source object's
3680  *		declared strategy.  This operation may block,
3681  *		and may be interrupted.
3682  */
3683 __private_extern__ kern_return_t
3684 vm_object_copy_strategically(
3685 	vm_object_t             src_object,
3686 	vm_object_offset_t      src_offset,
3687 	vm_object_size_t        size,
3688 	bool                    forking,
3689 	vm_object_t             *dst_object,    /* OUT */
3690 	vm_object_offset_t      *dst_offset,    /* OUT */
3691 	boolean_t               *dst_needs_copy) /* OUT */
3692 {
3693 	boolean_t       result;
3694 	boolean_t       interruptible = THREAD_ABORTSAFE; /* XXX */
3695 	boolean_t       object_lock_shared = FALSE;
3696 	memory_object_copy_strategy_t copy_strategy;
3697 
3698 	assert(src_object != VM_OBJECT_NULL);
3699 
3700 	copy_strategy = src_object->copy_strategy;
3701 
3702 	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY) {
3703 		vm_object_lock_shared(src_object);
3704 		object_lock_shared = TRUE;
3705 	} else {
3706 		vm_object_lock(src_object);
3707 	}
3708 
3709 	/*
3710 	 *	The copy strategy is only valid if the memory manager
3711 	 *	is "ready". Internal objects are always ready.
3712 	 */
3713 
3714 	while (!src_object->internal && !src_object->pager_ready) {
3715 		wait_result_t wait_result;
3716 
3717 		if (object_lock_shared == TRUE) {
3718 			vm_object_unlock(src_object);
3719 			vm_object_lock(src_object);
3720 			object_lock_shared = FALSE;
3721 			continue;
3722 		}
3723 		wait_result = vm_object_sleep(  src_object,
3724 		    VM_OBJECT_EVENT_PAGER_READY,
3725 		    interruptible);
3726 		if (wait_result != THREAD_AWAKENED) {
3727 			vm_object_unlock(src_object);
3728 			*dst_object = VM_OBJECT_NULL;
3729 			*dst_offset = 0;
3730 			*dst_needs_copy = FALSE;
3731 			return MACH_SEND_INTERRUPTED;
3732 		}
3733 	}
3734 
3735 	/*
3736 	 *	Use the appropriate copy strategy.
3737 	 */
3738 
3739 	if (copy_strategy == MEMORY_OBJECT_COPY_DELAY_FORK) {
3740 		if (forking) {
3741 			copy_strategy = MEMORY_OBJECT_COPY_DELAY;
3742 		} else {
3743 			copy_strategy = MEMORY_OBJECT_COPY_NONE;
3744 			if (object_lock_shared) {
3745 				vm_object_unlock(src_object);
3746 				vm_object_lock(src_object);
3747 				object_lock_shared = FALSE;
3748 			}
3749 		}
3750 	}
3751 
3752 	switch (copy_strategy) {
3753 	case MEMORY_OBJECT_COPY_DELAY:
3754 		*dst_object = vm_object_copy_delayed(src_object,
3755 		    src_offset, size, object_lock_shared);
3756 		if (*dst_object != VM_OBJECT_NULL) {
3757 			*dst_offset = src_offset;
3758 			*dst_needs_copy = TRUE;
3759 			result = KERN_SUCCESS;
3760 			break;
3761 		}
3762 		vm_object_lock(src_object);
3763 		OS_FALLTHROUGH; /* fall thru when delayed copy not allowed */
3764 
3765 	case MEMORY_OBJECT_COPY_NONE:
3766 		result = vm_object_copy_slowly(src_object, src_offset, size,
3767 		    interruptible, dst_object);
3768 		if (result == KERN_SUCCESS) {
3769 			*dst_offset = src_offset - vm_object_trunc_page(src_offset);
3770 			*dst_needs_copy = FALSE;
3771 		}
3772 		break;
3773 
3774 	case MEMORY_OBJECT_COPY_SYMMETRIC:
3775 		vm_object_unlock(src_object);
3776 		result = KERN_MEMORY_RESTART_COPY;
3777 		break;
3778 
3779 	default:
3780 		panic("copy_strategically: bad strategy %d for object %p",
3781 		    copy_strategy, src_object);
3782 		result = KERN_INVALID_ARGUMENT;
3783 	}
3784 	return result;
3785 }
3786 
3787 /*
3788  *	vm_object_shadow:
3789  *
3790  *	Create a new object which is backed by the
3791  *	specified existing object range.  The source
3792  *	object reference is deallocated.
3793  *
3794  *	The new object and offset into that object
3795  *	are returned in the source parameters.
3796  */
3797 boolean_t vm_object_shadow_check = TRUE;
3798 uint64_t vm_object_shadow_forced = 0;
3799 uint64_t vm_object_shadow_skipped = 0;
3800 
3801 __private_extern__ boolean_t
3802 vm_object_shadow(
3803 	vm_object_t             *object,        /* IN/OUT */
3804 	vm_object_offset_t      *offset,        /* IN/OUT */
3805 	vm_object_size_t        length,
3806 	boolean_t               always_shadow)
3807 {
3808 	vm_object_t     source;
3809 	vm_object_t     result;
3810 
3811 	source = *object;
3812 	assert(source != VM_OBJECT_NULL);
3813 	if (source == VM_OBJECT_NULL) {
3814 		return FALSE;
3815 	}
3816 
3817 	assert(source->copy_strategy == MEMORY_OBJECT_COPY_SYMMETRIC);
3818 
3819 	/*
3820 	 *	Determine if we really need a shadow.
3821 	 *
3822 	 *	If the source object is larger than what we are trying
3823 	 *	to create, then force the shadow creation even if the
3824 	 *	ref count is 1.  This will allow us to [potentially]
3825 	 *	collapse the underlying object away in the future
3826 	 *	(freeing up the extra data it might contain and that
3827 	 *	we don't need).
3828 	 */
3829 
3830 	assert(source->copy_strategy != MEMORY_OBJECT_COPY_NONE); /* Purgeable objects shouldn't have shadow objects. */
3831 
3832 	/*
3833 	 * The following optimization does not work in the context of submaps
3834 	 * (the shared region, in particular).
3835 	 * This object might have only 1 reference (in the submap) but that
3836 	 * submap can itself be mapped multiple times, so the object is
3837 	 * actually indirectly referenced more than once...
3838 	 * The caller can specify to "always_shadow" to bypass the optimization.
3839 	 * The caller can specify "always_shadow" to bypass the optimization.
3840 	if (vm_object_shadow_check &&
3841 	    source->vo_size == length &&
3842 	    source->ref_count == 1) {
3843 		if (always_shadow) {
3844 			vm_object_shadow_forced++;
3845 		} else {
3846 			/*
3847 			 * Lock the object and check again.
3848 			 * We also check to see if there's
3849 			 * a shadow or copy object involved.
3850 			 * We can't do that earlier because
3851 			 * without the object locked, there
3852 			 * could be a collapse and the chain
3853 			 * gets modified leaving us with an
3854 			 * invalid pointer.
3855 			 */
3856 			vm_object_lock(source);
3857 			if (source->vo_size == length &&
3858 			    source->ref_count == 1 &&
3859 			    (source->shadow == VM_OBJECT_NULL ||
3860 			    source->shadow->vo_copy == VM_OBJECT_NULL)) {
3861 				source->shadowed = FALSE;
3862 				vm_object_unlock(source);
3863 				vm_object_shadow_skipped++;
3864 				return FALSE;
3865 			}
3866 			/* things changed while we were locking "source"... */
3867 			vm_object_unlock(source);
3868 		}
3869 	}
3870 
3871 	/*
3872 	 * *offset is the map entry's offset into the VM object and
3873 	 * is aligned to the map's page size.
3874 	 * VM objects need to be aligned to the system's page size.
3875 	 * Record the necessary adjustment and re-align the offset so
3876 	 * that result->vo_shadow_offset is properly page-aligned.
3877 	 */
3878 	vm_object_offset_t offset_adjustment;
3879 	offset_adjustment = *offset - vm_object_trunc_page(*offset);
3880 	length = vm_object_round_page(length + offset_adjustment);
3881 	*offset = vm_object_trunc_page(*offset);
3882 
3883 	/*
3884 	 *	Allocate a new object with the given length
3885 	 */
3886 
3887 	if ((result = vm_object_allocate(length)) == VM_OBJECT_NULL) {
3888 		panic("vm_object_shadow: no object for shadowing");
3889 	}
3890 
3891 	/*
3892 	 *	The new object shadows the source object, adding
3893 	 *	a reference to it.  Our caller changes his reference
3894 	 *	to point to the new object, removing a reference to
3895 	 *	the source object.  Net result: no change of reference
3896 	 *	count.
3897 	 */
3898 	result->shadow = source;
3899 
3900 	/*
3901 	 *	Store the offset into the source object,
3902 	 *	and fix up the offset into the new object.
3903 	 */
3904 
3905 	result->vo_shadow_offset = *offset;
3906 	assertf(page_aligned(result->vo_shadow_offset),
3907 	    "result %p shadow offset 0x%llx",
3908 	    result, result->vo_shadow_offset);
3909 
3910 	/*
3911 	 *	Return the new things
3912 	 */
3913 
3914 	*offset = 0;
3915 	if (offset_adjustment) {
3916 		/*
3917 		 * Make the map entry point to the equivalent offset
3918 		 * in the new object.
3919 		 */
3920 		DEBUG4K_COPY("adjusting offset @ %p from 0x%llx to 0x%llx for object %p length: 0x%llx\n", offset, *offset, *offset + offset_adjustment, result, length);
3921 		*offset += offset_adjustment;
3922 	}
3923 	*object = result;
3924 	return TRUE;
3925 }
3926 
3927 /*
3928  *	The relationship between vm_object structures and
3929  *	the memory_object requires careful synchronization.
3930  *
3931  *	All associations are created by memory_object_create_named
3932  *  for external pagers and vm_object_compressor_pager_create for internal
3933  *  objects as follows:
3934  *
3935  *		pager:	the memory_object itself, supplied by
3936  *			the user requesting a mapping (or the kernel,
3937  *			when initializing internal objects); the
3938  *			kernel simulates holding send rights by keeping
3939  *			a port reference;
3940  *
3941  *		pager_request:
3942  *			the memory object control port,
3943  *			created by the kernel; the kernel holds
3944  *			receive (and ownership) rights to this
3945  *			port, but no other references.
3946  *
3947  *	When initialization is complete, the "initialized" field
3948  *	is asserted.  Other mappings using a particular memory object,
3949  *	and any references to the vm_object gained through the
3950  *	port association must wait for this initialization to occur.
3951  *
3952  *	In order to allow the memory manager to set attributes before
3953  *	requests (notably virtual copy operations, but also data or
3954  *	unlock requests) are made, a "ready" attribute is made available.
3955  *	Only the memory manager may affect the value of this attribute.
3956  *	Its value does not affect critical kernel functions, such as
3957  *	internal object initialization or destruction.  [Furthermore,
3958  *	memory objects created by the kernel are assumed to be ready
3959  *	immediately; the default memory manager need not explicitly
3960  *	set the "ready" attribute.]
3961  *
3962  *	[Both the "initialized" and "ready" attribute wait conditions
3963  *	use the "pager" field as the wait event.]
3964  *
3965  *	The port associations can be broken down by any of the
3966  *	following routines:
3967  *		vm_object_terminate:
3968  *			No references to the vm_object remain, and
3969  *			the object cannot (or will not) be cached.
3970  *			This is the normal case, and is done even
3971  *			though one of the other cases has already been
3972  *			done.
3973  *		memory_object_destroy:
3974  *			The memory manager has requested that the
3975  *			kernel relinquish references to the memory
3976  *			object. [The memory manager may not want to
3977  *			destroy the memory object, but may wish to
3978  *			refuse or tear down existing memory mappings.]
3979  *
3980  *	Each routine that breaks an association must break all of
3981  *	them at once.  At some later time, that routine must clear
3982  *	the pager field and release the memory object references.
3983  *	[Furthermore, each routine must cope with the simultaneous
3984  *	or previous operations of the others.]
3985  *
3986  *	Because the pager field may be cleared spontaneously, it
3987  *	cannot be used to determine whether a memory object has
3988  *	ever been associated with a particular vm_object.  [This
3989  *	knowledge is important to the shadow object mechanism.]
3990  *	For this reason, an additional "created" attribute is
3991  *	provided.
3992  *
3993  *	During various paging operations, the pager reference found in the
3994  *	vm_object must be valid.  To prevent this from being released,
3995  *	(other than being removed, i.e., made null), routines may use
3996  *	the vm_object_paging_begin/end routines [actually, macros].
3997  *	The implementation uses the "paging_in_progress" and "wanted" fields.
3998  *	[Operations that alter the validity of the pager values include the
3999  *	termination routines and vm_object_collapse.]
4000  */
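/*
 *	In outline, the pager setup sequence implemented below is:
 *
 *	    pager_created = TRUE      (association reserved, object unlocked)
 *	    memory_object_init(pager, control, PAGE_SIZE)
 *	    pager_initialized = TRUE  -> wakeup(VM_OBJECT_EVENT_INITIALIZED)
 *	    pager_ready = TRUE        -> wakeup(VM_OBJECT_EVENT_PAGER_READY)
 *	                                 (immediately for internal objects;
 *	                                  otherwise when the memory manager
 *	                                  declares itself ready)
 *
 *	Waiters such as vm_object_compressor_pager_create() sleep on
 *	VM_OBJECT_EVENT_INITIALIZED; vm_object_copy_strategically() sleeps
 *	on VM_OBJECT_EVENT_PAGER_READY.
 */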
4001 
4002 
4003 /*
4004  *	Routine:	vm_object_memory_object_associate
4005  *	Purpose:
4006  *		Associate a VM object to the given pager.
4007  *		If a VM object is not provided, create one.
4008  *		Initialize the pager.
4009  */
4010 vm_object_t
4011 vm_object_memory_object_associate(
4012 	memory_object_t         pager,
4013 	vm_object_t             object,
4014 	vm_object_size_t        size,
4015 	boolean_t               named)
4016 {
4017 	memory_object_control_t control;
4018 
4019 	assert(pager != MEMORY_OBJECT_NULL);
4020 
4021 	if (object != VM_OBJECT_NULL) {
4022 		assert(object->internal);
4023 		assert(object->pager_created);
4024 		assert(!object->pager_initialized);
4025 		assert(!object->pager_ready);
4026 		assert(object->pager_trusted);
4027 	} else {
4028 		object = vm_object_allocate(size);
4029 		assert(object != VM_OBJECT_NULL);
4030 		object->internal = FALSE;
4031 		object->pager_trusted = FALSE;
4032 		/* copy strategy invalid until set by memory manager */
4033 		object->copy_strategy = MEMORY_OBJECT_COPY_INVALID;
4034 	}
4035 
4036 	/*
4037 	 *	Allocate request port.
4038 	 */
4039 
4040 	control = memory_object_control_allocate(object);
4041 	assert(control != MEMORY_OBJECT_CONTROL_NULL);
4042 
4043 	vm_object_lock(object);
4044 
4045 	assert(!object->pager_ready);
4046 	assert(!object->pager_initialized);
4047 	assert(object->pager == NULL);
4048 	assert(object->pager_control == NULL);
4049 
4050 	/*
4051 	 *	Copy the reference we were given.
4052 	 */
4053 
4054 	memory_object_reference(pager);
4055 	object->pager_created = TRUE;
4056 	object->pager = pager;
4057 	object->pager_control = control;
4058 	object->pager_ready = FALSE;
4059 
4060 	vm_object_unlock(object);
4061 
4062 	/*
4063 	 *	Let the pager know we're using it.
4064 	 */
4065 
4066 	(void) memory_object_init(pager,
4067 	    object->pager_control,
4068 	    PAGE_SIZE);
4069 
4070 	vm_object_lock(object);
4071 	if (named) {
4072 		object->named = TRUE;
4073 	}
4074 	if (object->internal) {
4075 		object->pager_ready = TRUE;
4076 		vm_object_wakeup(object, VM_OBJECT_EVENT_PAGER_READY);
4077 	}
4078 
4079 	object->pager_initialized = TRUE;
4080 	vm_object_wakeup(object, VM_OBJECT_EVENT_INITIALIZED);
4081 
4082 	vm_object_unlock(object);
4083 
4084 	return object;
4085 }
4086 
4087 /*
4088  *	Routine:	vm_object_compressor_pager_create
4089  *	Purpose:
4090  *		Create a memory object for an internal object.
4091  *	In/out conditions:
4092  *		The object is locked on entry and exit;
4093  *		it may be unlocked within this call.
4094  *	Limitations:
4095  *		Only one thread may be performing a
4096  *		vm_object_compressor_pager_create on an object at
4097  *		a time.  Presumably, only the pageout
4098  *		daemon will be using this routine.
4099  */
4100 
4101 void
4102 vm_object_compressor_pager_create(
4103 	vm_object_t     object)
4104 {
4105 	memory_object_t         pager;
4106 	vm_object_t             pager_object = VM_OBJECT_NULL;
4107 
4108 	assert(!is_kernel_object(object));
4109 
4110 	/*
4111 	 *	Prevent collapse or termination by holding a paging reference
4112 	 */
4113 
4114 	vm_object_paging_begin(object);
4115 	if (object->pager_created) {
4116 		/*
4117 		 *	Someone else got to it first...
4118 		 *	wait for them to finish initializing the ports
4119 		 */
4120 		while (!object->pager_initialized) {
4121 			vm_object_sleep(object,
4122 			    VM_OBJECT_EVENT_INITIALIZED,
4123 			    THREAD_UNINT);
4124 		}
4125 		vm_object_paging_end(object);
4126 		return;
4127 	}
4128 
4129 	if ((uint32_t) (object->vo_size / PAGE_SIZE) !=
4130 	    (object->vo_size / PAGE_SIZE)) {
4131 #if DEVELOPMENT || DEBUG
4132 		printf("vm_object_compressor_pager_create(%p): "
4133 		    "object size 0x%llx >= 0x%llx\n",
4134 		    object,
4135 		    (uint64_t) object->vo_size,
4136 		    0x0FFFFFFFFULL * PAGE_SIZE);
4137 #endif /* DEVELOPMENT || DEBUG */
4138 		vm_object_paging_end(object);
4139 		return;
4140 	}
4141 
4142 	/*
4143 	 *	Indicate that a memory object has been assigned
4144 	 *	before dropping the lock, to prevent a race.
4145 	 */
4146 
4147 	object->pager_created = TRUE;
4148 	object->pager_trusted = TRUE;
4149 	object->paging_offset = 0;
4150 
4151 	vm_object_unlock(object);
4152 
4153 	/*
4154 	 *	Create the [internal] pager, and associate it with this object.
4155 	 *
4156 	 *	We make the association here so that vm_object_enter()
4157 	 *      can look up the object to complete initializing it.  No
4158 	 *	user will ever map this object.
4159 	 */
4160 	{
4161 		/* create our new memory object */
4162 		assert((uint32_t) (object->vo_size / PAGE_SIZE) ==
4163 		    (object->vo_size / PAGE_SIZE));
4164 		(void) compressor_memory_object_create(
4165 			(memory_object_size_t) object->vo_size,
4166 			&pager);
4167 		if (pager == NULL) {
4168 			panic("vm_object_compressor_pager_create(): "
4169 			    "no pager for object %p size 0x%llx\n",
4170 			    object, (uint64_t) object->vo_size);
4171 		}
4172 	}
4173 
4174 	/*
4175 	 *	A reference was returned by
4176 	 *	memory_object_create(), and it is
4177 	 *	copied by vm_object_memory_object_associate().
4178 	 */
4179 
4180 	pager_object = vm_object_memory_object_associate(pager,
4181 	    object,
4182 	    object->vo_size,
4183 	    FALSE);
4184 	if (pager_object != object) {
4185 		panic("vm_object_compressor_pager_create: mismatch (pager: %p, pager_object: %p, orig_object: %p, orig_object size: 0x%llx)", pager, pager_object, object, (uint64_t) object->vo_size);
4186 	}
4187 
4188 	/*
4189 	 *	Drop the reference we were passed.
4190 	 */
4191 	memory_object_deallocate(pager);
4192 
4193 	vm_object_lock(object);
4194 
4195 	/*
4196 	 *	Release the paging reference
4197 	 */
4198 	vm_object_paging_end(object);
4199 }
4200 
4201 /*
4202  *	Global variables for vm_object_collapse():
4203  *
4204  *		Counts for normal collapses and bypasses.
4205  *		Debugging variables, to watch or disable collapse.
4206  */
4207 static long     object_collapses = 0;
4208 static long     object_bypasses  = 0;
4209 
4210 static boolean_t        vm_object_collapse_allowed = TRUE;
4211 static boolean_t        vm_object_bypass_allowed = TRUE;
4212 
4213 void vm_object_do_collapse_compressor(vm_object_t object,
4214     vm_object_t backing_object);
4215 void
4216 vm_object_do_collapse_compressor(
4217 	vm_object_t object,
4218 	vm_object_t backing_object)
4219 {
4220 	vm_object_offset_t new_offset, backing_offset;
4221 	vm_object_size_t size;
4222 
4223 	vm_counters.do_collapse_compressor++;
4224 
4225 	vm_object_lock_assert_exclusive(object);
4226 	vm_object_lock_assert_exclusive(backing_object);
4227 
4228 	size = object->vo_size;
4229 
4230 	/*
4231 	 *	Move all compressed pages from backing_object
4232 	 *	to the parent.
4233 	 */
4234 
4235 	for (backing_offset = object->vo_shadow_offset;
4236 	    backing_offset < object->vo_shadow_offset + object->vo_size;
4237 	    backing_offset += PAGE_SIZE) {
4238 		memory_object_offset_t backing_pager_offset;
4239 
4240 		/* find the next compressed page at or after this offset */
4241 		backing_pager_offset = (backing_offset +
4242 		    backing_object->paging_offset);
4243 		backing_pager_offset = vm_compressor_pager_next_compressed(
4244 			backing_object->pager,
4245 			backing_pager_offset);
4246 		if (backing_pager_offset == (memory_object_offset_t) -1) {
4247 			/* no more compressed pages */
4248 			break;
4249 		}
4250 		backing_offset = (backing_pager_offset -
4251 		    backing_object->paging_offset);
4252 
4253 		new_offset = backing_offset - object->vo_shadow_offset;
4254 
4255 		if (new_offset >= object->vo_size) {
4256 			/* we're out of the scope of "object": done */
4257 			break;
4258 		}
4259 
4260 		if ((vm_page_lookup(object, new_offset) != VM_PAGE_NULL) ||
4261 		    (vm_compressor_pager_state_get(object->pager,
4262 		    (new_offset +
4263 		    object->paging_offset)) ==
4264 		    VM_EXTERNAL_STATE_EXISTS)) {
4265 			/*
4266 			 * This page already exists in object, resident or
4267 			 * compressed.
4268 			 * We don't need this compressed page in backing_object
4269 			 * and it will be reclaimed when we release
4270 			 * backing_object.
4271 			 */
4272 			continue;
4273 		}
4274 
4275 		/*
4276 		 * backing_object has this page in the VM compressor and
4277 		 * we need to transfer it to object.
4278 		 */
4279 		vm_counters.do_collapse_compressor_pages++;
4280 		vm_compressor_pager_transfer(
4281 			/* destination: */
4282 			object->pager,
4283 			(new_offset + object->paging_offset),
4284 			/* source: */
4285 			backing_object->pager,
4286 			(backing_offset + backing_object->paging_offset));
4287 	}
4288 }
4289 
4290 /*
4291  *	Routine:	vm_object_do_collapse
4292  *	Purpose:
4293  *		Collapse an object with the object backing it.
4294  *		Pages in the backing object are moved into the
4295  *		parent, and the backing object is deallocated.
4296  *	Conditions:
4297  *		Both objects and the cache are locked; the page
4298  *		queues are unlocked.
4299  *
4300  */
4301 static void
4302 vm_object_do_collapse(
4303 	vm_object_t object,
4304 	vm_object_t backing_object)
4305 {
4306 	vm_page_t p, pp;
4307 	vm_object_offset_t new_offset, backing_offset;
4308 	vm_object_size_t size;
4309 
4310 	vm_object_lock_assert_exclusive(object);
4311 	vm_object_lock_assert_exclusive(backing_object);
4312 
4313 	assert(object->purgable == VM_PURGABLE_DENY);
4314 	assert(backing_object->purgable == VM_PURGABLE_DENY);
4315 
4316 	backing_offset = object->vo_shadow_offset;
4317 	size = object->vo_size;
4318 
4319 	/*
4320 	 *	Move all in-memory pages from backing_object
4321 	 *	to the parent.  Pages that have been paged out
4322 	 *	will be overwritten by any of the parent's
4323 	 *	pages that shadow them.
4324 	 */
4325 
4326 	while (!vm_page_queue_empty(&backing_object->memq)) {
4327 		p = (vm_page_t) vm_page_queue_first(&backing_object->memq);
4328 
4329 		new_offset = (p->vmp_offset - backing_offset);
4330 
4331 		assert(!p->vmp_busy || p->vmp_absent);
4332 
4333 		/*
4334 		 *	If the parent has a page here, or if
4335 		 *	this page falls outside the parent,
4336 		 *	dispose of it.
4337 		 *
4338 		 *	Otherwise, move it as planned.
4339 		 */
4340 
4341 		if (p->vmp_offset < backing_offset || new_offset >= size) {
4342 			VM_PAGE_FREE(p);
4343 		} else {
4344 			pp = vm_page_lookup(object, new_offset);
4345 			if (pp == VM_PAGE_NULL) {
4346 				if (VM_COMPRESSOR_PAGER_STATE_GET(object,
4347 				    new_offset)
4348 				    == VM_EXTERNAL_STATE_EXISTS) {
4349 					/*
4350 					 * Parent object has this page
4351 					 * in the VM compressor.
4352 					 * Throw away the backing
4353 					 * object's page.
4354 					 */
4355 					VM_PAGE_FREE(p);
4356 				} else {
4357 					/*
4358 					 *	Parent now has no page.
4359 					 *	Move the backing object's page
4360 					 *      up.
4361 					 */
4362 					vm_page_rename(p, object, new_offset);
4363 				}
4364 			} else {
4365 				assert(!pp->vmp_absent);
4366 
4367 				/*
4368 				 *	Parent object has a real page.
4369 				 *	Throw away the backing object's
4370 				 *	page.
4371 				 */
4372 				VM_PAGE_FREE(p);
4373 			}
4374 		}
4375 	}
4376 
4377 	if (vm_object_collapse_compressor_allowed &&
4378 	    object->pager != MEMORY_OBJECT_NULL &&
4379 	    backing_object->pager != MEMORY_OBJECT_NULL) {
4380 		/* move compressed pages from backing_object to object */
4381 		vm_object_do_collapse_compressor(object, backing_object);
4382 	} else if (backing_object->pager != MEMORY_OBJECT_NULL) {
4383 		assert((!object->pager_created &&
4384 		    (object->pager == MEMORY_OBJECT_NULL)) ||
4385 		    (!backing_object->pager_created &&
4386 		    (backing_object->pager == MEMORY_OBJECT_NULL)));
4387 		/*
4388 		 *	Move the pager from backing_object to object.
4389 		 *
4390 		 *	XXX We're only using part of the paging space
4391 		 *	for keeps now... we ought to discard the
4392 		 *	unused portion.
4393 		 */
4394 
4395 		assert(!object->paging_in_progress);
4396 		assert(!object->activity_in_progress);
4397 		assert(!object->pager_created);
4398 		assert(object->pager == NULL);
4399 		object->pager = backing_object->pager;
4400 
4401 		object->pager_created = backing_object->pager_created;
4402 		object->pager_control = backing_object->pager_control;
4403 		object->pager_ready = backing_object->pager_ready;
4404 		object->pager_initialized = backing_object->pager_initialized;
4405 		object->paging_offset =
4406 		    backing_object->paging_offset + backing_offset;
4407 		if (object->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
4408 			memory_object_control_collapse(&object->pager_control,
4409 			    object);
4410 		}
4411 		/* the backing_object has lost its pager: reset all fields */
4412 		backing_object->pager_created = FALSE;
4413 		backing_object->pager_control = NULL;
4414 		backing_object->pager_ready = FALSE;
4415 		backing_object->paging_offset = 0;
4416 		backing_object->pager = NULL;
4417 	}
4418 	/*
4419 	 *	Object now shadows whatever backing_object did.
4420 	 *	Note that the reference to backing_object->shadow
4421 	 *	moves from within backing_object to within object.
4422 	 */
4423 
4424 	assert(!object->phys_contiguous);
4425 	assert(!backing_object->phys_contiguous);
4426 	object->shadow = backing_object->shadow;
4427 	if (object->shadow) {
4428 		assertf(page_aligned(object->vo_shadow_offset),
4429 		    "object %p shadow_offset 0x%llx",
4430 		    object, object->vo_shadow_offset);
4431 		assertf(page_aligned(backing_object->vo_shadow_offset),
4432 		    "backing_object %p shadow_offset 0x%llx",
4433 		    backing_object, backing_object->vo_shadow_offset);
4434 		object->vo_shadow_offset += backing_object->vo_shadow_offset;
4435 		/* "backing_object" gave its shadow to "object" */
4436 		backing_object->shadow = VM_OBJECT_NULL;
4437 		backing_object->vo_shadow_offset = 0;
4438 	} else {
4439 		/* no shadow, therefore no shadow offset... */
4440 		object->vo_shadow_offset = 0;
4441 	}
4442 	assert((object->shadow == VM_OBJECT_NULL) ||
4443 	    (object->shadow->vo_copy != backing_object));
4444 
4445 	/*
4446 	 *	Discard backing_object.
4447 	 *
4448 	 *	Since the backing object has no pages, no
4449 	 *	pager left, and no object references within it,
4450 	 *	all that is necessary is to dispose of it.
4451 	 */
4452 	object_collapses++;
4453 
4454 	assert(backing_object->ref_count == 1);
4455 	assert(backing_object->resident_page_count == 0);
4456 	assert(backing_object->paging_in_progress == 0);
4457 	assert(backing_object->activity_in_progress == 0);
4458 	assert(backing_object->shadow == VM_OBJECT_NULL);
4459 	assert(backing_object->vo_shadow_offset == 0);
4460 
4461 	if (backing_object->pager != MEMORY_OBJECT_NULL) {
4462 		/* ... unless it has a pager; need to terminate pager too */
4463 		vm_counters.do_collapse_terminate++;
4464 		if (vm_object_terminate(backing_object) != KERN_SUCCESS) {
4465 			vm_counters.do_collapse_terminate_failure++;
4466 		}
4467 		return;
4468 	}
4469 
4470 	assert(backing_object->pager == NULL);
4471 
4472 	backing_object->alive = FALSE;
4473 	vm_object_unlock(backing_object);
4474 
4475 #if VM_OBJECT_TRACKING
4476 	if (vm_object_tracking_btlog) {
4477 		btlog_erase(vm_object_tracking_btlog, backing_object);
4478 	}
4479 #endif /* VM_OBJECT_TRACKING */
4480 
4481 	vm_object_lock_destroy(backing_object);
4482 
4483 	zfree(vm_object_zone, backing_object);
4484 }
4485 
4486 static void
4487 vm_object_do_bypass(
4488 	vm_object_t object,
4489 	vm_object_t backing_object)
4490 {
4491 	/*
4492 	 *	Make the parent shadow the next object
4493 	 *	in the chain.
4494 	 */
4495 
4496 	vm_object_lock_assert_exclusive(object);
4497 	vm_object_lock_assert_exclusive(backing_object);
4498 
4499 	vm_object_reference(backing_object->shadow);
4500 
4501 	assert(!object->phys_contiguous);
4502 	assert(!backing_object->phys_contiguous);
4503 	object->shadow = backing_object->shadow;
4504 	if (object->shadow) {
4505 		assertf(page_aligned(object->vo_shadow_offset),
4506 		    "object %p shadow_offset 0x%llx",
4507 		    object, object->vo_shadow_offset);
4508 		assertf(page_aligned(backing_object->vo_shadow_offset),
4509 		    "backing_object %p shadow_offset 0x%llx",
4510 		    backing_object, backing_object->vo_shadow_offset);
4511 		object->vo_shadow_offset += backing_object->vo_shadow_offset;
4512 	} else {
4513 		/* no shadow, therefore no shadow offset... */
4514 		object->vo_shadow_offset = 0;
4515 	}
4516 
4517 	/*
4518 	 *	Backing object might have had a copy pointer
4519 	 *	to us.  If it did, clear it.
4520 	 */
4521 	if (backing_object->vo_copy == object) {
4522 		backing_object->vo_copy = VM_OBJECT_NULL;
4523 	}
4524 
4525 	/*
4526 	 *	Drop the reference count on backing_object.
4527 	 #if	TASK_SWAPPER
4528 	 *	Since its ref_count was at least 2, it
4529 	 *	will not vanish; so we don't need to call
4530 	 *	vm_object_deallocate.
4531 	 *	[with a caveat for "named" objects]
4532 	 *
4533 	 *	The res_count on the backing object is
4534 	 *	conditionally decremented.  It's possible
4535 	 *	(via vm_pageout_scan) to get here with
4536 	 *	a "swapped" object, which has a 0 res_count,
4537 	 *	in which case, the backing object res_count
4538 	 *	is already down by one.
4539 	 #else
4540 	 *	Don't call vm_object_deallocate unless
4541 	 *	ref_count drops to zero.
4542 	 *
4543 	 *	The ref_count can drop to zero here if the
4544 	 *	backing object could be bypassed but not
4545 	 *	collapsed, such as when the backing object
4546 	 *	is temporary and cachable.
4547 	 #endif
4548 	 */
4549 	if (backing_object->ref_count > 2 ||
4550 	    (!backing_object->named && backing_object->ref_count > 1)) {
4551 		vm_object_lock_assert_exclusive(backing_object);
4552 		backing_object->ref_count--;
4553 		vm_object_unlock(backing_object);
4554 	} else {
4555 		/*
4556 		 *	Drop locks so that we can deallocate
4557 		 *	the backing object.
4558 		 */
4559 
4560 		/*
4561 		 * vm_object_collapse (the caller of this function) is
4562 		 * now called from contexts that may not guarantee that a
4563 		 * valid reference is held on the object... w/o a valid
4564 		 * reference, it is unsafe and unwise (you will definitely
4565 		 * regret it) to unlock the object and then retake the lock
4566 		 * since the object may be terminated and recycled in between.
4567 		 * The "activity_in_progress" reference will keep the object
4568 		 * 'stable'.
4569 		 */
4570 		vm_object_activity_begin(object);
4571 		vm_object_unlock(object);
4572 
4573 		vm_object_unlock(backing_object);
4574 		vm_object_deallocate(backing_object);
4575 
4576 		/*
4577 		 *	Relock object. We don't have to reverify
4578 		 *	its state since vm_object_collapse will
4579 		 *	do that for us as it starts at the
4580 		 *	top of its loop.
4581 		 */
4582 
4583 		vm_object_lock(object);
4584 		vm_object_activity_end(object);
4585 	}
4586 
4587 	object_bypasses++;
4588 }
4589 
4590 
4591 /*
4592  *	vm_object_collapse:
4593  *
4594  *	Perform an object collapse or an object bypass if appropriate.
4595  *	The real work of collapsing and bypassing is performed in
4596  *	the routines vm_object_do_collapse and vm_object_do_bypass.
4597  *
4598  *	Requires that the object be locked and the page queues be unlocked.
4599  *
4600  */
4601 static unsigned long vm_object_collapse_calls = 0;
4602 static unsigned long vm_object_collapse_objects = 0;
4603 static unsigned long vm_object_collapse_do_collapse = 0;
4604 static unsigned long vm_object_collapse_do_bypass = 0;
4605 
4606 __private_extern__ void
4607 vm_object_collapse(
4608 	vm_object_t                             object,
4609 	vm_object_offset_t                      hint_offset,
4610 	boolean_t                               can_bypass)
4611 {
4612 	vm_object_t                             backing_object;
4613 	vm_object_size_t                        object_vcount, object_rcount;
4614 	vm_object_t                             original_object;
4615 	int                                     object_lock_type;
4616 	int                                     backing_object_lock_type;
4617 
4618 	vm_object_collapse_calls++;
4619 
4620 	assertf(page_aligned(hint_offset), "hint_offset 0x%llx", hint_offset);
4621 
4622 	if (!vm_object_collapse_allowed &&
4623 	    !(can_bypass && vm_object_bypass_allowed)) {
4624 		return;
4625 	}
4626 
4627 	if (object == VM_OBJECT_NULL) {
4628 		return;
4629 	}
4630 
4631 	original_object = object;
4632 
4633 	/*
4634 	 * The top object was locked "exclusive" by the caller.
4635 	 * In the first pass, to determine if we can collapse the shadow chain,
4636 	 * take a "shared" lock on the shadow objects.  If we can collapse,
4637 	 * we'll have to go down the chain again with exclusive locks.
4638 	 */
4639 	object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4640 	backing_object_lock_type = OBJECT_LOCK_SHARED;
4641 
4642 retry:
4643 	object = original_object;
4644 	vm_object_lock_assert_exclusive(object);
4645 
4646 	while (TRUE) {
4647 		vm_object_collapse_objects++;
4648 		/*
4649 		 *	Verify that the conditions are right for either
4650 		 *	collapse or bypass:
4651 		 */
4652 
4653 		/*
4654 		 *	There is a backing object, and
4655 		 */
4656 
4657 		backing_object = object->shadow;
4658 		if (backing_object == VM_OBJECT_NULL) {
4659 			if (object != original_object) {
4660 				vm_object_unlock(object);
4661 			}
4662 			return;
4663 		}
4664 		if (backing_object_lock_type == OBJECT_LOCK_SHARED) {
4665 			vm_object_lock_shared(backing_object);
4666 		} else {
4667 			vm_object_lock(backing_object);
4668 		}
4669 
4670 		/*
4671 		 *	No pages in the object are currently
4672 		 *	being paged out, and
4673 		 */
4674 		if (object->paging_in_progress != 0 ||
4675 		    object->activity_in_progress != 0) {
4676 			/* try and collapse the rest of the shadow chain */
4677 			if (object != original_object) {
4678 				vm_object_unlock(object);
4679 			}
4680 			object = backing_object;
4681 			object_lock_type = backing_object_lock_type;
4682 			continue;
4683 		}
4684 
4685 		/*
4686 		 *	...
4687 		 *		The backing object is not read_only,
4688 		 *		and no pages in the backing object are
4689 		 *		currently being paged out.
4690 		 *		The backing object is internal.
4691 		 *
4692 		 */
4693 
4694 		if (!backing_object->internal ||
4695 		    backing_object->paging_in_progress != 0 ||
4696 		    backing_object->activity_in_progress != 0) {
4697 			/* try and collapse the rest of the shadow chain */
4698 			if (object != original_object) {
4699 				vm_object_unlock(object);
4700 			}
4701 			object = backing_object;
4702 			object_lock_type = backing_object_lock_type;
4703 			continue;
4704 		}
4705 
4706 		/*
4707 		 * Purgeable objects are not supposed to engage in
4708 		 * copy-on-write activities, so should not have
4709 		 * any shadow objects or be a shadow object to another
4710 		 * object.
4711 		 * Collapsing a purgeable object would require some
4712 		 * updates to the purgeable compressed ledgers.
4713 		 */
4714 		if (object->purgable != VM_PURGABLE_DENY ||
4715 		    backing_object->purgable != VM_PURGABLE_DENY) {
4716 			panic("vm_object_collapse() attempting to collapse "
4717 			    "purgeable object: %p(%d) %p(%d)\n",
4718 			    object, object->purgable,
4719 			    backing_object, backing_object->purgable);
4720 			/* try and collapse the rest of the shadow chain */
4721 			if (object != original_object) {
4722 				vm_object_unlock(object);
4723 			}
4724 			object = backing_object;
4725 			object_lock_type = backing_object_lock_type;
4726 			continue;
4727 		}
4728 
4729 		/*
4730 		 *	The backing object can't be a copy-object:
4731 		 *	the shadow_offset for the copy-object must stay
4732 		 *	as 0.  Furthermore (for the 'we have all the
4733 		 *	pages' case), if we bypass backing_object and
4734 		 *	just shadow the next object in the chain, old
4735 		 *	pages from that object would then have to be copied
4736 		 *	BOTH into the (former) backing_object and into the
4737 		 *	parent object.
4738 		 */
4739 		if (backing_object->shadow != VM_OBJECT_NULL &&
4740 		    backing_object->shadow->vo_copy == backing_object) {
4741 			/* try and collapse the rest of the shadow chain */
4742 			if (object != original_object) {
4743 				vm_object_unlock(object);
4744 			}
4745 			object = backing_object;
4746 			object_lock_type = backing_object_lock_type;
4747 			continue;
4748 		}
4749 
4750 		/*
4751 		 *	We can now try to either collapse the backing
4752 		 *	object (if the parent is the only reference to
4753 		 *	it) or (perhaps) remove the parent's reference
4754 		 *	to it.
4755 		 *
4756 		 *	If there is exactly one reference to the backing
4757 		 *	object, we may be able to collapse it into the
4758 		 *	parent.
4759 		 *
4760 		 *	As long as one of the objects is still not known
4761 		 *	to the pager, we can collapse them.
4762 		 */
4763 		if (backing_object->ref_count == 1 &&
4764 		    (vm_object_collapse_compressor_allowed ||
4765 		    !object->pager_created
4766 		    || (!backing_object->pager_created)
4767 		    ) && vm_object_collapse_allowed) {
4768 			/*
4769 			 * We need the exclusive lock on the VM objects.
4770 			 */
4771 			if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
4772 				/*
4773 				 * We have an object and its shadow locked
4774 				 * "shared".  We can't just upgrade the locks
4775 				 * to "exclusive", as some other thread might
4776 				 * also have these objects locked "shared" and
4777 				 * attempt to upgrade one or the other to
4778 				 * "exclusive".  The upgrades would block
4779 				 * forever waiting for the other "shared" locks
4780 				 * to get released.
4781 				 * So we have to release the locks and go
4782 				 * down the shadow chain again (since it could
4783 				 * have changed) with "exclusive" locking.
4784 				 */
4785 				vm_object_unlock(backing_object);
4786 				if (object != original_object) {
4787 					vm_object_unlock(object);
4788 				}
4789 				object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4790 				backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
4791 				goto retry;
4792 			}
4793 
4794 			/*
4795 			 *	Collapse the object with its backing
4796 			 *	object, and try again with the object's
4797 			 *	new backing object.
4798 			 */
4799 
4800 			vm_object_do_collapse(object, backing_object);
4801 			vm_object_collapse_do_collapse++;
4802 			continue;
4803 		}
4804 
4805 		/*
4806 		 *	Collapsing the backing object was not possible
4807 		 *	or permitted, so let's try bypassing it.
4808 		 */
4809 
4810 		if (!(can_bypass && vm_object_bypass_allowed)) {
4811 			/* try and collapse the rest of the shadow chain */
4812 			if (object != original_object) {
4813 				vm_object_unlock(object);
4814 			}
4815 			object = backing_object;
4816 			object_lock_type = backing_object_lock_type;
4817 			continue;
4818 		}
4819 
4820 
4821 		/*
4822 		 *	If the object doesn't have all its pages present,
4823 		 *	we have to make sure no pages in the backing object
4824 		 *	"show through" before bypassing it.
4825 		 */
4826 		object_vcount = object->vo_size >> PAGE_SHIFT;
4827 		object_rcount = (vm_object_size_t)object->resident_page_count;
4828 
4829 		if (object_rcount != object_vcount) {
4830 			vm_object_offset_t      offset;
4831 			vm_object_offset_t      backing_offset;
4832 			vm_object_size_t        backing_rcount, backing_vcount;
4833 
4834 			/*
4835 			 *	If the backing object has a pager but no pagemap,
4836 			 *	then we cannot bypass it, because we don't know
4837 			 *	what pages it has.
4838 			 */
4839 			if (backing_object->pager_created) {
4840 				/* try and collapse the rest of the shadow chain */
4841 				if (object != original_object) {
4842 					vm_object_unlock(object);
4843 				}
4844 				object = backing_object;
4845 				object_lock_type = backing_object_lock_type;
4846 				continue;
4847 			}
4848 
4849 			/*
4850 			 *	If the object has a pager but no pagemap,
4851 			 *	then we cannot bypass it, because we don't know
4852 			 *	what pages it has.
4853 			 */
4854 			if (object->pager_created) {
4855 				/* try and collapse the rest of the shadow chain */
4856 				if (object != original_object) {
4857 					vm_object_unlock(object);
4858 				}
4859 				object = backing_object;
4860 				object_lock_type = backing_object_lock_type;
4861 				continue;
4862 			}
4863 
4864 			backing_offset = object->vo_shadow_offset;
4865 			backing_vcount = backing_object->vo_size >> PAGE_SHIFT;
4866 			backing_rcount = (vm_object_size_t)backing_object->resident_page_count;
4867 			assert(backing_vcount >= object_vcount);
4868 
4869 			if (backing_rcount > (backing_vcount - object_vcount) &&
4870 			    backing_rcount - (backing_vcount - object_vcount) > object_rcount) {
4871 				/*
4872 				 * we have enough pages in the backing object to guarantee that
4873 				 * at least 1 of them must be 'uncovered' by a resident page
4874 				 * in the object we're evaluating, so move on and
4875 				 * try to collapse the rest of the shadow chain
4876 				 */
4877 				if (object != original_object) {
4878 					vm_object_unlock(object);
4879 				}
4880 				object = backing_object;
4881 				object_lock_type = backing_object_lock_type;
4882 				continue;
4883 			}
4884 
4885 			/*
4886 			 *	If all of the pages in the backing object are
4887 			 *	shadowed by the parent object, the parent
4888 			 *	object no longer has to shadow the backing
4889 			 *	object; it can shadow the next one in the
4890 			 *	chain.
4891 			 *
4892 			 *	If the backing object has existence info,
4893 	 *	we must examine its existence info
4894 			 *	as well.
4895 			 *
4896 			 */
4897 
4898 #define EXISTS_IN_OBJECT(obj, off, rc)                  \
4899 	((VM_COMPRESSOR_PAGER_STATE_GET((obj), (off))   \
4900 	  == VM_EXTERNAL_STATE_EXISTS) ||               \
4901 	 ((rc) && vm_page_lookup((obj), (off)) != VM_PAGE_NULL && (rc)--))
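			/*
			 * EXISTS_IN_OBJECT is TRUE if the page at "off" is
			 * present in "obj", either compressed or resident.
			 * On a resident hit it decrements "rc" as a side
			 * effect, so a count of remaining resident pages can
			 * short-circuit further lookups once every resident
			 * page has been accounted for.
			 */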
4902 
4903 			/*
4904 			 * Check the hint location first
4905 			 * (since it is often the quickest way out of here).
4906 			 */
4907 			if (object->cow_hint != ~(vm_offset_t)0) {
4908 				hint_offset = (vm_object_offset_t)object->cow_hint;
4909 			} else {
4910 				hint_offset = (hint_offset > 8 * PAGE_SIZE_64) ?
4911 				    (hint_offset - 8 * PAGE_SIZE_64) : 0;
4912 			}
4913 
4914 			if (EXISTS_IN_OBJECT(backing_object, hint_offset +
4915 			    backing_offset, backing_rcount) &&
4916 			    !EXISTS_IN_OBJECT(object, hint_offset, object_rcount)) {
4917 				/* dependency right at the hint */
4918 				object->cow_hint = (vm_offset_t) hint_offset; /* atomic */
4919 				/* try and collapse the rest of the shadow chain */
4920 				if (object != original_object) {
4921 					vm_object_unlock(object);
4922 				}
4923 				object = backing_object;
4924 				object_lock_type = backing_object_lock_type;
4925 				continue;
4926 			}
4927 
4928 			/*
4929 			 * If the object's window onto the backing_object
4930 			 * is large compared to the number of resident
4931 			 * pages in the backing object, it makes sense to
4932 			 * walk the backing_object's resident pages first.
4933 			 *
4934 			 * NOTE: Pages may be in the existence map and/or
4935 			 * resident, so if we don't find a dependency while
4936 			 * walking the backing object's resident page list
4937 			 * directly, and there is an existence map, we'll have
4938 			 * to run the offset based 2nd pass.  Because we may
4939 			 * have to run both passes, we need to be careful
4940 			 * not to decrement 'rcount' in the 1st pass
4941 			 */
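			/* Pass 1: walk backing_object's resident page list directly */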
4942 			if (backing_rcount && backing_rcount < (object_vcount / 8)) {
4943 				vm_object_size_t rc = object_rcount;
4944 				vm_page_t p;
4945 
4946 				backing_rcount = backing_object->resident_page_count;
4947 				p = (vm_page_t)vm_page_queue_first(&backing_object->memq);
4948 				do {
4949 					offset = (p->vmp_offset - backing_offset);
4950 
4951 					if (offset < object->vo_size &&
4952 					    offset != hint_offset &&
4953 					    !EXISTS_IN_OBJECT(object, offset, rc)) {
4954 						/* found a dependency */
4955 						object->cow_hint = (vm_offset_t) offset; /* atomic */
4956 
4957 						break;
4958 					}
4959 					p = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
4960 				} while (--backing_rcount);
4961 				if (backing_rcount != 0) {
4962 					/* try and collapse the rest of the shadow chain */
4963 					if (object != original_object) {
4964 						vm_object_unlock(object);
4965 					}
4966 					object = backing_object;
4967 					object_lock_type = backing_object_lock_type;
4968 					continue;
4969 				}
4970 			}
4971 
4972 			/*
4973 			 * Walk through the offsets looking for pages in the
4974 			 * backing object that show through to the object.
4975 			 */
4976 			if (backing_rcount) {
4977 				offset = hint_offset;
4978 
4979 				while ((offset =
4980 				    (offset + PAGE_SIZE_64 < object->vo_size) ?
4981 				    (offset + PAGE_SIZE_64) : 0) != hint_offset) {
4982 					if (EXISTS_IN_OBJECT(backing_object, offset +
4983 					    backing_offset, backing_rcount) &&
4984 					    !EXISTS_IN_OBJECT(object, offset, object_rcount)) {
4985 						/* found a dependency */
4986 						object->cow_hint = (vm_offset_t) offset; /* atomic */
4987 						break;
4988 					}
4989 				}
4990 				if (offset != hint_offset) {
4991 					/* try and collapse the rest of the shadow chain */
4992 					if (object != original_object) {
4993 						vm_object_unlock(object);
4994 					}
4995 					object = backing_object;
4996 					object_lock_type = backing_object_lock_type;
4997 					continue;
4998 				}
4999 			}
5000 		}
5001 
5002 		/*
5003 		 * We need "exclusive" locks on the 2 VM objects.
5004 		 */
5005 		if (backing_object_lock_type != OBJECT_LOCK_EXCLUSIVE) {
5006 			vm_object_unlock(backing_object);
5007 			if (object != original_object) {
5008 				vm_object_unlock(object);
5009 			}
5010 			object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5011 			backing_object_lock_type = OBJECT_LOCK_EXCLUSIVE;
5012 			goto retry;
5013 		}
5014 
5015 		/* reset the offset hint for any objects deeper in the chain */
5016 		object->cow_hint = (vm_offset_t)0;
5017 
5018 		/*
5019 		 *	All interesting pages in the backing object
5020 		 *	already live in the parent or its pager.
5021 		 *	Thus we can bypass the backing object.
5022 		 */
5023 
5024 		vm_object_do_bypass(object, backing_object);
5025 		vm_object_collapse_do_bypass++;
5026 
5027 		/*
5028 		 *	Try again with this object's new backing object.
5029 		 */
5030 
5031 		continue;
5032 	}
5033 
5034 	/* NOT REACHED */
5035 	/*
5036 	 *  if (object != original_object) {
5037 	 *       vm_object_unlock(object);
5038 	 *  }
5039 	 */
5040 }
5041 
5042 /*
5043  *	Routine:	vm_object_page_remove: [internal]
5044  *	Purpose:
5045  *		Removes all physical pages in the specified
5046  *		object range from the object's list of pages.
5047  *
5048  *	In/out conditions:
5049  *		The object must be locked.
5050  *		The object must not have paging_in_progress, usually
5051  *		guaranteed by not having a pager.
5052  */
5053 unsigned int vm_object_page_remove_lookup = 0;
5054 unsigned int vm_object_page_remove_iterate = 0;
5055 
5056 __private_extern__ void
5057 vm_object_page_remove(
5058 	vm_object_t             object,
5059 	vm_object_offset_t      start,
5060 	vm_object_offset_t      end)
5061 {
5062 	vm_page_t       p, next;
5063 
5064 	/*
5065 	 *	One and two page removals are most popular.
5066 	 *	The factor of 16 here is somewhat arbitrary.
5067 	 *	It balances vm_page_lookup vs iteration.
5068 	 */
5069 
5070 	if (atop_64(end - start) < (unsigned)object->resident_page_count / 16) {
5071 		vm_object_page_remove_lookup++;
5072 
5073 		for (; start < end; start += PAGE_SIZE_64) {
5074 			p = vm_page_lookup(object, start);
5075 			if (p != VM_PAGE_NULL) {
5076 				assert(!p->vmp_cleaning && !p->vmp_laundry);
5077 				if (!p->vmp_fictitious && p->vmp_pmapped) {
5078 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5079 				}
5080 				VM_PAGE_FREE(p);
5081 			}
5082 		}
5083 	} else {
5084 		vm_object_page_remove_iterate++;
5085 
5086 		p = (vm_page_t) vm_page_queue_first(&object->memq);
5087 		while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t) p)) {
5088 			next = (vm_page_t) vm_page_queue_next(&p->vmp_listq);
5089 			if ((start <= p->vmp_offset) && (p->vmp_offset < end)) {
5090 				assert(!p->vmp_cleaning && !p->vmp_laundry);
5091 				if (!p->vmp_fictitious && p->vmp_pmapped) {
5092 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5093 				}
5094 				VM_PAGE_FREE(p);
5095 			}
5096 			p = next;
5097 		}
5098 	}
5099 }
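
/*
 * Illustrative numbers for the lookup-vs-iterate heuristic above
 * (hypothetical values, assuming 4 KB pages; not part of this file):
 *
 *	end - start = 64 KB          ->  atop_64(end - start) = 16 pages
 *	resident_page_count = 1024   ->  1024 / 16 = 64
 *
 * Since 16 < 64, the per-page vm_page_lookup() path is taken.  A 16 MB
 * range (4096 pages) over the same object would instead walk the whole
 * resident page queue once.
 */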
5100 
5101 
5102 /*
5103  *	Routine:	vm_object_coalesce
5104  *	Function:	Coalesces two objects backing up adjoining
5105  *			regions of memory into a single object.
5106  *
5107  *	returns TRUE if objects were combined.
5108  *
5109  *	NOTE:	Only works at the moment if the second object is NULL -
5110  *		if it's not, which object do we lock first?
5111  *
5112  *	Parameters:
5113  *		prev_object	First object to coalesce
5114  *		prev_offset	Offset into prev_object
5115  *		next_object	Second object to coalesce
5116  *		next_offset	Offset into next_object
5117  *
5118  *		prev_size	Size of reference to prev_object
5119  *		next_size	Size of reference to next_object
5120  *
5121  *	Conditions:
5122  *	The object(s) must *not* be locked. The map must be locked
5123  *	to preserve the reference to the object(s).
5124  */
5125 static int vm_object_coalesce_count = 0;
5126 
5127 __private_extern__ boolean_t
5128 vm_object_coalesce(
5129 	vm_object_t                     prev_object,
5130 	vm_object_t                     next_object,
5131 	vm_object_offset_t              prev_offset,
5132 	__unused vm_object_offset_t next_offset,
5133 	vm_object_size_t                prev_size,
5134 	vm_object_size_t                next_size)
5135 {
5136 	vm_object_size_t        newsize;
5137 
5138 #ifdef  lint
5139 	next_offset++;
5140 #endif  /* lint */
5141 
5142 	if (next_object != VM_OBJECT_NULL) {
5143 		return FALSE;
5144 	}
5145 
5146 	if (prev_object == VM_OBJECT_NULL) {
5147 		return TRUE;
5148 	}
5149 
5150 	vm_object_lock(prev_object);
5151 
5152 	/*
5153 	 *	Try to collapse the object first
5154 	 */
5155 	vm_object_collapse(prev_object, prev_offset, TRUE);
5156 
5157 	/*
5158 	 *	Can't coalesce if pages not mapped to
5159 	 *	prev_entry may be in use in any way:
5160 	 *	. more than one reference
5161 	 *	. paged out
5162 	 *	. shadows another object
5163 	 *	. has a copy elsewhere
5164 	 *	. is purgeable
5165 	 *	. paging references (pages might be in page-list)
5166 	 */
5167 
5168 	if ((prev_object->ref_count > 1) ||
5169 	    prev_object->pager_created ||
5170 	    (prev_object->shadow != VM_OBJECT_NULL) ||
5171 	    (prev_object->vo_copy != VM_OBJECT_NULL) ||
5172 	    (prev_object->true_share != FALSE) ||
5173 	    (prev_object->purgable != VM_PURGABLE_DENY) ||
5174 	    (prev_object->paging_in_progress != 0) ||
5175 	    (prev_object->activity_in_progress != 0)) {
5176 		vm_object_unlock(prev_object);
5177 		return FALSE;
5178 	}
5179 
5180 	vm_object_coalesce_count++;
5181 
5182 	/*
5183 	 *	Remove any pages that may still be in the object from
5184 	 *	a previous deallocation.
5185 	 */
5186 	vm_object_page_remove(prev_object,
5187 	    prev_offset + prev_size,
5188 	    prev_offset + prev_size + next_size);
5189 
5190 	/*
5191 	 *	Extend the object if necessary.
5192 	 */
5193 	newsize = prev_offset + prev_size + next_size;
5194 	if (newsize > prev_object->vo_size) {
5195 		assertf(page_aligned(newsize),
5196 		    "object %p size 0x%llx",
5197 		    prev_object, (uint64_t)newsize);
5198 		prev_object->vo_size = newsize;
5199 	}
5200 
5201 	vm_object_unlock(prev_object);
5202 	return TRUE;
5203 }
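
/*
 * Illustrative sketch of the extension arithmetic above (hypothetical,
 * page-aligned values; not part of this file):
 *
 *	prev_offset = 0x0000, prev_size = 0x4000, next_size = 0x2000
 *	newsize = prev_offset + prev_size + next_size = 0x6000
 *	0x6000 > prev_object->vo_size (0x4000), so vo_size grows to 0x6000
 *	and prev_object now also covers the coalesced next_size range.
 */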
5204 
5205 kern_return_t
5206 vm_object_populate_with_private(
5207 	vm_object_t             object,
5208 	vm_object_offset_t      offset,
5209 	ppnum_t                 phys_page,
5210 	vm_size_t               size)
5211 {
5212 	ppnum_t                 base_page;
5213 	vm_object_offset_t      base_offset;
5214 
5215 
5216 	if (!object->private) {
5217 		return KERN_FAILURE;
5218 	}
5219 
5220 	base_page = phys_page;
5221 
5222 	vm_object_lock(object);
5223 
5224 	if (!object->phys_contiguous) {
5225 		vm_page_t       m;
5226 
5227 		if ((base_offset = trunc_page_64(offset)) != offset) {
5228 			vm_object_unlock(object);
5229 			return KERN_FAILURE;
5230 		}
5231 		base_offset += object->paging_offset;
5232 
5233 		while (size) {
5234 			m = vm_page_lookup(object, base_offset);
5235 
5236 			if (m != VM_PAGE_NULL) {
5237 				if (m->vmp_fictitious) {
5238 					if (VM_PAGE_GET_PHYS_PAGE(m) != vm_page_guard_addr) {
5239 						vm_page_lockspin_queues();
5240 						m->vmp_private = TRUE;
5241 						vm_page_unlock_queues();
5242 
5243 						m->vmp_fictitious = FALSE;
5244 						VM_PAGE_SET_PHYS_PAGE(m, base_page);
5245 					}
5246 				} else if (VM_PAGE_GET_PHYS_PAGE(m) != base_page) {
5247 					if (!m->vmp_private) {
5248 						/*
5249 						 * we'd leak a real page... that can't be right
5250 						 */
5251 						panic("vm_object_populate_with_private - %p not private", m);
5252 					}
5253 					if (m->vmp_pmapped) {
5254 						/*
5255 						 * pmap call to clear old mapping
5256 						 */
5257 						pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(m));
5258 					}
5259 					VM_PAGE_SET_PHYS_PAGE(m, base_page);
5260 				}
5261 			} else {
5262 				m = vm_page_grab_fictitious(TRUE);
5263 
5264 				/*
5265 				 * private normally requires lock_queues but since we
5266 				 * are initializing the page, it's not necessary here
5267 				 */
5268 				m->vmp_private = TRUE;
5269 				m->vmp_fictitious = FALSE;
5270 				VM_PAGE_SET_PHYS_PAGE(m, base_page);
5271 				m->vmp_unusual = TRUE;
5272 				m->vmp_busy = FALSE;
5273 
5274 				vm_page_insert(m, object, base_offset);
5275 			}
5276 			base_page++;                                                                    /* Go to the next physical page */
5277 			base_offset += PAGE_SIZE;
5278 			size -= PAGE_SIZE;
5279 		}
5280 	} else {
5281 		/* NOTE: we should check the original settings here */
5282 		/* if we have a size > zero a pmap call should be made */
5283 		/* to disable the range */
5284 
5285 		/* pmap_? */
5286 
5287 		/* shadows on contiguous memory are not allowed */
5288 		/* we therefore can use the offset field */
5289 		object->vo_shadow_offset = (vm_object_offset_t)phys_page << PAGE_SHIFT;
5290 		assertf(page_aligned(size),
5291 		    "object %p size 0x%llx",
5292 		    object, (uint64_t)size);
5293 		object->vo_size = size;
5294 	}
5295 	vm_object_unlock(object);
5296 
5297 	return KERN_SUCCESS;
5298 }
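
/*
 * Hypothetical usage sketch (not a caller in this file): populate a
 * "private" object with three physical pages starting at phys_base.
 * The offset must be page aligned and the object must have been created
 * with object->private set, otherwise KERN_FAILURE is returned.
 *
 *	kern_return_t kr;
 *
 *	kr = vm_object_populate_with_private(object,
 *	    0,                  // page-aligned offset into the object
 *	    phys_base,          // first physical page number (ppnum_t)
 *	    3 * PAGE_SIZE);     // covers phys_base .. phys_base + 2
 */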
5299 
5300 
5301 kern_return_t
5302 memory_object_create_named(
5303 	memory_object_t pager,
5304 	memory_object_offset_t  size,
5305 	memory_object_control_t         *control)
5306 {
5307 	vm_object_t             object;
5308 
5309 	*control = MEMORY_OBJECT_CONTROL_NULL;
5310 	if (pager == MEMORY_OBJECT_NULL) {
5311 		return KERN_INVALID_ARGUMENT;
5312 	}
5313 
5314 	object = vm_object_memory_object_associate(pager,
5315 	    VM_OBJECT_NULL,
5316 	    size,
5317 	    TRUE);
5318 	if (object == VM_OBJECT_NULL) {
5319 		return KERN_INVALID_OBJECT;
5320 	}
5321 
5322 	/* wait for object (if any) to be ready */
5323 	if (object != VM_OBJECT_NULL) {
5324 		vm_object_lock(object);
5325 		object->named = TRUE;
5326 		while (!object->pager_ready) {
5327 			vm_object_sleep(object,
5328 			    VM_OBJECT_EVENT_PAGER_READY,
5329 			    THREAD_UNINT);
5330 		}
5331 		*control = object->pager_control;
5332 		vm_object_unlock(object);
5333 	}
5334 	return KERN_SUCCESS;
5335 }
5336 
5337 
5338 __private_extern__ kern_return_t
5339 vm_object_lock_request(
5340 	vm_object_t                     object,
5341 	vm_object_offset_t              offset,
5342 	vm_object_size_t                size,
5343 	memory_object_return_t          should_return,
5344 	int                             flags,
5345 	vm_prot_t                       prot)
5346 {
5347 	__unused boolean_t      should_flush;
5348 
5349 	should_flush = flags & MEMORY_OBJECT_DATA_FLUSH;
5350 
5351 	/*
5352 	 *	Check for bogus arguments.
5353 	 */
5354 	if (object == VM_OBJECT_NULL) {
5355 		return KERN_INVALID_ARGUMENT;
5356 	}
5357 
5358 	if ((prot & ~VM_PROT_ALL) != 0 && prot != VM_PROT_NO_CHANGE) {
5359 		return KERN_INVALID_ARGUMENT;
5360 	}
5361 
5362 	/*
5363 	 * XXX TODO4K
5364 	 * extend range for conservative operations (copy-on-write, sync, ...)
5365 	 * truncate range for destructive operations (purge, ...)
5366 	 */
5367 	size = vm_object_round_page(offset + size) - vm_object_trunc_page(offset);
5368 	offset = vm_object_trunc_page(offset);
5369 
5370 	/*
5371 	 *	Lock the object, and acquire a paging reference to
5372 	 *	prevent the memory_object reference from being released.
5373 	 */
5374 	vm_object_lock(object);
5375 	vm_object_paging_begin(object);
5376 
5377 	(void)vm_object_update(object,
5378 	    offset, size, NULL, NULL, should_return, flags, prot);
5379 
5380 	vm_object_paging_end(object);
5381 	vm_object_unlock(object);
5382 
5383 	return KERN_SUCCESS;
5384 }
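
/*
 * Hypothetical usage sketch (not a caller in this file): flush any dirty
 * pages in a range back to the pager without changing protections.
 * MEMORY_OBJECT_RETURN_DIRTY and MEMORY_OBJECT_DATA_FLUSH are standard
 * memory_object flags; error handling is elided.
 *
 *	kr = vm_object_lock_request(object,
 *	    offset, size,
 *	    MEMORY_OBJECT_RETURN_DIRTY,
 *	    MEMORY_OBJECT_DATA_FLUSH,
 *	    VM_PROT_NO_CHANGE);
 */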
5385 
5386 /*
5387  * Empty a purgeable object by grabbing the physical pages assigned to it and
5388  * putting them on the free queue without writing them to backing store, etc.
5389  * When the pages are next touched they will be demand zero-fill pages.  We
5390  * skip pages which are busy, being paged in/out, wired, etc.  We do _not_
5391  * skip referenced/dirty pages, pages on the active queue, etc.  We're more
5392  * than happy to grab these since this is a purgeable object.  We mark the
5393  * object as "empty" after reaping its pages.
5394  *
5395  * On entry the object must be locked and it must be
5396  * purgeable with no delayed copies pending.
5397  */
5398 uint64_t
5399 vm_object_purge(vm_object_t object, int flags)
5400 {
5401 	unsigned int    object_page_count = 0, pgcount = 0;
5402 	uint64_t        total_purged_pgcount = 0;
5403 	boolean_t       skipped_object = FALSE;
5404 
5405 	vm_object_lock_assert_exclusive(object);
5406 
5407 	if (object->purgable == VM_PURGABLE_DENY) {
5408 		return 0;
5409 	}
5410 
5411 	assert(object->vo_copy == VM_OBJECT_NULL);
5412 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5413 
5414 	/*
5415 	 * We need to set the object's state to VM_PURGABLE_EMPTY *before*
5416 	 * reaping its pages.  We update vm_page_purgeable_count in bulk
5417 	 * and we don't want vm_page_remove() to update it again for each
5418 	 * page we reap later.
5419 	 *
5420 	 * For the purgeable ledgers, pages from VOLATILE and EMPTY objects
5421 	 * are all accounted for in the "volatile" ledgers, so this does not
5422 	 * make any difference.
5423 	 * If we transitioned directly from NONVOLATILE to EMPTY,
5424 	 * vm_page_purgeable_count must have been updated when the object
5425 	 * was dequeued from its volatile queue and the purgeable ledgers
5426 	 * must have also been updated accordingly at that time (in
5427 	 * vm_object_purgable_control()).
5428 	 */
5429 	if (object->purgable == VM_PURGABLE_VOLATILE) {
5430 		unsigned int delta;
5431 		assert(object->resident_page_count >=
5432 		    object->wired_page_count);
5433 		delta = (object->resident_page_count -
5434 		    object->wired_page_count);
5435 		if (delta != 0) {
5436 			assert(vm_page_purgeable_count >=
5437 			    delta);
5438 			OSAddAtomic(-delta,
5439 			    (SInt32 *)&vm_page_purgeable_count);
5440 		}
5441 		if (object->wired_page_count != 0) {
5442 			assert(vm_page_purgeable_wired_count >=
5443 			    object->wired_page_count);
5444 			OSAddAtomic(-object->wired_page_count,
5445 			    (SInt32 *)&vm_page_purgeable_wired_count);
5446 		}
5447 		object->purgable = VM_PURGABLE_EMPTY;
5448 	}
5449 	assert(object->purgable == VM_PURGABLE_EMPTY);
5450 
5451 	object_page_count = object->resident_page_count;
5452 
5453 	vm_object_reap_pages(object, REAP_PURGEABLE);
5454 
5455 	if (object->resident_page_count >= object_page_count) {
5456 		total_purged_pgcount = 0;
5457 	} else {
5458 		total_purged_pgcount = object_page_count - object->resident_page_count;
5459 	}
5460 
5461 	if (object->pager != NULL) {
5462 		assert(VM_CONFIG_COMPRESSOR_IS_PRESENT);
5463 
5464 		if (object->activity_in_progress == 0 &&
5465 		    object->paging_in_progress == 0) {
5466 			/*
5467 			 * Also reap any memory coming from this object
5468 			 * in the VM compressor.
5469 			 *
5470 			 * There are no operations in progress on the VM object
5471 			 * and no operation can start while we're holding the
5472 			 * VM object lock, so it's safe to reap the compressed
5473 			 * pages and update the page counts.
5474 			 */
5475 			pgcount = vm_compressor_pager_get_count(object->pager);
5476 			if (pgcount) {
5477 				pgcount = vm_compressor_pager_reap_pages(object->pager, flags);
5478 				vm_compressor_pager_count(object->pager,
5479 				    -pgcount,
5480 				    FALSE,                       /* shared */
5481 				    object);
5482 				vm_object_owner_compressed_update(object,
5483 				    -pgcount);
5484 			}
5485 			if (!(flags & C_DONT_BLOCK)) {
5486 				assert(vm_compressor_pager_get_count(object->pager)
5487 				    == 0);
5488 			}
5489 		} else {
5490 			/*
5491 			 * There's some kind of paging activity in progress
5492 			 * for this object, which could result in a page
5493 			 * being compressed or decompressed, possibly while
5494 			 * the VM object is not locked, so it could race
5495 			 * with us.
5496 			 *
5497 			 * We can't really synchronize this without possibly
5498 			 * causing a deadlock when the compressor needs to
5499 			 * allocate or free memory while compressing or
5500 			 * decompressing a page from a purgeable object
5501 			 * mapped in the kernel_map...
5502 			 *
5503 			 * So let's not attempt to purge the compressor
5504 			 * pager if there's any kind of operation in
5505 			 * progress on the VM object.
5506 			 */
5507 			skipped_object = TRUE;
5508 		}
5509 	}
5510 
5511 	vm_object_lock_assert_exclusive(object);
5512 
5513 	total_purged_pgcount += pgcount;
5514 
5515 	KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (MACHDBG_CODE(DBG_MACH_VM, OBJECT_PURGE_ONE)),
5516 	    VM_KERNEL_UNSLIDE_OR_PERM(object),                   /* purged object */
5517 	    object_page_count,
5518 	    total_purged_pgcount,
5519 	    skipped_object,
5520 	    0);
5521 
5522 	return total_purged_pgcount;
5523 }
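
/*
 * Illustrative sketch of the calling convention (hypothetical caller, not
 * part of this file): the object must be locked exclusively and must
 * already be purgeable (VOLATILE or EMPTY) with no delayed copy pending.
 *
 *	uint64_t reclaimed;
 *
 *	vm_object_lock(object);             // exclusive lock
 *	reclaimed = vm_object_purge(object, 0);
 *	// "reclaimed" counts resident pages freed plus, when possible,
 *	// compressed pages reaped from the compressor pager
 *	vm_object_unlock(object);
 */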
5524 
5525 
5526 /*
5527  * vm_object_purgeable_control() allows the caller to control and investigate the
5528  * state of a purgeable object.  A purgeable object is created via a call to
5529  * vm_allocate() with VM_FLAGS_PURGABLE specified.  A purgeable object will
5530  * never be coalesced with any other object -- even other purgeable objects --
5531  * and will thus always remain a distinct object.  A purgeable object has
5532  * special semantics when its reference count is exactly 1.  If its reference
5533  * count is greater than 1, then a purgeable object will behave like a normal
5534  * object and attempts to use this interface will result in an error return
5535  * of KERN_INVALID_ARGUMENT.
5536  *
5537  * A purgeable object may be put into a "volatile" state which will make the
5538  * object's pages eligible for being reclaimed without paging to backing
5539  * store if the system runs low on memory.  If the pages in a volatile
5540  * purgeable object are reclaimed, the purgeable object is said to have been
5541  * "emptied."  When a purgeable object is emptied the system will reclaim as
5542  * many pages from the object as it can in a convenient manner (pages already
5543  * en route to backing store or busy for other reasons are left as is).  When
5544  * a purgeable object is made volatile, its pages will generally be reclaimed
5545  * before other pages in the application's working set.  This semantic is
5546  * generally used by applications which can recreate the data in the object
5547  * faster than it can be paged in.  One such example might be media assets
5548  * which can be reread from a much faster RAID volume.
5549  *
5550  * A purgeable object may be designated as "non-volatile" which means it will
5551  * behave like all other objects in the system with pages being written to and
5552  * read from backing store as needed to satisfy system memory needs.  If the
5553  * object was emptied before the object was made non-volatile, that fact will
5554  * be returned as the old state of the purgeable object (see
5555  * VM_PURGABLE_SET_STATE below).  In this case, any pages of the object which
5556  * were reclaimed as part of emptying the object will be refaulted in as
5557  * zero-fill on demand.  It is up to the application to note that an object
5558  * was emptied and recreate the objects contents if necessary.  When a
5559  * was emptied and recreate the object's contents if necessary.  When a
5560  * out to backing store in the immediate future.  A purgeable object may also
5561  * be manually emptied.
5562  *
5563  * Finally, the current state (non-volatile, volatile, volatile & empty) of a
5564  * volatile purgeable object may be queried at any time.  This information may
5565  * be used as a control input to let the application know when the system is
5566  * experiencing memory pressure and is reclaiming memory.
5567  *
5568  * The specified address may be any address within the purgeable object.  If
5569  * the specified address does not represent any object in the target task's
5570  * virtual address space, then KERN_INVALID_ADDRESS will be returned.  If the
5571  * object containing the specified address is not a purgeable object, then
5572  * KERN_INVALID_ARGUMENT will be returned.  Otherwise, KERN_SUCCESS will be
5573  * returned.
5574  *
5575  * The control parameter may be any one of VM_PURGABLE_SET_STATE or
5576  * VM_PURGABLE_GET_STATE.  For VM_PURGABLE_SET_STATE, the in/out parameter
5577  * state is used to set the new state of the purgeable object and return its
5578  * old state.  For VM_PURGABLE_GET_STATE, the current state of the purgeable
5579  * object is returned in the parameter state.
5580  *
5581  * The in/out parameter state may be one of VM_PURGABLE_NONVOLATILE,
5582  * VM_PURGABLE_VOLATILE or VM_PURGABLE_EMPTY.  These, respectively, represent
5583  * the non-volatile, volatile and volatile/empty states described above.
5584  * Setting the state of a purgeable object to VM_PURGABLE_EMPTY will
5585  * immediately reclaim as many pages in the object as can be conveniently
5586  * collected (some may have already been written to backing store or be
5587  * otherwise busy).
5588  *
5589  * The process of making a purgeable object non-volatile and determining its
5590  * previous state is atomic.  Thus, if a purgeable object is made
5591  * VM_PURGABLE_NONVOLATILE and the old state is returned as
5592  * VM_PURGABLE_VOLATILE, then the purgeable object's previous contents are
5593  * completely intact and will remain so until the object is made volatile
5594  * again.  If the old state is returned as VM_PURGABLE_EMPTY then the object
5595  * was reclaimed while it was in a volatile state and its previous contents
5596  * have been lost.
5597  */
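
/*
 * Hypothetical user-level sketch of the interface described above (not part
 * of this file; headers and error handling elided):
 *
 *	vm_address_t addr = 0;
 *	int state;
 *
 *	vm_allocate(mach_task_self(), &addr, size,
 *	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
 *
 *	state = VM_PURGABLE_VOLATILE;           // allow reclaim under pressure
 *	vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);
 *
 *	// ... later, before reusing the memory ...
 *	state = VM_PURGABLE_NONVOLATILE;
 *	vm_purgable_control(mach_task_self(), addr,
 *	    VM_PURGABLE_SET_STATE, &state);     // old state returned in "state"
 *	if (state == VM_PURGABLE_EMPTY) {
 *		// contents were reclaimed; recreate them
 *	}
 */
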
5598 /*
5599  * The object must be locked.
5600  */
5601 kern_return_t
5602 vm_object_purgable_control(
5603 	vm_object_t     object,
5604 	vm_purgable_t   control,
5605 	int             *state)
5606 {
5607 	int             old_state;
5608 	int             new_state;
5609 
5610 	if (object == VM_OBJECT_NULL) {
5611 		/*
5612 		 * Object must already be present or it can't be purgeable.
5613 		 */
5614 		return KERN_INVALID_ARGUMENT;
5615 	}
5616 
5617 	vm_object_lock_assert_exclusive(object);
5618 
5619 	/*
5620 	 * Get current state of the purgeable object.
5621 	 */
5622 	old_state = object->purgable;
5623 	if (old_state == VM_PURGABLE_DENY) {
5624 		return KERN_INVALID_ARGUMENT;
5625 	}
5626 
5627 	/* purgeable can't have delayed copies - now or in the future */
5628 	assert(object->vo_copy == VM_OBJECT_NULL);
5629 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
5630 
5631 	/*
5632 	 * Execute the desired operation.
5633 	 */
5634 	if (control == VM_PURGABLE_GET_STATE) {
5635 		*state = old_state;
5636 		return KERN_SUCCESS;
5637 	}
5638 
5639 	if (control == VM_PURGABLE_SET_STATE &&
5640 	    object->purgeable_only_by_kernel) {
5641 		return KERN_PROTECTION_FAILURE;
5642 	}
5643 
5644 	if (control != VM_PURGABLE_SET_STATE &&
5645 	    control != VM_PURGABLE_SET_STATE_FROM_KERNEL) {
5646 		return KERN_INVALID_ARGUMENT;
5647 	}
5648 
5649 	if ((*state) & VM_PURGABLE_DEBUG_EMPTY) {
5650 		object->volatile_empty = TRUE;
5651 	}
5652 	if ((*state) & VM_PURGABLE_DEBUG_FAULT) {
5653 		object->volatile_fault = TRUE;
5654 	}
5655 
5656 	new_state = *state & VM_PURGABLE_STATE_MASK;
5657 	if (new_state == VM_PURGABLE_VOLATILE) {
5658 		if (old_state == VM_PURGABLE_EMPTY) {
5659 			/* what's been emptied must stay empty */
5660 			new_state = VM_PURGABLE_EMPTY;
5661 		}
5662 		if (object->volatile_empty) {
5663 			/* debugging mode: go straight to empty */
5664 			new_state = VM_PURGABLE_EMPTY;
5665 		}
5666 	}
5667 
5668 	switch (new_state) {
5669 	case VM_PURGABLE_DENY:
5670 		/*
5671 		 * Attempting to convert purgeable memory to non-purgeable:
5672 		 * not allowed.
5673 		 */
5674 		return KERN_INVALID_ARGUMENT;
5675 	case VM_PURGABLE_NONVOLATILE:
5676 		object->purgable = new_state;
5677 
5678 		if (old_state == VM_PURGABLE_VOLATILE) {
5679 			unsigned int delta;
5680 
5681 			assert(object->resident_page_count >=
5682 			    object->wired_page_count);
5683 			delta = (object->resident_page_count -
5684 			    object->wired_page_count);
5685 
5686 			assert(vm_page_purgeable_count >= delta);
5687 
5688 			if (delta != 0) {
5689 				OSAddAtomic(-delta,
5690 				    (SInt32 *)&vm_page_purgeable_count);
5691 			}
5692 			if (object->wired_page_count != 0) {
5693 				assert(vm_page_purgeable_wired_count >=
5694 				    object->wired_page_count);
5695 				OSAddAtomic(-object->wired_page_count,
5696 				    (SInt32 *)&vm_page_purgeable_wired_count);
5697 			}
5698 
5699 			vm_page_lock_queues();
5700 
5701 			/* object should be on a queue */
5702 			assert(object->objq.next != NULL &&
5703 			    object->objq.prev != NULL);
5704 			purgeable_q_t queue;
5705 
5706 			/*
5707 			 * Move object from its volatile queue to the
5708 			 * non-volatile queue...
5709 			 */
5710 			queue = vm_purgeable_object_remove(object);
5711 			assert(queue);
5712 
5713 			if (object->purgeable_when_ripe) {
5714 				vm_purgeable_token_delete_last(queue);
5715 			}
5716 			assert(queue->debug_count_objects >= 0);
5717 
5718 			vm_page_unlock_queues();
5719 		}
5720 		if (old_state == VM_PURGABLE_VOLATILE ||
5721 		    old_state == VM_PURGABLE_EMPTY) {
5722 			/*
5723 			 * Transfer the object's pages from the volatile to
5724 			 * non-volatile ledgers.
5725 			 */
5726 			vm_purgeable_accounting(object, VM_PURGABLE_VOLATILE);
5727 		}
5728 
5729 		break;
5730 
5731 	case VM_PURGABLE_VOLATILE:
5732 		if (object->volatile_fault) {
5733 			vm_page_t       p;
5734 			int             refmod;
5735 
5736 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5737 				if (p->vmp_busy ||
5738 				    VM_PAGE_WIRED(p) ||
5739 				    p->vmp_fictitious) {
5740 					continue;
5741 				}
5742 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5743 				if ((refmod & VM_MEM_MODIFIED) &&
5744 				    !p->vmp_dirty) {
5745 					SET_PAGE_DIRTY(p, FALSE);
5746 				}
5747 			}
5748 		}
5749 
5750 		assert(old_state != VM_PURGABLE_EMPTY);
5751 
5752 		purgeable_q_t queue;
5753 
5754 		/* find the correct queue */
5755 		if ((*state & VM_PURGABLE_ORDERING_MASK) == VM_PURGABLE_ORDERING_OBSOLETE) {
5756 			queue = &purgeable_queues[PURGEABLE_Q_TYPE_OBSOLETE];
5757 		} else {
5758 			if ((*state & VM_PURGABLE_BEHAVIOR_MASK) == VM_PURGABLE_BEHAVIOR_FIFO) {
5759 				queue = &purgeable_queues[PURGEABLE_Q_TYPE_FIFO];
5760 			} else {
5761 				queue = &purgeable_queues[PURGEABLE_Q_TYPE_LIFO];
5762 			}
5763 		}
5764 
5765 		if (old_state == VM_PURGABLE_NONVOLATILE ||
5766 		    old_state == VM_PURGABLE_EMPTY) {
5767 			unsigned int delta;
5768 
5769 			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
5770 			    VM_PURGABLE_NO_AGING) {
5771 				object->purgeable_when_ripe = FALSE;
5772 			} else {
5773 				object->purgeable_when_ripe = TRUE;
5774 			}
5775 
5776 			if (object->purgeable_when_ripe) {
5777 				kern_return_t result;
5778 
5779 				/* try to add token... this can fail */
5780 				vm_page_lock_queues();
5781 
5782 				result = vm_purgeable_token_add(queue);
5783 				if (result != KERN_SUCCESS) {
5784 					vm_page_unlock_queues();
5785 					return result;
5786 				}
5787 				vm_page_unlock_queues();
5788 			}
5789 
5790 			assert(object->resident_page_count >=
5791 			    object->wired_page_count);
5792 			delta = (object->resident_page_count -
5793 			    object->wired_page_count);
5794 
5795 			if (delta != 0) {
5796 				OSAddAtomic(delta,
5797 				    &vm_page_purgeable_count);
5798 			}
5799 			if (object->wired_page_count != 0) {
5800 				OSAddAtomic(object->wired_page_count,
5801 				    &vm_page_purgeable_wired_count);
5802 			}
5803 
5804 			object->purgable = new_state;
5805 
5806 			/* object should be on "non-volatile" queue */
5807 			assert(object->objq.next != NULL);
5808 			assert(object->objq.prev != NULL);
5809 		} else if (old_state == VM_PURGABLE_VOLATILE) {
5810 			purgeable_q_t   old_queue;
5811 			boolean_t       purgeable_when_ripe;
5812 
5813 			/*
5814 			 * if reassigning priorities / purgeable groups, we don't change the
5815 			 * token queue. So moving priorities will not make pages stay around longer.
5816 			 * Reasoning is that the algorithm gives most priority to the most important
5817 			 * object. If a new token is added, the most important object's priority is boosted.
5818 			 * This already biases the system in favor of purgeable queues that move a lot.
5819 			 * It doesn't seem more biasing is necessary in this case, where no new object is added.
5820 			 */
5821 			assert(object->objq.next != NULL && object->objq.prev != NULL); /* object should be on a queue */
5822 
5823 			old_queue = vm_purgeable_object_remove(object);
5824 			assert(old_queue);
5825 
5826 			if ((*state & VM_PURGABLE_NO_AGING_MASK) ==
5827 			    VM_PURGABLE_NO_AGING) {
5828 				purgeable_when_ripe = FALSE;
5829 			} else {
5830 				purgeable_when_ripe = TRUE;
5831 			}
5832 
5833 			if (old_queue != queue ||
5834 			    (purgeable_when_ripe !=
5835 			    object->purgeable_when_ripe)) {
5836 				kern_return_t result;
5837 
5838 				/* Changing queue. Have to move token. */
5839 				vm_page_lock_queues();
5840 				if (object->purgeable_when_ripe) {
5841 					vm_purgeable_token_delete_last(old_queue);
5842 				}
5843 				object->purgeable_when_ripe = purgeable_when_ripe;
5844 				if (object->purgeable_when_ripe) {
5845 					result = vm_purgeable_token_add(queue);
5846 					assert(result == KERN_SUCCESS);   /* this should never fail since we just freed a token */
5847 				}
5848 				vm_page_unlock_queues();
5849 			}
5850 		}
5851 		;
5852 		vm_purgeable_object_add(object, queue, (*state & VM_VOLATILE_GROUP_MASK) >> VM_VOLATILE_GROUP_SHIFT );
5853 		if (old_state == VM_PURGABLE_NONVOLATILE) {
5854 			vm_purgeable_accounting(object,
5855 			    VM_PURGABLE_NONVOLATILE);
5856 		}
5857 
5858 		assert(queue->debug_count_objects >= 0);
5859 
5860 		break;
5861 
5862 
5863 	case VM_PURGABLE_EMPTY:
5864 		if (object->volatile_fault) {
5865 			vm_page_t       p;
5866 			int             refmod;
5867 
5868 			vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5869 				if (p->vmp_busy ||
5870 				    VM_PAGE_WIRED(p) ||
5871 				    p->vmp_fictitious) {
5872 					continue;
5873 				}
5874 				refmod = pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(p));
5875 				if ((refmod & VM_MEM_MODIFIED) &&
5876 				    !p->vmp_dirty) {
5877 					SET_PAGE_DIRTY(p, FALSE);
5878 				}
5879 			}
5880 		}
5881 
5882 		if (old_state == VM_PURGABLE_VOLATILE) {
5883 			purgeable_q_t old_queue;
5884 
5885 			/* object should be on a queue */
5886 			assert(object->objq.next != NULL &&
5887 			    object->objq.prev != NULL);
5888 
5889 			old_queue = vm_purgeable_object_remove(object);
5890 			assert(old_queue);
5891 			if (object->purgeable_when_ripe) {
5892 				vm_page_lock_queues();
5893 				vm_purgeable_token_delete_first(old_queue);
5894 				vm_page_unlock_queues();
5895 			}
5896 		}
5897 
5898 		if (old_state == VM_PURGABLE_NONVOLATILE) {
5899 			/*
5900 			 * This object's pages were previously accounted as
5901 			 * "non-volatile" and now need to be accounted as
5902 			 * "volatile".
5903 			 */
5904 			vm_purgeable_accounting(object,
5905 			    VM_PURGABLE_NONVOLATILE);
5906 			/*
5907 			 * Set to VM_PURGABLE_EMPTY because the pages are no
5908 			 * longer accounted in the "non-volatile" ledger
5909 			 * and are also not accounted for in
5910 			 * "vm_page_purgeable_count".
5911 			 */
5912 			object->purgable = VM_PURGABLE_EMPTY;
5913 		}
5914 
5915 		(void) vm_object_purge(object, 0);
5916 		assert(object->purgable == VM_PURGABLE_EMPTY);
5917 
5918 		break;
5919 	}
5920 
5921 	*state = old_state;
5922 
5923 	vm_object_lock_assert_exclusive(object);
5924 
5925 	return KERN_SUCCESS;
5926 }
5927 
5928 kern_return_t
5929 vm_object_get_page_counts(
5930 	vm_object_t             object,
5931 	vm_object_offset_t      offset,
5932 	vm_object_size_t        size,
5933 	unsigned int            *resident_page_count,
5934 	unsigned int            *dirty_page_count)
5935 {
5936 	kern_return_t           kr = KERN_SUCCESS;
5937 	boolean_t               count_dirty_pages = FALSE;
5938 	vm_page_t               p = VM_PAGE_NULL;
5939 	unsigned int            local_resident_count = 0;
5940 	unsigned int            local_dirty_count = 0;
5941 	vm_object_offset_t      cur_offset = 0;
5942 	vm_object_offset_t      end_offset = 0;
5943 
5944 	if (object == VM_OBJECT_NULL) {
5945 		return KERN_INVALID_ARGUMENT;
5946 	}
5947 
5948 
5949 	cur_offset = offset;
5950 
5951 	end_offset = offset + size;
5952 
5953 	vm_object_lock_assert_exclusive(object);
5954 
5955 	if (dirty_page_count != NULL) {
5956 		count_dirty_pages = TRUE;
5957 	}
5958 
5959 	if (resident_page_count != NULL && count_dirty_pages == FALSE) {
5960 		/*
5961 		 * Fast path when:
5962 		 * - we only want the resident page count, and,
5963 		 * - the entire object is exactly covered by the request.
5964 		 */
5965 		if (offset == 0 && (object->vo_size == size)) {
5966 			*resident_page_count = object->resident_page_count;
5967 			goto out;
5968 		}
5969 	}
5970 
5971 	if (object->resident_page_count <= (size >> PAGE_SHIFT)) {
5972 		vm_page_queue_iterate(&object->memq, p, vmp_listq) {
5973 			if (p->vmp_offset >= cur_offset && p->vmp_offset < end_offset) {
5974 				local_resident_count++;
5975 
5976 				if (count_dirty_pages) {
5977 					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
5978 						local_dirty_count++;
5979 					}
5980 				}
5981 			}
5982 		}
5983 	} else {
5984 		for (cur_offset = offset; cur_offset < end_offset; cur_offset += PAGE_SIZE_64) {
5985 			p = vm_page_lookup(object, cur_offset);
5986 
5987 			if (p != VM_PAGE_NULL) {
5988 				local_resident_count++;
5989 
5990 				if (count_dirty_pages) {
5991 					if (p->vmp_dirty || (p->vmp_wpmapped && pmap_is_modified(VM_PAGE_GET_PHYS_PAGE(p)))) {
5992 						local_dirty_count++;
5993 					}
5994 				}
5995 			}
5996 		}
5997 	}
5998 
5999 	if (resident_page_count != NULL) {
6000 		*resident_page_count = local_resident_count;
6001 	}
6002 
6003 	if (dirty_page_count != NULL) {
6004 		*dirty_page_count = local_dirty_count;
6005 	}
6006 
6007 out:
6008 	return kr;
6009 }
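
/*
 * Hypothetical usage sketch (not a caller in this file): count resident and
 * dirty pages over the first megabyte of an object.  The object lock must
 * be held exclusively, per the assertion above.
 *
 *	unsigned int resident, dirty;
 *
 *	vm_object_lock(object);
 *	vm_object_get_page_counts(object, 0, 1024 * 1024, &resident, &dirty);
 *	vm_object_unlock(object);
 */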
6010 
6011 
6012 /*
6013  *	vm_object_reference:
6014  *
6015  *	Gets another reference to the given object.
6016  */
6017 #ifdef vm_object_reference
6018 #undef vm_object_reference
6019 #endif
6020 __private_extern__ void
6021 vm_object_reference(
6022 	vm_object_t     object)
6023 {
6024 	if (object == VM_OBJECT_NULL) {
6025 		return;
6026 	}
6027 
6028 	vm_object_lock(object);
6029 	assert(object->ref_count > 0);
6030 	vm_object_reference_locked(object);
6031 	vm_object_unlock(object);
6032 }
6033 
6034 /*
6035  * vm_object_transpose
6036  *
6037  * This routine takes two VM objects of the same size and exchanges
6038  * their backing store.
6039  * The objects should be "quiesced" via a UPL operation with UPL_SET_IO_WIRE
6040  * and UPL_BLOCK_ACCESS if they are referenced anywhere.
6041  *
6042  * The VM objects must not be locked by caller.
6043  */
6044 unsigned int vm_object_transpose_count = 0;
6045 kern_return_t
6046 vm_object_transpose(
6047 	vm_object_t             object1,
6048 	vm_object_t             object2,
6049 	vm_object_size_t        transpose_size)
6050 {
6051 	vm_object_t             tmp_object;
6052 	kern_return_t           retval;
6053 	boolean_t               object1_locked, object2_locked;
6054 	vm_page_t               page;
6055 	vm_object_offset_t      page_offset;
6056 
6057 	tmp_object = VM_OBJECT_NULL;
6058 	object1_locked = FALSE; object2_locked = FALSE;
6059 
6060 	if (object1 == object2 ||
6061 	    object1 == VM_OBJECT_NULL ||
6062 	    object2 == VM_OBJECT_NULL) {
6063 		/*
6064 		 * If the 2 VM objects are the same, there's
6065 		 * no point in exchanging their backing store.
6066 		 */
6067 		retval = KERN_INVALID_VALUE;
6068 		goto done;
6069 	}
6070 
6071 	/*
6072 	 * Since we need to lock both objects at the same time,
6073 	 * make sure we always lock them in the same order to
6074 	 * avoid deadlocks.
6075 	 */
6076 	if (object1 > object2) {
6077 		tmp_object = object1;
6078 		object1 = object2;
6079 		object2 = tmp_object;
6080 	}
6081 
6082 	/*
6083 	 * Allocate a temporary VM object to hold object1's contents
6084 	 * while we copy object2 to object1.
6085 	 */
6086 	tmp_object = vm_object_allocate(transpose_size);
6087 	vm_object_lock(tmp_object);
6088 	tmp_object->can_persist = FALSE;
6089 
6090 
6091 	/*
6092 	 * Grab control of the 1st VM object.
6093 	 */
6094 	vm_object_lock(object1);
6095 	object1_locked = TRUE;
6096 	if (!object1->alive || object1->terminating ||
6097 	    object1->vo_copy || object1->shadow || object1->shadowed ||
6098 	    object1->purgable != VM_PURGABLE_DENY) {
6099 		/*
6100 		 * We don't deal with copy or shadow objects (yet).
6101 		 */
6102 		retval = KERN_INVALID_VALUE;
6103 		goto done;
6104 	}
6105 	/*
6106 	 * We're about to mess with the object's backing store and
6107 	 * taking a "paging_in_progress" reference wouldn't be enough
6108 	 * to prevent any paging activity on this object, so the caller should
6109 	 * have "quiesced" the objects beforehand, via a UPL operation with
6110 	 * UPL_SET_IO_WIRE (to make sure all the pages are there and wired)
6111 	 * and UPL_BLOCK_ACCESS (to mark the pages "busy").
6112 	 *
6113 	 * Wait for any paging operation to complete (but only paging, not
6114 	 * other kind of activities not linked to the pager).  After we're
6115 	 * satisfied that there's no more paging in progress, we keep the
6116 	 * object locked, to guarantee that no one tries to access its pager.
6117 	 */
6118 	vm_object_paging_only_wait(object1, THREAD_UNINT);
6119 
6120 	/*
6121 	 * Same as above for the 2nd object...
6122 	 */
6123 	vm_object_lock(object2);
6124 	object2_locked = TRUE;
6125 	if (!object2->alive || object2->terminating ||
6126 	    object2->vo_copy || object2->shadow || object2->shadowed ||
6127 	    object2->purgable != VM_PURGABLE_DENY) {
6128 		retval = KERN_INVALID_VALUE;
6129 		goto done;
6130 	}
6131 	vm_object_paging_only_wait(object2, THREAD_UNINT);
6132 
6133 
6134 	if (object1->vo_size != object2->vo_size ||
6135 	    object1->vo_size != transpose_size) {
6136 		/*
6137 		 * If the 2 objects don't have the same size, we can't
6138 		 * exchange their backing stores or one would overflow.
6139 		 * If their size doesn't match the caller's
6140 		 * "transpose_size", we can't do it either because the
6141 		 * transpose operation will affect the entire span of
6142 		 * the objects.
6143 		 */
6144 		retval = KERN_INVALID_VALUE;
6145 		goto done;
6146 	}
6147 
6148 
6149 	/*
6150 	 * Transpose the lists of resident pages.
6151 	 * This also updates the resident_page_count and the memq_hint.
6152 	 */
6153 	if (object1->phys_contiguous || vm_page_queue_empty(&object1->memq)) {
6154 		/*
6155 		 * No pages in object1, just transfer pages
6156 		 * from object2 to object1.  No need to go through
6157 		 * an intermediate object.
6158 		 */
6159 		while (!vm_page_queue_empty(&object2->memq)) {
6160 			page = (vm_page_t) vm_page_queue_first(&object2->memq);
6161 			vm_page_rename(page, object1, page->vmp_offset);
6162 		}
6163 		assert(vm_page_queue_empty(&object2->memq));
6164 	} else if (object2->phys_contiguous || vm_page_queue_empty(&object2->memq)) {
6165 		/*
6166 		 * No pages in object2, just transfer pages
6167 		 * from object1 to object2.  No need to go through
6168 		 * an intermediate object.
6169 		 */
6170 		while (!vm_page_queue_empty(&object1->memq)) {
6171 			page = (vm_page_t) vm_page_queue_first(&object1->memq);
6172 			vm_page_rename(page, object2, page->vmp_offset);
6173 		}
6174 		assert(vm_page_queue_empty(&object1->memq));
6175 	} else {
6176 		/* transfer object1's pages to tmp_object */
6177 		while (!vm_page_queue_empty(&object1->memq)) {
6178 			page = (vm_page_t) vm_page_queue_first(&object1->memq);
6179 			page_offset = page->vmp_offset;
6180 			vm_page_remove(page, TRUE);
6181 			page->vmp_offset = page_offset;
6182 			vm_page_queue_enter(&tmp_object->memq, page, vmp_listq);
6183 		}
6184 		assert(vm_page_queue_empty(&object1->memq));
6185 		/* transfer object2's pages to object1 */
6186 		while (!vm_page_queue_empty(&object2->memq)) {
6187 			page = (vm_page_t) vm_page_queue_first(&object2->memq);
6188 			vm_page_rename(page, object1, page->vmp_offset);
6189 		}
6190 		assert(vm_page_queue_empty(&object2->memq));
6191 		/* transfer tmp_object's pages to object2 */
6192 		while (!vm_page_queue_empty(&tmp_object->memq)) {
6193 			page = (vm_page_t) vm_page_queue_first(&tmp_object->memq);
6194 			vm_page_queue_remove(&tmp_object->memq, page, vmp_listq);
6195 			vm_page_insert(page, object2, page->vmp_offset);
6196 		}
6197 		assert(vm_page_queue_empty(&tmp_object->memq));
6198 	}
6199 
6200 #define __TRANSPOSE_FIELD(field)                                \
6201 MACRO_BEGIN                                                     \
6202 	tmp_object->field = object1->field;                     \
6203 	object1->field = object2->field;                        \
6204 	object2->field = tmp_object->field;                     \
6205 MACRO_END
6206 
6207 	/* "Lock" refers to the object not its contents */
6208 	/* "size" should be identical */
6209 	assert(object1->vo_size == object2->vo_size);
6210 	/* "memq_hint" was updated above when transposing pages */
6211 	/* "ref_count" refers to the object not its contents */
6212 	assert(object1->ref_count >= 1);
6213 	assert(object2->ref_count >= 1);
6214 	/* "resident_page_count" was updated above when transposing pages */
6215 	/* "wired_page_count" was updated above when transposing pages */
6216 #if !VM_TAG_ACTIVE_UPDATE
6217 	/* "wired_objq" was dealt with along with "wired_page_count" */
6218 #endif /* ! VM_TAG_ACTIVE_UPDATE */
6219 	/* "reusable_page_count" was updated above when transposing pages */
6220 	/* there should be no "copy" */
6221 	assert(!object1->vo_copy);
6222 	assert(!object2->vo_copy);
6223 	/* there should be no "shadow" */
6224 	assert(!object1->shadow);
6225 	assert(!object2->shadow);
6226 	__TRANSPOSE_FIELD(vo_shadow_offset); /* used by phys_contiguous objects */
6227 	__TRANSPOSE_FIELD(pager);
6228 	__TRANSPOSE_FIELD(paging_offset);
6229 	__TRANSPOSE_FIELD(pager_control);
6230 	/* update the memory_objects' pointers back to the VM objects */
6231 	if (object1->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6232 		memory_object_control_collapse(&object1->pager_control,
6233 		    object1);
6234 	}
6235 	if (object2->pager_control != MEMORY_OBJECT_CONTROL_NULL) {
6236 		memory_object_control_collapse(&object2->pager_control,
6237 		    object2);
6238 	}
6239 	__TRANSPOSE_FIELD(copy_strategy);
6240 	/* "paging_in_progress" refers to the object not its contents */
6241 	assert(!object1->paging_in_progress);
6242 	assert(!object2->paging_in_progress);
6243 	assert(object1->activity_in_progress);
6244 	assert(object2->activity_in_progress);
6245 	/* "all_wanted" refers to the object not its contents */
6246 	__TRANSPOSE_FIELD(pager_created);
6247 	__TRANSPOSE_FIELD(pager_initialized);
6248 	__TRANSPOSE_FIELD(pager_ready);
6249 	__TRANSPOSE_FIELD(pager_trusted);
6250 	__TRANSPOSE_FIELD(can_persist);
6251 	__TRANSPOSE_FIELD(internal);
6252 	__TRANSPOSE_FIELD(private);
6253 	__TRANSPOSE_FIELD(pageout);
6254 	/* "alive" should be set */
6255 	assert(object1->alive);
6256 	assert(object2->alive);
6257 	/* "purgeable" should be non-purgeable */
6258 	assert(object1->purgable == VM_PURGABLE_DENY);
6259 	assert(object2->purgable == VM_PURGABLE_DENY);
6260 	/* "shadowed" refers to the object not its contents */
6261 	__TRANSPOSE_FIELD(purgeable_when_ripe);
6262 	__TRANSPOSE_FIELD(true_share);
6263 	/* "terminating" should not be set */
6264 	assert(!object1->terminating);
6265 	assert(!object2->terminating);
6266 	/* transfer "named" reference if needed */
6267 	if (object1->named && !object2->named) {
6268 		assert(object1->ref_count >= 2);
6269 		assert(object2->ref_count >= 1);
6270 		object1->ref_count--;
6271 		object2->ref_count++;
6272 	} else if (!object1->named && object2->named) {
6273 		assert(object1->ref_count >= 1);
6274 		assert(object2->ref_count >= 2);
6275 		object1->ref_count++;
6276 		object2->ref_count--;
6277 	}
6278 	__TRANSPOSE_FIELD(named);
6279 	/* "shadow_severed" refers to the object not its contents */
6280 	__TRANSPOSE_FIELD(phys_contiguous);
6281 	__TRANSPOSE_FIELD(nophyscache);
6282 	/* "cached_list.next" points to transposed object */
6283 	object1->cached_list.next = (queue_entry_t) object2;
6284 	object2->cached_list.next = (queue_entry_t) object1;
6285 	/* "cached_list.prev" should be NULL */
6286 	assert(object1->cached_list.prev == NULL);
6287 	assert(object2->cached_list.prev == NULL);
6288 	__TRANSPOSE_FIELD(last_alloc);
6289 	__TRANSPOSE_FIELD(sequential);
6290 	__TRANSPOSE_FIELD(pages_created);
6291 	__TRANSPOSE_FIELD(pages_used);
6292 	__TRANSPOSE_FIELD(scan_collisions);
6293 	__TRANSPOSE_FIELD(cow_hint);
6294 	__TRANSPOSE_FIELD(wimg_bits);
6295 	__TRANSPOSE_FIELD(set_cache_attr);
6296 	__TRANSPOSE_FIELD(code_signed);
6297 	object1->transposed = TRUE;
6298 	object2->transposed = TRUE;
6299 	__TRANSPOSE_FIELD(mapping_in_progress);
6300 	__TRANSPOSE_FIELD(volatile_empty);
6301 	__TRANSPOSE_FIELD(volatile_fault);
6302 	__TRANSPOSE_FIELD(all_reusable);
6303 	assert(object1->blocked_access);
6304 	assert(object2->blocked_access);
6305 	__TRANSPOSE_FIELD(set_cache_attr);
6306 	assert(!object1->object_is_shared_cache);
6307 	assert(!object2->object_is_shared_cache);
6308 	/* ignore purgeable_queue_type and purgeable_queue_group */
6309 	assert(!object1->io_tracking);
6310 	assert(!object2->io_tracking);
6311 #if VM_OBJECT_ACCESS_TRACKING
6312 	assert(!object1->access_tracking);
6313 	assert(!object2->access_tracking);
6314 #endif /* VM_OBJECT_ACCESS_TRACKING */
6315 	__TRANSPOSE_FIELD(no_tag_update);
6316 #if CONFIG_SECLUDED_MEMORY
6317 	assert(!object1->eligible_for_secluded);
6318 	assert(!object2->eligible_for_secluded);
6319 	assert(!object1->can_grab_secluded);
6320 	assert(!object2->can_grab_secluded);
6321 #else /* CONFIG_SECLUDED_MEMORY */
6322 	assert(object1->__object3_unused_bits == 0);
6323 	assert(object2->__object3_unused_bits == 0);
6324 #endif /* CONFIG_SECLUDED_MEMORY */
6325 #if UPL_DEBUG
6326 	/* "uplq" refers to the object not its contents (see upl_transpose()) */
6327 #endif
6328 	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.next == NULL));
6329 	assert((object1->purgable == VM_PURGABLE_DENY) || (object1->objq.prev == NULL));
6330 	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.next == NULL));
6331 	assert((object2->purgable == VM_PURGABLE_DENY) || (object2->objq.prev == NULL));
6332 
6333 #undef __TRANSPOSE_FIELD
6334 
6335 	retval = KERN_SUCCESS;
6336 
6337 done:
6338 	/*
6339 	 * Cleanup.
6340 	 */
6341 	if (tmp_object != VM_OBJECT_NULL) {
6342 		vm_object_unlock(tmp_object);
6343 		/*
6344 		 * Re-initialize the temporary object to avoid
6345 		 * deallocating a real pager.
6346 		 */
6347 		_vm_object_allocate(transpose_size, tmp_object);
6348 		vm_object_deallocate(tmp_object);
6349 		tmp_object = VM_OBJECT_NULL;
6350 	}
6351 
6352 	if (object1_locked) {
6353 		vm_object_unlock(object1);
6354 		object1_locked = FALSE;
6355 	}
6356 	if (object2_locked) {
6357 		vm_object_unlock(object2);
6358 		object2_locked = FALSE;
6359 	}
6360 
6361 	vm_object_transpose_count++;
6362 
6363 	return retval;
6364 }
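
/*
 * Hypothetical usage sketch (not a caller in this file): both objects must
 * be unlocked, exactly "size" bytes long, and quiesced beforehand (e.g. via
 * a UPL created with UPL_SET_IO_WIRE and UPL_BLOCK_ACCESS, as noted above).
 *
 *	kern_return_t kr;
 *
 *	kr = vm_object_transpose(object1, object2, size);
 *	if (kr != KERN_SUCCESS) {
 *		// KERN_INVALID_VALUE: same object, size mismatch,
 *		// copy/shadow present, or purgeable object
 *	}
 */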
6365 
6366 
6367 /*
6368  *      vm_object_cluster_size
6369  *
6370  *      Determine how big a cluster we should issue an I/O for...
6371  *
6372  *	Inputs:   *start == offset of page needed
6373  *		  *length == maximum cluster pager can handle
6374  *	Outputs:  *start == beginning offset of cluster
6375  *		  *length == length of cluster to try
6376  *
6377  *	The original *start will be encompassed by the cluster
6378  *
6379  */
6380 extern int speculative_reads_disabled;
6381 
6382 /*
6383  * Try to always keep these values an even multiple of PAGE_SIZE. We use these values
6384  * to derive min_ph_size and max_ph_size (IMP: bytes, not # of pages) and expect those values to
6385  * always be page-aligned. The derivation could involve operations (e.g. division)
6386  * that could give us non-page-size aligned values if we start out with values that
6387  * are odd multiples of PAGE_SIZE.
6388  */
6389 #if !XNU_TARGET_OS_OSX
6390 unsigned int preheat_max_bytes = (1024 * 512);
6391 #else /* !XNU_TARGET_OS_OSX */
6392 unsigned int preheat_max_bytes = MAX_UPL_TRANSFER_BYTES;
6393 #endif /* !XNU_TARGET_OS_OSX */
6394 unsigned int preheat_min_bytes = (1024 * 32);
6395 
6396 
6397 __private_extern__ void
6398 vm_object_cluster_size(vm_object_t object, vm_object_offset_t *start,
6399     vm_size_t *length, vm_object_fault_info_t fault_info, uint32_t *io_streaming)
6400 {
6401 	vm_size_t               pre_heat_size;
6402 	vm_size_t               tail_size;
6403 	vm_size_t               head_size;
6404 	vm_size_t               max_length;
6405 	vm_size_t               cluster_size;
6406 	vm_object_offset_t      object_size;
6407 	vm_object_offset_t      orig_start;
6408 	vm_object_offset_t      target_start;
6409 	vm_object_offset_t      offset;
6410 	vm_behavior_t           behavior;
6411 	boolean_t               look_behind = TRUE;
6412 	boolean_t               look_ahead  = TRUE;
6413 	boolean_t               isSSD = FALSE;
6414 	uint32_t                throttle_limit;
6415 	int                     sequential_run;
6416 	int                     sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6417 	vm_size_t               max_ph_size;
6418 	vm_size_t               min_ph_size;
6419 
6420 	assert( !(*length & PAGE_MASK));
6421 	assert( !(*start & PAGE_MASK_64));
6422 
6423 	/*
6424 	 * remember maximum length of run requested
6425 	 */
6426 	max_length = *length;
6427 	/*
6428 	 * we'll always return a cluster size of at least
6429 	 * 1 page, since the original fault must always
6430 	 * be processed
6431 	 */
6432 	*length = PAGE_SIZE;
6433 	*io_streaming = 0;
6434 
6435 	if (speculative_reads_disabled || fault_info == NULL) {
6436 		/*
6437 		 * no cluster... just fault the page in
6438 		 */
6439 		return;
6440 	}
6441 	orig_start = *start;
6442 	target_start = orig_start;
6443 	cluster_size = round_page(fault_info->cluster_size);
6444 	behavior = fault_info->behavior;
6445 
6446 	vm_object_lock(object);
6447 
6448 	if (object->pager == MEMORY_OBJECT_NULL) {
6449 		goto out;       /* pager is gone for this object, nothing more to do */
6450 	}
6451 	vnode_pager_get_isSSD(object->pager, &isSSD);
6452 
6453 	min_ph_size = round_page(preheat_min_bytes);
6454 	max_ph_size = round_page(preheat_max_bytes);
6455 
6456 #if XNU_TARGET_OS_OSX
6457 	if (isSSD) {
6458 		min_ph_size /= 2;
6459 		max_ph_size /= 8;
6460 
6461 		if (min_ph_size & PAGE_MASK_64) {
6462 			min_ph_size = trunc_page(min_ph_size);
6463 		}
6464 
6465 		if (max_ph_size & PAGE_MASK_64) {
6466 			max_ph_size = trunc_page(max_ph_size);
6467 		}
6468 	}
6469 #endif /* XNU_TARGET_OS_OSX */
6470 
6471 	if (min_ph_size < PAGE_SIZE) {
6472 		min_ph_size = PAGE_SIZE;
6473 	}
6474 
6475 	if (max_ph_size < PAGE_SIZE) {
6476 		max_ph_size = PAGE_SIZE;
6477 	} else if (max_ph_size > MAX_UPL_TRANSFER_BYTES) {
6478 		max_ph_size = MAX_UPL_TRANSFER_BYTES;
6479 	}
6480 
6481 	if (max_length > max_ph_size) {
6482 		max_length = max_ph_size;
6483 	}
6484 
6485 	if (max_length <= PAGE_SIZE) {
6486 		goto out;
6487 	}
6488 
6489 	if (object->internal) {
6490 		object_size = object->vo_size;
6491 	} else {
6492 		vnode_pager_get_object_size(object->pager, &object_size);
6493 	}
6494 
6495 	object_size = round_page_64(object_size);
6496 
6497 	if (orig_start >= object_size) {
6498 		/*
6499 		 * fault occurred beyond the EOF...
6500 		 * we need to punt w/o changing the
6501 		 * starting offset
6502 		 */
6503 		goto out;
6504 	}
6505 	if (object->pages_used > object->pages_created) {
6506 		/*
6507 		 * must have wrapped our 32 bit counters
6508 		 * so reset
6509 		 */
6510 		object->pages_used = object->pages_created = 0;
6511 	}
6512 	if ((sequential_run = object->sequential)) {
6513 		if (sequential_run < 0) {
6514 			sequential_behavior = VM_BEHAVIOR_RSEQNTL;
6515 			sequential_run = 0 - sequential_run;
6516 		} else {
6517 			sequential_behavior = VM_BEHAVIOR_SEQUENTIAL;
6518 		}
6519 	}
6520 	switch (behavior) {
6521 	default:
6522 		behavior = VM_BEHAVIOR_DEFAULT;
6523 		OS_FALLTHROUGH;
6524 
6525 	case VM_BEHAVIOR_DEFAULT:
6526 		if (object->internal && fault_info->user_tag == VM_MEMORY_STACK) {
6527 			goto out;
6528 		}
6529 
6530 		if (sequential_run >= (3 * PAGE_SIZE)) {
6531 			pre_heat_size = sequential_run + PAGE_SIZE;
6532 
6533 			if (sequential_behavior == VM_BEHAVIOR_SEQUENTIAL) {
6534 				look_behind = FALSE;
6535 			} else {
6536 				look_ahead = FALSE;
6537 			}
6538 
6539 			*io_streaming = 1;
6540 		} else {
6541 			if (object->pages_created < (20 * (min_ph_size >> PAGE_SHIFT))) {
6542 				/*
6543 				 * prime the pump
6544 				 */
6545 				pre_heat_size = min_ph_size;
6546 			} else {
6547 				/*
6548 				 * Linear growth in PH size: The maximum size is max_length...
6549 				 * this calculation will result in a size that is neither a
6550 				 * power of 2 nor a multiple of PAGE_SIZE... so round
6551 				 * it up to the nearest PAGE_SIZE boundary
6552 				 */
6553 				pre_heat_size = (max_length * (uint64_t)object->pages_used) / object->pages_created;
6554 
6555 				if (pre_heat_size < min_ph_size) {
6556 					pre_heat_size = min_ph_size;
6557 				} else {
6558 					pre_heat_size = round_page(pre_heat_size);
6559 				}
6560 			}
6561 		}
6562 		break;
6563 
6564 	case VM_BEHAVIOR_RANDOM:
6565 		if ((pre_heat_size = cluster_size) <= PAGE_SIZE) {
6566 			goto out;
6567 		}
6568 		break;
6569 
6570 	case VM_BEHAVIOR_SEQUENTIAL:
6571 		if ((pre_heat_size = cluster_size) == 0) {
6572 			pre_heat_size = sequential_run + PAGE_SIZE;
6573 		}
6574 		look_behind = FALSE;
6575 		*io_streaming = 1;
6576 
6577 		break;
6578 
6579 	case VM_BEHAVIOR_RSEQNTL:
6580 		if ((pre_heat_size = cluster_size) == 0) {
6581 			pre_heat_size = sequential_run + PAGE_SIZE;
6582 		}
6583 		look_ahead = FALSE;
6584 		*io_streaming = 1;
6585 
6586 		break;
6587 	}
6588 	throttle_limit = (uint32_t) max_length;
6589 	assert(throttle_limit == max_length);
6590 
6591 	if (vnode_pager_get_throttle_io_limit(object->pager, &throttle_limit) == KERN_SUCCESS) {
6592 		if (max_length > throttle_limit) {
6593 			max_length = throttle_limit;
6594 		}
6595 	}
6596 	if (pre_heat_size > max_length) {
6597 		pre_heat_size = max_length;
6598 	}
6599 
6600 	if (behavior == VM_BEHAVIOR_DEFAULT && (pre_heat_size > min_ph_size)) {
6601 		unsigned int consider_free = vm_page_free_count + vm_page_cleaned_count;
6602 
6603 		if (consider_free < vm_page_throttle_limit) {
6604 			pre_heat_size = trunc_page(pre_heat_size / 16);
6605 		} else if (consider_free < vm_page_free_target) {
6606 			pre_heat_size = trunc_page(pre_heat_size / 4);
6607 		}
6608 
6609 		if (pre_heat_size < min_ph_size) {
6610 			pre_heat_size = min_ph_size;
6611 		}
6612 	}
6613 	if (look_ahead == TRUE) {
6614 		if (look_behind == TRUE) {
6615 			/*
6616 			 * if we get here it's due to a random access...
6617 			 * so we want to center the original fault address
6618 			 * within the cluster we will issue... make sure
6619 			 * to calculate 'head_size' as a multiple of PAGE_SIZE...
6620 			 * 'pre_heat_size' is a multiple of PAGE_SIZE but not
6621 			 * necessarily an even number of pages so we need to truncate
6622 			 * the result to a PAGE_SIZE boundary
6623 			 */
6624 			head_size = trunc_page(pre_heat_size / 2);
6625 
6626 			if (target_start > head_size) {
6627 				target_start -= head_size;
6628 			} else {
6629 				target_start = 0;
6630 			}
6631 
6632 			/*
6633 			 * 'target_start' at this point represents the beginning offset
6634 			 * of the cluster we are considering... 'orig_start' will be in
6635 			 * the center of this cluster if we didn't have to clip the start
6636 			 * due to running into the start of the file
6637 			 */
6638 		}
6639 		if ((target_start + pre_heat_size) > object_size) {
6640 			pre_heat_size = (vm_size_t)(round_page_64(object_size - target_start));
6641 		}
6642 		/*
6643 		 * at this point calculate the number of pages beyond the original fault
6644 		 * address that we want to consider... this is guaranteed not to extend beyond
6645 		 * the current EOF...
6646 		 */
6647 		assert((vm_size_t)(orig_start - target_start) == (orig_start - target_start));
6648 		tail_size = pre_heat_size - (vm_size_t)(orig_start - target_start) - PAGE_SIZE;
6649 	} else {
6650 		if (pre_heat_size > target_start) {
6651 			/*
6652 			 * since pre_heat_size is always smaller than 2^32,
6653 			 * if it is larger than target_start (a 64 bit value)
6654 			 * it is safe to clip target_start to 32 bits
6655 			 */
6656 			pre_heat_size = (vm_size_t) target_start;
6657 		}
6658 		tail_size = 0;
6659 	}
6660 	assert( !(target_start & PAGE_MASK_64));
6661 	assert( !(pre_heat_size & PAGE_MASK_64));
6662 
6663 	if (pre_heat_size <= PAGE_SIZE) {
6664 		goto out;
6665 	}
6666 
6667 	if (look_behind == TRUE) {
6668 		/*
6669 		 * take a look at the pages before the original
6670 		 * faulting offset... recalculate this in case
6671 		 * we had to clip 'pre_heat_size' above to keep
6672 		 * from running past the EOF.
6673 		 */
6674 		head_size = pre_heat_size - tail_size - PAGE_SIZE;
6675 
6676 		for (offset = orig_start - PAGE_SIZE_64; head_size; offset -= PAGE_SIZE_64, head_size -= PAGE_SIZE) {
6677 			/*
6678 			 * don't poke below the lowest offset
6679 			 */
6680 			if (offset < fault_info->lo_offset) {
6681 				break;
6682 			}
6683 			/*
6684 			 * for external objects or internal objects w/o a pager,
6685 			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6686 			 */
6687 			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6688 				break;
6689 			}
6690 			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6691 				/*
6692 				 * don't bridge resident pages
6693 				 */
6694 				break;
6695 			}
6696 			*start = offset;
6697 			*length += PAGE_SIZE;
6698 		}
6699 	}
6700 	if (look_ahead == TRUE) {
6701 		for (offset = orig_start + PAGE_SIZE_64; tail_size; offset += PAGE_SIZE_64, tail_size -= PAGE_SIZE) {
6702 			/*
6703 			 * don't poke above the highest offset
6704 			 */
6705 			if (offset >= fault_info->hi_offset) {
6706 				break;
6707 			}
6708 			assert(offset < object_size);
6709 
6710 			/*
6711 			 * for external objects or internal objects w/o a pager,
6712 			 * VM_COMPRESSOR_PAGER_STATE_GET will return VM_EXTERNAL_STATE_UNKNOWN
6713 			 */
6714 			if (VM_COMPRESSOR_PAGER_STATE_GET(object, offset) == VM_EXTERNAL_STATE_ABSENT) {
6715 				break;
6716 			}
6717 			if (vm_page_lookup(object, offset) != VM_PAGE_NULL) {
6718 				/*
6719 				 * don't bridge resident pages
6720 				 */
6721 				break;
6722 			}
6723 			*length += PAGE_SIZE;
6724 		}
6725 	}
6726 out:
6727 	if (*length > max_length) {
6728 		*length = max_length;
6729 	}
6730 
6731 	vm_object_unlock(object);
6732 
6733 	DTRACE_VM1(clustersize, vm_size_t, *length);
6734 }
6735 
6736 
6737 /*
6738  * Allow manipulation of individual page state.  This is actually part of
6739  * the UPL regimen but takes place on the VM object rather than on a UPL
6740  */
6741 
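/*
 * Illustrative caller sequence (a sketch, not taken from an actual
 * caller): mark a page busy and fetch its physical page number, then
 * later clear the busy bit and wake any waiters.
 *
 *	ppnum_t		phys;
 *	int		page_flags;
 *
 *	kr = vm_object_page_op(object, offset,
 *	    UPL_POP_SET | UPL_POP_BUSY, &phys, &page_flags);
 *	...
 *	kr = vm_object_page_op(object, offset,
 *	    UPL_POP_CLR | UPL_POP_BUSY, NULL, NULL);
 */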
6742 kern_return_t
6743 vm_object_page_op(
6744 	vm_object_t             object,
6745 	vm_object_offset_t      offset,
6746 	int                     ops,
6747 	ppnum_t                 *phys_entry,
6748 	int                     *flags)
6749 {
6750 	vm_page_t               dst_page;
6751 
6752 	vm_object_lock(object);
6753 
6754 	if (ops & UPL_POP_PHYSICAL) {
6755 		if (object->phys_contiguous) {
6756 			if (phys_entry) {
6757 				*phys_entry = (ppnum_t)
6758 				    (object->vo_shadow_offset >> PAGE_SHIFT);
6759 			}
6760 			vm_object_unlock(object);
6761 			return KERN_SUCCESS;
6762 		} else {
6763 			vm_object_unlock(object);
6764 			return KERN_INVALID_OBJECT;
6765 		}
6766 	}
6767 	if (object->phys_contiguous) {
6768 		vm_object_unlock(object);
6769 		return KERN_INVALID_OBJECT;
6770 	}
6771 
6772 	while (TRUE) {
6773 		if ((dst_page = vm_page_lookup(object, offset)) == VM_PAGE_NULL) {
6774 			vm_object_unlock(object);
6775 			return KERN_FAILURE;
6776 		}
6777 
6778 		/* Sync up on getting the busy bit */
6779 		if ((dst_page->vmp_busy || dst_page->vmp_cleaning) &&
6780 		    (((ops & UPL_POP_SET) &&
6781 		    (ops & UPL_POP_BUSY)) || (ops & UPL_POP_DUMP))) {
6782 			/* someone else is playing with the page, we will */
6783 			/* have to wait */
6784 			PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6785 			continue;
6786 		}
6787 
6788 		if (ops & UPL_POP_DUMP) {
6789 			if (dst_page->vmp_pmapped == TRUE) {
6790 				pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6791 			}
6792 
6793 			VM_PAGE_FREE(dst_page);
6794 			break;
6795 		}
6796 
6797 		if (flags) {
6798 			*flags = 0;
6799 
6800 			/* Get the condition of flags before requested ops */
6801 			/* are undertaken */
6802 
6803 			if (dst_page->vmp_dirty) {
6804 				*flags |= UPL_POP_DIRTY;
6805 			}
6806 			if (dst_page->vmp_free_when_done) {
6807 				*flags |= UPL_POP_PAGEOUT;
6808 			}
6809 			if (dst_page->vmp_precious) {
6810 				*flags |= UPL_POP_PRECIOUS;
6811 			}
6812 			if (dst_page->vmp_absent) {
6813 				*flags |= UPL_POP_ABSENT;
6814 			}
6815 			if (dst_page->vmp_busy) {
6816 				*flags |= UPL_POP_BUSY;
6817 			}
6818 		}
6819 
6820 		/* The caller should have made a call either concurrent with */
6821 		/* or prior to this call to set UPL_POP_BUSY */
6822 		if (ops & UPL_POP_SET) {
6823 			/* The protection granted with this assert will */
6824 			/* not be complete.  If the caller violates the */
6825 			/* convention and attempts to change page state */
6826 			/* without first setting busy we may not see it */
6827 			/* because the page may already be busy.  However */
6828 			/* if such violations occur we will assert sooner */
6829 			/* or later. */
6830 			assert(dst_page->vmp_busy || (ops & UPL_POP_BUSY));
6831 			if (ops & UPL_POP_DIRTY) {
6832 				SET_PAGE_DIRTY(dst_page, FALSE);
6833 			}
6834 			if (ops & UPL_POP_PAGEOUT) {
6835 				dst_page->vmp_free_when_done = TRUE;
6836 			}
6837 			if (ops & UPL_POP_PRECIOUS) {
6838 				dst_page->vmp_precious = TRUE;
6839 			}
6840 			if (ops & UPL_POP_ABSENT) {
6841 				dst_page->vmp_absent = TRUE;
6842 			}
6843 			if (ops & UPL_POP_BUSY) {
6844 				dst_page->vmp_busy = TRUE;
6845 			}
6846 		}
6847 
6848 		if (ops & UPL_POP_CLR) {
6849 			assert(dst_page->vmp_busy);
6850 			if (ops & UPL_POP_DIRTY) {
6851 				dst_page->vmp_dirty = FALSE;
6852 			}
6853 			if (ops & UPL_POP_PAGEOUT) {
6854 				dst_page->vmp_free_when_done = FALSE;
6855 			}
6856 			if (ops & UPL_POP_PRECIOUS) {
6857 				dst_page->vmp_precious = FALSE;
6858 			}
6859 			if (ops & UPL_POP_ABSENT) {
6860 				dst_page->vmp_absent = FALSE;
6861 			}
6862 			if (ops & UPL_POP_BUSY) {
6863 				dst_page->vmp_busy = FALSE;
6864 				PAGE_WAKEUP(dst_page);
6865 			}
6866 		}
6867 		if (phys_entry) {
6868 			/*
6869 			 * The physical page number will remain valid
6870 			 * only if the page is kept busy.
6871 			 */
6872 			assert(dst_page->vmp_busy);
6873 			*phys_entry = VM_PAGE_GET_PHYS_PAGE(dst_page);
6874 		}
6875 
6876 		break;
6877 	}
6878 
6879 	vm_object_unlock(object);
6880 	return KERN_SUCCESS;
6881 }
6882 
6883 /*
6884  * vm_object_range_op offers a performance enhancement over
6885  * vm_object_page_op for page_op operations that do not require page-level
6886  * state to be returned from the call.  page_op was created to provide
6887  * a low-cost alternative to page manipulation via UPLs when only a single
6888  * page was involved.  The range_op call extends the _op family of
6889  * functions to work on multiple pages, where the lack of page-level state
6890  * handling allows the caller to avoid the overhead of the UPL structures.
6891  */
6892 
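/*
 * Illustrative use (a sketch with hypothetical values): free every
 * resident page in the first megabyte of 'object' and learn how many
 * bytes of the range were processed.
 *
 *	uint32_t	covered = 0;
 *
 *	kr = vm_object_range_op(object, 0, 1024 * 1024,
 *	    UPL_ROP_DUMP, &covered);
 */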
6893 kern_return_t
6894 vm_object_range_op(
6895 	vm_object_t             object,
6896 	vm_object_offset_t      offset_beg,
6897 	vm_object_offset_t      offset_end,
6898 	int                     ops,
6899 	uint32_t                *range)
6900 {
6901 	vm_object_offset_t      offset;
6902 	vm_page_t               dst_page;
6903 
6904 	if (offset_end - offset_beg > (uint32_t) -1) {
6905 		/* range is too big and would overflow "*range" */
6906 		return KERN_INVALID_ARGUMENT;
6907 	}
6908 	if (object->resident_page_count == 0) {
6909 		if (range) {
6910 			if (ops & UPL_ROP_PRESENT) {
6911 				*range = 0;
6912 			} else {
6913 				*range = (uint32_t) (offset_end - offset_beg);
6914 				assert(*range == (offset_end - offset_beg));
6915 			}
6916 		}
6917 		return KERN_SUCCESS;
6918 	}
6919 	vm_object_lock(object);
6920 
6921 	if (object->phys_contiguous) {
6922 		vm_object_unlock(object);
6923 		return KERN_INVALID_OBJECT;
6924 	}
6925 
6926 	offset = offset_beg & ~PAGE_MASK_64;
6927 
6928 	while (offset < offset_end) {
6929 		dst_page = vm_page_lookup(object, offset);
6930 		if (dst_page != VM_PAGE_NULL) {
6931 			if (ops & UPL_ROP_DUMP) {
6932 				if (dst_page->vmp_busy || dst_page->vmp_cleaning) {
6933 					/*
6934 					 * someone else is playing with the
6935 					 * page, we will have to wait
6936 					 */
6937 					PAGE_SLEEP(object, dst_page, THREAD_UNINT);
6938 					/*
6939 					 * need to look the page up again since its
6940 					 * state may have changed while we slept;
6941 					 * it might even belong to a different object
6942 					 * at this point
6943 					 */
6944 					continue;
6945 				}
6946 				if (dst_page->vmp_laundry) {
6947 					vm_pageout_steal_laundry(dst_page, FALSE);
6948 				}
6949 
6950 				if (dst_page->vmp_pmapped == TRUE) {
6951 					pmap_disconnect(VM_PAGE_GET_PHYS_PAGE(dst_page));
6952 				}
6953 
6954 				VM_PAGE_FREE(dst_page);
6955 			} else if ((ops & UPL_ROP_ABSENT)
6956 			    && (!dst_page->vmp_absent || dst_page->vmp_busy)) {
6957 				break;
6958 			}
6959 		} else if (ops & UPL_ROP_PRESENT) {
6960 			break;
6961 		}
6962 
6963 		offset += PAGE_SIZE;
6964 	}
6965 	vm_object_unlock(object);
6966 
6967 	if (range) {
6968 		if (offset > offset_end) {
6969 			offset = offset_end;
6970 		}
6971 		if (offset > offset_beg) {
6972 			*range = (uint32_t) (offset - offset_beg);
6973 			assert(*range == (offset - offset_beg));
6974 		} else {
6975 			*range = 0;
6976 		}
6977 	}
6978 	return KERN_SUCCESS;
6979 }
6980 
6981 /*
6982  * Used to point a pager directly to a range of memory (when the pager may be associated
6983  *   with a non-device vnode).  Takes a virtual address, an offset, and a size.  We currently
6984  *   expect that the virtual address will denote the start of a range that is physically contiguous.
6985  */
6986 kern_return_t
6987 pager_map_to_phys_contiguous(
6988 	memory_object_control_t object,
6989 	memory_object_offset_t  offset,
6990 	addr64_t                base_vaddr,
6991 	vm_size_t               size)
6992 {
6993 	ppnum_t page_num;
6994 	boolean_t clobbered_private;
6995 	kern_return_t retval;
6996 	vm_object_t pager_object;
6997 
6998 	page_num = pmap_find_phys(kernel_pmap, base_vaddr);
6999 
7000 	if (!page_num) {
7001 		retval = KERN_FAILURE;
7002 		goto out;
7003 	}
7004 
7005 	pager_object = memory_object_control_to_vm_object(object);
7006 
7007 	if (!pager_object) {
7008 		retval = KERN_FAILURE;
7009 		goto out;
7010 	}
7011 
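	/*
	 * Remember whether the object was already marked "private" so we
	 * can restore the previous setting if populating it fails below.
	 */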
7012 	clobbered_private = pager_object->private;
7013 	if (pager_object->private != TRUE) {
7014 		vm_object_lock(pager_object);
7015 		pager_object->private = TRUE;
7016 		vm_object_unlock(pager_object);
7017 	}
7018 	retval = vm_object_populate_with_private(pager_object, offset, page_num, size);
7019 
7020 	if (retval != KERN_SUCCESS) {
7021 		if (pager_object->private != clobbered_private) {
7022 			vm_object_lock(pager_object);
7023 			pager_object->private = clobbered_private;
7024 			vm_object_unlock(pager_object);
7025 		}
7026 	}
7027 
7028 out:
7029 	return retval;
7030 }
7031 
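/*
 * VM object lock wrappers.  The object lock is a reader/writer lock
 * (object->Lock); the routines below wrap the lck_rw_* primitives, fire
 * DTrace probes for lock tracing, and back off briefly (mutex_pause)
 * when the pageout scan thread has advertised, via
 * vm_pageout_scan_wants_object, that it is waiting for this object, so
 * that the scanner is not starved by other would-be lock holders.
 */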
7032 uint32_t scan_object_collision = 0;
7033 
7034 void
7035 vm_object_lock(vm_object_t object)
7036 {
7037 	if (object == vm_pageout_scan_wants_object) {
7038 		scan_object_collision++;
7039 		mutex_pause(2);
7040 	}
7041 	DTRACE_VM(vm_object_lock_w);
7042 	lck_rw_lock_exclusive(&object->Lock);
7043 }
7044 
7045 boolean_t
7046 vm_object_lock_avoid(vm_object_t object)
7047 {
7048 	if (object == vm_pageout_scan_wants_object) {
7049 		scan_object_collision++;
7050 		return TRUE;
7051 	}
7052 	return FALSE;
7053 }
7054 
7055 boolean_t
7056 _vm_object_lock_try(vm_object_t object)
7057 {
7058 	boolean_t       retval;
7059 
7060 	retval = lck_rw_try_lock_exclusive(&object->Lock);
7061 #if DEVELOPMENT || DEBUG
7062 	if (retval == TRUE) {
7063 		DTRACE_VM(vm_object_lock_w);
7064 	}
7065 #endif
7066 	return retval;
7067 }
7068 
7069 boolean_t
7070 vm_object_lock_try(vm_object_t object)
7071 {
7072 	/*
7073 	 * Called from hibernate path so check before blocking.
7074 	 */
7075 	if (vm_object_lock_avoid(object) && ml_get_interrupts_enabled() && get_preemption_level() == 0) {
7076 		mutex_pause(2);
7077 	}
7078 	return _vm_object_lock_try(object);
7079 }
7080 
7081 /*
7082  * Lock the object exclusively.
7083  *
7084  * Returns true iff the thread had to spin or block before
7085  * acquiring the lock.
7086  */
7087 bool
7088 vm_object_lock_check_contended(vm_object_t object)
7089 {
7090 	if (object == vm_pageout_scan_wants_object) {
7091 		scan_object_collision++;
7092 		mutex_pause(2);
7093 	}
7094 	DTRACE_VM(vm_object_lock_w);
7095 	return lck_rw_lock_exclusive_check_contended(&object->Lock);
7096 }
7097 
7098 void
7099 vm_object_lock_shared(vm_object_t object)
7100 {
7101 	if (vm_object_lock_avoid(object)) {
7102 		mutex_pause(2);
7103 	}
7104 	DTRACE_VM(vm_object_lock_r);
7105 	lck_rw_lock_shared(&object->Lock);
7106 }
7107 
7108 boolean_t
7109 vm_object_lock_yield_shared(vm_object_t object)
7110 {
7111 	boolean_t retval = FALSE, force_yield = FALSE;
7112 
7113 	vm_object_lock_assert_shared(object);
7114 
7115 	force_yield = vm_object_lock_avoid(object);
7116 
7117 	retval = lck_rw_lock_yield_shared(&object->Lock, force_yield);
7118 	if (retval) {
7119 		DTRACE_VM(vm_object_lock_yield);
7120 	}
7121 
7122 	return retval;
7123 }
7124 
7125 boolean_t
7126 vm_object_lock_try_shared(vm_object_t object)
7127 {
7128 	boolean_t retval;
7129 
7130 	if (vm_object_lock_avoid(object)) {
7131 		mutex_pause(2);
7132 	}
7133 	retval = lck_rw_try_lock_shared(&object->Lock);
7134 	if (retval) {
7135 		DTRACE_VM(vm_object_lock_r);
7136 	}
7137 	return retval;
7138 }
7139 
7140 boolean_t
7141 vm_object_lock_upgrade(vm_object_t object)
7142 {
7143 	boolean_t       retval;
7144 
7145 	retval = lck_rw_lock_shared_to_exclusive(&object->Lock);
7146 #if DEVELOPMENT || DEBUG
7147 	if (retval == TRUE) {
7148 		DTRACE_VM(vm_object_lock_w);
7149 	}
7150 #endif
7151 	return retval;
7152 }
7153 
7154 void
7155 vm_object_unlock(vm_object_t object)
7156 {
7157 #if DEVELOPMENT || DEBUG
7158 	DTRACE_VM(vm_object_unlock);
7159 #endif
7160 	lck_rw_done(&object->Lock);
7161 }
7162 
7163 
7164 unsigned int vm_object_change_wimg_mode_count = 0;
7165 
7166 /*
7167  * The object must be locked exclusively
7168  */
7169 void
7170 vm_object_change_wimg_mode(vm_object_t object, unsigned int wimg_mode)
7171 {
7172 	vm_page_t p;
7173 
7174 	vm_object_lock_assert_exclusive(object);
7175 
7176 	vm_object_paging_only_wait(object, THREAD_UNINT);
7177 
7178 	vm_page_queue_iterate(&object->memq, p, vmp_listq) {
7179 		if (!p->vmp_fictitious) {
7180 			pmap_set_cache_attributes(VM_PAGE_GET_PHYS_PAGE(p), wimg_mode);
7181 		}
7182 	}
7183 	if (wimg_mode == VM_WIMG_USE_DEFAULT) {
7184 		object->set_cache_attr = FALSE;
7185 	} else {
7186 		object->set_cache_attr = TRUE;
7187 	}
7188 
7189 	object->wimg_bits = wimg_mode;
7190 
7191 	vm_object_change_wimg_mode_count++;
7192 }
7193 
7194 #if CONFIG_FREEZE
7195 
7196 extern struct freezer_context   freezer_context_global;
7197 
7198 /*
7199  * This routine does the "relocation" of previously
7200  * compressed pages belonging to this object that are
7201  * residing in a number of compressed segments into
7202  * a set of compressed segments dedicated to holding
7203  * compressed pages belonging to this object.
7204  */
7205 
7206 extern AbsoluteTime c_freezer_last_yield_ts;
7207 
7208 #define MAX_FREE_BATCH  32
7209 #define FREEZER_DUTY_CYCLE_ON_MS        5
7210 #define FREEZER_DUTY_CYCLE_OFF_MS       5
7211 
7212 static int c_freezer_should_yield(void);
7213 
7214 
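/*
 * Return 1 once more than FREEZER_DUTY_CYCLE_ON_MS has elapsed since the
 * freezer last yielded, so callers can briefly give up the CPU.
 */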
7215 static int
7216 c_freezer_should_yield()
7217 {
7218 	AbsoluteTime    cur_time;
7219 	uint64_t        nsecs;
7220 
7221 	assert(c_freezer_last_yield_ts);
7222 	clock_get_uptime(&cur_time);
7223 
7224 	SUB_ABSOLUTETIME(&cur_time, &c_freezer_last_yield_ts);
7225 	absolutetime_to_nanoseconds(cur_time, &nsecs);
7226 
7227 	if (nsecs > 1000 * 1000 * FREEZER_DUTY_CYCLE_ON_MS) {
7228 		return 1;
7229 	}
7230 	return 0;
7231 }
7232 
7233 
7234 void
7235 vm_object_compressed_freezer_done()
7236 {
7237 	vm_compressor_finished_filling( &(freezer_context_global.freezer_ctx_chead));
7238 }
7239 
7240 
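/*
 * Walk the resident pages of this internal object and push dirty or
 * precious ones through the compressor, charging them to the freezer's
 * dedicated compressed segments.  Clean, non-precious pages are freed.
 * Returns the number of pages paged out, bounded by 'dirty_budget'.
 */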
7241 uint32_t
7242 vm_object_compressed_freezer_pageout(
7243 	vm_object_t object, uint32_t dirty_budget)
7244 {
7245 	vm_page_t                       p;
7246 	vm_page_t                       local_freeq = NULL;
7247 	int                             local_freed = 0;
7248 	kern_return_t                   retval = KERN_SUCCESS;
7249 	int                             obj_resident_page_count_snapshot = 0;
7250 	uint32_t                        paged_out_count = 0;
7251 
7252 	assert(object != VM_OBJECT_NULL);
7253 	assert(object->internal);
7254 
7255 	vm_object_lock(object);
7256 
7257 	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7258 		if (!object->pager_initialized) {
7259 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7260 
7261 			if (!object->pager_initialized) {
7262 				vm_object_compressor_pager_create(object);
7263 			}
7264 		}
7265 
7266 		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7267 			vm_object_unlock(object);
7268 			return paged_out_count;
7269 		}
7270 	}
7271 
7272 	/*
7273 	 * We could be freezing a shared internal object that might
7274 	 * be part of some other thread's current VM operations.
7275 	 * We skip it if there's paging or activity in progress
7276 	 * because we could be here a long time with the map lock held.
7277 	 *
7278 	 * Note: We are holding the map locked while we wait.
7279 	 * This is fine in the freezer path because the task
7280 	 * is suspended and so this latency is acceptable.
7281 	 */
7282 	if (object->paging_in_progress || object->activity_in_progress) {
7283 		vm_object_unlock(object);
7284 		return paged_out_count;
7285 	}
7286 
7287 	if (VM_CONFIG_FREEZER_SWAP_IS_ACTIVE) {
7288 		vm_object_offset_t      curr_offset = 0;
7289 
7290 		/*
7291 		 * Go through the object and make sure that any
7292 		 * previously compressed pages are relocated into
7293 		 * a compressed segment associated with our "freezer_chead".
7294 		 */
7295 		while (curr_offset < object->vo_size) {
7296 			curr_offset = vm_compressor_pager_next_compressed(object->pager, curr_offset);
7297 
7298 			if (curr_offset == (vm_object_offset_t) -1) {
7299 				break;
7300 			}
7301 
7302 			retval = vm_compressor_pager_relocate(object->pager, curr_offset, &(freezer_context_global.freezer_ctx_chead));
7303 
7304 			if (retval != KERN_SUCCESS) {
7305 				break;
7306 			}
7307 
7308 			curr_offset += PAGE_SIZE_64;
7309 		}
7310 	}
7311 
7312 	/*
7313 	 * We can't hold the object lock while heading down into the compressed pager
7314 	 * layer because we might need the kernel map lock down there to allocate new
7315 	 * compressor data structures. And if this same object is mapped in the kernel
7316 	 * and there's a fault on it, then that thread will want the object lock while
7317 	 * holding the kernel map lock.
7318 	 *
7319 	 * Since we are going to drop/grab the object lock repeatedly, we must make sure
7320 	 * we won't be stuck in an infinite loop if the same page(s) keep getting
7321 	 * decompressed. So we grab a snapshot of the number of pages in the object and
7322 	 * we won't process any more than that number of pages.
7323 	 */
7324 
7325 	obj_resident_page_count_snapshot = object->resident_page_count;
7326 
7327 	vm_object_activity_begin(object);
7328 
7329 	while ((obj_resident_page_count_snapshot--) && !vm_page_queue_empty(&object->memq) && paged_out_count < dirty_budget) {
7330 		p = (vm_page_t)vm_page_queue_first(&object->memq);
7331 
7332 		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_START, object, local_freed, 0, 0, 0);
7333 
7334 		vm_page_lockspin_queues();
7335 
7336 		if (p->vmp_cleaning || p->vmp_fictitious || p->vmp_busy || p->vmp_absent || p->vmp_unusual || VMP_ERROR_GET(p) || VM_PAGE_WIRED(p)) {
7337 			vm_page_unlock_queues();
7338 
7339 			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 1, 0, 0);
7340 
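			/*
			 * Page can't be frozen right now (busy, wired, etc.);
			 * move it to the back of the memq so we don't keep
			 * re-examining it at the head of the queue.
			 */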
7341 			vm_page_queue_remove(&object->memq, p, vmp_listq);
7342 			vm_page_queue_enter(&object->memq, p, vmp_listq);
7343 
7344 			continue;
7345 		}
7346 
7347 		if (p->vmp_pmapped == TRUE) {
7348 			int refmod_state, pmap_flags;
7349 
7350 			if (p->vmp_dirty || p->vmp_precious) {
7351 				pmap_flags = PMAP_OPTIONS_COMPRESSOR;
7352 			} else {
7353 				pmap_flags = PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7354 			}
7355 
7356 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p), pmap_flags, NULL);
7357 			if (refmod_state & VM_MEM_MODIFIED) {
7358 				SET_PAGE_DIRTY(p, FALSE);
7359 			}
7360 		}
7361 
7362 		if (p->vmp_dirty == FALSE && p->vmp_precious == FALSE) {
7363 			/*
7364 			 * Clean and non-precious page.
7365 			 */
7366 			vm_page_unlock_queues();
7367 			VM_PAGE_FREE(p);
7368 
7369 			KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 2, 0, 0);
7370 			continue;
7371 		}
7372 
7373 		if (p->vmp_laundry) {
7374 			vm_pageout_steal_laundry(p, TRUE);
7375 		}
7376 
7377 		vm_page_queues_remove(p, TRUE);
7378 
7379 		vm_page_unlock_queues();
7380 
7381 
7382 		/*
7383 		 * In case the compressor fails to compress this page, we need it at
7384 		 * the back of the object memq so that we don't keep trying to process it.
7385 		 * Make the move here while we have the object lock held.
7386 		 */
7387 
7388 		vm_page_queue_remove(&object->memq, p, vmp_listq);
7389 		vm_page_queue_enter(&object->memq, p, vmp_listq);
7390 
7391 		/*
7392 		 * Grab an activity_in_progress here for vm_pageout_compress_page() to consume.
7393 		 *
7394 		 * Mark the page busy so no one messes with it while we have the object lock dropped.
7395 		 */
7396 		p->vmp_busy = TRUE;
7397 
7398 		vm_object_activity_begin(object);
7399 
7400 		vm_object_unlock(object);
7401 
7402 		if (vm_pageout_compress_page(&(freezer_context_global.freezer_ctx_chead),
7403 		    (freezer_context_global.freezer_ctx_compressor_scratch_buf),
7404 		    p) == KERN_SUCCESS) {
7405 			/*
7406 			 * page has already been un-tabled from the object via 'vm_page_remove'
7407 			 */
7408 			p->vmp_snext = local_freeq;
7409 			local_freeq = p;
7410 			local_freed++;
7411 			paged_out_count++;
7412 
7413 			if (local_freed >= MAX_FREE_BATCH) {
7414 				OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7415 
7416 				vm_page_free_list(local_freeq, TRUE);
7417 
7418 				local_freeq = NULL;
7419 				local_freed = 0;
7420 			}
7421 			freezer_context_global.freezer_ctx_uncompressed_pages++;
7422 		}
7423 		KERNEL_DEBUG(0xe0430004 | DBG_FUNC_END, object, local_freed, 0, 0, 0);
7424 
7425 		if (local_freed == 0 && c_freezer_should_yield()) {
7426 			thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7427 			clock_get_uptime(&c_freezer_last_yield_ts);
7428 		}
7429 
7430 		vm_object_lock(object);
7431 	}
7432 
7433 	if (local_freeq) {
7434 		OSAddAtomic64(local_freed, &vm_pageout_vminfo.vm_pageout_compressions);
7435 
7436 		vm_page_free_list(local_freeq, TRUE);
7437 
7438 		local_freeq = NULL;
7439 		local_freed = 0;
7440 	}
7441 
7442 	vm_object_activity_end(object);
7443 
7444 	vm_object_unlock(object);
7445 
7446 	if (c_freezer_should_yield()) {
7447 		thread_yield_internal(FREEZER_DUTY_CYCLE_OFF_MS);
7448 		clock_get_uptime(&c_freezer_last_yield_ts);
7449 	}
7450 	return paged_out_count;
7451 }
7452 
7453 #endif /* CONFIG_FREEZE */
7454 
7455 
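/*
 * Push every dirty or precious resident page of an internal object onto
 * the internal (compressor) pageout queue via vm_pageout_cluster();
 * clean, non-precious pages are freed immediately.  Blocks and rescans
 * if the internal queue is throttled.
 */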
7456 void
7457 vm_object_pageout(
7458 	vm_object_t object)
7459 {
7460 	vm_page_t                       p, next;
7461 	struct  vm_pageout_queue        *iq;
7462 
7463 	if (!VM_CONFIG_COMPRESSOR_IS_PRESENT) {
7464 		return;
7465 	}
7466 
7467 	iq = &vm_pageout_queue_internal;
7468 
7469 	assert(object != VM_OBJECT_NULL );
7470 
7471 	vm_object_lock(object);
7472 
7473 	if (!object->internal ||
7474 	    object->terminating ||
7475 	    !object->alive) {
7476 		vm_object_unlock(object);
7477 		return;
7478 	}
7479 
7480 	if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7481 		if (!object->pager_initialized) {
7482 			vm_object_collapse(object, (vm_object_offset_t) 0, TRUE);
7483 
7484 			if (!object->pager_initialized) {
7485 				vm_object_compressor_pager_create(object);
7486 			}
7487 		}
7488 
7489 		if (!object->pager_initialized || object->pager == MEMORY_OBJECT_NULL) {
7490 			vm_object_unlock(object);
7491 			return;
7492 		}
7493 	}
7494 
7495 ReScan:
7496 	next = (vm_page_t)vm_page_queue_first(&object->memq);
7497 
7498 	while (!vm_page_queue_end(&object->memq, (vm_page_queue_entry_t)next)) {
7499 		p = next;
7500 		next = (vm_page_t)vm_page_queue_next(&next->vmp_listq);
7501 
7502 		assert(p->vmp_q_state != VM_PAGE_ON_FREE_Q);
7503 
7504 		if ((p->vmp_q_state == VM_PAGE_ON_THROTTLED_Q) ||
7505 		    p->vmp_cleaning ||
7506 		    p->vmp_laundry ||
7507 		    p->vmp_busy ||
7508 		    p->vmp_absent ||
7509 		    VMP_ERROR_GET(p) ||
7510 		    p->vmp_fictitious ||
7511 		    VM_PAGE_WIRED(p)) {
7512 			/*
7513 			 * Page is already being cleaned or can't be cleaned.
7514 			 */
7515 			continue;
7516 		}
7517 		if (vm_compressor_low_on_space()) {
7518 			break;
7519 		}
7520 
7521 		/* Throw to the pageout queue */
7522 
7523 		vm_page_lockspin_queues();
7524 
7525 		if (VM_PAGE_Q_THROTTLED(iq)) {
7526 			iq->pgo_draining = TRUE;
7527 
7528 			assert_wait((event_t) (&iq->pgo_laundry + 1),
7529 			    THREAD_INTERRUPTIBLE);
7530 			vm_page_unlock_queues();
7531 			vm_object_unlock(object);
7532 
7533 			thread_block(THREAD_CONTINUE_NULL);
7534 
7535 			vm_object_lock(object);
7536 			goto ReScan;
7537 		}
7538 
7539 		assert(!p->vmp_fictitious);
7540 		assert(!p->vmp_busy);
7541 		assert(!p->vmp_absent);
7542 		assert(!p->vmp_unusual);
7543 		assert(!VMP_ERROR_GET(p));      /* XXX there's a window here where we could have an ECC error! */
7544 		assert(!VM_PAGE_WIRED(p));
7545 		assert(!p->vmp_cleaning);
7546 
7547 		if (p->vmp_pmapped == TRUE) {
7548 			int refmod_state;
7549 			int pmap_options;
7550 
7551 			/*
7552 			 * Tell pmap the page should be accounted
7553 			 * for as "compressed" if it's been modified.
7554 			 */
7555 			pmap_options =
7556 			    PMAP_OPTIONS_COMPRESSOR_IFF_MODIFIED;
7557 			if (p->vmp_dirty || p->vmp_precious) {
7558 				/*
7559 				 * We already know it's been modified,
7560 				 * so tell pmap to account for it
7561 				 * as "compressed".
7562 				 */
7563 				pmap_options = PMAP_OPTIONS_COMPRESSOR;
7564 			}
7565 			refmod_state = pmap_disconnect_options(VM_PAGE_GET_PHYS_PAGE(p),
7566 			    pmap_options,
7567 			    NULL);
7568 			if (refmod_state & VM_MEM_MODIFIED) {
7569 				SET_PAGE_DIRTY(p, FALSE);
7570 			}
7571 		}
7572 
7573 		if (!p->vmp_dirty && !p->vmp_precious) {
7574 			vm_page_unlock_queues();
7575 			VM_PAGE_FREE(p);
7576 			continue;
7577 		}
7578 		vm_page_queues_remove(p, TRUE);
7579 
7580 		vm_pageout_cluster(p);
7581 
7582 		vm_page_unlock_queues();
7583 	}
7584 	vm_object_unlock(object);
7585 }
7586 
7587 
7588 #if CONFIG_IOSCHED
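/*
 * Queue a request asking the backing device to reprioritize the I/O for
 * the given block range to 'prio', then wake the io_reprioritize_thread
 * to issue it.
 */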
7589 void
7590 vm_page_request_reprioritize(vm_object_t o, uint64_t blkno, uint32_t len, int prio)
7591 {
7592 	io_reprioritize_req_t   req;
7593 	struct vnode            *devvp = NULL;
7594 
7595 	if (vnode_pager_get_object_devvp(o->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7596 		return;
7597 	}
7598 
7599 	/*
7600 	 * Create the request for I/O reprioritization.
7601 	 * We use the noblock variant of zalloc because we're holding the object
7602 	 * lock here and we could cause a deadlock in low memory conditions.
7603 	 */
7604 	req = (io_reprioritize_req_t)zalloc_noblock(io_reprioritize_req_zone);
7605 	if (req == NULL) {
7606 		return;
7607 	}
7608 	req->blkno = blkno;
7609 	req->len = len;
7610 	req->priority = prio;
7611 	req->devvp = devvp;
7612 
7613 	/* Insert request into the reprioritization list */
7614 	IO_REPRIORITIZE_LIST_LOCK();
7615 	queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7616 	IO_REPRIORITIZE_LIST_UNLOCK();
7617 
7618 	/* Wakeup reprioritize thread */
7619 	IO_REPRIO_THREAD_WAKEUP();
7620 
7621 	return;
7622 }
7623 
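/*
 * For a decompression UPL, locate the underlying "real" I/O UPL and queue
 * reprioritization requests for each block range recorded in its
 * upl_reprio_info so the in-flight I/O gets boosted to 'prio'.
 */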
7624 void
7625 vm_decmp_upl_reprioritize(upl_t upl, int prio)
7626 {
7627 	int offset;
7628 	vm_object_t object;
7629 	io_reprioritize_req_t   req;
7630 	struct vnode            *devvp = NULL;
7631 	uint64_t                blkno;
7632 	uint32_t                len;
7633 	upl_t                   io_upl;
7634 	uint64_t                *io_upl_reprio_info;
7635 	int                     io_upl_size;
7636 
7637 	if ((upl->flags & UPL_TRACKED_BY_OBJECT) == 0 || (upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) {
7638 		return;
7639 	}
7640 
7641 	/*
7642 	 * We don't want to perform any allocations with the upl lock held since that might
7643 	 * result in a deadlock. If the system is low on memory, the pageout thread would
7644 	 * try to page out memory and might wait on this lock. If we are waiting for the memory
7645 	 * to be freed up by the pageout thread, it would be a deadlock.
7646 	 */
7647 
7648 
7649 	/* First step is just to get the size of the upl to find out how big the reprio info is */
7650 	if (!upl_try_lock(upl)) {
7651 		return;
7652 	}
7653 
7654 	if (upl->decmp_io_upl == NULL) {
7655 		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7656 		upl_unlock(upl);
7657 		return;
7658 	}
7659 
7660 	io_upl = upl->decmp_io_upl;
7661 	assert((io_upl->flags & UPL_DECMP_REAL_IO) != 0);
7662 	assertf(page_aligned(io_upl->u_offset) && page_aligned(io_upl->u_size),
7663 	    "upl %p offset 0x%llx size 0x%x\n",
7664 	    io_upl, io_upl->u_offset, io_upl->u_size);
7665 	io_upl_size = io_upl->u_size;
7666 	upl_unlock(upl);
7667 
7668 	/* Now perform the allocation */
7669 	io_upl_reprio_info = kalloc_data(sizeof(uint64_t) * atop(io_upl_size), Z_WAITOK);
7670 	if (io_upl_reprio_info == NULL) {
7671 		return;
7672 	}
7673 
7674 	/* Now again take the lock, recheck the state and grab out the required info */
7675 	if (!upl_try_lock(upl)) {
7676 		goto out;
7677 	}
7678 
7679 	if (upl->decmp_io_upl == NULL || upl->decmp_io_upl != io_upl) {
7680 		/* The real I/O upl was destroyed by the time we came in here. Nothing to do. */
7681 		upl_unlock(upl);
7682 		goto out;
7683 	}
7684 	memcpy(io_upl_reprio_info, io_upl->upl_reprio_info,
7685 	    sizeof(uint64_t) * atop(io_upl_size));
7686 
7687 	/* Get the VM object for this UPL */
7688 	if (io_upl->flags & UPL_SHADOWED) {
7689 		object = io_upl->map_object->shadow;
7690 	} else {
7691 		object = io_upl->map_object;
7692 	}
7693 
7694 	/* Get the dev vnode ptr for this object */
7695 	if (!object || !object->pager ||
7696 	    vnode_pager_get_object_devvp(object->pager, (uintptr_t *)&devvp) != KERN_SUCCESS) {
7697 		upl_unlock(upl);
7698 		goto out;
7699 	}
7700 
7701 	upl_unlock(upl);
7702 
7703 	/* Now we have all the information needed to do the expedite */
7704 
7705 	offset = 0;
7706 	while (offset < io_upl_size) {
7707 		blkno   = io_upl_reprio_info[atop(offset)] & UPL_REPRIO_INFO_MASK;
7708 		len     = (io_upl_reprio_info[atop(offset)] >> UPL_REPRIO_INFO_SHIFT) & UPL_REPRIO_INFO_MASK;
7709 
7710 		/*
7711 		 * This implementation may cause some spurious expedites due to the
7712 		 * fact that we don't clean up the blkno & len from the upl_reprio_info
7713 		 * even after the I/O is complete.
7714 		 */
7715 
7716 		if (blkno != 0 && len != 0) {
7717 			/* Create the request for I/O reprioritization */
7718 			req = zalloc_flags(io_reprioritize_req_zone,
7719 			    Z_WAITOK | Z_NOFAIL);
7720 			req->blkno = blkno;
7721 			req->len = len;
7722 			req->priority = prio;
7723 			req->devvp = devvp;
7724 
7725 			/* Insert request into the reprioritization list */
7726 			IO_REPRIORITIZE_LIST_LOCK();
7727 			queue_enter(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7728 			IO_REPRIORITIZE_LIST_UNLOCK();
7729 
7730 			offset += len;
7731 		} else {
7732 			offset += PAGE_SIZE;
7733 		}
7734 	}
7735 
7736 	/* Wakeup reprioritize thread */
7737 	IO_REPRIO_THREAD_WAKEUP();
7738 
7739 out:
7740 	kfree_data(io_upl_reprio_info, sizeof(uint64_t) * atop(io_upl_size));
7741 }
7742 
7743 void
7744 vm_page_handle_prio_inversion(vm_object_t o, vm_page_t m)
7745 {
7746 	upl_t upl;
7747 	upl_page_info_t *pl;
7748 	unsigned int i, num_pages;
7749 	int cur_tier;
7750 
7751 	cur_tier = proc_get_effective_thread_policy(current_thread(), TASK_POLICY_IO);
7752 
7753 	/*
7754 	 *  Scan through all UPLs associated with the object to find the
7755 	 *  UPL containing the contended page.
7756 	 */
7757 	queue_iterate(&o->uplq, upl, upl_t, uplq) {
7758 		if (((upl->flags & UPL_EXPEDITE_SUPPORTED) == 0) || upl->upl_priority <= cur_tier) {
7759 			continue;
7760 		}
7761 		pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
7762 		assertf(page_aligned(upl->u_offset) && page_aligned(upl->u_size),
7763 		    "upl %p offset 0x%llx size 0x%x\n",
7764 		    upl, upl->u_offset, upl->u_size);
7765 		num_pages = (upl->u_size / PAGE_SIZE);
7766 
7767 		/*
7768 		 *  For each page in the UPL page list, see if it matches the contended
7769 		 *  page and was issued as a low prio I/O.
7770 		 */
7771 		for (i = 0; i < num_pages; i++) {
7772 			if (UPL_PAGE_PRESENT(pl, i) && VM_PAGE_GET_PHYS_PAGE(m) == pl[i].phys_addr) {
7773 				if ((upl->flags & UPL_DECMP_REQ) && upl->decmp_io_upl) {
7774 					KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
7775 					    VM_KERNEL_UNSLIDE_OR_PERM(upl), upl->upl_priority, 0);
7776 					vm_decmp_upl_reprioritize(upl, cur_tier);
7777 					break;
7778 				}
7779 				KERNEL_DEBUG_CONSTANT((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_EXPEDITE)) | DBG_FUNC_NONE, VM_KERNEL_UNSLIDE_OR_PERM(upl->upl_creator), VM_KERNEL_UNSLIDE_OR_PERM(m),
7780 				    upl->upl_reprio_info[i], upl->upl_priority, 0);
7781 				if (UPL_REPRIO_INFO_BLKNO(upl, i) != 0 && UPL_REPRIO_INFO_LEN(upl, i) != 0) {
7782 					vm_page_request_reprioritize(o, UPL_REPRIO_INFO_BLKNO(upl, i), UPL_REPRIO_INFO_LEN(upl, i), cur_tier);
7783 				}
7784 				break;
7785 			}
7786 		}
7787 		/* Check if we found any hits */
7788 		if (i != num_pages) {
7789 			break;
7790 		}
7791 	}
7792 
7793 	return;
7794 }
7795 
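/*
 * Block the calling thread until the given page is no longer busy.
 * If the page appears to be tied up by an in-flight I/O on an
 * io_tracking object, first try to resolve the priority inversion by
 * reprioritizing that I/O.
 */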
7796 wait_result_t
7797 vm_page_sleep(vm_object_t o, vm_page_t m, int interruptible)
7798 {
7799 	wait_result_t ret;
7800 
7801 	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_START, o, m, 0, 0, 0);
7802 
7803 	if (o->io_tracking && ((m->vmp_busy == TRUE) || (m->vmp_cleaning == TRUE) || VM_PAGE_WIRED(m))) {
7804 		/*
7805 		 *  Indicates page is busy due to an I/O. Issue a reprioritize request if necessary.
7806 		 */
7807 		vm_page_handle_prio_inversion(o, m);
7808 	}
7809 	m->vmp_wanted = TRUE;
7810 	ret = thread_sleep_vm_object(o, m, interruptible);
7811 	KERNEL_DEBUG((MACHDBG_CODE(DBG_MACH_VM, VM_PAGE_SLEEP)) | DBG_FUNC_END, o, m, 0, 0, 0);
7812 	return ret;
7813 }
7814 
7815 static void
7816 io_reprioritize_thread(void *param __unused, wait_result_t wr __unused)
7817 {
7818 	io_reprioritize_req_t   req = NULL;
7819 
7820 	while (1) {
7821 		IO_REPRIORITIZE_LIST_LOCK();
7822 		if (queue_empty(&io_reprioritize_list)) {
7823 			IO_REPRIORITIZE_LIST_UNLOCK();
7824 			break;
7825 		}
7826 
7827 		queue_remove_first(&io_reprioritize_list, req, io_reprioritize_req_t, io_reprioritize_list);
7828 		IO_REPRIORITIZE_LIST_UNLOCK();
7829 
7830 		vnode_pager_issue_reprioritize_io(req->devvp, req->blkno, req->len, req->priority);
7831 		zfree(io_reprioritize_req_zone, req);
7832 	}
7833 
7834 	IO_REPRIO_THREAD_CONTINUATION();
7835 }
7836 #endif
7837 
7838 #if VM_OBJECT_ACCESS_TRACKING
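/*
 * Swap the object's access-tracking setting with the caller's value,
 * returning the previous setting and read/write counts and resetting the
 * counts to zero.  When tracking is (re)enabled, all mappings are
 * protected to VM_PROT_NONE so future accesses fault and get counted.
 */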
7839 void
7840 vm_object_access_tracking(
7841 	vm_object_t     object,
7842 	int             *access_tracking_p,
7843 	uint32_t        *access_tracking_reads_p,
7844 	uint32_t        *access_tracking_writes_p)
7845 {
7846 	int     access_tracking;
7847 
7848 	access_tracking = !!*access_tracking_p;
7849 
7850 	vm_object_lock(object);
7851 	*access_tracking_p = object->access_tracking;
7852 	if (access_tracking_reads_p) {
7853 		*access_tracking_reads_p = object->access_tracking_reads;
7854 	}
7855 	if (access_tracking_writes_p) {
7856 		*access_tracking_writes_p = object->access_tracking_writes;
7857 	}
7858 	object->access_tracking = access_tracking;
7859 	object->access_tracking_reads = 0;
7860 	object->access_tracking_writes = 0;
7861 	vm_object_unlock(object);
7862 
7863 	if (access_tracking) {
7864 		vm_object_pmap_protect_options(object,
7865 		    0,
7866 		    object->vo_size,
7867 		    PMAP_NULL,
7868 		    PAGE_SIZE,
7869 		    0,
7870 		    VM_PROT_NONE,
7871 		    0);
7872 	}
7873 }
7874 #endif /* VM_OBJECT_ACCESS_TRACKING */
7875 
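/*
 * Translate the object's ledger tag into the task ledger indices that its
 * volatile, nonvolatile, and compressed pages should be charged to, and
 * report whether those pages count toward the task's phys_footprint.
 */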
7876 void
7877 vm_object_ledger_tag_ledgers(
7878 	vm_object_t     object,
7879 	int             *ledger_idx_volatile,
7880 	int             *ledger_idx_nonvolatile,
7881 	int             *ledger_idx_volatile_compressed,
7882 	int             *ledger_idx_nonvolatile_compressed,
7883 	boolean_t       *do_footprint)
7884 {
7885 	assert(object->shadow == VM_OBJECT_NULL);
7886 
7887 	*do_footprint = !object->vo_no_footprint;
7888 
7889 	switch (object->vo_ledger_tag) {
7890 	case VM_LEDGER_TAG_NONE:
7891 		/*
7892 		 * Regular purgeable memory:
7893 		 * counts in footprint only when nonvolatile.
7894 		 */
7895 		*do_footprint = TRUE;
7896 		assert(object->purgable != VM_PURGABLE_DENY);
7897 		*ledger_idx_volatile = task_ledgers.purgeable_volatile;
7898 		*ledger_idx_nonvolatile = task_ledgers.purgeable_nonvolatile;
7899 		*ledger_idx_volatile_compressed = task_ledgers.purgeable_volatile_compressed;
7900 		*ledger_idx_nonvolatile_compressed = task_ledgers.purgeable_nonvolatile_compressed;
7901 		break;
7902 	case VM_LEDGER_TAG_DEFAULT:
7903 		/*
7904 		 * "default" tagged memory:
7905 		 * counts in footprint only when nonvolatile and not marked
7906 		 * as "no_footprint".
7907 		 */
7908 		*ledger_idx_volatile = task_ledgers.tagged_nofootprint;
7909 		*ledger_idx_volatile_compressed = task_ledgers.tagged_nofootprint_compressed;
7910 		if (*do_footprint) {
7911 			*ledger_idx_nonvolatile = task_ledgers.tagged_footprint;
7912 			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_footprint_compressed;
7913 		} else {
7914 			*ledger_idx_nonvolatile = task_ledgers.tagged_nofootprint;
7915 			*ledger_idx_nonvolatile_compressed = task_ledgers.tagged_nofootprint_compressed;
7916 		}
7917 		break;
7918 	case VM_LEDGER_TAG_NETWORK:
7919 		/*
7920 		 * "network" tagged memory:
7921 		 * never counts in footprint.
7922 		 */
7923 		*do_footprint = FALSE;
7924 		*ledger_idx_volatile = task_ledgers.network_volatile;
7925 		*ledger_idx_volatile_compressed = task_ledgers.network_volatile_compressed;
7926 		*ledger_idx_nonvolatile = task_ledgers.network_nonvolatile;
7927 		*ledger_idx_nonvolatile_compressed = task_ledgers.network_nonvolatile_compressed;
7928 		break;
7929 	case VM_LEDGER_TAG_MEDIA:
7930 		/*
7931 		 * "media" tagged memory:
7932 		 * counts in footprint only when nonvolatile and not marked
7933 		 * as "no footprint".
7934 		 */
7935 		*ledger_idx_volatile = task_ledgers.media_nofootprint;
7936 		*ledger_idx_volatile_compressed = task_ledgers.media_nofootprint_compressed;
7937 		if (*do_footprint) {
7938 			*ledger_idx_nonvolatile = task_ledgers.media_footprint;
7939 			*ledger_idx_nonvolatile_compressed = task_ledgers.media_footprint_compressed;
7940 		} else {
7941 			*ledger_idx_nonvolatile = task_ledgers.media_nofootprint;
7942 			*ledger_idx_nonvolatile_compressed = task_ledgers.media_nofootprint_compressed;
7943 		}
7944 		break;
7945 	case VM_LEDGER_TAG_GRAPHICS:
7946 		/*
7947 		 * "graphics" tagged memory:
7948 		 * counts in footprint only when nonvolatile and not marked
7949 		 * as "no footprint".
7950 		 */
7951 		*ledger_idx_volatile = task_ledgers.graphics_nofootprint;
7952 		*ledger_idx_volatile_compressed = task_ledgers.graphics_nofootprint_compressed;
7953 		if (*do_footprint) {
7954 			*ledger_idx_nonvolatile = task_ledgers.graphics_footprint;
7955 			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_footprint_compressed;
7956 		} else {
7957 			*ledger_idx_nonvolatile = task_ledgers.graphics_nofootprint;
7958 			*ledger_idx_nonvolatile_compressed = task_ledgers.graphics_nofootprint_compressed;
7959 		}
7960 		break;
7961 	case VM_LEDGER_TAG_NEURAL:
7962 		/*
7963 		 * "neural" tagged memory:
7964 		 * counts in footprint only when nonvolatile and not marked
7965 		 * as "no footprint".
7966 		 */
7967 		*ledger_idx_volatile = task_ledgers.neural_nofootprint;
7968 		*ledger_idx_volatile_compressed = task_ledgers.neural_nofootprint_compressed;
7969 		if (*do_footprint) {
7970 			*ledger_idx_nonvolatile = task_ledgers.neural_footprint;
7971 			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_footprint_compressed;
7972 		} else {
7973 			*ledger_idx_nonvolatile = task_ledgers.neural_nofootprint;
7974 			*ledger_idx_nonvolatile_compressed = task_ledgers.neural_nofootprint_compressed;
7975 		}
7976 		break;
7977 	default:
7978 		panic("%s: object %p has unsupported ledger_tag %d",
7979 		    __FUNCTION__, object, object->vo_ledger_tag);
7980 	}
7981 }
7982 
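/*
 * Change an internal object's (owner, ledger tag, no-footprint) triple:
 * debit the old owner's ledgers for the object's resident, wired and
 * compressed pages, retag the object, credit the new owner's ledgers, and
 * move the object between the tasks' owned-object queues as needed.
 */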
7983 kern_return_t
7984 vm_object_ownership_change(
7985 	vm_object_t     object,
7986 	int             new_ledger_tag,
7987 	task_t          new_owner,
7988 	int             new_ledger_flags,
7989 	boolean_t       old_task_objq_locked)
7990 {
7991 	int             old_ledger_tag;
7992 	task_t          old_owner;
7993 	int             resident_count, wired_count;
7994 	unsigned int    compressed_count;
7995 	int             ledger_idx_volatile;
7996 	int             ledger_idx_nonvolatile;
7997 	int             ledger_idx_volatile_compressed;
7998 	int             ledger_idx_nonvolatile_compressed;
7999 	int             ledger_idx;
8000 	int             ledger_idx_compressed;
8001 	boolean_t       do_footprint, old_no_footprint, new_no_footprint;
8002 	boolean_t       new_task_objq_locked;
8003 
8004 	vm_object_lock_assert_exclusive(object);
8005 
8006 	if (!object->internal) {
8007 		return KERN_INVALID_ARGUMENT;
8008 	}
8009 	if (new_owner == VM_OBJECT_OWNER_UNCHANGED) {
8010 		/* leave owner unchanged */
8011 		new_owner = VM_OBJECT_OWNER(object);
8012 	}
8013 	if (new_ledger_tag == VM_LEDGER_TAG_UNCHANGED) {
8014 		/* leave ledger_tag unchanged */
8015 		new_ledger_tag = object->vo_ledger_tag;
8016 	}
8017 	if (new_ledger_tag == VM_LEDGER_TAG_NONE &&
8018 	    object->purgable == VM_PURGABLE_DENY) {
8019 		/* non-purgeable memory must have a valid non-zero ledger tag */
8020 		return KERN_INVALID_ARGUMENT;
8021 	}
8022 	if (new_ledger_tag < 0 ||
8023 	    new_ledger_tag > VM_LEDGER_TAG_MAX) {
8024 		return KERN_INVALID_ARGUMENT;
8025 	}
8026 	if (new_ledger_flags & ~VM_LEDGER_FLAGS) {
8027 		return KERN_INVALID_ARGUMENT;
8028 	}
8029 	if (object->vo_ledger_tag == VM_LEDGER_TAG_NONE &&
8030 	    object->purgable == VM_PURGABLE_DENY) {
8031 		/*
8032 		 * This VM object is neither ledger-tagged nor purgeable.
8033 		 * We can convert it to "ledger tag" ownership iff it
8034 		 * has not been used at all yet (no resident pages and
8035 		 * no pager) and it's going to be assigned to a valid task.
8036 		 */
8037 		if (object->resident_page_count != 0 ||
8038 		    object->pager != NULL ||
8039 		    object->pager_created ||
8040 		    object->ref_count != 1 ||
8041 		    object->vo_owner != TASK_NULL ||
8042 		    object->copy_strategy != MEMORY_OBJECT_COPY_NONE ||
8043 		    new_owner == TASK_NULL) {
8044 			return KERN_FAILURE;
8045 		}
8046 	}
8047 
8048 	if (new_ledger_flags & VM_LEDGER_FLAG_NO_FOOTPRINT) {
8049 		new_no_footprint = TRUE;
8050 	} else {
8051 		new_no_footprint = FALSE;
8052 	}
8053 #if __arm64__
8054 	if (!new_no_footprint &&
8055 	    object->purgable != VM_PURGABLE_DENY &&
8056 	    new_owner != TASK_NULL &&
8057 	    new_owner != VM_OBJECT_OWNER_DISOWNED &&
8058 	    new_owner->task_legacy_footprint) {
8059 		/*
8060 		 * This task has been granted "legacy footprint" and should
8061 		 * not be charged for its IOKit purgeable memory.  Since we
8062 		 * might now change the accounting of such memory to the
8063 		 * "graphics" ledger, for example, give it the "no footprint"
8064 		 * option.
8065 		 */
8066 		new_no_footprint = TRUE;
8067 	}
8068 #endif /* __arm64__ */
8069 	assert(object->copy_strategy == MEMORY_OBJECT_COPY_NONE);
8070 	assert(object->shadow == VM_OBJECT_NULL);
8071 	assert(object->vo_copy == VM_OBJECT_NULL);
8072 
8073 	old_ledger_tag = object->vo_ledger_tag;
8074 	old_no_footprint = object->vo_no_footprint;
8075 	old_owner = VM_OBJECT_OWNER(object);
8076 
8077 	DTRACE_VM8(object_ownership_change,
8078 	    vm_object_t, object,
8079 	    task_t, old_owner,
8080 	    int, old_ledger_tag,
8081 	    int, old_no_footprint,
8082 	    task_t, new_owner,
8083 	    int, new_ledger_tag,
8084 	    int, new_no_footprint,
8085 	    int, VM_OBJECT_ID(object));
8086 
8087 	assert(object->internal);
8088 	resident_count = object->resident_page_count - object->wired_page_count;
8089 	wired_count = object->wired_page_count;
8090 	compressed_count = vm_compressor_pager_get_count(object->pager);
8091 
8092 	/*
8093 	 * Deal with the old owner and/or ledger tag, if needed.
8094 	 */
8095 	if (old_owner != TASK_NULL &&
8096 	    ((old_owner != new_owner)           /* new owner ... */
8097 	    ||                                  /* ... or ... */
8098 	    (old_no_footprint != new_no_footprint) /* new "no_footprint" */
8099 	    ||                                  /* ... or ... */
8100 	    old_ledger_tag != new_ledger_tag)) { /* ... new ledger */
8101 		/*
8102 		 * Take this object off of the old owner's ledgers.
8103 		 */
8104 		vm_object_ledger_tag_ledgers(object,
8105 		    &ledger_idx_volatile,
8106 		    &ledger_idx_nonvolatile,
8107 		    &ledger_idx_volatile_compressed,
8108 		    &ledger_idx_nonvolatile_compressed,
8109 		    &do_footprint);
8110 		if (object->purgable == VM_PURGABLE_VOLATILE ||
8111 		    object->purgable == VM_PURGABLE_EMPTY) {
8112 			ledger_idx = ledger_idx_volatile;
8113 			ledger_idx_compressed = ledger_idx_volatile_compressed;
8114 		} else {
8115 			ledger_idx = ledger_idx_nonvolatile;
8116 			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8117 		}
8118 		if (resident_count) {
8119 			/*
8120 			 * Adjust the appropriate old owner's ledgers by the
8121 			 * number of resident pages.
8122 			 */
8123 			ledger_debit(old_owner->ledger,
8124 			    ledger_idx,
8125 			    ptoa_64(resident_count));
8126 			/* adjust old owner's footprint */
8127 			if (do_footprint &&
8128 			    object->purgable != VM_PURGABLE_VOLATILE &&
8129 			    object->purgable != VM_PURGABLE_EMPTY) {
8130 				ledger_debit(old_owner->ledger,
8131 				    task_ledgers.phys_footprint,
8132 				    ptoa_64(resident_count));
8133 			}
8134 		}
8135 		if (wired_count) {
8136 			/* wired pages are always nonvolatile */
8137 			ledger_debit(old_owner->ledger,
8138 			    ledger_idx_nonvolatile,
8139 			    ptoa_64(wired_count));
8140 			if (do_footprint) {
8141 				ledger_debit(old_owner->ledger,
8142 				    task_ledgers.phys_footprint,
8143 				    ptoa_64(wired_count));
8144 			}
8145 		}
8146 		if (compressed_count) {
8147 			/*
8148 			 * Adjust the appropriate old owner's ledgers
8149 			 * by the number of compressed pages.
8150 			 */
8151 			ledger_debit(old_owner->ledger,
8152 			    ledger_idx_compressed,
8153 			    ptoa_64(compressed_count));
8154 			if (do_footprint &&
8155 			    object->purgable != VM_PURGABLE_VOLATILE &&
8156 			    object->purgable != VM_PURGABLE_EMPTY) {
8157 				ledger_debit(old_owner->ledger,
8158 				    task_ledgers.phys_footprint,
8159 				    ptoa_64(compressed_count));
8160 			}
8161 		}
8162 		if (old_owner != new_owner) {
8163 			/* remove object from old_owner's list of owned objects */
8164 			DTRACE_VM2(object_owner_remove,
8165 			    vm_object_t, object,
8166 			    task_t, old_owner);
8167 			if (!old_task_objq_locked) {
8168 				task_objq_lock(old_owner);
8169 			}
8170 			old_owner->task_owned_objects--;
8171 			queue_remove(&old_owner->task_objq, object,
8172 			    vm_object_t, task_objq);
8173 			switch (object->purgable) {
8174 			case VM_PURGABLE_NONVOLATILE:
8175 			case VM_PURGABLE_EMPTY:
8176 				vm_purgeable_nonvolatile_owner_update(old_owner,
8177 				    -1);
8178 				break;
8179 			case VM_PURGABLE_VOLATILE:
8180 				vm_purgeable_volatile_owner_update(old_owner,
8181 				    -1);
8182 				break;
8183 			default:
8184 				break;
8185 			}
8186 			if (!old_task_objq_locked) {
8187 				task_objq_unlock(old_owner);
8188 			}
8189 		}
8190 	}
8191 
8192 	/*
8193 	 * Switch to new ledger tag and/or owner.
8194 	 */
8195 
8196 	new_task_objq_locked = FALSE;
8197 	if (new_owner != old_owner &&
8198 	    new_owner != TASK_NULL &&
8199 	    new_owner != VM_OBJECT_OWNER_DISOWNED) {
8200 		/*
8201 		 * If the new owner is not accepting new objects ("disowning"),
8202 		 * the object becomes "disowned" and will be added to
8203 		 * the kernel's task_objq.
8204 		 *
8205 		 * Check first without locking, to avoid blocking while the
8206 		 * task is disowning its objects.
8207 		 */
8208 		if (new_owner->task_objects_disowning) {
8209 			new_owner = VM_OBJECT_OWNER_DISOWNED;
8210 		} else {
8211 			task_objq_lock(new_owner);
8212 			/* check again now that we have the lock */
8213 			if (new_owner->task_objects_disowning) {
8214 				new_owner = VM_OBJECT_OWNER_DISOWNED;
8215 				task_objq_unlock(new_owner);
8216 			} else {
8217 				new_task_objq_locked = TRUE;
8218 			}
8219 		}
8220 	}
8221 
8222 	object->vo_ledger_tag = new_ledger_tag;
8223 	object->vo_owner = new_owner;
8224 	object->vo_no_footprint = new_no_footprint;
8225 
8226 	if (new_owner == VM_OBJECT_OWNER_DISOWNED) {
8227 		/*
8228 		 * Disowned objects are added to the kernel's task_objq but
8229 		 * are marked as owned by "VM_OBJECT_OWNER_DISOWNED" to
8230 		 * differentiate them from objects intentionally owned by
8231 		 * the kernel.
8232 		 */
8233 		assert(old_owner != kernel_task);
8234 		new_owner = kernel_task;
8235 		assert(!new_task_objq_locked);
8236 		task_objq_lock(new_owner);
8237 		new_task_objq_locked = TRUE;
8238 	}
8239 
8240 	/*
8241 	 * Deal with the new owner and/or ledger tag, if needed.
8242 	 */
8243 	if (new_owner != TASK_NULL &&
8244 	    ((new_owner != old_owner)           /* new owner ... */
8245 	    ||                                  /* ... or ... */
8246 	    (new_no_footprint != old_no_footprint) /* ... new "no_footprint" */
8247 	    ||                                  /* ... or ... */
8248 	    new_ledger_tag != old_ledger_tag)) { /* ... new ledger */
8249 		/*
8250 		 * Add this object to the new owner's ledgers.
8251 		 */
8252 		vm_object_ledger_tag_ledgers(object,
8253 		    &ledger_idx_volatile,
8254 		    &ledger_idx_nonvolatile,
8255 		    &ledger_idx_volatile_compressed,
8256 		    &ledger_idx_nonvolatile_compressed,
8257 		    &do_footprint);
8258 		if (object->purgable == VM_PURGABLE_VOLATILE ||
8259 		    object->purgable == VM_PURGABLE_EMPTY) {
8260 			ledger_idx = ledger_idx_volatile;
8261 			ledger_idx_compressed = ledger_idx_volatile_compressed;
8262 		} else {
8263 			ledger_idx = ledger_idx_nonvolatile;
8264 			ledger_idx_compressed = ledger_idx_nonvolatile_compressed;
8265 		}
8266 		if (resident_count) {
8267 			/*
8268 			 * Adjust the appropriate new owner's ledgers by the
8269 			 * number of resident pages.
8270 			 */
8271 			ledger_credit(new_owner->ledger,
8272 			    ledger_idx,
8273 			    ptoa_64(resident_count));
8274 			/* adjust new owner's footprint */
8275 			if (do_footprint &&
8276 			    object->purgable != VM_PURGABLE_VOLATILE &&
8277 			    object->purgable != VM_PURGABLE_EMPTY) {
8278 				ledger_credit(new_owner->ledger,
8279 				    task_ledgers.phys_footprint,
8280 				    ptoa_64(resident_count));
8281 			}
8282 		}
8283 		if (wired_count) {
8284 			/* wired pages are always nonvolatile */
8285 			ledger_credit(new_owner->ledger,
8286 			    ledger_idx_nonvolatile,
8287 			    ptoa_64(wired_count));
8288 			if (do_footprint) {
8289 				ledger_credit(new_owner->ledger,
8290 				    task_ledgers.phys_footprint,
8291 				    ptoa_64(wired_count));
8292 			}
8293 		}
8294 		if (compressed_count) {
8295 			/*
8296 			 * Adjust the new owner's ledgers by the number of
8297 			 * compressed pages.
8298 			 */
8299 			ledger_credit(new_owner->ledger,
8300 			    ledger_idx_compressed,
8301 			    ptoa_64(compressed_count));
8302 			if (do_footprint &&
8303 			    object->purgable != VM_PURGABLE_VOLATILE &&
8304 			    object->purgable != VM_PURGABLE_EMPTY) {
8305 				ledger_credit(new_owner->ledger,
8306 				    task_ledgers.phys_footprint,
8307 				    ptoa_64(compressed_count));
8308 			}
8309 		}
8310 		if (new_owner != old_owner) {
8311 			/* add object to new_owner's list of owned objects */
8312 			DTRACE_VM2(object_owner_add,
8313 			    vm_object_t, object,
8314 			    task_t, new_owner);
8315 			assert(new_task_objq_locked);
8316 			new_owner->task_owned_objects++;
8317 			queue_enter(&new_owner->task_objq, object,
8318 			    vm_object_t, task_objq);
8319 			switch (object->purgable) {
8320 			case VM_PURGABLE_NONVOLATILE:
8321 			case VM_PURGABLE_EMPTY:
8322 				vm_purgeable_nonvolatile_owner_update(new_owner,
8323 				    +1);
8324 				break;
8325 			case VM_PURGABLE_VOLATILE:
8326 				vm_purgeable_volatile_owner_update(new_owner,
8327 				    +1);
8328 				break;
8329 			default:
8330 				break;
8331 			}
8332 		}
8333 	}
8334 
8335 	if (new_task_objq_locked) {
8336 		task_objq_unlock(new_owner);
8337 	}
8338 
8339 	return KERN_SUCCESS;
8340 }
8341 
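/*
 * Hand every object still owned by "task" over to the kernel's "disowned"
 * list via vm_object_ownership_change(), retrying with a short pause
 * whenever an object's lock cannot be taken without blocking.
 */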
8342 void
8343 vm_owned_objects_disown(
8344 	task_t  task)
8345 {
8346 	vm_object_t     next_object;
8347 	vm_object_t     object;
8348 	int             collisions;
8349 	kern_return_t   kr;
8350 
8351 	if (task == NULL) {
8352 		return;
8353 	}
8354 
8355 	collisions = 0;
8356 
8357 again:
8358 	if (task->task_objects_disowned) {
8359 		/* task has already disowned its owned objects */
8360 		assert(task->task_volatile_objects == 0);
8361 		assert(task->task_nonvolatile_objects == 0);
8362 		assert(task->task_owned_objects == 0);
8363 		return;
8364 	}
8365 
8366 	task_objq_lock(task);
8367 
8368 	task->task_objects_disowning = TRUE;
8369 
8370 	for (object = (vm_object_t) queue_first(&task->task_objq);
8371 	    !queue_end(&task->task_objq, (queue_entry_t) object);
8372 	    object = next_object) {
8373 		if (task->task_nonvolatile_objects == 0 &&
8374 		    task->task_volatile_objects == 0 &&
8375 		    task->task_owned_objects == 0) {
8376 			/* no more objects owned by "task" */
8377 			break;
8378 		}
8379 
8380 		next_object = (vm_object_t) queue_next(&object->task_objq);
8381 
8382 #if DEBUG
8383 		assert(object->vo_purgeable_volatilizer == NULL);
8384 #endif /* DEBUG */
8385 		assert(object->vo_owner == task);
8386 		if (!vm_object_lock_try(object)) {
8387 			task_objq_unlock(task);
8388 			mutex_pause(collisions++);
8389 			goto again;
8390 		}
8391 		/* transfer ownership to the kernel */
8392 		assert(VM_OBJECT_OWNER(object) != kernel_task);
8393 		kr = vm_object_ownership_change(
8394 			object,
8395 			object->vo_ledger_tag, /* unchanged */
8396 			VM_OBJECT_OWNER_DISOWNED, /* new owner */
8397 			0, /* new_ledger_flags */
8398 			TRUE);  /* old_owner->task_objq locked */
8399 		assert(kr == KERN_SUCCESS);
8400 		assert(object->vo_owner == VM_OBJECT_OWNER_DISOWNED);
8401 		vm_object_unlock(object);
8402 	}
8403 
8404 	if (__improbable(task->task_owned_objects != 0)) {
8405 		panic("%s(%p): volatile=%d nonvolatile=%d owned=%d q=%p q_first=%p q_last=%p",
8406 		    __FUNCTION__,
8407 		    task,
8408 		    task->task_volatile_objects,
8409 		    task->task_nonvolatile_objects,
8410 		    task->task_owned_objects,
8411 		    &task->task_objq,
8412 		    queue_first(&task->task_objq),
8413 		    queue_last(&task->task_objq));
8414 	}
8415 
8416 	/* there shouldn't be any objects owned by task now */
8417 	assert(task->task_volatile_objects == 0);
8418 	assert(task->task_nonvolatile_objects == 0);
8419 	assert(task->task_owned_objects == 0);
8420 	assert(task->task_objects_disowning);
8421 
8422 	/* and we don't need to try and disown again */
8423 	task->task_objects_disowned = TRUE;
8424 
8425 	task_objq_unlock(task);
8426 }
8427